hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
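Each row below pairs a HIP source file produced by hipify with the CUDA original it was translated from. As a minimal orientation sketch (not taken from any row), the snippet shows the substitutions that account for most of the differences in the pairs: runtime calls are renamed from the `cuda*` to the `hip*` family, `"hip/hip_runtime.h"` is added, and the `<<<grid, block>>>` launch syntax becomes `hipLaunchKernelGGL`.

```cpp
// Minimal orientation sketch (not a dataset row): the HIP form of a toy program,
// with the corresponding CUDA spelling noted in comments.
#include "hip/hip_runtime.h"                       // added by hipify

__global__ void scale(float* x, float a, int n) {  // kernel code is unchanged
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main() {
    const int n = 1 << 20;
    float* d_x;
    hipMalloc(&d_x, n * sizeof(float));            // CUDA: cudaMalloc(...)
    hipMemset(d_x, 0, n * sizeof(float));          // CUDA: cudaMemset(...)
    dim3 block(256), grid((n + block.x - 1) / block.x);
    hipLaunchKernelGGL(scale, grid, block, 0, 0,   // CUDA: scale<<<grid, block>>>(d_x, 2.f, n)
                       d_x, 2.f, n);
    hipDeviceSynchronize();                        // CUDA: cudaDeviceSynchronize()
    hipFree(d_x);                                  // CUDA: cudaFree(d_x)
    return 0;
}
```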
0cb30e44b8f105ea002f0d0ff20caf95ce120154.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdint.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
__global__
void filter(int height, int width, double *ker, double *d_img, double *d_img_res)
{
int gidx = threadIdx.x + blockDim.x * blockIdx.x;
int j = gidx / width / 3;
int i = gidx / 3 - j * width;
int ch = gidx - i * 3 - j * width * 3;
int size = height * width * 3;
if (gidx < size)
{
if (i == 0 || j == 0 || i == width - 1 || j == height - 1)
{
d_img_res[j*width*3 + i*3 + ch] = d_img[j*width*3 + i*3 + ch];
}
else
{
d_img_res[j*width*3 + i*3 + ch] = (d_img[j*width*3 + i*3 + ch]*ker[4] + \
d_img[(j + 1) *width * 3 + (i - 1) * 3 + ch]*ker[0] + \
d_img[(j + 1) *width * 3 + (i + 1) * 3 + ch]*ker[8] + \
d_img[(j - 1) *width * 3 + (i - 1) * 3 + ch]*ker[6] + \
d_img[(j - 1) *width * 3 + (i + 1) * 3 + ch]*ker[2] + \
d_img[(j + 1) *width * 3 + i * 3 + ch]*ker[3] + \
d_img[j *width * 3 + (i - 1) * 3 + ch]*ker[1] + \
d_img[(j - 1) *width * 3 + i * 3 + ch]*ker[5] + \
d_img[j * width * 3 + (i + 1)*3 + ch]*ker[7]);
}
if (d_img_res[j*width*3 +i*3 + ch] < 0)
{
d_img_res[j*width*3 + i*3 + ch] = 0;
}
}
}
int main(int argc, char **argv)
{
int width, height, bpp, size;
double *ker = (double *) calloc(sizeof(double), 9);
double *d_ker;
char *ker_name;
ker_name = argv[1];
if (strcmp(ker_name, "edge") == 0)
{
ker[0] = ker[6] = ker[2] = ker[8] = -1;
ker[1] = ker[3] = ker[7] = ker[5] = -1;
ker[4] = 8;
}
else if (strcmp(ker_name, "sharpen") == 0)
{
ker[0] = ker[6] = ker[2] = ker[8] = 0;
ker[1] = ker[3] = ker[7] = ker[5] = -1;
ker[4] = 5;
}
else if (strcmp(ker_name, "gaussian") == 0)
{
ker[0] = ker[6] = ker[2] = ker[8] = 1 / 16.;
ker[1] = ker[3] = ker[7] = ker[5] = 2 / 16.;
ker[4] = 4 / 16.;
}
hipMalloc(&d_ker, sizeof(double)*9);
hipMemcpy(d_ker, ker, sizeof(double) * 9, hipMemcpyHostToDevice);
uint8_t* h_img = stbi_load("corgi.jpg", &width, &height, &bpp, 3);
size = height * width * 3;
double * h_buf = (double *) malloc(sizeof(double) * size);
double *d_img;
double *d_img_res;
hipMalloc(&d_img, sizeof(double) * size);
hipMalloc(&d_img_res, sizeof(double) * size);
for (int i = 0; i < size; i++)
{
h_buf[i] = (double) h_img[i];
}
hipMemcpy(d_img, h_buf, sizeof(double) * size, hipMemcpyHostToDevice);
int block_size, grid_size;
block_size = 1024;
grid_size = (size + block_size - 1) / block_size; // round up so the final partial block is launched
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
hipLaunchKernelGGL(( filter), dim3(dimGrid), dim3(dimBlock), 0, 0, height, width, d_ker, d_img, d_img_res);
hipDeviceSynchronize();
double *h_buf_res = (double *)malloc(sizeof(double) * size);
hipMemcpy(h_buf_res, d_img_res, sizeof(double) * size, hipMemcpyDeviceToHost);
for (int i = 0; i < size; i++)
{
h_img[i] = uint8_t (h_buf_res[i]);
}
stbi_write_png("res_corgi.jpg", width, height, 3, h_img, width * 3);
return 0;
}
| 0cb30e44b8f105ea002f0d0ff20caf95ce120154.cu | #include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdint.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
__global__
void filter(int height, int width, double *ker, double *d_img, double *d_img_res)
{
int gidx = threadIdx.x + blockDim.x * blockIdx.x;
int j = gidx / width / 3;
int i = gidx / 3 - j * width;
int ch = gidx - i * 3 - j * width * 3;
int size = height * width * 3;
if (gidx < size)
{
if (i == 0 || j == 0 || i == width - 1 || j == height - 1)
{
d_img_res[j*width*3 + i*3 + ch] = d_img[j*width*3 + i*3 + ch];
}
else
{
d_img_res[j*width*3 + i*3 + ch] = (d_img[j*width*3 + i*3 + ch]*ker[4] + \
d_img[(j + 1) *width * 3 + (i - 1) * 3 + ch]*ker[0] + \
d_img[(j + 1) *width * 3 + (i + 1) * 3 + ch]*ker[8] + \
d_img[(j - 1) *width * 3 + (i - 1) * 3 + ch]*ker[6] + \
d_img[(j - 1) *width * 3 + (i + 1) * 3 + ch]*ker[2] + \
d_img[(j + 1) *width * 3 + i * 3 + ch]*ker[3] + \
d_img[j *width * 3 + (i - 1) * 3 + ch]*ker[1] + \
d_img[(j - 1) *width * 3 + i * 3 + ch]*ker[5] + \
d_img[j * width * 3 + (i + 1)*3 + ch]*ker[7]);
}
if (d_img_res[j*width*3 +i*3 + ch] < 0)
{
d_img_res[j*width*3 + i*3 + ch] = 0;
}
}
}
int main(int argc, char **argv)
{
int width, height, bpp, size;
double *ker = (double *) calloc(sizeof(double), 9);
double *d_ker;
char *ker_name;
ker_name = argv[1];
if (strcmp(ker_name, "edge") == 0)
{
ker[0] = ker[6] = ker[2] = ker[8] = -1;
ker[1] = ker[3] = ker[7] = ker[5] = -1;
ker[4] = 8;
}
else if (strcmp(ker_name, "sharpen") == 0)
{
ker[0] = ker[6] = ker[2] = ker[8] = 0;
ker[1] = ker[3] = ker[7] = ker[5] = -1;
ker[4] = 5;
}
else if (strcmp(ker_name, "gaussian") == 0)
{
ker[0] = ker[6] = ker[2] = ker[8] = 1 / 16.;
ker[1] = ker[3] = ker[7] = ker[5] = 2 / 16.;
ker[4] = 4 / 16.;
}
cudaMalloc(&d_ker, sizeof(double)*9);
cudaMemcpy(d_ker, ker, sizeof(double) * 9, cudaMemcpyHostToDevice);
uint8_t* h_img = stbi_load("corgi.jpg", &width, &height, &bpp, 3);
size = height * width * 3;
double * h_buf = (double *) malloc(sizeof(double) * size);
double *d_img;
double *d_img_res;
cudaMalloc(&d_img, sizeof(double) * size);
cudaMalloc(&d_img_res, sizeof(double) * size);
for (int i = 0; i < size; i++)
{
h_buf[i] = (double) h_img[i];
}
cudaMemcpy(d_img, h_buf, sizeof(double) * size, cudaMemcpyHostToDevice);
int block_size, grid_size;
block_size = 1024;
grid_size = (size + block_size - 1) / block_size; // round up so the final partial block is launched
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
filter<<<dimGrid, dimBlock>>>(height, width, d_ker, d_img, d_img_res);
cudaDeviceSynchronize();
double *h_buf_res = (double *)malloc(sizeof(double) * size);
cudaMemcpy(h_buf_res, d_img_res, sizeof(double) * size, cudaMemcpyDeviceToHost);
for (int i = 0; i < size; i++)
{
h_img[i] = uint8_t (h_buf_res[i]);
}
stbi_write_png("res_corgi.jpg", width, height, 3, h_img, width * 3);
return 0;
}
|
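One detail worth noting in the pair above: the kernel clamps negative results to zero, but nothing clamps values above 255 before the host loop narrows the doubles back to `uint8_t`, so bright pixels produced by the `edge` and `sharpen` kernels can wrap around. A minimal saturating-cast sketch for that host loop (illustrative, not part of the row):

```cpp
// Saturating narrow from double to uint8_t; clamps into [0, 255] before the cast.
#include <stdint.h>
#include <math.h>

static inline uint8_t to_u8_saturating(double v) {
    return (uint8_t)fmin(fmax(v, 0.0), 255.0);
}
// usage in the host loop:  h_img[i] = to_u8_saturating(h_buf_res[i]);
```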
bbb12b52f5361b985f33ed96a3b0d89441f250a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/sequence_ops.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void AddPaddingKernel(
const T* in,
int block_size,
int lengths_size,
int outer_size,
const int32_t* lengths_prefix_sum,
const T* padding_start_ptr,
int start_padding_width_blocks,
const T* padding_end_ptr,
int end_padding_width_blocks,
T* out,
int32_t* lengths_out) {
int element_idx = blockIdx.x;
int prior_padding =
element_idx * (start_padding_width_blocks + end_padding_width_blocks);
int out_start_idx = element_idx == 0
? 0
: lengths_prefix_sum[element_idx - 1] + prior_padding;
int len_blocks;
int in_start_idx;
if (lengths_prefix_sum) {
len_blocks = lengths_prefix_sum[element_idx] -
(element_idx == 0 ? 0 : lengths_prefix_sum[element_idx - 1]);
in_start_idx = lengths_prefix_sum[element_idx] - len_blocks;
} else {
// Only one element, use the outer size
CUDA_KERNEL_ASSERT(lengths_size == 1);
len_blocks = outer_size;
in_start_idx = 0;
}
out_start_idx *= block_size;
in_start_idx *= block_size;
int len = len_blocks * block_size;
int start_padding_width = start_padding_width_blocks * block_size;
int end_padding_width = end_padding_width_blocks * block_size;
// start pad
T* out_ptr = out + out_start_idx;
for (int i = threadIdx.x; i < start_padding_width; i += blockDim.x) {
T fill = padding_start_ptr ? padding_start_ptr[i % block_size] : T(0);
out_ptr[i] = fill;
}
// payload
for (int i = threadIdx.x; i < len; i += blockDim.x) {
out_ptr[i + start_padding_width] = in[in_start_idx + i];
}
// end pad
for (int i = threadIdx.x; i < end_padding_width; i += blockDim.x) {
T fill = padding_end_ptr ? padding_end_ptr[i % block_size] : T(0);
out_ptr[i + start_padding_width + len] = fill;
}
// update the lengths
if (threadIdx.x == 0 && lengths_out != nullptr) {
lengths_out[element_idx] =
len_blocks + start_padding_width_blocks + end_padding_width_blocks;
}
}
template <typename T>
__global__ void RemovePaddingKernel(
const T* in,
int block_size,
int lengths_size,
int outer_size,
const int32_t* lengths_prefix_sum,
int start_padding_width_blocks,
int end_padding_width_blocks,
T* out,
int32_t* lengths_out) {
int element_idx = blockIdx.x;
int prior_padding =
element_idx * (start_padding_width_blocks + end_padding_width_blocks);
int out_start_idx = element_idx == 0
? 0
: lengths_prefix_sum[element_idx - 1] - prior_padding;
int len_blocks;
int in_start_idx;
if (lengths_prefix_sum) {
len_blocks = lengths_prefix_sum[element_idx] -
(element_idx == 0 ? 0 : lengths_prefix_sum[element_idx - 1]);
in_start_idx = lengths_prefix_sum[element_idx] - len_blocks;
} else {
// Only one element, use the outer size
CUDA_KERNEL_ASSERT(lengths_size == 1);
len_blocks = outer_size;
in_start_idx = 0;
}
out_start_idx *= block_size;
in_start_idx *= block_size;
int len = len_blocks * block_size;
int start_padding_width = start_padding_width_blocks * block_size;
// payload
T* out_ptr = out + out_start_idx;
for (int i = threadIdx.x; i < len; i += blockDim.x) {
out_ptr[in_start_idx + i] = in[i + start_padding_width];
}
// update the lengths
if (threadIdx.x == 0 && lengths_out != nullptr) {
lengths_out[element_idx] =
len_blocks - (start_padding_width_blocks + end_padding_width_blocks);
}
}
void lengths_prefix_sum(
const int32_t* lengths,
int32_t num_items,
Tensor<CUDAContext>* prefix_buffer,
Tensor<CUDAContext>* prefix_sum,
CUDAContext* context) {
// Retrieve buffer size
size_t temp_storage_bytes = 0;
prefix_sum->Resize(num_items);
hipcub::DeviceScan::InclusiveSum(
NULL,
temp_storage_bytes,
lengths,
prefix_sum->mutable_data<int32_t>(),
num_items,
context->cuda_stream());
// Allocate temporary storage
auto buffer_size = (temp_storage_bytes + sizeof(int32_t)) / sizeof(int32_t);
prefix_buffer->Resize(buffer_size);
void* d_temp_storage =
static_cast<void*>(prefix_buffer->mutable_data<int32_t>());
hipcub::DeviceScan::InclusiveSum(
d_temp_storage,
temp_storage_bytes,
lengths,
prefix_sum->mutable_data<int32_t>(),
num_items,
context->cuda_stream());
}
} // namespace
template <>
template <typename T>
bool AddPaddingOp<CUDAContext>::DoRunWithType() {
const auto& in = Input(0);
CAFFE_ENFORCE_GE(in.ndim(), 1);
const int32_t outer_size = in.dims()[0];
const auto block_size = std::accumulate(
in.dims().begin() + 1, in.dims().end(), 1, std::multiplies<TIndex>());
// if no lengths is provided, assume it is a single full-span entry
const int32_t* lengths_ptr = nullptr;
int32_t lengths_size = 1;
if (InputSize() > 1) {
const auto& lengths = Input(1);
lengths_ptr = lengths.data<int32_t>();
lengths_size = lengths.size();
}
// fetch paddings
// input_size == 2 : pad with zeros
// input_size == 3 : start and end paddings are the same
// input_size == 4 : different start and end paddings
const T* padding_start_ptr = nullptr;
const T* padding_end_ptr = nullptr;
if (InputSize() >= 3) {
auto& padding_start = Input(2);
CAFFE_ENFORCE_EQ(block_size, padding_start.size());
padding_start_ptr = padding_start.template data<T>();
}
if (InputSize() == 4) {
auto& padding_end = Input(3);
CAFFE_ENFORCE_EQ(block_size, padding_end.size());
padding_end_ptr = padding_end.template data<T>();
} else {
padding_end_ptr = padding_start_ptr;
}
auto* out = Output(0);
{
auto out_dims = in.dims();
out_dims[0] += (startPaddingWidth_ + endPaddingWidth_) * lengths_size;
out->Resize(std::move(out_dims));
}
const auto* in_ptr = in.template data<T>();
auto* out_ptr = out->template mutable_data<T>();
// Step 1: compute prefix sum over the lengths -- unless
// there were no lengths given, i.e there is only one segment
const int32_t* lengths_prefix_sum_ptr = nullptr;
if (lengths_ptr != nullptr) {
lengths_prefix_sum(
lengths_ptr,
lengths_size,
&lengths_prefix_sum_buffer_,
&lengths_prefix_sum_,
&context_);
lengths_prefix_sum_ptr = lengths_prefix_sum_.data<int32_t>();
}
int32_t* lengths_out_ptr = nullptr;
if (OutputSize() > 1) {
auto* lengths_out = Output(1);
lengths_out->Resize(lengths_size);
lengths_out_ptr = lengths_out->mutable_data<int32_t>();
}
if (lengths_size == 0) {
return true;
}
// Compute the padding using the accumulated lengths
hipLaunchKernelGGL(( AddPaddingKernel<T>)
, dim3(lengths_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
in_ptr,
block_size,
lengths_size,
outer_size,
lengths_prefix_sum_ptr,
padding_start_ptr,
startPaddingWidth_,
padding_end_ptr,
endPaddingWidth_,
out_ptr,
lengths_out_ptr);
return true;
}
REGISTER_CUDA_OPERATOR(AddPadding, AddPaddingOp<CUDAContext>);
template <>
template <typename T>
bool RemovePaddingOp<CUDAContext>::DoRunWithType() {
const auto& in = Input(0);
CAFFE_ENFORCE_GE(in.ndim(), 1);
const int32_t outer_size = in.dims()[0];
const auto block_size = std::accumulate(
in.dims().begin() + 1, in.dims().end(), 1, std::multiplies<TIndex>());
// if no lengths is provided, assume it is a single full-span entry
const int32_t* lengths_ptr = nullptr;
int32_t lengths_size = 1;
if (InputSize() > 1) {
const auto& lengths = Input(1);
lengths_ptr = lengths.data<int32_t>();
lengths_size = lengths.size();
}
auto* out = Output(0);
{
auto out_dims = in.dims();
out_dims[0] -= (startPaddingWidth_ + endPaddingWidth_) * lengths_size;
out->Resize(std::move(out_dims));
}
const auto* in_ptr = in.template data<T>();
auto* out_ptr = out->template mutable_data<T>();
// Step 1: compute prefix sum over the (padded) lengths -- unless
// there were no lengths given, i.e there is only one segment
const int32_t* lengths_prefix_sum_ptr = nullptr;
if (lengths_ptr != nullptr) {
lengths_prefix_sum(
lengths_ptr,
lengths_size,
&lengths_prefix_sum_buffer_,
&lengths_prefix_sum_,
&context_);
lengths_prefix_sum_ptr = lengths_prefix_sum_.data<int32_t>();
}
int32_t* lengths_out_ptr = nullptr;
if (OutputSize() > 1) {
auto* lengths_out = Output(1);
lengths_out->Resize(lengths_size);
lengths_out_ptr = lengths_out->mutable_data<int32_t>();
}
if (lengths_size == 0) {
return true;
}
// Compute the padding using the accumulated lengths
hipLaunchKernelGGL(( RemovePaddingKernel<T>)
, dim3(lengths_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
in_ptr,
block_size,
lengths_size,
outer_size,
lengths_prefix_sum_ptr,
startPaddingWidth_,
endPaddingWidth_,
out_ptr,
lengths_out_ptr);
return true;
}
REGISTER_CUDA_OPERATOR(RemovePadding, RemovePaddingOp<CUDAContext>);
} // namespace caffe2
| bbb12b52f5361b985f33ed96a3b0d89441f250a5.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cub/cub.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/sequence_ops.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void AddPaddingKernel(
const T* in,
int block_size,
int lengths_size,
int outer_size,
const int32_t* lengths_prefix_sum,
const T* padding_start_ptr,
int start_padding_width_blocks,
const T* padding_end_ptr,
int end_padding_width_blocks,
T* out,
int32_t* lengths_out) {
int element_idx = blockIdx.x;
int prior_padding =
element_idx * (start_padding_width_blocks + end_padding_width_blocks);
int out_start_idx = element_idx == 0
? 0
: lengths_prefix_sum[element_idx - 1] + prior_padding;
int len_blocks;
int in_start_idx;
if (lengths_prefix_sum) {
len_blocks = lengths_prefix_sum[element_idx] -
(element_idx == 0 ? 0 : lengths_prefix_sum[element_idx - 1]);
in_start_idx = lengths_prefix_sum[element_idx] - len_blocks;
} else {
// Only one element, use the outer size
CUDA_KERNEL_ASSERT(lengths_size == 1);
len_blocks = outer_size;
in_start_idx = 0;
}
out_start_idx *= block_size;
in_start_idx *= block_size;
int len = len_blocks * block_size;
int start_padding_width = start_padding_width_blocks * block_size;
int end_padding_width = end_padding_width_blocks * block_size;
// start pad
T* out_ptr = out + out_start_idx;
for (int i = threadIdx.x; i < start_padding_width; i += blockDim.x) {
T fill = padding_start_ptr ? padding_start_ptr[i % block_size] : T(0);
out_ptr[i] = fill;
}
// payload
for (int i = threadIdx.x; i < len; i += blockDim.x) {
out_ptr[i + start_padding_width] = in[in_start_idx + i];
}
// end pad
for (int i = threadIdx.x; i < end_padding_width; i += blockDim.x) {
T fill = padding_end_ptr ? padding_end_ptr[i % block_size] : T(0);
out_ptr[i + start_padding_width + len] = fill;
}
// update the lengths
if (threadIdx.x == 0 && lengths_out != nullptr) {
lengths_out[element_idx] =
len_blocks + start_padding_width_blocks + end_padding_width_blocks;
}
}
template <typename T>
__global__ void RemovePaddingKernel(
const T* in,
int block_size,
int lengths_size,
int outer_size,
const int32_t* lengths_prefix_sum,
int start_padding_width_blocks,
int end_padding_width_blocks,
T* out,
int32_t* lengths_out) {
int element_idx = blockIdx.x;
int prior_padding =
element_idx * (start_padding_width_blocks + end_padding_width_blocks);
int out_start_idx = element_idx == 0
? 0
: lengths_prefix_sum[element_idx - 1] - prior_padding;
int len_blocks;
int in_start_idx;
if (lengths_prefix_sum) {
len_blocks = lengths_prefix_sum[element_idx] -
(element_idx == 0 ? 0 : lengths_prefix_sum[element_idx - 1]);
in_start_idx = lengths_prefix_sum[element_idx] - len_blocks;
} else {
// Only one element, use the outer size
CUDA_KERNEL_ASSERT(lengths_size == 1);
len_blocks = outer_size;
in_start_idx = 0;
}
out_start_idx *= block_size;
in_start_idx *= block_size;
int len = len_blocks * block_size;
int start_padding_width = start_padding_width_blocks * block_size;
// payload
T* out_ptr = out + out_start_idx;
for (int i = threadIdx.x; i < len; i += blockDim.x) {
out_ptr[in_start_idx + i] = in[i + start_padding_width];
}
// update the lengths
if (threadIdx.x == 0 && lengths_out != nullptr) {
lengths_out[element_idx] =
len_blocks - (start_padding_width_blocks + end_padding_width_blocks);
}
}
void lengths_prefix_sum(
const int32_t* lengths,
int32_t num_items,
Tensor<CUDAContext>* prefix_buffer,
Tensor<CUDAContext>* prefix_sum,
CUDAContext* context) {
// Retrieve buffer size
size_t temp_storage_bytes = 0;
prefix_sum->Resize(num_items);
cub::DeviceScan::InclusiveSum(
NULL,
temp_storage_bytes,
lengths,
prefix_sum->mutable_data<int32_t>(),
num_items,
context->cuda_stream());
// Allocate temporary storage
auto buffer_size = (temp_storage_bytes + sizeof(int32_t)) / sizeof(int32_t);
prefix_buffer->Resize(buffer_size);
void* d_temp_storage =
static_cast<void*>(prefix_buffer->mutable_data<int32_t>());
cub::DeviceScan::InclusiveSum(
d_temp_storage,
temp_storage_bytes,
lengths,
prefix_sum->mutable_data<int32_t>(),
num_items,
context->cuda_stream());
}
} // namespace
template <>
template <typename T>
bool AddPaddingOp<CUDAContext>::DoRunWithType() {
const auto& in = Input(0);
CAFFE_ENFORCE_GE(in.ndim(), 1);
const int32_t outer_size = in.dims()[0];
const auto block_size = std::accumulate(
in.dims().begin() + 1, in.dims().end(), 1, std::multiplies<TIndex>());
// if no lengths is provided, assume it is a single full-span entry
const int32_t* lengths_ptr = nullptr;
int32_t lengths_size = 1;
if (InputSize() > 1) {
const auto& lengths = Input(1);
lengths_ptr = lengths.data<int32_t>();
lengths_size = lengths.size();
}
// fetch paddings
// input_size == 2 : pad with zeros
// input_size == 3 : start and end paddings are the same
// input_size == 4 : different start and end paddings
const T* padding_start_ptr = nullptr;
const T* padding_end_ptr = nullptr;
if (InputSize() >= 3) {
auto& padding_start = Input(2);
CAFFE_ENFORCE_EQ(block_size, padding_start.size());
padding_start_ptr = padding_start.template data<T>();
}
if (InputSize() == 4) {
auto& padding_end = Input(3);
CAFFE_ENFORCE_EQ(block_size, padding_end.size());
padding_end_ptr = padding_end.template data<T>();
} else {
padding_end_ptr = padding_start_ptr;
}
auto* out = Output(0);
{
auto out_dims = in.dims();
out_dims[0] += (startPaddingWidth_ + endPaddingWidth_) * lengths_size;
out->Resize(std::move(out_dims));
}
const auto* in_ptr = in.template data<T>();
auto* out_ptr = out->template mutable_data<T>();
// Step 1: compute prefix sum over the lengths -- unless
// there were no lengths given, i.e there is only one segment
const int32_t* lengths_prefix_sum_ptr = nullptr;
if (lengths_ptr != nullptr) {
lengths_prefix_sum(
lengths_ptr,
lengths_size,
&lengths_prefix_sum_buffer_,
&lengths_prefix_sum_,
&context_);
lengths_prefix_sum_ptr = lengths_prefix_sum_.data<int32_t>();
}
int32_t* lengths_out_ptr = nullptr;
if (OutputSize() > 1) {
auto* lengths_out = Output(1);
lengths_out->Resize(lengths_size);
lengths_out_ptr = lengths_out->mutable_data<int32_t>();
}
if (lengths_size == 0) {
return true;
}
// Compute the padding using the accumulated lengths
AddPaddingKernel<T>
<<<lengths_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
in_ptr,
block_size,
lengths_size,
outer_size,
lengths_prefix_sum_ptr,
padding_start_ptr,
startPaddingWidth_,
padding_end_ptr,
endPaddingWidth_,
out_ptr,
lengths_out_ptr);
return true;
}
REGISTER_CUDA_OPERATOR(AddPadding, AddPaddingOp<CUDAContext>);
template <>
template <typename T>
bool RemovePaddingOp<CUDAContext>::DoRunWithType() {
const auto& in = Input(0);
CAFFE_ENFORCE_GE(in.ndim(), 1);
const int32_t outer_size = in.dims()[0];
const auto block_size = std::accumulate(
in.dims().begin() + 1, in.dims().end(), 1, std::multiplies<TIndex>());
// if no lengths is provided, assume it is a single full-span entry
const int32_t* lengths_ptr = nullptr;
int32_t lengths_size = 1;
if (InputSize() > 1) {
const auto& lengths = Input(1);
lengths_ptr = lengths.data<int32_t>();
lengths_size = lengths.size();
}
auto* out = Output(0);
{
auto out_dims = in.dims();
out_dims[0] -= (startPaddingWidth_ + endPaddingWidth_) * lengths_size;
out->Resize(std::move(out_dims));
}
const auto* in_ptr = in.template data<T>();
auto* out_ptr = out->template mutable_data<T>();
// Step 1: compute prefix sum over the (padded) lengths -- unless
// there were no lengths given, i.e there is only one segment
const int32_t* lengths_prefix_sum_ptr = nullptr;
if (lengths_ptr != nullptr) {
lengths_prefix_sum(
lengths_ptr,
lengths_size,
&lengths_prefix_sum_buffer_,
&lengths_prefix_sum_,
&context_);
lengths_prefix_sum_ptr = lengths_prefix_sum_.data<int32_t>();
}
int32_t* lengths_out_ptr = nullptr;
if (OutputSize() > 1) {
auto* lengths_out = Output(1);
lengths_out->Resize(lengths_size);
lengths_out_ptr = lengths_out->mutable_data<int32_t>();
}
if (lengths_size == 0) {
return true;
}
// Compute the padding using the accumulated lengths
RemovePaddingKernel<T>
<<<lengths_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
in_ptr,
block_size,
lengths_size,
outer_size,
lengths_prefix_sum_ptr,
startPaddingWidth_,
endPaddingWidth_,
out_ptr,
lengths_out_ptr);
return true;
}
REGISTER_CUDA_OPERATOR(RemovePadding, RemovePaddingOp<CUDAContext>);
} // namespace caffe2
|
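The `lengths_prefix_sum` helper in the pair above uses CUB's two-phase calling convention: the first `InclusiveSum` call, with a null temp-storage pointer, only reports how much scratch memory the scan needs; the second call does the work. A stripped-down sketch of that pattern (illustrative names, plain `cudaMalloc` in place of Caffe2's tensor buffers):

```cpp
#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <stdint.h>

// Inclusive prefix sum of n int32 lengths, written to d_out.
void inclusive_scan_i32(const int32_t* d_in, int32_t* d_out, int n, cudaStream_t stream) {
    size_t temp_bytes = 0;
    // phase 1: query required temporary storage (no work is done)
    cub::DeviceScan::InclusiveSum(nullptr, temp_bytes, d_in, d_out, n, stream);
    void* d_temp = nullptr;
    cudaMalloc(&d_temp, temp_bytes);
    // phase 2: run the scan with the scratch buffer
    cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_in, d_out, n, stream);
    cudaFree(d_temp);
}
```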
455479af7ae7aea05330fd0d8dbe99c1cc49fa61.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 256, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, false>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 455479af7ae7aea05330fd0d8dbe99c1cc49fa61.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 256, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, false>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
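The file pair above is one of many near-identical sources emitted by `gen_cuda_conv_bias_kern_impls.py`: each one includes a shared `.cuinl` body and explicitly instantiates the wrapper for a single CUTLASS configuration, so the heavyweight kernels build as separate translation units. A generic, self-contained sketch of that explicit-instantiation pattern (the names below are illustrative, not MegDNN's):

```cpp
#include <cstdio>
#include <cuda_runtime.h>

// In MegDNN this body lives in a shared *.cuinl include; here it is inlined.
template <int BlockM, int BlockN>
__global__ void tile_kernel() {
    if (threadIdx.x == 0) printf("tile %dx%d\n", BlockM, BlockN);
}

template <int BlockM, int BlockN>
void launch_tile(cudaStream_t stream) {
    tile_kernel<BlockM, BlockN><<<1, 32, 0, stream>>>();
}

// The explicit instantiation is the only line that differs between generated files.
template void launch_tile<128, 256>(cudaStream_t);
```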
88e3bf03153dd5c74b6d70815cfb86ccc7384c5c.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2015-2019, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <hip/hip_complex.h>
#include <hip/hip_runtime.h>
#include <forge.h>
#define USE_FORGE_CUDA_COPY_HELPERS
#include <fg/compute_copy.h>
#include <cstdio>
#include <iostream>
const unsigned DIMX = 1000;
const unsigned DIMY = 800;
static const float ZMIN = 0.1f;
static const float ZMAX = 10.f;
const float DX = 0.005f;
const size_t ZSIZE = (size_t)((ZMAX - ZMIN) / DX + 1);
void kernel(float t, float dx, float* dev_out);
int main(void) {
float* dev_out;
/*
* First Forge call should be a window creation call
* so that necessary OpenGL context is created for any
* other forge::* object to be created successfully
*/
forge::Window wnd(DIMX, DIMY, "Three dimensional line plot demo");
wnd.makeCurrent();
forge::Chart chart(FG_CHART_3D);
chart.setAxesLabelFormat("%3.1f", "%3.1f", "%.2e");
chart.setAxesLimits(-1.1f, 1.1f, -1.1f, 1.1f, 0.f, 10.f);
chart.setAxesTitles("x-axis", "y-axis", "z-axis");
forge::Plot plot3 = chart.plot(ZSIZE, forge::f32);
static float t = 0;
FORGE_CUDA_CHECK(hipMalloc((void**)&dev_out, ZSIZE * 3 * sizeof(float)));
kernel(t, DX, dev_out);
GfxHandle* handle;
createGLBuffer(&handle, plot3.vertices(), FORGE_VERTEX_BUFFER);
/* copy your data into the vertex buffer object exposed by
* forge::Plot class and then proceed to rendering.
* To help the users with copying the data from compute
* memory to display memory, Forge provides copy headers
* along with the library to help with this task
*/
copyToGLBuffer(handle, (ComputeResourceHandle)dev_out,
plot3.verticesSize());
do {
t += 0.01f;
kernel(t, DX, dev_out);
copyToGLBuffer(handle, (ComputeResourceHandle)dev_out,
plot3.verticesSize());
wnd.draw(chart);
} while (!wnd.close());
FORGE_CUDA_CHECK(hipFree(dev_out));
releaseGLBuffer(handle);
return 0;
}
__global__ void generateCurve(float t, float dx, float* out, const float ZMIN,
const size_t ZSIZE) {
int offset = blockIdx.x * blockDim.x + threadIdx.x;
float z = ZMIN + offset * dx;
if (offset < ZSIZE) {
out[3 * offset] = cos(z * t + t) / z;
out[3 * offset + 1] = sin(z * t + t) / z;
out[3 * offset + 2] = z + 0.1 * sin(t);
}
}
inline int divup(int a, int b) { return (a + b - 1) / b; }
void kernel(float t, float dx, float* dev_out) {
static const dim3 threads(1024);
dim3 blocks(divup(ZSIZE, 1024));
// clang-format off
hipLaunchKernelGGL(( generateCurve), dim3(blocks), dim3(threads), 0, 0, t, dx, dev_out, ZMIN, ZSIZE);
// clang-format on
}
| 88e3bf03153dd5c74b6d70815cfb86ccc7384c5c.cu | /*******************************************************
* Copyright (c) 2015-2019, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <cuComplex.h>
#include <cuda_runtime.h>
#include <forge.h>
#define USE_FORGE_CUDA_COPY_HELPERS
#include <fg/compute_copy.h>
#include <cstdio>
#include <iostream>
const unsigned DIMX = 1000;
const unsigned DIMY = 800;
static const float ZMIN = 0.1f;
static const float ZMAX = 10.f;
const float DX = 0.005f;
const size_t ZSIZE = (size_t)((ZMAX - ZMIN) / DX + 1);
void kernel(float t, float dx, float* dev_out);
int main(void) {
float* dev_out;
/*
* First Forge call should be a window creation call
* so that necessary OpenGL context is created for any
* other forge::* object to be created successfully
*/
forge::Window wnd(DIMX, DIMY, "Three dimensional line plot demo");
wnd.makeCurrent();
forge::Chart chart(FG_CHART_3D);
chart.setAxesLabelFormat("%3.1f", "%3.1f", "%.2e");
chart.setAxesLimits(-1.1f, 1.1f, -1.1f, 1.1f, 0.f, 10.f);
chart.setAxesTitles("x-axis", "y-axis", "z-axis");
forge::Plot plot3 = chart.plot(ZSIZE, forge::f32);
static float t = 0;
FORGE_CUDA_CHECK(cudaMalloc((void**)&dev_out, ZSIZE * 3 * sizeof(float)));
kernel(t, DX, dev_out);
GfxHandle* handle;
createGLBuffer(&handle, plot3.vertices(), FORGE_VERTEX_BUFFER);
/* copy your data into the vertex buffer object exposed by
* forge::Plot class and then proceed to rendering.
* To help the users with copying the data from compute
* memory to display memory, Forge provides copy headers
* along with the library to help with this task
*/
copyToGLBuffer(handle, (ComputeResourceHandle)dev_out,
plot3.verticesSize());
do {
t += 0.01f;
kernel(t, DX, dev_out);
copyToGLBuffer(handle, (ComputeResourceHandle)dev_out,
plot3.verticesSize());
wnd.draw(chart);
} while (!wnd.close());
FORGE_CUDA_CHECK(cudaFree(dev_out));
releaseGLBuffer(handle);
return 0;
}
__global__ void generateCurve(float t, float dx, float* out, const float ZMIN,
const size_t ZSIZE) {
int offset = blockIdx.x * blockDim.x + threadIdx.x;
float z = ZMIN + offset * dx;
if (offset < ZSIZE) {
out[3 * offset] = cos(z * t + t) / z;
out[3 * offset + 1] = sin(z * t + t) / z;
out[3 * offset + 2] = z + 0.1 * sin(t);
}
}
inline int divup(int a, int b) { return (a + b - 1) / b; }
void kernel(float t, float dx, float* dev_out) {
static const dim3 threads(1024);
dim3 blocks(divup(ZSIZE, 1024));
// clang-format off
generateCurve<<<blocks, threads>>>(t, dx, dev_out, ZMIN, ZSIZE);
// clang-format on
}
|
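The Forge demo above wraps its allocations in `FORGE_CUDA_CHECK`. A generic version of that kind of guard (a minimal sketch, not Forge's actual macro) is a useful companion to the unchecked `cudaMalloc`/`cudaMemcpy` calls elsewhere in these rows:

```cpp
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with a readable message if a CUDA runtime call fails.
#define CUDA_CHECK(call)                                                  \
    do {                                                                  \
        cudaError_t err_ = (call);                                        \
        if (err_ != cudaSuccess) {                                        \
            fprintf(stderr, "CUDA error '%s' at %s:%d\n",                 \
                    cudaGetErrorString(err_), __FILE__, __LINE__);        \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)

int main() {
    float* d = nullptr;
    CUDA_CHECK(cudaMalloc(&d, 1024 * sizeof(float)));
    CUDA_CHECK(cudaFree(d));
    return 0;
}
```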
3304a9b297dac352c1f9b2216bf500ce76c9ac8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void VectorAdd(int *a, int *b, int *c, int n)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
//for (i = 0; i < n; ++i)
if (i < n)
c[i] = a[i] + b[i];
} | 3304a9b297dac352c1f9b2216bf500ce76c9ac8f.cu | #include "includes.h"
__global__ void VectorAdd(int *a, int *b, int *c, int n)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
//for (i = 0; i < n; ++i)
if (i < n)
c[i] = a[i] + b[i];
} |
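The row above contains only the `VectorAdd` kernel. A minimal host driver for it might look like the sketch below (illustrative only; it is not part of the dataset row):

```cpp
#include <cuda_runtime.h>
#include <cstdio>

__global__ void VectorAdd(int* a, int* b, int* c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}

int main() {
    const int n = 1 << 20;
    int *a, *b, *c;
    // unified memory keeps the sketch short; explicit cudaMemcpy works equally well
    cudaMallocManaged(&a, n * sizeof(int));
    cudaMallocManaged(&b, n * sizeof(int));
    cudaMallocManaged(&c, n * sizeof(int));
    for (int i = 0; i < n; ++i) { a[i] = i; b[i] = 2 * i; }
    int block = 256, grid = (n + block - 1) / block;  // round up
    VectorAdd<<<grid, block>>>(a, b, c, n);
    cudaDeviceSynchronize();
    printf("c[42] = %d\n", c[42]);  // expect 126
    cudaFree(a); cudaFree(b); cudaFree(c);
    return 0;
}
```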
d1e56f70a8d33fc38471179a0cf3835935ae3a13.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2019-2021 by XGBoost Contributors
*
* \file data.cu
* \brief Handles setting metainfo from array interface.
*/
#include "xgboost/data.h"
#include "xgboost/logging.h"
#include "xgboost/json.h"
#include "array_interface.h"
#include "../common/device_helpers.cuh"
#include "device_adapter_hip.cuh"
#include "simple_dmatrix.h"
namespace xgboost {
void CopyInfoImpl(ArrayInterface column, HostDeviceVector<float>* out) {
auto SetDeviceToPtr = [](void* ptr) {
hipPointerAttribute_t attr;
dh::safe_cuda(hipPointerGetAttributes(&attr, ptr));
int32_t ptr_device = attr.device;
dh::safe_cuda(hipSetDevice(ptr_device));
return ptr_device;
};
auto ptr_device = SetDeviceToPtr(column.data);
out->SetDevice(ptr_device);
out->Resize(column.num_rows);
auto p_dst = thrust::device_pointer_cast(out->DevicePointer());
dh::LaunchN(ptr_device, column.num_rows, [=] __device__(size_t idx) {
p_dst[idx] = column.GetElement(idx);
});
}
namespace {
auto SetDeviceToPtr(void *ptr) {
hipPointerAttribute_t attr;
dh::safe_cuda(hipPointerGetAttributes(&attr, ptr));
int32_t ptr_device = attr.device;
dh::safe_cuda(hipSetDevice(ptr_device));
return ptr_device;
}
} // anonymous namespace
void CopyGroupInfoImpl(ArrayInterface column, std::vector<bst_group_t>* out) {
CHECK(column.type != ArrayInterface::kF4 && column.type != ArrayInterface::kF8)
<< "Expected integer for group info.";
auto ptr_device = SetDeviceToPtr(column.data);
dh::TemporaryArray<bst_group_t> temp(column.num_rows);
auto d_tmp = temp.data();
dh::LaunchN(ptr_device, column.num_rows, [=] __device__(size_t idx) {
d_tmp[idx] = column.GetElement<size_t>(idx);
});
auto length = column.num_rows;
out->resize(length + 1);
out->at(0) = 0;
thrust::copy(temp.data(), temp.data() + length, out->begin() + 1);
std::partial_sum(out->begin(), out->end(), out->begin());
}
namespace {
// thrust::all_of tries to copy lambda function.
struct AllOfOp {
__device__ bool operator()(float w) {
return w >= 0;
}
};
} // anonymous namespace
void MetaInfo::SetInfo(const char * c_key, std::string const& interface_str) {
Json j_interface = Json::Load({interface_str.c_str(), interface_str.size()});
auto const& j_arr = get<Array>(j_interface);
CHECK_EQ(j_arr.size(), 1)
<< "MetaInfo: " << c_key << ". " << ArrayInterfaceErrors::Dimension(1);
ArrayInterface array_interface(interface_str);
std::string key{c_key};
CHECK(!array_interface.valid.Data())
<< "Meta info " << key << " should be dense, found validity mask";
CHECK_EQ(array_interface.num_cols, 1)
<< "Meta info should be a single column.";
if (array_interface.num_rows == 0) {
return;
}
if (key == "label") {
CopyInfoImpl(array_interface, &labels_);
} else if (key == "weight") {
CopyInfoImpl(array_interface, &weights_);
auto ptr = weights_.ConstDevicePointer();
auto valid =
thrust::all_of(thrust::device, ptr, ptr + weights_.Size(), AllOfOp{});
CHECK(valid) << "Weights must be positive values.";
} else if (key == "base_margin") {
CopyInfoImpl(array_interface, &base_margin_);
} else if (key == "group") {
CopyGroupInfoImpl(array_interface, &group_ptr_);
return;
} else if (key == "qid") {
auto it = dh::MakeTransformIterator<uint32_t>(
thrust::make_counting_iterator(0ul),
[array_interface] __device__(size_t i) {
return array_interface.GetElement<uint32_t>(i);
});
dh::caching_device_vector<bool> flag(1);
auto d_flag = dh::ToSpan(flag);
auto d = SetDeviceToPtr(array_interface.data);
dh::LaunchN(d, 1, [=] __device__(size_t) { d_flag[0] = true; });
dh::LaunchN(d, array_interface.num_rows - 1, [=] __device__(size_t i) {
if (array_interface.GetElement<uint32_t>(i) >
array_interface.GetElement<uint32_t>(i + 1)) {
d_flag[0] = false;
}
});
bool non_dec = true;
dh::safe_cuda(hipMemcpy(&non_dec, flag.data().get(), sizeof(bool),
hipMemcpyDeviceToHost));
CHECK(non_dec)
<< "`qid` must be sorted in increasing order along with data.";
size_t bytes = 0;
dh::caching_device_vector<uint32_t> out(array_interface.num_rows);
dh::caching_device_vector<uint32_t> cnt(array_interface.num_rows);
HostDeviceVector<int> d_num_runs_out(1, 0, d);
hipcub::DeviceRunLengthEncode::Encode(nullptr, bytes, it, out.begin(),
cnt.begin(), d_num_runs_out.DevicePointer(),
array_interface.num_rows);
dh::caching_device_vector<char> tmp(bytes);
hipcub::DeviceRunLengthEncode::Encode(tmp.data().get(), bytes, it, out.begin(),
cnt.begin(), d_num_runs_out.DevicePointer(),
array_interface.num_rows);
auto h_num_runs_out = d_num_runs_out.HostSpan()[0];
group_ptr_.clear(); group_ptr_.resize(h_num_runs_out + 1, 0);
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::inclusive_scan(thrust::hip::par(alloc), cnt.begin(),
cnt.begin() + h_num_runs_out, cnt.begin());
thrust::copy(cnt.begin(), cnt.begin() + h_num_runs_out,
group_ptr_.begin() + 1);
return;
} else if (key == "label_lower_bound") {
CopyInfoImpl(array_interface, &labels_lower_bound_);
return;
} else if (key == "label_upper_bound") {
CopyInfoImpl(array_interface, &labels_upper_bound_);
return;
} else if (key == "feature_weights") {
CopyInfoImpl(array_interface, &feature_weigths);
auto d_feature_weights = feature_weigths.ConstDeviceSpan();
auto valid = thrust::all_of(
thrust::device, d_feature_weights.data(),
d_feature_weights.data() + d_feature_weights.size(), AllOfOp{});
CHECK(valid) << "Feature weight must be greater than 0.";
return;
} else {
LOG(FATAL) << "Unknown metainfo: " << key;
}
}
template <typename AdapterT>
DMatrix* DMatrix::Create(AdapterT* adapter, float missing, int nthread,
const std::string& cache_prefix, size_t page_size) {
CHECK_EQ(cache_prefix.size(), 0)
<< "Device memory construction is not currently supported with external "
"memory.";
return new data::SimpleDMatrix(adapter, missing, nthread);
}
template DMatrix* DMatrix::Create<data::CudfAdapter>(
data::CudfAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix, size_t page_size);
template DMatrix* DMatrix::Create<data::CupyAdapter>(
data::CupyAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix, size_t page_size);
} // namespace xgboost
| d1e56f70a8d33fc38471179a0cf3835935ae3a13.cu | /*!
* Copyright 2019-2021 by XGBoost Contributors
*
* \file data.cu
* \brief Handles setting metainfo from array interface.
*/
#include "xgboost/data.h"
#include "xgboost/logging.h"
#include "xgboost/json.h"
#include "array_interface.h"
#include "../common/device_helpers.cuh"
#include "device_adapter.cuh"
#include "simple_dmatrix.h"
namespace xgboost {
void CopyInfoImpl(ArrayInterface column, HostDeviceVector<float>* out) {
auto SetDeviceToPtr = [](void* ptr) {
cudaPointerAttributes attr;
dh::safe_cuda(cudaPointerGetAttributes(&attr, ptr));
int32_t ptr_device = attr.device;
dh::safe_cuda(cudaSetDevice(ptr_device));
return ptr_device;
};
auto ptr_device = SetDeviceToPtr(column.data);
out->SetDevice(ptr_device);
out->Resize(column.num_rows);
auto p_dst = thrust::device_pointer_cast(out->DevicePointer());
dh::LaunchN(ptr_device, column.num_rows, [=] __device__(size_t idx) {
p_dst[idx] = column.GetElement(idx);
});
}
namespace {
auto SetDeviceToPtr(void *ptr) {
cudaPointerAttributes attr;
dh::safe_cuda(cudaPointerGetAttributes(&attr, ptr));
int32_t ptr_device = attr.device;
dh::safe_cuda(cudaSetDevice(ptr_device));
return ptr_device;
}
} // anonymous namespace
void CopyGroupInfoImpl(ArrayInterface column, std::vector<bst_group_t>* out) {
CHECK(column.type != ArrayInterface::kF4 && column.type != ArrayInterface::kF8)
<< "Expected integer for group info.";
auto ptr_device = SetDeviceToPtr(column.data);
dh::TemporaryArray<bst_group_t> temp(column.num_rows);
auto d_tmp = temp.data();
dh::LaunchN(ptr_device, column.num_rows, [=] __device__(size_t idx) {
d_tmp[idx] = column.GetElement<size_t>(idx);
});
auto length = column.num_rows;
out->resize(length + 1);
out->at(0) = 0;
thrust::copy(temp.data(), temp.data() + length, out->begin() + 1);
std::partial_sum(out->begin(), out->end(), out->begin());
}
namespace {
// thrust::all_of tries to copy lambda function.
struct AllOfOp {
__device__ bool operator()(float w) {
return w >= 0;
}
};
} // anonymous namespace
void MetaInfo::SetInfo(const char * c_key, std::string const& interface_str) {
Json j_interface = Json::Load({interface_str.c_str(), interface_str.size()});
auto const& j_arr = get<Array>(j_interface);
CHECK_EQ(j_arr.size(), 1)
<< "MetaInfo: " << c_key << ". " << ArrayInterfaceErrors::Dimension(1);
ArrayInterface array_interface(interface_str);
std::string key{c_key};
CHECK(!array_interface.valid.Data())
<< "Meta info " << key << " should be dense, found validity mask";
CHECK_EQ(array_interface.num_cols, 1)
<< "Meta info should be a single column.";
if (array_interface.num_rows == 0) {
return;
}
if (key == "label") {
CopyInfoImpl(array_interface, &labels_);
} else if (key == "weight") {
CopyInfoImpl(array_interface, &weights_);
auto ptr = weights_.ConstDevicePointer();
auto valid =
thrust::all_of(thrust::device, ptr, ptr + weights_.Size(), AllOfOp{});
CHECK(valid) << "Weights must be positive values.";
} else if (key == "base_margin") {
CopyInfoImpl(array_interface, &base_margin_);
} else if (key == "group") {
CopyGroupInfoImpl(array_interface, &group_ptr_);
return;
} else if (key == "qid") {
auto it = dh::MakeTransformIterator<uint32_t>(
thrust::make_counting_iterator(0ul),
[array_interface] __device__(size_t i) {
return array_interface.GetElement<uint32_t>(i);
});
dh::caching_device_vector<bool> flag(1);
auto d_flag = dh::ToSpan(flag);
auto d = SetDeviceToPtr(array_interface.data);
dh::LaunchN(d, 1, [=] __device__(size_t) { d_flag[0] = true; });
dh::LaunchN(d, array_interface.num_rows - 1, [=] __device__(size_t i) {
if (array_interface.GetElement<uint32_t>(i) >
array_interface.GetElement<uint32_t>(i + 1)) {
d_flag[0] = false;
}
});
bool non_dec = true;
dh::safe_cuda(cudaMemcpy(&non_dec, flag.data().get(), sizeof(bool),
cudaMemcpyDeviceToHost));
CHECK(non_dec)
<< "`qid` must be sorted in increasing order along with data.";
size_t bytes = 0;
dh::caching_device_vector<uint32_t> out(array_interface.num_rows);
dh::caching_device_vector<uint32_t> cnt(array_interface.num_rows);
HostDeviceVector<int> d_num_runs_out(1, 0, d);
cub::DeviceRunLengthEncode::Encode(nullptr, bytes, it, out.begin(),
cnt.begin(), d_num_runs_out.DevicePointer(),
array_interface.num_rows);
dh::caching_device_vector<char> tmp(bytes);
cub::DeviceRunLengthEncode::Encode(tmp.data().get(), bytes, it, out.begin(),
cnt.begin(), d_num_runs_out.DevicePointer(),
array_interface.num_rows);
auto h_num_runs_out = d_num_runs_out.HostSpan()[0];
group_ptr_.clear(); group_ptr_.resize(h_num_runs_out + 1, 0);
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::inclusive_scan(thrust::cuda::par(alloc), cnt.begin(),
cnt.begin() + h_num_runs_out, cnt.begin());
thrust::copy(cnt.begin(), cnt.begin() + h_num_runs_out,
group_ptr_.begin() + 1);
return;
} else if (key == "label_lower_bound") {
CopyInfoImpl(array_interface, &labels_lower_bound_);
return;
} else if (key == "label_upper_bound") {
CopyInfoImpl(array_interface, &labels_upper_bound_);
return;
} else if (key == "feature_weights") {
CopyInfoImpl(array_interface, &feature_weigths);
auto d_feature_weights = feature_weigths.ConstDeviceSpan();
auto valid = thrust::all_of(
thrust::device, d_feature_weights.data(),
d_feature_weights.data() + d_feature_weights.size(), AllOfOp{});
CHECK(valid) << "Feature weight must be greater than 0.";
return;
} else {
LOG(FATAL) << "Unknown metainfo: " << key;
}
}
template <typename AdapterT>
DMatrix* DMatrix::Create(AdapterT* adapter, float missing, int nthread,
const std::string& cache_prefix, size_t page_size) {
CHECK_EQ(cache_prefix.size(), 0)
<< "Device memory construction is not currently supported with external "
"memory.";
return new data::SimpleDMatrix(adapter, missing, nthread);
}
template DMatrix* DMatrix::Create<data::CudfAdapter>(
data::CudfAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix, size_t page_size);
template DMatrix* DMatrix::Create<data::CupyAdapter>(
data::CupyAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix, size_t page_size);
} // namespace xgboost
|
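For the `qid` branch above, XGBoost collapses the sorted query ids into runs with `cub::DeviceRunLengthEncode::Encode` and turns the run lengths into group offsets via an inclusive scan. A self-contained sketch of just the run-length step (plain `cudaMalloc` in place of the caching allocators; names are illustrative):

```cpp
#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    std::vector<uint32_t> h_qid = {0, 0, 0, 1, 1, 2, 2, 2, 2};
    int n = (int)h_qid.size();

    uint32_t *d_qid, *d_unique, *d_counts;
    int* d_num_runs;
    cudaMalloc(&d_qid, n * sizeof(uint32_t));
    cudaMalloc(&d_unique, n * sizeof(uint32_t));
    cudaMalloc(&d_counts, n * sizeof(uint32_t));
    cudaMalloc(&d_num_runs, sizeof(int));
    cudaMemcpy(d_qid, h_qid.data(), n * sizeof(uint32_t), cudaMemcpyHostToDevice);

    // two-phase CUB call: size query first, then the actual encode
    size_t bytes = 0;
    cub::DeviceRunLengthEncode::Encode(nullptr, bytes, d_qid, d_unique, d_counts, d_num_runs, n);
    void* d_temp = nullptr;
    cudaMalloc(&d_temp, bytes);
    cub::DeviceRunLengthEncode::Encode(d_temp, bytes, d_qid, d_unique, d_counts, d_num_runs, n);

    int h_runs = 0;
    cudaMemcpy(&h_runs, d_num_runs, sizeof(int), cudaMemcpyDeviceToHost);
    printf("runs: %d\n", h_runs);  // expect 3 (run lengths 3, 2, 4)

    cudaFree(d_qid); cudaFree(d_unique); cudaFree(d_counts);
    cudaFree(d_num_runs); cudaFree(d_temp);
    return 0;
}
```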
4c99b9e2e685561e9f91083ce0e93d7bd1b12ed1.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 4c99b9e2e685561e9f91083ce0e93d7bd1b12ed1.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
6d307f3f51063dec002026c235f899592f9da9a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "math/matrix_type.h"
#include "math/vector_type.h"
#include "tracking/m_estimator.h"
#include "utils/safe_call.h"
#include "tracking/reduce_sum.h"
#include <thrust/device_vector.h>
namespace fusion
{
// TODO : Robust RGB Estimation
// STATUS: On halt
// struct RGBSelection
// {
// __device__ inline bool find_corresp(
// const int &x,
// const int &y,
// float &curr_val,
// float &last_val,
// float &dx,
// float &dy,
// Vector4f &pt) const
// {
// // reference point
// pt = last_vmap.ptr(y)[x];
// if (isnan(pt.x) || pt.w < 0)
// return false;
// // reference point in curr frame
// pt = T_last_curr(pt);
// // reference intensity
// last_val = last_intensity.ptr(y)[x];
// if (!isfinite(last_val))
// return false;
// auto u = fx * pt.x / pt.z + cx;
// auto v = fy * pt.y / pt.z + cy;
// if (u >= 1 && v >= 1 && u <= cols - 2 && v <= rows - 2)
// {
// curr_val = interpolate_bilinear(curr_intensity, u, v);
// dx = interpolate_bilinear(curr_intensity_dx, u, v);
// dy = interpolate_bilinear(curr_intensity_dy, u, v);
// // point selection criteria
// // TODO : Optimise this
// return (dx > 2 || dy > 2) &&
// isfinite(curr_val) &&
// isfinite(dx) && isfinite(dy);
// }
// return false;
// }
// __device__ float interpolate_bilinear(cv::cuda::PtrStep<float> image, float &x, float &y) const
// {
// int u = ::floor(x), v = ::floor(y);
// float coeff_x = x - u, coeff_y = y - v;
// return (image.ptr(v)[u] * (1 - coeff_x) + image.ptr(v)[u + 1] * coeff_x) * (1 - coeff_y) +
// (image.ptr(v + 1)[u] * (1 - coeff_x) + image.ptr(v + 1)[u + 1] * coeff_x) * coeff_y;
// }
// __device__ __inline__ void operator()() const
// {
// for (int k = blockIdx.x * blockDim.x + threadIdx.x; k < N; k += blockDim.x * gridDim.x)
// {
// const int y = k / cols;
// const int x = k - y * cols;
// if (y >= cols || x >= rows)
// return;
// Vector4f pt;
// float curr_val, last_val, dx, dy;
// bool corresp_found = find_corresp(x, y, curr_val, last_val, dx, dy, pt);
// if (corresp_found)
// {
// uint index = atomicAdd(num_corresp, 1);
// array_image[index] = Vector4f(last_val, curr_val, dx, dy);
// array_point[index] = pt;
// error_term[index] = pow(curr_val - last_val, 2);
// }
// }
// }
// cv::cuda::PtrStep<Vector4f> last_vmap;
// cv::cuda::PtrStep<float> last_intensity;
// cv::cuda::PtrStep<float> curr_intensity;
// cv::cuda::PtrStep<float> curr_intensity_dx;
// cv::cuda::PtrStep<float> curr_intensity_dy;
// float fx, fy, cx, cy;
// DeviceMatrix3x4 T_last_curr;
// Matrix3x3f RLastCurr;
// Vector3f TLastCurr;
// int N, cols, rows;
// int *num_corresp;
// Vector4f *array_image;
// Vector4f *array_point;
// float *error_term;
// };
// __global__ void compute_rgb_corresp_kernel(RGBSelection delegate)
// {
// delegate();
// }
// __global__ void compute_variance_kernel(float *error_term, float *variance_term, float mean, uint max_idx)
// {
// uint x = threadIdx.x + blockDim.x * blockIdx.x;
// if (x >= max_idx)
// return;
// variance_term[x] = pow(error_term[x] - mean, 2);
// }
// void compute_rgb_corresp(
// const cv::cuda::GpuMat last_vmap,
// const cv::cuda::GpuMat last_intensity,
// const cv::cuda::GpuMat curr_intensity,
// const cv::cuda::GpuMat curr_intensity_dx,
// const cv::cuda::GpuMat curr_intensity_dy,
// const Sophus::SE3d &frame_pose,
// const IntrinsicMatrix K,
// Vector4f *transformed_points,
// Vector4f *image_corresp_data,
// float *error_term_array,
// float *variance_term_array,
// float &mean_estimate,
// float &stdev_estimated,
// uint &num_corresp)
// {
// auto cols = last_vmap.cols;
// auto rows = last_vmap.rows;
// RGBSelection delegate;
// delegate.last_vmap = last_vmap;
// delegate.last_intensity = last_intensity;
// delegate.curr_intensity = curr_intensity;
// delegate.curr_intensity_dx = curr_intensity_dx;
// delegate.curr_intensity_dy = curr_intensity_dy;
// delegate.T_last_curr = frame_pose;
// delegate.array_image = image_corresp_data;
// delegate.array_point = transformed_points;
// delegate.error_term = error_term_array;
// delegate.fx = K.fx;
// delegate.fy = K.fy;
// delegate.cx = K.cx;
// delegate.cy = K.cy;
// delegate.cols = cols;
// delegate.rows = rows;
// delegate.N = cols * rows;
// safe_call(hipMalloc(&delegate.num_corresp, sizeof(uint)));
// safe_call(hipMemset(delegate.num_corresp, 0, sizeof(uint)));
// compute_rgb_corresp_kernel<<<96, 224>>>(delegate);
// safe_call(hipMemcpy(&num_corresp, delegate.num_corresp, sizeof(uint), hipMemcpyDeviceToHost));
// if (num_corresp <= 1)
// return;
// thrust::device_ptr<float> error_term(error_term_array);
// thrust::device_ptr<float> variance_term(variance_term_array);
// float sum_error = thrust::reduce(error_term, error_term + num_corresp);
// mean_estimate = 0;
// stdev_estimated = std::sqrt(sum_error / (num_corresp - 6));
// // dim3 thread(MAX_THREAD);
// // dim3 block(div_up(num_corresp, thread.x));
// // compute_variance_kernel<<<block, thread>>>(error_term_array, variance_term_array, mean_estimate, num_corresp);
// // float sum_variance = thrust::reduce(variance_term, variance_term + num_corresp);
// // stdev_estimated = sqrt(sum_variance / (num_corresp - 1));
// std::cout << "mean : " << mean_estimate << " stddev : " << stdev_estimated << " num_corresp : " << num_corresp << std::endl;
// safe_call(hipFree(delegate.num_corresp));
// }
// // TODO : Robust RGB Estimation
// // STATUS: On halt
// struct RGBLeastSquares
// {
// cv::cuda::PtrStep<float> out;
// Vector4f *transformed_points;
// Vector4f *image_corresp_data;
// float mean_estimated;
// float stdev_estimated;
// uint num_corresp;
// float fx, fy;
// size_t N;
// __device__ void compute_jacobian(const int &k, float *sum)
// {
// float row[7] = {0, 0, 0, 0, 0, 0, 0};
// float weight = 0;
// if (k < num_corresp)
// {
// Vector3f p_transformed = ToVector3(transformed_points[k]);
// Vector4f image = image_corresp_data[k];
// float z_inv = 1.0 / p_transformed.z;
// Vector3f left;
// left.x = image.z * fx * z_inv;
// left.y = image.w * fy * z_inv;
// left.z = -(left.x * p_transformed.x + left.y * p_transformed.y) * z_inv;
// float residual = image.y - image.x; // curr_val - last_val
// float res_normalized = residual / stdev_estimated;
// float threshold_huber = 1.345 * stdev_estimated;
// if (fabs(res_normalized) < threshold_huber)
// weight = 1;
// else
// weight = threshold_huber / fabs(res_normalized);
// row[6] = (-residual);
// // printf("%f, %f\n", res_normalized, threshold_huber);
// *(Vector3f *)&row[0] = left;
// *(Vector3f *)&row[3] = p_transformed.cross(left);
// }
// int count = 0;
// #pragma unroll
// for (int i = 0; i < 7; ++i)
// {
// #pragma unroll
// for (int j = i; j < 7; ++j)
// sum[count++] = row[i] * row[j];
// }
// }
// __device__ void operator()()
// {
// float sum[29] = {0, 0, 0, 0, 0,
// 0, 0, 0, 0, 0,
// 0, 0, 0, 0, 0,
// 0, 0, 0, 0, 0,
// 0, 0, 0, 0, 0,
// 0, 0, 0, 0};
// float val[29];
// for (int k = blockIdx.x * blockDim.x + threadIdx.x; k < N; k += blockDim.x * gridDim.x)
// {
// compute_jacobian(k, val);
// #pragma unroll
// for (int i = 0; i < 29; ++i)
// sum[i] += val[i];
// }
// BlockReduce<float, 29>(sum);
// if (threadIdx.x == 0)
// {
// #pragma unroll
// for (int i = 0; i < 29; ++i)
// out.ptr(blockIdx.x)[i] = sum[i];
// }
// }
// }; // struct RGBLeastSquares
// __global__ void compute_least_square_RGB_kernel(RGBLeastSquares delegate)
// {
// delegate();
// }
// // TODO : Robust RGB Estimation
// // STATUS: On halt
// void compute_least_square_RGB(
// const uint num_corresp,
// Vector4f *transformed_points,
// Vector4f *image_corresp_data,
// const float mean_estimated,
// const float stdev_estimated,
// const IntrinsicMatrix K,
// cv::cuda::GpuMat sum,
// cv::cuda::GpuMat out,
// float *hessian_estimated,
// float *residual_estimated,
// float *residual)
// {
// RGBLeastSquares delegate;
// delegate.fx = K.fx;
// delegate.fy = K.fy;
// delegate.out = sum;
// delegate.N = num_corresp;
// delegate.num_corresp = num_corresp;
// delegate.image_corresp_data = image_corresp_data;
// delegate.transformed_points = transformed_points;
// delegate.mean_estimated = mean_estimated;
// delegate.stdev_estimated = stdev_estimated;
// compute_least_square_RGB_kernel<<<96, 224>>>(delegate);
// cv::cuda::reduce(sum, out, 0, cv::REDUCE_SUM);
// cv::Mat host_data;
// out.download(host_data);
// create_jtjjtr<6, 7>(host_data, hessian_estimated, residual_estimated);
// residual[0] = host_data.ptr<float>()[27];
// residual[1] = num_corresp;
// // std::cout << residual[0] << " : " << residual[1] << std::endl;
// }
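// RgbReduction2 implements the per-pixel term of the direct (photometric)
// tracking residual: each pixel of the last frame is warped by the current
// pose estimate into the current image, the intensity difference is
// Huber-weighted using the supplied stddev, and every CUDA block accumulates
// the packed 7x7 upper-triangular products plus a correspondence counter
// into one row of the output matrix (out).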
struct RgbReduction2
{
__device__ bool find_corresp(int &x, int &y)
{
Vector4f pt = last_vmap.ptr(y)[x];
if (pt.w < 0 || isnan(pt.x))
return false;
i_l = last_image.ptr(y)[x];
if (!isfinite(i_l))
return false;
p_transformed = pose(ToVector3(pt));
u0 = p_transformed.x / p_transformed.z * fx + cx;
v0 = p_transformed.y / p_transformed.z * fy + cy;
if (u0 >= 2 && u0 < cols - 2 && v0 >= 2 && v0 < rows - 2)
{
i_c = interp2(curr_image, u0, v0);
dx = interp2(dIdx, u0, v0);
dy = interp2(dIdy, u0, v0);
return (dx > 2 || dy > 2) && isfinite(i_c) && isfinite(dx) && isfinite(dy);
}
return false;
}
__device__ float interp2(cv::cuda::PtrStep<float> image, float &x, float &y)
{
int u = ::floor(x), v = ::floor(y);
float coeff_x = x - u, coeff_y = y - v;
return (image.ptr(v)[u] * (1 - coeff_x) + image.ptr(v)[u + 1] * coeff_x) * (1 - coeff_y) +
(image.ptr(v + 1)[u] * (1 - coeff_x) + image.ptr(v + 1)[u + 1] * coeff_x) * coeff_y;
}
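    // Builds one Jacobian row for this pixel: row[0..5] is the weighted 6-DoF
    // photometric Jacobian, row[6] the weighted negative residual; the upper
    // triangle of the products row[i]*row[j] is packed into sum[0..27] and
    // sum[28] flags whether a valid correspondence was found.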
__device__ void compute_jacobian(int &k, float *sum)
{
int y = k / cols;
int x = k - y * cols;
bool corresp_found = find_corresp(x, y);
float row[7] = {0, 0, 0, 0, 0, 0, 0};
if (corresp_found)
{
Vector3f left;
float z_inv = 1.0 / p_transformed.z;
left.x = dx * fx * z_inv;
left.y = dy * fy * z_inv;
left.z = -(left.x * p_transformed.x + left.y * p_transformed.y) * z_inv;
float residual = i_c - i_l;
if (stddev > 10e-5)
residual /= stddev;
float huber_th = 1.345 * stddev;
float weight = 1;
if (fabs(residual) > huber_th && stddev > 10e-6)
{
weight = sqrtf(huber_th / fabs(residual));
}
row[6] = weight * (-residual);
*(Vector3f *)&row[0] = weight * left;
*(Vector3f *)&row[3] = weight * p_transformed.cross(left);
}
int count = 0;
#pragma unroll
for (int i = 0; i < 7; ++i)
#pragma unroll
for (int j = i; j < 7; ++j)
sum[count++] = row[i] * row[j];
sum[count] = (float)corresp_found;
}
__device__ __forceinline__ void operator()()
{
float sum[29] = {0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0};
float val[29];
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
{
compute_jacobian(i, val);
#pragma unroll
for (int j = 0; j < 29; ++j)
sum[j] += val[j];
}
BlockReduce<float, 29>(sum);
if (threadIdx.x == 0)
#pragma unroll
for (int i = 0; i < 29; ++i)
out.ptr(blockIdx.x)[i] = sum[i];
}
int cols, rows, N;
float u0, v0;
Matrix3x4f pose;
float fx, fy, cx, cy, invfx, invfy;
cv::cuda::PtrStep<Vector4f> point_cloud, last_vmap;
cv::cuda::PtrStep<float> last_image, curr_image;
cv::cuda::PtrStep<float> dIdx, dIdy;
cv::cuda::PtrStep<float> out;
Vector3f p_transformed, p_last;
float stddev;
private:
float i_c, i_l, dx, dy;
};
__global__ void rgb_reduce_kernel2(RgbReduction2 rr)
{
rr();
}
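// Host entry point for one direct (photometric) alignment step: launches the
// per-pixel reduction on a 96x224 grid, column-reduces the 96x29 partial sums
// on the GPU, downloads the result and unpacks it into the 6x6 normal matrix
// (jtj) and the 6x1 right-hand side (jtr); residual[0] holds the accumulated
// squared residual and residual[1] the number of valid correspondences.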
void rgb_step(const cv::cuda::GpuMat &curr_intensity,
const cv::cuda::GpuMat &last_intensity,
const cv::cuda::GpuMat &last_vmap,
const cv::cuda::GpuMat &curr_vmap,
const cv::cuda::GpuMat &intensity_dx,
const cv::cuda::GpuMat &intensity_dy,
cv::cuda::GpuMat &sum,
cv::cuda::GpuMat &out,
const float stddev_estimate,
const Sophus::SE3d &pose,
const IntrinsicMatrix K,
float *jtj, float *jtr,
float *residual)
{
int cols = curr_intensity.cols;
int rows = curr_intensity.rows;
RgbReduction2 rr;
rr.cols = cols;
rr.rows = rows;
rr.N = cols * rows;
rr.last_image = last_intensity;
rr.curr_image = curr_intensity;
rr.point_cloud = curr_vmap;
rr.last_vmap = last_vmap;
rr.dIdx = intensity_dx;
rr.dIdy = intensity_dy;
rr.pose = pose.cast<float>().matrix3x4();
rr.stddev = stddev_estimate;
rr.fx = K.fx;
rr.fy = K.fy;
rr.cx = K.cx;
rr.cy = K.cy;
rr.invfx = K.invfx;
rr.invfy = K.invfy;
rr.out = sum;
hipLaunchKernelGGL(( rgb_reduce_kernel2), dim3(96), dim3(224), 0, 0, rr);
cv::cuda::reduce(sum, out, 0, cv::REDUCE_SUM);
cv::Mat host_data;
out.download(host_data);
create_jtjjtr<6, 7>(host_data, jtj, jtr);
residual[0] = host_data.ptr<float>()[27];
residual[1] = host_data.ptr<float>()[28];
}
} // namespace fusion | 6d307f3f51063dec002026c235f899592f9da9a4.cu | #include "math/matrix_type.h"
#include "math/vector_type.h"
#include "tracking/m_estimator.h"
#include "utils/safe_call.h"
#include "tracking/reduce_sum.h"
#include <thrust/device_vector.h>
namespace fusion
{
// TODO : Robust RGB Estimation
// STATUS: On halt
// struct RGBSelection
// {
// __device__ inline bool find_corresp(
// const int &x,
// const int &y,
// float &curr_val,
// float &last_val,
// float &dx,
// float &dy,
// Vector4f &pt) const
// {
// // reference point
// pt = last_vmap.ptr(y)[x];
// if (isnan(pt.x) || pt.w < 0)
// return false;
// // reference point in curr frame
// pt = T_last_curr(pt);
// // reference intensity
// last_val = last_intensity.ptr(y)[x];
// if (!isfinite(last_val))
// return false;
// auto u = fx * pt.x / pt.z + cx;
// auto v = fy * pt.y / pt.z + cy;
// if (u >= 1 && v >= 1 && u <= cols - 2 && v <= rows - 2)
// {
// curr_val = interpolate_bilinear(curr_intensity, u, v);
// dx = interpolate_bilinear(curr_intensity_dx, u, v);
// dy = interpolate_bilinear(curr_intensity_dy, u, v);
// // point selection criteria
// // TODO : Optimise this
// return (dx > 2 || dy > 2) &&
// isfinite(curr_val) &&
// isfinite(dx) && isfinite(dy);
// }
// return false;
// }
// __device__ float interpolate_bilinear(cv::cuda::PtrStep<float> image, float &x, float &y) const
// {
// int u = std::floor(x), v = std::floor(y);
// float coeff_x = x - u, coeff_y = y - v;
// return (image.ptr(v)[u] * (1 - coeff_x) + image.ptr(v)[u + 1] * coeff_x) * (1 - coeff_y) +
// (image.ptr(v + 1)[u] * (1 - coeff_x) + image.ptr(v + 1)[u + 1] * coeff_x) * coeff_y;
// }
// __device__ __inline__ void operator()() const
// {
// for (int k = blockIdx.x * blockDim.x + threadIdx.x; k < N; k += blockDim.x * gridDim.x)
// {
// const int y = k / cols;
// const int x = k - y * cols;
// if (y >= cols || x >= rows)
// return;
// Vector4f pt;
// float curr_val, last_val, dx, dy;
// bool corresp_found = find_corresp(x, y, curr_val, last_val, dx, dy, pt);
// if (corresp_found)
// {
// uint index = atomicAdd(num_corresp, 1);
// array_image[index] = Vector4f(last_val, curr_val, dx, dy);
// array_point[index] = pt;
// error_term[index] = pow(curr_val - last_val, 2);
// }
// }
// }
// cv::cuda::PtrStep<Vector4f> last_vmap;
// cv::cuda::PtrStep<float> last_intensity;
// cv::cuda::PtrStep<float> curr_intensity;
// cv::cuda::PtrStep<float> curr_intensity_dx;
// cv::cuda::PtrStep<float> curr_intensity_dy;
// float fx, fy, cx, cy;
// DeviceMatrix3x4 T_last_curr;
// Matrix3x3f RLastCurr;
// Vector3f TLastCurr;
// int N, cols, rows;
// int *num_corresp;
// Vector4f *array_image;
// Vector4f *array_point;
// float *error_term;
// };
// __global__ void compute_rgb_corresp_kernel(RGBSelection delegate)
// {
// delegate();
// }
// __global__ void compute_variance_kernel(float *error_term, float *variance_term, float mean, uint max_idx)
// {
// uint x = threadIdx.x + blockDim.x * blockIdx.x;
// if (x >= max_idx)
// return;
// variance_term[x] = pow(error_term[x] - mean, 2);
// }
// void compute_rgb_corresp(
// const cv::cuda::GpuMat last_vmap,
// const cv::cuda::GpuMat last_intensity,
// const cv::cuda::GpuMat curr_intensity,
// const cv::cuda::GpuMat curr_intensity_dx,
// const cv::cuda::GpuMat curr_intensity_dy,
// const Sophus::SE3d &frame_pose,
// const IntrinsicMatrix K,
// Vector4f *transformed_points,
// Vector4f *image_corresp_data,
// float *error_term_array,
// float *variance_term_array,
// float &mean_estimate,
// float &stdev_estimated,
// uint &num_corresp)
// {
// auto cols = last_vmap.cols;
// auto rows = last_vmap.rows;
// RGBSelection delegate;
// delegate.last_vmap = last_vmap;
// delegate.last_intensity = last_intensity;
// delegate.curr_intensity = curr_intensity;
// delegate.curr_intensity_dx = curr_intensity_dx;
// delegate.curr_intensity_dy = curr_intensity_dy;
// delegate.T_last_curr = frame_pose;
// delegate.array_image = image_corresp_data;
// delegate.array_point = transformed_points;
// delegate.error_term = error_term_array;
// delegate.fx = K.fx;
// delegate.fy = K.fy;
// delegate.cx = K.cx;
// delegate.cy = K.cy;
// delegate.cols = cols;
// delegate.rows = rows;
// delegate.N = cols * rows;
// safe_call(cudaMalloc(&delegate.num_corresp, sizeof(uint)));
// safe_call(cudaMemset(delegate.num_corresp, 0, sizeof(uint)));
// compute_rgb_corresp_kernel<<<96, 224>>>(delegate);
// safe_call(cudaMemcpy(&num_corresp, delegate.num_corresp, sizeof(uint), cudaMemcpyDeviceToHost));
// if (num_corresp <= 1)
// return;
// thrust::device_ptr<float> error_term(error_term_array);
// thrust::device_ptr<float> variance_term(variance_term_array);
// float sum_error = thrust::reduce(error_term, error_term + num_corresp);
// mean_estimate = 0;
// stdev_estimated = std::sqrt(sum_error / (num_corresp - 6));
// // dim3 thread(MAX_THREAD);
// // dim3 block(div_up(num_corresp, thread.x));
// // compute_variance_kernel<<<block, thread>>>(error_term_array, variance_term_array, mean_estimate, num_corresp);
// // float sum_variance = thrust::reduce(variance_term, variance_term + num_corresp);
// // stdev_estimated = sqrt(sum_variance / (num_corresp - 1));
// std::cout << "mean : " << mean_estimate << " stddev : " << stdev_estimated << " num_corresp : " << num_corresp << std::endl;
// safe_call(cudaFree(delegate.num_corresp));
// }
// // TODO : Robust RGB Estimation
// // STATUS: On halt
// struct RGBLeastSquares
// {
// cv::cuda::PtrStep<float> out;
// Vector4f *transformed_points;
// Vector4f *image_corresp_data;
// float mean_estimated;
// float stdev_estimated;
// uint num_corresp;
// float fx, fy;
// size_t N;
// __device__ void compute_jacobian(const int &k, float *sum)
// {
// float row[7] = {0, 0, 0, 0, 0, 0, 0};
// float weight = 0;
// if (k < num_corresp)
// {
// Vector3f p_transformed = ToVector3(transformed_points[k]);
// Vector4f image = image_corresp_data[k];
// float z_inv = 1.0 / p_transformed.z;
// Vector3f left;
// left.x = image.z * fx * z_inv;
// left.y = image.w * fy * z_inv;
// left.z = -(left.x * p_transformed.x + left.y * p_transformed.y) * z_inv;
// float residual = image.y - image.x; // curr_val - last_val
// float res_normalized = residual / stdev_estimated;
// float threshold_huber = 1.345 * stdev_estimated;
// if (fabs(res_normalized) < threshold_huber)
// weight = 1;
// else
// weight = threshold_huber / fabs(res_normalized);
// row[6] = (-residual);
// // printf("%f, %f\n", res_normalized, threshold_huber);
// *(Vector3f *)&row[0] = left;
// *(Vector3f *)&row[3] = p_transformed.cross(left);
// }
// int count = 0;
// #pragma unroll
// for (int i = 0; i < 7; ++i)
// {
// #pragma unroll
// for (int j = i; j < 7; ++j)
// sum[count++] = row[i] * row[j];
// }
// }
// __device__ void operator()()
// {
// float sum[29] = {0, 0, 0, 0, 0,
// 0, 0, 0, 0, 0,
// 0, 0, 0, 0, 0,
// 0, 0, 0, 0, 0,
// 0, 0, 0, 0, 0,
// 0, 0, 0, 0};
// float val[29];
// for (int k = blockIdx.x * blockDim.x + threadIdx.x; k < N; k += blockDim.x * gridDim.x)
// {
// compute_jacobian(k, val);
// #pragma unroll
// for (int i = 0; i < 29; ++i)
// sum[i] += val[i];
// }
// BlockReduce<float, 29>(sum);
// if (threadIdx.x == 0)
// {
// #pragma unroll
// for (int i = 0; i < 29; ++i)
// out.ptr(blockIdx.x)[i] = sum[i];
// }
// }
// }; // struct RGBLeastSquares
// __global__ void compute_least_square_RGB_kernel(RGBLeastSquares delegate)
// {
// delegate();
// }
// // TODO : Robust RGB Estimation
// // STATUS: On halt
// void compute_least_square_RGB(
// const uint num_corresp,
// Vector4f *transformed_points,
// Vector4f *image_corresp_data,
// const float mean_estimated,
// const float stdev_estimated,
// const IntrinsicMatrix K,
// cv::cuda::GpuMat sum,
// cv::cuda::GpuMat out,
// float *hessian_estimated,
// float *residual_estimated,
// float *residual)
// {
// RGBLeastSquares delegate;
// delegate.fx = K.fx;
// delegate.fy = K.fy;
// delegate.out = sum;
// delegate.N = num_corresp;
// delegate.num_corresp = num_corresp;
// delegate.image_corresp_data = image_corresp_data;
// delegate.transformed_points = transformed_points;
// delegate.mean_estimated = mean_estimated;
// delegate.stdev_estimated = stdev_estimated;
// compute_least_square_RGB_kernel<<<96, 224>>>(delegate);
// cv::cuda::reduce(sum, out, 0, cv::REDUCE_SUM);
// cv::Mat host_data;
// out.download(host_data);
// create_jtjjtr<6, 7>(host_data, hessian_estimated, residual_estimated);
// residual[0] = host_data.ptr<float>()[27];
// residual[1] = num_corresp;
// // std::cout << residual[0] << " : " << residual[1] << std::endl;
// }
struct RgbReduction2
{
__device__ bool find_corresp(int &x, int &y)
{
Vector4f pt = last_vmap.ptr(y)[x];
if (pt.w < 0 || isnan(pt.x))
return false;
i_l = last_image.ptr(y)[x];
if (!isfinite(i_l))
return false;
p_transformed = pose(ToVector3(pt));
u0 = p_transformed.x / p_transformed.z * fx + cx;
v0 = p_transformed.y / p_transformed.z * fy + cy;
if (u0 >= 2 && u0 < cols - 2 && v0 >= 2 && v0 < rows - 2)
{
i_c = interp2(curr_image, u0, v0);
dx = interp2(dIdx, u0, v0);
dy = interp2(dIdy, u0, v0);
return (dx > 2 || dy > 2) && isfinite(i_c) && isfinite(dx) && isfinite(dy);
}
return false;
}
__device__ float interp2(cv::cuda::PtrStep<float> image, float &x, float &y)
{
int u = std::floor(x), v = std::floor(y);
float coeff_x = x - u, coeff_y = y - v;
return (image.ptr(v)[u] * (1 - coeff_x) + image.ptr(v)[u + 1] * coeff_x) * (1 - coeff_y) +
(image.ptr(v + 1)[u] * (1 - coeff_x) + image.ptr(v + 1)[u + 1] * coeff_x) * coeff_y;
}
__device__ void compute_jacobian(int &k, float *sum)
{
int y = k / cols;
int x = k - y * cols;
bool corresp_found = find_corresp(x, y);
float row[7] = {0, 0, 0, 0, 0, 0, 0};
if (corresp_found)
{
Vector3f left;
float z_inv = 1.0 / p_transformed.z;
left.x = dx * fx * z_inv;
left.y = dy * fy * z_inv;
left.z = -(left.x * p_transformed.x + left.y * p_transformed.y) * z_inv;
float residual = i_c - i_l;
if (stddev > 10e-5)
residual /= stddev;
float huber_th = 1.345 * stddev;
float weight = 1;
if (fabs(residual) > huber_th && stddev > 10e-6)
{
weight = sqrtf(huber_th / fabs(residual));
}
row[6] = weight * (-residual);
*(Vector3f *)&row[0] = weight * left;
*(Vector3f *)&row[3] = weight * p_transformed.cross(left);
}
int count = 0;
#pragma unroll
for (int i = 0; i < 7; ++i)
#pragma unroll
for (int j = i; j < 7; ++j)
sum[count++] = row[i] * row[j];
sum[count] = (float)corresp_found;
}
__device__ __forceinline__ void operator()()
{
float sum[29] = {0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0};
float val[29];
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
{
compute_jacobian(i, val);
#pragma unroll
for (int j = 0; j < 29; ++j)
sum[j] += val[j];
}
BlockReduce<float, 29>(sum);
if (threadIdx.x == 0)
#pragma unroll
for (int i = 0; i < 29; ++i)
out.ptr(blockIdx.x)[i] = sum[i];
}
int cols, rows, N;
float u0, v0;
Matrix3x4f pose;
float fx, fy, cx, cy, invfx, invfy;
cv::cuda::PtrStep<Vector4f> point_cloud, last_vmap;
cv::cuda::PtrStep<float> last_image, curr_image;
cv::cuda::PtrStep<float> dIdx, dIdy;
cv::cuda::PtrStep<float> out;
Vector3f p_transformed, p_last;
float stddev;
private:
float i_c, i_l, dx, dy;
};
__global__ void rgb_reduce_kernel2(RgbReduction2 rr)
{
rr();
}
void rgb_step(const cv::cuda::GpuMat &curr_intensity,
const cv::cuda::GpuMat &last_intensity,
const cv::cuda::GpuMat &last_vmap,
const cv::cuda::GpuMat &curr_vmap,
const cv::cuda::GpuMat &intensity_dx,
const cv::cuda::GpuMat &intensity_dy,
cv::cuda::GpuMat &sum,
cv::cuda::GpuMat &out,
const float stddev_estimate,
const Sophus::SE3d &pose,
const IntrinsicMatrix K,
float *jtj, float *jtr,
float *residual)
{
int cols = curr_intensity.cols;
int rows = curr_intensity.rows;
RgbReduction2 rr;
rr.cols = cols;
rr.rows = rows;
rr.N = cols * rows;
rr.last_image = last_intensity;
rr.curr_image = curr_intensity;
rr.point_cloud = curr_vmap;
rr.last_vmap = last_vmap;
rr.dIdx = intensity_dx;
rr.dIdy = intensity_dy;
rr.pose = pose.cast<float>().matrix3x4();
rr.stddev = stddev_estimate;
rr.fx = K.fx;
rr.fy = K.fy;
rr.cx = K.cx;
rr.cy = K.cy;
rr.invfx = K.invfx;
rr.invfy = K.invfy;
rr.out = sum;
rgb_reduce_kernel2<<<96, 224>>>(rr);
cv::cuda::reduce(sum, out, 0, cv::REDUCE_SUM);
cv::Mat host_data;
out.download(host_data);
create_jtjjtr<6, 7>(host_data, jtj, jtr);
residual[0] = host_data.ptr<float>()[27];
residual[1] = host_data.ptr<float>()[28];
}
} // namespace fusion |
6ca02fd1cccbad57b251822a541da27333f0cd74.hip | // !!! This is a file automatically generated by hipify!!!
#include "PdfBenchmarker.h"
#include "PdfBenchmarker_gpu.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include "RngTest.h"
namespace vecrng {
void PdfBenchmarker::RunCuda()
{
int nDevice;
bool cudaEnabled = false;
hipGetDeviceCount(&nDevice);
if(nDevice > 0) {
hipDeviceReset();
cudaEnabled = true;
}
else {
printf("Waning: No Cuda Capable Device ...\n");
}
//cuda event timing
hipEvent_t start;
hipEvent_t stop;
hipEventCreate (&start);
hipEventCreate (&stop);
  //set the default number of threads and thread blocks - should be settable
//theNThreads should be a power of 2 (due to reduction operations on GPU)
int theNBlocks = 64;
int theNThreads = 256;
//1. MRG32k3a:
vecRng::MRG32k3a<vecRng::ScalarBackend> *mrg32k2a = new vecRng::MRG32k3a<vecRng::ScalarBackend>();
vecRng::MRG32k3a<vecRng::ScalarBackend>::State_t* statesMRG32k3a_d = 0;
hipMalloc((void**)&statesMRG32k3a_d, theNBlocks*theNThreads*sizeof(vecRng::MRG32k3a<vecRng::ScalarBackend>::State_t));
mrg32k2a->Initialize(statesMRG32k3a_d, theNBlocks*theNThreads);
//4 curandStateMRG32k3a
curandStateMRG32k3a* devStatesMRG32k3a = 0;
hipMalloc(&devStatesMRG32k3a,theNBlocks*theNThreads*sizeof(curandStateMRG32k3a));
curand_setup_gpu_gauss(devStatesMRG32k3a, time(NULL), theNBlocks, theNThreads);
  //return values for verification
double *result_h;
double *result_c;
double *result_d;
result_h = (double*) calloc(theNBlocks, sizeof(double));
result_c = (double*) calloc(theNBlocks, sizeof(double));
hipMalloc((void**)&result_d,theNBlocks*sizeof(double));
float meanEventTime[kNumberPdf + 1];
float sigmaEventTime[kNumberPdf + 1];
double rngEvent[kNumberPdf +1];
  float *trialEventTime = new float [fRepetition];
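  // Timed benchmark loop: for each generator index k (0 = vecRng MRG32k3a
  // kernel, 1 = cuRAND MRG32k3a reference) run fRepetition trials, timing each
  // launch with CUDA events; rngEvent accumulates the per-block sums copied
  // back for verification, and the mean/sigma of the trial times are computed
  // from the recorded samples.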
for (int k = 0; k < kNumberPdf + 1; ++k) {
meanEventTime[k] = 0.0;
sigmaEventTime[k] = 0.;
rngEvent[k] = 0.0;
float elapsedTotalTime = 0.;
for (unsigned r = 0; r < fRepetition; ++r) {
trialEventTime[r] = 0.0;
      hipMemset(result_d,0,theNBlocks*sizeof(double)); // clear only the theNBlocks doubles allocated for result_d
if(cudaEnabled) {
hipEventRecord (start,0);
//call CUDA kernel
if(k == 0) {
CudaMRG32k3aGauss(statesMRG32k3a_d, result_d, fNSample, theNBlocks, theNThreads);
}
if(k == 1) {
CurandMRG32k3aGauss(devStatesMRG32k3a,result_d,fNSample,theNBlocks,theNThreads);
}
hipEventRecord (stop,0);
hipEventSynchronize (stop);
hipEventElapsedTime (&trialEventTime[r],start,stop);
        //copy the result for verification
hipMemcpy(result_h,result_d,theNBlocks*sizeof(double),hipMemcpyDeviceToHost);
for(int i = 0 ; i < theNBlocks ; ++i) {
rngEvent[k] += result_h[i];
}
elapsedTotalTime += trialEventTime[r]; //ms
}
}
meanEventTime[k] = elapsedTotalTime/fRepetition;
float variance = 0;
for (unsigned r = 0; r < fRepetition; ++r) {
float delta = (trialEventTime[r] - meanEventTime[k]);
variance += delta*delta;
}
sigmaEventTime[k] = sqrt(variance/fRepetition);
}
  delete [] trialEventTime;
for (int k = 0; k < kNumberPdf + 1; ++k) {
if(k < kNumberPdf) {
printf(" %s CudaBackend Time = %6.4f +- %6.4f msec Sum = %g\n",
PdfName[k], meanEventTime[k], sigmaEventTime[k], rngEvent[k]);
}
if(k== kNumberPdf) {
printf(" %s Nvidia Time = %6.4f +- %6.4f msec Sum = %g\n",
"CurandMRG32k3aNormal", meanEventTime[k], sigmaEventTime[k], rngEvent[k]);
}
}
  //clean up: destroy cuda event and free memory on device and host
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(statesMRG32k3a_d);
hipFree(devStatesMRG32k3a);
hipFree(result_d);
free(result_h);
free(result_c);
delete mrg32k2a;
}
} // end of vecrng namespace
| 6ca02fd1cccbad57b251822a541da27333f0cd74.cu | #include "PdfBenchmarker.h"
#include "PdfBenchmarker_gpu.h"
#include <cuda.h>
#include <curand_kernel.h>
#include "RngTest.h"
namespace vecrng {
void PdfBenchmarker::RunCuda()
{
int nDevice;
bool cudaEnabled = false;
cudaGetDeviceCount(&nDevice);
if(nDevice > 0) {
cudaDeviceReset();
cudaEnabled = true;
}
else {
printf("Waning: No Cuda Capable Device ...\n");
}
//cuda event timing
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate (&start);
cudaEventCreate (&stop);
  //set the default number of threads and thread blocks - should be settable
//theNThreads should be a power of 2 (due to reduction operations on GPU)
int theNBlocks = 64;
int theNThreads = 256;
//1. MRG32k3a:
vecRng::MRG32k3a<vecRng::ScalarBackend> *mrg32k2a = new vecRng::MRG32k3a<vecRng::ScalarBackend>();
vecRng::MRG32k3a<vecRng::ScalarBackend>::State_t* statesMRG32k3a_d = 0;
cudaMalloc((void**)&statesMRG32k3a_d, theNBlocks*theNThreads*sizeof(vecRng::MRG32k3a<vecRng::ScalarBackend>::State_t));
mrg32k2a->Initialize(statesMRG32k3a_d, theNBlocks*theNThreads);
//4 curandStateMRG32k3a
curandStateMRG32k3a* devStatesMRG32k3a = 0;
cudaMalloc(&devStatesMRG32k3a,theNBlocks*theNThreads*sizeof(curandStateMRG32k3a));
curand_setup_gpu_gauss(devStatesMRG32k3a, time(NULL), theNBlocks, theNThreads);
  //return values for verification
double *result_h;
double *result_c;
double *result_d;
result_h = (double*) calloc(theNBlocks, sizeof(double));
result_c = (double*) calloc(theNBlocks, sizeof(double));
cudaMalloc((void**)&result_d,theNBlocks*sizeof(double));
float meanEventTime[kNumberPdf + 1];
float sigmaEventTime[kNumberPdf + 1];
double rngEvent[kNumberPdf +1];
  float *trialEventTime = new float [fRepetition];
for (int k = 0; k < kNumberPdf + 1; ++k) {
meanEventTime[k] = 0.0;
sigmaEventTime[k] = 0.;
rngEvent[k] = 0.0;
float elapsedTotalTime = 0.;
for (unsigned r = 0; r < fRepetition; ++r) {
trialEventTime[r] = 0.0;
      cudaMemset(result_d,0,theNBlocks*sizeof(double)); // clear only the theNBlocks doubles allocated for result_d
if(cudaEnabled) {
cudaEventRecord (start,0);
//call CUDA kernel
if(k == 0) {
CudaMRG32k3aGauss(statesMRG32k3a_d, result_d, fNSample, theNBlocks, theNThreads);
}
if(k == 1) {
CurandMRG32k3aGauss(devStatesMRG32k3a,result_d,fNSample,theNBlocks,theNThreads);
}
cudaEventRecord (stop,0);
cudaEventSynchronize (stop);
cudaEventElapsedTime (&trialEventTime[r],start,stop);
        //copy the result for verification
cudaMemcpy(result_h,result_d,theNBlocks*sizeof(double),cudaMemcpyDeviceToHost);
for(int i = 0 ; i < theNBlocks ; ++i) {
rngEvent[k] += result_h[i];
}
elapsedTotalTime += trialEventTime[r]; //ms
}
}
meanEventTime[k] = elapsedTotalTime/fRepetition;
float variance = 0;
for (unsigned r = 0; r < fRepetition; ++r) {
float delta = (trialEventTime[r] - meanEventTime[k]);
variance += delta*delta;
}
sigmaEventTime[k] = sqrt(variance/fRepetition);
}
  delete [] trialEventTime;
for (int k = 0; k < kNumberPdf + 1; ++k) {
if(k < kNumberPdf) {
printf(" %s CudaBackend Time = %6.4f +- %6.4f msec Sum = %g\n",
PdfName[k], meanEventTime[k], sigmaEventTime[k], rngEvent[k]);
}
if(k== kNumberPdf) {
printf(" %s Nvidia Time = %6.4f +- %6.4f msec Sum = %g\n",
"CurandMRG32k3aNormal", meanEventTime[k], sigmaEventTime[k], rngEvent[k]);
}
}
  //clean up: destroy cuda event and free memory on device and host
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(statesMRG32k3a_d);
cudaFree(devStatesMRG32k3a);
cudaFree(result_d);
free(result_h);
free(result_c);
delete mrg32k2a;
}
} // end of vecrng namespace
|
61ba0835ca64d3c1c8a0910e311f5fbbb6a75244.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=[64,1] --gridDim=[64,1]
#include <hip/hip_runtime.h>
#define BIN_COUNT 64
////////////////////////////////////////////////////////////////////////////////
// GPU-specific definitions
////////////////////////////////////////////////////////////////////////////////
//Fast mul on G8x / G9x / G100
#define IMUL(a, b) a * b
////////////////////////////////////////////////////////////////////////////////
// Merge blockN histograms into gridDim.x histograms
// blockDim.x == BIN_COUNT
// gridDim.x == BLOCK_N2
////////////////////////////////////////////////////////////////////////////////
#define MERGE_THREADS 64
__global__ void mergeHistogram64Kernel(
unsigned int *d_Histogram,
unsigned int *d_PartialHistograms,
unsigned int blockN
){
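    // Each block merges one histogram bin: threads stride over the blockN
    // partial histograms accumulating that bin, then a shared-memory tree
    // reduction combines the per-thread sums and thread 0 writes the result.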
__shared__ unsigned int data[MERGE_THREADS];
unsigned int sum = 0;
for(unsigned int i = threadIdx.x; i < blockN; i += MERGE_THREADS) {
sum += d_PartialHistograms[blockIdx.x + i * BIN_COUNT];
}
data[threadIdx.x] = sum;
for(unsigned int stride = MERGE_THREADS / 2;
stride > 0; stride >>= 1){
__syncthreads();
/* BUGINJECT: ADD_BARRIER, DOWN */
if(threadIdx.x < stride) {
#ifdef MUTATION
__syncthreads();
#endif
data[threadIdx.x] += data[threadIdx.x + stride];
}
}
if(threadIdx.x == 0)
d_Histogram[blockIdx.x] = data[0];
}
| 61ba0835ca64d3c1c8a0910e311f5fbbb6a75244.cu | //pass
//--blockDim=[64,1] --gridDim=[64,1]
#include <cuda.h>
#define BIN_COUNT 64
////////////////////////////////////////////////////////////////////////////////
// GPU-specific definitions
////////////////////////////////////////////////////////////////////////////////
//Fast mul on G8x / G9x / G100
#define IMUL(a, b) a * b
////////////////////////////////////////////////////////////////////////////////
// Merge blockN histograms into gridDim.x histograms
// blockDim.x == BIN_COUNT
// gridDim.x == BLOCK_N2
////////////////////////////////////////////////////////////////////////////////
#define MERGE_THREADS 64
__global__ void mergeHistogram64Kernel(
unsigned int *d_Histogram,
unsigned int *d_PartialHistograms,
unsigned int blockN
){
__shared__ unsigned int data[MERGE_THREADS];
unsigned int sum = 0;
for(unsigned int i = threadIdx.x; i < blockN; i += MERGE_THREADS) {
sum += d_PartialHistograms[blockIdx.x + i * BIN_COUNT];
}
data[threadIdx.x] = sum;
for(unsigned int stride = MERGE_THREADS / 2;
stride > 0; stride >>= 1){
__syncthreads();
/* BUGINJECT: ADD_BARRIER, DOWN */
if(threadIdx.x < stride) {
#ifdef MUTATION
__syncthreads();
#endif
data[threadIdx.x] += data[threadIdx.x + stride];
}
}
if(threadIdx.x == 0)
d_Histogram[blockIdx.x] = data[0];
}
|
73280dd250693622cc40c4bde4021afe676f04d1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/split/partition.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/pair.h>
#include <vector>
namespace cudf {
namespace strings {
namespace detail {
using string_index_pair = thrust::pair<char const*, size_type>;
namespace {
//
// Partition splits the string at the first occurrence of delimiter, and returns 3 elements
// containing the part before the delimiter, the delimiter itself, and the part after the delimiter.
// If the delimiter is not found, return 3 elements containing the string itself, followed by two
// empty strings.
//
// strs = ["abcde", nullptr, "a_bc_def", "a__bc", "_ab_cd", "ab_cd_"]
// results = partition(strs,"_")
// col0 col1 col2
// 0 abcde "" ""
// 1 null null null
//  2     a      _       bc_def
// 3 a _ _bc
// 4 "" _ ab_cd
// 5 ab _ cd_
//
struct partition_fn {
column_device_view const d_strings; // strings to split
string_view const d_delimiter; // delimiter for split
string_index_pair* d_indices_left{}; // the three
string_index_pair* d_indices_delim{}; // output columns
string_index_pair* d_indices_right{}; // amigos
partition_fn(column_device_view const& d_strings,
string_view const& d_delimiter,
rmm::device_uvector<string_index_pair>& indices_left,
rmm::device_uvector<string_index_pair>& indices_delim,
rmm::device_uvector<string_index_pair>& indices_right)
: d_strings(d_strings),
d_delimiter(d_delimiter),
d_indices_left(indices_left.data()),
d_indices_delim(indices_delim.data()),
d_indices_right(indices_right.data())
{
}
__device__ void set_null_entries(size_type idx)
{
if (d_indices_left) {
d_indices_left[idx] = string_index_pair{nullptr, 0};
d_indices_delim[idx] = string_index_pair{nullptr, 0};
d_indices_right[idx] = string_index_pair{nullptr, 0};
}
}
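  // Tests whether the delimiter (or any whitespace character when the
  // delimiter is empty) starts at itr's byte offset; on a match it fills the
  // three output columns for this row and returns the offset, otherwise -1.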
__device__ size_type check_delimiter(size_type idx,
string_view const& d_str,
string_view::const_iterator& itr)
{
size_type offset = itr.byte_offset();
size_type pos = -1;
if (d_delimiter.empty()) {
if (*itr <= ' ') // whitespace delimited
pos = offset;
} else {
auto bytes = ::min(d_str.size_bytes() - offset, d_delimiter.size_bytes());
if (d_delimiter.compare(d_str.data() + offset, bytes) == 0) pos = offset;
}
if (pos >= 0) // delimiter found, set results
{
d_indices_left[idx] = string_index_pair{d_str.data(), offset};
if (d_delimiter.empty()) {
d_indices_delim[idx] = string_index_pair{d_str.data() + offset, 1};
++offset;
} else {
d_indices_delim[idx] = string_index_pair{d_delimiter.data(), d_delimiter.size_bytes()};
offset += d_delimiter.size_bytes();
}
d_indices_right[idx] = string_index_pair{d_str.data() + offset, d_str.size_bytes() - offset};
}
return pos;
}
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
set_null_entries(idx);
return;
}
string_view d_str = d_strings.element<string_view>(idx);
size_type pos = -1;
for (auto itr = d_str.begin(); (pos < 0) && (itr < d_str.end()); ++itr)
pos = check_delimiter(idx, d_str, itr);
if (pos < 0) // delimiter not found
{
d_indices_left[idx] = string_index_pair{d_str.data(), d_str.size_bytes()};
d_indices_delim[idx] = string_index_pair{"", 0}; // two empty
d_indices_right[idx] = string_index_pair{"", 0}; // strings added
}
}
};
//
// This follows most of the same logic as partition above except that the delimiter
// search starts from the end of each string. Also, if no delimiter is found the
// resulting array includes two empty strings followed by the original string.
//
// strs = ["abcde", nullptr, "a_bc_def", "a__bc", "_ab_cd", "ab_cd_"]
// results = rpartition(strs,"_")
// col0 col1 col2
// 0 "" "" abcde
// 1 null null null
//  2     a_bc   _       def
// 3 a_ _ bc
// 4 ab _ cd
// 5 ab_cd _ ""
//
struct rpartition_fn : public partition_fn {
rpartition_fn(column_device_view const& d_strings,
string_view const& d_delimiter,
rmm::device_uvector<string_index_pair>& indices_left,
rmm::device_uvector<string_index_pair>& indices_delim,
rmm::device_uvector<string_index_pair>& indices_right)
: partition_fn(d_strings, d_delimiter, indices_left, indices_delim, indices_right)
{
}
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
set_null_entries(idx);
return;
}
string_view d_str = d_strings.element<string_view>(idx);
size_type pos = -1;
auto itr = d_str.end();
while ((pos < 0) && (d_str.begin() < itr)) {
--itr;
pos = check_delimiter(idx, d_str, itr);
}
if (pos < 0) // delimiter not found
{
d_indices_left[idx] = string_index_pair{"", 0}; // two empty
d_indices_delim[idx] = string_index_pair{"", 0}; // strings
d_indices_right[idx] = string_index_pair{d_str.data(), d_str.size_bytes()};
}
}
};
} // namespace
std::unique_ptr<table> partition(strings_column_view const& strings,
string_scalar const& delimiter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(delimiter.is_valid(stream), "Parameter delimiter must be valid");
auto strings_count = strings.size();
if (strings_count == 0) return std::make_unique<table>(std::vector<std::unique_ptr<column>>());
auto strings_column = column_device_view::create(strings.parent(), stream);
string_view d_delimiter(delimiter.data(), delimiter.size());
auto left_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
auto delim_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
auto right_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
partition_fn partitioner(
*strings_column, d_delimiter, left_indices, delim_indices, right_indices);
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
partitioner);
std::vector<std::unique_ptr<column>> results;
results.emplace_back(make_strings_column(left_indices, stream, mr));
results.emplace_back(make_strings_column(delim_indices, stream, mr));
results.emplace_back(make_strings_column(right_indices, stream, mr));
return std::make_unique<table>(std::move(results));
}
std::unique_ptr<table> rpartition(strings_column_view const& strings,
string_scalar const& delimiter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(delimiter.is_valid(stream), "Parameter delimiter must be valid");
auto strings_count = strings.size();
if (strings_count == 0) return std::make_unique<table>(std::vector<std::unique_ptr<column>>());
auto strings_column = column_device_view::create(strings.parent(), stream);
string_view d_delimiter(delimiter.data(), delimiter.size());
auto left_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
auto delim_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
auto right_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
rpartition_fn partitioner(
*strings_column, d_delimiter, left_indices, delim_indices, right_indices);
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
partitioner);
std::vector<std::unique_ptr<column>> results;
results.emplace_back(make_strings_column(left_indices, stream, mr));
results.emplace_back(make_strings_column(delim_indices, stream, mr));
results.emplace_back(make_strings_column(right_indices, stream, mr));
return std::make_unique<table>(std::move(results));
}
} // namespace detail
// external APIs
std::unique_ptr<table> partition(strings_column_view const& strings,
string_scalar const& delimiter,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::partition(strings, delimiter, cudf::get_default_stream(), mr);
}
std::unique_ptr<table> rpartition(strings_column_view const& strings,
string_scalar const& delimiter,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::rpartition(strings, delimiter, cudf::get_default_stream(), mr);
}
} // namespace strings
} // namespace cudf
| 73280dd250693622cc40c4bde4021afe676f04d1.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/split/partition.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/pair.h>
#include <vector>
namespace cudf {
namespace strings {
namespace detail {
using string_index_pair = thrust::pair<char const*, size_type>;
namespace {
//
// Partition splits the string at the first occurrence of delimiter, and returns 3 elements
// containing the part before the delimiter, the delimiter itself, and the part after the delimiter.
// If the delimiter is not found, return 3 elements containing the string itself, followed by two
// empty strings.
//
// strs = ["abcde", nullptr, "a_bc_def", "a__bc", "_ab_cd", "ab_cd_"]
// results = partition(strs,"_")
// col0 col1 col2
// 0 abcde "" ""
// 1 null null null
// 2 a _ bc_déf
// 3 a _ _bc
// 4 "" _ ab_cd
// 5 ab _ cd_
//
struct partition_fn {
column_device_view const d_strings; // strings to split
string_view const d_delimiter; // delimiter for split
string_index_pair* d_indices_left{}; // the three
string_index_pair* d_indices_delim{}; // output columns
string_index_pair* d_indices_right{}; // amigos
partition_fn(column_device_view const& d_strings,
string_view const& d_delimiter,
rmm::device_uvector<string_index_pair>& indices_left,
rmm::device_uvector<string_index_pair>& indices_delim,
rmm::device_uvector<string_index_pair>& indices_right)
: d_strings(d_strings),
d_delimiter(d_delimiter),
d_indices_left(indices_left.data()),
d_indices_delim(indices_delim.data()),
d_indices_right(indices_right.data())
{
}
__device__ void set_null_entries(size_type idx)
{
if (d_indices_left) {
d_indices_left[idx] = string_index_pair{nullptr, 0};
d_indices_delim[idx] = string_index_pair{nullptr, 0};
d_indices_right[idx] = string_index_pair{nullptr, 0};
}
}
__device__ size_type check_delimiter(size_type idx,
string_view const& d_str,
string_view::const_iterator& itr)
{
size_type offset = itr.byte_offset();
size_type pos = -1;
if (d_delimiter.empty()) {
if (*itr <= ' ') // whitespace delimited
pos = offset;
} else {
auto bytes = std::min(d_str.size_bytes() - offset, d_delimiter.size_bytes());
if (d_delimiter.compare(d_str.data() + offset, bytes) == 0) pos = offset;
}
if (pos >= 0) // delimiter found, set results
{
d_indices_left[idx] = string_index_pair{d_str.data(), offset};
if (d_delimiter.empty()) {
d_indices_delim[idx] = string_index_pair{d_str.data() + offset, 1};
++offset;
} else {
d_indices_delim[idx] = string_index_pair{d_delimiter.data(), d_delimiter.size_bytes()};
offset += d_delimiter.size_bytes();
}
d_indices_right[idx] = string_index_pair{d_str.data() + offset, d_str.size_bytes() - offset};
}
return pos;
}
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
set_null_entries(idx);
return;
}
string_view d_str = d_strings.element<string_view>(idx);
size_type pos = -1;
for (auto itr = d_str.begin(); (pos < 0) && (itr < d_str.end()); ++itr)
pos = check_delimiter(idx, d_str, itr);
if (pos < 0) // delimiter not found
{
d_indices_left[idx] = string_index_pair{d_str.data(), d_str.size_bytes()};
d_indices_delim[idx] = string_index_pair{"", 0}; // two empty
d_indices_right[idx] = string_index_pair{"", 0}; // strings added
}
}
};
//
// This follows most of the same logic as partition above except that the delimiter
// search starts from the end of each string. Also, if no delimiter is found the
// resulting array includes two empty strings followed by the original string.
//
// strs = ["abcde", nullptr, "a_bc_def", "a__bc", "_ab_cd", "ab_cd_"]
// results = rpartition(strs,"_")
// col0 col1 col2
// 0 "" "" abcde
// 1 null null null
// 2 a_bc _ déf
// 3 a_ _ bc
// 4 ab _ cd
// 5 ab_cd _ ""
//
struct rpartition_fn : public partition_fn {
rpartition_fn(column_device_view const& d_strings,
string_view const& d_delimiter,
rmm::device_uvector<string_index_pair>& indices_left,
rmm::device_uvector<string_index_pair>& indices_delim,
rmm::device_uvector<string_index_pair>& indices_right)
: partition_fn(d_strings, d_delimiter, indices_left, indices_delim, indices_right)
{
}
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
set_null_entries(idx);
return;
}
string_view d_str = d_strings.element<string_view>(idx);
size_type pos = -1;
auto itr = d_str.end();
while ((pos < 0) && (d_str.begin() < itr)) {
--itr;
pos = check_delimiter(idx, d_str, itr);
}
if (pos < 0) // delimiter not found
{
d_indices_left[idx] = string_index_pair{"", 0}; // two empty
d_indices_delim[idx] = string_index_pair{"", 0}; // strings
d_indices_right[idx] = string_index_pair{d_str.data(), d_str.size_bytes()};
}
}
};
} // namespace
std::unique_ptr<table> partition(strings_column_view const& strings,
string_scalar const& delimiter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(delimiter.is_valid(stream), "Parameter delimiter must be valid");
auto strings_count = strings.size();
if (strings_count == 0) return std::make_unique<table>(std::vector<std::unique_ptr<column>>());
auto strings_column = column_device_view::create(strings.parent(), stream);
string_view d_delimiter(delimiter.data(), delimiter.size());
auto left_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
auto delim_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
auto right_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
partition_fn partitioner(
*strings_column, d_delimiter, left_indices, delim_indices, right_indices);
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
partitioner);
std::vector<std::unique_ptr<column>> results;
results.emplace_back(make_strings_column(left_indices, stream, mr));
results.emplace_back(make_strings_column(delim_indices, stream, mr));
results.emplace_back(make_strings_column(right_indices, stream, mr));
return std::make_unique<table>(std::move(results));
}
std::unique_ptr<table> rpartition(strings_column_view const& strings,
string_scalar const& delimiter,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(delimiter.is_valid(stream), "Parameter delimiter must be valid");
auto strings_count = strings.size();
if (strings_count == 0) return std::make_unique<table>(std::vector<std::unique_ptr<column>>());
auto strings_column = column_device_view::create(strings.parent(), stream);
string_view d_delimiter(delimiter.data(), delimiter.size());
auto left_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
auto delim_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
auto right_indices = rmm::device_uvector<string_index_pair>(strings_count, stream);
rpartition_fn partitioner(
*strings_column, d_delimiter, left_indices, delim_indices, right_indices);
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
partitioner);
std::vector<std::unique_ptr<column>> results;
results.emplace_back(make_strings_column(left_indices, stream, mr));
results.emplace_back(make_strings_column(delim_indices, stream, mr));
results.emplace_back(make_strings_column(right_indices, stream, mr));
return std::make_unique<table>(std::move(results));
}
} // namespace detail
// external APIs
std::unique_ptr<table> partition(strings_column_view const& strings,
string_scalar const& delimiter,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::partition(strings, delimiter, cudf::get_default_stream(), mr);
}
std::unique_ptr<table> rpartition(strings_column_view const& strings,
string_scalar const& delimiter,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::rpartition(strings, delimiter, cudf::get_default_stream(), mr);
}
} // namespace strings
} // namespace cudf
|
893c340f3221df9ba9a30ad1aea5534c5c6d6ad1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file alg4.cu
* @brief CUDA device code for GPU-Efficient Recursive Filtering Algorithm 4
* @author Rodolfo Lima
* @date September, 2011
*/
//== INCLUDES =================================================================
#include <cmath>
#include <cstdio>
#include <cfloat>
#include <cassert>
#include <iostream>
#include <algorithm>
#include <util.h>
#include <symbol.h>
#include <gpufilter.h>
#include <gpuconsts.cuh>
#include <alg4.cuh>
//== NAMESPACES ===============================================================
namespace gpufilter {
//== IMPLEMENTATION ===========================================================
//-- Utilities ----------------------------------------------------------------
template <class T>
__device__ inline void swap(T& a, T& b) {
T c = a;
a = b;
b = c;
}
__device__ float2 operator + ( const float2 &a,
const float2 &b ) {
return make_float2(a.x+b.x, a.y+b.y);
}
__device__ float2& operator += ( float2& a,
const float2& b ) {
a.x += b.x;
a.y += b.y;
return a;
}
__device__ float2 operator * ( const float2& a,
float b ) {
return make_float2(a.x*b, a.y*b);
}
__device__ float2 operator * ( float a,
const float2& b ) {
return b*a;
}
__device__ float2 operator / ( const float2& a,
float b ) {
return make_float2(a.x/b, a.y/b);
}
__device__ float2 mul2x2( const float2& v,
Matrix<float,2,2> mat) {
return make_float2(v.x*mat[0][0] + v.y*mat[1][0],
v.x*mat[0][1] + v.y*mat[1][1]);
}
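// Throughout this file a float2 carries the two-element state of the
// second-order recursion (.x -> p0, .y -> p1); mul2x2 applies one of the
// precomputed 2x2 carry matrices (e.g. c_AbF2, c_AbR2, c_AFP_HARB) to it.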
//-- Algorithm 4_2 Stage 1 ----------------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage1( float2 *g_transp_pybar,
float2 *g_transp_ezhat )
{
int tx = threadIdx.x, ty = threadIdx.y, m = blockIdx.x*2, n = blockIdx.y;
// Each cuda block will work on two horizontally adjacent WSxWS
// input data blocks, so allocate enough shared memory for these.
__shared__ float s_block[WS*2][WS+1];
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx],
(*bdata2)[WS+1] = (float (*)[WS+1])&s_block[ty+WS][tx];
// Load data into shared memory
float tu = ((m-c_border)*WS+tx+.5f)*c_inv_width,
tv = ((n-c_border)*WS+ty+.5f)*c_inv_height;
#pragma unroll
for (int i=0; i<WS-(WS%DW); i+=DW)
{
**bdata = tex2D(t_in, tu, tv);
bdata += DW;
**bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv);
bdata2 += DW;
tv += DW*c_inv_height;
}
if (ty < WS%DW)
{
**bdata = tex2D(t_in, tu, tv);
**bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv);
}
m += ty;
if (m >= c_m_size)
return;
// We use a transposed matrix for pybar and ezhat to have
// coalesced memory accesses. This is the index for these
// transposed buffers.
g_transp_pybar += m*c_carry_height + n*WS + tx;
g_transp_ezhat += m*c_carry_height + n*WS + tx;
__syncthreads();
float2 prev; // .x -> p0, .y -> p1
if (ty < 2)
{
float *bdata = s_block[tx+ty*WS];
// calculate pybar, scan left -> right
prev = make_float2(0,*bdata++);
#pragma unroll
for (int j=1; j<WS; ++j, ++bdata)
{
*bdata = prev.x = *bdata - prev.y*c_a1 - prev.x*c_a2;
swap(prev.x, prev.y);
}
if (m < c_m_size-1)
*g_transp_pybar = prev*c_b0;
if (m > 0)
{
// calculate ezhat, scan right -> left
prev = make_float2(*--bdata, 0);
--bdata;
#pragma unroll
for (int j=WS-2; j>=0; --j, --bdata)
{
*bdata = prev.y = *bdata - prev.x*c_a1 - prev.y*c_a2;
swap(prev.x, prev.y);
}
*g_transp_ezhat = prev*(c_b0*c_b0);
}
}
}
//-- Algorithm 4_2 Stage 2 and 3 or Stage 5 and 6 -----------------------------
__device__
void alg4_stage2_3v5_6( float2 *g_transp_pybar,
float2 *g_transp_ezhat )
{
int tx = threadIdx.x, ty = threadIdx.y, n = blockIdx.y;
__shared__ float2 s_transp_block[DW][WS];
float2 *bdata = &s_transp_block[ty][tx];
// P(ybar) -> P(y) processing --------------------------------------
float2 *transp_pybar = g_transp_pybar + ty*c_carry_height + n*WS+tx;
// first column-block
// read P(ybar)
*bdata = *transp_pybar;
float2 py; // P(Y), .x = p0, .y = p1
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
// (24): P_m(y) = P_m(ybar) + A^b_F * P_{m-1}(y)
py = **bdata++;
#pragma unroll
for (int m=1; m<blockDim.y; ++m, ++bdata)
**bdata = py = **bdata + mul2x2(py,c_AbF2);
}
__syncthreads();
// write P(y)
if (ty > 0) // first one doesn't need fixing
*transp_pybar = *bdata;
transp_pybar += c_carry_height*blockDim.y;
// middle column-blocks
int m = blockDim.y;
if (m == DW)
{
int mmax = c_m_size-(c_m_size%DW)-1;
for (; m<mmax; m+=DW)
{
*bdata = *transp_pybar;
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
#pragma unroll
for (int dm=0; dm<DW; ++dm, ++bdata)
**bdata = py = **bdata + mul2x2(py,c_AbF2);
}
__syncthreads();
*transp_pybar = *bdata;
transp_pybar += c_carry_height*DW;
}
}
// remaining column-blocks
if (m < c_m_size-1)
{
if (m+ty < c_m_size-1)
*bdata = *transp_pybar;
int remaining = c_m_size-1 - m;
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
#pragma unroll
for (int dm=0; dm<remaining; ++dm, ++bdata)
**bdata = py = **bdata + mul2x2(py,c_AbF2);
}
__syncthreads();
if (m+ty < c_m_size-1)
*transp_pybar = *bdata;
}
// E(zhat) -> E(z) processing --------------------------------------
int idx = (c_m_size-1-ty)*c_carry_height + n*WS+tx;
const float2 *transp_pm1y = g_transp_pybar + idx - c_carry_height;
// last column-block
float2 *transp_ezhat = g_transp_ezhat + idx;
m = c_m_size-1;
// all pybars must be updated!
__syncthreads();
float2 ez;
if (m-ty > 0)
{
*bdata = *transp_ezhat;
*bdata += mul2x2(*transp_pm1y,c_AFP_HARB);
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
ez = **bdata++;
for (int dm=1; dm<blockDim.y; ++dm, ++bdata)
**bdata = ez = **bdata + mul2x2(ez,c_AbR2);
}
__syncthreads();
*transp_ezhat = *bdata;
}
transp_ezhat -= c_carry_height*blockDim.y;
transp_pm1y -= c_carry_height*blockDim.y;
// middle column-blocks
m = c_m_size-1 - blockDim.y;
if (blockDim.y == DW)
{
int mmin = c_m_size%DW;
for (; m>=mmin; m-=DW)
{
if (m > 0)
{
*bdata = *transp_ezhat;
if (m-ty > 0)
*bdata += mul2x2(*transp_pm1y,c_AFP_HARB);
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
#pragma unroll
for (int dm=0; dm<DW; ++dm, ++bdata)
**bdata = ez = **bdata + mul2x2(ez,c_AbR2);
}
__syncthreads();
*transp_ezhat = *bdata;
}
transp_ezhat -= DW*c_carry_height;
transp_pm1y -= DW*c_carry_height;
}
}
// remaining column-blocks
if (m > 0)
{
int remaining = m+1;
if (m-ty >= 0)
{
*bdata = *transp_ezhat;
if (m-ty > 0)
*bdata += mul2x2(*transp_pm1y,c_AFP_HARB);
}
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
// (24): P_m(y) = P_m(ybar) + A^b_F * P_{m-1}(y)
#pragma unroll
for (int dm=0; dm<remaining; ++dm, ++bdata)
**bdata = ez = **bdata + mul2x2(ez,c_AbR2);
}
__syncthreads();
if (m-ty > 0)
*transp_ezhat = *bdata;
}
}
//-- Algorithm 4_2 Stage 4 or Stage 7 -----------------------------------------
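// When p_fusion is true (stage 4) the kernel, besides writing the row-filtered
// blocks transposed into g_transp_out, also produces the column carries
// P(ubar) and E(vhat) consumed by the later stages; when false (stage 7) it
// only writes the final output.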
template <bool p_fusion>
__device__
void alg4_stage4v7( float *g_transp_out,
float2 *g_transp_py,
float2 *g_transp_ez,
float2 *g_pubar,
float2 *g_evhat,
int out_stride )
{
int tx = threadIdx.x, ty = threadIdx.y, m = blockIdx.x*2, n = blockIdx.y;
// Each cuda block will work on two horizontally adjacent WSxWS
// input data blocks, so allocate enough shared memory for these.
__shared__ float s_block[WS*2][WS+1];
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx],
(*bdata2)[WS+1] = (float (*)[WS+1])&s_block[ty+WS][tx];
// Load data into shared memory
float tu = ((m-c_border)*WS+tx + 0.5f)*c_inv_width,
tv = ((n-c_border)*WS+ty + 0.5f)*c_inv_height;
#pragma unroll
for (int i=0; i<WS-(WS%DW); i+=DW)
{
**bdata = tex2D(t_in, tu, tv);
bdata += DW;
**bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv);
bdata2 += DW;
tv += DW*c_inv_height;
}
if (ty < WS%DW)
{
**bdata = tex2D(t_in, tu, tv);
**bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv);
}
m += ty;
if (m >= c_m_size)
return;
// We use a transposed matrix for py and ez to have coalesced
// memory accesses. This is the index for these transposed
// buffers.
g_transp_py += (m-1)*c_carry_height + n*WS + tx;
g_transp_ez += (m+1)*c_carry_height + n*WS + tx;
__syncthreads();
if (ty < 2)
{
float2 prev; // .x -> p0, .y -> p1
float *bdata = s_block[tx+ty*WS];
// calculate pybar, scan left -> right
if (m > 0)
prev = *g_transp_py * c_inv_b0;
else
prev = make_float2(0,0);
#pragma unroll
for (int j=0; j<WS; ++j, ++bdata)
{
*bdata = prev.x = *bdata - prev.y*c_a1 - prev.x*c_a2;
swap(prev.x, prev.y);
}
--bdata;
// calculate ez, scan right -> left
if (m < c_m_size-1)
prev = *g_transp_ez;
else
prev = make_float2(0,0);
float b0_2 = c_b0*c_b0;
// For some reason it's faster when this is here then inside
// the next if block
int x = (m-c_border+1)*WS-1;
int y = (n-c_border)*WS+tx;
// current block intersects transp_out's area?
if (m >= c_border && m <= c_last_m && n >= c_border && n <= c_last_n)
{
// image's end is in the middle of the block and we're outside
// the image width?
if (x >= c_width)
{
// process data until we get into the image
int j;
#pragma unroll
for (j=x; j>=c_width; --j, --bdata)
{
prev.y = *bdata*b0_2 - prev.x*c_a1 - prev.y*c_a2;
if (p_fusion)
*bdata = prev.y;
swap(prev.x, prev.y);
}
// now we're inside the image, we must write to transp_out
float *out = g_transp_out + (c_width-1)*out_stride + y;
int mmin = x-(WS-1);
#pragma unroll
for (;j>=mmin; --j, --bdata, out -= out_stride)
{
prev.y = *bdata*b0_2 - prev.x*c_a1 - prev.y*c_a2;
if (p_fusion)
*bdata = prev.y;
if (y < c_height)
*out = prev.y;
swap(prev.x, prev.y);
}
}
else
{
float *out = g_transp_out + x*out_stride + y;
#pragma unroll
for (int j=WS-1; j>=0; --j, --bdata, out -= out_stride)
{
prev.y = *bdata*b0_2 - prev.x*c_a1 - prev.y*c_a2;
if (p_fusion)
*bdata = prev.y;
if (y < c_height)
*out = prev.y;
swap(prev.x, prev.y);
}
}
}
else
{
#pragma unroll
for (int j=WS-1; j>=0; --j, --bdata)
{
prev.y = *bdata*b0_2 - prev.x*c_a1 - prev.y*c_a2;
if (p_fusion)
*bdata = prev.y;
swap(prev.x, prev.y);
}
}
if (p_fusion)
{
g_pubar += n*c_carry_width + m*WS + tx;
g_evhat += n*c_carry_width + m*WS + tx;
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty*WS][tx];
// calculate pubar, scan left -> right
float2 prev = make_float2(0,**bdata++);
#pragma unroll
for (int i=1; i<WS; ++i, ++bdata)
{
**bdata = prev.x = **bdata - prev.y*c_a1 - prev.x*c_a2;
swap(prev.x, prev.y);
}
if (n < c_n_size-1)
*g_pubar = prev*c_b0;
if (n > 0)
{
// calculate evhat, scan right -> left
prev = make_float2(**--bdata, 0);
--bdata;
#pragma unroll
for (int i=WS-2; i>=0; --i, --bdata)
{
prev.y = **bdata - prev.x*c_a1 - prev.y*c_a2;
swap(prev.x, prev.y);
}
*g_evhat = prev*b0_2;
}
}
}
}
//-- Algorithm 4_2 Stage 2 and 3 ----------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage2_3( float2 *g_transp_pybar,
float2 *g_transp_ezhat ) {
alg4_stage2_3v5_6( g_transp_pybar, g_transp_ezhat );
}
//-- Algorithm 4_2 Stage 4 ----------------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage4( float *g_transp_out,
float2 *g_transp_py,
float2 *g_transp_ez,
float2 *g_pubar,
float2 *g_evhat,
int out_stride ) {
alg4_stage4v7<true>( g_transp_out, g_transp_py, g_transp_ez, g_pubar,
g_evhat, out_stride );
}
//-- Algorithm 4_2 Stage 5 and 6 ----------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage5_6( float2 *g_transp_pybar,
float2 *g_transp_ezhat ) {
alg4_stage2_3v5_6( g_transp_pybar, g_transp_ezhat );
}
//-- Algorithm 4_2 Stage 7 ----------------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage7( float *g_out,
float2 *g_transp_py,
float2 *g_transp_ez,
int out_stride ) {
alg4_stage4v7<false>( g_out, g_transp_py, g_transp_ez, 0, 0,
out_stride );
}
//-- Host ---------------------------------------------------------------------
__host__
inline int transp_out_height( const int& h ) {
// hipBindTexture2D chokes when memory block stride isn't
// multiple of 256 bytes, let's add some padding.
return ((h+WS-1)/WS)*WS;
}
__host__
void prepare_alg4( alg_setup& algs,
alg_setup& algs_transp,
dvector<float>& d_out,
dvector<float>& d_transp_out,
dvector<float2>& d_transp_pybar,
dvector<float2>& d_transp_ezhat,
dvector<float2>& d_pubar,
dvector<float2>& d_evhat,
hipArray *& a_in,
const float *h_in,
const int& w,
const int& h,
const float& b0,
const float& a1,
const float& a2,
const int& extb,
const initcond& ic )
{
up_constants_coefficients2( b0, a1, a2 );
calc_alg_setup( algs, w, h, extb );
calc_alg_setup( algs_transp, h, w, extb );
d_out.resize( w * h );
d_transp_out.resize( transp_out_height(h) * w );
d_transp_pybar.resize( algs.m_size * algs.carry_height );
d_transp_ezhat.resize( algs.m_size * algs.carry_height );
d_pubar.resize( algs.n_size * algs.carry_width );
d_evhat.resize( algs.n_size * algs.carry_width );
d_transp_pybar.fill_zero();
d_transp_ezhat.fill_zero();
d_pubar.fill_zero();
d_evhat.fill_zero();
up_texture( a_in, h_in, w, h, ic );
}
__host__
void alg4( dvector<float>& d_out,
dvector<float>& d_transp_out,
dvector<float2>& d_transp_pybar,
dvector<float2>& d_transp_ezhat,
dvector<float2>& d_pubar,
dvector<float2>& d_evhat,
const hipArray *a_in,
const alg_setup& algs,
const alg_setup& algs_transp )
{
dvector<float2> d_transp_py, d_transp_ez, d_pu, d_ev;
hipBindTextureToArray( t_in, a_in );
up_alg_setup( algs );
hipLaunchKernelGGL(( alg4_stage1),
dim3(dim3((algs.m_size+2-1)/2, algs.n_size)), dim3(dim3(WS, DW)) , 0, 0,
d_transp_pybar, d_transp_ezhat );
hipLaunchKernelGGL(( alg4_stage2_3),
dim3(dim3(1, algs.n_size)), dim3(dim3(WS, std::min<int>(algs.m_size, DW))) , 0, 0,
d_transp_pybar, d_transp_ezhat );
swap( d_transp_pybar, d_transp_py );
swap( d_transp_ezhat, d_transp_ez );
hipLaunchKernelGGL(( alg4_stage4),
dim3(dim3((algs.m_size+2-1)/2, algs.n_size)), dim3(dim3(WS, DW)) , 0, 0,
d_transp_out, d_transp_py, d_transp_ez, d_pubar, d_evhat,
transp_out_height(algs.height) );
up_alg_setup( algs_transp );
hipLaunchKernelGGL(( alg4_stage5_6),
dim3(dim3(1, algs.m_size)), dim3(dim3(WS, std::min<int>(algs.n_size, DW))) , 0, 0,
d_pubar, d_evhat );
swap( d_pubar, d_pu );
swap( d_evhat, d_ev );
hipUnbindTexture( t_in );
size_t offset;
hipBindTexture2D( &offset, t_in, d_transp_out, algs.height, algs.width,
transp_out_height(algs.height)*sizeof(float) );
hipLaunchKernelGGL(( alg4_stage7),
dim3(dim3((algs.n_size+2-1)/2, algs.m_size)), dim3(dim3(WS, DW)) , 0, 0,
d_out, d_pu, d_ev, algs.width );
swap( d_ev, d_evhat );
swap( d_pu, d_pubar );
swap( d_transp_ez, d_transp_ezhat );
swap( d_transp_py, d_transp_pybar );
hipUnbindTexture( t_in );
}
__host__
void alg4( float *h_inout,
const int& w,
const int& h,
const float& b0,
const float& a1,
const float& a2,
const int& extb,
const initcond& ic )
{
alg_setup algs, algs_transp;
dvector<float> d_out, d_transp_out;
dvector<float2> d_transp_pybar, d_transp_ezhat, d_pubar, d_evhat;
hipArray *a_in;
prepare_alg4( algs, algs_transp, d_out, d_transp_out, d_transp_pybar,
d_transp_ezhat, d_pubar, d_evhat, a_in, h_inout, w, h,
b0, a1, a2, extb, ic );
alg4( d_out, d_transp_out, d_transp_pybar, d_transp_ezhat, d_pubar,
d_evhat, a_in, algs, algs_transp );
d_out.copy_to( h_inout, w * h );
hipFreeArray( a_in );
}
//=============================================================================
} // namespace gpufilter
//=============================================================================
// vi: ai ts=4 sw=4
| 893c340f3221df9ba9a30ad1aea5534c5c6d6ad1.cu | /**
* @file alg4.cu
* @brief CUDA device code for GPU-Efficient Recursive Filtering Algorithm 4
* @author Rodolfo Lima
* @date September, 2011
*/
//== INCLUDES =================================================================
#include <cmath>
#include <cstdio>
#include <cfloat>
#include <cassert>
#include <iostream>
#include <algorithm>
#include <util.h>
#include <symbol.h>
#include <gpufilter.h>
#include <gpuconsts.cuh>
#include <alg4.cuh>
//== NAMESPACES ===============================================================
namespace gpufilter {
//== IMPLEMENTATION ===========================================================
//-- Utilities ----------------------------------------------------------------
template <class T>
__device__ inline void swap(T& a, T& b) {
T c = a;
a = b;
b = c;
}
__device__ float2 operator + ( const float2 &a,
const float2 &b ) {
return make_float2(a.x+b.x, a.y+b.y);
}
__device__ float2& operator += ( float2& a,
const float2& b ) {
a.x += b.x;
a.y += b.y;
return a;
}
__device__ float2 operator * ( const float2& a,
float b ) {
return make_float2(a.x*b, a.y*b);
}
__device__ float2 operator * ( float a,
const float2& b ) {
return b*a;
}
__device__ float2 operator / ( const float2& a,
float b ) {
return make_float2(a.x/b, a.y/b);
}
__device__ float2 mul2x2( const float2& v,
Matrix<float,2,2> mat) {
return make_float2(v.x*mat[0][0] + v.y*mat[1][0],
v.x*mat[0][1] + v.y*mat[1][1]);
}
//-- Algorithm 4_2 Stage 1 ----------------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage1( float2 *g_transp_pybar,
float2 *g_transp_ezhat )
{
int tx = threadIdx.x, ty = threadIdx.y, m = blockIdx.x*2, n = blockIdx.y;
// Each cuda block will work on two horizontally adjacent WSxWS
// input data blocks, so allocate enough shared memory for these.
__shared__ float s_block[WS*2][WS+1];
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx],
(*bdata2)[WS+1] = (float (*)[WS+1])&s_block[ty+WS][tx];
// Load data into shared memory
float tu = ((m-c_border)*WS+tx+.5f)*c_inv_width,
tv = ((n-c_border)*WS+ty+.5f)*c_inv_height;
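    // Editor's note (added comment): the +.5f offsets address texel centers
    // before the scale by c_inv_width/c_inv_height converts them to normalized
    // texture coordinates, the usual convention for fetching unfiltered data.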
#pragma unroll
for (int i=0; i<WS-(WS%DW); i+=DW)
{
**bdata = tex2D(t_in, tu, tv);
bdata += DW;
**bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv);
bdata2 += DW;
tv += DW*c_inv_height;
}
if (ty < WS%DW)
{
**bdata = tex2D(t_in, tu, tv);
**bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv);
}
m += ty;
if (m >= c_m_size)
return;
// We use a transposed matrix for pybar and ezhat to have
// coalesced memory accesses. This is the index for these
// transposed buffers.
g_transp_pybar += m*c_carry_height + n*WS + tx;
g_transp_ezhat += m*c_carry_height + n*WS + tx;
__syncthreads();
float2 prev; // .x -> p0, .y -> p1
if (ty < 2)
{
float *bdata = s_block[tx+ty*WS];
// calculate pybar, scan left -> right
prev = make_float2(0,*bdata++);
#pragma unroll
for (int j=1; j<WS; ++j, ++bdata)
{
*bdata = prev.x = *bdata - prev.y*c_a1 - prev.x*c_a2;
swap(prev.x, prev.y);
}
if (m < c_m_size-1)
*g_transp_pybar = prev*c_b0;
if (m > 0)
{
// calculate ezhat, scan right -> left
prev = make_float2(*--bdata, 0);
--bdata;
#pragma unroll
for (int j=WS-2; j>=0; --j, --bdata)
{
*bdata = prev.y = *bdata - prev.x*c_a1 - prev.y*c_a2;
swap(prev.x, prev.y);
}
*g_transp_ezhat = prev*(c_b0*c_b0);
}
}
}
//-- Algorithm 4_2 Stage 2 and 3 or Stage 5 and 6 -----------------------------
__device__
void alg4_stage2_3v5_6( float2 *g_transp_pybar,
float2 *g_transp_ezhat )
{
int tx = threadIdx.x, ty = threadIdx.y, n = blockIdx.y;
__shared__ float2 s_transp_block[DW][WS];
float2 *bdata = &s_transp_block[ty][tx];
// P(ybar) -> P(y) processing --------------------------------------
float2 *transp_pybar = g_transp_pybar + ty*c_carry_height + n*WS+tx;
// first column-block
// read P(ybar)
*bdata = *transp_pybar;
float2 py; // P(Y), .x = p0, .y = p1
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
// (24): P_m(y) = P_m(ybar) + A^b_F * P_{m-1}(y)
py = **bdata++;
#pragma unroll
for (int m=1; m<blockDim.y; ++m, ++bdata)
**bdata = py = **bdata + mul2x2(py,c_AbF2);
}
__syncthreads();
// write P(y)
if (ty > 0) // first one doesn't need fixing
*transp_pybar = *bdata;
transp_pybar += c_carry_height*blockDim.y;
// middle column-blocks
int m = blockDim.y;
if (m == DW)
{
int mmax = c_m_size-(c_m_size%DW)-1;
for (; m<mmax; m+=DW)
{
*bdata = *transp_pybar;
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
#pragma unroll
for (int dm=0; dm<DW; ++dm, ++bdata)
**bdata = py = **bdata + mul2x2(py,c_AbF2);
}
__syncthreads();
*transp_pybar = *bdata;
transp_pybar += c_carry_height*DW;
}
}
// remaining column-blocks
if (m < c_m_size-1)
{
if (m+ty < c_m_size-1)
*bdata = *transp_pybar;
int remaining = c_m_size-1 - m;
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
#pragma unroll
for (int dm=0; dm<remaining; ++dm, ++bdata)
**bdata = py = **bdata + mul2x2(py,c_AbF2);
}
__syncthreads();
if (m+ty < c_m_size-1)
*transp_pybar = *bdata;
}
// E(zhat) -> E(z) processing --------------------------------------
int idx = (c_m_size-1-ty)*c_carry_height + n*WS+tx;
const float2 *transp_pm1y = g_transp_pybar + idx - c_carry_height;
// last column-block
float2 *transp_ezhat = g_transp_ezhat + idx;
m = c_m_size-1;
// all pybars must be updated!
__syncthreads();
float2 ez;
if (m-ty > 0)
{
*bdata = *transp_ezhat;
*bdata += mul2x2(*transp_pm1y,c_AFP_HARB);
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
ez = **bdata++;
for (int dm=1; dm<blockDim.y; ++dm, ++bdata)
**bdata = ez = **bdata + mul2x2(ez,c_AbR2);
}
__syncthreads();
*transp_ezhat = *bdata;
}
transp_ezhat -= c_carry_height*blockDim.y;
transp_pm1y -= c_carry_height*blockDim.y;
// middle column-blocks
m = c_m_size-1 - blockDim.y;
if (blockDim.y == DW)
{
int mmin = c_m_size%DW;
for (; m>=mmin; m-=DW)
{
if (m > 0)
{
*bdata = *transp_ezhat;
if (m-ty > 0)
*bdata += mul2x2(*transp_pm1y,c_AFP_HARB);
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
#pragma unroll
for (int dm=0; dm<DW; ++dm, ++bdata)
**bdata = ez = **bdata + mul2x2(ez,c_AbR2);
}
__syncthreads();
*transp_ezhat = *bdata;
}
transp_ezhat -= DW*c_carry_height;
transp_pm1y -= DW*c_carry_height;
}
}
// remaining column-blocks
if (m > 0)
{
int remaining = m+1;
if (m-ty >= 0)
{
*bdata = *transp_ezhat;
if (m-ty > 0)
*bdata += mul2x2(*transp_pm1y,c_AFP_HARB);
}
__syncthreads();
if (ty == 0)
{
float2 (*bdata)[WS] = (float2 (*)[WS]) &s_transp_block[0][tx];
            // E_m(z) = E_m(zhat) + A^b_R * E_{m+1}(z)
#pragma unroll
for (int dm=0; dm<remaining; ++dm, ++bdata)
**bdata = ez = **bdata + mul2x2(ez,c_AbR2);
}
__syncthreads();
if (m-ty > 0)
*transp_ezhat = *bdata;
}
}
//-- Algorithm 4_2 Stage 4 or Stage 7 -----------------------------------------
template <bool p_fusion>
__device__
void alg4_stage4v7( float *g_transp_out,
float2 *g_transp_py,
float2 *g_transp_ez,
float2 *g_pubar,
float2 *g_evhat,
int out_stride )
{
int tx = threadIdx.x, ty = threadIdx.y, m = blockIdx.x*2, n = blockIdx.y;
// Each cuda block will work on two horizontally adjacent WSxWS
// input data blocks, so allocate enough shared memory for these.
__shared__ float s_block[WS*2][WS+1];
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty][tx],
(*bdata2)[WS+1] = (float (*)[WS+1])&s_block[ty+WS][tx];
// Load data into shared memory
float tu = ((m-c_border)*WS+tx + 0.5f)*c_inv_width,
tv = ((n-c_border)*WS+ty + 0.5f)*c_inv_height;
#pragma unroll
for (int i=0; i<WS-(WS%DW); i+=DW)
{
**bdata = tex2D(t_in, tu, tv);
bdata += DW;
**bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv);
bdata2 += DW;
tv += DW*c_inv_height;
}
if (ty < WS%DW)
{
**bdata = tex2D(t_in, tu, tv);
**bdata2 = tex2D(t_in, tu+WS*c_inv_width, tv);
}
m += ty;
if (m >= c_m_size)
return;
// We use a transposed matrix for py and ez to have coalesced
// memory accesses. This is the index for these transposed
// buffers.
g_transp_py += (m-1)*c_carry_height + n*WS + tx;
g_transp_ez += (m+1)*c_carry_height + n*WS + tx;
__syncthreads();
if (ty < 2)
{
float2 prev; // .x -> p0, .y -> p1
float *bdata = s_block[tx+ty*WS];
// calculate pybar, scan left -> right
if (m > 0)
prev = *g_transp_py * c_inv_b0;
else
prev = make_float2(0,0);
#pragma unroll
for (int j=0; j<WS; ++j, ++bdata)
{
*bdata = prev.x = *bdata - prev.y*c_a1 - prev.x*c_a2;
swap(prev.x, prev.y);
}
--bdata;
// calculate ez, scan right -> left
if (m < c_m_size-1)
prev = *g_transp_ez;
else
prev = make_float2(0,0);
float b0_2 = c_b0*c_b0;
        // For some reason it's faster when this is here than inside
// the next if block
int x = (m-c_border+1)*WS-1;
int y = (n-c_border)*WS+tx;
// current block intersects transp_out's area?
if (m >= c_border && m <= c_last_m && n >= c_border && n <= c_last_n)
{
// image's end is in the middle of the block and we're outside
// the image width?
if (x >= c_width)
{
// process data until we get into the image
int j;
#pragma unroll
for (j=x; j>=c_width; --j, --bdata)
{
prev.y = *bdata*b0_2 - prev.x*c_a1 - prev.y*c_a2;
if (p_fusion)
*bdata = prev.y;
swap(prev.x, prev.y);
}
// now we're inside the image, we must write to transp_out
float *out = g_transp_out + (c_width-1)*out_stride + y;
int mmin = x-(WS-1);
#pragma unroll
for (;j>=mmin; --j, --bdata, out -= out_stride)
{
prev.y = *bdata*b0_2 - prev.x*c_a1 - prev.y*c_a2;
if (p_fusion)
*bdata = prev.y;
if (y < c_height)
*out = prev.y;
swap(prev.x, prev.y);
}
}
else
{
float *out = g_transp_out + x*out_stride + y;
#pragma unroll
for (int j=WS-1; j>=0; --j, --bdata, out -= out_stride)
{
prev.y = *bdata*b0_2 - prev.x*c_a1 - prev.y*c_a2;
if (p_fusion)
*bdata = prev.y;
if (y < c_height)
*out = prev.y;
swap(prev.x, prev.y);
}
}
}
else
{
#pragma unroll
for (int j=WS-1; j>=0; --j, --bdata)
{
prev.y = *bdata*b0_2 - prev.x*c_a1 - prev.y*c_a2;
if (p_fusion)
*bdata = prev.y;
swap(prev.x, prev.y);
}
}
if (p_fusion)
{
g_pubar += n*c_carry_width + m*WS + tx;
g_evhat += n*c_carry_width + m*WS + tx;
float (*bdata)[WS+1] = (float (*)[WS+1]) &s_block[ty*WS][tx];
// calculate pubar, scan left -> right
float2 prev = make_float2(0,**bdata++);
#pragma unroll
for (int i=1; i<WS; ++i, ++bdata)
{
**bdata = prev.x = **bdata - prev.y*c_a1 - prev.x*c_a2;
swap(prev.x, prev.y);
}
if (n < c_n_size-1)
*g_pubar = prev*c_b0;
if (n > 0)
{
// calculate evhat, scan right -> left
prev = make_float2(**--bdata, 0);
--bdata;
#pragma unroll
for (int i=WS-2; i>=0; --i, --bdata)
{
prev.y = **bdata - prev.x*c_a1 - prev.y*c_a2;
swap(prev.x, prev.y);
}
*g_evhat = prev*b0_2;
}
}
}
}
//-- Algorithm 4_2 Stage 2 and 3 ----------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage2_3( float2 *g_transp_pybar,
float2 *g_transp_ezhat ) {
alg4_stage2_3v5_6( g_transp_pybar, g_transp_ezhat );
}
//-- Algorithm 4_2 Stage 4 ----------------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage4( float *g_transp_out,
float2 *g_transp_py,
float2 *g_transp_ez,
float2 *g_pubar,
float2 *g_evhat,
int out_stride ) {
alg4_stage4v7<true>( g_transp_out, g_transp_py, g_transp_ez, g_pubar,
g_evhat, out_stride );
}
//-- Algorithm 4_2 Stage 5 and 6 ----------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage5_6( float2 *g_transp_pybar,
float2 *g_transp_ezhat ) {
alg4_stage2_3v5_6( g_transp_pybar, g_transp_ezhat );
}
//-- Algorithm 4_2 Stage 7 ----------------------------------------------------
__global__ __launch_bounds__(WS*DW, DNB)
void alg4_stage7( float *g_out,
float2 *g_transp_py,
float2 *g_transp_ez,
int out_stride ) {
alg4_stage4v7<false>( g_out, g_transp_py, g_transp_ez, 0, 0,
out_stride );
}
//-- Host ---------------------------------------------------------------------
__host__
inline int transp_out_height( const int& h ) {
    // cudaBindTexture2D chokes when the memory block stride isn't a
    // multiple of 256 bytes, so let's add some padding.
return ((h+WS-1)/WS)*WS;
}
__host__
void prepare_alg4( alg_setup& algs,
alg_setup& algs_transp,
dvector<float>& d_out,
dvector<float>& d_transp_out,
dvector<float2>& d_transp_pybar,
dvector<float2>& d_transp_ezhat,
dvector<float2>& d_pubar,
dvector<float2>& d_evhat,
cudaArray *& a_in,
const float *h_in,
const int& w,
const int& h,
const float& b0,
const float& a1,
const float& a2,
const int& extb,
const initcond& ic )
{
up_constants_coefficients2( b0, a1, a2 );
calc_alg_setup( algs, w, h, extb );
calc_alg_setup( algs_transp, h, w, extb );
d_out.resize( w * h );
d_transp_out.resize( transp_out_height(h) * w );
d_transp_pybar.resize( algs.m_size * algs.carry_height );
d_transp_ezhat.resize( algs.m_size * algs.carry_height );
d_pubar.resize( algs.n_size * algs.carry_width );
d_evhat.resize( algs.n_size * algs.carry_width );
d_transp_pybar.fill_zero();
d_transp_ezhat.fill_zero();
d_pubar.fill_zero();
d_evhat.fill_zero();
up_texture( a_in, h_in, w, h, ic );
}
__host__
void alg4( dvector<float>& d_out,
dvector<float>& d_transp_out,
dvector<float2>& d_transp_pybar,
dvector<float2>& d_transp_ezhat,
dvector<float2>& d_pubar,
dvector<float2>& d_evhat,
const cudaArray *a_in,
const alg_setup& algs,
const alg_setup& algs_transp )
{
dvector<float2> d_transp_py, d_transp_ez, d_pu, d_ev;
cudaBindTextureToArray( t_in, a_in );
up_alg_setup( algs );
alg4_stage1<<<
dim3((algs.m_size+2-1)/2, algs.n_size), dim3(WS, DW) >>>(
d_transp_pybar, d_transp_ezhat );
alg4_stage2_3<<<
dim3(1, algs.n_size), dim3(WS, std::min<int>(algs.m_size, DW)) >>>(
d_transp_pybar, d_transp_ezhat );
swap( d_transp_pybar, d_transp_py );
swap( d_transp_ezhat, d_transp_ez );
alg4_stage4<<<
dim3((algs.m_size+2-1)/2, algs.n_size), dim3(WS, DW) >>>(
d_transp_out, d_transp_py, d_transp_ez, d_pubar, d_evhat,
transp_out_height(algs.height) );
up_alg_setup( algs_transp );
alg4_stage5_6<<<
dim3(1, algs.m_size), dim3(WS, std::min<int>(algs.n_size, DW)) >>>(
d_pubar, d_evhat );
swap( d_pubar, d_pu );
swap( d_evhat, d_ev );
cudaUnbindTexture( t_in );
size_t offset;
cudaBindTexture2D( &offset, t_in, d_transp_out, algs.height, algs.width,
transp_out_height(algs.height)*sizeof(float) );
alg4_stage7<<<
dim3((algs.n_size+2-1)/2, algs.m_size), dim3(WS, DW) >>>(
d_out, d_pu, d_ev, algs.width );
swap( d_ev, d_evhat );
swap( d_pu, d_pubar );
swap( d_transp_ez, d_transp_ezhat );
swap( d_transp_py, d_transp_pybar );
cudaUnbindTexture( t_in );
}
__host__
void alg4( float *h_inout,
const int& w,
const int& h,
const float& b0,
const float& a1,
const float& a2,
const int& extb,
const initcond& ic )
{
alg_setup algs, algs_transp;
dvector<float> d_out, d_transp_out;
dvector<float2> d_transp_pybar, d_transp_ezhat, d_pubar, d_evhat;
cudaArray *a_in;
prepare_alg4( algs, algs_transp, d_out, d_transp_out, d_transp_pybar,
d_transp_ezhat, d_pubar, d_evhat, a_in, h_inout, w, h,
b0, a1, a2, extb, ic );
alg4( d_out, d_transp_out, d_transp_pybar, d_transp_ezhat, d_pubar,
d_evhat, a_in, algs, algs_transp );
d_out.copy_to( h_inout, w * h );
cudaFreeArray( a_in );
}
//=============================================================================
} // namespace gpufilter
//=============================================================================
// vi: ai ts=4 sw=4
|
fcf9ac8a9ec93ae92fa06c44fee8adebbef06834.hip | // !!! This is a file automatically generated by hipify!!!
//THIS IS THE MATRIX VECTOR MULTIPLICATION ON MULTIPLE GPUS USING MPI
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "mpi.h"
//------------------------------------------------------------------------------------------------------------------------------------------
#define BLOCKSIZE 16
//--------------------------------------------------------------------------------------------------------------------------------------------
int IntializingMatrixVectors(float **, float **, float **, int , int , int );
int CheckDevice(int );
//--------------------------------------------------------------------------------------------------------------------------------------------
//Pragma routine to report the detail of cuda error
#define CUDA_SAFE_CALL(call) \
do{ \
hipError_t err = call; \
if(err != hipSuccess) \
{ \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n",\
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(1); \
} \
} while (0) \
//----------------------------------------------------------------------------------------------------------------------------------------
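//Editor's note (added comment): the do { ... } while (0) wrapper in CUDA_SAFE_CALL
//above lets the macro be used like a single statement (for example inside an
//if/else branch without braces) while still taking a trailing semicolon.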
//Kernel that performs Matrix Vector Multiplication
__global__ void MatrixVectorMultiplication(float *Matrix,float *Vector,float *Solution,int VectorLength, int ScatterSize, int ThreadDim)
{
int tidx = threadIdx.x;
int tidy = threadIdx.y;
int ThreadIndex = (ThreadDim * tidx) + tidy;
int MaxNumThread = ThreadDim * ThreadDim;
//int VectLen = MatrixSize;
int count,ThreadColumnIndex,pass = 0 ;
float TempResult = 0.0f;
while( (ThreadColumnIndex = (ThreadIndex + MaxNumThread * pass)) < ScatterSize )
{
TempResult = 0.0f;
for( count = 0; count < VectorLength; count++)
TempResult += Matrix[ThreadColumnIndex*VectorLength+count] * Vector[count];
Solution[ThreadColumnIndex] = TempResult;
pass++;
}
__syncthreads();
}//End of Matrix Vector Multiplication Device Function
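//Editor's note (added comment, not part of the original program): inside the
//kernel above, each of the ThreadDim*ThreadDim threads processes the rows
//ThreadIndex, ThreadIndex + MaxNumThread, ThreadIndex + 2*MaxNumThread, ... of
//its ScatterSize-row slice, so the single 16x16 block launched below covers
//any slice size.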
//---------------------------------------------------------------------------------------------------------------------------------------
int main(int argc, char **argv)
{
int MyRank, NumberOfProcessors;
int Root = 0, Index, Status = 1;
float *MatrixA, *VectorB, *ResultVector;
float *MyMatrixA, *MyResultVector;
float *DeviceMyMatrixA, *DeviceMyResultVector, *DeviceVectorB;
int RowsNo, ColsNo, VectorSize, ScatterSize, IndexCol, IndexValue, DeviceStatus;
//MPI Intialization
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);
MPI_Comm_size(MPI_COMM_WORLD, &NumberOfProcessors);
    //Checking if a valid number of arguments has been passed
if(argc != 4)
{
if(MyRank == Root)
printf("Usage:< mpirun >< -n >< Number of processors >< ./Program Name >< Number of Rows of Matri x>< Number of Columns of Matrix >< VectorSize > \n");
MPI_Finalize();
exit(-1);
}
    //Assigning values to RowsNo, ColsNo, VectorSize from the arguments passed
RowsNo = atoi( argv[1] );
ColsNo = atoi( argv[2] );
VectorSize = atoi( argv[3] );
//Checking if columns is equal to vector size
if(ColsNo != VectorSize)
{
if(MyRank == Root)
printf("Entered wrong input, Number of columns of matrix should be equal to size of the vector \n");
MPI_Finalize();
exit(-1);
}
if(RowsNo < NumberOfProcessors)
{
if(MyRank == Root)
printf("Given number of Rows of the matrix should be more than number of processors \n");
MPI_Finalize();
exit(-1);
}
//Checking if Matrix can be distributed evenly to all the nodes
if(RowsNo % NumberOfProcessors != 0)
{
if(MyRank == Root)
printf("The Rows of the matrix can not be distributed evenly among processors \n");
MPI_Finalize();
exit(-1);
}
    //Root node initializes the Matrix, Vector and Result Vector
if(MyRank == Root)
Status = IntializingMatrixVectors(&MatrixA, &VectorB, &ResultVector, RowsNo, ColsNo, VectorSize);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Bcast(&Status, 1, MPI_INT, Root, MPI_COMM_WORLD);
    //Checking the Status returned by IntializingMatrixVectors
if(Status == 0)
{
if(MyRank == Root)
printf("Memory is not available to allocate for the Variables \n");
MPI_Finalize();
exit(-1);
}
    //Allocating memory for the Vector by all nodes except the root node
if(MyRank != Root)
VectorB = (float *)malloc(VectorSize * sizeof(float));
    //Broadcasting the Vector to all the nodes from the root node
MPI_Bcast(VectorB, VectorSize, MPI_FLOAT, Root, MPI_COMM_WORLD);
//Calculating the Scatter size of the Matrix
ScatterSize = RowsNo / NumberOfProcessors;
//Allocating the memory on the host for the MyMatrixA and MyResultVector by all nodes
MyMatrixA = (float *)malloc(ScatterSize * ColsNo * sizeof(float) );
if(MyMatrixA == NULL)
Status = 0;
MyResultVector = (float *)malloc(ScatterSize * sizeof(float));
if(MyResultVector == NULL)
Status = 0;
    //Distributing the Matrix among all the nodes
MPI_Scatter(MatrixA, ScatterSize * ColsNo, MPI_FLOAT, MyMatrixA, ScatterSize * ColsNo, MPI_FLOAT, Root, MPI_COMM_WORLD);
DeviceStatus = CheckDevice(MyRank);
if(DeviceStatus == 0)
{
printf("Processor with rank %d doing partial product of two vectors on CPU \n",MyRank);
for(Index = 0 ; Index < ScatterSize ; Index++)
{
MyResultVector[Index] = 0;
IndexValue = Index * ColsNo;
for(IndexCol = 0; IndexCol < ColsNo; IndexCol++)
MyResultVector[Index] += (MyMatrixA[IndexValue++] * VectorB[IndexCol]);
}
}
else
{
//Defining Thread Grid and Thread Block
dim3 DimGrid(1, 1);
dim3 DimBlock(BLOCKSIZE, BLOCKSIZE);
//Allocating the Memory on the device memory
CUDA_SAFE_CALL( hipMalloc( (void **)&DeviceMyMatrixA, ScatterSize * ColsNo * sizeof(float) ) );
CUDA_SAFE_CALL( hipMalloc( (void **)&DeviceMyResultVector, ScatterSize * sizeof(float) ) );
CUDA_SAFE_CALL( hipMalloc( (void **)&DeviceVectorB, VectorSize * sizeof(float) ) );
//Copying the data from host to device
hipMemcpy( (void *)DeviceMyMatrixA, (void *)MyMatrixA, ScatterSize * ColsNo * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( (void *)DeviceVectorB, (void *)VectorB, VectorSize * sizeof(float), hipMemcpyHostToDevice );
//Calling the kernel which performs Matrix Vector Product
hipLaunchKernelGGL(( MatrixVectorMultiplication), dim3(DimGrid), dim3(DimBlock), 0, 0, DeviceMyMatrixA, DeviceVectorB, DeviceMyResultVector, ColsNo, ScatterSize, BLOCKSIZE);
        //Copying the value of the partial result vector from device to host
hipMemcpy( (void *)MyResultVector, (void *)DeviceMyResultVector, ScatterSize * sizeof(float), hipMemcpyDeviceToHost );
}
MPI_Barrier(MPI_COMM_WORLD);
//Root processor gathering from all nodes to get the final result vector
MPI_Gather( MyResultVector, ScatterSize, MPI_FLOAT, ResultVector, ScatterSize, MPI_FLOAT, Root, MPI_COMM_WORLD);
//Root processor printing the resultant vector
if(MyRank == Root)
{
printf("The resultant vector with size %d is \n",RowsNo);
for(Index = 0; Index < RowsNo; Index++)
printf(" %f \n", ResultVector[Index]);
//freeing the Vectors allocated by the root node
free(MatrixA);
free(ResultVector);
}
//Freeing the host memory
free(MyMatrixA);
free(VectorB);
free(MyResultVector);
    //Freeing the device memory (only when it was actually allocated on the GPU)
    if(DeviceStatus != 0)
    {
        CUDA_SAFE_CALL( hipFree( DeviceMyMatrixA ) );
        CUDA_SAFE_CALL( hipFree( DeviceVectorB ) );
        CUDA_SAFE_CALL( hipFree( DeviceMyResultVector ) );
    }
MPI_Finalize();
return(0);
}//End of Main function
//---------------------------------------------------------------------------------------------------------------------------------------
int IntializingMatrixVectors(float **MatrixA, float **VectorB, float **ResultVector, int RowsNo, int ColsNo, int VectorSize)
{
float *TempMatrixA, *TempVectorB, *TempResultVector;
    int Status = 1, Index; //assume success; set to 0 below if an allocation fails
//Allocating memory on the host
TempMatrixA = (float *)malloc(RowsNo * ColsNo * sizeof(float));
if(TempMatrixA == NULL)
Status = 0;
TempVectorB = (float *)malloc(VectorSize * sizeof(float));
if(TempVectorB == NULL)
Status = 0;
TempResultVector = (float *)malloc(RowsNo * sizeof(float));
if(TempResultVector == NULL)
Status = 0;
    //Initializing the Matrix and the Vectors
for(Index = 0; Index < RowsNo * ColsNo; Index++)
TempMatrixA[Index] = 1.0f;
for(Index = 0; Index < VectorSize; Index++)
TempVectorB[Index] = 1.0f;
for(Index = 0; Index < RowsNo; Index++)
TempResultVector[Index] = 0.0f;
*MatrixA = TempMatrixA;
*VectorB = TempVectorB;
*ResultVector = TempResultVector;
return(Status);
}//End of the function
//-------------------------------------------------------------------------------------------------------------------------------------
int CheckDevice(int MyRank)
{
int DeviceCount, Device;
struct hipDeviceProp_t Properties;
hipGetDeviceCount(&DeviceCount);
if(DeviceCount >= 1)
{
hipGetDevice(&Device);
hipGetDeviceProperties(&Properties, Device);
printf("Processor with rank %d has the Device by name %s and computation is done on this device \n",MyRank, Properties.name);
}
return(DeviceCount);
}
| fcf9ac8a9ec93ae92fa06c44fee8adebbef06834.cu | //THIS IS THE MATRIX VECTOR MULTIPLICATION ON MULTIPLE GPUS USING MPI
#include <stdio.h>
#include <cuda.h>
#include "mpi.h"
//------------------------------------------------------------------------------------------------------------------------------------------
#define BLOCKSIZE 16
//--------------------------------------------------------------------------------------------------------------------------------------------
int IntializingMatrixVectors(float **, float **, float **, int , int , int );
int CheckDevice(int );
//--------------------------------------------------------------------------------------------------------------------------------------------
//Pragma routine to report the detail of cuda error
#define CUDA_SAFE_CALL(call) \
do{ \
cudaError_t err = call; \
if(err != cudaSuccess) \
{ \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n",\
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(1); \
} \
} while (0) \
//----------------------------------------------------------------------------------------------------------------------------------------
//Kernel that performs Matrix Vector Multiplication
__global__ void MatrixVectorMultiplication(float *Matrix,float *Vector,float *Solution,int VectorLength, int ScatterSize, int ThreadDim)
{
int tidx = threadIdx.x;
int tidy = threadIdx.y;
int ThreadIndex = (ThreadDim * tidx) + tidy;
int MaxNumThread = ThreadDim * ThreadDim;
//int VectLen = MatrixSize;
int count,ThreadColumnIndex,pass = 0 ;
float TempResult = 0.0f;
while( (ThreadColumnIndex = (ThreadIndex + MaxNumThread * pass)) < ScatterSize )
{
TempResult = 0.0f;
for( count = 0; count < VectorLength; count++)
TempResult += Matrix[ThreadColumnIndex*VectorLength+count] * Vector[count];
Solution[ThreadColumnIndex] = TempResult;
pass++;
}
__syncthreads();
}//End of Matrix Vector Multiplication Device Function
//---------------------------------------------------------------------------------------------------------------------------------------
int main(int argc, char **argv)
{
int MyRank, NumberOfProcessors;
int Root = 0, Index, Status = 1;
float *MatrixA, *VectorB, *ResultVector;
float *MyMatrixA, *MyResultVector;
float *DeviceMyMatrixA, *DeviceMyResultVector, *DeviceVectorB;
int RowsNo, ColsNo, VectorSize, ScatterSize, IndexCol, IndexValue, DeviceStatus;
//MPI Intialization
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);
MPI_Comm_size(MPI_COMM_WORLD, &NumberOfProcessors);
    //Checking if a valid number of arguments has been passed
if(argc != 4)
{
if(MyRank == Root)
printf("Usage:< mpirun >< -n >< Number of processors >< ./Program Name >< Number of Rows of Matri x>< Number of Columns of Matrix >< VectorSize > \n");
MPI_Finalize();
exit(-1);
}
    //Assigning values to RowsNo, ColsNo, VectorSize from the arguments passed
RowsNo = atoi( argv[1] );
ColsNo = atoi( argv[2] );
VectorSize = atoi( argv[3] );
//Checking if columns is equal to vector size
if(ColsNo != VectorSize)
{
if(MyRank == Root)
printf("Entered wrong input, Number of columns of matrix should be equal to size of the vector \n");
MPI_Finalize();
exit(-1);
}
if(RowsNo < NumberOfProcessors)
{
if(MyRank == Root)
printf("Given number of Rows of the matrix should be more than number of processors \n");
MPI_Finalize();
exit(-1);
}
//Checking if Matrix can be distributed evenly to all the nodes
if(RowsNo % NumberOfProcessors != 0)
{
if(MyRank == Root)
printf("The Rows of the matrix can not be distributed evenly among processors \n");
MPI_Finalize();
exit(-1);
}
    //Root node initializes the Matrix, Vector and Result Vector
if(MyRank == Root)
Status = IntializingMatrixVectors(&MatrixA, &VectorB, &ResultVector, RowsNo, ColsNo, VectorSize);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Bcast(&Status, 1, MPI_INT, Root, MPI_COMM_WORLD);
    //Checking the Status returned by IntializingMatrixVectors
if(Status == 0)
{
if(MyRank == Root)
printf("Memory is not available to allocate for the Variables \n");
MPI_Finalize();
exit(-1);
}
    //Allocating memory for the Vector by all nodes except the root node
if(MyRank != Root)
VectorB = (float *)malloc(VectorSize * sizeof(float));
    //Broadcasting the Vector to all the nodes from the root node
MPI_Bcast(VectorB, VectorSize, MPI_FLOAT, Root, MPI_COMM_WORLD);
//Calculating the Scatter size of the Matrix
ScatterSize = RowsNo / NumberOfProcessors;
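    //Editor's note (added comment): for example, an 8-row matrix on 4 processors
    //gives ScatterSize = 2 rows per processor; the earlier divisibility check
    //guarantees that RowsNo is an exact multiple of NumberOfProcessors.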
//Allocating the memory on the host for the MyMatrixA and MyResultVector by all nodes
MyMatrixA = (float *)malloc(ScatterSize * ColsNo * sizeof(float) );
if(MyMatrixA == NULL)
Status = 0;
MyResultVector = (float *)malloc(ScatterSize * sizeof(float));
if(MyResultVector == NULL)
Status = 0;
    //Distributing the Matrix among all the nodes
MPI_Scatter(MatrixA, ScatterSize * ColsNo, MPI_FLOAT, MyMatrixA, ScatterSize * ColsNo, MPI_FLOAT, Root, MPI_COMM_WORLD);
DeviceStatus = CheckDevice(MyRank);
if(DeviceStatus == 0)
{
printf("Processor with rank %d doing partial product of two vectors on CPU \n",MyRank);
for(Index = 0 ; Index < ScatterSize ; Index++)
{
MyResultVector[Index] = 0;
IndexValue = Index * ColsNo;
for(IndexCol = 0; IndexCol < ColsNo; IndexCol++)
MyResultVector[Index] += (MyMatrixA[IndexValue++] * VectorB[IndexCol]);
}
}
else
{
//Defining Thread Grid and Thread Block
dim3 DimGrid(1, 1);
dim3 DimBlock(BLOCKSIZE, BLOCKSIZE);
//Allocating the Memory on the device memory
CUDA_SAFE_CALL( cudaMalloc( (void **)&DeviceMyMatrixA, ScatterSize * ColsNo * sizeof(float) ) );
CUDA_SAFE_CALL( cudaMalloc( (void **)&DeviceMyResultVector, ScatterSize * sizeof(float) ) );
CUDA_SAFE_CALL( cudaMalloc( (void **)&DeviceVectorB, VectorSize * sizeof(float) ) );
//Copying the data from host to device
cudaMemcpy( (void *)DeviceMyMatrixA, (void *)MyMatrixA, ScatterSize * ColsNo * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( (void *)DeviceVectorB, (void *)VectorB, VectorSize * sizeof(float), cudaMemcpyHostToDevice );
//Calling the kernel which performs Matrix Vector Product
MatrixVectorMultiplication<<<DimGrid, DimBlock>>>(DeviceMyMatrixA, DeviceVectorB, DeviceMyResultVector, ColsNo, ScatterSize, BLOCKSIZE);
        //Copying the value of the partial result vector from device to host
cudaMemcpy( (void *)MyResultVector, (void *)DeviceMyResultVector, ScatterSize * sizeof(float), cudaMemcpyDeviceToHost );
}
MPI_Barrier(MPI_COMM_WORLD);
//Root processor gathering from all nodes to get the final result vector
MPI_Gather( MyResultVector, ScatterSize, MPI_FLOAT, ResultVector, ScatterSize, MPI_FLOAT, Root, MPI_COMM_WORLD);
//Root processor printing the resultant vector
if(MyRank == Root)
{
printf("The resultant vector with size %d is \n",RowsNo);
for(Index = 0; Index < RowsNo; Index++)
printf(" %f \n", ResultVector[Index]);
//freeing the Vectors allocated by the root node
free(MatrixA);
free(ResultVector);
}
//Freeing the host memory
free(MyMatrixA);
free(VectorB);
free(MyResultVector);
    //Freeing the device memory (only when it was actually allocated on the GPU)
    if(DeviceStatus != 0)
    {
        CUDA_SAFE_CALL( cudaFree( DeviceMyMatrixA ) );
        CUDA_SAFE_CALL( cudaFree( DeviceVectorB ) );
        CUDA_SAFE_CALL( cudaFree( DeviceMyResultVector ) );
    }
MPI_Finalize();
return(0);
}//End of Main function
//---------------------------------------------------------------------------------------------------------------------------------------
int IntializingMatrixVectors(float **MatrixA, float **VectorB, float **ResultVector, int RowsNo, int ColsNo, int VectorSize)
{
float *TempMatrixA, *TempVectorB, *TempResultVector;
    int Status = 1, Index; //assume success; set to 0 below if an allocation fails
//Allocating memory on the host
TempMatrixA = (float *)malloc(RowsNo * ColsNo * sizeof(float));
if(TempMatrixA == NULL)
Status = 0;
TempVectorB = (float *)malloc(VectorSize * sizeof(float));
if(TempVectorB == NULL)
Status = 0;
TempResultVector = (float *)malloc(RowsNo * sizeof(float));
if(TempResultVector == NULL)
Status = 0;
    //Initializing the Matrix and the Vectors
for(Index = 0; Index < RowsNo * ColsNo; Index++)
TempMatrixA[Index] = 1.0f;
for(Index = 0; Index < VectorSize; Index++)
TempVectorB[Index] = 1.0f;
for(Index = 0; Index < RowsNo; Index++)
TempResultVector[Index] = 0.0f;
*MatrixA = TempMatrixA;
*VectorB = TempVectorB;
*ResultVector = TempResultVector;
return(Status);
}//End of the function
//-------------------------------------------------------------------------------------------------------------------------------------
int CheckDevice(int MyRank)
{
int DeviceCount, Device;
struct cudaDeviceProp Properties;
cudaGetDeviceCount(&DeviceCount);
if(DeviceCount >= 1)
{
cudaGetDevice(&Device);
cudaGetDeviceProperties(&Properties, Device);
printf("Processor with rank %d has the Device by name %s and computation is done on this device \n",MyRank, Properties.name);
}
return(DeviceCount);
}
|
ef69d6c47dfb166f6214e4669c66c0bb297eb0bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/utils/math/elementwise.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math_utils.h"
namespace caffe2 {
namespace math {
namespace {
template <typename T>
__global__ void SinCosCUDAKernel(const int N, const T* X, T* S, T* C) {
const int i = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (i < N) {
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
c10::hip::compat::sincos(__ldg(X + i), S + i, C + i);
#else
c10::hip::compat::sincos(X[i], S + i, C + i);
#endif
}
}
template <typename T>
__global__ void AffineChannelNCHWCUDAKernel(
const int C,
const int M,
const int HxW,
const T* X,
const T* scale,
const T* bias,
T* Y);
template <>
__global__ void AffineChannelNCHWCUDAKernel<float>(
const int C,
const int M,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
const int nc = blockIdx.x / M;
const int c = nc % C;
const int w = blockIdx.x % M * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (w < HxW) {
const int index = nc * HxW + w;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
Y[index] = fmaf(__ldg(X + index), __ldg(scale + c), __ldg(bias + c));
#else
Y[index] = fmaf(X[index], scale[c], bias[c]);
#endif
}
}
template <typename T>
__global__ void AffineChannelNHWCCUDAKernel(
const int C,
const T* X,
const T* scale,
const T* bias,
T* Y);
template <>
__global__ void AffineChannelNHWCCUDAKernel<float>(
const int C,
const float* X,
const float* scale,
const float* bias,
float* Y) {
const int c = blockIdx.y * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (c < C) {
const int index = blockIdx.x * C + c;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
Y[index] = fmaf(__ldg(X + index), __ldg(scale + c), __ldg(bias + c));
#else
Y[index] = fmaf(X[index], scale[c], bias[c]);
#endif
}
}
} // namespace
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Func, KernelFunc) \
__global__ void Func##CUDAKernel(const int N, const T* X, T* Y) { \
const int i = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x; \
if (i < N) { \
Y[i] = KernelFunc(X[i]); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>( \
const int N, const T* X, T* Y, CUDAContext* context) { \
if (N > 0) { \
const int K = DivUp(N, CAFFE_CUDA_NUM_THREADS); \
hipLaunchKernelGGL(( Func##CUDAKernel), \
K, \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, X, Y); \
} \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sinh, sinhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cosh, coshf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tanh, tanhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, utils::Square<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Rsqrt, rsqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cbrt, cbrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Erf, erff)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Erf, erf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cube, utils::Cube<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Cube, utils::Cube<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Cube,
utils::Cube<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Cube,
utils::Cube<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(bool, Not, utils::Not)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Neg, utils::Negate<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Neg, utils::Negate<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Neg,
utils::Negate<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Neg,
utils::Negate<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sign, utils::Sign<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sign, utils::Sign<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Sign,
utils::Sign<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Sign,
utils::Sign<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Inv, utils::Inv<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Inv, utils::Inv<double>)
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
#define CAFFE2_SPECIALIZED_CUDA_SINCOS(T) \
template <> \
CAFFE2_CUDA_EXPORT void SinCos<T, CUDAContext>( \
const int N, const T* X, T* S, T* C, CUDAContext* context) { \
if (N > 0) { \
const int K = DivUp(N, CAFFE_CUDA_NUM_THREADS); \
hipLaunchKernelGGL(( SinCosCUDAKernel), \
K, \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, X, S, C); \
} \
}
CAFFE2_SPECIALIZED_CUDA_SINCOS(float)
CAFFE2_SPECIALIZED_CUDA_SINCOS(double)
#undef CAFFE2_SPECIALIZED_CUDA_SINCOS
#define CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(T) \
template <> \
CAFFE2_CUDA_EXPORT void AffineChannel<T, CUDAContext, StorageOrder::NCHW>( \
const int N, \
const int C, \
const int HxW, \
const T* X, \
const T* scale, \
const T* bias, \
T* Y, \
CUDAContext* context) { \
const int M = DivUp(HxW, CAFFE_CUDA_NUM_THREADS); \
hipLaunchKernelGGL(( AffineChannelNCHWCUDAKernel<T>) \
, dim3(N * C * M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
C, M, HxW, X, scale, bias, Y); \
} \
template <> \
CAFFE2_CUDA_EXPORT void AffineChannel<T, CUDAContext, StorageOrder::NHWC>( \
const int N, \
const int C, \
const int HxW, \
const T* X, \
const T* scale, \
const T* bias, \
T* Y, \
CUDAContext* context) { \
const int M = DivUp(C, CAFFE_CUDA_NUM_THREADS); \
hipLaunchKernelGGL(( AffineChannelNHWCCUDAKernel<T>) \
, dim3(dim3(N* HxW, M)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), C, X, scale, bias, Y); \
}
CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(float)
#undef CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL
} // namespace math
} // namespace caffe2
| ef69d6c47dfb166f6214e4669c66c0bb297eb0bc.cu | #include "caffe2/utils/math/elementwise.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math_utils.h"
namespace caffe2 {
namespace math {
namespace {
template <typename T>
__global__ void SinCosCUDAKernel(const int N, const T* X, T* S, T* C) {
const int i = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (i < N) {
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
c10::cuda::compat::sincos(__ldg(X + i), S + i, C + i);
#else
c10::cuda::compat::sincos(X[i], S + i, C + i);
#endif
}
}
template <typename T>
__global__ void AffineChannelNCHWCUDAKernel(
const int C,
const int M,
const int HxW,
const T* X,
const T* scale,
const T* bias,
T* Y);
template <>
__global__ void AffineChannelNCHWCUDAKernel<float>(
const int C,
const int M,
const int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y) {
const int nc = blockIdx.x / M;
const int c = nc % C;
const int w = blockIdx.x % M * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (w < HxW) {
const int index = nc * HxW + w;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
Y[index] = fmaf(__ldg(X + index), __ldg(scale + c), __ldg(bias + c));
#else
Y[index] = fmaf(X[index], scale[c], bias[c]);
#endif
}
}
template <typename T>
__global__ void AffineChannelNHWCCUDAKernel(
const int C,
const T* X,
const T* scale,
const T* bias,
T* Y);
template <>
__global__ void AffineChannelNHWCCUDAKernel<float>(
const int C,
const float* X,
const float* scale,
const float* bias,
float* Y) {
const int c = blockIdx.y * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (c < C) {
const int index = blockIdx.x * C + c;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
Y[index] = fmaf(__ldg(X + index), __ldg(scale + c), __ldg(bias + c));
#else
Y[index] = fmaf(X[index], scale[c], bias[c]);
#endif
}
}
} // namespace
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Func, KernelFunc) \
__global__ void Func##CUDAKernel(const int N, const T* X, T* Y) { \
const int i = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x; \
if (i < N) { \
Y[i] = KernelFunc(X[i]); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>( \
const int N, const T* X, T* Y, CUDAContext* context) { \
if (N > 0) { \
const int K = DivUp(N, CAFFE_CUDA_NUM_THREADS); \
Func##CUDAKernel<<< \
K, \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, X, Y); \
} \
}
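// Editorial sketch (not part of the upstream file): as an illustration of the
// macro above, DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf) defines a
// __global__ ExpCUDAKernel(N, X, Y) that applies expf element-wise, plus an
// Exp<float, CUDAContext> host wrapper that launches it on
// context->cuda_stream() with DivUp(N, CAFFE_CUDA_NUM_THREADS) blocks of
// CAFFE_CUDA_NUM_THREADS threads.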
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sinh, sinhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cosh, coshf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tanh, tanhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, utils::Square<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Rsqrt, rsqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cbrt, cbrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Erf, erff)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Erf, erf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cube, utils::Cube<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Cube, utils::Cube<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Cube,
utils::Cube<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Cube,
utils::Cube<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(bool, Not, utils::Not)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Neg, utils::Negate<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Neg, utils::Negate<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Neg,
utils::Negate<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Neg,
utils::Negate<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sign, utils::Sign<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sign, utils::Sign<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Sign,
utils::Sign<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Sign,
utils::Sign<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Inv, utils::Inv<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Inv, utils::Inv<double>)
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
#define CAFFE2_SPECIALIZED_CUDA_SINCOS(T) \
template <> \
CAFFE2_CUDA_EXPORT void SinCos<T, CUDAContext>( \
const int N, const T* X, T* S, T* C, CUDAContext* context) { \
if (N > 0) { \
const int K = DivUp(N, CAFFE_CUDA_NUM_THREADS); \
SinCosCUDAKernel<<< \
K, \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, X, S, C); \
} \
}
CAFFE2_SPECIALIZED_CUDA_SINCOS(float)
CAFFE2_SPECIALIZED_CUDA_SINCOS(double)
#undef CAFFE2_SPECIALIZED_CUDA_SINCOS
#define CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(T) \
template <> \
CAFFE2_CUDA_EXPORT void AffineChannel<T, CUDAContext, StorageOrder::NCHW>( \
const int N, \
const int C, \
const int HxW, \
const T* X, \
const T* scale, \
const T* bias, \
T* Y, \
CUDAContext* context) { \
const int M = DivUp(HxW, CAFFE_CUDA_NUM_THREADS); \
AffineChannelNCHWCUDAKernel<T> \
<<<N * C * M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
C, M, HxW, X, scale, bias, Y); \
} \
template <> \
CAFFE2_CUDA_EXPORT void AffineChannel<T, CUDAContext, StorageOrder::NHWC>( \
const int N, \
const int C, \
const int HxW, \
const T* X, \
const T* scale, \
const T* bias, \
T* Y, \
CUDAContext* context) { \
const int M = DivUp(C, CAFFE_CUDA_NUM_THREADS); \
AffineChannelNHWCCUDAKernel<T> \
<<<dim3(N* HxW, M), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(C, X, scale, bias, Y); \
}
CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL(float)
#undef CAFFE2_SPECIALIZED_CUDA_AFFINE_CHANNEL
} // namespace math
} // namespace caffe2
|
74be6fd77ccaefde15ec5df3552e49ffb39c8d52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/**
**************************************************************************
* \file dct8x8.cu
* \brief Contains entry point, wrappers to host and device code and benchmark.
*
* This sample implements forward and inverse Discrete Cosine Transform to blocks
* of image pixels (of 8x8 size), as in JPEG standard. The typical work flow is as
* follows:
* 1. Run CPU version (Host code) and measure execution time;
* 2. Run CUDA version (Device code) and measure execution time;
* 3. Output execution timings and calculate CUDA speedup.
*/
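/**
 * Editor's note (added for reference, not part of the original sample): the 8x8
 * blocks are processed with the 2-D DCT-II. With the usual orthonormal
 * normalization the forward transform of a block f(x,y), 0 <= x,y < 8, is
 *
 *   C(u,v) = a(u) * a(v) * sum_{x=0..7} sum_{y=0..7} f(x,y)
 *            * cos((2x+1)*u*pi/16) * cos((2y+1)*v*pi/16)
 *
 * with a(0) = sqrt(1/8) and a(k) = sqrt(2/8) = 1/2 for k > 0; the included
 * kernels implement this transform and its inverse in several ways.
 */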
#include "Common.h"
/**
* The number of DCT kernel calls
*/
#if 1
#define BENCHMARK_SIZE 1
#else
#define BENCHMARK_SIZE 10
#endif
/**
* The PSNR values over this threshold indicate images equality
*/
#define PSNR_THRESHOLD_EQUAL 30
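/**
 * Editor's note (added for reference): for 8-bit images PSNR is conventionally
 * computed as PSNR = 10 * log10(255^2 / MSE) dB, where MSE is the mean squared
 * error between the two planes; results above the 30 dB threshold are treated
 * as visually equal by this sample.
 */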
/**
* Texture reference that is passed through this global variable into device code.
 * This is done because passing it through the argument list results in a
 * compiler internal error. 2008.03.11
*/
texture<float, 2, hipReadModeElementType> TexSrc;
// includes kernels
#include "dct8x8_kernel1.cuh"
#include "dct8x8_kernel2.cuh"
#include "dct8x8_kernel_short.cuh"
#include "dct8x8_kernel_quantization.cuh"
/**
**************************************************************************
* Wrapper function for 1st gold version of DCT, quantization and IDCT implementations
*
* \param ImgSrc [IN] - Source byte image plane
 * \param ImgDst [OUT] - Quantized result byte image plane
* \param Stride [IN] - Stride for both source and result planes
* \param Size [IN] - Size of both planes
*
* \return Execution time in milliseconds
*/
float WrapperGold1(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size)
{
//allocate float buffers for DCT and other data
int StrideF;
float *ImgF1 = MallocPlaneFloat(Size.width, Size.height, &StrideF);
float *ImgF2 = MallocPlaneFloat(Size.width, Size.height, &StrideF);
//convert source image to float representation
CopyByte2Float(ImgSrc, Stride, ImgF1, StrideF, Size);
AddFloatPlane(-128.0f, ImgF1, StrideF, Size);
//create and start CUDA timer
unsigned int timerGold = 0;
cutilCheckError(cutCreateTimer(&timerGold));
cutilCheckError(cutResetTimer(timerGold));
//perform block-wise DCT processing and benchmarking
for (int i=0; i<BENCHMARK_SIZE; i++)
{
cutilCheckError(cutStartTimer(timerGold));
computeDCT8x8Gold1(ImgF1, ImgF2, StrideF, Size);
cutilCheckError(cutStopTimer(timerGold));
}
//stop and destroy CUDA timer
float TimerGoldSpan = cutGetAverageTimerValue(timerGold);
cutilCheckError(cutDeleteTimer(timerGold));
//perform quantization
quantizeGoldFloat(ImgF2, StrideF, Size);
//perform block-wise IDCT processing
computeIDCT8x8Gold1(ImgF2, ImgF1, StrideF, Size);
//convert image back to byte representation
AddFloatPlane(128.0f, ImgF1, StrideF, Size);
CopyFloat2Byte(ImgF1, StrideF, ImgDst, Stride, Size);
//free float buffers
FreePlane(ImgF1);
FreePlane(ImgF2);
//return time taken by the operation
return TimerGoldSpan;
}
/**
**************************************************************************
* Wrapper function for 2nd gold version of DCT, quantization and IDCT implementations
*
* \param ImgSrc [IN] - Source byte image plane
 * \param ImgDst [OUT] - Quantized result byte image plane
* \param Stride [IN] - Stride for both source and result planes
* \param Size [IN] - Size of both planes
*
* \return Execution time in milliseconds
*/
float WrapperGold2(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size)
{
//allocate float buffers for DCT and other data
int StrideF;
float *ImgF1 = MallocPlaneFloat(Size.width, Size.height, &StrideF);
float *ImgF2 = MallocPlaneFloat(Size.width, Size.height, &StrideF);
//convert source image to float representation
CopyByte2Float(ImgSrc, Stride, ImgF1, StrideF, Size);
AddFloatPlane(-128.0f, ImgF1, StrideF, Size);
//create and start CUDA timer
unsigned int timerGold = 0;
cutilCheckError(cutCreateTimer(&timerGold));
cutilCheckError(cutResetTimer(timerGold));
//perform block-wise DCT processing and benchmarking
for (int i=0; i<BENCHMARK_SIZE; i++)
{
cutilCheckError(cutStartTimer(timerGold));
computeDCT8x8Gold2(ImgF1, ImgF2, StrideF, Size);
cutilCheckError(cutStopTimer(timerGold));
}
//stop and destroy CUDA timer
float TimerGoldSpan = cutGetAverageTimerValue(timerGold);
cutilCheckError(cutDeleteTimer(timerGold));
//perform quantization
quantizeGoldFloat(ImgF2, StrideF, Size);
//perform block-wise IDCT processing
computeIDCT8x8Gold2(ImgF2, ImgF1, StrideF, Size);
//convert image back to byte representation
AddFloatPlane(128.0f, ImgF1, StrideF, Size);
CopyFloat2Byte(ImgF1, StrideF, ImgDst, Stride, Size);
//free float buffers
FreePlane(ImgF1);
FreePlane(ImgF2);
//return time taken by the operation
return TimerGoldSpan;
}
/**
**************************************************************************
* Wrapper function for 1st CUDA version of DCT, quantization and IDCT implementations
*
* \param ImgSrc [IN] - Source byte image plane
 * \param ImgDst [OUT] - Quantized result byte image plane
* \param Stride [IN] - Stride for both source and result planes
* \param Size [IN] - Size of both planes
*
* \return Execution time in milliseconds
*/
float WrapperCUDA1(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size)
{
//prepare channel format descriptor for passing texture into kernels
hipChannelFormatDesc floattex = hipCreateChannelDesc<float>();
//allocate device memory
hipArray *Src;
float *Dst;
size_t DstStride;
cutilSafeCall(hipMallocArray(&Src, &floattex, Size.width, Size.height));
cutilSafeCall(hipMallocPitch((void **)(&Dst), &DstStride, Size.width * sizeof(float), Size.height));
DstStride /= sizeof(float);
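    // Editor's note (added comment): hipMallocPitch reports the pitch in bytes,
    // so it is divided by sizeof(float) to obtain the stride in elements that
    // the kernels below expect.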
//convert source image to float representation
int ImgSrcFStride;
float *ImgSrcF = MallocPlaneFloat(Size.width, Size.height, &ImgSrcFStride);
CopyByte2Float(ImgSrc, Stride, ImgSrcF, ImgSrcFStride, Size);
AddFloatPlane(-128.0f, ImgSrcF, ImgSrcFStride, Size);
//copy from host memory to device
cutilSafeCall(hipMemcpy2DToArray(Src, 0, 0,
ImgSrcF, ImgSrcFStride * sizeof(float),
Size.width * sizeof(float), Size.height,
hipMemcpyHostToDevice) );
//setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(Size.width / BLOCK_SIZE, Size.height / BLOCK_SIZE);
//create and start CUDA timer
unsigned int timerCUDA = 0;
cutilCheckError(cutCreateTimer(&timerCUDA));
cutilCheckError(cutResetTimer(timerCUDA));
//execute DCT kernel and benchmark
cutilSafeCall(hipBindTextureToArray(TexSrc, Src));
for (int i=0; i<BENCHMARK_SIZE; i++)
{
cutilCheckError(cutStartTimer(timerCUDA));
hipLaunchKernelGGL(( CUDAkernel1DCT), dim3(grid), dim3(threads) , 0, 0, Dst, (int) DstStride, 0, 0);
cutilSafeCall(hipDeviceSynchronize());
cutilCheckError(cutStopTimer(timerCUDA));
}
cutilSafeCall(hipUnbindTexture(TexSrc));
cutilCheckMsg("Kernel execution failed");
// finalize CUDA timer
float TimerCUDASpan = cutGetAverageTimerValue(timerCUDA);
cutilCheckError(cutDeleteTimer(timerCUDA));
// execute Quantization kernel
hipLaunchKernelGGL(( CUDAkernelQuantizationFloat), dim3(grid), dim3(threads) , 0, 0, Dst, (int) DstStride);
cutilCheckMsg("Kernel execution failed");
//copy quantized coefficients from device memory back into the texture array (device to device)
cutilSafeCall(hipMemcpy2DToArray(Src, 0, 0,
Dst, DstStride * sizeof(float),
Size.width * sizeof(float), Size.height,
hipMemcpyDeviceToDevice) );
// execute IDCT kernel
cutilSafeCall(hipBindTextureToArray(TexSrc, Src));
hipLaunchKernelGGL(( CUDAkernel1IDCT), dim3(grid), dim3(threads) , 0, 0, Dst, (int) DstStride, 0, 0);
cutilSafeCall(hipUnbindTexture(TexSrc));
cutilCheckMsg("Kernel execution failed");
//copy the reconstructed image back to host
cutilSafeCall(hipMemcpy2D(ImgSrcF, ImgSrcFStride * sizeof(float),
Dst, DstStride * sizeof(float),
Size.width * sizeof(float), Size.height,
hipMemcpyDeviceToHost) );
//convert image back to byte representation
AddFloatPlane(128.0f, ImgSrcF, ImgSrcFStride, Size);
CopyFloat2Byte(ImgSrcF, ImgSrcFStride, ImgDst, Stride, Size);
//clean up memory
cutilSafeCall(hipFreeArray(Src));
cutilSafeCall(hipFree(Dst));
FreePlane(ImgSrcF);
//return time taken by the operation
return TimerCUDASpan;
}
/**
**************************************************************************
* Wrapper function for 2nd CUDA version of DCT, quantization and IDCT implementations
*
* \param ImgSrc [IN] - Source byte image plane
* \param ImgDst [IN] - Quantized result byte image plane
* \param Stride [IN] - Stride for both source and result planes
* \param Size [IN] - Size of both planes
*
* \return Execution time in milliseconds
*/
float WrapperCUDA2(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size)
{
//allocate host buffers for DCT and other data
int StrideF;
float *ImgF1 = MallocPlaneFloat(Size.width, Size.height, &StrideF);
//convert source image to float representation
CopyByte2Float(ImgSrc, Stride, ImgF1, StrideF, Size);
AddFloatPlane(-128.0f, ImgF1, StrideF, Size);
//allocate device memory
float *SrcDst;
size_t DeviceStride;
cutilSafeCall(hipMallocPitch((void **)(&SrcDst), &DeviceStride, Size.width * sizeof(float), Size.height));
DeviceStride /= sizeof(float);
//copy from host memory to device
cutilSafeCall(hipMemcpy2D(SrcDst, DeviceStride * sizeof(float),
ImgF1, StrideF * sizeof(float),
Size.width * sizeof(float), Size.height,
hipMemcpyHostToDevice) );
//create and start CUDA timer
unsigned int timerCUDA = 0;
cutilCheckError(cutCreateTimer(&timerCUDA));
cutilCheckError(cutResetTimer(timerCUDA));
//setup execution parameters
dim3 GridFullWarps(Size.width / KER2_BLOCK_WIDTH, Size.height / KER2_BLOCK_HEIGHT, 1);
dim3 ThreadsFullWarps(8, KER2_BLOCK_WIDTH/8, KER2_BLOCK_HEIGHT/8);
//perform block-wise DCT processing and benchmarking
cutilCheckError(cutStartTimer(timerCUDA));
hipLaunchKernelGGL(( CUDAkernel2DCT), dim3(GridFullWarps), dim3(ThreadsFullWarps) , 0, 0, SrcDst, (int) DeviceStride);
cutilSafeCall(hipDeviceSynchronize());
cutilCheckError(cutStopTimer(timerCUDA));
cutilCheckMsg("Kernel execution failed");
// finalize CUDA timer
float TimerCUDASpan = cutGetAverageTimerValue(timerCUDA);
cutilCheckError(cutDeleteTimer(timerCUDA));
//setup execution parameters for quantization
dim3 ThreadsSmallBlocks(BLOCK_SIZE, BLOCK_SIZE);
dim3 GridSmallBlocks(Size.width / BLOCK_SIZE, Size.height / BLOCK_SIZE);
// execute Quantization kernel
hipLaunchKernelGGL(( CUDAkernelQuantizationFloat), dim3(GridSmallBlocks), dim3(ThreadsSmallBlocks) , 0, 0, SrcDst, (int) DeviceStride);
cutilCheckMsg("Kernel execution failed");
//perform block-wise IDCT processing
hipLaunchKernelGGL(( CUDAkernel2IDCT), dim3(GridFullWarps), dim3(ThreadsFullWarps) , 0, 0, SrcDst, (int) DeviceStride);
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("Kernel execution failed");
//copy the reconstructed image back to host
cutilSafeCall(hipMemcpy2D(ImgF1, StrideF * sizeof(float),
SrcDst, DeviceStride * sizeof(float),
Size.width * sizeof(float), Size.height,
hipMemcpyDeviceToHost) );
//convert image back to byte representation
AddFloatPlane(128.0f, ImgF1, StrideF, Size);
CopyFloat2Byte(ImgF1, StrideF, ImgDst, Stride, Size);
//clean up memory
cutilSafeCall(hipFree(SrcDst));
FreePlane(ImgF1);
//return time taken by the operation
return TimerCUDASpan;
}
/**
**************************************************************************
* Wrapper function for short CUDA version of DCT, quantization and IDCT implementations
*
* \param ImgSrc [IN] - Source byte image plane
* \param ImgDst [IN] - Quantized result byte image plane
* \param Stride [IN] - Stride for both source and result planes
* \param Size [IN] - Size of both planes
*
* \return Execution time in milliseconds
*/
float WrapperCUDAshort(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size)
{
//allocate host buffers for DCT and other data
int StrideS;
short *ImgS1 = MallocPlaneShort(Size.width, Size.height, &StrideS);
//convert source image to short representation centered at 128
for (int i=0; i<Size.height; i++)
{
for (int j=0; j<Size.width; j++)
{
ImgS1[i*StrideS+j] = (short)ImgSrc[i*Stride+j] - 128;
}
}
//allocate device memory
short *SrcDst;
size_t DeviceStride;
cutilSafeCall(hipMallocPitch((void **)(&SrcDst), &DeviceStride, Size.width * sizeof(short), Size.height));
DeviceStride /= sizeof(short);
//copy from host memory to device
cutilSafeCall(hipMemcpy2D(SrcDst, DeviceStride * sizeof(short),
ImgS1, StrideS * sizeof(short),
Size.width * sizeof(short), Size.height,
hipMemcpyHostToDevice) );
//create and start CUDA timer
unsigned int timerLibJpeg = 0;
cutilCheckError(cutCreateTimer(&timerLibJpeg));
cutilCheckError(cutResetTimer(timerLibJpeg));
//setup execution parameters
dim3 GridShort(Size.width / KERS_BLOCK_WIDTH, Size.height / KERS_BLOCK_HEIGHT, 1);
dim3 ThreadsShort(8, KERS_BLOCK_WIDTH/8, KERS_BLOCK_HEIGHT/8);
//perform block-wise DCT processing and benchmarking
cutilCheckError(cutStartTimer(timerLibJpeg));
hipLaunchKernelGGL(( CUDAkernelShortDCT), dim3(GridShort), dim3(ThreadsShort) , 0, 0, SrcDst, (int) DeviceStride);
cutilSafeCall(hipDeviceSynchronize());
cutilCheckError(cutStopTimer(timerLibJpeg));
cutilCheckMsg("Kernel execution failed");
//stop and destroy CUDA timer
float TimerLibJpegSpan16b = cutGetAverageTimerValue(timerLibJpeg);
cutilCheckError(cutDeleteTimer(timerLibJpeg));
//setup execution parameters for quantization
dim3 ThreadsSmallBlocks(BLOCK_SIZE, BLOCK_SIZE);
dim3 GridSmallBlocks(Size.width / BLOCK_SIZE, Size.height / BLOCK_SIZE);
// execute Quantization kernel
hipLaunchKernelGGL(( CUDAkernelQuantizationShort), dim3(GridSmallBlocks), dim3(ThreadsSmallBlocks) , 0, 0, SrcDst, (int) DeviceStride);
cutilCheckMsg("Kernel execution failed");
//perform block-wise IDCT processing
hipLaunchKernelGGL(( CUDAkernelShortIDCT), dim3(GridShort), dim3(ThreadsShort) , 0, 0, SrcDst, (int) DeviceStride);
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("Kernel execution failed");
//copy the reconstructed image back to host
cutilSafeCall(hipMemcpy2D(ImgS1, StrideS * sizeof(short),
SrcDst, DeviceStride * sizeof(short),
Size.width * sizeof(short), Size.height,
hipMemcpyDeviceToHost) );
//convert image back to byte representation
for (int i=0; i<Size.height; i++)
{
for (int j=0; j<Size.width; j++)
{
ImgDst[i*Stride+j] = clamp_0_255(ImgS1[i*StrideS+j] + 128);
}
}
//free device and host buffers
cutilSafeCall(hipFree(SrcDst));
FreePlane(ImgS1);
//return time taken by the operation
return TimerLibJpegSpan16b;
}
/**
**************************************************************************
* Program entry point
*
* \param argc [IN] - Number of command-line arguments
* \param argv [IN] - Array of command-line arguments
*
* \return Status code
*/
int main(int argc, char** argv)
{
//
// Sample initialization
//
//initialize CUDA
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
hipSetDevice( cutGetMaxGflopsDeviceId() );
//source and results image filenames
char SampleImageFname[] = "barbara.bmp";
char SampleImageFnameResGold1[] = "barbara_gold1.bmp";
char SampleImageFnameResGold2[] = "barbara_gold2.bmp";
char SampleImageFnameResCUDA1[] = "barbara_cuda1.bmp";
char SampleImageFnameResCUDA2[] = "barbara_cuda2.bmp";
char SampleImageFnameResCUDAshort[] = "barbara_cuda_short.bmp";
char *pSampleImageFpath = cutFindFilePath(SampleImageFname, argv[0]);
//preload image (acquire dimensions)
int ImgWidth, ImgHeight;
ROI ImgSize;
int res = PreLoadBmp(pSampleImageFpath, &ImgWidth, &ImgHeight);
ImgSize.width = ImgWidth;
ImgSize.height = ImgHeight;
//CONSOLE INFORMATION: saying hello to user
printf("CUDA sample DCT/IDCT implementation\n");
printf("===================================\n");
printf("Loading test image: %s... ", SampleImageFname);
if (res)
{
printf("\nError: Image file not found or invalid!\n");
printf("Press ENTER to exit...\n");
getchar();
//finalize
cutilExit(argc, argv);
return 1;
}
//check image dimensions are multiples of BLOCK_SIZE
if (ImgWidth % BLOCK_SIZE != 0 || ImgHeight % BLOCK_SIZE != 0)
{
printf("\nError: Input image dimensions must be multiples of 8!\n");
printf("Press ENTER to exit...\n");
getchar();
//finalize
cutilExit(argc, argv);
return 1;
}
printf("[%d x %d]... ", ImgWidth, ImgHeight);
//allocate image buffers
int ImgStride;
byte *ImgSrc = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
byte *ImgDstGold1 = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
byte *ImgDstGold2 = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
byte *ImgDstCUDA1 = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
byte *ImgDstCUDA2 = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
byte *ImgDstCUDAshort = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
//load sample image
LoadBmpAsGray(pSampleImageFpath, ImgStride, ImgSize, ImgSrc);
//
// RUNNING WRAPPERS
//
//compute Gold 1 version of DCT/quantization/IDCT
printf("Success\nRunning Gold 1 (CPU) version... ");
float TimeGold1 = WrapperGold1(ImgSrc, ImgDstGold1, ImgStride, ImgSize);
//compute Gold 2 version of DCT/quantization/IDCT
printf("Success\nRunning Gold 2 (CPU) version... ");
float TimeGold2 = WrapperGold2(ImgSrc, ImgDstGold2, ImgStride, ImgSize);
//compute CUDA 1 version of DCT/quantization/IDCT
printf("Success\nRunning CUDA 1 (GPU) version... ");
float TimeCUDA1 = WrapperCUDA1(ImgSrc, ImgDstCUDA1, ImgStride, ImgSize);
//compute CUDA 2 version of DCT/quantization/IDCT
printf("Success\nRunning CUDA 2 (GPU) version... ");
float TimeCUDA2 = WrapperCUDA2(ImgSrc, ImgDstCUDA2, ImgStride, ImgSize);
//compute CUDA short version of DCT/quantization/IDCT
printf("Success\nRunning CUDA short (GPU) version... ");
float TimeCUDAshort = WrapperCUDAshort(ImgSrc, ImgDstCUDAshort, ImgStride, ImgSize);
//
// Execution statistics, result saving and validation
//
//dump result of Gold 1 processing
printf("Success\nDumping result to %s... ", SampleImageFnameResGold1);
DumpBmpAsGray(SampleImageFnameResGold1, ImgDstGold1, ImgStride, ImgSize);
//dump result of Gold 2 processing
printf("Success\nDumping result to %s... ", SampleImageFnameResGold2);
DumpBmpAsGray(SampleImageFnameResGold2, ImgDstGold2, ImgStride, ImgSize);
//dump result of CUDA 1 processing
printf("Success\nDumping result to %s... ", SampleImageFnameResCUDA1);
DumpBmpAsGray(SampleImageFnameResCUDA1, ImgDstCUDA1, ImgStride, ImgSize);
//dump result of CUDA 2 processing
printf("Success\nDumping result to %s... ", SampleImageFnameResCUDA2);
DumpBmpAsGray(SampleImageFnameResCUDA2, ImgDstCUDA2, ImgStride, ImgSize);
//dump result of CUDA short processing
printf("Success\nDumping result to %s... ", SampleImageFnameResCUDAshort);
DumpBmpAsGray(SampleImageFnameResCUDAshort, ImgDstCUDAshort, ImgStride, ImgSize);
//print speed info
printf("Success\n");
#if 0
printf("Processing time : not relevant in CUDA emulation mode\n");
#else
printf("Processing time (CUDA 1) : %f ms \n", TimeCUDA1);
printf("Processing time (CUDA 2) : %f ms \n", TimeCUDA2);
printf("Processing time (CUDA short): %f ms \n", TimeCUDAshort);
#endif
//calculate PSNR between each pair of images
float PSNR_Src_DstGold1 = CalculatePSNR(ImgSrc, ImgDstGold1, ImgStride, ImgSize);
float PSNR_Src_DstGold2 = CalculatePSNR(ImgSrc, ImgDstGold2, ImgStride, ImgSize);
float PSNR_Src_DstCUDA1 = CalculatePSNR(ImgSrc, ImgDstCUDA1, ImgStride, ImgSize);
float PSNR_Src_DstCUDA2 = CalculatePSNR(ImgSrc, ImgDstCUDA2, ImgStride, ImgSize);
float PSNR_Src_DstCUDAshort = CalculatePSNR(ImgSrc, ImgDstCUDAshort, ImgStride, ImgSize);
float PSNR_DstGold1_DstCUDA1 = CalculatePSNR(ImgDstGold1, ImgDstCUDA1, ImgStride, ImgSize);
float PSNR_DstGold2_DstCUDA2 = CalculatePSNR(ImgDstGold2, ImgDstCUDA2, ImgStride, ImgSize);
float PSNR_DstGold2_DstCUDA16b = CalculatePSNR(ImgDstGold2, ImgDstCUDAshort, ImgStride, ImgSize);
printf("PSNR Original <---> CPU(Gold 1) : %f\n", PSNR_Src_DstGold1);
printf("PSNR Original <---> CPU(Gold 2) : %f\n", PSNR_Src_DstGold2);
printf("PSNR Original <---> GPU(CUDA 1) : %f\n", PSNR_Src_DstCUDA1);
printf("PSNR Original <---> GPU(CUDA 2) : %f\n", PSNR_Src_DstCUDA2);
printf("PSNR Original <---> GPU(CUDA short): %f\n", PSNR_Src_DstCUDAshort);
printf("PSNR CPU(Gold 1) <---> GPU(CUDA 1) : %f\n", PSNR_DstGold1_DstCUDA1);
printf("PSNR CPU(Gold 2) <---> GPU(CUDA 2) : %f\n", PSNR_DstGold2_DstCUDA2);
printf("PSNR CPU(Gold 2) <---> GPU(CUDA short): %f\n", PSNR_DstGold2_DstCUDA16b);
if (PSNR_DstGold1_DstCUDA1 > PSNR_THRESHOLD_EQUAL && PSNR_DstGold2_DstCUDA2 > PSNR_THRESHOLD_EQUAL && PSNR_DstGold2_DstCUDA16b > PSNR_THRESHOLD_EQUAL)
{
printf("\nTEST PASSED!\n");
}
else
{
printf("\nTEST FAILED! (CPU and GPU results differ too much)\n");
}
//
// Finalization
//
//release byte planes
FreePlane(ImgSrc);
FreePlane(ImgDstGold1);
FreePlane(ImgDstGold2);
FreePlane(ImgDstCUDA1);
FreePlane(ImgDstCUDA2);
FreePlane(ImgDstCUDAshort);
//finalize
hipDeviceReset();
cutilExit(argc, argv);
return 0;
}
| 74be6fd77ccaefde15ec5df3552e49ffb39c8d52.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/**
**************************************************************************
* \file dct8x8.cu
* \brief Contains entry point, wrappers to host and device code and benchmark.
*
* This sample implements forward and inverse Discrete Cosine Transform to blocks
* of image pixels (of 8x8 size), as in JPEG standard. The typical work flow is as
* follows:
* 1. Run CPU version (Host code) and measure execution time;
* 2. Run CUDA version (Device code) and measure execution time;
* 3. Output execution timings and calculate CUDA speedup.
*/
#include "Common.h"
/**
* The number of DCT kernel calls
*/
#if 1
#define BENCHMARK_SIZE 1
#else
#define BENCHMARK_SIZE 10
#endif
/**
* The PSNR values over this threshold indicate images equality
*/
#define PSNR_THRESHOLD_EQUAL 30
/**
* Texture reference that is passed through this global variable into device code.
* This is done because any conventional passing through argument list way results
* in compiler internal error. 2008.03.11
*/
texture<float, 2, cudaReadModeElementType> TexSrc;
// includes kernels
#include "dct8x8_kernel1.cuh"
#include "dct8x8_kernel2.cuh"
#include "dct8x8_kernel_short.cuh"
#include "dct8x8_kernel_quantization.cuh"
/**
**************************************************************************
* Wrapper function for 1st gold version of DCT, quantization and IDCT implementations
*
* \param ImgSrc [IN] - Source byte image plane
* \param ImgDst [IN] - Quantized result byte image plane
* \param Stride [IN] - Stride for both source and result planes
* \param Size [IN] - Size of both planes
*
* \return Execution time in milliseconds
*/
float WrapperGold1(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size)
{
//allocate float buffers for DCT and other data
int StrideF;
float *ImgF1 = MallocPlaneFloat(Size.width, Size.height, &StrideF);
float *ImgF2 = MallocPlaneFloat(Size.width, Size.height, &StrideF);
//convert source image to float representation
CopyByte2Float(ImgSrc, Stride, ImgF1, StrideF, Size);
AddFloatPlane(-128.0f, ImgF1, StrideF, Size);
//create and start CUDA timer
unsigned int timerGold = 0;
cutilCheckError(cutCreateTimer(&timerGold));
cutilCheckError(cutResetTimer(timerGold));
//perform block-wise DCT processing and benchmarking
for (int i=0; i<BENCHMARK_SIZE; i++)
{
cutilCheckError(cutStartTimer(timerGold));
computeDCT8x8Gold1(ImgF1, ImgF2, StrideF, Size);
cutilCheckError(cutStopTimer(timerGold));
}
//stop and destroy CUDA timer
float TimerGoldSpan = cutGetAverageTimerValue(timerGold);
cutilCheckError(cutDeleteTimer(timerGold));
//perform quantization
quantizeGoldFloat(ImgF2, StrideF, Size);
//perform block-wise IDCT processing
computeIDCT8x8Gold1(ImgF2, ImgF1, StrideF, Size);
//convert image back to byte representation
AddFloatPlane(128.0f, ImgF1, StrideF, Size);
CopyFloat2Byte(ImgF1, StrideF, ImgDst, Stride, Size);
//free float buffers
FreePlane(ImgF1);
FreePlane(ImgF2);
//return time taken by the operation
return TimerGoldSpan;
}
/**
**************************************************************************
* Wrapper function for 2nd gold version of DCT, quantization and IDCT implementations
*
* \param ImgSrc [IN] - Source byte image plane
* \param ImgDst [IN] - Quantized result byte image plane
* \param Stride [IN] - Stride for both source and result planes
* \param Size [IN] - Size of both planes
*
* \return Execution time in milliseconds
*/
float WrapperGold2(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size)
{
//allocate float buffers for DCT and other data
int StrideF;
float *ImgF1 = MallocPlaneFloat(Size.width, Size.height, &StrideF);
float *ImgF2 = MallocPlaneFloat(Size.width, Size.height, &StrideF);
//convert source image to float representation
CopyByte2Float(ImgSrc, Stride, ImgF1, StrideF, Size);
AddFloatPlane(-128.0f, ImgF1, StrideF, Size);
//create and start CUDA timer
unsigned int timerGold = 0;
cutilCheckError(cutCreateTimer(&timerGold));
cutilCheckError(cutResetTimer(timerGold));
//perform block-wise DCT processing and benchmarking
for (int i=0; i<BENCHMARK_SIZE; i++)
{
cutilCheckError(cutStartTimer(timerGold));
computeDCT8x8Gold2(ImgF1, ImgF2, StrideF, Size);
cutilCheckError(cutStopTimer(timerGold));
}
//stop and destroy CUDA timer
float TimerGoldSpan = cutGetAverageTimerValue(timerGold);
cutilCheckError(cutDeleteTimer(timerGold));
//perform quantization
quantizeGoldFloat(ImgF2, StrideF, Size);
//perform block-wise IDCT processing
computeIDCT8x8Gold2(ImgF2, ImgF1, StrideF, Size);
//convert image back to byte representation
AddFloatPlane(128.0f, ImgF1, StrideF, Size);
CopyFloat2Byte(ImgF1, StrideF, ImgDst, Stride, Size);
//free float buffers
FreePlane(ImgF1);
FreePlane(ImgF2);
//return time taken by the operation
return TimerGoldSpan;
}
/**
**************************************************************************
* Wrapper function for 1st CUDA version of DCT, quantization and IDCT implementations
*
* \param ImgSrc [IN] - Source byte image plane
* \param ImgDst [IN] - Quantized result byte image plane
* \param Stride [IN] - Stride for both source and result planes
* \param Size [IN] - Size of both planes
*
* \return Execution time in milliseconds
*/
float WrapperCUDA1(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size)
{
//prepare channel format descriptor for passing texture into kernels
cudaChannelFormatDesc floattex = cudaCreateChannelDesc<float>();
//allocate device memory
cudaArray *Src;
float *Dst;
size_t DstStride;
cutilSafeCall(cudaMallocArray(&Src, &floattex, Size.width, Size.height));
cutilSafeCall(cudaMallocPitch((void **)(&Dst), &DstStride, Size.width * sizeof(float), Size.height));
DstStride /= sizeof(float);
//convert source image to float representation
int ImgSrcFStride;
float *ImgSrcF = MallocPlaneFloat(Size.width, Size.height, &ImgSrcFStride);
CopyByte2Float(ImgSrc, Stride, ImgSrcF, ImgSrcFStride, Size);
AddFloatPlane(-128.0f, ImgSrcF, ImgSrcFStride, Size);
//copy from host memory to device
cutilSafeCall(cudaMemcpy2DToArray(Src, 0, 0,
ImgSrcF, ImgSrcFStride * sizeof(float),
Size.width * sizeof(float), Size.height,
cudaMemcpyHostToDevice) );
//setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(Size.width / BLOCK_SIZE, Size.height / BLOCK_SIZE);
//create and start CUDA timer
unsigned int timerCUDA = 0;
cutilCheckError(cutCreateTimer(&timerCUDA));
cutilCheckError(cutResetTimer(timerCUDA));
//execute DCT kernel and benchmark
cutilSafeCall(cudaBindTextureToArray(TexSrc, Src));
for (int i=0; i<BENCHMARK_SIZE; i++)
{
cutilCheckError(cutStartTimer(timerCUDA));
CUDAkernel1DCT<<< grid, threads >>>(Dst, (int) DstStride, 0, 0);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckError(cutStopTimer(timerCUDA));
}
cutilSafeCall(cudaUnbindTexture(TexSrc));
cutilCheckMsg("Kernel execution failed");
// finalize CUDA timer
float TimerCUDASpan = cutGetAverageTimerValue(timerCUDA);
cutilCheckError(cutDeleteTimer(timerCUDA));
// execute Quantization kernel
CUDAkernelQuantizationFloat<<< grid, threads >>>(Dst, (int) DstStride);
cutilCheckMsg("Kernel execution failed");
//copy quantized coefficients from device memory back into the texture array (device to device)
cutilSafeCall(cudaMemcpy2DToArray(Src, 0, 0,
Dst, DstStride * sizeof(float),
Size.width * sizeof(float), Size.height,
cudaMemcpyDeviceToDevice) );
// execute IDCT kernel
cutilSafeCall(cudaBindTextureToArray(TexSrc, Src));
CUDAkernel1IDCT<<< grid, threads >>>(Dst, (int) DstStride, 0, 0);
cutilSafeCall(cudaUnbindTexture(TexSrc));
cutilCheckMsg("Kernel execution failed");
//copy the reconstructed image back to host
cutilSafeCall(cudaMemcpy2D(ImgSrcF, ImgSrcFStride * sizeof(float),
Dst, DstStride * sizeof(float),
Size.width * sizeof(float), Size.height,
cudaMemcpyDeviceToHost) );
//convert image back to byte representation
AddFloatPlane(128.0f, ImgSrcF, ImgSrcFStride, Size);
CopyFloat2Byte(ImgSrcF, ImgSrcFStride, ImgDst, Stride, Size);
//clean up memory
cutilSafeCall(cudaFreeArray(Src));
cutilSafeCall(cudaFree(Dst));
FreePlane(ImgSrcF);
//return time taken by the operation
return TimerCUDASpan;
}
/**
**************************************************************************
* Wrapper function for 2nd CUDA version of DCT, quantization and IDCT implementations
*
* \param ImgSrc [IN] - Source byte image plane
* \param ImgDst [IN] - Quantized result byte image plane
* \param Stride [IN] - Stride for both source and result planes
* \param Size [IN] - Size of both planes
*
* \return Execution time in milliseconds
*/
float WrapperCUDA2(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size)
{
//allocate host buffers for DCT and other data
int StrideF;
float *ImgF1 = MallocPlaneFloat(Size.width, Size.height, &StrideF);
//convert source image to float representation
CopyByte2Float(ImgSrc, Stride, ImgF1, StrideF, Size);
AddFloatPlane(-128.0f, ImgF1, StrideF, Size);
//allocate device memory
float *SrcDst;
size_t DeviceStride;
cutilSafeCall(cudaMallocPitch((void **)(&SrcDst), &DeviceStride, Size.width * sizeof(float), Size.height));
DeviceStride /= sizeof(float);
//copy from host memory to device
cutilSafeCall(cudaMemcpy2D(SrcDst, DeviceStride * sizeof(float),
ImgF1, StrideF * sizeof(float),
Size.width * sizeof(float), Size.height,
cudaMemcpyHostToDevice) );
//create and start CUDA timer
unsigned int timerCUDA = 0;
cutilCheckError(cutCreateTimer(&timerCUDA));
cutilCheckError(cutResetTimer(timerCUDA));
//setup execution parameters
dim3 GridFullWarps(Size.width / KER2_BLOCK_WIDTH, Size.height / KER2_BLOCK_HEIGHT, 1);
dim3 ThreadsFullWarps(8, KER2_BLOCK_WIDTH/8, KER2_BLOCK_HEIGHT/8);
//perform block-wise DCT processing and benchmarking
cutilCheckError(cutStartTimer(timerCUDA));
CUDAkernel2DCT<<< GridFullWarps, ThreadsFullWarps >>>(SrcDst, (int) DeviceStride);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckError(cutStopTimer(timerCUDA));
cutilCheckMsg("Kernel execution failed");
// finalize CUDA timer
float TimerCUDASpan = cutGetAverageTimerValue(timerCUDA);
cutilCheckError(cutDeleteTimer(timerCUDA));
//setup execution parameters for quantization
dim3 ThreadsSmallBlocks(BLOCK_SIZE, BLOCK_SIZE);
dim3 GridSmallBlocks(Size.width / BLOCK_SIZE, Size.height / BLOCK_SIZE);
// execute Quantization kernel
CUDAkernelQuantizationFloat<<< GridSmallBlocks, ThreadsSmallBlocks >>>(SrcDst, (int) DeviceStride);
cutilCheckMsg("Kernel execution failed");
//perform block-wise IDCT processing
CUDAkernel2IDCT<<< GridFullWarps, ThreadsFullWarps >>>(SrcDst, (int) DeviceStride);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckMsg("Kernel execution failed");
//copy the reconstructed image back to host
cutilSafeCall(cudaMemcpy2D(ImgF1, StrideF * sizeof(float),
SrcDst, DeviceStride * sizeof(float),
Size.width * sizeof(float), Size.height,
cudaMemcpyDeviceToHost) );
//convert image back to byte representation
AddFloatPlane(128.0f, ImgF1, StrideF, Size);
CopyFloat2Byte(ImgF1, StrideF, ImgDst, Stride, Size);
//clean up memory
cutilSafeCall(cudaFree(SrcDst));
FreePlane(ImgF1);
//return time taken by the operation
return TimerCUDASpan;
}
/**
**************************************************************************
* Wrapper function for short CUDA version of DCT, quantization and IDCT implementations
*
* \param ImgSrc [IN] - Source byte image plane
* \param ImgDst [IN] - Quantized result byte image plane
* \param Stride [IN] - Stride for both source and result planes
* \param Size [IN] - Size of both planes
*
* \return Execution time in milliseconds
*/
float WrapperCUDAshort(byte *ImgSrc, byte *ImgDst, int Stride, ROI Size)
{
//allocate host buffers for DCT and other data
int StrideS;
short *ImgS1 = MallocPlaneShort(Size.width, Size.height, &StrideS);
//convert source image to short representation centered at 128
for (int i=0; i<Size.height; i++)
{
for (int j=0; j<Size.width; j++)
{
ImgS1[i*StrideS+j] = (short)ImgSrc[i*Stride+j] - 128;
}
}
//allocate device memory
short *SrcDst;
size_t DeviceStride;
cutilSafeCall(cudaMallocPitch((void **)(&SrcDst), &DeviceStride, Size.width * sizeof(short), Size.height));
DeviceStride /= sizeof(short);
//copy from host memory to device
cutilSafeCall(cudaMemcpy2D(SrcDst, DeviceStride * sizeof(short),
ImgS1, StrideS * sizeof(short),
Size.width * sizeof(short), Size.height,
cudaMemcpyHostToDevice) );
//create and start CUDA timer
unsigned int timerLibJpeg = 0;
cutilCheckError(cutCreateTimer(&timerLibJpeg));
cutilCheckError(cutResetTimer(timerLibJpeg));
//setup execution parameters
dim3 GridShort(Size.width / KERS_BLOCK_WIDTH, Size.height / KERS_BLOCK_HEIGHT, 1);
dim3 ThreadsShort(8, KERS_BLOCK_WIDTH/8, KERS_BLOCK_HEIGHT/8);
//perform block-wise DCT processing and benchmarking
cutilCheckError(cutStartTimer(timerLibJpeg));
CUDAkernelShortDCT<<< GridShort, ThreadsShort >>>(SrcDst, (int) DeviceStride);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckError(cutStopTimer(timerLibJpeg));
cutilCheckMsg("Kernel execution failed");
//stop and destroy CUDA timer
float TimerLibJpegSpan16b = cutGetAverageTimerValue(timerLibJpeg);
cutilCheckError(cutDeleteTimer(timerLibJpeg));
//setup execution parameters for quantization
dim3 ThreadsSmallBlocks(BLOCK_SIZE, BLOCK_SIZE);
dim3 GridSmallBlocks(Size.width / BLOCK_SIZE, Size.height / BLOCK_SIZE);
// execute Quantization kernel
CUDAkernelQuantizationShort<<< GridSmallBlocks, ThreadsSmallBlocks >>>(SrcDst, (int) DeviceStride);
cutilCheckMsg("Kernel execution failed");
//perform block-wise IDCT processing
CUDAkernelShortIDCT<<< GridShort, ThreadsShort >>>(SrcDst, (int) DeviceStride);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckMsg("Kernel execution failed");
//copy the reconstructed image back to host
cutilSafeCall(cudaMemcpy2D(ImgS1, StrideS * sizeof(short),
SrcDst, DeviceStride * sizeof(short),
Size.width * sizeof(short), Size.height,
cudaMemcpyDeviceToHost) );
//convert image back to byte representation
for (int i=0; i<Size.height; i++)
{
for (int j=0; j<Size.width; j++)
{
ImgDst[i*Stride+j] = clamp_0_255(ImgS1[i*StrideS+j] + 128);
}
}
//free device and host buffers
cutilSafeCall(cudaFree(SrcDst));
FreePlane(ImgS1);
//return time taken by the operation
return TimerLibJpegSpan16b;
}
/**
**************************************************************************
* Program entry point
*
* \param argc [IN] - Number of command-line arguments
* \param argv [IN] - Array of command-line arguments
*
* \return Status code
*/
int main(int argc, char** argv)
{
//
// Sample initialization
//
//initialize CUDA
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
//source and results image filenames
char SampleImageFname[] = "barbara.bmp";
char SampleImageFnameResGold1[] = "barbara_gold1.bmp";
char SampleImageFnameResGold2[] = "barbara_gold2.bmp";
char SampleImageFnameResCUDA1[] = "barbara_cuda1.bmp";
char SampleImageFnameResCUDA2[] = "barbara_cuda2.bmp";
char SampleImageFnameResCUDAshort[] = "barbara_cuda_short.bmp";
char *pSampleImageFpath = cutFindFilePath(SampleImageFname, argv[0]);
//preload image (acquire dimensions)
int ImgWidth, ImgHeight;
ROI ImgSize;
int res = PreLoadBmp(pSampleImageFpath, &ImgWidth, &ImgHeight);
ImgSize.width = ImgWidth;
ImgSize.height = ImgHeight;
//CONSOLE INFORMATION: saying hello to user
printf("CUDA sample DCT/IDCT implementation\n");
printf("===================================\n");
printf("Loading test image: %s... ", SampleImageFname);
if (res)
{
printf("\nError: Image file not found or invalid!\n");
printf("Press ENTER to exit...\n");
getchar();
//finalize
cutilExit(argc, argv);
return 1;
}
//check image dimensions are multiples of BLOCK_SIZE
if (ImgWidth % BLOCK_SIZE != 0 || ImgHeight % BLOCK_SIZE != 0)
{
printf("\nError: Input image dimensions must be multiples of 8!\n");
printf("Press ENTER to exit...\n");
getchar();
//finalize
cutilExit(argc, argv);
return 1;
}
printf("[%d x %d]... ", ImgWidth, ImgHeight);
//allocate image buffers
int ImgStride;
byte *ImgSrc = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
byte *ImgDstGold1 = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
byte *ImgDstGold2 = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
byte *ImgDstCUDA1 = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
byte *ImgDstCUDA2 = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
byte *ImgDstCUDAshort = MallocPlaneByte(ImgWidth, ImgHeight, &ImgStride);
//load sample image
LoadBmpAsGray(pSampleImageFpath, ImgStride, ImgSize, ImgSrc);
//
// RUNNING WRAPPERS
//
//compute Gold 1 version of DCT/quantization/IDCT
printf("Success\nRunning Gold 1 (CPU) version... ");
float TimeGold1 = WrapperGold1(ImgSrc, ImgDstGold1, ImgStride, ImgSize);
//compute Gold 2 version of DCT/quantization/IDCT
printf("Success\nRunning Gold 2 (CPU) version... ");
float TimeGold2 = WrapperGold2(ImgSrc, ImgDstGold2, ImgStride, ImgSize);
//compute CUDA 1 version of DCT/quantization/IDCT
printf("Success\nRunning CUDA 1 (GPU) version... ");
float TimeCUDA1 = WrapperCUDA1(ImgSrc, ImgDstCUDA1, ImgStride, ImgSize);
//compute CUDA 2 version of DCT/quantization/IDCT
printf("Success\nRunning CUDA 2 (GPU) version... ");
float TimeCUDA2 = WrapperCUDA2(ImgSrc, ImgDstCUDA2, ImgStride, ImgSize);
//compute CUDA short version of DCT/quantization/IDCT
printf("Success\nRunning CUDA short (GPU) version... ");
float TimeCUDAshort = WrapperCUDAshort(ImgSrc, ImgDstCUDAshort, ImgStride, ImgSize);
//
// Execution statistics, result saving and validation
//
//dump result of Gold 1 processing
printf("Success\nDumping result to %s... ", SampleImageFnameResGold1);
DumpBmpAsGray(SampleImageFnameResGold1, ImgDstGold1, ImgStride, ImgSize);
//dump result of Gold 2 processing
printf("Success\nDumping result to %s... ", SampleImageFnameResGold2);
DumpBmpAsGray(SampleImageFnameResGold2, ImgDstGold2, ImgStride, ImgSize);
//dump result of CUDA 1 processing
printf("Success\nDumping result to %s... ", SampleImageFnameResCUDA1);
DumpBmpAsGray(SampleImageFnameResCUDA1, ImgDstCUDA1, ImgStride, ImgSize);
//dump result of CUDA 2 processing
printf("Success\nDumping result to %s... ", SampleImageFnameResCUDA2);
DumpBmpAsGray(SampleImageFnameResCUDA2, ImgDstCUDA2, ImgStride, ImgSize);
//dump result of CUDA short processing
printf("Success\nDumping result to %s... ", SampleImageFnameResCUDAshort);
DumpBmpAsGray(SampleImageFnameResCUDAshort, ImgDstCUDAshort, ImgStride, ImgSize);
//print speed info
printf("Success\n");
#if 0
printf("Processing time : not relevant in CUDA emulation mode\n");
#else
printf("Processing time (CUDA 1) : %f ms \n", TimeCUDA1);
printf("Processing time (CUDA 2) : %f ms \n", TimeCUDA2);
printf("Processing time (CUDA short): %f ms \n", TimeCUDAshort);
#endif
//calculate PSNR between each pair of images
float PSNR_Src_DstGold1 = CalculatePSNR(ImgSrc, ImgDstGold1, ImgStride, ImgSize);
float PSNR_Src_DstGold2 = CalculatePSNR(ImgSrc, ImgDstGold2, ImgStride, ImgSize);
float PSNR_Src_DstCUDA1 = CalculatePSNR(ImgSrc, ImgDstCUDA1, ImgStride, ImgSize);
float PSNR_Src_DstCUDA2 = CalculatePSNR(ImgSrc, ImgDstCUDA2, ImgStride, ImgSize);
float PSNR_Src_DstCUDAshort = CalculatePSNR(ImgSrc, ImgDstCUDAshort, ImgStride, ImgSize);
float PSNR_DstGold1_DstCUDA1 = CalculatePSNR(ImgDstGold1, ImgDstCUDA1, ImgStride, ImgSize);
float PSNR_DstGold2_DstCUDA2 = CalculatePSNR(ImgDstGold2, ImgDstCUDA2, ImgStride, ImgSize);
float PSNR_DstGold2_DstCUDA16b = CalculatePSNR(ImgDstGold2, ImgDstCUDAshort, ImgStride, ImgSize);
printf("PSNR Original <---> CPU(Gold 1) : %f\n", PSNR_Src_DstGold1);
printf("PSNR Original <---> CPU(Gold 2) : %f\n", PSNR_Src_DstGold2);
printf("PSNR Original <---> GPU(CUDA 1) : %f\n", PSNR_Src_DstCUDA1);
printf("PSNR Original <---> GPU(CUDA 2) : %f\n", PSNR_Src_DstCUDA2);
printf("PSNR Original <---> GPU(CUDA short): %f\n", PSNR_Src_DstCUDAshort);
printf("PSNR CPU(Gold 1) <---> GPU(CUDA 1) : %f\n", PSNR_DstGold1_DstCUDA1);
printf("PSNR CPU(Gold 2) <---> GPU(CUDA 2) : %f\n", PSNR_DstGold2_DstCUDA2);
printf("PSNR CPU(Gold 2) <---> GPU(CUDA short): %f\n", PSNR_DstGold2_DstCUDA16b);
if (PSNR_DstGold1_DstCUDA1 > PSNR_THRESHOLD_EQUAL && PSNR_DstGold2_DstCUDA2 > PSNR_THRESHOLD_EQUAL && PSNR_DstGold2_DstCUDA16b > PSNR_THRESHOLD_EQUAL)
{
printf("\nTEST PASSED!\n");
}
else
{
printf("\nTEST FAILED! (CPU and GPU results differ too much)\n");
}
//
// Finalization
//
//release byte planes
FreePlane(ImgSrc);
FreePlane(ImgDstGold1);
FreePlane(ImgDstGold2);
FreePlane(ImgDstCUDA1);
FreePlane(ImgDstCUDA2);
FreePlane(ImgDstCUDAshort);
//finalize
cudaThreadExit();
cutilExit(argc, argv);
return 0;
}
|
a5b807478e637b97b7e47fe4cf68a0414ee627f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../cnn_helper.h"
#include "rnn.h"
#include "rnn_mapper.h"
struct EmbedInitParams {
DnnHandle handle;
int batchSize, outputSize, vocabSize;
};
Tensor RnnModel::add_embed_node(Tensor x,
int vocab_size,
int output_size,
ParallelConfig pc,
SharedVariable params) {
assert(x.numDim == 2);
assert(x.adim[1] == LSTM_PER_NODE_LENGTH);
assert(x.pdim[1] == LSTM_PER_NODE_LENGTH);
Embed *node = new Embed(config, x, vocab_size, output_size, pc, params);
layers.push_back(node);
return node->outputs[0];
}
Embed::Embed(RnnConfig config,
Tensor x,
int _vocab_size,
int _output_size,
ParallelConfig pc,
SharedVariable _params)
: RnnOp(x, pc, _params), batchSize(x.adim[0]), vocabSize(_vocab_size),
outputSize(_output_size) {
Context ctx = config.lg_ctx;
HighLevelRuntime *runtime = config.lg_hlr;
assert(pc.nDims == 1);
{
Rect<1> rect(Point<1>(0), Point<1>(pc.dim[0] - 1));
part_rect = rect;
}
IndexSpaceT<1> part_is = runtime->create_index_space(ctx, part_rect);
FieldSpace fs = config.field_space;
Rect<3, coord_t> y_rect(
Point<3>(0, 0, 0),
Point<3>(outputSize - 1, batchSize - 1, LSTM_PER_NODE_LENGTH - 1));
IndexSpaceT<3> y_is = runtime->create_index_space(ctx, y_rect);
LogicalRegion y_lr = runtime->create_logical_region(ctx, y_is, fs);
LogicalRegion y_grad_lr = runtime->create_logical_region(ctx, y_is, fs);
int num_par_n = part_rect.hi[0] - part_rect.lo[0] + 1;
assert(batchSize % num_par_n == 0);
int extent_n = batchSize / num_par_n;
int extent_c = outputSize;
Rect<3, coord_t> extent(
Point<3>(0, 0, 0),
Point<3>(extent_c - 1, extent_n - 1, LSTM_PER_NODE_LENGTH - 1));
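// The transform below shifts only the batch dimension, so launch-space color i
// owns batch rows [i*extent_n, (i+1)*extent_n) of the (output, batch, time) tensor.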
Transform<3, 1, coord_t> trans;
trans[0][0] = 0;
trans[1][0] = extent_n;
trans[2][0] = 0;
IndexPartition y_ip = runtime->create_partition_by_restriction(
ctx, y_is, part_is, trans, extent);
assert(runtime->is_index_partition_disjoint(ctx, y_ip));
assert(runtime->is_index_partition_complete(ctx, y_ip));
LogicalPartition y_lp = runtime->get_logical_partition(ctx, y_lr, y_ip);
LogicalPartition y_grad_lp =
runtime->get_logical_partition(ctx, y_grad_lr, y_ip);
outputs[0].region = y_lr;
outputs[0].region_grad = y_grad_lr;
outputs[0].partition = y_lp;
outputs[0].partition_grad = y_grad_lp;
outputs[0].numDim = 3;
outputs[0].adim[0] = outputSize;
outputs[0].adim[1] = batchSize;
outputs[0].adim[2] = LSTM_PER_NODE_LENGTH;
outputs[0].pdim[0] = extent_c;
outputs[0].pdim[1] = extent_n;
outputs[0].pdim[2] = LSTM_PER_NODE_LENGTH;
}
/*
regions[0] (I): x
regions[1] (I): w
regions[2] (O): y
*/
OpMeta *Embed::init_task(Task const *task,
std::vector<PhysicalRegion> const ®ions,
Context ctx,
Runtime *runtime) {
assert(regions.size() == 3);
assert(task->regions.size() == 3);
EmbedInitParams const *embed = (EmbedInitParams *)task->args;
Rect<2> rect_x = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<1> rect_w = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Rect<3> rect_y = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
assert(rect_x.hi[0] - rect_x.lo[0] + 1 == embed->batchSize);
assert(rect_x.hi[1] - rect_x.lo[1] + 1 == LSTM_PER_NODE_LENGTH);
assert(rect_w.hi[0] - rect_w.lo[0] + 1 ==
embed->vocabSize * embed->outputSize);
assert(rect_y.hi[0] - rect_y.lo[0] + 1 == embed->outputSize);
assert(rect_y.hi[1] - rect_y.lo[1] + 1 == embed->batchSize);
assert(rect_y.hi[2] - rect_y.lo[2] + 1 == LSTM_PER_NODE_LENGTH);
EmbedMeta *m = new EmbedMeta(embed->handle);
m->profiling_runtime = false;
return m;
}
void Embed::init(RnnModel const &model) {
Context ctx = model.config.lg_ctx;
Runtime *runtime = model.config.lg_hlr;
int idx = 0;
for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) {
EmbedInitParams initParams;
initParams.handle = model.dnn_handlers[paraConfig.gpu[idx]];
initParams.batchSize = outputs[0].pdim[1];
initParams.outputSize = outputs[0].pdim[0];
initParams.vocabSize = vocabSize;
// batch is the first dim of input and the second dim of output
assert(inputs[0].pdim[0] == outputs[0].pdim[1]);
TaskLauncher launcher(EMBED_INIT_TASK_ID,
TaskArgument(&initParams, sizeof(initParams)),
Predicate::TRUE_PRED,
0 /*MapperID*/,
RnnMapper::assign_to_gpu(paraConfig.gpu[idx]));
DomainPoint dp(*it);
{
LogicalRegion x =
runtime->get_logical_subregion_by_color(inputs[0].partition, dp);
launcher.add_region_requirement(
RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
}
launcher.add_region_requirement(
RegionRequirement(params.region, READ_ONLY, EXCLUSIVE, params.region));
launcher.add_field(1, FID_DATA);
{
LogicalRegion y =
runtime->get_logical_subregion_by_color(outputs[0].partition, dp);
launcher.add_region_requirement(
RegionRequirement(y, WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
}
Future f = runtime->execute_task(ctx, launcher);
meta[idx] = f.get_result<OpMeta *>();
}
}
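// Embedding lookup kernel: each thread produces one output element. Because
// outputSize is a power of two, (i >> shift) is the flattened token position in x
// and (i & (outputSize - 1)) is the column within that token's embedding row.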
__global__ void embedForward(int const *x_ptr,
float const *embed,
float *y_ptr,
coord_t numElements,
int shift,
int outputSize) {
CUDA_KERNEL_LOOP(i, numElements) {
int idx = i >> shift;
int off = i & (outputSize - 1);
int wordIdx = x_ptr[idx];
y_ptr[i] = embed[(wordIdx << shift) + off];
}
}
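// Embedding gradient kernel: several token positions can reference the same word,
// so accumulation into the shared embedding table uses atomicAdd.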
__global__ void embedBackward(int const *x_ptr,
float *embed,
float const *y_ptr,
coord_t numElements,
int shift,
int outputSize) {
CUDA_KERNEL_LOOP(i, numElements) {
int idx = i >> shift;
int off = i & (outputSize - 1);
int wordIdx = x_ptr[idx];
atomicAdd(embed + (wordIdx << shift) + off, y_ptr[i]);
}
}
/*
regions[0](I): x
regions[1](I): w
regions[2](O): y
*/
void Embed::forward_task(Task const *task,
std::vector<PhysicalRegion> const ®ions,
Context ctx,
Runtime *runtime) {
#ifndef DISABLE_COMPUTATION
assert(regions.size() == 3);
assert(task->regions.size() == 3);
EmbedMeta const *m = *((EmbedMeta **)task->args);
AccessorRO<int, 2> const acc_x(regions[0], FID_DATA);
AccessorRO<float, 1> const acc_w(regions[1], FID_DATA);
AccessorWO<float, 3> const acc_y(regions[2], FID_DATA);
Rect<2> rect_x = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<1> rect_w = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Rect<3> rect_y = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
assert(acc_x.accessor.is_dense_arbitrary(rect_x));
assert(acc_w.accessor.is_dense_arbitrary(rect_w));
assert(acc_y.accessor.is_dense_arbitrary(rect_y));
int batch_size = rect_y.hi[1] - rect_y.lo[1] + 1;
int output_size = rect_y.hi[0] - rect_y.lo[0] + 1;
int const *x_ptr = acc_x.ptr(rect_x.lo);
float const *w_ptr = acc_w.ptr(rect_w.lo);
float *y_ptr = acc_y.ptr(rect_y.lo);
hipEvent_t t_start, t_end;
if (m->profiling_runtime) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
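// Compute shift = log2(output_size); the kernel's bit-mask indexing requires
// output_size to be a power of two (enforced by the assert below).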
int shift = 0;
int size = 1;
while (size < output_size) {
size = size * 2;
shift = shift + 1;
}
assert(size == output_size);
hipLaunchKernelGGL(( embedForward), dim3(GET_BLOCKS(rect_y.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
x_ptr, w_ptr, y_ptr, rect_y.volume(), shift, output_size);
if (m->profiling_runtime) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("Embed forward time = %.2lfms\n", elapsed);
}
#endif
}
void Embed::forward(RnnModel const &model) {
Context ctx = model.config.lg_ctx;
Runtime *runtime = model.config.lg_hlr;
int idx = 0;
for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) {
OpMeta *mp = meta[idx];
TaskLauncher launcher(EMBED_FWD_TASK_ID,
TaskArgument(&mp, sizeof(OpMeta *)),
Predicate::TRUE_PRED,
0 /*MapperID*/,
RnnMapper::assign_to_gpu(paraConfig.gpu[idx]));
DomainPoint dp(*it);
{
LogicalRegion x =
runtime->get_logical_subregion_by_color(inputs[0].partition, dp);
launcher.add_region_requirement(
RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
}
launcher.add_region_requirement(
RegionRequirement(params.region, READ_ONLY, EXCLUSIVE, params.region));
launcher.add_field(1, FID_DATA);
{
LogicalRegion y =
runtime->get_logical_subregion_by_color(outputs[0].partition, dp);
launcher.add_region_requirement(
RegionRequirement(y, WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
}
runtime->execute_task(ctx, launcher);
}
}
/*
regions[0](I): x
regions[1](I/O): w_grad
regions[2](I): y_grad
*/
void Embed::backward_task(Task const *task,
std::vector<PhysicalRegion> const ®ions,
Context ctx,
Runtime *runtime) {
#ifndef DISABLE_COMPUTATION
assert(regions.size() == 3);
assert(task->regions.size() == 3);
EmbedMeta const *m = *((EmbedMeta **)task->args);
AccessorRO<int, 2> const acc_x(regions[0], FID_DATA);
AccessorRW<float, 1> const acc_w(regions[1], FID_DATA);
AccessorRO<float, 3> const acc_y(regions[2], FID_DATA);
Rect<2> rect_x = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<1> rect_w = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Rect<3> rect_y = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
assert(acc_x.accessor.is_dense_arbitrary(rect_x));
assert(acc_w.accessor.is_dense_arbitrary(rect_w));
assert(acc_y.accessor.is_dense_arbitrary(rect_y));
int batch_size = rect_y.hi[1] - rect_y.lo[1] + 1;
int output_size = rect_y.hi[0] - rect_y.lo[0] + 1;
int const *x_ptr = acc_x.ptr(rect_x.lo);
float *w_ptr = acc_w.ptr(rect_w.lo);
float const *y_ptr = acc_y.ptr(rect_y.lo);
hipEvent_t t_start, t_end;
if (m->profiling_runtime) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
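// Same power-of-two requirement as the forward pass: shift = log2(output_size).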
int shift = 0;
int size = 1;
while (size < output_size) {
size = size * 2;
shift = shift + 1;
}
assert(size == output_size);
hipLaunchKernelGGL(( embedBackward), dim3(GET_BLOCKS(rect_y.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
x_ptr, w_ptr, y_ptr, rect_y.volume(), shift, output_size);
if (m->profiling_runtime) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("Embed backward time = %.2lfms\n", elapsed);
}
#endif
}
void Embed::backward(RnnModel const &model) {
Context ctx = model.config.lg_ctx;
Runtime *runtime = model.config.lg_hlr;
int idx = 0;
for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) {
OpMeta *mp = meta[idx];
TaskLauncher launcher(EMBED_BWD_TASK_ID,
TaskArgument(&mp, sizeof(OpMeta *)),
Predicate::TRUE_PRED,
0 /*MapperID*/,
RnnMapper::assign_to_gpu(paraConfig.gpu[idx]));
DomainPoint dp(*it);
{
LogicalRegion x =
runtime->get_logical_subregion_by_color(inputs[0].partition, dp);
launcher.add_region_requirement(
RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
}
launcher.add_region_requirement(
RegionRequirement(params.gradients[paraConfig.gpu[idx]],
READ_WRITE,
EXCLUSIVE,
params.gradients[paraConfig.gpu[idx]]));
launcher.add_field(1, FID_DATA);
{
LogicalRegion y_grad = runtime->get_logical_subregion_by_color(
outputs[0].partition_grad, dp);
launcher.add_region_requirement(RegionRequirement(
y_grad, READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(2, FID_DATA);
}
runtime->execute_task(ctx, launcher);
}
}
void Embed::update(RnnModel const &model) {}
| a5b807478e637b97b7e47fe4cf68a0414ee627f2.cu | /* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../cnn_helper.h"
#include "rnn.h"
#include "rnn_mapper.h"
struct EmbedInitParams {
DnnHandle handle;
int batchSize, outputSize, vocabSize;
};
Tensor RnnModel::add_embed_node(Tensor x,
int vocab_size,
int output_size,
ParallelConfig pc,
SharedVariable params) {
assert(x.numDim == 2);
assert(x.adim[1] == LSTM_PER_NODE_LENGTH);
assert(x.pdim[1] == LSTM_PER_NODE_LENGTH);
Embed *node = new Embed(config, x, vocab_size, output_size, pc, params);
layers.push_back(node);
return node->outputs[0];
}
Embed::Embed(RnnConfig config,
Tensor x,
int _vocab_size,
int _output_size,
ParallelConfig pc,
SharedVariable _params)
: RnnOp(x, pc, _params), batchSize(x.adim[0]), vocabSize(_vocab_size),
outputSize(_output_size) {
Context ctx = config.lg_ctx;
HighLevelRuntime *runtime = config.lg_hlr;
assert(pc.nDims == 1);
{
Rect<1> rect(Point<1>(0), Point<1>(pc.dim[0] - 1));
part_rect = rect;
}
IndexSpaceT<1> part_is = runtime->create_index_space(ctx, part_rect);
FieldSpace fs = config.field_space;
Rect<3, coord_t> y_rect(
Point<3>(0, 0, 0),
Point<3>(outputSize - 1, batchSize - 1, LSTM_PER_NODE_LENGTH - 1));
IndexSpaceT<3> y_is = runtime->create_index_space(ctx, y_rect);
LogicalRegion y_lr = runtime->create_logical_region(ctx, y_is, fs);
LogicalRegion y_grad_lr = runtime->create_logical_region(ctx, y_is, fs);
int num_par_n = part_rect.hi[0] - part_rect.lo[0] + 1;
assert(batchSize % num_par_n == 0);
int extent_n = batchSize / num_par_n;
int extent_c = outputSize;
Rect<3, coord_t> extent(
Point<3>(0, 0, 0),
Point<3>(extent_c - 1, extent_n - 1, LSTM_PER_NODE_LENGTH - 1));
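// The transform below shifts only the batch dimension, so launch-space color i
// owns batch rows [i*extent_n, (i+1)*extent_n) of the (output, batch, time) tensor.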
Transform<3, 1, coord_t> trans;
trans[0][0] = 0;
trans[1][0] = extent_n;
trans[2][0] = 0;
IndexPartition y_ip = runtime->create_partition_by_restriction(
ctx, y_is, part_is, trans, extent);
assert(runtime->is_index_partition_disjoint(ctx, y_ip));
assert(runtime->is_index_partition_complete(ctx, y_ip));
LogicalPartition y_lp = runtime->get_logical_partition(ctx, y_lr, y_ip);
LogicalPartition y_grad_lp =
runtime->get_logical_partition(ctx, y_grad_lr, y_ip);
outputs[0].region = y_lr;
outputs[0].region_grad = y_grad_lr;
outputs[0].partition = y_lp;
outputs[0].partition_grad = y_grad_lp;
outputs[0].numDim = 3;
outputs[0].adim[0] = outputSize;
outputs[0].adim[1] = batchSize;
outputs[0].adim[2] = LSTM_PER_NODE_LENGTH;
outputs[0].pdim[0] = extent_c;
outputs[0].pdim[1] = extent_n;
outputs[0].pdim[2] = LSTM_PER_NODE_LENGTH;
}
/*
regions[0] (I): x
regions[1] (I): w
regions[2] (O): y
*/
OpMeta *Embed::init_task(Task const *task,
std::vector<PhysicalRegion> const ®ions,
Context ctx,
Runtime *runtime) {
assert(regions.size() == 3);
assert(task->regions.size() == 3);
EmbedInitParams const *embed = (EmbedInitParams *)task->args;
Rect<2> rect_x = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<1> rect_w = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Rect<3> rect_y = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
assert(rect_x.hi[0] - rect_x.lo[0] + 1 == embed->batchSize);
assert(rect_x.hi[1] - rect_x.lo[1] + 1 == LSTM_PER_NODE_LENGTH);
assert(rect_w.hi[0] - rect_w.lo[0] + 1 ==
embed->vocabSize * embed->outputSize);
assert(rect_y.hi[0] - rect_y.lo[0] + 1 == embed->outputSize);
assert(rect_y.hi[1] - rect_y.lo[1] + 1 == embed->batchSize);
assert(rect_y.hi[2] - rect_y.lo[2] + 1 == LSTM_PER_NODE_LENGTH);
EmbedMeta *m = new EmbedMeta(embed->handle);
m->profiling_runtime = false;
return m;
}
void Embed::init(RnnModel const &model) {
Context ctx = model.config.lg_ctx;
Runtime *runtime = model.config.lg_hlr;
int idx = 0;
for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) {
EmbedInitParams initParams;
initParams.handle = model.dnn_handlers[paraConfig.gpu[idx]];
initParams.batchSize = outputs[0].pdim[1];
initParams.outputSize = outputs[0].pdim[0];
initParams.vocabSize = vocabSize;
// batch is the first dim of input and the second dim of output
assert(inputs[0].pdim[0] == outputs[0].pdim[1]);
TaskLauncher launcher(EMBED_INIT_TASK_ID,
TaskArgument(&initParams, sizeof(initParams)),
Predicate::TRUE_PRED,
0 /*MapperID*/,
RnnMapper::assign_to_gpu(paraConfig.gpu[idx]));
DomainPoint dp(*it);
{
LogicalRegion x =
runtime->get_logical_subregion_by_color(inputs[0].partition, dp);
launcher.add_region_requirement(
RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
}
launcher.add_region_requirement(
RegionRequirement(params.region, READ_ONLY, EXCLUSIVE, params.region));
launcher.add_field(1, FID_DATA);
{
LogicalRegion y =
runtime->get_logical_subregion_by_color(outputs[0].partition, dp);
launcher.add_region_requirement(
RegionRequirement(y, WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
}
Future f = runtime->execute_task(ctx, launcher);
meta[idx] = f.get_result<OpMeta *>();
}
}
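// Embedding lookup kernel: each thread produces one output element. Because
// outputSize is a power of two, (i >> shift) is the flattened token position in x
// and (i & (outputSize - 1)) is the column within that token's embedding row.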
__global__ void embedForward(int const *x_ptr,
float const *embed,
float *y_ptr,
coord_t numElements,
int shift,
int outputSize) {
CUDA_KERNEL_LOOP(i, numElements) {
int idx = i >> shift;
int off = i & (outputSize - 1);
int wordIdx = x_ptr[idx];
y_ptr[i] = embed[(wordIdx << shift) + off];
}
}
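// Embedding gradient kernel: several token positions can reference the same word,
// so accumulation into the shared embedding table uses atomicAdd.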
__global__ void embedBackward(int const *x_ptr,
float *embed,
float const *y_ptr,
coord_t numElements,
int shift,
int outputSize) {
CUDA_KERNEL_LOOP(i, numElements) {
int idx = i >> shift;
int off = i & (outputSize - 1);
int wordIdx = x_ptr[idx];
atomicAdd(embed + (wordIdx << shift) + off, y_ptr[i]);
}
}
/*
regions[0](I): x
regions[1](I): w
regions[2](O): y
*/
void Embed::forward_task(Task const *task,
std::vector<PhysicalRegion> const ®ions,
Context ctx,
Runtime *runtime) {
#ifndef DISABLE_COMPUTATION
assert(regions.size() == 3);
assert(task->regions.size() == 3);
EmbedMeta const *m = *((EmbedMeta **)task->args);
AccessorRO<int, 2> const acc_x(regions[0], FID_DATA);
AccessorRO<float, 1> const acc_w(regions[1], FID_DATA);
AccessorWO<float, 3> const acc_y(regions[2], FID_DATA);
Rect<2> rect_x = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<1> rect_w = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Rect<3> rect_y = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
assert(acc_x.accessor.is_dense_arbitrary(rect_x));
assert(acc_w.accessor.is_dense_arbitrary(rect_w));
assert(acc_y.accessor.is_dense_arbitrary(rect_y));
int batch_size = rect_y.hi[1] - rect_y.lo[1] + 1;
int output_size = rect_y.hi[0] - rect_y.lo[0] + 1;
int const *x_ptr = acc_x.ptr(rect_x.lo);
float const *w_ptr = acc_w.ptr(rect_w.lo);
float *y_ptr = acc_y.ptr(rect_y.lo);
cudaEvent_t t_start, t_end;
if (m->profiling_runtime) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
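// Compute shift = log2(output_size); the kernel's bit-mask indexing requires
// output_size to be a power of two (enforced by the assert below).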
int shift = 0;
int size = 1;
while (size < output_size) {
size = size * 2;
shift = shift + 1;
}
assert(size == output_size);
embedForward<<<GET_BLOCKS(rect_y.volume()), CUDA_NUM_THREADS>>>(
x_ptr, w_ptr, y_ptr, rect_y.volume(), shift, output_size);
if (m->profiling_runtime) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("Embed forward time = %.2lfms\n", elapsed);
}
#endif
}
void Embed::forward(RnnModel const &model) {
Context ctx = model.config.lg_ctx;
Runtime *runtime = model.config.lg_hlr;
int idx = 0;
for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) {
OpMeta *mp = meta[idx];
TaskLauncher launcher(EMBED_FWD_TASK_ID,
TaskArgument(&mp, sizeof(OpMeta *)),
Predicate::TRUE_PRED,
0 /*MapperID*/,
RnnMapper::assign_to_gpu(paraConfig.gpu[idx]));
DomainPoint dp(*it);
{
LogicalRegion x =
runtime->get_logical_subregion_by_color(inputs[0].partition, dp);
launcher.add_region_requirement(
RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
}
launcher.add_region_requirement(
RegionRequirement(params.region, READ_ONLY, EXCLUSIVE, params.region));
launcher.add_field(1, FID_DATA);
{
LogicalRegion y =
runtime->get_logical_subregion_by_color(outputs[0].partition, dp);
launcher.add_region_requirement(
RegionRequirement(y, WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
}
runtime->execute_task(ctx, launcher);
}
}
/*
regions[0](I): x
regions[1](I/O): w_grad
regions[2](I): y_grad
*/
void Embed::backward_task(Task const *task,
std::vector<PhysicalRegion> const ®ions,
Context ctx,
Runtime *runtime) {
#ifndef DISABLE_COMPUTATION
assert(regions.size() == 3);
assert(task->regions.size() == 3);
EmbedMeta const *m = *((EmbedMeta **)task->args);
AccessorRO<int, 2> const acc_x(regions[0], FID_DATA);
AccessorRW<float, 1> const acc_w(regions[1], FID_DATA);
AccessorRO<float, 3> const acc_y(regions[2], FID_DATA);
Rect<2> rect_x = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Rect<1> rect_w = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Rect<3> rect_y = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
assert(acc_x.accessor.is_dense_arbitrary(rect_x));
assert(acc_w.accessor.is_dense_arbitrary(rect_w));
assert(acc_y.accessor.is_dense_arbitrary(rect_y));
int batch_size = rect_y.hi[1] - rect_y.lo[1] + 1;
int output_size = rect_y.hi[0] - rect_y.lo[0] + 1;
int const *x_ptr = acc_x.ptr(rect_x.lo);
float *w_ptr = acc_w.ptr(rect_w.lo);
float const *y_ptr = acc_y.ptr(rect_y.lo);
cudaEvent_t t_start, t_end;
if (m->profiling_runtime) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
int shift = 0;
int size = 1;
while (size < output_size) {
size = size * 2;
shift = shift + 1;
}
assert(size == output_size);
embedBackward<<<GET_BLOCKS(rect_y.volume()), CUDA_NUM_THREADS>>>(
x_ptr, w_ptr, y_ptr, rect_y.volume(), shift, output_size);
if (m->profiling_runtime) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("Embed backward time = %.2lfms\n", elapsed);
}
#endif
}
void Embed::backward(RnnModel const &model) {
Context ctx = model.config.lg_ctx;
Runtime *runtime = model.config.lg_hlr;
int idx = 0;
for (PointInRectIterator<1> it(part_rect); it(); it++, idx++) {
OpMeta *mp = meta[idx];
TaskLauncher launcher(EMBED_BWD_TASK_ID,
TaskArgument(&mp, sizeof(OpMeta *)),
Predicate::TRUE_PRED,
0 /*MapperID*/,
RnnMapper::assign_to_gpu(paraConfig.gpu[idx]));
DomainPoint dp(*it);
{
LogicalRegion x =
runtime->get_logical_subregion_by_color(inputs[0].partition, dp);
launcher.add_region_requirement(
RegionRequirement(x, READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
}
launcher.add_region_requirement(
RegionRequirement(params.gradients[paraConfig.gpu[idx]],
READ_WRITE,
EXCLUSIVE,
params.gradients[paraConfig.gpu[idx]]));
launcher.add_field(1, FID_DATA);
{
LogicalRegion y_grad = runtime->get_logical_subregion_by_color(
outputs[0].partition_grad, dp);
launcher.add_region_requirement(RegionRequirement(
y_grad, READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(2, FID_DATA);
}
runtime->execute_task(ctx, launcher);
}
}
void Embed::update(RnnModel const &model) {}
|
b4716f7ea063ad21f6c24b919558c31b83754086.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "harris_detector_gpu.h" #include <iostream>
#include <limits> #include <algorithm> #include <cstdio>
template<typename T>
__global__
void convolve_kernel(T *image, double *result, int rows,int cols, double *kernal, int kernal_dim)
{
int ty = blockIdx.y * blockDim.y + threadIdx.y; int tx = blockIdx.x * blockDim.x + threadIdx.x; int kernel_offset = kernal_dim / 2.0f;
int image_row = ty; int image_col = tx;
if(image_row >= kernel_offset && image_row < rows - kernel_offset &&
image_col >= kernel_offset && image_col < cols - kernel_offset){
double value = 0.0f;
for(int i=0; i<kernal_dim; ++i) {
int row = (image_row - kernel_offset) + i; for(int j=0; j<kernal_dim; ++j) {
int col = (image_col - kernel_offset) + j; value += kernal[i * kernal_dim + j] *
(double)image[row * cols + col];
}
}
result[image_row * cols + image_col] = (double)value;
}
}
__global__
void non_maxima_suppression_kernel(double *image,double *result,int rows, int cols, int window_dim)
{
int ty = blockIdx.y * blockDim.y + threadIdx.y; int tx = blockIdx.x * blockDim.x + threadIdx.x; int row = ty;
int col = tx;
int DIM = window_dim; int OFF = DIM / 2;
if(row >= OFF && row < rows - OFF && col >= OFF && col < cols - OFF) {
double filtered= image[row * cols + col]; bool running = true;
for(int i=0; i<DIM && running; ++i) { int r = (row - OFF) + i;
for(int j=0; j<DIM && running; ++j) { int c = (col - OFF) + j;
if(i == DIM/2 && j == DIM/2) continue;
double temp = image[r * cols + c]; if(temp > filtered) {
filtered = 0; running = false;
}
}
}
result[row * cols + col] = filtered;
}
}
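// For M = [[d, e], [f, g]] the eigenvalues are the roots of
// l^2 - (d + g) l + (d g - e f) = 0, i.e. l = ((d + g) +/- sqrt((d + g)^2 - 4 (d g - e f))) / 2;
// they are real here because the structure tensor built below is symmetric (e == f).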
__host__ __device__
void eigen_values(double M[2][2], double *l1, double *l2) {
double d = M[0][0];
double e = M[0][1];
double f = M[1][0];
double g = M[1][1];
*l1 = ((d + g) + sqrt(pow(d + g, 2.0) - 4*(d*g - f*e))) / 2.0f;
*l2 = ((d + g) - sqrt(pow(d + g, 2.0) - 4*(d*g - f*e))) / 2.0f;
}
__device__
double sum_neighbors(double *image, int row, int col,int cols, int window_dim) {
int window_center = window_dim / 2.0f; double sum = 100.0f;
for(int i=0; i<window_dim; ++i) {
int image_row = (row - window_center) + i; for(int j=0; j<window_dim; ++j) {
int image_col = (col - window_center) + j; sum += image[image_row * cols + image_col];
}
}
return sum;
}
__global__
void detect_corners_kernel(double *dx2, double *dy2,double *dydx, int rows, int cols, double k,double *corner_response, int window_dim)
{
int ty = blockIdx.y * blockDim.y + threadIdx.y; int tx = blockIdx.x * blockDim.x + threadIdx.x; int window_offset = window_dim / 2.0f;
int image_row = ty; int image_col = tx; double M[2][2];
if(image_row < rows - window_offset && image_col < cols - window_offset &&
image_row >= window_offset && image_col >= window_offset) {
M[0][0] = sum_neighbors(dx2, image_row, image_col,
cols, window_dim);
M[0][1] = sum_neighbors(dydx, image_row, image_col,
cols, window_dim);
M[1][1] = sum_neighbors(dy2, image_row, image_col,
cols, window_dim);
M[1][0] = M[0][1];
double l1, l2; eigen_values(M, &l1, &l2);
double r = l1 * l2 - k * pow(l1 + l2, 2.0); corner_response[image_row * cols + image_col] = r > 0 ? r : 0;
}
}
template<typename T>
static double *convolve(T *image, int rows, int cols, double *kernal,int kernal_size) {
using namespace harris_detection;
double *deviceResult = alloc_device<double>(rows, cols, true); double *deviceKernel = to_device<double>(kernal, kernal_size,
kernal_size);
T *deviceImage = to_device<unsigned char>(image, rows, cols);
dim3 dimGrid(ceil(cols / (double)TILE_DIM),
ceil(rows / (double)TILE_DIM)); dim3 dimBlock(TILE_DIM, TILE_DIM);
hipLaunchKernelGGL(( convolve_kernel<T>) , dim3(dimGrid), dim3(dimBlock) , 0, 0, deviceImage,
deviceResult, rows, cols, deviceKernel, kernal_size);
hipDeviceSynchronize();
double *host_result = to_host<double>(deviceResult, rows, cols); hipFree(deviceKernel);
hipFree(deviceImage); hipFree(deviceResult);
return host_result;
}
static double *non_maxima_supression(double *image, int rows, int cols,int window_dim)
{
using namespace harris_detection;
double *deviceResult = alloc_device<double>(rows, cols, true); double *deviceImage = to_device<double>(image, rows, cols);
dim3 dimGrid(ceil(cols / (double)TILE_DIM),
ceil(rows / (double)TILE_DIM)); dim3 dimBlock(TILE_DIM, TILE_DIM);
hipLaunchKernelGGL(( non_maxima_suppression_kernel) , dim3(dimGrid), dim3(dimBlock)
, 0, 0, deviceImage, deviceResult, rows, cols, window_dim);
CUDA_SAFE(hipDeviceSynchronize());
double *host_result = to_host<double>(deviceResult, rows, cols); hipFree(deviceImage);
hipFree(deviceResult);
return host_result;
}
static double *corner_detector(double *dx2, double *dy2, double *dxdy,int rows, int cols, double k, int window_dim)
{
using namespace harris_detection;
double *deviceDx2 = to_device<double>(dx2, rows, cols); double *deviceDy2 = to_device<double>(dy2, rows, cols); double *deviceDxDy = to_device<double>(dxdy, rows, cols);
double *deviceCornerResponse = alloc_device<double>(rows, cols,
true);
dim3 dimGrid(ceil(cols/ (double)TILE_DIM),
ceil(rows / (double)TILE_DIM)); dim3 dimBlock(TILE_DIM, TILE_DIM);
hipLaunchKernelGGL(( detect_corners_kernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, deviceDx2,
deviceDy2, deviceDxDy, rows, cols, k,
deviceCornerResponse, window_dim);
hipDeviceSynchronize();
double *hostCornerResponse = to_host<double>(deviceCornerResponse,
rows, cols);
hipFree(deviceCornerResponse); hipFree(deviceDx2); hipFree(deviceDy2); hipFree(deviceDxDy);
return hostCornerResponse;
}
namespace harris_detection { namespace naive{
void detect_features(std::vector<cv::KeyPoint> &features,unsigned char *image, int rows, int cols, double k, double thresh, int window_dim)
{
const int NMS_DIM = 5;
double *smoothed = convolve<unsigned char>(image, rows,cols, filters::gaussian_3x3,3);
double *dx = convolve<unsigned char>(image, rows, cols,
filters::sobel_x_3x3, 3);
double *dy = convolve<unsigned char>(image, rows, cols,
filters::sobel_y_3x3, 3);
double *dxdy = new double[rows * cols]; for(int i=0; i<rows * cols; ++i) {
dxdy[i] = dx[i] * dy[i];
dx[i] *= dx[i];
dy[i] *= dy[i];
}
double *corner_response = corner_detector(dx, dy, dxdy,
rows, cols, k, window_dim); double *suppressed = non_maxima_supression(corner_response,
rows, cols, NMS_DIM);
for(int i=0; i < rows; i++) { for(int j=0; j < cols; ++j) {
if(suppressed[i * cols + j] > 0.0) { features.push_back(cv::KeyPoint(j, i, 5, -1));
}
}
}
delete dx; delete dy; delete dxdy;
delete corner_response; delete suppressed; delete smoothed;
}
}
} | b4716f7ea063ad21f6c24b919558c31b83754086.cu |
#include "harris_detector_gpu.h" #include <iostream>
#include <limits> #include <algorithm> #include <cstdio>
template<typename T>
__global__
void convolve_kernel(T *image, double *result, int rows,int cols, double *kernal, int kernal_dim)
{
int ty = blockIdx.y * blockDim.y + threadIdx.y; int tx = blockIdx.x * blockDim.x + threadIdx.x; int kernel_offset = kernal_dim / 2.0f;
int image_row = ty; int image_col = tx;
if(image_row >= kernel_offset && image_row < rows - kernel_offset &&
image_col >= kernel_offset && image_col < cols - kernel_offset){
double value = 0.0f;
for(int i=0; i<kernal_dim; ++i) {
int row = (image_row - kernel_offset) + i; for(int j=0; j<kernal_dim; ++j) {
int col = (image_col - kernel_offset) + j; value += kernal[i * kernal_dim + j] *
(double)image[row * cols + col];
}
}
result[image_row * cols + image_col] = (double)value;
}
}
__global__
void non_maxima_suppression_kernel(double *image,double *result,int rows, int cols, int window_dim)
{
int ty = blockIdx.y * blockDim.y + threadIdx.y; int tx = blockIdx.x * blockDim.x + threadIdx.x; int row = ty;
int col = tx;
int DIM = window_dim; int OFF = DIM / 2;
if(row >= OFF && row < rows - OFF && col >= OFF && col < cols - OFF) {
double filtered= image[row * cols + col]; bool running = true;
for(int i=0; i<DIM && running; ++i) { int r = (row - OFF) + i;
for(int j=0; j<DIM && running; ++j) { int c = (col - OFF) + j;
if(i == DIM/2 && j == DIM/2) continue;
double temp = image[r * cols + c]; if(temp > filtered) {
filtered = 0; running = false;
}
}
}
result[row * cols + col] = filtered;
}
}
__host__ __device__
void eigen_values(double M[2][2], double *l1, double *l2) {
double d = M[0][0];
double e = M[0][1];
double f = M[1][0];
double g = M[1][1];
*l1 = ((d + g) + sqrt(pow(d + g, 2.0) - 4*(d*g - f*e))) / 2.0f;
*l2 = ((d + g) - sqrt(pow(d + g, 2.0) - 4*(d*g - f*e))) / 2.0f;
}
__device__
double sum_neighbors(double *image, int row, int col,int cols, int window_dim) {
int window_center = window_dim / 2.0f; double sum = 100.0f;
for(int i=0; i<window_dim; ++i) {
int image_row = (row - window_center) + i; for(int j=0; j<window_dim; ++j) {
int image_col = (col - window_center) + j; sum += image[image_row * cols + image_col];
}
}
return sum;
}
__global__
void detect_corners_kernel(double *dx2, double *dy2,double *dydx, int rows, int cols, double k,double *corner_response, int window_dim)
{
int ty = blockIdx.y * blockDim.y + threadIdx.y; int tx = blockIdx.x * blockDim.x + threadIdx.x; int window_offset = window_dim / 2.0f;
int image_row = ty; int image_col = tx; double M[2][2];
if(image_row < rows - window_offset && image_col < cols - window_offset &&
image_row >= window_offset && image_col >= window_offset) {
M[0][0] = sum_neighbors(dx2, image_row, image_col,
cols, window_dim);
M[0][1] = sum_neighbors(dydx, image_row, image_col,
cols, window_dim);
M[1][1] = sum_neighbors(dy2, image_row, image_col,
cols, window_dim);
M[1][0] = M[0][1];
double l1, l2; eigen_values(M, &l1, &l2);
double r = l1 * l2 - k * pow(l1 + l2, 2.0); corner_response[image_row * cols + image_col] = r > 0 ? r : 0;
}
}
template<typename T>
static double *convolve(T *image, int rows, int cols, double *kernal,int kernal_size) {
using namespace harris_detection;
double *deviceResult = alloc_device<double>(rows, cols, true); double *deviceKernel = to_device<double>(kernal, kernal_size,
kernal_size);
T *deviceImage = to_device<unsigned char>(image, rows, cols);
dim3 dimGrid(ceil(cols / (double)TILE_DIM),
ceil(rows / (double)TILE_DIM)); dim3 dimBlock(TILE_DIM, TILE_DIM);
convolve_kernel<T> <<< dimGrid, dimBlock >>>(deviceImage,
deviceResult, rows, cols, deviceKernel, kernal_size);
cudaDeviceSynchronize();
double *host_result = to_host<double>(deviceResult, rows, cols); cudaFree(deviceKernel);
cudaFree(deviceImage); cudaFree(deviceResult);
return host_result;
}
static double *non_maxima_supression(double *image, int rows, int cols,int window_dim)
{
using namespace harris_detection;
double *deviceResult = alloc_device<double>(rows, cols, true); double *deviceImage = to_device<double>(image, rows, cols);
dim3 dimGrid(ceil(cols / (double)TILE_DIM),
ceil(rows / (double)TILE_DIM)); dim3 dimBlock(TILE_DIM, TILE_DIM);
non_maxima_suppression_kernel <<< dimGrid, dimBlock
>>>(deviceImage, deviceResult, rows, cols, window_dim);
CUDA_SAFE(cudaDeviceSynchronize());
double *host_result = to_host<double>(deviceResult, rows, cols); cudaFree(deviceImage);
cudaFree(deviceResult);
return host_result;
}
static double *corner_detector(double *dx2, double *dy2, double *dxdy,int rows, int cols, double k, int window_dim)
{
using namespace harris_detection;
double *deviceDx2 = to_device<double>(dx2, rows, cols); double *deviceDy2 = to_device<double>(dy2, rows, cols); double *deviceDxDy = to_device<double>(dxdy, rows, cols);
double *deviceCornerResponse = alloc_device<double>(rows, cols,
true);
dim3 dimGrid(ceil(cols/ (double)TILE_DIM),
ceil(rows / (double)TILE_DIM)); dim3 dimBlock(TILE_DIM, TILE_DIM);
detect_corners_kernel <<< dimGrid, dimBlock >>> (deviceDx2,
deviceDy2, deviceDxDy, rows, cols, k,
deviceCornerResponse, window_dim);
cudaDeviceSynchronize();
double *hostCornerResponse = to_host<double>(deviceCornerResponse,
rows, cols);
cudaFree(deviceCornerResponse); cudaFree(deviceDx2); cudaFree(deviceDy2); cudaFree(deviceDxDy);
return hostCornerResponse;
}
namespace harris_detection { namespace naive{
void detect_features(std::vector<cv::KeyPoint> &features,unsigned char *image, int rows, int cols, double k, double thresh, int window_dim)
{
const int NMS_DIM = 5;
double *smoothed = convolve<unsigned char>(image, rows,cols, filters::gaussian_3x3,3);
double *dx = convolve<unsigned char>(image, rows, cols,
filters::sobel_x_3x3, 3);
double *dy = convolve<unsigned char>(image, rows, cols,
filters::sobel_y_3x3, 3);
double *dxdy = new double[rows * cols]; for(int i=0; i<rows * cols; ++i) {
dxdy[i] = dx[i] * dy[i];
dx[i] *= dx[i];
dy[i] *= dy[i];
}
double *corner_response = corner_detector(dx, dy, dxdy,
rows, cols, k, window_dim); double *suppressed = non_maxima_supression(corner_response,
rows, cols, NMS_DIM);
for(int i=0; i < rows; i++) { for(int j=0; j < cols; ++j) {
if(suppressed[i * cols + j] > 0.0) { features.push_back(cv::KeyPoint(j, i, 5, -1));
}
}
}
delete dx; delete dy; delete dxdy;
delete corner_response; delete suppressed; delete smoothed;
}
}
} |
74b76912084a6f3d78774723971e6b8e577f435b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
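//As a quick sanity check: the weights sum to 1.0, so a white pixel
//(255, 255, 255) stays 255, while pure red, green and blue map to roughly
//76, 150 and 29 respectively.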
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int col_idx = blockIdx.x * blockDim.x + threadIdx.x;
int row_idx = blockIdx.y * blockDim.y + threadIdx.y;
int offset = numCols * row_idx + col_idx;
// int row_idx = blockIdx.x;
// int col_idx = threadIdx.x;
// int offset = blockDim.x * row_idx + col_idx;
// Check if this position is valid or not
if(row_idx < numRows && col_idx < numCols){
const unsigned char R = rgbaImage[offset].x;
const unsigned char G = rgbaImage[offset].y;
const unsigned char B = rgbaImage[offset].z;
greyImage[offset] = .299f * R + .587f * G + .114f * B;
// uchar4 rgbaPixel = rgbaImage[offset];
// float ChannelSum = .299f * rgbaPixel.x + .587f * rgbaPixel.y + .114f * rgbaPixel.z;
// greyImage[offset] = ChannelSum;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(16, 16); //TODO
const dim3 gridSize((numCols - 1) / blockSize.x + 1,
(numRows - 1) / blockSize.y + 1); //TODO
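//(n - 1) / blockSize + 1 is integer ceiling division: e.g. numCols = 1000
//gives 63 blocks of 16 threads (1008 threads in x), which is why the kernel
//checks row_idx/col_idx against the image bounds.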
// const dim3 blockSize(numRows, 1, 1); //TODO
// const dim3 gridSize(numCols, 1, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 74b76912084a6f3d78774723971e6b8e577f435b.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int col_idx = blockIdx.x * blockDim.x + threadIdx.x;
int row_idx = blockIdx.y * blockDim.y + threadIdx.y;
int offset = numCols * row_idx + col_idx;
// int row_idx = blockIdx.x;
// int col_idx = threadIdx.x;
// int offset = blockDim.x * row_idx + col_idx;
// Check if this position is valid or not
if(row_idx < numRows && col_idx < numCols){
const unsigned char R = rgbaImage[offset].x;
const unsigned char G = rgbaImage[offset].y;
const unsigned char B = rgbaImage[offset].z;
greyImage[offset] = .299f * R + .587f * G + .114f * B;
// uchar4 rgbaPixel = rgbaImage[offset];
// float ChannelSum = .299f * rgbaPixel.x + .587f * rgbaPixel.y + .114f * rgbaPixel.z;
// greyImage[offset] = ChannelSum;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(16, 16); //TODO
const dim3 gridSize((numCols - 1) / blockSize.x + 1,
(numRows - 1) / blockSize.y + 1); //TODO
// const dim3 blockSize(numRows, 1, 1); //TODO
// const dim3 gridSize(numCols, 1, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
38a91cf589cff3b4eb68645c4f9d938d52252d5c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "optixParams.h" // our launch params
extern "C" {
__constant__ LaunchParams optixLaunchParams;
}
// a single ray type
enum { PHONG=0, SHADOW, RAY_TYPE_COUNT };
struct colorPRD{
float3 color;
unsigned int seed;
} ;
struct shadowPRD{
float shadowAtt;
unsigned int seed;
} ;
// -------------------------------------------------------
// closest hit computes color based solely on the triangle normal
extern "C" __global__ void __closesthit__radiance() {
colorPRD &prd = *(colorPRD *)getPRD<colorPRD>();
const TriangleMeshSBTData &sbtData
= *(const TriangleMeshSBTData*)optixGetSbtDataPointer();
// retrieve primitive id and indexes
const int primID = optixGetPrimitiveIndex();
const uint3 index = sbtData.index[primID];
// get barycentric coordinates
const float u = optixGetTriangleBarycentrics().x;
const float v = optixGetTriangleBarycentrics().y;
// compute normal
const float4 n
= (1.f-u-v) * sbtData.vertexD.normal[index.x]
+ u * sbtData.vertexD.normal[index.y]
+ v * sbtData.vertexD.normal[index.z];
// intersection position
const float3 pos = optixGetWorldRayOrigin() + optixGetRayTmax()*optixGetWorldRayDirection();
// direction towards light
float3 lPos = make_float3(optixLaunchParams.global->lightPos);
float3 lDir = normalize(lPos - pos);
float3 nn = normalize(make_float3(n));
float intensity = max(dot(lDir, nn),0.0f);
int numRays = optixLaunchParams.global->aoRays;
float ambientOcclusion = 0;
// Ambient Occlusion
shadowPRD AOPRD;
AOPRD.shadowAtt = 1.0f;
AOPRD.seed = prd.seed;
uint32_t u0, u1;
packPointer( &AOPRD, u0, u1 );
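  // Each AO ray samples a cosine-weighted direction in the hemisphere around the
  // shading normal (Onb builds the local frame). Rays that hit nothing within
  // aoRadius keep shadowAtt = 1, so the average taken below estimates how
  // unoccluded the hit point is.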
for ( int i = 0; i < numRays; ++i) {
const float z1 = rnd(prd.seed);
const float z2 = rnd(prd.seed);
float3 rayDir;
cosine_sample_hemisphere( z1, z2, rayDir );
Onb onb( nn );
onb.inverse_transform( rayDir );
optixTrace(optixLaunchParams.traversable,
pos,
rayDir,
0.01f, // tmin
optixLaunchParams.global->aoRadius, //tmax
0.0f, // rayTime
OptixVisibilityMask( 255 ),
OPTIX_RAY_FLAG_NONE, //OPTIX_RAY_FLAG_NONE,
SHADOW, // SBT offset
RAY_TYPE_COUNT, // SBT stride
SHADOW, // missSBTIndex
u0, u1 );
ambientOcclusion += AOPRD.shadowAtt;
}
prd.color = make_float3(ambientOcclusion / numRays);
}
// any hit to ignore intersections with back facing geometry
extern "C" __global__ void __anyhit__radiance() {
}
// miss sets the background color
extern "C" __global__ void __miss__radiance() {
colorPRD &prd = *(colorPRD*)getPRD<colorPRD>();
// set blue as background color
prd.color = make_float3(0.0f, 0.0f, 1.0f);
}
// -----------------------------------------------
// Shadow rays
extern "C" __global__ void __closesthit__shadow() {
shadowPRD &prd = *(shadowPRD*)getPRD<shadowPRD>();
prd.shadowAtt = 0.0f;
}
// any hit for shadows
extern "C" __global__ void __anyhit__shadow() {
}
// miss for shadows
extern "C" __global__ void __miss__shadow() {
shadowPRD &prd = *(shadowPRD*)getPRD<shadowPRD>();
prd.shadowAtt = 1.0f;
}
// -----------------------------------------------
// Primary Rays
extern "C" __global__ void __raygen__renderFrame() {
// compute a test pattern based on pixel ID
const int ix = optixGetLaunchIndex().x;
const int iy = optixGetLaunchIndex().y;
const auto &camera = optixLaunchParams.camera;
if (optixLaunchParams.frame.frame == 0 && ix == 0 && iy == 0) {
// print info to console
printf("===========================================\n");
printf("Nau Ray-Tracing Debug\n");
const float4 &ld = optixLaunchParams.global->lightPos;
printf("LightPos: %f, %f %f %f\n", ld.x,ld.y,ld.z,ld.w);
printf("Launch dim: %u %u\n", optixGetLaunchDimensions().x, optixGetLaunchDimensions().y);
printf("Rays per pixel squared: %d \n", optixLaunchParams.frame.raysPerPixel);
printf("===========================================\n");
}
// ray payload
colorPRD pixelColorPRD;
pixelColorPRD.color = make_float3(1.f);
float raysPerPixel = float(optixLaunchParams.frame.raysPerPixel);
// half pixel
float2 delta = make_float2(1.0f/raysPerPixel, 1.0f/raysPerPixel);
// compute ray direction
// normalized screen plane position, in [-1, 1]^2
float red = 0.0f, blue = 0.0f, green = 0.0f;
for (int i = 0; i < raysPerPixel; ++i) {
for (int j = 0; j < raysPerPixel; ++j) {
uint32_t seed = tea<4>( ix * optixGetLaunchDimensions().x + iy, i*raysPerPixel + j );
pixelColorPRD.seed = seed;
uint32_t u0, u1;
packPointer( &pixelColorPRD, u0, u1 );
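      // Stratified sampling: the pixel is split into raysPerPixel x raysPerPixel
      // sub-cells and one randomly jittered sample is taken inside cell (i, j).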
const float2 subpixel_jitter = make_float2( i * delta.x + delta.x * rnd( seed ), j * delta.y + delta.y * rnd( seed ) );
const float2 screen(make_float2(ix + subpixel_jitter.x, iy + subpixel_jitter.y)
/ make_float2(optixGetLaunchDimensions().x, optixGetLaunchDimensions().y) * 2.0 - 1.0);
// note: nau already takes into account the field of view and ratio when computing
// camera horizontal and vertical
float3 rayDir = normalize(camera.direction
+ (screen.x ) * camera.horizontal
+ (screen.y ) * camera.vertical);
// trace primary ray
optixTrace(optixLaunchParams.traversable,
camera.position,
rayDir,
0.f, // tmin
1e20f, // tmax
0.0f, // rayTime
OptixVisibilityMask( 255 ),
OPTIX_RAY_FLAG_NONE,//,OPTIX_RAY_FLAG_DISABLE_ANYHIT
PHONG, // SBT offset
RAY_TYPE_COUNT, // SBT stride
PHONG, // missSBTIndex
u0, u1 );
red += pixelColorPRD.color.x / (raysPerPixel*raysPerPixel);
green += pixelColorPRD.color.y / (raysPerPixel*raysPerPixel);
blue += pixelColorPRD.color.z / (raysPerPixel*raysPerPixel);
}
}
//convert float (0-1) to int (0-255)
const int r = int(255.0f*red);
const int g = int(255.0f*green);
const int b = int(255.0f*blue);
// convert to 32-bit rgba value
const uint32_t rgba = 0xff000000
| (r<<0) | (g<<8) | (b<<16);
// compute index
const uint32_t fbIndex = ix + iy*optixGetLaunchDimensions().x;
// write to output buffer
optixLaunchParams.frame.colorBuffer[fbIndex] = rgba;
}
| 38a91cf589cff3b4eb68645c4f9d938d52252d5c.cu | #include "optixParams.h" // our launch params
extern "C" {
__constant__ LaunchParams optixLaunchParams;
}
// a single ray type
enum { PHONG=0, SHADOW, RAY_TYPE_COUNT };
struct colorPRD{
float3 color;
unsigned int seed;
} ;
struct shadowPRD{
float shadowAtt;
unsigned int seed;
} ;
// -------------------------------------------------------
// closest hit computes color based solely on the triangle normal
extern "C" __global__ void __closesthit__radiance() {
colorPRD &prd = *(colorPRD *)getPRD<colorPRD>();
const TriangleMeshSBTData &sbtData
= *(const TriangleMeshSBTData*)optixGetSbtDataPointer();
// retrieve primitive id and indexes
const int primID = optixGetPrimitiveIndex();
const uint3 index = sbtData.index[primID];
// get barycentric coordinates
const float u = optixGetTriangleBarycentrics().x;
const float v = optixGetTriangleBarycentrics().y;
// compute normal
const float4 n
= (1.f-u-v) * sbtData.vertexD.normal[index.x]
+ u * sbtData.vertexD.normal[index.y]
+ v * sbtData.vertexD.normal[index.z];
// intersection position
const float3 pos = optixGetWorldRayOrigin() + optixGetRayTmax()*optixGetWorldRayDirection();
// direction towards light
float3 lPos = make_float3(optixLaunchParams.global->lightPos);
float3 lDir = normalize(lPos - pos);
float3 nn = normalize(make_float3(n));
float intensity = max(dot(lDir, nn),0.0f);
int numRays = optixLaunchParams.global->aoRays;
float ambientOcclusion = 0;
// Ambient Occlusion
shadowPRD AOPRD;
AOPRD.shadowAtt = 1.0f;
AOPRD.seed = prd.seed;
uint32_t u0, u1;
packPointer( &AOPRD, u0, u1 );
for ( int i = 0; i < numRays; ++i) {
const float z1 = rnd(prd.seed);
const float z2 = rnd(prd.seed);
float3 rayDir;
cosine_sample_hemisphere( z1, z2, rayDir );
Onb onb( nn );
onb.inverse_transform( rayDir );
optixTrace(optixLaunchParams.traversable,
pos,
rayDir,
0.01f, // tmin
optixLaunchParams.global->aoRadius, //tmax
0.0f, // rayTime
OptixVisibilityMask( 255 ),
OPTIX_RAY_FLAG_NONE, //OPTIX_RAY_FLAG_NONE,
SHADOW, // SBT offset
RAY_TYPE_COUNT, // SBT stride
SHADOW, // missSBTIndex
u0, u1 );
ambientOcclusion += AOPRD.shadowAtt;
}
prd.color = make_float3(ambientOcclusion / numRays);
}
// any hit to ignore intersections with back facing geometry
extern "C" __global__ void __anyhit__radiance() {
}
// miss sets the background color
extern "C" __global__ void __miss__radiance() {
colorPRD &prd = *(colorPRD*)getPRD<colorPRD>();
// set blue as background color
prd.color = make_float3(0.0f, 0.0f, 1.0f);
}
// -----------------------------------------------
// Shadow rays
extern "C" __global__ void __closesthit__shadow() {
shadowPRD &prd = *(shadowPRD*)getPRD<shadowPRD>();
prd.shadowAtt = 0.0f;
}
// any hit for shadows
extern "C" __global__ void __anyhit__shadow() {
}
// miss for shadows
extern "C" __global__ void __miss__shadow() {
shadowPRD &prd = *(shadowPRD*)getPRD<shadowPRD>();
prd.shadowAtt = 1.0f;
}
// -----------------------------------------------
// Primary Rays
extern "C" __global__ void __raygen__renderFrame() {
// compute a test pattern based on pixel ID
const int ix = optixGetLaunchIndex().x;
const int iy = optixGetLaunchIndex().y;
const auto &camera = optixLaunchParams.camera;
if (optixLaunchParams.frame.frame == 0 && ix == 0 && iy == 0) {
// print info to console
printf("===========================================\n");
printf("Nau Ray-Tracing Debug\n");
const float4 &ld = optixLaunchParams.global->lightPos;
printf("LightPos: %f, %f %f %f\n", ld.x,ld.y,ld.z,ld.w);
printf("Launch dim: %u %u\n", optixGetLaunchDimensions().x, optixGetLaunchDimensions().y);
printf("Rays per pixel squared: %d \n", optixLaunchParams.frame.raysPerPixel);
printf("===========================================\n");
}
// ray payload
colorPRD pixelColorPRD;
pixelColorPRD.color = make_float3(1.f);
float raysPerPixel = float(optixLaunchParams.frame.raysPerPixel);
// half pixel
float2 delta = make_float2(1.0f/raysPerPixel, 1.0f/raysPerPixel);
// compute ray direction
// normalized screen plane position, in [-1, 1]^2
float red = 0.0f, blue = 0.0f, green = 0.0f;
for (int i = 0; i < raysPerPixel; ++i) {
for (int j = 0; j < raysPerPixel; ++j) {
uint32_t seed = tea<4>( ix * optixGetLaunchDimensions().x + iy, i*raysPerPixel + j );
pixelColorPRD.seed = seed;
uint32_t u0, u1;
packPointer( &pixelColorPRD, u0, u1 );
const float2 subpixel_jitter = make_float2( i * delta.x + delta.x * rnd( seed ), j * delta.y + delta.y * rnd( seed ) );
const float2 screen(make_float2(ix + subpixel_jitter.x, iy + subpixel_jitter.y)
/ make_float2(optixGetLaunchDimensions().x, optixGetLaunchDimensions().y) * 2.0 - 1.0);
// note: nau already takes into account the field of view and ratio when computing
// camera horizontal and vertical
float3 rayDir = normalize(camera.direction
+ (screen.x ) * camera.horizontal
+ (screen.y ) * camera.vertical);
// trace primary ray
optixTrace(optixLaunchParams.traversable,
camera.position,
rayDir,
0.f, // tmin
1e20f, // tmax
0.0f, // rayTime
OptixVisibilityMask( 255 ),
OPTIX_RAY_FLAG_NONE,//,OPTIX_RAY_FLAG_DISABLE_ANYHIT
PHONG, // SBT offset
RAY_TYPE_COUNT, // SBT stride
PHONG, // missSBTIndex
u0, u1 );
red += pixelColorPRD.color.x / (raysPerPixel*raysPerPixel);
green += pixelColorPRD.color.y / (raysPerPixel*raysPerPixel);
blue += pixelColorPRD.color.z / (raysPerPixel*raysPerPixel);
}
}
//convert float (0-1) to int (0-255)
const int r = int(255.0f*red);
const int g = int(255.0f*green);
const int b = int(255.0f*blue);
// convert to 32-bit rgba value
const uint32_t rgba = 0xff000000
| (r<<0) | (g<<8) | (b<<16);
// compute index
const uint32_t fbIndex = ix + iy*optixGetLaunchDimensions().x;
// write to output buffer
optixLaunchParams.frame.colorBuffer[fbIndex] = rgba;
}
|
259019bf43e7d7ff2e13056ed5f518b3d30eb1ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MP 1
#include <wb.h>
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
//@@ Insert code to implement vector addition here
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
out[i] = in1[i] + in2[i];
}
}
int main(int argc, char **argv) {
wbArg_t args;
int inputLength;
float *hostInput1;
float *hostInput2;
float *hostOutput;
float *deviceInput1;
float *deviceInput2;
float *deviceOutput;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 =
(float *)wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 =
(float *)wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *)malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
int size = inputLength * sizeof(float);
hipMalloc((void **) &deviceInput1, size);
hipMalloc((void **) &deviceInput2, size);
hipMalloc((void **) &deviceOutput, size);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
hipMemcpy(deviceInput1, hostInput1, size, hipMemcpyHostToDevice);
hipMemcpy(deviceInput2, hostInput2, size, hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 DimGrid(ceil(inputLength/256.0), 1, 1);
dim3 DimBlock(256, 1, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( vecAdd), dim3(DimGrid), dim3(DimBlock), 0, 0, deviceInput1, deviceInput2, deviceOutput, inputLength);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostOutput, deviceOutput, size, hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceInput1);
hipFree(deviceInput2);
hipFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
} | 259019bf43e7d7ff2e13056ed5f518b3d30eb1ac.cu | // MP 1
#include <wb.h>
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
//@@ Insert code to implement vector addition here
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
out[i] = in1[i] + in2[i];
}
}
int main(int argc, char **argv) {
wbArg_t args;
int inputLength;
float *hostInput1;
float *hostInput2;
float *hostOutput;
float *deviceInput1;
float *deviceInput2;
float *deviceOutput;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 =
(float *)wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 =
(float *)wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *)malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
int size = inputLength * sizeof(float);
cudaMalloc((void **) &deviceInput1, size);
cudaMalloc((void **) &deviceInput2, size);
cudaMalloc((void **) &deviceOutput, size);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMemcpy(deviceInput1, hostInput1, size, cudaMemcpyHostToDevice);
cudaMemcpy(deviceInput2, hostInput2, size, cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 DimGrid(ceil(inputLength/256.0), 1, 1);
dim3 DimBlock(256, 1, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
vecAdd<<<DimGrid, DimBlock>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostOutput, deviceOutput, size, cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceInput1);
cudaFree(deviceInput2);
cudaFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
} |
0cdd633c31cdb5a2beee56c9032b2f3f2efef0ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include "timer.h"
#define BLOCK_DIM 64
__global__ void nw_gpu3_kernel (unsigned char * reference_d, unsigned char* query_d, int* matrix_d, unsigned int N, unsigned int round) {
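//Each block fills one BLOCK_DIM x BLOCK_DIM tile of the scoring matrix in
//shared memory, sweeping it along anti-diagonals: on iteration i, the first
//idx threads each score one cell, reading the top / left / top-left
//neighbours from shared memory or, on tile borders, from global memory.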
__shared__ unsigned int q_offset;
__shared__ unsigned int r_offset;
__shared__ unsigned int loop_limit;
__shared__ int matrix_s[BLOCK_DIM*BLOCK_DIM];
if(threadIdx.x == 0){
//Check if it is round 1 or 2 in overall matrix of blocks
if (round == 1){
q_offset = BLOCK_DIM*blockIdx.x;
r_offset = BLOCK_DIM*(gridDim.x - 1 - blockIdx.x);
}
else if (round == 2){
q_offset = BLOCK_DIM*((N + BLOCK_DIM - 1)/BLOCK_DIM - gridDim.x + blockIdx.x );
r_offset = BLOCK_DIM*((N + BLOCK_DIM - 1)/BLOCK_DIM - blockIdx.x - 1);
}
//Loop limit is used as a boundary check
//If the block is not complete and some elements are out of bounds, we can loop fewer times
loop_limit = (((N-q_offset) > BLOCK_DIM && (N-r_offset) > BLOCK_DIM) || N%BLOCK_DIM == 0)? 2*BLOCK_DIM : ((N-q_offset) < BLOCK_DIM && (N-r_offset) < BLOCK_DIM)? 2*(N % BLOCK_DIM) : BLOCK_DIM + N % BLOCK_DIM;
}
__syncthreads();
for (int i = 1; i < loop_limit; i++){
//Check if it is round 1 or 2 within the block
int idx = (i < BLOCK_DIM + 1)? i : 2*BLOCK_DIM - i;
int q_t = 0;
int r_t = 0;
if (i < BLOCK_DIM + 1) {
//This is round 1;
q_t = threadIdx.x;
r_t = idx - threadIdx.x - 1;
}
else {
//This is round 2
q_t = BLOCK_DIM - idx + threadIdx.x;
r_t = BLOCK_DIM - threadIdx.x - 1;
}
int q = q_t + q_offset;
int r = r_t + r_offset;
int max = 0;
if(threadIdx.x < idx && q < N && r < N) {
int top = (q == 0)?((r + 1)*DELETION):(q_t == 0)?(matrix_d[(q - 1)*N + r]):matrix_s[(q_t -1)*BLOCK_DIM + r_t];
int left = (r == 0)?((q + 1)*INSERTION):(r_t == 0)?(matrix_d[q*N + (r - 1)]):matrix_s[q_t*BLOCK_DIM + (r_t - 1)];
int topleft = (q == 0)?(r*DELETION):(r == 0)?(q*INSERTION):(q_t == 0 || r_t == 0)?(matrix_d[(q - 1)*N + (r - 1)]):matrix_s[(q_t - 1)*BLOCK_DIM + (r_t - 1)];
// Find scores based on neighbors
int insertion = top + INSERTION;
int deletion = left + DELETION;
int match = topleft + ((query_d[q] == reference_d[r])?MATCH:MISMATCH);
// Select best score
max = (insertion > deletion)?insertion:deletion;
max = (match > max)?match:max;
matrix_s[q_t*BLOCK_DIM + r_t] = max;
}
__syncthreads();
}
for(int it = 0; it < BLOCK_DIM && q_offset + it < N; it++){
if(r_offset + threadIdx.x < N){
matrix_d[(q_offset + it)*N + r_offset +threadIdx.x] = matrix_s[it*BLOCK_DIM + threadIdx.x];
}
}
}
void nw_gpu3(unsigned char* reference_d, unsigned char* query_d, int* matrix_d, unsigned int N) {
//Each tile is of dimension BLOCK_DIM*BLOCK_DIM
//Max number of threads simultaneously active in a tile is BLOCK_DIM
//So number of threads per block is BLOCK_DIM
int numThreadsPerBlock = BLOCK_DIM;
for (unsigned int i = 1; i < (N + BLOCK_DIM - 1)/BLOCK_DIM + 1; i++) {
//Number of blocks (i.e. of tiles) is equal to the iteration number
int numBlocks = i;
hipLaunchKernelGGL(( nw_gpu3_kernel) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, reference_d, query_d, matrix_d, N, 1);
hipDeviceSynchronize();
}
for (int i = (N + BLOCK_DIM - 1)/BLOCK_DIM -1; i>0; i--){
int numBlocks = i;
hipLaunchKernelGGL(( nw_gpu3_kernel) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, reference_d, query_d, matrix_d, N, 2);
hipDeviceSynchronize();
}
}
| 0cdd633c31cdb5a2beee56c9032b2f3f2efef0ab.cu |
#include "common.h"
#include "timer.h"
#define BLOCK_DIM 64
__global__ void nw_gpu3_kernel (unsigned char * reference_d, unsigned char* query_d, int* matrix_d, unsigned int N, unsigned int round) {
__shared__ unsigned int q_offset;
__shared__ unsigned int r_offset;
__shared__ unsigned int loop_limit;
__shared__ int matrix_s[BLOCK_DIM*BLOCK_DIM];
if(threadIdx.x == 0){
//Check if it is round 1 or 2 in overall matrix of blocks
if (round == 1){
q_offset = BLOCK_DIM*blockIdx.x;
r_offset = BLOCK_DIM*(gridDim.x - 1 - blockIdx.x);
}
else if (round == 2){
q_offset = BLOCK_DIM*((N + BLOCK_DIM - 1)/BLOCK_DIM - gridDim.x + blockIdx.x );
r_offset = BLOCK_DIM*((N + BLOCK_DIM - 1)/BLOCK_DIM - blockIdx.x - 1);
}
//Loop limit is used as a boundary check
//If the block is not complete and some elements are out of bounds, we can loop fewer times
loop_limit = (((N-q_offset) > BLOCK_DIM && (N-r_offset) > BLOCK_DIM) || N%BLOCK_DIM == 0)? 2*BLOCK_DIM : ((N-q_offset) < BLOCK_DIM && (N-r_offset) < BLOCK_DIM)? 2*(N % BLOCK_DIM) : BLOCK_DIM + N % BLOCK_DIM;
}
__syncthreads();
for (int i = 1; i < loop_limit; i++){
//Check if it is round 1 or 2 within the block
int idx = (i < BLOCK_DIM + 1)? i : 2*BLOCK_DIM - i;
int q_t = 0;
int r_t = 0;
if (i < BLOCK_DIM + 1) {
//This is round 1;
q_t = threadIdx.x;
r_t = idx - threadIdx.x - 1;
}
else {
//This is round 2
q_t = BLOCK_DIM - idx + threadIdx.x;
r_t = BLOCK_DIM - threadIdx.x - 1;
}
int q = q_t + q_offset;
int r = r_t + r_offset;
int max = 0;
if(threadIdx.x < idx && q < N && r < N) {
int top = (q == 0)?((r + 1)*DELETION):(q_t == 0)?(matrix_d[(q - 1)*N + r]):matrix_s[(q_t -1)*BLOCK_DIM + r_t];
int left = (r == 0)?((q + 1)*INSERTION):(r_t == 0)?(matrix_d[q*N + (r - 1)]):matrix_s[q_t*BLOCK_DIM + (r_t - 1)];
int topleft = (q == 0)?(r*DELETION):(r == 0)?(q*INSERTION):(q_t == 0 || r_t == 0)?(matrix_d[(q - 1)*N + (r - 1)]):matrix_s[(q_t - 1)*BLOCK_DIM + (r_t - 1)];
// Find scores based on neighbors
int insertion = top + INSERTION;
int deletion = left + DELETION;
int match = topleft + ((query_d[q] == reference_d[r])?MATCH:MISMATCH);
// Select best score
max = (insertion > deletion)?insertion:deletion;
max = (match > max)?match:max;
matrix_s[q_t*BLOCK_DIM + r_t] = max;
}
__syncthreads();
}
for(int it = 0; it < BLOCK_DIM && q_offset + it < N; it++){
if(r_offset + threadIdx.x < N){
matrix_d[(q_offset + it)*N + r_offset +threadIdx.x] = matrix_s[it*BLOCK_DIM + threadIdx.x];
}
}
}
void nw_gpu3(unsigned char* reference_d, unsigned char* query_d, int* matrix_d, unsigned int N) {
//Each tile is of dimension BLOCK_DIM*BLOCK_DIM
//Max number of threads simultaneously active in a tile is BLOCK_DIM
//So number of threads per block is BLOCK_DIM
int numThreadsPerBlock = BLOCK_DIM;
for (unsigned int i = 1; i < (N + BLOCK_DIM - 1)/BLOCK_DIM + 1; i++) {
//Number of blocks (i.e. of tiles) is equal to the iteration number
int numBlocks = i;
nw_gpu3_kernel <<< numBlocks, numThreadsPerBlock >>> (reference_d, query_d, matrix_d, N, 1);
cudaDeviceSynchronize();
}
for (int i = (N + BLOCK_DIM - 1)/BLOCK_DIM -1; i>0; i--){
int numBlocks = i;
nw_gpu3_kernel <<< numBlocks, numThreadsPerBlock >>> (reference_d, query_d, matrix_d, N, 2);
cudaDeviceSynchronize();
}
}
|
448412773da16c22948dbacc6e77c692a678f763.hip | // !!! This is a file automatically generated by hipify!!!
// User: [email protected]
// ExecutionRequest[P:'extinguishing256.cu',P:1,T:1,args:'',q:'cudalb']
// May 16 2019 18:04:36
#include "cputils.h" // Added by tablon
/*
* Simplified simulation of fire extinguishing
*
* Computacion Paralela, Grado en Informatica (Universidad de Valladolid)
* 2018/2019
*
* v1.4
*
* (c) 2019 Arturo Gonzalez Escribano
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include "cputils.h"
#include <hip/hip_runtime.h>
#define RADIUS_TYPE_1 3
#define RADIUS_TYPE_2_3 9
#define THRESHOLD 0.1f
/* Structure to store data of an extinguishing team */
typedef struct {
int x,y;
int type;
int target;
} Team;
/* Structure to store data of a fire focal point */
typedef struct {
int x,y;
int start;
int heat;
int active; // States: 0 Not yet activated; 1 Active; 2 Deactivated by a team
} FocalPoint;
/* Macro function to simplify accessing with two coordinates to a flattened array */
#define accessMat( arr, exp1, exp2 ) arr[ (exp1) * columns + (exp2) ]
/*
* Function: Print usage line in stderr
*/
void show_usage( char *program_name ) {
fprintf(stderr,"Usage: %s <config_file> | <command_line_args>\n", program_name );
fprintf(stderr,"\t<config_file> ::= -f <file_name>\n");
fprintf(stderr,"\t<command_line_args> ::= <rows> <columns> <maxIter> <numTeams> [ <teamX> <teamY> <teamType> ... ] <numFocalPoints> [ <focalX> <focalY> <focalStart> <focalTemperature> ... ]\n");
fprintf(stderr,"\n");
}
#ifdef DEBUG
/*
* Function: Print the current state of the simulation
*/
void print_status( int iteration, int rows, int columns, float *surface, int num_teams, Team *teams, int num_focal, FocalPoint *focal, float global_residual ) {
/*
* You don't need to optimize this function, it is only for pretty printing and debugging purposes.
* It is not compiled in the production versions of the program.
* Thus, it is never used when measuring times in the leaderboard
*/
int i,j;
printf("Iteration: %d\n", iteration );
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
for( i=0; i<rows; i++ ) {
printf("|");
for( j=0; j<columns; j++ ) {
char symbol;
if ( accessMat( surface, i, j ) >= 1000 ) symbol = '*';
else if ( accessMat( surface, i, j ) >= 100 ) symbol = '0' + (int)(accessMat( surface, i, j )/100);
else if ( accessMat( surface, i, j ) >= 50 ) symbol = '+';
else if ( accessMat( surface, i, j ) >= 25 ) symbol = '.';
else symbol = '0';
int t;
int flag_team = 0;
for( t=0; t<num_teams; t++ )
if ( teams[t].x == i && teams[t].y == j ) { flag_team = 1; break; }
if ( flag_team ) printf("[%c]", symbol );
else {
int f;
int flag_focal = 0;
for( f=0; f<num_focal; f++ )
if ( focal[f].x == i && focal[f].y == j && focal[f].active == 1 ) { flag_focal = 1; break; }
if ( flag_focal ) printf("(%c)", symbol );
else printf(" %c ", symbol );
}
}
printf("|\n");
}
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
printf("Global residual: %f\n\n", global_residual);
}
#endif
#define currentGPU 0
__global__ void initializeMatriz(float *surface, int size){
int IDX_Thread = threadIdx.x;
int IDX_Block = blockIdx.x;
int threads_per_block = blockDim.x;
int i = IDX_Block * threads_per_block + IDX_Thread;
if(i<size)
surface[i] = 0.0;
}
__global__ void activateFocalPoints(float *surface, FocalPoint *focal, int size, int iter, int *num_deactivated, int *first_activation){
int IDX_Thread = threadIdx.x;
int IDX_Block = blockIdx.x;
int threads_per_block = blockDim.x;
int i = IDX_Block * threads_per_block + IDX_Thread;
if(i<size){
if(focal[i].start == iter){
focal[i].active = 1;
first_activation[i]=1;
} else if( focal[i].active==2) num_deactivated[i]=1;
}
}
__global__ void stepKernel(float *surfaceA, float *surfaceB, int rows, int columns){
int IDX_Thread = threadIdx.x;
int IDX_Block = blockIdx.x;
int threads_per_block = blockDim.x;
int gid = IDX_Block * threads_per_block + IDX_Thread;
int i = gid/columns;
int j = gid%columns;
if(i<rows*columns)
if(i>0)
if(j>0)
if(i<rows-1)
if(j<columns-1)
accessMat( surfaceA, i, j ) = (
accessMat( surfaceB, i-1, j ) +
accessMat( surfaceB, i+1, j ) +
accessMat( surfaceB, i, j-1 ) +
accessMat( surfaceB, i, j+1 ) ) / 4;
}
__global__ void updateHeat(float *surface, FocalPoint *focal, int size, int rows, int columns){
int IDX_Thread = threadIdx.x;
int IDX_Block = blockIdx.x;
int threads_per_block = blockDim.x;
int i = IDX_Block * threads_per_block + IDX_Thread;
if(i<size)
if ( focal[i].active == 1 ){
accessMat( surface, focal[i].x, focal[i].y ) = focal[i].heat;
}
}
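// Note: despite its name, reductionGlobalResidual below is not a tree reduction;
// every thread whose cell differs by at least THRESHOLD writes that same constant
// to *global, so the concurrent writes agree and the host only reads it as a flag.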
__global__ void reductionGlobalResidual( float *surface, float *surfaceCopy, int rows, int columns, float *global ){
int IDX_Thread = threadIdx.x;
int IDX_Block = blockIdx.x;
int threads_per_block = blockDim.x;
int gid = IDX_Block * threads_per_block + IDX_Thread;
int i = gid/columns;
int j = gid%columns;
if(i<rows*columns)
if ( fabs( accessMat( surfaceCopy, i, j ) - accessMat( surface, i, j ) ) >= THRESHOLD )
*global=THRESHOLD;
}
/*
* MAIN PROGRAM
*/
int main(int argc, char *argv[]) {
int i,j,t;
// Simulation data
int rows, columns, max_iter;
float *surface, *surfaceCopy;
int num_teams, num_focal;
Team *teams;
FocalPoint *focal;
/* 1. Read simulation arguments */
/* 1.1. Check minimum number of arguments */
if (argc<2) {
fprintf(stderr,"-- Error in arguments: No arguments\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
int read_from_file = ! strcmp( argv[1], "-f" );
/* 1.2. Read configuration from file */
if ( read_from_file ) {
/* 1.2.1. Open file */
if (argc<3) {
fprintf(stderr,"-- Error in arguments: file-name argument missing\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
FILE *args = cp_abrir_fichero( argv[2] );
if ( args == NULL ) {
fprintf(stderr,"-- Error in file: not found: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
/* 1.2.2. Read surface and maximum number of iterations */
int ok;
ok = fscanf(args, "%d %d %d", &rows, &columns, &max_iter);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading rows, columns, max_iter from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
if ( surface == NULL || surfaceCopy == NULL ) {
fprintf(stderr,"-- Error allocating: surface structures\n");
exit( EXIT_FAILURE );
}
/* 1.2.3. Teams information */
ok = fscanf(args, "%d", &num_teams );
if ( ok != 1 ) {
fprintf(stderr,"-- Error file, reading num_teams from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
ok = fscanf(args, "%d %d %d", &teams[i].x, &teams[i].y, &teams[i].type);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading team %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
}
/* 1.2.4. Focal points information */
ok = fscanf(args, "%d", &num_focal );
if ( ok != 1 ) {
fprintf(stderr,"-- Error in file: reading num_focal from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
ok = fscanf(args, "%d %d %d %d", &focal[i].x, &focal[i].y, &focal[i].start, &focal[i].heat);
if ( ok != 4 ) {
fprintf(stderr,"-- Error in file: reading focal point %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
focal[i].active = 0;
}
}
/* 1.3. Read configuration from arguments */
else {
/* 1.3.1. Check minimum number of arguments */
if (argc<6) {
fprintf(stderr, "-- Error in arguments: not enough arguments when reading configuration from the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
/* 1.3.2. Surface and maximum number of iterations */
rows = atoi( argv[1] );
columns = atoi( argv[2] );
max_iter = atoi( argv[3] );
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
/* 1.3.3. Teams information */
num_teams = atoi( argv[4] );
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
if ( argc < num_teams*3 + 5 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
teams[i].x = atoi( argv[5+i*3] );
teams[i].y = atoi( argv[6+i*3] );
teams[i].type = atoi( argv[7+i*3] );
}
/* 1.3.4. Focal points information */
int focal_args = 5 + i*3;
if ( argc < focal_args+1 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for the number of focal points\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
num_focal = atoi( argv[focal_args] );
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
if ( argc < focal_args + 1 + num_focal*4 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
focal[i].x = atoi( argv[focal_args+i*4+1] );
focal[i].y = atoi( argv[focal_args+i*4+2] );
focal[i].start = atoi( argv[focal_args+i*4+3] );
focal[i].heat = atoi( argv[focal_args+i*4+4] );
focal[i].active = 0;
}
/* 1.3.5. Sanity check: No extra arguments at the end of line */
if ( argc > focal_args+i*4+1 ) {
fprintf(stderr,"-- Error in arguments: extra arguments at the end of the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
}
#ifdef DEBUG
/* 1.4. Print arguments */
printf("Arguments, Rows: %d, Columns: %d, max_iter: %d\n", rows, columns, max_iter);
printf("Arguments, Teams: %d, Focal points: %d\n", num_teams, num_focal );
for( i=0; i<num_teams; i++ ) {
printf("\tTeam %d, position (%d,%d), type: %d\n", i, teams[i].x, teams[i].y, teams[i].type );
}
for( i=0; i<num_focal; i++ ) {
printf("\tFocal_point %d, position (%d,%d), start time: %d, temperature: %d\n", i,
focal[i].x,
focal[i].y,
focal[i].start,
focal[i].heat );
}
#endif // DEBUG
/* 2. Select GPU and start global timer */
hipSetDevice(0);
hipDeviceSynchronize();
double ttotal = cp_Wtime();
/*
*
* START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT
*
*/
int threads_per_block;
int blocks_per_grid_surface;
int blocks_per_grid_focal;
float *dSurface;
float *dSurfaceCopy;
FocalPoint *dFocal;
float *dGlobal;
hipMalloc( (void**) &dSurface, sizeof(float)* (rows*columns));
hipMalloc( (void**) &dSurfaceCopy, sizeof(float)* (rows*columns));
hipMalloc( (void**) &dFocal, sizeof(FocalPoint)* (num_focal));
hipMalloc( (void**) &dGlobal, sizeof(float));
hipMemcpy(dFocal, focal, sizeof(FocalPoint)* (num_focal), hipMemcpyHostToDevice);
threads_per_block = 256;
blocks_per_grid_surface = (rows * columns / threads_per_block) + 1;
blocks_per_grid_focal = (num_focal / threads_per_block) + 1;
//initializeMatriz<<<blocks_per_grid_surface, threads_per_block>>>(dSurfaceCopy, rows*columns);
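// hipMemset fills bytes, so the call below zeroes the surface correctly only
// because the all-zero bit pattern is 0.0f; the 0.0 argument is converted to the byte value 0.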
hipMemset(dSurface, 0.0, sizeof(float)* (rows*columns));
/* 4. Simulation */
int iter;
int flag_stability = 0;
int first_activation;
int num_deactivated;
int hFirst_activation[num_focal];
int hNum_deactivated[num_focal];
int *dFirst_activation;
int *dNum_deactivated;
hipMalloc( (void**) &dFirst_activation, sizeof(int)* (num_focal));
hipMalloc( (void**) &dNum_deactivated, sizeof(int)* (num_focal));
hipMemset(dFirst_activation, 0, sizeof(int)* (num_focal)); // zero-init: the kernel only sets these flags conditionally
hipMemset(dNum_deactivated, 0, sizeof(int)* (num_focal));
for( iter=0; iter<max_iter && ! flag_stability; iter++ ) {
hipLaunchKernelGGL(( activateFocalPoints), dim3(blocks_per_grid_focal), dim3(threads_per_block), 0, 0, dSurface, dFocal, num_focal, iter, dNum_deactivated, dFirst_activation);
hipMemcpy(hNum_deactivated, dNum_deactivated, sizeof(int)* (num_focal), hipMemcpyDeviceToHost);
hipMemcpy(hFirst_activation, dFirst_activation, sizeof(int)* (num_focal), hipMemcpyDeviceToHost);
num_deactivated=0;
first_activation=0;
for(i=0; i<num_focal; i++){
num_deactivated += hNum_deactivated[i];
first_activation += hFirst_activation[i];
}
if(!first_activation) continue;
/* 4.2. Propagate heat (10 steps per each team movement) */
float global_residual = 0.0f;
int step;
////PRINT
// hipMemcpy(surfaceCopy, dSurfaceCopy, sizeof(float)* rows*columns, hipMemcpyDeviceToHost);
// printf("ITER: %d\tSTEP: %d\n", iter, step);
// for(i=1; i<rows; i++){
// for(j=1; j<columns; j++){
// printf("%f\t\t", accessMat( surfaceCopy, i, j ));
// }
// printf("\n");
// }
// printf("\n\n\n");
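/* The two buffers are used in a ping-pong fashion below: on even steps the
stencil reads dSurface and writes dSurfaceCopy, on odd steps the roles are
swapped, so no device-to-device copy is needed between propagation steps. */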
for( step=0; step<10; step++ ) {
// Odd step: propagate from dSurfaceCopy into dSurface
if(step%2){
/* 4.2.1. Update heat on active focal points */
hipLaunchKernelGGL(( updateHeat), dim3(blocks_per_grid_focal), dim3(threads_per_block), 0, 0, dSurfaceCopy, dFocal, num_focal, rows, columns);
/* 4.2.3. Update surface values (skip borders) */
hipLaunchKernelGGL(( stepKernel), dim3(blocks_per_grid_surface), dim3(threads_per_block), 0, 0, dSurface, dSurfaceCopy, rows, columns);
// Even step: propagate from dSurface into dSurfaceCopy
}else{
/* 4.2.1. Update heat on active focal points */
hipLaunchKernelGGL(( updateHeat), dim3(blocks_per_grid_focal), dim3(threads_per_block), 0, 0, dSurface, dFocal, num_focal, rows, columns);
/* 4.2.3. Update surface values (skip borders) */
hipLaunchKernelGGL(( stepKernel), dim3(blocks_per_grid_surface), dim3(threads_per_block), 0, 0, dSurfaceCopy, dSurface, rows, columns);
/* 4.2.4. Compute the maximum residual difference (absolute value) */
if(step==0){
hipMemset(dGlobal, 0.0, sizeof(float));
hipLaunchKernelGGL(( reductionGlobalResidual), dim3(blocks_per_grid_surface), dim3(threads_per_block), 0, 0, dSurface, dSurfaceCopy, rows, columns, dGlobal);
hipMemcpy(&global_residual, dGlobal, sizeof(float), hipMemcpyDeviceToHost);
}
}
}
hipMemcpy(surface, dSurface, sizeof(float)* rows*columns, hipMemcpyDeviceToHost);
hipMemcpy(focal, dFocal, sizeof(FocalPoint)* num_focal, hipMemcpyDeviceToHost);
/* If the global residual is lower than THRESHOLD, we have reached enough stability, stop simulation at the end of this iteration */
if( num_deactivated == num_focal)
if(global_residual < THRESHOLD ) flag_stability = 1;
/* 4.3. Move teams */
if(num_deactivated < num_focal)
for( t=0; t<num_teams; t++ ) {
/* 4.3.1. Choose nearest focal point */
float distance = FLT_MAX;
int target = -1;
for( j=0; j<num_focal; j++ ) {
if ( focal[j].active != 1 ) continue; // Skip non-active focal points
float dx = focal[j].x - teams[t].x;
float dy = focal[j].y - teams[t].y;
float local_distance = sqrtf( dx*dx + dy*dy );
if ( local_distance < distance ) {
distance = local_distance;
target = j;
}
}
/* 4.3.2. Annotate target for the next stage */
teams[t].target = target;
/* 4.3.3. No active focal point to choose, no movement */
if ( target == -1 ) continue;
/* 4.3.4. Move in the focal point direction */
if ( teams[t].type == 1 ) {
// Type 1: Can move in diagonal
if ( focal[target].x < teams[t].x ) teams[t].x--;
if ( focal[target].x > teams[t].x ) teams[t].x++;
if ( focal[target].y < teams[t].y ) teams[t].y--;
if ( focal[target].y > teams[t].y ) teams[t].y++;
}
else if ( teams[t].type == 2 ) {
// Type 2: First in horizontal direction, then in vertical direction
if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
else if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
}
else {
// Type 3: First in vertical direction, then in horizontal direction
if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
else if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
}
}
/* 4.4. Team actions */
for( t=0; t<num_teams; t++ ) {
/* 4.4.1. Deactivate the target focal point when it is reached */
int target = teams[t].target;
if ( target != -1 && focal[target].x == teams[t].x && focal[target].y == teams[t].y
&& focal[target].active == 1 )
focal[target].active = 2;
/* 4.4.2. Reduce heat in a circle around the team */
int radius;
// Influence area of fixed radius depending on type
if ( teams[t].type == 1 ) radius = RADIUS_TYPE_1;
else radius = RADIUS_TYPE_2_3;
for( i=teams[t].x-radius; i<=teams[t].x+radius; i++ ) {
if( i < 1 ) continue; // Out of the heated surface
if( i >= rows-1 ) continue;
for( j=teams[t].y-radius; j<=teams[t].y+radius; j++ ) {
if( j < 1 ) continue; // Out of the heated surface
if( j >= columns-1 ) continue;
float dx = teams[t].x - i;
float dy = teams[t].y - j;
float distance = sqrtf( dx*dx + dy*dy );
if ( distance <= radius ) {
accessMat( surface, i, j ) = accessMat( surface, i, j ) * ( 1 - 0.25 ); // Team efficiency factor
}
}
}
}
hipMemcpy(dSurface, surface, sizeof(float)* rows*columns, hipMemcpyHostToDevice);
hipMemcpy(dFocal, focal, sizeof(FocalPoint)* num_focal, hipMemcpyHostToDevice);
#ifdef DEBUG
/* 4.5. DEBUG: Print the current state of the simulation at the end of each iteration */
print_status( iter, rows, columns, surface, num_teams, teams, num_focal, focal, global_residual );
#endif // DEBUG
}
hipFree(dSurface);
hipFree(dSurfaceCopy);
hipFree(dFocal);
hipDeviceReset();
/*
*
* STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT
*
*/
/* 5. Stop global time */
hipDeviceSynchronize();
ttotal = cp_Wtime() - ttotal;
/* 6. Output for leaderboard */
printf("\n");
/* 6.1. Total computation time */
printf("Time: %lf\n", ttotal );
/* 6.2. Results: Number of iterations, position of teams, residual heat on the focal points */
printf("Result: %d", iter);
/*
for (i=0; i<num_teams; i++)
printf(" %d %d", teams[i].x, teams[i].y );
*/
for (i=0; i<num_focal; i++)
printf(" %.6f", accessMat( surface, focal[i].x, focal[i].y ) );
printf("\n");
/* 7. Free resources */
free( teams );
free( focal );
free( surface );
free( surfaceCopy );
/* 8. End */
return 0;
}
| 448412773da16c22948dbacc6e77c692a678f763.cu | // User: [email protected]
// ExecutionRequest[P:'extinguishing256.cu',P:1,T:1,args:'',q:'cudalb']
// May 16 2019 18:04:36
#include "cputils.h" // Added by tablon
/*
* Simplified simulation of fire extinguishing
*
* Computacion Paralela, Grado en Informatica (Universidad de Valladolid)
* 2018/2019
*
* v1.4
*
* (c) 2019 Arturo Gonzalez Escribano
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include "cputils.h"
#include <cuda.h>
#define RADIUS_TYPE_1 3
#define RADIUS_TYPE_2_3 9
#define THRESHOLD 0.1f
/* Structure to store data of an extinguishing team */
typedef struct {
int x,y;
int type;
int target;
} Team;
/* Structure to store data of a fire focal point */
typedef struct {
int x,y;
int start;
int heat;
int active; // States: 0 Not yet activated; 1 Active; 2 Deactivated by a team
} FocalPoint;
/* Macro function to simplify accessing with two coordinates to a flattened array */
#define accessMat( arr, exp1, exp2 ) arr[ (exp1) * columns + (exp2) ]
/*
* Function: Print usage line in stderr
*/
void show_usage( char *program_name ) {
fprintf(stderr,"Usage: %s <config_file> | <command_line_args>\n", program_name );
fprintf(stderr,"\t<config_file> ::= -f <file_name>\n");
fprintf(stderr,"\t<command_line_args> ::= <rows> <columns> <maxIter> <numTeams> [ <teamX> <teamY> <teamType> ... ] <numFocalPoints> [ <focalX> <focalY> <focalStart> <focalTemperature> ... ]\n");
fprintf(stderr,"\n");
}
#ifdef DEBUG
/*
* Function: Print the current state of the simulation
*/
void print_status( int iteration, int rows, int columns, float *surface, int num_teams, Team *teams, int num_focal, FocalPoint *focal, float global_residual ) {
/*
* You don't need to optimize this function, it is only for pretty printing and debugging purposes.
* It is not compiled in the production versions of the program.
* Thus, it is never used when measuring times in the leaderboard
*/
int i,j;
printf("Iteration: %d\n", iteration );
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
for( i=0; i<rows; i++ ) {
printf("|");
for( j=0; j<columns; j++ ) {
char symbol;
if ( accessMat( surface, i, j ) >= 1000 ) symbol = '*';
else if ( accessMat( surface, i, j ) >= 100 ) symbol = '0' + (int)(accessMat( surface, i, j )/100);
else if ( accessMat( surface, i, j ) >= 50 ) symbol = '+';
else if ( accessMat( surface, i, j ) >= 25 ) symbol = '.';
else symbol = '0';
int t;
int flag_team = 0;
for( t=0; t<num_teams; t++ )
if ( teams[t].x == i && teams[t].y == j ) { flag_team = 1; break; }
if ( flag_team ) printf("[%c]", symbol );
else {
int f;
int flag_focal = 0;
for( f=0; f<num_focal; f++ )
if ( focal[f].x == i && focal[f].y == j && focal[f].active == 1 ) { flag_focal = 1; break; }
if ( flag_focal ) printf("(%c)", symbol );
else printf(" %c ", symbol );
}
}
printf("|\n");
}
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
printf("Global residual: %f\n\n", global_residual);
}
#endif
#define currentGPU 0
__global__ void initializeMatriz(float *surface, int size){
int IDX_Thread = threadIdx.x;
int IDX_Block = blockIdx.x;
int threads_per_block = blockDim.x;
int i = IDX_Block * threads_per_block + IDX_Thread;
if(i<size)
surface[i] = 0.0;
}
__global__ void activateFocalPoints(float *surface, FocalPoint *focal, int size, int iter, int *num_deactivated, int *first_activation){
int IDX_Thread = threadIdx.x;
int IDX_Block = blockIdx.x;
int threads_per_block = blockDim.x;
int i = IDX_Block * threads_per_block + IDX_Thread;
if(i<size){
if(focal[i].start == iter){
focal[i].active = 1;
first_activation[i]=1;
} else if( focal[i].active==2) num_deactivated[i]=1;
}
}
__global__ void stepKernel(float *surfaceA, float *surfaceB, int rows, int columns){
int IDX_Thread = threadIdx.x;
int IDX_Block = blockIdx.x;
int threads_per_block = blockDim.x;
int gid = IDX_Block * threads_per_block + IDX_Thread;
int i = gid/columns;
int j = gid%columns;
if(i<rows*columns)
if(i>0)
if(j>0)
if(i<rows-1)
if(j<columns-1)
accessMat( surfaceA, i, j ) = (
accessMat( surfaceB, i-1, j ) +
accessMat( surfaceB, i+1, j ) +
accessMat( surfaceB, i, j-1 ) +
accessMat( surfaceB, i, j+1 ) ) / 4;
}
__global__ void updateHeat(float *surface, FocalPoint *focal, int size, int rows, int columns){
int IDX_Thread = threadIdx.x;
int IDX_Block = blockIdx.x;
int threads_per_block = blockDim.x;
int i = IDX_Block * threads_per_block + IDX_Thread;
if(i<size)
if ( focal[i].active == 1 ){
accessMat( surface, focal[i].x, focal[i].y ) = focal[i].heat;
}
}
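/* Note: reductionGlobalResidual below is not a full parallel reduction; every
thread that sees a difference >= THRESHOLD writes the same value into *global,
a benign race that is only used as a "keep iterating" flag on the host. */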
__global__ void reductionGlobalResidual( float *surface, float *surfaceCopy, int rows, int columns, float *global ){
int IDX_Thread = threadIdx.x;
int IDX_Block = blockIdx.x;
int threads_per_block = blockDim.x;
int gid = IDX_Block * threads_per_block + IDX_Thread;
int i = gid/columns;
int j = gid%columns;
if(gid<rows*columns) // guard on the flat index so padding threads never read past the surface
if ( fabs( accessMat( surfaceCopy, i, j ) - accessMat( surface, i, j ) ) >= THRESHOLD )
*global=THRESHOLD;
}
/*
* MAIN PROGRAM
*/
int main(int argc, char *argv[]) {
int i,j,t;
// Simulation data
int rows, columns, max_iter;
float *surface, *surfaceCopy;
int num_teams, num_focal;
Team *teams;
FocalPoint *focal;
/* 1. Read simulation arguments */
/* 1.1. Check minimum number of arguments */
if (argc<2) {
fprintf(stderr,"-- Error in arguments: No arguments\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
int read_from_file = ! strcmp( argv[1], "-f" );
/* 1.2. Read configuration from file */
if ( read_from_file ) {
/* 1.2.1. Open file */
if (argc<3) {
fprintf(stderr,"-- Error in arguments: file-name argument missing\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
FILE *args = cp_abrir_fichero( argv[2] );
if ( args == NULL ) {
fprintf(stderr,"-- Error in file: not found: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
/* 1.2.2. Read surface and maximum number of iterations */
int ok;
ok = fscanf(args, "%d %d %d", &rows, &columns, &max_iter);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading rows, columns, max_iter from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
if ( surface == NULL || surfaceCopy == NULL ) {
fprintf(stderr,"-- Error allocating: surface structures\n");
exit( EXIT_FAILURE );
}
/* 1.2.3. Teams information */
ok = fscanf(args, "%d", &num_teams );
if ( ok != 1 ) {
fprintf(stderr,"-- Error file, reading num_teams from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
ok = fscanf(args, "%d %d %d", &teams[i].x, &teams[i].y, &teams[i].type);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading team %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
}
/* 1.2.4. Focal points information */
ok = fscanf(args, "%d", &num_focal );
if ( ok != 1 ) {
fprintf(stderr,"-- Error in file: reading num_focal from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
ok = fscanf(args, "%d %d %d %d", &focal[i].x, &focal[i].y, &focal[i].start, &focal[i].heat);
if ( ok != 4 ) {
fprintf(stderr,"-- Error in file: reading focal point %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
focal[i].active = 0;
}
}
/* 1.3. Read configuration from arguments */
else {
/* 1.3.1. Check minimum number of arguments */
if (argc<6) {
fprintf(stderr, "-- Error in arguments: not enough arguments when reading configuration from the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
/* 1.3.2. Surface and maximum number of iterations */
rows = atoi( argv[1] );
columns = atoi( argv[2] );
max_iter = atoi( argv[3] );
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
/* 1.3.3. Teams information */
num_teams = atoi( argv[4] );
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
if ( argc < num_teams*3 + 5 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
teams[i].x = atoi( argv[5+i*3] );
teams[i].y = atoi( argv[6+i*3] );
teams[i].type = atoi( argv[7+i*3] );
}
/* 1.3.4. Focal points information */
int focal_args = 5 + i*3;
if ( argc < focal_args+1 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for the number of focal points\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
num_focal = atoi( argv[focal_args] );
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
if ( argc < focal_args + 1 + num_focal*4 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
focal[i].x = atoi( argv[focal_args+i*4+1] );
focal[i].y = atoi( argv[focal_args+i*4+2] );
focal[i].start = atoi( argv[focal_args+i*4+3] );
focal[i].heat = atoi( argv[focal_args+i*4+4] );
focal[i].active = 0;
}
/* 1.3.5. Sanity check: No extra arguments at the end of line */
if ( argc > focal_args+i*4+1 ) {
fprintf(stderr,"-- Error in arguments: extra arguments at the end of the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
}
#ifdef DEBUG
/* 1.4. Print arguments */
printf("Arguments, Rows: %d, Columns: %d, max_iter: %d\n", rows, columns, max_iter);
printf("Arguments, Teams: %d, Focal points: %d\n", num_teams, num_focal );
for( i=0; i<num_teams; i++ ) {
printf("\tTeam %d, position (%d,%d), type: %d\n", i, teams[i].x, teams[i].y, teams[i].type );
}
for( i=0; i<num_focal; i++ ) {
printf("\tFocal_point %d, position (%d,%d), start time: %d, temperature: %d\n", i,
focal[i].x,
focal[i].y,
focal[i].start,
focal[i].heat );
}
#endif // DEBUG
/* 2. Select GPU and start global timer */
cudaSetDevice(0);
cudaDeviceSynchronize();
double ttotal = cp_Wtime();
/*
*
* START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT
*
*/
int threads_per_block;
int blocks_per_grid_surface;
int blocks_per_grid_focal;
float *dSurface;
float *dSurfaceCopy;
FocalPoint *dFocal;
float *dGlobal;
cudaMalloc( (void**) &dSurface, sizeof(float)* (rows*columns));
cudaMalloc( (void**) &dSurfaceCopy, sizeof(float)* (rows*columns));
cudaMalloc( (void**) &dFocal, sizeof(FocalPoint)* (num_focal));
cudaMalloc( (void**) &dGlobal, sizeof(float));
cudaMemcpy(dFocal, focal, sizeof(FocalPoint)* (num_focal), cudaMemcpyHostToDevice);
threads_per_block = 256;
blocks_per_grid_surface = (rows * columns / threads_per_block) + 1;
blocks_per_grid_focal = (num_focal / threads_per_block) + 1;
//initializeMatriz<<<blocks_per_grid_surface, threads_per_block>>>(dSurfaceCopy, rows*columns);
cudaMemset(dSurface, 0.0, sizeof(float)* (rows*columns));
/* 4. Simulation */
int iter;
int flag_stability = 0;
int first_activation;
int num_deactivated;
int hFirst_activation[num_focal];
int hNum_deactivated[num_focal];
int *dFirst_activation;
int *dNum_deactivated;
cudaMalloc( (void**) &dFirst_activation, sizeof(int)* (num_focal));
cudaMalloc( (void**) &dNum_deactivated, sizeof(int)* (num_focal));
cudaMemset(dFirst_activation, 0, sizeof(int)* (num_focal)); // zero-init: the kernel only sets these flags conditionally
cudaMemset(dNum_deactivated, 0, sizeof(int)* (num_focal));
for( iter=0; iter<max_iter && ! flag_stability; iter++ ) {
activateFocalPoints<<<blocks_per_grid_focal, threads_per_block>>>(dSurface, dFocal, num_focal, iter, dNum_deactivated, dFirst_activation);
cudaMemcpy(hNum_deactivated, dNum_deactivated, sizeof(int)* (num_focal), cudaMemcpyDeviceToHost);
cudaMemcpy(hFirst_activation, dFirst_activation, sizeof(int)* (num_focal), cudaMemcpyDeviceToHost);
num_deactivated=0;
first_activation=0;
for(i=0; i<num_focal; i++){
num_deactivated += hNum_deactivated[i];
first_activation += hFirst_activation[i];
}
if(!first_activation) continue;
/* 4.2. Propagate heat (10 steps per each team movement) */
float global_residual = 0.0f;
int step;
////PRINT
// cudaMemcpy(surfaceCopy, dSurfaceCopy, sizeof(float)* rows*columns, cudaMemcpyDeviceToHost);
// printf("ITER: %d\tSTEP: %d\n", iter, step);
// for(i=1; i<rows; i++){
// for(j=1; j<columns; j++){
// printf("%f\t\t", accessMat( surfaceCopy, i, j ));
// }
// printf("\n");
// }
// printf("\n\n\n");
for( step=0; step<10; step++ ) {
// Odd step: propagate from dSurfaceCopy into dSurface
if(step%2){
/* 4.2.1. Update heat on active focal points */
updateHeat<<<blocks_per_grid_focal, threads_per_block>>>(dSurfaceCopy, dFocal, num_focal, rows, columns);
/* 4.2.3. Update surface values (skip borders) */
stepKernel<<<blocks_per_grid_surface, threads_per_block>>>(dSurface, dSurfaceCopy, rows, columns);
// Even step: propagate from dSurface into dSurfaceCopy
}else{
/* 4.2.1. Update heat on active focal points */
updateHeat<<<blocks_per_grid_focal, threads_per_block>>>(dSurface, dFocal, num_focal, rows, columns);
/* 4.2.3. Update surface values (skip borders) */
stepKernel<<<blocks_per_grid_surface, threads_per_block>>>(dSurfaceCopy, dSurface, rows, columns);
/* 4.2.4. Compute the maximum residual difference (absolute value) */
if(step==0){
cudaMemset(dGlobal, 0.0, sizeof(float));
reductionGlobalResidual<<<blocks_per_grid_surface, threads_per_block>>>(dSurface, dSurfaceCopy, rows, columns, dGlobal);
cudaMemcpy(&global_residual, dGlobal, sizeof(float), cudaMemcpyDeviceToHost);
}
}
}
cudaMemcpy(surface, dSurface, sizeof(float)* rows*columns, cudaMemcpyDeviceToHost);
cudaMemcpy(focal, dFocal, sizeof(FocalPoint)* num_focal, cudaMemcpyDeviceToHost);
/* If the global residual is lower than THRESHOLD, we have reached enough stability, stop simulation at the end of this iteration */
if( num_deactivated == num_focal)
if(global_residual < THRESHOLD ) flag_stability = 1;
/* 4.3. Move teams */
if(num_deactivated < num_focal)
for( t=0; t<num_teams; t++ ) {
/* 4.3.1. Choose nearest focal point */
float distance = FLT_MAX;
int target = -1;
for( j=0; j<num_focal; j++ ) {
if ( focal[j].active != 1 ) continue; // Skip non-active focal points
float dx = focal[j].x - teams[t].x;
float dy = focal[j].y - teams[t].y;
float local_distance = sqrtf( dx*dx + dy*dy );
if ( local_distance < distance ) {
distance = local_distance;
target = j;
}
}
/* 4.3.2. Annotate target for the next stage */
teams[t].target = target;
/* 4.3.3. No active focal point to choose, no movement */
if ( target == -1 ) continue;
/* 4.3.4. Move in the focal point direction */
if ( teams[t].type == 1 ) {
// Type 1: Can move in diagonal
if ( focal[target].x < teams[t].x ) teams[t].x--;
if ( focal[target].x > teams[t].x ) teams[t].x++;
if ( focal[target].y < teams[t].y ) teams[t].y--;
if ( focal[target].y > teams[t].y ) teams[t].y++;
}
else if ( teams[t].type == 2 ) {
// Type 2: First in horizontal direction, then in vertical direction
if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
else if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
}
else {
// Type 3: First in vertical direction, then in horizontal direction
if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
else if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
}
}
/* 4.4. Team actions */
for( t=0; t<num_teams; t++ ) {
/* 4.4.1. Deactivate the target focal point when it is reached */
int target = teams[t].target;
if ( target != -1 && focal[target].x == teams[t].x && focal[target].y == teams[t].y
&& focal[target].active == 1 )
focal[target].active = 2;
/* 4.4.2. Reduce heat in a circle around the team */
int radius;
// Influence area of fixed radius depending on type
if ( teams[t].type == 1 ) radius = RADIUS_TYPE_1;
else radius = RADIUS_TYPE_2_3;
for( i=teams[t].x-radius; i<=teams[t].x+radius; i++ ) {
if( i < 1 ) continue; // Out of the heated surface
if( i >= rows-1 ) continue;
for( j=teams[t].y-radius; j<=teams[t].y+radius; j++ ) {
if( j < 1 ) continue; // Out of the heated surface
if( j >= columns-1 ) continue;
float dx = teams[t].x - i;
float dy = teams[t].y - j;
float distance = sqrtf( dx*dx + dy*dy );
if ( distance <= radius ) {
accessMat( surface, i, j ) = accessMat( surface, i, j ) * ( 1 - 0.25 ); // Team efficiency factor
}
}
}
}
cudaMemcpy(dSurface, surface, sizeof(float)* rows*columns, cudaMemcpyHostToDevice);
cudaMemcpy(dFocal, focal, sizeof(FocalPoint)* num_focal, cudaMemcpyHostToDevice);
#ifdef DEBUG
/* 4.5. DEBUG: Print the current state of the simulation at the end of each iteration */
print_status( iter, rows, columns, surface, num_teams, teams, num_focal, focal, global_residual );
#endif // DEBUG
}
cudaFree(dSurface);
cudaFree(dSurfaceCopy);
cudaFree(dFocal);
cudaDeviceReset();
/*
*
* STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT
*
*/
/* 5. Stop global time */
cudaDeviceSynchronize();
ttotal = cp_Wtime() - ttotal;
/* 6. Output for leaderboard */
printf("\n");
/* 6.1. Total computation time */
printf("Time: %lf\n", ttotal );
/* 6.2. Results: Number of iterations, position of teams, residual heat on the focal points */
printf("Result: %d", iter);
/*
for (i=0; i<num_teams; i++)
printf(" %d %d", teams[i].x, teams[i].y );
*/
for (i=0; i<num_focal; i++)
printf(" %.6f", accessMat( surface, focal[i].x, focal[i].y ) );
printf("\n");
/* 7. Free resources */
free( teams );
free( focal );
free( surface );
free( surfaceCopy );
/* 8. End */
return 0;
}
|
ce6a3894a3312506e7d3dca1e834fc97a9844e12.hip | // !!! This is a file automatically generated by hipify!!!
#include "cp.h"
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdlib.h>
#include <sys/time.h>
#include <cmath> // sqrt/pow are used in the host code below
#define CHECK(x) check(x, #x)
using namespace std;
inline void check(hipError_t err, const char* context) {
if (err != hipSuccess) {
std::cerr << "CUDA error: " << context << ": "
<< hipGetErrorString(err) << std::endl;
std::exit(EXIT_FAILURE);
}
}
inline int static divup(int a, int b) {
return (a + b - 1)/b;
}
inline int static roundup(int a, int b) {
return divup(a, b) * b;
}
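/* Kernel layout: each block has 8x8 threads and every thread accumulates an
8x8 tile of results in registers, so one block covers a 64x64 tile of the
ny x ny output. Blocks strictly below the diagonal (i_BlockId > j_BlockId)
return immediately, since only the upper triangle of the symmetric
correlation matrix is computed. */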
__global__ void mykernel(int ny, int nx, int round_ny, int round_nx, const float* data, float* result) {
int i_threadId = threadIdx.x;
int j_threadId = threadIdx.y;
int i_BlockId = blockIdx.x;
int j_BlockId = blockIdx.y;
if (i_BlockId > j_BlockId)
return;
float temp_result[8][8];
for (int ib = 0; ib < 8; ++ib) {
for (int jb = 0; jb < 8; ++jb) {
temp_result[ib][jb] = 0;
}
}
// for (int ib = 0; ib < 8; ++ib) {
// cout<< endl;
// for (int jb = 0; jb < 8; ++jb) {
// cout<<temp_result[ib][jb];
// }
// }
float x[8];
float y[8];
for(int k = 0; k<nx; ++k)
{
for (int ib = 0; ib < 8; ++ib) {
int i = i_BlockId * 64 + ib * 8 + i_threadId;
x[ib] = data[round_ny*k + i];
}
for (int jb = 0; jb < 8; ++jb) {
int j = j_BlockId * 64 + jb * 8 + j_threadId;
y[jb] = data[round_ny*k + j];
}
for (int ib = 0; ib < 8; ++ib) {
for (int jb = 0; jb < 8; ++jb) {
temp_result[ib][jb] += x[ib] * y[jb];
}
}
}
// for (int ib = 0; ib < 8; ++ib) {
// cout<< endl;
// for (int jb = 0; jb < 8; ++jb) {
// cout<< temp_result[ib][jb];
// }
// }
for (int ib = 0; ib < 8; ++ib) {
for (int jb = 0; jb < 8; ++jb) {
int i = i_BlockId * 64 + ib * 8 + i_threadId;
int j = j_BlockId * 64 + jb * 8 + j_threadId;
if (i < ny && j < ny) {
result[ny*i + j] = temp_result[ib][jb];
}
}
}
}
void correlate(int ny, int nx, const float* data, float* result) {
int round_ny = roundup(ny, 64);
int round_nx = roundup(nx, 64);
// cout<<"Success0";
float* X = (float*) calloc(round_ny*round_nx ,sizeof(float));
float* Y = (float*) calloc(round_ny*round_nx ,sizeof(float));
const int elem_size = 32; // const so that mean[]/norm[] below are fixed-size (non-VLA) arrays
float mean[elem_size], norm[elem_size];
// cout<<"Success1";
for (int i = 0; i < ny; ++i)
{
for(int j=0; j<elem_size;++j)
{
mean[j]=norm[j]=0;
}
for (int j = 0; j+elem_size <= nx; j+=elem_size)
{
for(int k=0; k<elem_size; ++k)
{
mean[k] += data[j+k + nx*i];
}
}
// cout<<"Success2";
// for (int j = 0; j+elem_size <= nx; j+=elem_size)
// {
// cout<< endl;
// for(int k=0; k<elem_size; ++k)
// {
// cout<<mean[k];
// }
// }
for(int j = nx%elem_size; j>0; --j)
{
mean[0] += data[nx-j + nx*i];
}
for(int j = 1; j<elem_size; ++j)
{
mean[0] += mean[j];
}
mean[0] /= nx;
for(int j=0; j+elem_size<=nx; j +=elem_size)
{
for(int k=0; k<elem_size; ++k)
{
X[j+k + round_nx*i] = data[j+k + nx*i] - mean[0];
norm[k] += pow(data[j+k + nx*i]-mean[0], 2);
}
}
// cout<<"Success3";
for(int j = nx%elem_size; j>0; --j)
{
X[nx-j + round_nx*i] = data[nx-j + nx*i] - mean[0];
norm[0] += pow(data[nx-j + nx*i]-mean[0], 2);
}
for(int j =1; j<elem_size; ++j)
norm[0] += norm[j];
norm[0] = sqrt(norm[0]);
// cout<<"Success4";
for(int j=0; j<nx; ++j)
{
X[j + round_nx*i] /= norm[0];
}
}
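/* After this loop each row of X has zero mean and unit L2 norm, so the dot
product of two rows (computed by the kernel) is the Pearson correlation
coefficient of the corresponding input rows. */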
for(int i = 0; i < ny; ++i)
{
// cout<<endl;
for(int j=0; j<nx; ++j)
{
Y[round_ny*j+i] = X[round_nx*i+j];
// cout<<"Y[round_ny*j+i]";
}
}
// Transfering data between CPU and GPU
float* dGPU = NULL;
CHECK(hipMalloc((void**)&dGPU, round_ny * round_nx * sizeof(float)));
float* rGPU = NULL;
CHECK(hipMalloc((void**)&rGPU, ny * ny * sizeof(float)));
CHECK(hipMemcpy(dGPU, Y, round_ny * round_nx * sizeof(float), hipMemcpyHostToDevice));
dim3 dimBlock(8, 8);
dim3 dimGrid(round_ny/64, round_ny/64);
hipLaunchKernelGGL(( mykernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ny, nx, round_ny, round_nx, dGPU, rGPU);
CHECK(hipGetLastError());
CHECK(hipMemcpy(result, rGPU, ny * ny * sizeof(float), hipMemcpyDeviceToHost));
CHECK(hipFree(dGPU));
CHECK(hipFree(rGPU));
free(X);
free(Y);
}
| ce6a3894a3312506e7d3dca1e834fc97a9844e12.cu | #include "cp.h"
#include <cuda_runtime.h>
#include <iostream>
#include <stdlib.h>
#include <sys/time.h>
#include <cmath> // sqrt/pow are used in the host code below
#define CHECK(x) check(x, #x)
using namespace std;
inline void check(cudaError_t err, const char* context) {
if (err != cudaSuccess) {
std::cerr << "CUDA error: " << context << ": "
<< cudaGetErrorString(err) << std::endl;
std::exit(EXIT_FAILURE);
}
}
inline int static divup(int a, int b) {
return (a + b - 1)/b;
}
inline int static roundup(int a, int b) {
return divup(a, b) * b;
}
__global__ void mykernel(int ny, int nx, int round_ny, int round_nx, const float* data, float* result) {
int i_threadId = threadIdx.x;
int j_threadId = threadIdx.y;
int i_BlockId = blockIdx.x;
int j_BlockId = blockIdx.y;
if (i_BlockId > j_BlockId)
return;
float temp_result[8][8];
for (int ib = 0; ib < 8; ++ib) {
for (int jb = 0; jb < 8; ++jb) {
temp_result[ib][jb] = 0;
}
}
// for (int ib = 0; ib < 8; ++ib) {
// cout<< endl;
// for (int jb = 0; jb < 8; ++jb) {
// cout<<temp_result[ib][jb];
// }
// }
float x[8];
float y[8];
for(int k = 0; k<nx; ++k)
{
for (int ib = 0; ib < 8; ++ib) {
int i = i_BlockId * 64 + ib * 8 + i_threadId;
x[ib] = data[round_ny*k + i];
}
for (int jb = 0; jb < 8; ++jb) {
int j = j_BlockId * 64 + jb * 8 + j_threadId;
y[jb] = data[round_ny*k + j];
}
for (int ib = 0; ib < 8; ++ib) {
for (int jb = 0; jb < 8; ++jb) {
temp_result[ib][jb] += x[ib] * y[jb];
}
}
}
// for (int ib = 0; ib < 8; ++ib) {
// cout<< endl;
// for (int jb = 0; jb < 8; ++jb) {
// cout<< temp_result[ib][jb];
// }
// }
for (int ib = 0; ib < 8; ++ib) {
for (int jb = 0; jb < 8; ++jb) {
int i = i_BlockId * 64 + ib * 8 + i_threadId;
int j = j_BlockId * 64 + jb * 8 + j_threadId;
if (i < ny && j < ny) {
result[ny*i + j] = temp_result[ib][jb];
}
}
}
}
void correlate(int ny, int nx, const float* data, float* result) {
int round_ny = roundup(ny, 64);
int round_nx = roundup(nx, 64);
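// The work buffers are zero-padded up to multiples of 64 in both dimensions,
// so the kernel can read full 64-wide tiles without bounds checks; the padded
// zeros contribute nothing to the accumulated dot products.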
// cout<<"Success0";
float* X = (float*) calloc(round_ny*round_nx ,sizeof(float));
float* Y = (float*) calloc(round_ny*round_nx ,sizeof(float));
const int elem_size = 32; // const so that mean[]/norm[] below are fixed-size (non-VLA) arrays
float mean[elem_size], norm[elem_size];
// cout<<"Success1";
for (int i = 0; i < ny; ++i)
{
for(int j=0; j<elem_size;++j)
{
mean[j]=norm[j]=0;
}
for (int j = 0; j+elem_size <= nx; j+=elem_size)
{
for(int k=0; k<elem_size; ++k)
{
mean[k] += data[j+k + nx*i];
}
}
// cout<<"Success2";
// for (int j = 0; j+elem_size <= nx; j+=elem_size)
// {
// cout<< endl;
// for(int k=0; k<elem_size; ++k)
// {
// cout<<mean[k];
// }
// }
for(int j = nx%elem_size; j>0; --j)
{
mean[0] += data[nx-j + nx*i];
}
for(int j = 1; j<elem_size; ++j)
{
mean[0] += mean[j];
}
mean[0] /= nx;
for(int j=0; j+elem_size<=nx; j +=elem_size)
{
for(int k=0; k<elem_size; ++k)
{
X[j+k + round_nx*i] = data[j+k + nx*i] - mean[0];
norm[k] += pow(data[j+k + nx*i]-mean[0], 2);
}
}
// cout<<"Success3";
for(int j = nx%elem_size; j>0; --j)
{
X[nx-j + round_nx*i] = data[nx-j + nx*i] - mean[0];
norm[0] += pow(data[nx-j + nx*i]-mean[0], 2);
}
for(int j =1; j<elem_size; ++j)
norm[0] += norm[j];
norm[0] = sqrt(norm[0]);
// cout<<"Success4";
for(int j=0; j<nx; ++j)
{
X[j + round_nx*i] /= norm[0];
}
}
for(int i = 0; i < ny; ++i)
{
// cout<<endl;
for(int j=0; j<nx; ++j)
{
Y[round_ny*j+i] = X[round_nx*i+j];
// cout<<"Y[round_ny*j+i]";
}
}
// Transfering data between CPU and GPU
float* dGPU = NULL;
CHECK(cudaMalloc((void**)&dGPU, round_ny * round_nx * sizeof(float)));
float* rGPU = NULL;
CHECK(cudaMalloc((void**)&rGPU, ny * ny * sizeof(float)));
CHECK(cudaMemcpy(dGPU, Y, round_ny * round_nx * sizeof(float), cudaMemcpyHostToDevice));
dim3 dimBlock(8, 8);
dim3 dimGrid(round_ny/64, round_ny/64);
mykernel<<<dimGrid, dimBlock>>>(ny, nx, round_ny, round_nx, dGPU, rGPU);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(result, rGPU, ny * ny * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaFree(dGPU));
CHECK(cudaFree(rGPU));
free(X);
free(Y);
}
|
bbc2e92450d0761cc4670ecb5a268775e7205d70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include "function.h"
#include "sgyhead.h"
#include "common.h"
#include "kernel_hip.cuh"
// shotprofileId[5][nshot]
// 1 : sx shotprofileId[0][ishot]
// 2 : ntrace shotprofileId[1][ishot]
// 3 : itraceAll shotprofileId[2][ishot]
// 4 : offset shotprofileId[3][ishot]
extern "C" void rtm_real(int idevice, int nshot, int startshot, int dshot, int medium_flag, int flag_layout, int sgyflag, int endianflag, int ismth,
int nx, int nz, int nt, float dx, float dz, float dt, int nx0, int nz0, int nt0, float dx0, float dz0, float dt0,
int npml, int lx, int lz, float tsnap, float fdom, float spz0, float amp, float alp, float direct, float direct0,
float *vp0, float *rho0, float *diffcoef, float *wavelet, char filename_shotgather[40], int **shotprofileId,
float *image1, float *image2s, float *image3s, float *image4s, float *image5s,
float *image2m, float *image3m, float *image4m, float *image5m, float *illum)
{
// common parameter
int nxpad,nzpad;
int ntsnap = (int)(tsnap/dt);
int nw = (int)(direct/(fdom*dt)),
tlength = (int)(direct0/dt);
int it,ix,ishot;
long long offset_trace;
FILE *fp;
// wavefield extrapolation parameter
float mstimer;
char buffrecord[40];
float vpmax;
float spx,spz;
static float dx2,dz2,_dt,_dtx,_dtz;
static int nsx,nsz,nxz;
static dim3 dimGrid,dimBlock,
dimGridp,dimGridvx,dimGridvz,dimGridvxb,dimGridvzb,
dimGridvxlr,dimGridvztb,
dimGridplr,dimGridptb,
dimGridpcooner,
dimGridrecord,
dimGridpmllr,dimGridpmltb,dimBlockpmllr,dimBlockpmltb,
dimGridfull;
// variables on host
float *vp,*rho,*temp,*record,*tr;
float *vxspmllr,*vzspmltb,*pspmllr,*pspmltb;
float *p;
// variables on device
float *d_wavelet,*d_diffcoef;
float *d_source,*d_record,*d_vp,*d_rho;
float *d_p,*d_vx,*d_vz,*d_p_pre,
*d_ps,*d_vxs,*d_vzs,*d_ps_pre;
float *d_pl1,*d_pl2,*d_pr1,*d_pr2,
*d_pt1,*d_pt2,*d_pb1,*d_pb2;
float *d_ddx,*d_ddz,*d_ddxVx,*d_ddzVz;
int *d_norder,*d_norderx,*d_norderz;
float *d_vxspmllr,*d_vzspmltb,*d_pspmllr,*d_pspmltb;
// single shot image
float *d_g1, // cross-correlation
*d_g2,*d_g2ud,*d_g2du,*d_g2lr,*d_g2rl, // wavefield-decomposition
*d_g3,*d_g3_true,*d_g31,*d_g32, // poynting vector d_g32 true amplitude
*d_g4; // energy norm
float *d_image1,
*d_image2s,*d_image3s,*d_image4s,*d_image5s,
*d_image2m,*d_image3m,*d_image4m,*d_image5m,
*d_imagetrue,*d_illum;
float *imagetrue,*d_Illum;
float *d_taper;
int nwin=50;
float alpha=0.06;
float **seisobs,**seiscal;
_dtx = dt/dx;
_dtz = dt/dz;
_dt = 1.0/dt;
dx2 = dx*dx;
dz2 = dz*dz;
hipSetDevice(idevice);
check_gpu_error("Failed to initialize device");
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipMalloc(&d_wavelet, nt*sizeof(float));
hipMemset(d_wavelet, 0, nt*sizeof(float));
hipLaunchKernelGGL(( cuda_ricker_wavelet), dim3((nt+511)/512),dim3(512), 0, 0, d_wavelet, fdom, dt, nt);
hipMalloc(&d_image1, nx*nz*sizeof(float));
hipMalloc(&d_image2s, nx*nz*sizeof(float));
hipMalloc(&d_image3s, nx*nz*sizeof(float));
hipMalloc(&d_image4s, nx*nz*sizeof(float));
hipMalloc(&d_image5s, nx*nz*sizeof(float));
hipMalloc(&d_image2m, nx*nz*sizeof(float));
hipMalloc(&d_image3m, nx*nz*sizeof(float));
hipMalloc(&d_image4m, nx*nz*sizeof(float));
hipMalloc(&d_image5m, nx*nz*sizeof(float));
hipMalloc(&d_Illum, nx*nz*sizeof(float));
hipMalloc(&d_imagetrue, nx*nz*sizeof(float));
hipMemset(d_image1, 0, nx*nz*sizeof(float));
hipMemset(d_image2s, 0, nx*nz*sizeof(float));
hipMemset(d_image3s, 0, nx*nz*sizeof(float));
hipMemset(d_image4s, 0, nx*nz*sizeof(float));
hipMemset(d_image5s, 0, nx*nz*sizeof(float));
hipMemset(d_image2m, 0, nx*nz*sizeof(float));
hipMemset(d_image3m, 0, nx*nz*sizeof(float));
hipMemset(d_image4m, 0, nx*nz*sizeof(float));
hipMemset(d_image5m, 0, nx*nz*sizeof(float));
hipMemset(d_Illum, 0, nx*nz*sizeof(float));
hipMemset(d_imagetrue, 0, nx*nz*sizeof(float));
imagetrue=(float *)malloc(nx*nz*sizeof(float));
memset(imagetrue, 0, nx*nz*sizeof(float));
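/* Main loop over shots: each shot is imaged on the GPU and accumulated into
the global single-/multi-shot image buffers; intermediate images are written
to disk every 50 shots (see the (ishot-1)%50 block below). */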
for (ishot = startshot; ishot <= nshot; ishot=ishot+dshot)
{
hipEventRecord(start);
//==============================================================================
//offset
int noffset,nx1,nx2,nxlength;
int noffset0,nx10,nx20,nxlength0;
float offsetmax,offset;
offsetmax = (float)(shotprofileId[3][ishot-1] + (shotprofileId[1][ishot-1] - 1)*dx0);
offset = (float)(shotprofileId[3][ishot-1]);
spx = (float)(shotprofileId[0][ishot-1]);
spz = spz0;
// given
noffset0 = (int)(offset/dx0);
nx10 = (int)(spx/dx0);
nx20 = (int)((spx + offsetmax)/dx0);
nxlength0 = nx20 - nx10 + 1;
// optimal
noffset = (int)(offset/dx);
nx1 = (int)(spx/dx);
nx2 = (int)((spx + offsetmax)/dx);
nxlength = nx2 - nx1 + 1;
// optimal extend
spx = spx - nx1*dx; // local
spx = spx + npml*dx;
spz = spz + npml*dz;
nxpad = nxlength + 2*npml;
nzpad = nz + 2*npml;
nsx = (int)(spx/dx);
nsz = (int)(spz/dz);
nxz = nxpad*nzpad;
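/* The velocity/density models are windowed to the receiver spread of this
shot (columns nx1..nx2) and extended by npml cells on every side; spx/spz
were converted just above to local, padded-grid coordinates (nsx, nsz). */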
// block and thread
dimBlock = dim3(Block_Sizez, Block_Sizex);
dimBlockpmllr = dim3(Block_Sizez,N/2);
dimBlockpmltb = dim3(N/2,Block_Sizex);
dimGrid = dim3((nzpad+Block_Sizez-1)/Block_Sizez, (nxpad+Block_Sizex-1)/Block_Sizex);
dimGridp = dim3((nzpad-2*npml+Block_Sizez-1)/Block_Sizez, (nxpad-2*npml+Block_Sizex-1)/Block_Sizex);
dimGridvx = dim3((nzpad+Block_Sizez-1)/Block_Sizez, (nxpad-2*npml-1+Block_Sizex-1)/Block_Sizex);
dimGridvz = dim3((nzpad-2*npml-1+Block_Sizez-1)/Block_Sizez, (nxpad+Block_Sizex-1)/Block_Sizex);
dimGridvxb = dim3((nzpad-2*npml+Block_Sizez-1)/Block_Sizez, (nxpad-2*npml-1+Block_Sizex-1)/Block_Sizex);
dimGridvzb = dim3((nzpad-2*npml-1+Block_Sizez-1)/Block_Sizez,(nxpad-2*npml+Block_Sizex-1)/Block_Sizex);
dimGridvxlr = dim3((nzpad+Block_Sizez-1)/Block_Sizez,2);
dimGridvztb = dim3(2,(nxpad+Block_Sizex-1)/Block_Sizex);
dimGridplr = dim3((nzpad-2*npml+Block_Sizez-1)/Block_Sizez,2);
dimGridptb = dim3(2,(nxpad-2*npml+Block_Sizex-1)/Block_Sizex);
dimGridpcooner = dim3(2,2);
dimGridrecord = dim3((nt+Block_Sizez-1)/Block_Sizez, (nxpad-2*npml-noffset+Block_Sizex-1)/Block_Sizex);
dimGridpmllr = dim3((nzpad-2*npml+Block_Sizez-1)/Block_Sizez,2);
dimGridpmltb = dim3(2,(nxpad-2*npml+Block_Sizex-1)/Block_Sizex);
//
record = (float *)malloc(nt*(nxlength-noffset)*sizeof(float));
temp = (float *)malloc(nz*nxlength*sizeof(float));
vp = (float *)malloc(nzpad*nxpad*sizeof(float));
rho= (float *)malloc(nzpad*nxpad*sizeof(float));
p = (float *)malloc(nzpad*nxpad*sizeof(float));
vxspmllr = (float *)malloc(N*(nzpad-2*npml)*nt*sizeof(float));
vzspmltb = (float *)malloc(N*(nxpad-2*npml)*nt*sizeof(float));
pspmllr = (float *)malloc(N*(nzpad-2*npml)*nt*sizeof(float));
pspmltb = (float *)malloc(N*(nxpad-2*npml)*nt*sizeof(float));
tr = (float *)malloc(nt0*sizeof(float));
memset(record, 0, nt*(nxlength-noffset)*sizeof(float));
memset(temp, 0, nz*nxlength*sizeof(float));
memset(vp, 0, nxpad*nzpad*sizeof(float));
memset(rho, 0, nxpad*nzpad*sizeof(float));
memset(p, 0, nxpad*nzpad*sizeof(float));
memset(vxspmllr,0, N*(nzpad-2*npml)*nt*sizeof(float));
memset(vzspmltb,0, N*(nxpad-2*npml)*nt*sizeof(float));
memset(pspmllr, 0, N*(nzpad-2*npml)*nt*sizeof(float));
memset(pspmltb, 0, N*(nxpad-2*npml)*nt*sizeof(float));
//===============================================================================
extractvel1(temp, vp0, nx, nz, nx1, nx2);
extendvel1(vp, temp, nxlength, nz, npml);
extractrho1(temp, rho0, nx, nz, nx1, nx2);
extendvel1(rho, temp, nxlength, nz, npml);
free(temp);
// pml layers smooth
if (medium_flag){
pmlvelsmooth1d(vp, nxpad, nzpad, npml);
pmlvelsmooth1d(rho, nxpad, nzpad, npml);}
vpmax = sqrtf(Maxval1(vp, nzpad*nxpad));
// alloc device memory
hipMalloc(&d_diffcoef, (N/2)*(N/2)*sizeof(float));
hipMalloc(&d_record, (nxlength-noffset)*nt*sizeof(float));
hipMalloc(&d_source, nxz*sizeof(float));
hipMalloc(&d_vp, nxz*sizeof(float));
hipMalloc(&d_rho, nxz*sizeof(float));
hipMalloc(&d_p, nxz*sizeof(float));
hipMalloc(&d_p_pre, nxz*sizeof(float));
hipMalloc(&d_vx, (nxpad-1)*nzpad*sizeof(float));
hipMalloc(&d_vz, nxpad*(nzpad-1)*sizeof(float));
hipMalloc(&d_ps, nxz*sizeof(float));
hipMalloc(&d_ps_pre, nxz*sizeof(float));
hipMalloc(&d_vxs, (nxpad-1)*nzpad*sizeof(float));
hipMalloc(&d_vzs, nxpad*(nzpad-1)*sizeof(float));
hipMalloc(&d_pl1, npml*nzpad*sizeof(float));
hipMalloc(&d_pl2, npml*nzpad*sizeof(float));
hipMalloc(&d_pr1, npml*nzpad*sizeof(float));
hipMalloc(&d_pr2, npml*nzpad*sizeof(float));
hipMalloc(&d_pt1, npml*(nxpad-2*npml)*sizeof(float));
hipMalloc(&d_pt2, npml*(nxpad-2*npml)*sizeof(float));
hipMalloc(&d_pb1, npml*(nxpad-2*npml)*sizeof(float));
hipMalloc(&d_pb2, npml*(nxpad-2*npml)*sizeof(float));
hipMalloc(&d_ddx, nxpad*sizeof(float));
hipMalloc(&d_ddz, nzpad*sizeof(float));
hipMalloc(&d_ddxVx, (nxpad-1)*sizeof(float));
hipMalloc(&d_ddzVz, (nzpad-1)*sizeof(float));
hipMalloc(&d_norder, nxz*sizeof(int));
hipMalloc(&d_norderx, (nxpad-1)*sizeof(int));
hipMalloc(&d_norderz, (nzpad-1)*sizeof(int));
hipMalloc(&d_vxspmllr, N*(nzpad-2*npml)*sizeof(float));
hipMalloc(&d_vzspmltb, N*(nxpad-2*npml)*sizeof(float));
hipMalloc(&d_pspmllr, N*(nzpad-2*npml)*sizeof(float));
hipMalloc(&d_pspmltb, N*(nxpad-2*npml)*sizeof(float));
hipMalloc(&d_g1, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMalloc(&d_g2, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMalloc(&d_g3, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMalloc(&d_g3_true, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMalloc(&d_g4, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMalloc(&d_g2ud, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMalloc(&d_g2du, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMalloc(&d_g2lr, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMalloc(&d_g2rl, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMalloc(&d_g31, (nxpad-2*npml)*(nzpad-2*npml)*61*sizeof(float));
hipMalloc(&d_g32, (nxpad-2*npml)*(nzpad-2*npml)*61*sizeof(float));
hipMalloc(&d_illum, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMalloc(&d_taper, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMemcpy(d_diffcoef, diffcoef, (N/2)*(N/2)*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_vp, vp, nxz*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_rho, rho, nxz*sizeof(float), hipMemcpyHostToDevice);
hipMemset(d_record, 0, (nxlength-noffset)*nt*sizeof(float));
hipMemset(d_p, 0, nxpad*nzpad*sizeof(float));
hipMemset(d_vx, 0, (nxpad-1)*nzpad*sizeof(float));
hipMemset(d_vz, 0, nxpad*(nzpad-1)*sizeof(float));
hipMemset(d_p_pre, 0, nxpad*nzpad*sizeof(float));
hipMemset(d_ps, 0, nxpad*nzpad*sizeof(float));
hipMemset(d_vxs, 0, (nxpad-1)*nzpad*sizeof(float));
hipMemset(d_vzs, 0, nxpad*(nzpad-1)*sizeof(float));
hipMemset(d_ps_pre, 0, nxpad*nzpad*sizeof(float));
hipMemset(d_pl1, 0, npml*nzpad*sizeof(float));
hipMemset(d_pl2, 0, npml*nzpad*sizeof(float));
hipMemset(d_pr1, 0, npml*nzpad*sizeof(float));
hipMemset(d_pr2, 0, npml*nzpad*sizeof(float));
hipMemset(d_pt1, 0, npml*(nxpad-2*npml)*sizeof(float));
hipMemset(d_pt2, 0, npml*(nxpad-2*npml)*sizeof(float));
hipMemset(d_pb1, 0, npml*(nxpad-2*npml)*sizeof(float));
hipMemset(d_pb2, 0, npml*(nxpad-2*npml)*sizeof(float));
hipMemset(d_ddx, 0, nxpad*sizeof(float));
hipMemset(d_ddz, 0, nzpad*sizeof(float));
hipMemset(d_ddxVx, 0, (nxpad-1)*sizeof(float));
hipMemset(d_ddzVz, 0, (nzpad-1)*sizeof(float));
hipMemset(d_norder, 0, nxpad*nzpad*sizeof(int));
hipMemset(d_norderx,0, (nxpad-1)*sizeof(int));
hipMemset(d_norderz,0, (nzpad-1)*sizeof(int));
hipMemset(d_vxspmllr,0, N*(nzpad-2*npml)*sizeof(float));
hipMemset(d_vzspmltb,0, N*(nxpad-2*npml)*sizeof(float));
hipMemset(d_pspmllr, 0, N*(nzpad-2*npml)*sizeof(float));
hipMemset(d_pspmltb, 0, N*(nxpad-2*npml)*sizeof(float));
hipMemset(d_g1, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMemset(d_g2, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMemset(d_g3, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMemset(d_g3_true, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMemset(d_g4, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMemset(d_g2ud, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMemset(d_g2du, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMemset(d_g2lr, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMemset(d_g2rl, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMemset(d_g31, 0, (nxpad-2*npml)*(nzpad-2*npml)*61*sizeof(float));
hipMemset(d_g32, 0, (nxpad-2*npml)*(nzpad-2*npml)*61*sizeof(float));
hipMemset(d_illum, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipMemset(d_taper, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
hipLaunchKernelGGL(( cuda_source), dim3(dimGrid),dim3(dimBlock), 0, 0, d_source, nsx, nsz, nxpad, nzpad, amp, alp, dx2, dz2);
hipLaunchKernelGGL(( cuda_pmlCoeffpx), dim3((nxpad+127)/128),dim3(128), 0, 0, d_ddx, vpmax, dx, npml, nxpad);
hipLaunchKernelGGL(( cuda_pmlCoeffpz), dim3((nzpad+127)/128),dim3(128), 0, 0, d_ddz, vpmax, dz, npml, nzpad);
hipLaunchKernelGGL(( cuda_pmlCoeffvx), dim3((nxpad+126)/128),dim3(128), 0, 0, d_ddxVx, vpmax, dx, npml, nxpad-1);
hipLaunchKernelGGL(( cuda_pmlCoeffvz), dim3((nzpad+126)/128),dim3(128), 0, 0, d_ddzVz, vpmax, dz, npml, nzpad-1);
hipLaunchKernelGGL(( cuda_norder), dim3(dimGrid),dim3(dimBlock), 0, 0, d_norder, nxpad, nzpad);
hipLaunchKernelGGL(( cuda_norderx), dim3((nxpad+126)/128),dim3(128), 0, 0, d_norderx, nxpad-1);
hipLaunchKernelGGL(( cuda_norderz), dim3((nzpad+126)/128),dim3(128), 0, 0, d_norderz, nzpad-1);
while (2*nwin >= nxlength)
nwin -= 3;
hipLaunchKernelGGL(( cuda_taper_calculate), dim3(dimGridp),dim3(dimBlock), 0, 0, d_taper, nxpad-2*npml, nzpad-2*npml, nwin, alpha);
printf("N0. %d shot source wavefield calculating......\n",ishot);
// forward-propagate the source wavefield, saving the PML-strip values needed for reconstruction
for (it=0; it<nt; it++)
{
if (it%ntsnap == 0){
hipMemcpy(p, d_p, nxz*sizeof(float), hipMemcpyDeviceToHost);
printf("source-the current shot: %d\ttime: %f s; wavefield: %.5e\n",ishot,it*dt, absMaxval1(p, nxz));}
hipLaunchKernelGGL(( cuda_forward_vx), dim3(dimGridvx),dim3(dimBlock), 0, 0, d_p, d_vx, d_rho, d_diffcoef, _dtx, npml, nxpad, nzpad);
hipLaunchKernelGGL(( cuda_forward_vz), dim3(dimGridvz),dim3(dimBlock), 0, 0, d_p, d_vz, d_rho, d_diffcoef, _dtz, npml, nxpad, nzpad);
hipLaunchKernelGGL(( cuda_pml_vxlr), dim3(dimGridvxlr),dim3(dimBlock), 0, 0, d_p, d_vx, d_rho, d_diffcoef, d_ddxVx, _dtx, dt, npml, nxpad, nzpad, d_norderx);
hipLaunchKernelGGL(( cuda_pml_vztb), dim3(dimGridvztb),dim3(dimBlock), 0, 0, d_p, d_vz, d_rho, d_diffcoef, d_ddzVz, _dtz, dt, npml, nxpad, nzpad, d_norderz);
hipLaunchKernelGGL(( cuda_forward_p), dim3(dimGridp),dim3(dimBlock), 0, 0, d_p, d_vx, d_vz, d_rho, d_vp, d_diffcoef, _dtx, _dtz, npml, nxpad, nzpad);
hipLaunchKernelGGL(( cuda_pml_plr), dim3(dimGridplr),dim3(dimBlock), 0, 0, d_p, d_vx, d_vz, d_pl1, d_pl2, d_pr1, d_pr2, d_rho, d_vp, d_diffcoef, d_ddx, d_ddz,_dtx, _dtz, dt, npml, nxpad, nzpad, d_norder);
hipLaunchKernelGGL(( cuda_pml_ptb), dim3(dimGridptb),dim3(dimBlock), 0, 0, d_p, d_vx, d_vz, d_pt1, d_pt2, d_pb1, d_pb2, d_rho, d_vp, d_diffcoef, d_ddz, _dtx, _dtz, dt, npml, nxpad, nzpad, d_norder);
hipLaunchKernelGGL(( cuda_pml_pconner), dim3(dimGridpcooner),dim3(dimBlock), 0, 0, d_p, d_vx, d_vz, d_pl1, d_pl2, d_pr1, d_pr2, d_rho, d_vp, d_diffcoef, d_ddx, d_ddz,_dtx, _dtz, dt, npml, nxpad, nzpad, d_norder);
hipLaunchKernelGGL(( cuda_add_source), dim3(dimGrid),dim3(dimBlock), 0, 0, d_p, d_source, d_wavelet, dt, 1, nxpad, nzpad, it);
hipLaunchKernelGGL(( save_d_vxpml), dim3(dimGridpmllr),dim3(dimBlockpmllr), 0, 0, d_vx, d_vxspmllr, nxpad, nzpad, npml);
hipLaunchKernelGGL(( save_d_vzpml), dim3(dimGridpmltb),dim3(dimBlockpmltb), 0, 0, d_vz, d_vzspmltb, nxpad, nzpad, npml);
hipLaunchKernelGGL(( save_d_ppmllr), dim3(dimGridpmllr),dim3(dimBlockpmllr), 0, 0, d_p, d_pspmllr, nxpad, nzpad, npml);
hipLaunchKernelGGL(( save_d_ppmltb), dim3(dimGridpmltb),dim3(dimBlockpmltb), 0, 0, d_p, d_pspmltb, nxpad, nzpad, npml);
hipMemcpy(&vxspmllr[it*N*(nzpad-2*npml)], d_vxspmllr, N*(nzpad-2*npml)*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&vzspmltb[it*N*(nxpad-2*npml)], d_vzspmltb, N*(nxpad-2*npml)*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&pspmllr[it*N*(nzpad-2*npml)], d_pspmllr, N*(nzpad-2*npml)*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&pspmltb[it*N*(nxpad-2*npml)], d_pspmltb, N*(nxpad-2*npml)*sizeof(float), hipMemcpyDeviceToHost);
}
// initialize source wavefield reconstruction:
// save the last snapshot, used to reconstruct the source wavefield backward in time
hipMemcpy(d_ps, d_p, nxz*sizeof(float), hipMemcpyDeviceToDevice);
// initial receiver wavefield
hipMemset(d_p, 0, nxpad*nzpad*sizeof(float));
hipMemset(d_vx, 0, (nxpad-1)*nzpad*sizeof(float));
hipMemset(d_vz, 0, nxpad*(nzpad-1)*sizeof(float));
hipMemset(d_pl1, 0, npml*nzpad*sizeof(float));
hipMemset(d_pl2, 0, npml*nzpad*sizeof(float));
hipMemset(d_pr1, 0, npml*nzpad*sizeof(float));
hipMemset(d_pr2, 0, npml*nzpad*sizeof(float));
hipMemset(d_pt1, 0, npml*(nxpad-2*npml)*sizeof(float));
hipMemset(d_pt2, 0, npml*(nxpad-2*npml)*sizeof(float));
hipMemset(d_pb1, 0, npml*(nxpad-2*npml)*sizeof(float));
hipMemset(d_pb2, 0, npml*(nxpad-2*npml)*sizeof(float));
hipMemset(d_vxspmllr,0, N*(nzpad-2*npml)*sizeof(float));
hipMemset(d_vzspmltb,0, N*(nxpad-2*npml)*sizeof(float));
hipMemset(d_pspmllr, 0, N*(nzpad-2*npml)*sizeof(float));
hipMemset(d_pspmltb, 0, N*(nxpad-2*npml)*sizeof(float));
if (ismth)
{
velsmooth1d(vp, nzpad, nxpad, ismth);
velsmooth1d(rho, nzpad, nxpad, ismth);
hipMemcpy(d_vp, vp, nxz*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_rho, rho, nxz*sizeof(float), hipMemcpyHostToDevice);
}
//===============================================================================
// prepare seismic profile
seisobs=Creat2dArray(nt0,nxlength0-noffset0);
seiscal=Creat2dArray(nt, nxlength-noffset);
sprintf(buffrecord,"./seisReal/%s",filename_shotgather);
offset_trace = (shotprofileId[2][ishot-1] - 1)*(240 + nt0*sizeof(float)) + 3600*sgyflag;
fp = fopen(buffrecord,"rb");
fseek(fp,offset_trace,0);
for (ix=0; ix<shotprofileId[1][ishot-1]; ix++)
{
fseek(fp,240L,1);
fread(tr,sizeof(float),nt0,fp);
float_to_float(tr, nt0, endianflag);
for (it=0; it<nt0; it++)
seisobs[it][ix] = tr[it];
}
fclose(fp);
Interpseis2d(seiscal,seisobs,nxlength-noffset,nxlength0-noffset0,nt,nt0,dt,dt0);
for (it=0;it<nt;it++)
for (ix=0;ix<nxlength-noffset;ix++)
record[it*(nxlength-noffset)+ix] = seiscal[it][ix];
free2dArray(seiscal, nt, nxlength-noffset);
free2dArray(seisobs, nt0, nxlength0-noffset0);
hipMemcpy(d_record, record, (nxlength-noffset)*nt*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cuda_mute2), dim3(dimGridrecord),dim3(dimBlock), 0, 0, d_record, d_vp, nsx, nsz, nt, npml, nxlength, noffset, nw, tlength, fdom, dx2, dz2, _dt);
// implement RTM
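/* Backward pass: the source wavefield is reconstructed in reverse time from the
boundary/PML-strip values saved during the forward sweep, while the receiver
wavefield is propagated backward with the recorded traces re-injected at the
surface; the two fields are then combined through the imaging conditions below. */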
// insert seismic record for the last time slice
hipLaunchKernelGGL(( cuda_insert_record2), dim3((nxlength-noffset+127)/128),dim3(128), 0, 0, d_p, &d_record[(nt-1)*(nxlength - noffset)], npml, nxlength, noffset, dt);
// back-propagate the recorded data
printf("source wavefield prepared over...\nBegin backward......\n");
for (it=nt-2; it>=0; it--)
{
if (it%ntsnap == 0){
hipMemcpy(p, d_p, nxz*sizeof(float), hipMemcpyDeviceToHost);
printf("backward-the current shot: %d\ttime: %f s; wavefield: %.5e\n",ishot, it*dt, absMaxval1(p, nxz));}
hipMemcpy(d_ps_pre, d_ps, nxz*sizeof(float), hipMemcpyDeviceToDevice);
hipMemcpy(d_p_pre, d_p, nxz*sizeof(float), hipMemcpyDeviceToDevice);
// source wavefield 1: read vx vz pml; 2: calculate inner vx vz; 3: read p pml; 4: calculate inner p
// 1 -- 2
hipMemcpy(d_vxspmllr, &vxspmllr[(it+1)*N*(nzpad-2*npml)], N*(nzpad-2*npml)*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_vzspmltb, &vzspmltb[(it+1)*N*(nxpad-2*npml)], N*(nxpad-2*npml)*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( read_d_vxpml), dim3(dimGridpmllr),dim3(dimBlockpmllr), 0, 0, d_vxs, d_vxspmllr, nxpad, nzpad, npml);
hipLaunchKernelGGL(( read_d_vzpml), dim3(dimGridpmltb),dim3(dimBlockpmltb), 0, 0, d_vzs, d_vzspmltb, nxpad, nzpad, npml);
hipLaunchKernelGGL(( cuda_backward_vx), dim3(dimGridvxb),dim3(dimBlock), 0, 0, d_ps, d_vxs, d_rho, d_diffcoef, _dtx, npml, nxpad, nzpad);
hipLaunchKernelGGL(( cuda_backward_vz), dim3(dimGridvzb),dim3(dimBlock), 0, 0, d_ps, d_vzs, d_rho, d_diffcoef, _dtz, npml, nxpad, nzpad);
// 3 -- 4
hipMemcpy(d_pspmllr, &pspmllr[it*N*(nzpad-2*npml)], N*(nzpad-2*npml)*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_pspmltb, &pspmltb[it*N*(nxpad-2*npml)], N*(nxpad-2*npml)*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( read_d_ppmllr), dim3(dimGridpmllr),dim3(dimBlockpmllr), 0, 0, d_ps, d_pspmllr, nxpad, nzpad, npml);
hipLaunchKernelGGL(( read_d_ppmltb), dim3(dimGridpmltb),dim3(dimBlockpmltb), 0, 0, d_ps, d_pspmltb, nxpad, nzpad, npml);
hipLaunchKernelGGL(( cuda_backward_p), dim3(dimGridp),dim3(dimBlock), 0, 0, d_ps, d_vxs, d_vzs, d_rho, d_vp, d_diffcoef, _dtx, _dtz, npml, nxpad, nzpad);
// insert source
hipLaunchKernelGGL(( cuda_add_source), dim3(dimGrid),dim3(dimBlock), 0, 0, d_ps, d_source, d_wavelet, dt, 2, nxpad, nzpad, it);
// receiver wavefield
hipLaunchKernelGGL(( cuda_forward_vx), dim3(dimGridvx),dim3(dimBlock), 0, 0, d_p, d_vx, d_rho, d_diffcoef, _dtx, npml, nxpad, nzpad);
hipLaunchKernelGGL(( cuda_forward_vz), dim3(dimGridvz),dim3(dimBlock), 0, 0, d_p, d_vz, d_rho, d_diffcoef, _dtz, npml, nxpad, nzpad);
hipLaunchKernelGGL(( cuda_pml_vxlr), dim3(dimGridvxlr),dim3(dimBlock), 0, 0, d_p, d_vx, d_rho, d_diffcoef, d_ddxVx, _dtx, dt, npml, nxpad, nzpad, d_norderx);
hipLaunchKernelGGL(( cuda_pml_vztb), dim3(dimGridvztb),dim3(dimBlock), 0, 0, d_p, d_vz, d_rho, d_diffcoef, d_ddzVz, _dtz, dt, npml, nxpad, nzpad, d_norderz);
hipLaunchKernelGGL(( cuda_forward_p), dim3(dimGridp),dim3(dimBlock), 0, 0, d_p, d_vx, d_vz, d_rho, d_vp, d_diffcoef, _dtx, _dtz, npml, nxpad, nzpad);
hipLaunchKernelGGL(( cuda_pml_plr), dim3(dimGridplr),dim3(dimBlock), 0, 0, d_p, d_vx, d_vz, d_pl1, d_pl2, d_pr1, d_pr2, d_rho, d_vp, d_diffcoef, d_ddx, d_ddz,_dtx, _dtz, dt, npml, nxpad, nzpad, d_norder);
hipLaunchKernelGGL(( cuda_pml_ptb), dim3(dimGridptb),dim3(dimBlock), 0, 0, d_p, d_vx, d_vz, d_pt1, d_pt2, d_pb1, d_pb2, d_rho, d_vp, d_diffcoef, d_ddz, _dtx, _dtz, dt, npml, nxpad, nzpad, d_norder);
hipLaunchKernelGGL(( cuda_pml_pconner), dim3(dimGridpcooner),dim3(dimBlock), 0, 0, d_p, d_vx, d_vz, d_pl1, d_pl2, d_pr1, d_pr2, d_rho, d_vp, d_diffcoef, d_ddx, d_ddz,_dtx, _dtz, dt, npml, nxpad, nzpad, d_norder);
// insert source
hipLaunchKernelGGL(( cuda_insert_record2), dim3((nxlength-noffset+127)/128),dim3(128), 0, 0, d_p, &d_record[it*(nxlength-noffset)], npml, nxlength, noffset, dt);
// imaging condition:
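/* Four images are accumulated per time step:
g1 - zero-lag cross-correlation (with source illumination in d_illum),
g2 - up/down and left/right wavefield-decomposition terms (g2ud/g2du/g2lr/g2rl),
g3 - Poynting-vector images (d_g31/d_g32 appear to be binned over 61 angle
slots and stacked later by cuda_stack_theta; d_g32 is the "true amplitude" variant),
g4 - energy-norm imaging condition. */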
hipLaunchKernelGGL(( cuda_cross_coorelation), dim3(dimGridp),dim3(dimBlock), 0, 0, d_ps,d_p,d_g1,d_illum,nxpad,nzpad,npml);
hipLaunchKernelGGL(( cuda_wavefield_decomposition), dim3(dimGridp),dim3(dimBlock), 0, 0, d_ps,d_vxs,d_vzs,d_p,d_vx,d_vz,d_g2ud,d_g2du,d_g2lr,d_g2rl,nxpad,nzpad,npml);
hipLaunchKernelGGL(( cuda_poynting), dim3(dimGridp),dim3(dimBlock), 0, 0, d_ps,d_vxs,d_vzs,d_p,d_vx,d_vz,d_vp,d_g31,d_g32,nxpad,nzpad,npml);
hipLaunchKernelGGL(( cuda_energynorm), dim3(dimGridp),dim3(dimBlock), 0, 0, d_ps,d_ps_pre,d_p,d_p_pre,d_vp,d_g4,dx,dz,dt,nxpad,nzpad,npml);
}
// obtain g2 and g3
hipLaunchKernelGGL(( cuda_stack_udlr), dim3(dimGridp),dim3(dimBlock), 0, 0, d_g2ud,d_g2du,d_g2lr,d_g2rl,d_g2,nxlength,nz);
hipLaunchKernelGGL(( cuda_stack_theta), dim3(dimGridp),dim3(dimBlock), 0, 0, d_g31,d_g3,nxlength,nz);
hipLaunchKernelGGL(( cuda_stack_theta), dim3(dimGridp),dim3(dimBlock), 0, 0, d_g32,d_g3_true,nxlength,nz);
// taper image
hipLaunchKernelGGL(( cuda_taper_image), dim3(dimGridp),dim3(dimBlock), 0, 0, d_g1, d_taper, nxlength, nz);
hipLaunchKernelGGL(( cuda_taper_image), dim3(dimGridp),dim3(dimBlock), 0, 0, d_g2, d_taper, nxlength, nz);
hipLaunchKernelGGL(( cuda_taper_image), dim3(dimGridp),dim3(dimBlock), 0, 0, d_g3, d_taper, nxlength, nz);
hipLaunchKernelGGL(( cuda_taper_image), dim3(dimGridp),dim3(dimBlock), 0, 0, d_g4, d_taper, nxlength, nz);
hipLaunchKernelGGL(( cuda_taper_image), dim3(dimGridp),dim3(dimBlock), 0, 0, d_g3_true, d_taper, nxlength, nz);
hipLaunchKernelGGL(( cuda_taper_image), dim3(dimGridp),dim3(dimBlock), 0, 0, d_illum, d_taper, nxlength, nz);
// single-shot normalized
hipLaunchKernelGGL(( cuda_applyics), dim3(dimGridp),dim3(dimBlock), 0, 0, d_image2s,d_g1,d_illum,nx,nxlength,nx1,nz);
hipLaunchKernelGGL(( cuda_applyics), dim3(dimGridp),dim3(dimBlock), 0, 0, d_image3s,d_g2,d_illum,nx,nxlength,nx1,nz);
hipLaunchKernelGGL(( cuda_applyics), dim3(dimGridp),dim3(dimBlock), 0, 0, d_image4s,d_g3,d_illum,nx,nxlength,nx1,nz);
hipLaunchKernelGGL(( cuda_applyics), dim3(dimGridp),dim3(dimBlock), 0, 0, d_image5s,d_g4,d_illum,nx,nxlength,nx1,nz);
// multi-shot normalized
hipLaunchKernelGGL(( cuda_applyic), dim3(dimGridp),dim3(dimBlock), 0, 0, d_image1, d_g1,nx,nxlength,nx1,nz);
hipLaunchKernelGGL(( cuda_applyic), dim3(dimGridp),dim3(dimBlock), 0, 0, d_image2m, d_g1,nx,nxlength,nx1,nz);
hipLaunchKernelGGL(( cuda_applyic), dim3(dimGridp),dim3(dimBlock), 0, 0, d_image3m, d_g2,nx,nxlength,nx1,nz);
hipLaunchKernelGGL(( cuda_applyic), dim3(dimGridp),dim3(dimBlock), 0, 0, d_image4m, d_g3,nx,nxlength,nx1,nz);
hipLaunchKernelGGL(( cuda_applyic), dim3(dimGridp),dim3(dimBlock), 0, 0, d_image5m, d_g4,nx,nxlength,nx1,nz);
hipLaunchKernelGGL(( cuda_applyic), dim3(dimGridp),dim3(dimBlock), 0, 0, d_imagetrue, d_g3_true,nx,nxlength,nx1,nz);
hipLaunchKernelGGL(( cuda_applyic), dim3(dimGridp),dim3(dimBlock), 0, 0, d_Illum, d_illum,nx,nxlength,nx1,nz);
// output temp image
if ((ishot-1)%50 == 0)
{
// single-shot normalized
// Image2s
hipMemcpy(image2s, d_image2s, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image2stemp.su");
Output1d(image2s, nz, nx, dx, buffrecord, 1);
// Image3s
hipMemcpy(image3s, d_image3s, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image3stemp.su");
Output1d(image3s, nz, nx, dx, buffrecord, 1);
// Image4s
hipMemcpy(image4s, d_image4s, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image4stemp.su");
Output1d(image4s, nz, nx, dx, buffrecord, 1);
// Image5s
hipMemcpy(image5s, d_image5s, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image5stemp.su");
Output1d(image5s, nz, nx, dx, buffrecord, 1);
// multi-shot normalized
// Image1
hipMemcpy(image1, d_image1, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image1temp.su");
Output1d(image1, nz, nx, dx, buffrecord, 1);
// Illum
hipMemcpy(illum, d_Illum, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Illumtemp.su");
Output1d(illum, nz, nx, dx, buffrecord, 1);
// Image2m
hipMemcpy(image2m, d_image2m, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image2mtemp.su");
Output1d(image2m, nz, nx, dx, buffrecord, 1);
// Image3m
hipMemcpy(image3m, d_image3m, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image3mtemp.su");
Output1d(image3m, nz, nx, dx, buffrecord, 1);
// Image4m
hipMemcpy(image4m, d_image4m, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image4mtemp.su");
Output1d(image4m, nz, nx, dx, buffrecord, 1);
// Image5m
hipMemcpy(image5m, d_image5m, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image5mtemp.su");
Output1d(image5m, nz, nx, dx, buffrecord, 1);
// Imagetrue
hipMemcpy(imagetrue, d_imagetrue, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Imagetruetemp.su");
Output1d(imagetrue, nz, nx, dx, buffrecord, 1);
}
free(record);
free(vp);
free(rho);
free(vxspmllr);
free(vzspmltb);
free(pspmllr);
free(pspmltb);
free(p);
free(tr);
hipFree(d_diffcoef);hipFree(d_record);hipFree(d_source);
hipFree(d_vp);hipFree(d_rho);
hipFree(d_p); hipFree(d_vx); hipFree(d_vz); hipFree(d_p_pre);
hipFree(d_ps);hipFree(d_vxs);hipFree(d_vzs);hipFree(d_ps_pre);
hipFree(d_pl1);hipFree(d_pl2);hipFree(d_pr1);hipFree(d_pr2);
hipFree(d_pt1);hipFree(d_pt2);hipFree(d_pb1);hipFree(d_pb2);
hipFree(d_ddx);hipFree(d_ddz);hipFree(d_ddxVx);hipFree(d_ddzVz);
hipFree(d_norder);hipFree(d_norderx);hipFree(d_norderz);
hipFree(d_vxspmllr);hipFree(d_vzspmltb);hipFree(d_pspmllr);hipFree(d_pspmltb);
hipFree(d_g1); hipFree(d_g2); hipFree(d_g3); hipFree(d_g4);
hipFree(d_g2ud);hipFree(d_g2du);hipFree(d_g2lr);hipFree(d_g2rl);
hipFree(d_g31); hipFree(d_g32); hipFree(d_g3_true);
hipFree(d_illum);
hipFree(d_taper);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&mstimer, start, stop);
printf("%d shot finished: %g (s)\n",ishot, mstimer*1.e-3);
}
hipMemcpy(imagetrue, d_imagetrue, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
sprintf(buffrecord,"./output/%dImagetrue.su",idevice);
Output1d(imagetrue, nz, nx, dx, buffrecord, 1);
hipMemcpy(image2s, d_image2s, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(image3s, d_image3s, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(image4s, d_image4s, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(image5s, d_image5s, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(illum, d_Illum, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(image1, d_image1, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(image2m, d_image2m, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(image3m, d_image3m, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(image4m, d_image4m, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(image5m, d_image5m, nx*nz*sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_image1);
hipFree(d_image2s);hipFree(d_image3s);hipFree(d_image4s);hipFree(d_image5s);
hipFree(d_image2m);hipFree(d_image3m);hipFree(d_image4m);hipFree(d_image5m);
hipFree(d_Illum);
hipFree(d_imagetrue);
free(imagetrue);
}
| bbc2e92450d0761cc4670ecb5a268775e7205d70.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include "function.h"
#include "sgyhead.h"
#include "common.h"
#include "kernel.cuh"
// shotprofileId[5][nshot]
// row 1: sx shotprofileId[0][ishot]
// row 2: ntrace shotprofileId[1][ishot]
// row 3: itraceAll shotprofileId[2][ishot]
// row 4: offset shotprofileId[3][ishot]
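// rtm_real: GPU reverse-time migration of the shots assigned to one device.
// For every shot it (1) forward-propagates the source wavefield while saving the
// PML boundary strips, (2) reconstructs the source wavefield time-reversed while
// back-propagating the recorded traces, and (3) accumulates cross-correlation,
// wavefield-decomposition, Poynting-vector and energy-norm images, each with
// single-shot and multi-shot illumination normalization.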
extern "C" void rtm_real(int idevice, int nshot, int startshot, int dshot, int medium_flag, int flag_layout, int sgyflag, int endianflag, int ismth,
int nx, int nz, int nt, float dx, float dz, float dt, int nx0, int nz0, int nt0, float dx0, float dz0, float dt0,
int npml, int lx, int lz, float tsnap, float fdom, float spz0, float amp, float alp, float direct, float direct0,
float *vp0, float *rho0, float *diffcoef, float *wavelet, char filename_shotgather[40], int **shotprofileId,
float *image1, float *image2s, float *image3s, float *image4s, float *image5s,
float *image2m, float *image3m, float *image4m, float *image5m, float *illum)
{
// common parameter
int nxpad,nzpad;
int ntsnap = (int)(tsnap/dt);
int nw = (int)(direct/(fdom*dt)),
tlength = (int)(direct0/dt);
int it,ix,ishot;
long long offset_trace;
FILE *fp;
// wavefield extrapolation parameter
float mstimer;
char buffrecord[40];
float vpmax;
float spx,spz;
static float dx2,dz2,_dt,_dtx,_dtz;
static int nsx,nsz,nxz;
static dim3 dimGrid,dimBlock,
dimGridp,dimGridvx,dimGridvz,dimGridvxb,dimGridvzb,
dimGridvxlr,dimGridvztb,
dimGridplr,dimGridptb,
dimGridpcooner,
dimGridrecord,
dimGridpmllr,dimGridpmltb,dimBlockpmllr,dimBlockpmltb,
dimGridfull;
// variables on host
float *vp,*rho,*temp,*record,*tr;
float *vxspmllr,*vzspmltb,*pspmllr,*pspmltb;
float *p;
// variables on device
float *d_wavelet,*d_diffcoef;
float *d_source,*d_record,*d_vp,*d_rho;
float *d_p,*d_vx,*d_vz,*d_p_pre,
*d_ps,*d_vxs,*d_vzs,*d_ps_pre;
float *d_pl1,*d_pl2,*d_pr1,*d_pr2,
*d_pt1,*d_pt2,*d_pb1,*d_pb2;
float *d_ddx,*d_ddz,*d_ddxVx,*d_ddzVz;
int *d_norder,*d_norderx,*d_norderz;
float *d_vxspmllr,*d_vzspmltb,*d_pspmllr,*d_pspmltb;
// single shot image
float *d_g1, // cross-coorelation
*d_g2,*d_g2ud,*d_g2du,*d_g2lr,*d_g2rl, // wavefiled-decomposition
*d_g3,*d_g3_true,*d_g31,*d_g32, // poynting vector d_g32 true amplitude
*d_g4; // energy norm
float *d_image1,
*d_image2s,*d_image3s,*d_image4s,*d_image5s,
*d_image2m,*d_image3m,*d_image4m,*d_image5m,
*d_imagetrue,*d_illum;
float *imagetrue,*d_Illum;
float *d_taper;
int nwin=50;
float alpha=0.06;
float **seisobs,**seiscal;
_dtx = dt/dx;
_dtz = dt/dz;
_dt = 1.0/dt;
dx2 = dx*dx;
dz2 = dz*dz;
cudaSetDevice(idevice);
check_gpu_error("Failed to initialize device");
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMalloc(&d_wavelet, nt*sizeof(float));
cudaMemset(d_wavelet, 0, nt*sizeof(float));
cuda_ricker_wavelet<<<(nt+511)/512,512>>>(d_wavelet, fdom, dt, nt);
cudaMalloc(&d_image1, nx*nz*sizeof(float));
cudaMalloc(&d_image2s, nx*nz*sizeof(float));
cudaMalloc(&d_image3s, nx*nz*sizeof(float));
cudaMalloc(&d_image4s, nx*nz*sizeof(float));
cudaMalloc(&d_image5s, nx*nz*sizeof(float));
cudaMalloc(&d_image2m, nx*nz*sizeof(float));
cudaMalloc(&d_image3m, nx*nz*sizeof(float));
cudaMalloc(&d_image4m, nx*nz*sizeof(float));
cudaMalloc(&d_image5m, nx*nz*sizeof(float));
cudaMalloc(&d_Illum, nx*nz*sizeof(float));
cudaMalloc(&d_imagetrue, nx*nz*sizeof(float));
cudaMemset(d_image1, 0, nx*nz*sizeof(float));
cudaMemset(d_image2s, 0, nx*nz*sizeof(float));
cudaMemset(d_image3s, 0, nx*nz*sizeof(float));
cudaMemset(d_image4s, 0, nx*nz*sizeof(float));
cudaMemset(d_image5s, 0, nx*nz*sizeof(float));
cudaMemset(d_image2m, 0, nx*nz*sizeof(float));
cudaMemset(d_image3m, 0, nx*nz*sizeof(float));
cudaMemset(d_image4m, 0, nx*nz*sizeof(float));
cudaMemset(d_image5m, 0, nx*nz*sizeof(float));
cudaMemset(d_Illum, 0, nx*nz*sizeof(float));
cudaMemset(d_imagetrue, 0, nx*nz*sizeof(float));
imagetrue=(float *)malloc(nx*nz*sizeof(float));
memset(imagetrue, 0, nx*nz*sizeof(float));
for (ishot = startshot; ishot <= nshot; ishot=ishot+dshot)
{
cudaEventRecord(start);
//==============================================================================
// single-sided spread (receivers lie to the right of the source; the minimum offset is offset)
int noffset,nx1,nx2,nxlength;
int noffset0,nx10,nx20,nxlength0;
float offsetmax,offset;
offsetmax = (float)(shotprofileId[3][ishot-1] + (shotprofileId[1][ishot-1] - 1)*dx0);
offset = (float)(shotprofileId[3][ishot-1]);
spx = (float)(shotprofileId[0][ishot-1]);
spz = spz0;
// given
noffset0 = (int)(offset/dx0);
nx10 = (int)(spx/dx0);
nx20 = (int)((spx + offsetmax)/dx0);
nxlength0 = nx20 - nx10 + 1;
// optimal
noffset = (int)(offset/dx);
nx1 = (int)(spx/dx);
nx2 = (int)((spx + offsetmax)/dx);
nxlength = nx2 - nx1 + 1;
// optimal extend
spx = spx - nx1*dx; // local
spx = spx + npml*dx;
spz = spz + npml*dz;
nxpad = nxlength + 2*npml;
nzpad = nz + 2*npml;
nsx = (int)(spx/dx);
nsz = (int)(spz/dz);
nxz = nxpad*nzpad;
// block and thread
dimBlock = dim3(Block_Sizez, Block_Sizex);
dimBlockpmllr = dim3(Block_Sizez,N/2);
dimBlockpmltb = dim3(N/2,Block_Sizex);
dimGrid = dim3((nzpad+Block_Sizez-1)/Block_Sizez, (nxpad+Block_Sizex-1)/Block_Sizex);
dimGridp = dim3((nzpad-2*npml+Block_Sizez-1)/Block_Sizez, (nxpad-2*npml+Block_Sizex-1)/Block_Sizex);
dimGridvx = dim3((nzpad+Block_Sizez-1)/Block_Sizez, (nxpad-2*npml-1+Block_Sizex-1)/Block_Sizex);
dimGridvz = dim3((nzpad-2*npml-1+Block_Sizez-1)/Block_Sizez, (nxpad+Block_Sizex-1)/Block_Sizex);
dimGridvxb = dim3((nzpad-2*npml+Block_Sizez-1)/Block_Sizez, (nxpad-2*npml-1+Block_Sizex-1)/Block_Sizex);
dimGridvzb = dim3((nzpad-2*npml-1+Block_Sizez-1)/Block_Sizez,(nxpad-2*npml+Block_Sizex-1)/Block_Sizex);
dimGridvxlr = dim3((nzpad+Block_Sizez-1)/Block_Sizez,2);
dimGridvztb = dim3(2,(nxpad+Block_Sizex-1)/Block_Sizex);
dimGridplr = dim3((nzpad-2*npml+Block_Sizez-1)/Block_Sizez,2);
dimGridptb = dim3(2,(nxpad-2*npml+Block_Sizex-1)/Block_Sizex);
dimGridpcooner = dim3(2,2);
dimGridrecord = dim3((nt+Block_Sizez-1)/Block_Sizez, (nxpad-2*npml-noffset+Block_Sizex-1)/Block_Sizex);
dimGridpmllr = dim3((nzpad-2*npml+Block_Sizez-1)/Block_Sizez,2);
dimGridpmltb = dim3(2,(nxpad-2*npml+Block_Sizex-1)/Block_Sizex);
// convert and prepare the modeling parameters
record = (float *)malloc(nt*(nxlength-noffset)*sizeof(float));
temp = (float *)malloc(nz*nxlength*sizeof(float));
vp = (float *)malloc(nzpad*nxpad*sizeof(float));
rho= (float *)malloc(nzpad*nxpad*sizeof(float));
p = (float *)malloc(nzpad*nxpad*sizeof(float));
vxspmllr = (float *)malloc(N*(nzpad-2*npml)*nt*sizeof(float));
vzspmltb = (float *)malloc(N*(nxpad-2*npml)*nt*sizeof(float));
pspmllr = (float *)malloc(N*(nzpad-2*npml)*nt*sizeof(float));
pspmltb = (float *)malloc(N*(nxpad-2*npml)*nt*sizeof(float));
tr = (float *)malloc(nt0*sizeof(float));
memset(record, 0, nt*(nxlength-noffset)*sizeof(float));
memset(temp, 0, nz*nxlength*sizeof(float));
memset(vp, 0, nxpad*nzpad*sizeof(float));
memset(rho, 0, nxpad*nzpad*sizeof(float));
memset(p, 0, nxpad*nzpad*sizeof(float));
memset(vxspmllr,0, N*(nzpad-2*npml)*nt*sizeof(float));
memset(vzspmltb,0, N*(nxpad-2*npml)*nt*sizeof(float));
memset(pspmllr, 0, N*(nzpad-2*npml)*nt*sizeof(float));
memset(pspmltb, 0, N*(nxpad-2*npml)*nt*sizeof(float));
//===============================================================================
extractvel1(temp, vp0, nx, nz, nx1, nx2);
extendvel1(vp, temp, nxlength, nz, npml);
extractrho1(temp, rho0, nx, nz, nx1, nx2);
extendvel1(rho, temp, nxlength, nz, npml);
free(temp);
// pml layers smooth
if (medium_flag){
pmlvelsmooth1d(vp, nxpad, nzpad, npml);
pmlvelsmooth1d(rho, nxpad, nzpad, npml);}
vpmax = sqrtf(Maxval1(vp, nzpad*nxpad));
// alloc device memory
cudaMalloc(&d_diffcoef, (N/2)*(N/2)*sizeof(float));
cudaMalloc(&d_record, (nxlength-noffset)*nt*sizeof(float));
cudaMalloc(&d_source, nxz*sizeof(float));
cudaMalloc(&d_vp, nxz*sizeof(float));
cudaMalloc(&d_rho, nxz*sizeof(float));
cudaMalloc(&d_p, nxz*sizeof(float));
cudaMalloc(&d_p_pre, nxz*sizeof(float));
cudaMalloc(&d_vx, (nxpad-1)*nzpad*sizeof(float));
cudaMalloc(&d_vz, nxpad*(nzpad-1)*sizeof(float));
cudaMalloc(&d_ps, nxz*sizeof(float));
cudaMalloc(&d_ps_pre, nxz*sizeof(float));
cudaMalloc(&d_vxs, (nxpad-1)*nzpad*sizeof(float));
cudaMalloc(&d_vzs, nxpad*(nzpad-1)*sizeof(float));
cudaMalloc(&d_pl1, npml*nzpad*sizeof(float));
cudaMalloc(&d_pl2, npml*nzpad*sizeof(float));
cudaMalloc(&d_pr1, npml*nzpad*sizeof(float));
cudaMalloc(&d_pr2, npml*nzpad*sizeof(float));
cudaMalloc(&d_pt1, npml*(nxpad-2*npml)*sizeof(float));
cudaMalloc(&d_pt2, npml*(nxpad-2*npml)*sizeof(float));
cudaMalloc(&d_pb1, npml*(nxpad-2*npml)*sizeof(float));
cudaMalloc(&d_pb2, npml*(nxpad-2*npml)*sizeof(float));
cudaMalloc(&d_ddx, nxpad*sizeof(float));
cudaMalloc(&d_ddz, nzpad*sizeof(float));
cudaMalloc(&d_ddxVx, (nxpad-1)*sizeof(float));
cudaMalloc(&d_ddzVz, (nzpad-1)*sizeof(float));
cudaMalloc(&d_norder, nxz*sizeof(int));
cudaMalloc(&d_norderx, (nxpad-1)*sizeof(int));
cudaMalloc(&d_norderz, (nzpad-1)*sizeof(int));
cudaMalloc(&d_vxspmllr, N*(nzpad-2*npml)*sizeof(float));
cudaMalloc(&d_vzspmltb, N*(nxpad-2*npml)*sizeof(float));
cudaMalloc(&d_pspmllr, N*(nzpad-2*npml)*sizeof(float));
cudaMalloc(&d_pspmltb, N*(nxpad-2*npml)*sizeof(float));
cudaMalloc(&d_g1, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMalloc(&d_g2, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMalloc(&d_g3, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMalloc(&d_g3_true, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMalloc(&d_g4, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMalloc(&d_g2ud, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMalloc(&d_g2du, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMalloc(&d_g2lr, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMalloc(&d_g2rl, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMalloc(&d_g31, (nxpad-2*npml)*(nzpad-2*npml)*61*sizeof(float));
cudaMalloc(&d_g32, (nxpad-2*npml)*(nzpad-2*npml)*61*sizeof(float));
cudaMalloc(&d_illum, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMalloc(&d_taper, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMemcpy(d_diffcoef, diffcoef, (N/2)*(N/2)*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_vp, vp, nxz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_rho, rho, nxz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemset(d_record, 0, (nxlength-noffset)*nt*sizeof(float));
cudaMemset(d_p, 0, nxpad*nzpad*sizeof(float));
cudaMemset(d_vx, 0, (nxpad-1)*nzpad*sizeof(float));
cudaMemset(d_vz, 0, nxpad*(nzpad-1)*sizeof(float));
cudaMemset(d_p_pre, 0, nxpad*nzpad*sizeof(float));
cudaMemset(d_ps, 0, nxpad*nzpad*sizeof(float));
cudaMemset(d_vxs, 0, (nxpad-1)*nzpad*sizeof(float));
cudaMemset(d_vzs, 0, nxpad*(nzpad-1)*sizeof(float));
cudaMemset(d_ps_pre, 0, nxpad*nzpad*sizeof(float));
cudaMemset(d_pl1, 0, npml*nzpad*sizeof(float));
cudaMemset(d_pl2, 0, npml*nzpad*sizeof(float));
cudaMemset(d_pr1, 0, npml*nzpad*sizeof(float));
cudaMemset(d_pr2, 0, npml*nzpad*sizeof(float));
cudaMemset(d_pt1, 0, npml*(nxpad-2*npml)*sizeof(float));
cudaMemset(d_pt2, 0, npml*(nxpad-2*npml)*sizeof(float));
cudaMemset(d_pb1, 0, npml*(nxpad-2*npml)*sizeof(float));
cudaMemset(d_pb2, 0, npml*(nxpad-2*npml)*sizeof(float));
cudaMemset(d_ddx, 0, nxpad*sizeof(float));
cudaMemset(d_ddz, 0, nzpad*sizeof(float));
cudaMemset(d_ddxVx, 0, (nxpad-1)*sizeof(float));
cudaMemset(d_ddzVz, 0, (nzpad-1)*sizeof(float));
cudaMemset(d_norder, 0, nxpad*nzpad*sizeof(int));
cudaMemset(d_norderx,0, (nxpad-1)*sizeof(int));
cudaMemset(d_norderz,0, (nzpad-1)*sizeof(int));
cudaMemset(d_vxspmllr,0, N*(nzpad-2*npml)*sizeof(float));
cudaMemset(d_vzspmltb,0, N*(nxpad-2*npml)*sizeof(float));
cudaMemset(d_pspmllr, 0, N*(nzpad-2*npml)*sizeof(float));
cudaMemset(d_pspmltb, 0, N*(nxpad-2*npml)*sizeof(float));
cudaMemset(d_g1, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMemset(d_g2, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMemset(d_g3, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMemset(d_g3_true, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMemset(d_g4, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMemset(d_g2ud, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMemset(d_g2du, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMemset(d_g2lr, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMemset(d_g2rl, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMemset(d_g31, 0, (nxpad-2*npml)*(nzpad-2*npml)*61*sizeof(float));
cudaMemset(d_g32, 0, (nxpad-2*npml)*(nzpad-2*npml)*61*sizeof(float));
cudaMemset(d_illum, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
cudaMemset(d_taper, 0, (nxpad-2*npml)*(nzpad-2*npml)*sizeof(float));
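// Per-shot device setup: the distributed source term (d_source), the PML damping
// profiles (d_ddx, d_ddz, d_ddxVx, d_ddzVz), the operator-order maps (d_norder,
// d_norderx, d_norderz), and the taper later applied to the single-shot images.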
cuda_source<<<dimGrid,dimBlock>>>(d_source, nsx, nsz, nxpad, nzpad, amp, alp, dx2, dz2);
cuda_pmlCoeffpx<<<(nxpad+127)/128,128>>>(d_ddx, vpmax, dx, npml, nxpad);
cuda_pmlCoeffpz<<<(nzpad+127)/128,128>>>(d_ddz, vpmax, dz, npml, nzpad);
cuda_pmlCoeffvx<<<(nxpad+126)/128,128>>>(d_ddxVx, vpmax, dx, npml, nxpad-1);
cuda_pmlCoeffvz<<<(nzpad+126)/128,128>>>(d_ddzVz, vpmax, dz, npml, nzpad-1);
cuda_norder<<<dimGrid,dimBlock>>> (d_norder, nxpad, nzpad);
cuda_norderx<<<(nxpad+126)/128,128>>> (d_norderx, nxpad-1);
cuda_norderz<<<(nzpad+126)/128,128>>> (d_norderz, nzpad-1);
while (2*nwin >= nxlength)
nwin -= 3;
cuda_taper_calculate<<<dimGridp,dimBlock>>>(d_taper, nxpad-2*npml, nzpad-2*npml, nwin, alpha);
printf("N0. %d shot source wavefield calculating......\n",ishot);
// forward-propagate the source wavefield, saving only the PML boundary strips
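// Storing only these narrow boundary strips at each time step (rather than full
// wavefield snapshots) keeps memory usage low; the interior source wavefield is
// rebuilt time-reversed inside the imaging loop further below.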
for (it=0; it<nt; it++)
{
if (it%ntsnap == 0){
cudaMemcpy(p, d_p, nxz*sizeof(float), cudaMemcpyDeviceToHost);
printf("source-the current shot: %d\ttime: %f s; wavefield: %.5e\n",ishot,it*dt, absMaxval1(p, nxz));}
cuda_forward_vx<<<dimGridvx,dimBlock>>>(d_p, d_vx, d_rho, d_diffcoef, _dtx, npml, nxpad, nzpad);
cuda_forward_vz<<<dimGridvz,dimBlock>>>(d_p, d_vz, d_rho, d_diffcoef, _dtz, npml, nxpad, nzpad);
cuda_pml_vxlr<<<dimGridvxlr,dimBlock>>>(d_p, d_vx, d_rho, d_diffcoef, d_ddxVx, _dtx, dt, npml, nxpad, nzpad, d_norderx);
cuda_pml_vztb<<<dimGridvztb,dimBlock>>>(d_p, d_vz, d_rho, d_diffcoef, d_ddzVz, _dtz, dt, npml, nxpad, nzpad, d_norderz);
cuda_forward_p<<<dimGridp,dimBlock>>>(d_p, d_vx, d_vz, d_rho, d_vp, d_diffcoef, _dtx, _dtz, npml, nxpad, nzpad);
cuda_pml_plr<<<dimGridplr,dimBlock>>>(d_p, d_vx, d_vz, d_pl1, d_pl2, d_pr1, d_pr2, d_rho, d_vp, d_diffcoef, d_ddx, d_ddz,_dtx, _dtz, dt, npml, nxpad, nzpad, d_norder);
cuda_pml_ptb<<<dimGridptb,dimBlock>>>(d_p, d_vx, d_vz, d_pt1, d_pt2, d_pb1, d_pb2, d_rho, d_vp, d_diffcoef, d_ddz, _dtx, _dtz, dt, npml, nxpad, nzpad, d_norder);
cuda_pml_pconner<<<dimGridpcooner,dimBlock>>>(d_p, d_vx, d_vz, d_pl1, d_pl2, d_pr1, d_pr2, d_rho, d_vp, d_diffcoef, d_ddx, d_ddz,_dtx, _dtz, dt, npml, nxpad, nzpad, d_norder);
cuda_add_source<<<dimGrid,dimBlock>>>(d_p, d_source, d_wavelet, dt, 1, nxpad, nzpad, it);
save_d_vxpml<<<dimGridpmllr,dimBlockpmllr>>>(d_vx, d_vxspmllr, nxpad, nzpad, npml);
save_d_vzpml<<<dimGridpmltb,dimBlockpmltb>>>(d_vz, d_vzspmltb, nxpad, nzpad, npml);
save_d_ppmllr<<<dimGridpmllr,dimBlockpmllr>>>(d_p, d_pspmllr, nxpad, nzpad, npml);
save_d_ppmltb<<<dimGridpmltb,dimBlockpmltb>>>(d_p, d_pspmltb, nxpad, nzpad, npml);
cudaMemcpy(&vxspmllr[it*N*(nzpad-2*npml)], d_vxspmllr, N*(nzpad-2*npml)*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&vzspmltb[it*N*(nxpad-2*npml)], d_vzspmltb, N*(nxpad-2*npml)*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&pspmllr[it*N*(nzpad-2*npml)], d_pspmllr, N*(nzpad-2*npml)*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&pspmltb[it*N*(nxpad-2*npml)], d_pspmltb, N*(nxpad-2*npml)*sizeof(float), cudaMemcpyDeviceToHost);
}
// initialize the source-wavefield reconstruction
// save the last snapshot; it seeds the time-reversed reconstruction of the source wavefield
cudaMemcpy(d_ps, d_p, nxz*sizeof(float), cudaMemcpyDeviceToDevice);
// initial receiver wavefield
cudaMemset(d_p, 0, nxpad*nzpad*sizeof(float));
cudaMemset(d_vx, 0, (nxpad-1)*nzpad*sizeof(float));
cudaMemset(d_vz, 0, nxpad*(nzpad-1)*sizeof(float));
cudaMemset(d_pl1, 0, npml*nzpad*sizeof(float));
cudaMemset(d_pl2, 0, npml*nzpad*sizeof(float));
cudaMemset(d_pr1, 0, npml*nzpad*sizeof(float));
cudaMemset(d_pr2, 0, npml*nzpad*sizeof(float));
cudaMemset(d_pt1, 0, npml*(nxpad-2*npml)*sizeof(float));
cudaMemset(d_pt2, 0, npml*(nxpad-2*npml)*sizeof(float));
cudaMemset(d_pb1, 0, npml*(nxpad-2*npml)*sizeof(float));
cudaMemset(d_pb2, 0, npml*(nxpad-2*npml)*sizeof(float));
cudaMemset(d_vxspmllr,0, N*(nzpad-2*npml)*sizeof(float));
cudaMemset(d_vzspmltb,0, N*(nxpad-2*npml)*sizeof(float));
cudaMemset(d_pspmllr, 0, N*(nzpad-2*npml)*sizeof(float));
cudaMemset(d_pspmltb, 0, N*(nxpad-2*npml)*sizeof(float));
if (ismth)
{
velsmooth1d(vp, nzpad, nxpad, ismth);
velsmooth1d(rho, nzpad, nxpad, ismth);
cudaMemcpy(d_vp, vp, nxz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_rho, rho, nxz*sizeof(float), cudaMemcpyHostToDevice);
}
//===============================================================================
// prepare seismic profile
seisobs=Creat2dArray(nt0,nxlength0-noffset0);
seiscal=Creat2dArray(nt, nxlength-noffset);
sprintf(buffrecord,"./seisReal/%s",filename_shotgather);
offset_trace = (shotprofileId[2][ishot-1] - 1)*(240 + nt0*sizeof(float)) + 3600*sgyflag;
fp = fopen(buffrecord,"rb");
fseek(fp,offset_trace,0);
for (ix=0; ix<shotprofileId[1][ishot-1]; ix++)
{
fseek(fp,240L,1);
fread(tr,sizeof(float),nt0,fp);
float_to_float(tr, nt0, endianflag);
for (it=0; it<nt0; it++)
seisobs[it][ix] = tr[it];
}
fclose(fp);
Interpseis2d(seiscal,seisobs,nxlength-noffset,nxlength0-noffset0,nt,nt0,dt,dt0);
for (it=0;it<nt;it++)
for (ix=0;ix<nxlength-noffset;ix++)
record[it*(nxlength-noffset)+ix] = seiscal[it][ix];
free2dArray(seiscal, nt, nxlength-noffset);
free2dArray(seisobs, nt0, nxlength0-noffset0);
cudaMemcpy(d_record, record, (nxlength-noffset)*nt*sizeof(float), cudaMemcpyHostToDevice);
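// Mute the direct arrival in the observed record; the mute window is controlled by
// nw (samples) and tlength, derived from the user-supplied direct/direct0 parameters.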
cuda_mute2<<<dimGridrecord,dimBlock>>>(d_record, d_vp, nsx, nsz, nt, npml, nxlength, noffset, nw, tlength, fdom, dx2, dz2, _dt);
// implement RTM
// insert seismic record for the last time slice
cuda_insert_record2<<<(nxlength-noffset+127)/128,128>>>(d_p, &d_record[(nt-1)*(nxlength - noffset)], npml, nxlength, noffset, dt);
// back-propagate the recorded data
printf("source wavefiled prepared over...\nBegin backward......\n");
for (it=nt-2; it>=0; it--)
{
if (it%ntsnap == 0){
cudaMemcpy(p, d_p, nxz*sizeof(float), cudaMemcpyDeviceToHost);
printf("backward-the current shot: %d\ttime: %f s; wavefield: %.5e\n",ishot, it*dt, absMaxval1(p, nxz));}
cudaMemcpy(d_ps_pre, d_ps, nxz*sizeof(float), cudaMemcpyDeviceToDevice);
cudaMemcpy(d_p_pre, d_p, nxz*sizeof(float), cudaMemcpyDeviceToDevice);
// source wavefield 1: read vx vz pml; 2: calculate inner vx vz; 3: read p pml; 4; calculate inner p
// 1 -- 2
cudaMemcpy(d_vxspmllr, &vxspmllr[(it+1)*N*(nzpad-2*npml)], N*(nzpad-2*npml)*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_vzspmltb, &vzspmltb[(it+1)*N*(nxpad-2*npml)], N*(nxpad-2*npml)*sizeof(float), cudaMemcpyHostToDevice);
read_d_vxpml<<<dimGridpmllr,dimBlockpmllr>>>(d_vxs, d_vxspmllr, nxpad, nzpad, npml);
read_d_vzpml<<<dimGridpmltb,dimBlockpmltb>>>(d_vzs, d_vzspmltb, nxpad, nzpad, npml);
cuda_backward_vx<<<dimGridvxb,dimBlock>>>(d_ps, d_vxs, d_rho, d_diffcoef, _dtx, npml, nxpad, nzpad);
cuda_backward_vz<<<dimGridvzb,dimBlock>>>(d_ps, d_vzs, d_rho, d_diffcoef, _dtz, npml, nxpad, nzpad);
// 3 -- 4
cudaMemcpy(d_pspmllr, &pspmllr[it*N*(nzpad-2*npml)], N*(nzpad-2*npml)*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_pspmltb, &pspmltb[it*N*(nxpad-2*npml)], N*(nxpad-2*npml)*sizeof(float), cudaMemcpyHostToDevice);
read_d_ppmllr<<<dimGridpmllr,dimBlockpmllr>>>(d_ps, d_pspmllr, nxpad, nzpad, npml);
read_d_ppmltb<<<dimGridpmltb,dimBlockpmltb>>>(d_ps, d_pspmltb, nxpad, nzpad, npml);
cuda_backward_p<<<dimGridp,dimBlock>>>(d_ps, d_vxs, d_vzs, d_rho, d_vp, d_diffcoef, _dtx, _dtz, npml, nxpad, nzpad);
// insert source
cuda_add_source<<<dimGrid,dimBlock>>>(d_ps, d_source, d_wavelet, dt, 2, nxpad, nzpad, it);
// receiver wavefield
cuda_forward_vx<<<dimGridvx,dimBlock>>>(d_p, d_vx, d_rho, d_diffcoef, _dtx, npml, nxpad, nzpad);
cuda_forward_vz<<<dimGridvz,dimBlock>>>(d_p, d_vz, d_rho, d_diffcoef, _dtz, npml, nxpad, nzpad);
cuda_pml_vxlr<<<dimGridvxlr,dimBlock>>>(d_p, d_vx, d_rho, d_diffcoef, d_ddxVx, _dtx, dt, npml, nxpad, nzpad, d_norderx);
cuda_pml_vztb<<<dimGridvztb,dimBlock>>>(d_p, d_vz, d_rho, d_diffcoef, d_ddzVz, _dtz, dt, npml, nxpad, nzpad, d_norderz);
cuda_forward_p<<<dimGridp,dimBlock>>>(d_p, d_vx, d_vz, d_rho, d_vp, d_diffcoef, _dtx, _dtz, npml, nxpad, nzpad);
cuda_pml_plr<<<dimGridplr,dimBlock>>> (d_p, d_vx, d_vz, d_pl1, d_pl2, d_pr1, d_pr2, d_rho, d_vp, d_diffcoef, d_ddx, d_ddz,_dtx, _dtz, dt, npml, nxpad, nzpad, d_norder);
cuda_pml_ptb<<<dimGridptb,dimBlock>>> (d_p, d_vx, d_vz, d_pt1, d_pt2, d_pb1, d_pb2, d_rho, d_vp, d_diffcoef, d_ddz, _dtx, _dtz, dt, npml, nxpad, nzpad, d_norder);
cuda_pml_pconner<<<dimGridpcooner,dimBlock>>>(d_p, d_vx, d_vz, d_pl1, d_pl2, d_pr1, d_pr2, d_rho, d_vp, d_diffcoef, d_ddx, d_ddz,_dtx, _dtz, dt, npml, nxpad, nzpad, d_norder);
// inject the recorded data at the receiver positions
cuda_insert_record2<<<(nxlength-noffset+127)/128,128>>>(d_p, &d_record[it*(nxlength-noffset)], npml, nxlength, noffset, dt);
// imaging condition:
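// g1: zero-lag cross-correlation (d_illum accumulates source illumination);
// g2ud/g2du/g2lr/g2rl: up/down and left/right wavefield-decomposition terms;
// g31/g32: Poynting-vector based terms (g32 feeds the "true"-amplitude image);
// g4: energy-norm imaging condition.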
cuda_cross_coorelation<<<dimGridp,dimBlock>>>(d_ps,d_p,d_g1,d_illum,nxpad,nzpad,npml);
cuda_wavefield_decomposition<<<dimGridp,dimBlock>>>(d_ps,d_vxs,d_vzs,d_p,d_vx,d_vz,d_g2ud,d_g2du,d_g2lr,d_g2rl,nxpad,nzpad,npml);
cuda_poynting<<<dimGridp,dimBlock>>>(d_ps,d_vxs,d_vzs,d_p,d_vx,d_vz,d_vp,d_g31,d_g32,nxpad,nzpad,npml);
cuda_energynorm<<<dimGridp,dimBlock>>>(d_ps,d_ps_pre,d_p,d_p_pre,d_vp,d_g4,dx,dz,dt,nxpad,nzpad,npml);
}
// obtain g2 and g3
cuda_stack_udlr<<<dimGridp,dimBlock>>>(d_g2ud,d_g2du,d_g2lr,d_g2rl,d_g2,nxlength,nz);
cuda_stack_theta<<<dimGridp,dimBlock>>>(d_g31,d_g3,nxlength,nz);
cuda_stack_theta<<<dimGridp,dimBlock>>>(d_g32,d_g3_true,nxlength,nz);
// taper image
cuda_taper_image<<<dimGridp,dimBlock>>>(d_g1, d_taper, nxlength, nz);
cuda_taper_image<<<dimGridp,dimBlock>>>(d_g2, d_taper, nxlength, nz);
cuda_taper_image<<<dimGridp,dimBlock>>>(d_g3, d_taper, nxlength, nz);
cuda_taper_image<<<dimGridp,dimBlock>>>(d_g4, d_taper, nxlength, nz);
cuda_taper_image<<<dimGridp,dimBlock>>>(d_g3_true, d_taper, nxlength, nz);
cuda_taper_image<<<dimGridp,dimBlock>>>(d_illum, d_taper, nxlength, nz);
// single-shot normalized
cuda_applyics<<<dimGridp,dimBlock>>>(d_image2s,d_g1,d_illum,nx,nxlength,nx1,nz);
cuda_applyics<<<dimGridp,dimBlock>>>(d_image3s,d_g2,d_illum,nx,nxlength,nx1,nz);
cuda_applyics<<<dimGridp,dimBlock>>>(d_image4s,d_g3,d_illum,nx,nxlength,nx1,nz);
cuda_applyics<<<dimGridp,dimBlock>>>(d_image5s,d_g4,d_illum,nx,nxlength,nx1,nz);
// multi-shot normalized
cuda_applyic<<<dimGridp,dimBlock>>> (d_image1, d_g1,nx,nxlength,nx1,nz);
cuda_applyic<<<dimGridp,dimBlock>>> (d_image2m, d_g1,nx,nxlength,nx1,nz);
cuda_applyic<<<dimGridp,dimBlock>>> (d_image3m, d_g2,nx,nxlength,nx1,nz);
cuda_applyic<<<dimGridp,dimBlock>>> (d_image4m, d_g3,nx,nxlength,nx1,nz);
cuda_applyic<<<dimGridp,dimBlock>>> (d_image5m, d_g4,nx,nxlength,nx1,nz);
cuda_applyic<<<dimGridp,dimBlock>>> (d_imagetrue, d_g3_true,nx,nxlength,nx1,nz);
cuda_applyic<<<dimGridp,dimBlock>>> (d_Illum, d_illum,nx,nxlength,nx1,nz);
// output temp image
if ((ishot-1)%50 == 0)
{
// single-shot normalized
// Image2s
cudaMemcpy(image2s, d_image2s, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image2stemp.su");
Output1d(image2s, nz, nx, dx, buffrecord, 1);
// Image3s
cudaMemcpy(image3s, d_image3s, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image3stemp.su");
Output1d(image3s, nz, nx, dx, buffrecord, 1);
// Image4s
cudaMemcpy(image4s, d_image4s, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image4stemp.su");
Output1d(image4s, nz, nx, dx, buffrecord, 1);
// Image5s
cudaMemcpy(image5s, d_image5s, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image5stemp.su");
Output1d(image5s, nz, nx, dx, buffrecord, 1);
// multi-shot normalized
// Image1
cudaMemcpy(image1, d_image1, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image1temp.su");
Output1d(image1, nz, nx, dx, buffrecord, 1);
// Illum
cudaMemcpy(illum, d_Illum, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Illumtemp.su");
Output1d(illum, nz, nx, dx, buffrecord, 1);
// Image2m
cudaMemcpy(image2m, d_image2m, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image2mtemp.su");
Output1d(image2m, nz, nx, dx, buffrecord, 1);
// Image3m
cudaMemcpy(image3m, d_image3m, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image3mtemp.su");
Output1d(image3m, nz, nx, dx, buffrecord, 1);
// Image4m
cudaMemcpy(image4m, d_image4m, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image4mtemp.su");
Output1d(image4m, nz, nx, dx, buffrecord, 1);
// Image5m
cudaMemcpy(image5m, d_image5m, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Image5mtemp.su");
Output1d(image5m, nz, nx, dx, buffrecord, 1);
// Imagetrue
cudaMemcpy(imagetrue, d_imagetrue, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
sprintf(buffrecord,"./output/Imagetruetemp.su");
Output1d(imagetrue, nz, nx, dx, buffrecord, 1);
}
free(record);
free(vp);
free(rho);
free(vxspmllr);
free(vzspmltb);
free(pspmllr);
free(pspmltb);
free(p);
free(tr);
cudaFree(d_diffcoef);cudaFree(d_record);cudaFree(d_source);
cudaFree(d_vp);cudaFree(d_rho);
cudaFree(d_p); cudaFree(d_vx); cudaFree(d_vz); cudaFree(d_p_pre);
cudaFree(d_ps);cudaFree(d_vxs);cudaFree(d_vzs);cudaFree(d_ps_pre);
cudaFree(d_pl1);cudaFree(d_pl2);cudaFree(d_pr1);cudaFree(d_pr2);
cudaFree(d_pt1);cudaFree(d_pt2);cudaFree(d_pb1);cudaFree(d_pb2);
cudaFree(d_ddx);cudaFree(d_ddz);cudaFree(d_ddxVx);cudaFree(d_ddzVz);
cudaFree(d_norder);cudaFree(d_norderx);cudaFree(d_norderz);
cudaFree(d_vxspmllr);cudaFree(d_vzspmltb);cudaFree(d_pspmllr);cudaFree(d_pspmltb);
cudaFree(d_g1); cudaFree(d_g2); cudaFree(d_g3); cudaFree(d_g4);
cudaFree(d_g2ud);cudaFree(d_g2du);cudaFree(d_g2lr);cudaFree(d_g2rl);
cudaFree(d_g31); cudaFree(d_g32); cudaFree(d_g3_true);
cudaFree(d_illum);
cudaFree(d_taper);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&mstimer, start, stop);
printf("%d shot finished: %g (s)\n",ishot, mstimer*1.e-3);
}
cudaMemcpy(imagetrue, d_imagetrue, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
sprintf(buffrecord,"./output/%dImagetrue.su",idevice);
Output1d(imagetrue, nz, nx, dx, buffrecord, 1);
cudaMemcpy(image2s, d_image2s, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(image3s, d_image3s, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(image4s, d_image4s, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(image5s, d_image5s, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(illum, d_Illum, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(image1, d_image1, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(image2m, d_image2m, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(image3m, d_image3m, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(image4m, d_image4m, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(image5m, d_image5m, nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_image1);
cudaFree(d_image2s);cudaFree(d_image3s);cudaFree(d_image4s);cudaFree(d_image5s);
cudaFree(d_image2m);cudaFree(d_image3m);cudaFree(d_image4m);cudaFree(d_image5m);
cudaFree(d_Illum);
cudaFree(d_imagetrue);
free(imagetrue);
}
|
28918d2c71b72e0588a3d472f7a95177b70d8d5c.hip | // !!! This is a file automatically generated by hipify!!!
// cuda
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//#include <hip/device_functions.h>
#include <conio.h>
//local
#include "sort.h"
//#define SEQ_ONLY
// local
#include "CudaErrorCheck.cu"
__constant__ int SORT_BASE;
__constant__ int SORT_BASE_EXP;
int HOST_SORT_BASE = -1;
int HOST_SORT_BASE_EXP = -1;
template<typename T>
__host__ __device__ inline T ceil_div(T a, T b) {
return (a + b - 1) / b;
}
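// serial_radixsort / serial_insertionsort: each thread sorts one chunk of the key
// array on its own (LSD radix sort with a per-thread count buffer, or insertion
// sort), permuting the companion index array along with the keys.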
__global__ void serial_radixsort(uint32_t* arr, int max, int n_chunks, float chunk_width, int* index, uint32_t* temp_memory, int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_chunks) {
return;
}
// left and right bounds for sorting, inclusive and exclusive, respectively
int l_bound = idx == 0 ? chunk_width * idx : (int)(chunk_width * idx) + 1;
int r_bound = min(N, (int)(chunk_width * (idx + 1)));
//partial_radixsort_func(arr, max, l_bound, r_bound-l_bound, index, temp_memory, N);
//int exp_pow = 0;
//for (int exp = 1; max / exp > 0; exp *= SORT_BASE) {
for (int exp_pow = 0; max / (1 << exp_pow) > 0; exp_pow += SORT_BASE_EXP) {
//int output[MAX_POINTS]; // output array
//int output_index[MAX_POINTS]; // output array
int i;
#ifndef SEQ_ONLY
uint16_t count[(1 << MAX_SORT_BASE_EXP)] = { 0 };
assert((r_bound - l_bound) < (1 << 16));
#else
uint32_t count[(1 << MAX_SORT_BASE_EXP)] = { 0 };
#endif
// Store count of occurrences in count[]
for (i = l_bound; i < r_bound; i++) {
count[(arr[i] >> exp_pow) % SORT_BASE]++;
}
// Change count[i] so that count[i] now contains actual
// position of this digit in output[]
for (i = 1; i < SORT_BASE; i++) {
count[i] += count[i - 1];
}
// Build the output array
for (i = r_bound - 1; i >= l_bound; i--)
{
assert(i < N);
int key = (arr[i] >> exp_pow) % SORT_BASE;
temp_memory[l_bound + count[key] - 1] = arr[i];
temp_memory[l_bound + N + count[key] - 1] = index[i];
count[key]--;
}
// Copy the output array to arr[], so that arr[] now
// contains sorted numbers according to current digit
for (i = l_bound; i < r_bound; i++)
{
arr[i] = temp_memory[i];
index[i] = temp_memory[i + N];
}
//exp_pow += SORT_BASE_EXP;
}
}
__global__ void serial_insertionsort(uint32_t* arr, int max, int n_chunks, float chunk_width, int* index, int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_chunks) {
return;
}
// left and right bounds for sorting, inclusive and exclusive, respectively
int l_bound = idx == 0 ? chunk_width * idx : (int)(chunk_width * idx) + 1;
int r_bound = min(N, (int)(chunk_width * (idx + 1)));
for (int i = 1; i < r_bound - l_bound; ++i) {
int j = i;
while (j > 0 && arr[l_bound + j - 1] > arr[l_bound + j]) {
int old_j_arr = arr[l_bound + j];
arr[l_bound + j] = arr[l_bound + j - 1];
arr[l_bound + j - 1] = old_j_arr;
int old_j_idx = index[l_bound + j];
index[l_bound + j] = index[l_bound + j - 1];
index[l_bound + j - 1] = old_j_idx;
j--;
}
}
}
//// A function to do counting sort of arr[] according to
//// the digit represented by exp.
//__device__ void parallel_countSort(int *arr, const int start, const int n, const int offset, int exp, int *index, int *output)
//{
// int size = start + n;
//
// //int output[MAX_POINTS]; // output array
// //int output_index[MAX_POINTS]; // output array
// int i, count[10] = { 0 };
//
// // Store count of occurrences in count[]
// for (i = start; i < size; i++)
// count[(arr[i] / exp) % 10]++;
//
// // Change count[i] so that count[i] now contains actual
// // position of this digit in output[]
// for (i = 1; i < 10; i++)
// count[i] += count[i - 1];
//
// // Build the output array
// for (i = size - 1; i >= start; i--)
// {
// output[start + count[(arr[i] / exp) % 10] - 1] = arr[i];
// output[offset + start + size + count[(arr[i] / exp) % 10] - 1] = index[i];
// count[(arr[i] / exp) % 10]--;
// }
//
// // Copy the output array to arr[], so that arr[] now
// // contains sorted numbers according to current digit
// for (i = start; i < size; i++)
// {
// arr[i] = output[i];
// index[i] = output[offset + i + size];
// }
//}
//
//
///*!
//Radix sort
//The main function that sorts a part of arr[], starting at start, over a range of size n using countsort
//NOTE, this search cannot run in parallel
//@param arr, the integer array that shall be sorted
//@param start, the start index at which to sort each sub-array
//@param max, the overall number of all points to sort by this parallel call of radix sort.
//@param index - an auxiliary index array that keeps the index of each point.
//@param temp_memory - temporary memory to store the output data. It must be of size 2 * n
//*/
//__global__ void parallel_radixsort(int* arr, const int n, const int max, int *index, int *temp_memory)
//{
// int i = blockIdx.x;// * blockDim.x + threadIdx.x;
//
// int start = i * n;
// int offset = max;
//
// // Find the maximum number to know number of digits
// int m = partial_getMax(arr, start, n);
//
// // Do counting sort for every digit. Note that instead
// // of passing digit number, exp is passed. exp is 10^i
// // where i is current digit number
// for (int exp = 1; m / exp > 0; exp *= 10)
// parallel_countSort(arr, start, n, offset, exp, index, temp_memory);
//
// //for(int i=start;i<n+start; i++)
// // index[i] = offset;
//}
//////////////////////////////////////////////////////////////////////////////////////////////////
// Parallel chunked radix sort
//////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void printChunkHist(int* hist, int chunk_id, int chunk_width, int tpb) {
int blocks_per_chunk = 1 + (chunk_width + 1 + tpb - 1) / tpb; // ceil(chunk_width / tpb)
int sums[32] = {0};
for (int d = 0; d < SORT_BASE; d++) {
printf("digit %02d: ", d);
for (int b = 0; b < blocks_per_chunk; b++) {
int idx = (chunk_id * blocks_per_chunk * SORT_BASE) + b + (d * blocks_per_chunk);
sums[b] += hist[idx];
printf("%03d ", hist[idx]);
}
printf("\n");
}
printf("sums ");
for (int i = 0; i < 32; i++) {
printf("%03d ", sums[i]);
}
printf("\n");
}
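// global_hist layout: one SORT_BASE-counter histogram per (chunk, block) pair,
// indexed as chunk*blocks_per_chunk*SORT_BASE + digit*blocks_per_chunk + block.
// ChunkedSorter::sort runs a single inclusive prefix sum (DeviceScan) over this
// flat array so that distributeCounts can walk each block back-to-front and
// scatter elements into their sorted positions.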
// Parallel chunked count
__global__ void PC_count(uint32_t* arr, int* global_hist, int N, float chunk_width, int tpb, int exp_pow) {
extern __shared__ int block_hist[];
// Initialize counts to 0
if (threadIdx.x < SORT_BASE) {
block_hist[threadIdx.x] = 0;
block_hist[threadIdx.x + SORT_BASE] = 0;
}
__syncthreads();
// Index in the array for this thread
int pt_idx = threadIdx.x + blockDim.x * blockIdx.x;
// Chunk for this point
int this_chunk = pt_idx / (chunk_width);
// Chunk for the first element in this block
int base_chunk = ((blockDim.x * blockIdx.x)) / chunk_width;
// Relative chunk for this block, for indexing into the histogram
int relative_chunk = this_chunk - base_chunk;
// If this point index exceeds the array bounds or is part of a previously created node, don't process it
bool splitting = ceil(pt_idx / chunk_width) < ((pt_idx + 1) / chunk_width) && pt_idx != 0;
if (pt_idx < N && !splitting) {
// Add this to the block-local histogram, for the correct chunk
int hist_idx_chunk0 = (arr[pt_idx] >> exp_pow) % SORT_BASE;
atomicAdd(&block_hist[hist_idx_chunk0 + (relative_chunk * SORT_BASE)], 1);
}
__syncthreads();
//int blocks_per_chunk = 1 + (chunk_width + tpb - 1) / tpb; // ceil(chunk_width / tpb)
int blocks_per_chunk = 1 + (int)ceil(chunk_width / tpb);
//V2: int blocks_per_chunk = ceil_div((int)chunk_width - tpb, 2*tpb) * 2 + (((int)chunk_width & tpb - 1) != 0);
// Index of first point in this chunk
//int chunk_first_idx = this_chunk == 0 ? chunk_width * this_chunk : (int)(chunk_width * this_chunk) + 1;
// V2: int chunk_first_idx = base_chunk == 0 ? chunk_width * base_chunk : (int)(chunk_width * base_chunk) + 1;
int chunk_first_idx = this_chunk == 0 ? chunk_width * base_chunk : (int)(chunk_width * base_chunk) + 1;
// Block index of the first block in this chunk
int chunk_first_block = chunk_first_idx / tpb;
int relative_block_idx = blockIdx.x - chunk_first_block;
// Point index at the end of this block
int pt_end_block = (blockIdx.x + 1) * tpb - 1;
// Chunk at the end of this block
int chunk_end_block = pt_end_block / chunk_width;
// Add local block histogram to global histogram
if (threadIdx.x < SORT_BASE) {
int global_hist_start_b0 = (base_chunk * blocks_per_chunk * SORT_BASE) + relative_block_idx;
atomicAdd(&global_hist[global_hist_start_b0 + (threadIdx.x * blocks_per_chunk)], block_hist[threadIdx.x]);
//V2: global_hist[global_hist_start_b0 + (threadIdx.x * blocks_per_chunk)] = block_hist[threadIdx.x];
// TODO: Will this overflow the memory for the last chunk? (&& blockIdx.x < blockDim.x?)
if (chunk_end_block != base_chunk) {
int global_hist_start_b1 = ((base_chunk + 1) * blocks_per_chunk * SORT_BASE);
//V2: int global_hist_start_b1 = global_hist_start_b0 + 1;
atomicAdd(&global_hist[global_hist_start_b1 + (threadIdx.x * blocks_per_chunk)], block_hist[threadIdx.x + SORT_BASE]);
//V2: global_hist[global_hist_start_b1 + (threadIdx.x * blocks_per_chunk)] = block_hist[threadIdx.x + SORT_BASE];
}
}
}
__global__ void distributeCounts(const uint32_t* __restrict__ arr, int* __restrict__ global_hist, int* __restrict__ index, uint32_t* __restrict__ output, int N, float chunk_width, int tpb, int exp_pow) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
//int blocks_per_chunk = 1 + (chunk_width + tpb - 1) / tpb; // ceil(chunk_width / tpb)
int blocks_per_chunk = 1 + (int)ceil(chunk_width / tpb);
// V2: int blocks_per_chunk = ceil_div((int)chunk_width - tpb, 2 * tpb) * 2 + (((int)chunk_width & tpb - 1) != 0);
//int total_chunks = (N + chunk_width - 1) / chunk_width; // ceil(N / chunk_width)
int total_chunks = ceil(N / chunk_width);
if (idx >= total_chunks * blocks_per_chunk) {
return;
}
int chunk_idx = idx / blocks_per_chunk;
// Endpoint of the histogram range (exclusive)
// Subtract 1 because the final point is part of a splitting node
int chunk_end = min(N, (int) (chunk_width * (chunk_idx + 1)));
int chunk_start = idx == 0 ? chunk_width * chunk_idx : (int)(chunk_width * chunk_idx) + 1;
// Block index within this chunk
int relative_block_idx = idx % blocks_per_chunk;
// Block index relative to the entire array (equivalent to block indices from PC_count)
int global_block_idx = (chunk_start / tpb) + relative_block_idx;
int hist_start = max(chunk_start, global_block_idx * tpb);
int hist_end = min(chunk_end, (global_block_idx + 1) * tpb);
int global_hist_start = (chunk_idx * blocks_per_chunk * SORT_BASE) + relative_block_idx;
for (int i = hist_end - 1; i >= hist_start; i--) {
int key = (arr[i] >> exp_pow) % SORT_BASE;
// Access the summed histogram. Add chunk_idx to account for the points that are part of a node, which don't get counted in the histogram
// After accessing the value, decrement the value in global_hist (The -- operator)
int into_idx = (--global_hist[global_hist_start + key * blocks_per_chunk]) + chunk_idx;
output[into_idx] = arr[i];
output[N + into_idx] = index[i];
}
}
/*
Moves points from temporary array back to normal array, and updates the index array
*/
__global__ void move_temp(uint32_t* arr, uint32_t* temp_arr, int* index, const int N) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= N) {
return;
}
arr[idx] = temp_arr[idx];
index[idx] = temp_arr[idx + N];
}
__global__ void print_chunk(uint32_t* arr, int chunk_idx, int chunk_width) {
int chunk_start = (chunk_width) * chunk_idx;
for (int i = 0; i < chunk_width-1; i++) {
printf("%d, ", arr[chunk_start + i]);
}
printf("\n");
}
ChunkedSorter::ChunkedSorter(int n_points, int min_tpb){// : g_allocator(true) {
// The most global memory we will need for per-block, per-chunk histograms
// Worst case is every block covers two chunks. Se have N/TPB blocks, so we
// will need two histograms per block, and each histogram is <sort_base> integers
// TODO: Can change histogram from ints to short or byte, depending on TPB
int max_sort_base = 1 << MAX_SORT_BASE_EXP;
size_t max_hist_items = (int)ceil((double)n_points / min_tpb) * 2 * max_sort_base;
hist_memory_size = max_hist_items * sizeof(int);
CudaSafeCall( hipMalloc(&d_blocked_hists, hist_memory_size) );
// Calculate necessary size of temporary memory for CUB scan
hipcub::DeviceScan::InclusiveSum(d_scan_temp, scan_temp_size, d_blocked_hists, d_blocked_hists, max_hist_items);
CudaSafeCall(hipMalloc(&d_scan_temp, scan_temp_size));
// TODO: necessary?
hipDeviceSynchronize();
}
ChunkedSorter::~ChunkedSorter() {
CudaSafeCall(hipFree(d_blocked_hists));
CudaSafeCall(hipFree(d_scan_temp));
}
void ChunkedSorter::setBase(int exp_pow) {
int sort_base = 1 << exp_pow;
if (HOST_SORT_BASE_EXP == exp_pow) {
// Already set
return;
}
hipMemcpyToSymbol(SORT_BASE_EXP, &exp_pow, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(SORT_BASE, &sort_base, sizeof(int), 0, hipMemcpyHostToDevice);
CudaCheckError();
HOST_SORT_BASE_EXP = exp_pow;
HOST_SORT_BASE = sort_base;
}
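// Typical call sequence (illustrative sketch only; d_keys, d_index and d_temp are
// placeholder device buffers sized as this file expects, i.e. d_temp holds 2*N uint32_t):
//   ChunkedSorter sorter(n_points, /*min_tpb=*/128);
//   sorter.setBase(4);   // radix 2^4 = 16
//   sorter.sort(d_keys, d_index, d_temp, n_points, level, max_key, /*tpb=*/128);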
void ChunkedSorter::sort(uint32_t* arr, int* d_index, uint32_t* temp_memory, int N, int level, int max_elem, int tpb) {
assert(tpb >= HOST_SORT_BASE);
float chunk_width = (float)N / (1 << level);
int blocks_per_chunk = 1 + (int)ceil(chunk_width / tpb);
// V2: int blocks_per_chunk = ceil_div((int)chunk_width - tpb, 2 * tpb) * 2 + (((int)chunk_width & tpb - 1) != 0);
int n_global_chunks = blocks_per_chunk * (1 << level);
int n_hist_items = n_global_chunks * HOST_SORT_BASE;
size_t used_hist_size = n_hist_items * sizeof(int);
// Allocate temporary storage for parallel scan
//void* d_temp_storage = NULL;
//size_t temp_storage_bytes = 0;
//
//g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes);
// Do counting sort for every digit. Note that instead of passing the digit
// number, exp is passed: exp is SORT_BASE^i, where i is the current digit number.
int exp_pow = 0;
for (int exp = 1; max_elem / exp > 0; exp *= HOST_SORT_BASE) {
// Reset histogram counts to 0
CudaSafeCall(hipMemset(d_blocked_hists, 0, used_hist_size));
//hipDeviceSynchronize();
// Count occurrences of digits
size_t smem_size = (1 << HOST_SORT_BASE_EXP) * 2 * sizeof(int);
hipLaunchKernelGGL(( PC_count), dim3((N + tpb - 1) / tpb), dim3(tpb), smem_size , 0, arr, d_blocked_hists, N, chunk_width, tpb, exp_pow);
//hipDeviceSynchronize();
CudaCheckError();
// Run the inclusive prefix sum (hipcub/CUB DeviceScan) over the per-block, per-chunk histograms
hipcub::DeviceScan::InclusiveSum(d_scan_temp, scan_temp_size, d_blocked_hists, d_blocked_hists, n_hist_items);
//hipDeviceSynchronize();
CudaCheckError();
hipLaunchKernelGGL(( distributeCounts), dim3((n_global_chunks + tpb - 1) / tpb), dim3(tpb), 0, 0, arr, d_blocked_hists, d_index, temp_memory, N, chunk_width, tpb, exp_pow);
//hipDeviceSynchronize();
CudaCheckError();
hipLaunchKernelGGL(( move_temp), dim3((N + tpb - 1) / tpb), dim3(tpb), 0, 0, arr, temp_memory, d_index, N);
//hipDeviceSynchronize();
CudaCheckError();
exp_pow += HOST_SORT_BASE_EXP;
}
} | 28918d2c71b72e0588a3d472f7a95177b70d8d5c.cu |
// cuda
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//#include <device_functions.h>
#include <conio.h>
//local
#include "sort.h"
//#define SEQ_ONLY
// local
#include "CudaErrorCheck.cu"
__constant__ int SORT_BASE;
__constant__ int SORT_BASE_EXP;
int HOST_SORT_BASE = -1;
int HOST_SORT_BASE_EXP = -1;
template<typename T>
__host__ __device__ inline T ceil_div(T a, T b) {
return (a + b - 1) / b;
}
__global__ void serial_radixsort(uint32_t* arr, int max, int n_chunks, float chunk_width, int* index, uint32_t* temp_memory, int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_chunks) {
return;
}
// left and right bounds for sorting, inclusive and exclusive, respectively
int l_bound = idx == 0 ? chunk_width * idx : (int)(chunk_width * idx) + 1;
int r_bound = min(N, (int)(chunk_width * (idx + 1)));
//partial_radixsort_func(arr, max, l_bound, r_bound-l_bound, index, temp_memory, N);
//int exp_pow = 0;
//for (int exp = 1; max / exp > 0; exp *= SORT_BASE) {
for (int exp_pow = 0; max / (1 << exp_pow) > 0; exp_pow += SORT_BASE_EXP) {
//int output[MAX_POINTS]; // output array
//int output_index[MAX_POINTS]; // output array
int i;
#ifndef SEQ_ONLY
uint16_t count[(1 << MAX_SORT_BASE_EXP)] = { 0 };
assert((r_bound - l_bound) < (1 << 16));
#else
uint32_t count[(1 << MAX_SORT_BASE_EXP)] = { 0 };
#endif
// Store count of occurrences in count[]
for (i = l_bound; i < r_bound; i++) {
count[(arr[i] >> exp_pow) % SORT_BASE]++;
}
// Change count[i] so that count[i] now contains actual
// position of this digit in output[]
for (i = 1; i < SORT_BASE; i++) {
count[i] += count[i - 1];
}
// Build the output array
for (i = r_bound - 1; i >= l_bound; i--)
{
assert(i < N);
int key = (arr[i] >> exp_pow) % SORT_BASE;
temp_memory[l_bound + count[key] - 1] = arr[i];
temp_memory[l_bound + N + count[key] - 1] = index[i];
count[key]--;
}
// Copy the output array to arr[], so that arr[] now
// contains sorted numbers according to current digit
for (i = l_bound; i < r_bound; i++)
{
arr[i] = temp_memory[i];
index[i] = temp_memory[i + N];
}
//exp_pow += SORT_BASE_EXP;
}
}
__global__ void serial_insertionsort(uint32_t* arr, int max, int n_chunks, float chunk_width, int* index, int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_chunks) {
return;
}
// left and right bounds for sorting, inclusive and exclusive, respectively
int l_bound = idx == 0 ? chunk_width * idx : (int)(chunk_width * idx) + 1;
int r_bound = min(N, (int)(chunk_width * (idx + 1)));
for (int i = 1; i < r_bound - l_bound; ++i) {
int j = i;
while (j > 0 && arr[l_bound + j - 1] > arr[l_bound + j]) {
int old_j_arr = arr[l_bound + j];
arr[l_bound + j] = arr[l_bound + j - 1];
arr[l_bound + j - 1] = old_j_arr;
int old_j_idx = index[l_bound + j];
index[l_bound + j] = index[l_bound + j - 1];
index[l_bound + j - 1] = old_j_idx;
j--;
}
}
}
//// A function to do counting sort of arr[] according to
//// the digit represented by exp.
//__device__ void parallel_countSort(int *arr, const int start, const int n, const int offset, int exp, int *index, int *output)
//{
// int size = start + n;
//
// //int output[MAX_POINTS]; // output array
// //int output_index[MAX_POINTS]; // output array
// int i, count[10] = { 0 };
//
// // Store count of occurrences in count[]
// for (i = start; i < size; i++)
// count[(arr[i] / exp) % 10]++;
//
// // Change count[i] so that count[i] now contains actual
// // position of this digit in output[]
// for (i = 1; i < 10; i++)
// count[i] += count[i - 1];
//
// // Build the output array
// for (i = size - 1; i >= start; i--)
// {
// output[start + count[(arr[i] / exp) % 10] - 1] = arr[i];
// output[offset + start + size + count[(arr[i] / exp) % 10] - 1] = index[i];
// count[(arr[i] / exp) % 10]--;
// }
//
// // Copy the output array to arr[], so that arr[] now
// // contains sorted numbers according to current digit
// for (i = start; i < size; i++)
// {
// arr[i] = output[i];
// index[i] = output[offset + i + size];
// }
//}
//
//
///*!
//Radix sort
//The main function that sorts a part of arr[], starting at start, over a range of size n using countsort
//NOTE, this search cannot run in parallel
//@param arr, the integer array that shall be sorted
//@param start, the start index at which to sort each sub-array
//@param max, the overall number of all points to sort by this parallel call of radix sort.
//@param index - an auxiliary index array that keeps the index of each point.
//@param temp_memory - temporary memory to store the output data. It must be of size 2 * n
//*/
//__global__ void parallel_radixsort(int* arr, const int n, const int max, int *index, int *temp_memory)
//{
// int i = blockIdx.x;// * blockDim.x + threadIdx.x;
//
// int start = i * n;
// int offset = max;
//
// // Find the maximum number to know number of digits
// int m = partial_getMax(arr, start, n);
//
// // Do counting sort for every digit. Note that instead
// // of passing digit number, exp is passed. exp is 10^i
// // where i is current digit number
// for (int exp = 1; m / exp > 0; exp *= 10)
// parallel_countSort(arr, start, n, offset, exp, index, temp_memory);
//
// //for(int i=start;i<n+start; i++)
// // index[i] = offset;
//}
//////////////////////////////////////////////////////////////////////////////////////////////////
// Parallel chunked radix sort
//////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void printChunkHist(int* hist, int chunk_id, int chunk_width, int tpb) {
int blocks_per_chunk = 1 + (chunk_width + 1 + tpb - 1) / tpb; // ceil(chunk_width / tpb)
int sums[32] = {0};
for (int d = 0; d < SORT_BASE; d++) {
printf("digit %02d: ", d);
for (int b = 0; b < blocks_per_chunk; b++) {
int idx = (chunk_id * blocks_per_chunk * SORT_BASE) + b + (d * blocks_per_chunk);
sums[b] += hist[idx];
printf("%03d ", hist[idx]);
}
printf("\n");
}
printf("sums ");
for (int i = 0; i < 32; i++) {
printf("%03d ", sums[i]);
}
printf("\n");
}
// Parallel chunked count
__global__ void PC_count(uint32_t* arr, int* global_hist, int N, float chunk_width, int tpb, int exp_pow) {
extern __shared__ int block_hist[];
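// Shared memory holds two SORT_BASE-wide histograms: relative_chunk selects slot 0
// (the chunk this block starts in) or slot 1 (the following chunk), matching the
// constructor's assumption that a block never straddles more than two chunks.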
// Initialize counts to 0
if (threadIdx.x < SORT_BASE) {
block_hist[threadIdx.x] = 0;
block_hist[threadIdx.x + SORT_BASE] = 0;
}
__syncthreads();
// Index in the array for this thread
int pt_idx = threadIdx.x + blockDim.x * blockIdx.x;
// Chunk for this point
int this_chunk = pt_idx / (chunk_width);
// Chunk for the first element in this block
int base_chunk = ((blockDim.x * blockIdx.x)) / chunk_width;
// Relative chunk for this block, for indexing into the histogram
int relative_chunk = this_chunk - base_chunk;
// If this point index exceeds the array bounds or is part of a previously created node, don't process it
bool splitting = ceil(pt_idx / chunk_width) < ((pt_idx + 1) / chunk_width) && pt_idx != 0;
if (pt_idx < N && !splitting) {
// Add this to the block-local histogram, for the correct chunk
int hist_idx_chunk0 = (arr[pt_idx] >> exp_pow) % SORT_BASE;
atomicAdd(&block_hist[hist_idx_chunk0 + (relative_chunk * SORT_BASE)], 1);
}
__syncthreads();
//int blocks_per_chunk = 1 + (chunk_width + tpb - 1) / tpb; // ceil(chunk_width / tpb)
int blocks_per_chunk = 1 + (int)ceil(chunk_width / tpb);
//V2: int blocks_per_chunk = ceil_div((int)chunk_width - tpb, 2*tpb) * 2 + (((int)chunk_width & tpb - 1) != 0);
// Index of first point in this chunk
//int chunk_first_idx = this_chunk == 0 ? chunk_width * this_chunk : (int)(chunk_width * this_chunk) + 1;
// V2: int chunk_first_idx = base_chunk == 0 ? chunk_width * base_chunk : (int)(chunk_width * base_chunk) + 1;
int chunk_first_idx = this_chunk == 0 ? chunk_width * base_chunk : (int)(chunk_width * base_chunk) + 1;
// Block index of the first block in this chunk
int chunk_first_block = chunk_first_idx / tpb;
int relative_block_idx = blockIdx.x - chunk_first_block;
// Point index at the end of this block
int pt_end_block = (blockIdx.x + 1) * tpb - 1;
// Chunk at the end of this block
int chunk_end_block = pt_end_block / chunk_width;
// Add local block histogram to global histogram
if (threadIdx.x < SORT_BASE) {
int global_hist_start_b0 = (base_chunk * blocks_per_chunk * SORT_BASE) + relative_block_idx;
atomicAdd(&global_hist[global_hist_start_b0 + (threadIdx.x * blocks_per_chunk)], block_hist[threadIdx.x]);
//V2: global_hist[global_hist_start_b0 + (threadIdx.x * blocks_per_chunk)] = block_hist[threadIdx.x];
// TODO: Will this overflow the memory for the last chunk? (&& blockIdx.x < blockDim.x?)
if (chunk_end_block != base_chunk) {
int global_hist_start_b1 = ((base_chunk + 1) * blocks_per_chunk * SORT_BASE);
//V2: int global_hist_start_b1 = global_hist_start_b0 + 1;
atomicAdd(&global_hist[global_hist_start_b1 + (threadIdx.x * blocks_per_chunk)], block_hist[threadIdx.x + SORT_BASE]);
//V2: global_hist[global_hist_start_b1 + (threadIdx.x * blocks_per_chunk)] = block_hist[threadIdx.x + SORT_BASE];
}
}
}
__global__ void distributeCounts(const uint32_t* __restrict__ arr, int* __restrict__ global_hist, int* __restrict__ index, uint32_t* __restrict__ output, int N, float chunk_width, int tpb, int exp_pow) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
//int blocks_per_chunk = 1 + (chunk_width + tpb - 1) / tpb; // ceil(chunk_width / tpb)
int blocks_per_chunk = 1 + (int)ceil(chunk_width / tpb);
// V2: int blocks_per_chunk = ceil_div((int)chunk_width - tpb, 2 * tpb) * 2 + (((int)chunk_width & tpb - 1) != 0);
//int total_chunks = (N + chunk_width - 1) / chunk_width; // ceil(N / chunk_width)
int total_chunks = ceil(N / chunk_width);
if (idx >= total_chunks * blocks_per_chunk) {
return;
}
int chunk_idx = idx / blocks_per_chunk;
// Endpoint of the histogram range (exclusive)
// Subtract 1 because the final point is part of a splitting node
int chunk_end = min(N, (int) (chunk_width * (chunk_idx + 1)));
int chunk_start = idx == 0 ? chunk_width * chunk_idx : (int)(chunk_width * chunk_idx) + 1;
// Block index within this chunk
int relative_block_idx = idx % blocks_per_chunk;
// Block index relative to the entire array (equivalent to block indices from PC_count)
int global_block_idx = (chunk_start / tpb) + relative_block_idx;
int hist_start = max(chunk_start, global_block_idx * tpb);
int hist_end = min(chunk_end, (global_block_idx + 1) * tpb);
int global_hist_start = (chunk_idx * blocks_per_chunk * SORT_BASE) + relative_block_idx;
for (int i = hist_end - 1; i >= hist_start; i--) {
int key = (arr[i] >> exp_pow) % SORT_BASE;
// Access the summed histogram. Add chunk_idx to account for the points that are part of a node, which don't get counted in the histogram
// The prefix -- decrements the running count in global_hist first, then uses the decremented value as the destination offset
int into_idx = (--global_hist[global_hist_start + key * blocks_per_chunk]) + chunk_idx;
output[into_idx] = arr[i];
output[N + into_idx] = index[i];
}
}
/*
Moves points from temporary array back to normal array, and updates the index array
*/
__global__ void move_temp(uint32_t* arr, uint32_t* temp_arr, int* index, const int N) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= N) {
return;
}
arr[idx] = temp_arr[idx];
index[idx] = temp_arr[idx + N];
}
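/*
Debug helper: prints the first chunk_width - 1 values of the given chunk from the device
*/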
__global__ void print_chunk(uint32_t* arr, int chunk_idx, int chunk_width) {
int chunk_start = (chunk_width) * chunk_idx;
for (int i = 0; i < chunk_width-1; i++) {
printf("%d, ", arr[chunk_start + i]);
}
printf("\n");
}
ChunkedSorter::ChunkedSorter(int n_points, int min_tpb){// : g_allocator(true) {
// The most global memory we will need for per-block, per-chunk histograms
// Worst case is every block covers two chunks. We have N/TPB blocks, so we
// will need two histograms per block, and each histogram is <sort_base> integers
// TODO: Can change histogram from ints to short or byte, depending on TPB
int max_sort_base = 1 << MAX_SORT_BASE_EXP;
size_t max_hist_items = (int)ceil((double)n_points / min_tpb) * 2 * max_sort_base;
hist_memory_size = max_hist_items * sizeof(int);
CudaSafeCall( cudaMalloc(&d_blocked_hists, hist_memory_size) );
// Calculate necessary size of temporary memory for CUB scan
cub::DeviceScan::InclusiveSum(d_scan_temp, scan_temp_size, d_blocked_hists, d_blocked_hists, max_hist_items);
CudaSafeCall(cudaMalloc(&d_scan_temp, scan_temp_size));
// TODO: necessary?
cudaDeviceSynchronize();
}
ChunkedSorter::~ChunkedSorter() {
CudaSafeCall(cudaFree(d_blocked_hists));
CudaSafeCall(cudaFree(d_scan_temp));
}
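/*
Updates the device-side SORT_BASE / SORT_BASE_EXP symbols to the requested radix
(2^exp_pow); returns early if the base is already current.
*/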
void ChunkedSorter::setBase(int exp_pow) {
int sort_base = 1 << exp_pow;
if (HOST_SORT_BASE_EXP == exp_pow) {
// Already set
return;
}
cudaMemcpyToSymbol(SORT_BASE_EXP, &exp_pow, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(SORT_BASE, &sort_base, sizeof(int), 0, cudaMemcpyHostToDevice);
CudaCheckError();
HOST_SORT_BASE_EXP = exp_pow;
HOST_SORT_BASE = sort_base;
}
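/*
Sorts every chunk of the array independently with an LSD radix sort: one
count / scan / scatter pass per base-SORT_BASE digit of max_elem, staging
results through temp_memory and copying them back after each pass.
*/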
void ChunkedSorter::sort(uint32_t* arr, int* d_index, uint32_t* temp_memory, int N, int level, int max_elem, int tpb) {
assert(tpb >= HOST_SORT_BASE);
float chunk_width = (float)N / (1 << level);
int blocks_per_chunk = 1 + (int)ceil(chunk_width / tpb);
// V2: int blocks_per_chunk = ceil_div((int)chunk_width - tpb, 2 * tpb) * 2 + (((int)chunk_width & tpb - 1) != 0);
int n_global_chunks = blocks_per_chunk * (1 << level);
int n_hist_items = n_global_chunks * HOST_SORT_BASE;
size_t used_hist_size = n_hist_items * sizeof(int);
// Allocate temporary storage for parallel scan
//void* d_temp_storage = NULL;
//size_t temp_storage_bytes = 0;
//
//g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes);
// Do one counting-sort pass per digit. Instead of the digit number itself,
// exp (= SORT_BASE^i for digit i) drives the loop, while exp_pow carries the
// corresponding bit shift that is passed to the kernels.
int exp_pow = 0;
for (int exp = 1; max_elem / exp > 0; exp *= HOST_SORT_BASE) {
// Reset histogram counts to 0
CudaSafeCall(cudaMemset(d_blocked_hists, 0, used_hist_size));
//cudaDeviceSynchronize();
// Count occurrences of each digit, per block and per chunk
size_t smem_size = (1 << HOST_SORT_BASE_EXP) * 2 * sizeof(int);
PC_count<<<(N + tpb - 1) / tpb, tpb, smem_size >>>(arr, d_blocked_hists, N, chunk_width, tpb, exp_pow);
//cudaDeviceSynchronize();
CudaCheckError();
// Run an inclusive prefix scan over the per-block histograms to turn counts into scatter offsets
cub::DeviceScan::InclusiveSum(d_scan_temp, scan_temp_size, d_blocked_hists, d_blocked_hists, n_hist_items);
//cudaDeviceSynchronize();
CudaCheckError();
distributeCounts<<<(n_global_chunks + tpb - 1) / tpb, tpb>>>(arr, d_blocked_hists, d_index, temp_memory, N, chunk_width, tpb, exp_pow);
//cudaDeviceSynchronize();
CudaCheckError();
move_temp<<<(N + tpb - 1) / tpb, tpb>>>(arr, temp_memory, d_index, N);
//cudaDeviceSynchronize();
CudaCheckError();
exp_pow += HOST_SORT_BASE_EXP;
}
} |
33b2345ce08b65c7342835c486187d5890788aa8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "compute/pow/pow_internal.h"
#define BLK_SIZE 1024
namespace magmadnn {
namespace internal {
template <typename T>
__global__ void kernel_pow_grad_device(T *x, int power, T *grad, T *out, bool grad_is_scalar, unsigned int size) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += stride) {
out[i] = grad[(grad_is_scalar) ? 0 : i] * ((T) power) * powf(x[i], power - 1);
}
}
template <>
__global__ void kernel_pow_grad_device(int *x, int power, int *grad, int *out, bool grad_is_scalar, unsigned int size) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += stride) {
out[i] = grad[(grad_is_scalar) ? 0 : i] * ((int) power) * ((int) powf((float) x[i], power - 1));
}
}
template <typename T>
void pow_grad_device(Tensor<T> *x, int power, Tensor<T> *grad, Tensor<T> *out) {
unsigned int size = out->get_size();
hipLaunchKernelGGL(( kernel_pow_grad_device), dim3((size + BLK_SIZE - 1) / BLK_SIZE), dim3(BLK_SIZE), 0, 0,
x->get_ptr(), power, grad->get_ptr(), out->get_ptr(), (grad->get_size() == 1), size);
}
template void pow_grad_device(Tensor<int> *x, int power, Tensor<int> *input, Tensor<int> *out);
template void pow_grad_device(Tensor<float> *x, int power, Tensor<float> *input, Tensor<float> *out);
template void pow_grad_device(Tensor<double> *x, int power, Tensor<double> *input, Tensor<double> *out);
} // namespace internal
} // namespace magmadnn
#undef BLK_SIZE
| 33b2345ce08b65c7342835c486187d5890788aa8.cu |
#include "compute/pow/pow_internal.h"
#define BLK_SIZE 1024
namespace magmadnn {
namespace internal {
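// Element-wise gradient of x^power: out[i] = grad * power * x[i]^(power - 1),
// where grad may be a single scalar broadcast to all elements (grid-stride loop).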
template <typename T>
__global__ void kernel_pow_grad_device(T *x, int power, T *grad, T *out, bool grad_is_scalar, unsigned int size) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += stride) {
out[i] = grad[(grad_is_scalar) ? 0 : i] * ((T) power) * powf(x[i], power - 1);
}
}
template <>
__global__ void kernel_pow_grad_device(int *x, int power, int *grad, int *out, bool grad_is_scalar, unsigned int size) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += stride) {
out[i] = grad[(grad_is_scalar) ? 0 : i] * ((int) power) * ((int) powf((float) x[i], power - 1));
}
}
template <typename T>
void pow_grad_device(Tensor<T> *x, int power, Tensor<T> *grad, Tensor<T> *out) {
unsigned int size = out->get_size();
kernel_pow_grad_device<<<(size + BLK_SIZE - 1) / BLK_SIZE, BLK_SIZE>>>(
x->get_ptr(), power, grad->get_ptr(), out->get_ptr(), (grad->get_size() == 1), size);
}
template void pow_grad_device(Tensor<int> *x, int power, Tensor<int> *input, Tensor<int> *out);
template void pow_grad_device(Tensor<float> *x, int power, Tensor<float> *input, Tensor<float> *out);
template void pow_grad_device(Tensor<double> *x, int power, Tensor<double> *input, Tensor<double> *out);
} // namespace internal
} // namespace magmadnn
#undef BLK_SIZE
|
acea4f091fefcf4c9f375b2b68722c89878845b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kEltwiseLogregCost(float* predmap, float* indmap, float*indlogpred, float* correctprobs, int numCases, int numTasks, int per_thread_case) {
const int task_id = blockIdx.x;
const int start_tx = threadIdx.x * per_thread_case;
const int end_tx = min(start_tx + per_thread_case, numCases);
const float EPSILON=1e-20; // Minimum value allowed, avoid log( 0 )
if (task_id >= numTasks) {
return;
}
for (int c_id = start_tx; c_id < end_tx; ++c_id) {
int pos = task_id * numCases + c_id;
float t = __fdividef(1.0f, 1.0f + __expf(-predmap[ pos ]));
if (indmap[pos] == 1) {
t = fmaxf(t, EPSILON);
indlogpred[pos] = __logf(t);
correctprobs[pos] = t;
} else {
t = 1-t;
t = fmaxf(t, EPSILON);
indlogpred[pos] = __logf(t);
correctprobs[pos] = t;
}
}
} | acea4f091fefcf4c9f375b2b68722c89878845b3.cu | #include "includes.h"
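// Element-wise logistic-regression cost: each block handles one task and each thread a
// range of cases; the prediction is squashed with a sigmoid, the probability assigned to
// the ground-truth label is clamped at EPSILON, and its log is stored alongside it.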
__global__ void kEltwiseLogregCost(float* predmap, float* indmap, float*indlogpred, float* correctprobs, int numCases, int numTasks, int per_thread_case) {
const int task_id = blockIdx.x;
const int start_tx = threadIdx.x * per_thread_case;
const int end_tx = min(start_tx + per_thread_case, numCases);
const float EPSILON=1e-20; // Minimum value allowed, avoid log( 0 )
if (task_id >= numTasks) {
return;
}
for (int c_id = start_tx; c_id < end_tx; ++c_id) {
int pos = task_id * numCases + c_id;
float t = __fdividef(1.0f, 1.0f + __expf(-predmap[ pos ]));
if (indmap[pos] == 1) {
t = fmaxf(t, EPSILON);
indlogpred[pos] = __logf(t);
correctprobs[pos] = t;
} else {
t = 1-t;
t = fmaxf(t, EPSILON);
indlogpred[pos] = __logf(t);
correctprobs[pos] = t;
}
}
} |
fe1ea7f12e91d2779f6cda0f60a914499dd2b55d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| fe1ea7f12e91d2779f6cda0f60a914499dd2b55d.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
3ec53e05d68428ff8939ebc3dd41a1d33dc8a60a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cugraph/experimental/include_cuco_static_map.cuh>
#include <cugraph/experimental/detail/graph_utils.cuh>
#include <cugraph/experimental/graph.hpp>
#include <cugraph/experimental/graph_functions.hpp>
#include <cugraph/experimental/graph_view.hpp>
#include <cugraph/patterns/copy_to_adj_matrix_row_col.cuh>
#include <cugraph/utilities/error.hpp>
#include <cugraph/utilities/shuffle_comm.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <raft/handle.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/polymorphic_allocator.hpp>
#include <thrust/copy.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <iterator>
#include <numeric>
#include <tuple>
#include <utility>
namespace cugraph {
namespace experimental {
// FIXME: think about requiring old_new_label_pairs to be pre-shuffled
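// Replaces each entry of `labels` in place with its new label from old_new_label_pairs;
// when skip_missing_labels is true, labels without a mapping are left unchanged.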
template <typename vertex_t, bool multi_gpu>
void relabel(raft::handle_t const& handle,
std::tuple<vertex_t const*, vertex_t const*> old_new_label_pairs,
vertex_t num_label_pairs,
vertex_t* labels /* [INOUT] */,
vertex_t num_labels,
bool skip_missing_labels,
bool do_expensive_check)
{
double constexpr load_factor = 0.7;
// FIXME: remove this check once we drop Pascal support
CUGRAPH_EXPECTS(handle.get_device_properties().major >= 7,
"Relabel not supported on Pascal and older architectures.");
#ifdef CUCO_STATIC_MAP_DEFINED
if (multi_gpu) {
auto& comm = handle.get_comms();
auto const comm_size = comm.get_size();
auto key_func = detail::compute_gpu_id_from_vertex_t<vertex_t>{comm_size};
// find unique old labels (to be relabeled)
rmm::device_uvector<vertex_t> unique_old_labels(num_labels, handle.get_stream());
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels,
labels + num_labels,
unique_old_labels.data());
thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
unique_old_labels.begin(),
unique_old_labels.end());
unique_old_labels.resize(
thrust::distance(
unique_old_labels.begin(),
thrust::unique(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
unique_old_labels.begin(),
unique_old_labels.end())),
handle.get_stream());
unique_old_labels.shrink_to_fit(handle.get_stream());
// collect new labels for the unique old labels
rmm::device_uvector<vertex_t> new_labels_for_unique_old_labels(0, handle.get_stream());
{
// shuffle the old_new_label_pairs based on applying the compute_gpu_id_from_vertex_t functor
// to the old labels
rmm::device_uvector<vertex_t> rx_label_pair_old_labels(0, handle.get_stream());
rmm::device_uvector<vertex_t> rx_label_pair_new_labels(0, handle.get_stream());
{
rmm::device_uvector<vertex_t> label_pair_old_labels(num_label_pairs, handle.get_stream());
rmm::device_uvector<vertex_t> label_pair_new_labels(num_label_pairs, handle.get_stream());
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
std::get<0>(old_new_label_pairs),
std::get<0>(old_new_label_pairs) + num_label_pairs,
label_pair_old_labels.begin());
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
std::get<1>(old_new_label_pairs),
std::get<1>(old_new_label_pairs) + num_label_pairs,
label_pair_new_labels.begin());
auto pair_first = thrust::make_zip_iterator(
thrust::make_tuple(label_pair_old_labels.begin(), label_pair_new_labels.begin()));
std::forward_as_tuple(std::tie(rx_label_pair_old_labels, rx_label_pair_new_labels),
std::ignore) =
groupby_gpuid_and_shuffle_values(
handle.get_comms(),
pair_first,
pair_first + num_label_pairs,
[key_func] __device__(auto val) { return key_func(thrust::get<0>(val)); },
handle.get_stream());
}
// update intermediate relabel map
CUDA_TRY(hipStreamSynchronize(
handle.get_stream())); // cuco::static_map currently does not take stream
auto poly_alloc =
rmm::mr::polymorphic_allocator<char>(rmm::mr::get_current_device_resource());
auto stream_adapter =
rmm::mr::make_stream_allocator_adaptor(poly_alloc, hipStream_t{nullptr});
cuco::static_map<vertex_t, vertex_t, cuda::thread_scope_device, decltype(stream_adapter)>
relabel_map{// cuco::static_map requires at least one empty slot
::max(static_cast<size_t>(
static_cast<double>(rx_label_pair_old_labels.size()) / load_factor),
rx_label_pair_old_labels.size() + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value,
stream_adapter};
auto pair_first = thrust::make_transform_iterator(
thrust::make_zip_iterator(
thrust::make_tuple(rx_label_pair_old_labels.begin(), rx_label_pair_new_labels.begin())),
[] __device__(auto val) {
return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
});
relabel_map.insert(pair_first, pair_first + rx_label_pair_old_labels.size());
rx_label_pair_old_labels.resize(0, handle.get_stream());
rx_label_pair_new_labels.resize(0, handle.get_stream());
rx_label_pair_old_labels.shrink_to_fit(handle.get_stream());
rx_label_pair_new_labels.shrink_to_fit(handle.get_stream());
// shuffle unique_old_labels, relabel using the intermediate relabel map, and shuffle back
{
rmm::device_uvector<vertex_t> rx_unique_old_labels(0, handle.get_stream());
std::vector<size_t> rx_value_counts{};
std::tie(rx_unique_old_labels, rx_value_counts) = groupby_gpuid_and_shuffle_values(
handle.get_comms(),
unique_old_labels.begin(),
unique_old_labels.end(),
[key_func] __device__(auto val) { return key_func(val); },
handle.get_stream());
CUDA_TRY(hipStreamSynchronize(
handle.get_stream())); // cuco::static_map currently does not take stream
if (skip_missing_labels) {
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
rx_unique_old_labels.begin(),
rx_unique_old_labels.end(),
rx_unique_old_labels.begin(),
[view = relabel_map.get_device_view()] __device__(auto old_label) {
auto found = view.find(old_label);
return found != view.end() ? view.find(old_label)->second.load(
cuda::std::memory_order_relaxed)
: old_label;
});
} else {
relabel_map.find(
rx_unique_old_labels.begin(),
rx_unique_old_labels.end(),
rx_unique_old_labels.begin()); // now rx_unique_old_labels holds the new labels for the
// corresponding old labels
}
std::tie(new_labels_for_unique_old_labels, std::ignore) = shuffle_values(
handle.get_comms(), rx_unique_old_labels.begin(), rx_value_counts, handle.get_stream());
}
}
handle.get_stream_view().synchronize(); // cuco::static_map currently does not take stream
{
auto poly_alloc =
rmm::mr::polymorphic_allocator<char>(rmm::mr::get_current_device_resource());
auto stream_adapter =
rmm::mr::make_stream_allocator_adaptor(poly_alloc, hipStream_t{nullptr});
cuco::static_map<vertex_t, vertex_t, cuda::thread_scope_device, decltype(stream_adapter)>
relabel_map{
// cuco::static_map requires at least one empty slot
::max(static_cast<size_t>(static_cast<double>(unique_old_labels.size()) / load_factor),
unique_old_labels.size() + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value,
stream_adapter};
auto pair_first = thrust::make_transform_iterator(
thrust::make_zip_iterator(
thrust::make_tuple(unique_old_labels.begin(), new_labels_for_unique_old_labels.begin())),
[] __device__(auto val) {
return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
});
relabel_map.insert(pair_first, pair_first + unique_old_labels.size());
relabel_map.find(labels, labels + num_labels, labels);
}
} else {
cuco::static_map<vertex_t, vertex_t> relabel_map(
// cuco::static_map requires at least one empty slot
::max(static_cast<size_t>(static_cast<double>(num_label_pairs) / load_factor),
static_cast<size_t>(num_label_pairs) + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value);
auto pair_first = thrust::make_transform_iterator(
thrust::make_zip_iterator(
thrust::make_tuple(std::get<0>(old_new_label_pairs), std::get<1>(old_new_label_pairs))),
[] __device__(auto val) {
return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
});
relabel_map.insert(pair_first, pair_first + num_label_pairs);
if (skip_missing_labels) {
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels,
labels + num_labels,
labels,
[view = relabel_map.get_device_view()] __device__(auto old_label) {
auto found = view.find(old_label);
return found != view.end() ? view.find(old_label)->second.load(
cuda::std::memory_order_relaxed)
: old_label;
});
} else {
relabel_map.find(labels, labels + num_labels, labels);
}
}
if (do_expensive_check && !skip_missing_labels) {
CUGRAPH_EXPECTS(
thrust::count(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels,
labels + num_labels,
invalid_vertex_id<vertex_t>::value) == 0,
"Invalid input argument: labels include old label values missing in old_new_label_pairs.");
}
#endif
return;
}
// explicit instantiation
template void relabel<int32_t, true>(raft::handle_t const& handle,
std::tuple<int32_t const*, int32_t const*> old_new_label_pairs,
int32_t num_label_pairs,
int32_t* labels,
int32_t num_labels,
bool skip_missing_labels,
bool do_expensive_check);
template void relabel<int32_t, false>(
raft::handle_t const& handle,
std::tuple<int32_t const*, int32_t const*> old_new_label_pairs,
int32_t num_label_pairs,
int32_t* labels,
int32_t num_labels,
bool skip_missing_labels,
bool do_expensive_check);
template void relabel<int64_t, true>(raft::handle_t const& handle,
std::tuple<int64_t const*, int64_t const*> old_new_label_pairs,
int64_t num_label_pairs,
int64_t* labels,
int64_t num_labels,
bool skip_missing_labels,
bool do_expensive_check);
template void relabel<int64_t, false>(
raft::handle_t const& handle,
std::tuple<int64_t const*, int64_t const*> old_new_label_pairs,
int64_t num_label_pairs,
int64_t* labels,
int64_t num_labels,
bool skip_missing_labels,
bool do_expensive_check);
} // namespace experimental
} // namespace cugraph
| 3ec53e05d68428ff8939ebc3dd41a1d33dc8a60a.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cugraph/experimental/include_cuco_static_map.cuh>
#include <cugraph/experimental/detail/graph_utils.cuh>
#include <cugraph/experimental/graph.hpp>
#include <cugraph/experimental/graph_functions.hpp>
#include <cugraph/experimental/graph_view.hpp>
#include <cugraph/patterns/copy_to_adj_matrix_row_col.cuh>
#include <cugraph/utilities/error.hpp>
#include <cugraph/utilities/shuffle_comm.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <raft/handle.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/polymorphic_allocator.hpp>
#include <thrust/copy.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <iterator>
#include <numeric>
#include <tuple>
#include <utility>
namespace cugraph {
namespace experimental {
// FIXME: think about requiring old_new_label_pairs to be pre-shuffled
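// Replaces each entry of `labels` in place with its new label from old_new_label_pairs;
// when skip_missing_labels is true, labels without a mapping are left unchanged.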
template <typename vertex_t, bool multi_gpu>
void relabel(raft::handle_t const& handle,
std::tuple<vertex_t const*, vertex_t const*> old_new_label_pairs,
vertex_t num_label_pairs,
vertex_t* labels /* [INOUT] */,
vertex_t num_labels,
bool skip_missing_labels,
bool do_expensive_check)
{
double constexpr load_factor = 0.7;
// FIXME: remove this check once we drop Pascal support
CUGRAPH_EXPECTS(handle.get_device_properties().major >= 7,
"Relabel not supported on Pascal and older architectures.");
#ifdef CUCO_STATIC_MAP_DEFINED
if (multi_gpu) {
auto& comm = handle.get_comms();
auto const comm_size = comm.get_size();
auto key_func = detail::compute_gpu_id_from_vertex_t<vertex_t>{comm_size};
// find unique old labels (to be relabeled)
rmm::device_uvector<vertex_t> unique_old_labels(num_labels, handle.get_stream());
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels,
labels + num_labels,
unique_old_labels.data());
thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
unique_old_labels.begin(),
unique_old_labels.end());
unique_old_labels.resize(
thrust::distance(
unique_old_labels.begin(),
thrust::unique(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
unique_old_labels.begin(),
unique_old_labels.end())),
handle.get_stream());
unique_old_labels.shrink_to_fit(handle.get_stream());
// collect new labels for the unique old labels
rmm::device_uvector<vertex_t> new_labels_for_unique_old_labels(0, handle.get_stream());
{
// shuffle the old_new_label_pairs based on applying the compute_gpu_id_from_vertex_t functor
// to the old labels
rmm::device_uvector<vertex_t> rx_label_pair_old_labels(0, handle.get_stream());
rmm::device_uvector<vertex_t> rx_label_pair_new_labels(0, handle.get_stream());
{
rmm::device_uvector<vertex_t> label_pair_old_labels(num_label_pairs, handle.get_stream());
rmm::device_uvector<vertex_t> label_pair_new_labels(num_label_pairs, handle.get_stream());
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
std::get<0>(old_new_label_pairs),
std::get<0>(old_new_label_pairs) + num_label_pairs,
label_pair_old_labels.begin());
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
std::get<1>(old_new_label_pairs),
std::get<1>(old_new_label_pairs) + num_label_pairs,
label_pair_new_labels.begin());
auto pair_first = thrust::make_zip_iterator(
thrust::make_tuple(label_pair_old_labels.begin(), label_pair_new_labels.begin()));
std::forward_as_tuple(std::tie(rx_label_pair_old_labels, rx_label_pair_new_labels),
std::ignore) =
groupby_gpuid_and_shuffle_values(
handle.get_comms(),
pair_first,
pair_first + num_label_pairs,
[key_func] __device__(auto val) { return key_func(thrust::get<0>(val)); },
handle.get_stream());
}
// update intermediate relabel map
CUDA_TRY(cudaStreamSynchronize(
handle.get_stream())); // cuco::static_map currently does not take stream
auto poly_alloc =
rmm::mr::polymorphic_allocator<char>(rmm::mr::get_current_device_resource());
auto stream_adapter =
rmm::mr::make_stream_allocator_adaptor(poly_alloc, cudaStream_t{nullptr});
cuco::static_map<vertex_t, vertex_t, cuda::thread_scope_device, decltype(stream_adapter)>
relabel_map{// cuco::static_map requires at least one empty slot
std::max(static_cast<size_t>(
static_cast<double>(rx_label_pair_old_labels.size()) / load_factor),
rx_label_pair_old_labels.size() + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value,
stream_adapter};
auto pair_first = thrust::make_transform_iterator(
thrust::make_zip_iterator(
thrust::make_tuple(rx_label_pair_old_labels.begin(), rx_label_pair_new_labels.begin())),
[] __device__(auto val) {
return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
});
relabel_map.insert(pair_first, pair_first + rx_label_pair_old_labels.size());
rx_label_pair_old_labels.resize(0, handle.get_stream());
rx_label_pair_new_labels.resize(0, handle.get_stream());
rx_label_pair_old_labels.shrink_to_fit(handle.get_stream());
rx_label_pair_new_labels.shrink_to_fit(handle.get_stream());
// shuffle unique_old_labels, relabel using the intermediate relabel map, and shuffle back
{
rmm::device_uvector<vertex_t> rx_unique_old_labels(0, handle.get_stream());
std::vector<size_t> rx_value_counts{};
std::tie(rx_unique_old_labels, rx_value_counts) = groupby_gpuid_and_shuffle_values(
handle.get_comms(),
unique_old_labels.begin(),
unique_old_labels.end(),
[key_func] __device__(auto val) { return key_func(val); },
handle.get_stream());
CUDA_TRY(cudaStreamSynchronize(
handle.get_stream())); // cuco::static_map currently does not take stream
if (skip_missing_labels) {
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
rx_unique_old_labels.begin(),
rx_unique_old_labels.end(),
rx_unique_old_labels.begin(),
[view = relabel_map.get_device_view()] __device__(auto old_label) {
auto found = view.find(old_label);
return found != view.end() ? view.find(old_label)->second.load(
cuda::std::memory_order_relaxed)
: old_label;
});
} else {
relabel_map.find(
rx_unique_old_labels.begin(),
rx_unique_old_labels.end(),
rx_unique_old_labels.begin()); // now rx_unique_old_labels holds the new labels for the
// corresponding old labels
}
std::tie(new_labels_for_unique_old_labels, std::ignore) = shuffle_values(
handle.get_comms(), rx_unique_old_labels.begin(), rx_value_counts, handle.get_stream());
}
}
handle.get_stream_view().synchronize(); // cuco::static_map currently does not take stream
{
auto poly_alloc =
rmm::mr::polymorphic_allocator<char>(rmm::mr::get_current_device_resource());
auto stream_adapter =
rmm::mr::make_stream_allocator_adaptor(poly_alloc, cudaStream_t{nullptr});
cuco::static_map<vertex_t, vertex_t, cuda::thread_scope_device, decltype(stream_adapter)>
relabel_map{
// cuco::static_map requires at least one empty slot
std::max(static_cast<size_t>(static_cast<double>(unique_old_labels.size()) / load_factor),
unique_old_labels.size() + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value,
stream_adapter};
auto pair_first = thrust::make_transform_iterator(
thrust::make_zip_iterator(
thrust::make_tuple(unique_old_labels.begin(), new_labels_for_unique_old_labels.begin())),
[] __device__(auto val) {
return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
});
relabel_map.insert(pair_first, pair_first + unique_old_labels.size());
relabel_map.find(labels, labels + num_labels, labels);
}
} else {
cuco::static_map<vertex_t, vertex_t> relabel_map(
// cuco::static_map requires at least one empty slot
std::max(static_cast<size_t>(static_cast<double>(num_label_pairs) / load_factor),
static_cast<size_t>(num_label_pairs) + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value);
auto pair_first = thrust::make_transform_iterator(
thrust::make_zip_iterator(
thrust::make_tuple(std::get<0>(old_new_label_pairs), std::get<1>(old_new_label_pairs))),
[] __device__(auto val) {
return thrust::make_pair(thrust::get<0>(val), thrust::get<1>(val));
});
relabel_map.insert(pair_first, pair_first + num_label_pairs);
if (skip_missing_labels) {
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels,
labels + num_labels,
labels,
[view = relabel_map.get_device_view()] __device__(auto old_label) {
auto found = view.find(old_label);
return found != view.end() ? view.find(old_label)->second.load(
cuda::std::memory_order_relaxed)
: old_label;
});
} else {
relabel_map.find(labels, labels + num_labels, labels);
}
}
if (do_expensive_check && !skip_missing_labels) {
CUGRAPH_EXPECTS(
thrust::count(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels,
labels + num_labels,
invalid_vertex_id<vertex_t>::value) == 0,
"Invalid input argument: labels include old label values missing in old_new_label_pairs.");
}
#endif
return;
}
// explicit instantiation
template void relabel<int32_t, true>(raft::handle_t const& handle,
std::tuple<int32_t const*, int32_t const*> old_new_label_pairs,
int32_t num_label_pairs,
int32_t* labels,
int32_t num_labels,
bool skip_missing_labels,
bool do_expensive_check);
template void relabel<int32_t, false>(
raft::handle_t const& handle,
std::tuple<int32_t const*, int32_t const*> old_new_label_pairs,
int32_t num_label_pairs,
int32_t* labels,
int32_t num_labels,
bool skip_missing_labels,
bool do_expensive_check);
template void relabel<int64_t, true>(raft::handle_t const& handle,
std::tuple<int64_t const*, int64_t const*> old_new_label_pairs,
int64_t num_label_pairs,
int64_t* labels,
int64_t num_labels,
bool skip_missing_labels,
bool do_expensive_check);
template void relabel<int64_t, false>(
raft::handle_t const& handle,
std::tuple<int64_t const*, int64_t const*> old_new_label_pairs,
int64_t num_label_pairs,
int64_t* labels,
int64_t num_labels,
bool skip_missing_labels,
bool do_expensive_check);
} // namespace experimental
} // namespace cugraph
|
e48183ea171eae8af55abf11cd1306a82e056bea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/multinomial_op.h"
#include "paddle/fluid/platform/transform.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void NormalizeProbability(T* norm_probs, const T* in_data,
T* sum_rows) {
int id = threadIdx.x + blockIdx.x * blockDim.x +
blockIdx.y * gridDim.x * blockDim.x;
norm_probs[id] = in_data[id] / sum_rows[blockIdx.y];
}
template <typename T>
__global__ void GetCumulativeProbs(T* norm_probs_data,
int64_t num_distributions,
int64_t num_categories,
T* cumulative_probs) {
for (int id = blockIdx.x; id < num_distributions; id += gridDim.x) {
thrust::inclusive_scan(thrust::device,
norm_probs_data + id * num_categories,
norm_probs_data + (id + 1) * num_categories,
cumulative_probs + id * num_categories);
}
}
template <typename T>
struct RandomGeneratorCudaFunctor {
unsigned int seed_;
__host__ __device__ RandomGeneratorCudaFunctor(int seed) : seed_(seed) {}
__host__ __device__ T operator()(const unsigned int n) const {
thrust::minstd_rand rng;
rng.seed(seed_);
thrust::uniform_real_distribution<T> dist(0.0, 1.0);
rng.discard(n);
return dist(rng);
}
};
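// Lower-bound binary search over the cumulative distribution: returns the first
// category whose cumulative probability is >= rng_number, then steps left past
// any zero-probability categories.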
template <typename T>
__device__ int binarySearchFunctor(T* cumulative_probs, T* norm_probs_data,
int num_categories, T rng_number) {
int left = 0;
int right = num_categories;
while (right - left > 0) {
int mid = left + (right - left) / 2;
T temp_prob = cumulative_probs[mid];
if (temp_prob < rng_number) {
left = mid + 1;
} else {
right = mid;
}
}
if (left == num_categories) {
left = num_categories - 1;
}
while (left >= 1 && norm_probs_data[left] == 0) left--;
return left;
}
template <typename T>
__global__ void sampleMultinomialWithReplacement(
T* rng_data, const int64_t num_samples, int64_t* out_data,
const int64_t num_distributions, const int64_t num_categories,
T* cumulative_probs, T* norm_probs_data) {
// use binary search to get the selected category sample id.
// let cumulative_probs[id-1] < rng_data < cumulative_probs[id].
int idx = threadIdx.x + blockIdx.x * blockDim.x +
blockIdx.y * gridDim.x * blockDim.x;
// for every distribution
for (int dist = blockIdx.y; dist < num_distributions; dist += gridDim.y) {
// for every sample
for (int sample = blockIdx.x * blockDim.x + threadIdx.x;
sample < num_samples; sample += blockDim.x * gridDim.x) {
T rng_number = rng_data[sample + dist * num_samples];
// Find the bucket that a uniform random number lies in
int selected_category = binarySearchFunctor<T>(
cumulative_probs + dist * num_categories,
norm_probs_data + dist * num_categories, num_categories, rng_number);
out_data[sample + dist * num_samples] = selected_category;
}
}
}
template <typename T>
class MultinomialOpKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto x = ctx.Input<framework::Tensor>("X");
auto out = ctx.Output<framework::Tensor>("Out");
const int64_t num_samples = ctx.Attr<int>("num_samples");
const bool replacement = ctx.Attr<bool>("replacement");
auto* in_data = x->data<T>();
int64_t* out_data = out->mutable_data<int64_t>(ctx.GetPlace());
auto in_dims = x->dims();
int64_t in_rank = in_dims.size();
const int64_t num_categories = in_dims[in_rank - 1];
const int64_t num_distributions = in_rank > 1 ? in_dims[in_rank - 2] : 1;
// If replacement is False, samples are drawn without replacement: every category
// can be used only once, so the distribution changes after each draw and the
// sampling cannot be parallelized. In that case fall back to the CPU
// implementation ``MultinomialFunctor``.
if (!replacement) {
int64_t in_data_numel = x->numel();
int64_t out_data_numel = out->numel();
T* cpu_in_data = new T[in_data_numel];
int64_t* cpu_out_data = new int64_t[out_data_numel];
hipMemcpy(cpu_in_data, in_data, in_data_numel * sizeof(T),
hipMemcpyDeviceToHost);
MultinomialFunctor<T>(cpu_out_data, cpu_in_data, num_samples, replacement,
num_categories, num_distributions);
hipMemcpy(out_data, cpu_out_data, out_data_numel * sizeof(int64_t),
hipMemcpyHostToDevice);
delete[] cpu_in_data;
delete[] cpu_out_data;
return;
}
// Sum of input may not be 1. To get probability in range [0, 1], calculate
// sum of each row of input, and then use the sum to normalize the input.
// sum_row_data: sum of each row
framework::Tensor sum_rows_tensor;
auto* sum_rows_data =
sum_rows_tensor.mutable_data<T>({num_distributions}, ctx.GetPlace());
auto& place = *ctx.template device_context<platform::CUDADeviceContext>()
.eigen_device();
if (num_distributions == 1) {
auto eigen_input = framework::EigenVector<T>::Flatten(*x);
auto eigen_sum_rows = framework::EigenVector<T>::Flatten(sum_rows_tensor);
eigen_sum_rows.device(place) =
eigen_input.sum(Eigen::DSizes<int, 1>(1))
.eval()
.reshape(Eigen::DSizes<int, 1>(sum_rows_tensor.dims()[0]));
} else {
auto eigen_input = framework::EigenMatrix<T>::From(*x);
auto eigen_sum_rows = framework::EigenVector<T>::Flatten(sum_rows_tensor);
eigen_sum_rows.device(place) = eigen_input.sum(Eigen::DSizes<int, 1>(1));
}
// Normalize row of each distribution to get the probability in range [0,
// 1].
// norm_probs_data: probability of the distribution
framework::Tensor norm_probs_tensor;
auto* norm_probs_data = norm_probs_tensor.mutable_data<T>(
{num_distributions, num_categories}, ctx.GetPlace());
// number of threads in a block is min(num_categories, 512)
dim3 block_norm(num_categories < 512 ? num_categories : 512);
dim3 grid_norm((num_categories - 1) / block_norm.x + 1, num_distributions);
hipLaunchKernelGGL(( NormalizeProbability<
T>), dim3(grid_norm), dim3(block_norm), 0, ctx.cuda_device_context().stream(),
norm_probs_data, in_data, sum_rows_data);
// Get cumulative probability of each distribution. It's the same function
// of
// ``cumsum`` op.
framework::Tensor cumulative_probs_tensor;
auto* cumulative_probs = cumulative_probs_tensor.mutable_data<T>(
{num_distributions, num_categories}, ctx.GetPlace());
dim3 block_cumsum(1);
dim3 grid_cumsum(num_distributions);
hipLaunchKernelGGL(( GetCumulativeProbs<T>), dim3(grid_cumsum), dim3(block_cumsum), 0,
ctx.cuda_device_context().stream(),
norm_probs_data, num_distributions, num_categories, cumulative_probs);
// Generate random number for each sample.
std::random_device rd;
auto seed = rd();
framework::Tensor rng_data_tensor;
auto* rng_data = rng_data_tensor.mutable_data<T>(
{num_distributions, num_samples}, ctx.GetPlace());
thrust::counting_iterator<unsigned int> index_sequence_begin(0);
platform::Transform<platform::CUDADeviceContext> trans;
auto* context =
static_cast<const platform::CUDADeviceContext*>(&ctx.device_context());
trans(*context, index_sequence_begin,
index_sequence_begin + num_distributions * num_samples, rng_data,
RandomGeneratorCudaFunctor<T>(seed));
// Sample the multinomial distributions.
dim3 block_sample(128);
dim3 grid_sample((num_samples - 1) / block_sample.x + 1, num_distributions);
hipLaunchKernelGGL(( sampleMultinomialWithReplacement<T>), dim3(grid_sample), dim3(block_sample), 0,
ctx.cuda_device_context().stream(),
rng_data, num_samples, out_data, num_distributions, num_categories,
cumulative_probs, norm_probs_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
multinomial, ops::MultinomialOpKernel<plat::CUDADeviceContext, float>,
ops::MultinomialOpKernel<plat::CUDADeviceContext, double>);
| e48183ea171eae8af55abf11cd1306a82e056bea.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/multinomial_op.h"
#include "paddle/fluid/platform/transform.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void NormalizeProbability(T* norm_probs, const T* in_data,
T* sum_rows) {
int id = threadIdx.x + blockIdx.x * blockDim.x +
blockIdx.y * gridDim.x * blockDim.x;
norm_probs[id] = in_data[id] / sum_rows[blockIdx.y];
}
template <typename T>
__global__ void GetCumulativeProbs(T* norm_probs_data,
int64_t num_distributions,
int64_t num_categories,
T* cumulative_probs) {
for (int id = blockIdx.x; id < num_distributions; id += gridDim.x) {
thrust::inclusive_scan(thrust::device,
norm_probs_data + id * num_categories,
norm_probs_data + (id + 1) * num_categories,
cumulative_probs + id * num_categories);
}
}
template <typename T>
struct RandomGeneratorCudaFunctor {
unsigned int seed_;
__host__ __device__ RandomGeneratorCudaFunctor(int seed) : seed_(seed) {}
__host__ __device__ T operator()(const unsigned int n) const {
thrust::minstd_rand rng;
rng.seed(seed_);
thrust::uniform_real_distribution<T> dist(0.0, 1.0);
rng.discard(n);
return dist(rng);
}
};
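// Lower-bound binary search over the cumulative distribution: returns the first
// category whose cumulative probability is >= rng_number, then steps left past
// any zero-probability categories.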
template <typename T>
__device__ int binarySearchFunctor(T* cumulative_probs, T* norm_probs_data,
int num_categories, T rng_number) {
int left = 0;
int right = num_categories;
while (right - left > 0) {
int mid = left + (right - left) / 2;
T temp_prob = cumulative_probs[mid];
if (temp_prob < rng_number) {
left = mid + 1;
} else {
right = mid;
}
}
if (left == num_categories) {
left = num_categories - 1;
}
while (left >= 1 && norm_probs_data[left] == 0) left--;
return left;
}
template <typename T>
__global__ void sampleMultinomialWithReplacement(
T* rng_data, const int64_t num_samples, int64_t* out_data,
const int64_t num_distributions, const int64_t num_categories,
T* cumulative_probs, T* norm_probs_data) {
// use binary search to get the selected category sample id.
// let cumulative_probs[id-1] < rng_data < cumulative_probs[id].
int idx = threadIdx.x + blockIdx.x * blockDim.x +
blockIdx.y * gridDim.x * blockDim.x;
// for every distribution
for (int dist = blockIdx.y; dist < num_distributions; dist += gridDim.y) {
// for every sample
for (int sample = blockIdx.x * blockDim.x + threadIdx.x;
sample < num_samples; sample += blockDim.x * gridDim.x) {
T rng_number = rng_data[sample + dist * num_samples];
// Find the bucket that a uniform random number lies in
int selected_category = binarySearchFunctor<T>(
cumulative_probs + dist * num_categories,
norm_probs_data + dist * num_categories, num_categories, rng_number);
out_data[sample + dist * num_samples] = selected_category;
}
}
}
template <typename T>
class MultinomialOpKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto x = ctx.Input<framework::Tensor>("X");
auto out = ctx.Output<framework::Tensor>("Out");
const int64_t num_samples = ctx.Attr<int>("num_samples");
const bool replacement = ctx.Attr<bool>("replacement");
auto* in_data = x->data<T>();
int64_t* out_data = out->mutable_data<int64_t>(ctx.GetPlace());
auto in_dims = x->dims();
int64_t in_rank = in_dims.size();
const int64_t num_categories = in_dims[in_rank - 1];
const int64_t num_distributions = in_rank > 1 ? in_dims[in_rank - 2] : 1;
// If replacement is False, samples are drawn without replacement: every category
// can be used only once, so the distribution changes after each draw and the
// sampling cannot be parallelized. In that case fall back to the CPU
// implementation ``MultinomialFunctor``.
if (!replacement) {
int64_t in_data_numel = x->numel();
int64_t out_data_numel = out->numel();
T* cpu_in_data = new T[in_data_numel];
int64_t* cpu_out_data = new int64_t[out_data_numel];
cudaMemcpy(cpu_in_data, in_data, in_data_numel * sizeof(T),
cudaMemcpyDeviceToHost);
MultinomialFunctor<T>(cpu_out_data, cpu_in_data, num_samples, replacement,
num_categories, num_distributions);
cudaMemcpy(out_data, cpu_out_data, out_data_numel * sizeof(int64_t),
cudaMemcpyHostToDevice);
delete[] cpu_in_data;
delete[] cpu_out_data;
return;
}
// Sum of input may not be 1. To get probability in range [0, 1], calculate
// sum of each row of input, and then use the sum to normalize the input.
// sum_row_data: sum of each row
framework::Tensor sum_rows_tensor;
auto* sum_rows_data =
sum_rows_tensor.mutable_data<T>({num_distributions}, ctx.GetPlace());
auto& place = *ctx.template device_context<platform::CUDADeviceContext>()
.eigen_device();
if (num_distributions == 1) {
auto eigen_input = framework::EigenVector<T>::Flatten(*x);
auto eigen_sum_rows = framework::EigenVector<T>::Flatten(sum_rows_tensor);
eigen_sum_rows.device(place) =
eigen_input.sum(Eigen::DSizes<int, 1>(1))
.eval()
.reshape(Eigen::DSizes<int, 1>(sum_rows_tensor.dims()[0]));
} else {
auto eigen_input = framework::EigenMatrix<T>::From(*x);
auto eigen_sum_rows = framework::EigenVector<T>::Flatten(sum_rows_tensor);
eigen_sum_rows.device(place) = eigen_input.sum(Eigen::DSizes<int, 1>(1));
}
// Normalize row of each distribution to get the probability in range [0,
// 1].
// norm_probs_data: probability of the distribution
framework::Tensor norm_probs_tensor;
auto* norm_probs_data = norm_probs_tensor.mutable_data<T>(
{num_distributions, num_categories}, ctx.GetPlace());
// number of threads in a block is min(num_categories, 512)
dim3 block_norm(num_categories < 512 ? num_categories : 512);
dim3 grid_norm((num_categories - 1) / block_norm.x + 1, num_distributions);
NormalizeProbability<
T><<<grid_norm, block_norm, 0, ctx.cuda_device_context().stream()>>>(
norm_probs_data, in_data, sum_rows_data);
// Get cumulative probability of each distribution. It's the same function
// of
// ``cumsum`` op.
framework::Tensor cumulative_probs_tensor;
auto* cumulative_probs = cumulative_probs_tensor.mutable_data<T>(
{num_distributions, num_categories}, ctx.GetPlace());
dim3 block_cumsum(1);
dim3 grid_cumsum(num_distributions);
GetCumulativeProbs<T><<<grid_cumsum, block_cumsum, 0,
ctx.cuda_device_context().stream()>>>(
norm_probs_data, num_distributions, num_categories, cumulative_probs);
// Generate random number for each sample.
std::random_device rd;
auto seed = rd();
framework::Tensor rng_data_tensor;
auto* rng_data = rng_data_tensor.mutable_data<T>(
{num_distributions, num_samples}, ctx.GetPlace());
thrust::counting_iterator<unsigned int> index_sequence_begin(0);
platform::Transform<platform::CUDADeviceContext> trans;
auto* context =
static_cast<const platform::CUDADeviceContext*>(&ctx.device_context());
trans(*context, index_sequence_begin,
index_sequence_begin + num_distributions * num_samples, rng_data,
RandomGeneratorCudaFunctor<T>(seed));
// Sample the multinomial distributions.
dim3 block_sample(128);
dim3 grid_sample((num_samples - 1) / block_sample.x + 1, num_distributions);
sampleMultinomialWithReplacement<T><<<grid_sample, block_sample, 0,
ctx.cuda_device_context().stream()>>>(
rng_data, num_samples, out_data, num_distributions, num_categories,
cumulative_probs, norm_probs_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
multinomial, ops::MultinomialOpKernel<plat::CUDADeviceContext, float>,
ops::MultinomialOpKernel<plat::CUDADeviceContext, double>);
|
fe1aa35c776a6c0424a9e8fc38362fa355e9dc55.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <stdint.h>
#define NO_STUB_MAIN
#include "ptx_stub.cu"
__device__ __attribute__((noinline)) void store(uint64_t tid, int* where, int what)
{
*where = what;
STORE_OP_FUNCTION_NAME(tid, (uintptr_t)where, OP_STORE, 3);
printf("Storing at %p from %p\n", where, tid);
}
__device__ int zglobal[32];
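// Test kernel: multiplies src by m into dst while exercising stores to global, shared
// and local memory, reporting every access through the ptx_stub instrumentation hooks.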
__global__ void tstfun(uint32_t sid, int *src, int* dst, const int m)
{
uint64_t tid = GETTID_FUNCTION_NAME(sid);
STORE_OP_FUNCTION_NAME(tid, 0, OP_START_KERNEL, 4);
__shared__ int zshared[32];
int p;
int* pp = &p;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int val = src[i];
STORE_OP_FUNCTION_NAME(tid, (uintptr_t)&src[i], OP_LOAD, 5);
printf("Tst1: &src[%i]=%p &dst[%i]=%p m=%i\n", i, &src[i], i, &dst[i], m);
// printf("Tst1: &src[%i]=%p &dst[%i]=%p m=%i zhared[i]=%p zglobal[i]=%p, pp=%p\n", i, &src[i], i, &dst[i], m, &zshared[i], &zglobal[i], pp);
store(tid, &dst[i], val * m);
store(tid, &zshared[i], val * m);
store(tid, &zglobal[i], val * m);
store(tid, pp, val * m);
dst[i] = val * m;
STORE_OP_FUNCTION_NAME(tid, (uintptr_t)&dst[i], OP_STORE, 6);
zshared[i] = val * m;
STORE_OP_FUNCTION_NAME(tid, (uintptr_t)&zshared[i], OP_STORE, 7);
zglobal[i] = val * m;
STORE_OP_FUNCTION_NAME(tid, (uintptr_t)&zglobal[i], OP_STORE, 8);
STORE_OP_FUNCTION_NAME(tid, 0, OP_END_KERNEL, 9);
}
/// host code
int main(int argc, char* argv[])
{
int *dst, *src;
int *dev_dst, *dev_src;
int num_blocks = 2;
int num_threads = 2;
if(argc > 1)
{
num_threads = atoi(argv[1]);
if(argc > 2)
num_blocks = atoi(argv[2]);
}
int num_total = num_threads * num_blocks;
printf("Tst1: threads=%i blocks:=%i total=%i\n", num_threads, num_blocks, num_total);
dst = new int[num_total];
src = new int[num_total];
for(int i = 0; i < num_total; ++ i)
{
dst[i] = 0;
src[i] = i + 10;
}
checkCudaErrors(hipMalloc(&dev_src, sizeof(int) * num_total));
checkCudaErrors(hipMemcpy(dev_src, src, sizeof(int) * num_total, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&dev_dst, sizeof(int) * num_total));
checkCudaErrors(hipMemset(dev_dst, 0, sizeof(int) * num_total));
const int m = 5;
hipLaunchKernelGGL(( tstfun), dim3(num_blocks), dim3(num_threads), 0, 0, 0, dev_src, dev_dst, m);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(dst, dev_dst, sizeof(int) * num_total, hipMemcpyDeviceToHost));
for(int i = 0; i < num_total; ++ i)
{
if(dst[i] != src[i] * m)
{
fprintf(stderr, "Tst1: Error At index: %i: %i\n", i, dst[i]);
return -1;
}
}
printf("Tst1: Success (%i*%i=%i).\n", num_blocks, num_total, num_total);
printf("Tst1: no hazards expected.\n");
stub_force_linking();
return 0;
}
| fe1aa35c776a6c0424a9e8fc38362fa355e9dc55.cu | #include <iostream>
#include <unistd.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <stdint.h>
#define NO_STUB_MAIN
#include "ptx_stub.cu"
__device__ __attribute__((noinline)) void store(uint64_t tid, int* where, int what)
{
*where = what;
STORE_OP_FUNCTION_NAME(tid, (uintptr_t)where, OP_STORE, 3);
printf("Storing at %p from %p\n", where, tid);
}
__device__ int zglobal[32];
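// Test kernel: multiplies src by m into dst while exercising stores to global, shared
// and local memory, reporting every access through the ptx_stub instrumentation hooks.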
__global__ void tstfun(uint32_t sid, int *src, int* dst, const int m)
{
uint64_t tid = GETTID_FUNCTION_NAME(sid);
STORE_OP_FUNCTION_NAME(tid, 0, OP_START_KERNEL, 4);
__shared__ int zshared[32];
int p;
int* pp = &p;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int val = src[i];
STORE_OP_FUNCTION_NAME(tid, (uintptr_t)&src[i], OP_LOAD, 5);
printf("Tst1: &src[%i]=%p &dst[%i]=%p m=%i\n", i, &src[i], i, &dst[i], m);
// printf("Tst1: &src[%i]=%p &dst[%i]=%p m=%i zhared[i]=%p zglobal[i]=%p, pp=%p\n", i, &src[i], i, &dst[i], m, &zshared[i], &zglobal[i], pp);
store(tid, &dst[i], val * m);
store(tid, &zshared[i], val * m);
store(tid, &zglobal[i], val * m);
store(tid, pp, val * m);
dst[i] = val * m;
STORE_OP_FUNCTION_NAME(tid, (uintptr_t)&dst[i], OP_STORE, 6);
zshared[i] = val * m;
STORE_OP_FUNCTION_NAME(tid, (uintptr_t)&zshared[i], OP_STORE, 7);
zglobal[i] = val * m;
STORE_OP_FUNCTION_NAME(tid, (uintptr_t)&zglobal[i], OP_STORE, 8);
STORE_OP_FUNCTION_NAME(tid, 0, OP_END_KERNEL, 9);
}
/// host code
int main(int argc, char* argv[])
{
int *dst, *src;
int *dev_dst, *dev_src;
int num_blocks = 2;
int num_threads = 2;
if(argc > 1)
{
num_threads = atoi(argv[1]);
if(argc > 2)
num_blocks = atoi(argv[2]);
}
int num_total = num_threads * num_blocks;
printf("Tst1: threads=%i blocks:=%i total=%i\n", num_threads, num_blocks, num_total);
dst = new int[num_total];
src = new int[num_total];
for(int i = 0; i < num_total; ++ i)
{
dst[i] = 0;
src[i] = i + 10;
}
checkCudaErrors(cudaMalloc(&dev_src, sizeof(int) * num_total));
checkCudaErrors(cudaMemcpy(dev_src, src, sizeof(int) * num_total, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&dev_dst, sizeof(int) * num_total));
checkCudaErrors(cudaMemset(dev_dst, 0, sizeof(int) * num_total));
const int m = 5;
tstfun<<<num_blocks, num_threads>>>(0, dev_src, dev_dst, m);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(dst, dev_dst, sizeof(int) * num_total, cudaMemcpyDeviceToHost));
for(int i = 0; i < num_total; ++ i)
{
if(dst[i] != src[i] * m)
{
fprintf(stderr, "Tst1: Error At index: %i: %i\n", i, dst[i]);
return -1;
}
}
printf("Tst1: Success (%i*%i=%i).\n", num_blocks, num_total, num_total);
printf("Tst1: no hazards expected.\n");
stub_force_linking();
return 0;
}
|
ced55efcbbdad57942800c490ca11e5b30ed5f05.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <mpi.h>
#include <omp.h>
#include "./collectives.h"
const double *src_buf = NULL;
__host__
void cudaMPI_Scatter(const double *sendbuf, int sendcount,
double *recvbuf, int recvcount,
int root, MPI_Comm comm)
{
int tid = omp_get_thread_num();
int rank = device_id_to_rank[tid];
int mpi_root = device_rank_to_node[root];
int mpi_rank, mpi_size;
MPI_Comm_size(comm, &mpi_size);
MPI_Comm_rank(comm, &mpi_rank);
if (rank == root) {
src_buf = sendbuf;
}
double *host_tmp = NULL;
if (mpi_size > 1) {
if (rank == root) {
// allocate space for all GPUs
int total_count = total_num_devices * sendcount;
CUDACHECK( hipHostMalloc(&host_tmp, total_count * sizeof(double)) );
CUDACHECK( hipMemcpy(host_tmp, sendbuf, total_count * sizeof(double), hipMemcpyDefault) );
int *sendcounts = (int*)malloc(mpi_size * sizeof(int));
int *displs = (int*)malloc(mpi_size * sizeof(int));
displs[0] = 0;
int i;
for (i = 0; i < mpi_size; ++i) {
sendcounts[i] = device_counts[i] * sendcount;
if (i > 0) {
displs[i] = displs[i-1] + sendcounts[i-1];
}
}
// don't waste time sending things back here
sendcounts[mpi_root] = 0;
MPI_Scatterv(host_tmp, sendcounts, displs, MPI_DOUBLE,
host_tmp, 0, MPI_DOUBLE, mpi_root, comm);
free(displs);
free(sendcounts);
}
else {
if (tid == 0) {
CUDACHECK( hipHostMalloc(&host_tmp, recvcount * sizeof(double)) );
MPI_Scatterv(NULL, NULL, NULL, MPI_DOUBLE,
host_tmp, recvcount, MPI_DOUBLE, mpi_root, comm);
src_buf = host_tmp;
}
}
}
#pragma omp barrier
// I want to put offset calculation here. If I am on the same MPI node,
// this should be as simple as (src + (rank * sendcount)).
// However, if I am not on the same MPI node, this should be
// (src + (tid * sendcount))
const double *mysrcbuf = (mpi_rank == mpi_root) ?
src_buf + (rank * recvcount) : src_buf + (tid * recvcount);
// TODO: bucket algorithm here
CUDACHECK( hipMemcpy(recvbuf, mysrcbuf, recvcount * sizeof(double), hipMemcpyDefault) );
#pragma omp barrier
// clear out src pointer
if (tid == 0) {
src_buf = NULL;
}
if (host_tmp) {
CUDACHECK( hipHostFree((host_tmp)) );
host_tmp = NULL;
}
}
| ced55efcbbdad57942800c490ca11e5b30ed5f05.cu | #include <cuda.h>
#include <mpi.h>
#include <omp.h>
#include "./collectives.h"
const double *src_buf = NULL;
__host__
void cudaMPI_Scatter(const double *sendbuf, int sendcount,
double *recvbuf, int recvcount,
int root, MPI_Comm comm)
{
int tid = omp_get_thread_num();
int rank = device_id_to_rank[tid];
int mpi_root = device_rank_to_node[root];
int mpi_rank, mpi_size;
MPI_Comm_size(comm, &mpi_size);
MPI_Comm_rank(comm, &mpi_rank);
if (rank == root) {
src_buf = sendbuf;
}
double *host_tmp = NULL;
if (mpi_size > 1) {
if (rank == root) {
// allocate space for all GPUs
int total_count = total_num_devices * sendcount;
CUDACHECK( cudaMallocHost(&host_tmp, total_count * sizeof(double)) );
CUDACHECK( cudaMemcpy(host_tmp, sendbuf, total_count * sizeof(double), cudaMemcpyDefault) );
int *sendcounts = (int*)malloc(mpi_size * sizeof(int));
int *displs = (int*)malloc(mpi_size * sizeof(int));
displs[0] = 0;
int i;
for (i = 0; i < mpi_size; ++i) {
sendcounts[i] = device_counts[i] * sendcount;
if (i > 0) {
displs[i] = displs[i-1] + sendcounts[i-1];
}
}
// don't waste time sending things back here
sendcounts[mpi_root] = 0;
MPI_Scatterv(host_tmp, sendcounts, displs, MPI_DOUBLE,
host_tmp, 0, MPI_DOUBLE, mpi_root, comm);
free(displs);
free(sendcounts);
}
else {
if (tid == 0) {
CUDACHECK( cudaMallocHost(&host_tmp, recvcount * sizeof(double)) );
MPI_Scatterv(NULL, NULL, NULL, MPI_DOUBLE,
host_tmp, recvcount, MPI_DOUBLE, mpi_root, comm);
src_buf = host_tmp;
}
}
}
#pragma omp barrier
// I want to put offset calculation here. If I am on the same MPI node,
// this should be as simple as (src + (rank * sendcount)).
// However, if I am not on the same MPI node, this should be
// (src + (tid * sendcount))
const double *mysrcbuf = (mpi_rank == mpi_root) ?
src_buf + (rank * recvcount) : src_buf + (tid * recvcount);
// TODO: bucket algorithm here
CUDACHECK( cudaMemcpy(recvbuf, mysrcbuf, recvcount * sizeof(double), cudaMemcpyDefault) );
#pragma omp barrier
// clear out src pointer
if (tid == 0) {
src_buf = NULL;
}
if (host_tmp) {
CUDACHECK( cudaFreeHost((host_tmp)) );
host_tmp = NULL;
}
}
|
0acb72ee484855d9989bc2deb9152eec190d12bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Montecarlo.h"
#include <iostream>
#include "Device.h"
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void montecarlo(hiprandState_t* ptrDevGeneratorGM, int* ptrDevNxTotalGM, int nbFlecheByThread);
extern __global__ void createGenerator(hiprandState_t* tabDevGeneratorGM, int deviceId);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Constructor *|
\*-------------------------------------*/
Montecarlo::Montecarlo(const Grid& grid, const int nbFlecheTotal) :
dg(grid.dg), db(grid.db), sizeOctetSM(db.x * sizeof(int)), result(0.f), nbFlecheSousLaCourbe(0)
{
this->sizeOctetResultGM = sizeof(int);
this->sizeOctetTabGenerator = grid.threadCounts() * sizeof(hiprandState_t);
Device::malloc(&ptrDevGMResult, sizeOctetResultGM);
Device::memclear(ptrDevGMResult, sizeOctetResultGM);
Device::malloc(&ptrDevGMTabGenerator, sizeOctetTabGenerator);
Device::memclear(ptrDevGMTabGenerator, sizeOctetTabGenerator);
const int DEVICE_ID = Device::getDeviceId();
hipLaunchKernelGGL(( createGenerator), dim3(dg),dim3(db), 0, 0, ptrDevGMTabGenerator, DEVICE_ID);
this->nbFlecheParThread = nbFlecheTotal / grid.threadCounts();
this->nbFlecheTotal = grid.threadCounts() * nbFlecheParThread;
}
Montecarlo::~Montecarlo(void)
{
Device::free(ptrDevGMResult);
Device::free(ptrDevGMTabGenerator);
}
/*--------------------------------------*\
|* Method *|
\*-------------------------------------*/
void Montecarlo::run()
{
hipLaunchKernelGGL(( montecarlo), dim3(dg),dim3(db), sizeOctetSM, 0, ptrDevGMTabGenerator, ptrDevGMResult, nbFlecheParThread);
int result_device;
Device::memcpyDToH(&result_device, ptrDevGMResult, sizeOctetResultGM);
this->nbFlecheSousLaCourbe = result_device;
this->result = 4.f * result_device / nbFlecheTotal;
}
float Montecarlo::getResult()
{
return this->result;
}
int Montecarlo::getNbFlecheSousLaCourbe()
{
return this->nbFlecheSousLaCourbe;
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 0acb72ee484855d9989bc2deb9152eec190d12bc.cu | #include "Montecarlo.h"
#include <iostream>
#include "Device.h"
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void montecarlo(curandState* ptrDevGeneratorGM, int* ptrDevNxTotalGM, int nbFlecheByThread);
extern __global__ void createGenerator(curandState* tabDevGeneratorGM, int deviceId);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Constructor *|
\*-------------------------------------*/
Montecarlo::Montecarlo(const Grid& grid, const int nbFlecheTotal) :
dg(grid.dg), db(grid.db), sizeOctetSM(db.x * sizeof(int)), result(0.f), nbFlecheSousLaCourbe(0)
{
this->sizeOctetResultGM = sizeof(int);
this->sizeOctetTabGenerator = grid.threadCounts() * sizeof(curandState);
Device::malloc(&ptrDevGMResult, sizeOctetResultGM);
Device::memclear(ptrDevGMResult, sizeOctetResultGM);
Device::malloc(&ptrDevGMTabGenerator, sizeOctetTabGenerator);
Device::memclear(ptrDevGMTabGenerator, sizeOctetTabGenerator);
const int DEVICE_ID = Device::getDeviceId();
createGenerator<<<dg,db>>>(ptrDevGMTabGenerator, DEVICE_ID);
this->nbFlecheParThread = nbFlecheTotal / grid.threadCounts();
this->nbFlecheTotal = grid.threadCounts() * nbFlecheParThread;
}
Montecarlo::~Montecarlo(void)
{
Device::free(ptrDevGMResult);
Device::free(ptrDevGMTabGenerator);
}
/*--------------------------------------*\
|* Method *|
\*-------------------------------------*/
void Montecarlo::run()
{
montecarlo<<<dg,db, sizeOctetSM>>>(ptrDevGMTabGenerator, ptrDevGMResult, nbFlecheParThread);
int result_device;
Device::memcpyDToH(&result_device, ptrDevGMResult, sizeOctetResultGM);
this->nbFlecheSousLaCourbe = result_device;
this->result = 4.f * result_device / nbFlecheTotal;
}
float Montecarlo::getResult()
{
return this->result;
}
int Montecarlo::getNbFlecheSousLaCourbe()
{
return this->nbFlecheSousLaCourbe;
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
7922bf59651ba57882b2ac1194c3cae1dc06fd0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "Scalers.cuh"
#include <cmath>
#include <utility>
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "Stopwatch.cuh"
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))
static void HandleError(hipError_t err, const char *file, int line)
{
if (err != hipSuccess)
{
printf("%s in %s at line %d\n", hipGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
namespace
{
using namespace std;
const uint32_t BLOCK_DIM = 250;
const uint32_t ROWS_AMOUNT = 20000;
const uint32_t ATTRIBUTES_AMOUNT = 16;
namespace NormalizationGPU
{
__device__ void findLocalMinMax(double *devAttributes, double *mins, double *maxes)
{
int thisThreadStart = threadIdx.x * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
const int nextThreadStart = (threadIdx.x + 1) * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
double localMin = devAttributes[thisThreadStart];
double localMax = localMin;
__syncthreads();
for (int row = thisThreadStart; row < nextThreadStart; ++row)
{
auto value = devAttributes[row];
if (value < localMin)
{
localMin = value;
}
if (value > localMax)
{
localMax = value;
}
}
mins[threadIdx.x] = localMin;
maxes[threadIdx.x] = localMax;
}
__device__ void findMinMax(double *min, double *max, double *localMin, double *localMax)
{
if (threadIdx.x == 0)
{
*min = localMin[0];
*max = localMax[0];
}
__syncthreads();
for (int i = 0; i < blockDim.x; ++i)
{
auto localMinValue = localMin[i];
if (*min > localMinValue)
{
*min = localMinValue;
}
auto localMaxValue = localMax[i];
if (*max < localMaxValue)
{
*max = localMaxValue;
}
}
}
__device__ void transformValues(double *devAttributes, double *min, double *max)
{
int thisThreadStart = threadIdx.x * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
const int nextThreadStart = (threadIdx.x + 1) * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
double diff = *max - *min;
for (int row = thisThreadStart; row < nextThreadStart; ++row)
{
devAttributes[row] = (devAttributes[row] - *min) / diff;
}
}
__global__ void normalize(double *devAttributes)
{
__shared__ double max;
__shared__ double min;
{
__shared__ double localMax[BLOCK_DIM];
__shared__ double localMin[BLOCK_DIM];
findLocalMinMax(devAttributes, localMin, localMax);
__syncthreads();
findMinMax(&min, &max, localMin, localMax);
__syncthreads();
} // scoped shared memory variable localMin and localMax to save memory
transformValues(devAttributes, &min, &max);
}
} // namespace NormalizationGPU
namespace StandarizationGPU
{
__device__ void findLocalAverage(double *devAttributes, double *averages)
{
int thisThreadStart = threadIdx.x * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
const int nextThreadStart = (threadIdx.x + 1) * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
double localAverage = 0;
for (int row = thisThreadStart; row < nextThreadStart; ++row)
{
localAverage += devAttributes[row];
}
averages[threadIdx.x] = localAverage / (nextThreadStart - thisThreadStart);
}
__device__ void findAverage(double *average, double *localAverage)
{
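// Each thread contributes its partial average via atomicAdd; thread 0 then
// divides the accumulated sum by blockDim.x to turn it into the block average.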
if (threadIdx.x == 0)
{
*average = 0;
}
__syncthreads();
atomicAdd(average, localAverage[threadIdx.x]);
__syncthreads();
if (threadIdx.x == 0)
{
*average /= blockDim.x;
}
}
__device__ void findLocalVariation(double *devAttributes, double *variations, double *average)
{
int thisThreadStart = threadIdx.x * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
const int nextThreadStart = (threadIdx.x + 1) * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
double localVariation = 0;
for (int row = thisThreadStart; row < nextThreadStart; ++row)
{
auto tmp = devAttributes[row] - *average;
localVariation += tmp * tmp;
}
variations[threadIdx.x] = localVariation;
}
__device__ void findVariation(double *variation, double *localVariations)
{
if (threadIdx.x == 0)
{
*variation = 0;
}
__syncthreads();
atomicAdd(variation, localVariations[threadIdx.x]);
__syncthreads();
if (threadIdx.x == 0)
{
*variation /= ROWS_AMOUNT;
*variation = sqrt(*variation);
}
}
__device__ void transformValues(double *devAttributes, double *average, double *variation)
{
int thisThreadStart = threadIdx.x * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
const int nextThreadStart = (threadIdx.x + 1) * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
for (int row = thisThreadStart; row < nextThreadStart; ++row)
{
devAttributes[row] = (devAttributes[row] - *average) / *variation;
}
}
__global__ void standarize(double *devAttributes)
{
__shared__ double average;
{
__shared__ double localAverage[BLOCK_DIM];
findLocalAverage(devAttributes, localAverage);
__syncthreads();
findAverage(&average, localAverage);
__syncthreads();
} // scoped shared memory variable localAverage to save memory
__shared__ double variation;
{
__shared__ double localVariation[BLOCK_DIM];
findLocalVariation(devAttributes, localVariation, &average);
__syncthreads();
findVariation(&variation, localVariation);
__syncthreads();
} // scoped shared memory variable localVariation to save memory
transformValues(devAttributes, &average, &variation);
}
} // namespace StandarizationGPU
} // namespace
void Scalers::normalize(vector<double> &attributesValues)
{
double *attributes = attributesValues.data();
double *devAttributes = nullptr;
HANDLE_ERROR(hipMalloc(&devAttributes, attributesValues.size() * sizeof(double)));
HANDLE_ERROR(hipMemcpy(devAttributes, attributes, attributesValues.size() * sizeof(double), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( NormalizationGPU::normalize), dim3(ATTRIBUTES_AMOUNT), dim3(BLOCK_DIM), (2 + BLOCK_DIM * 2) * sizeof(double), 0, devAttributes);
HANDLE_ERROR(hipMemcpy(attributes, devAttributes, attributesValues.size() * sizeof(double), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipFree(devAttributes));
}
void Scalers::standarize(vector<double> &attributesValues)
{
double *attributes = attributesValues.data();
double *devAttributes = nullptr;
HANDLE_ERROR(hipMalloc(&devAttributes, attributesValues.size() * sizeof(double)));
HANDLE_ERROR(hipMemcpy(devAttributes, attributes, attributesValues.size() * sizeof(double), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( StandarizationGPU::standarize), dim3(ATTRIBUTES_AMOUNT), dim3(BLOCK_DIM), (2 + BLOCK_DIM) * sizeof(double), 0, devAttributes);
HANDLE_ERROR(hipMemcpy(attributes, devAttributes, attributesValues.size() * sizeof(double), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipFree(devAttributes));
}
| 7922bf59651ba57882b2ac1194c3cae1dc06fd0b.cu | #include "Scalers.cuh"
#include <cmath>
#include <utility>
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "Stopwatch.cuh"
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))
static void HandleError(cudaError_t err, const char *file, int line)
{
if (err != cudaSuccess)
{
printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
namespace
{
using namespace std;
const uint32_t BLOCK_DIM = 250;
const uint32_t ROWS_AMOUNT = 20000;
const uint32_t ATTRIBUTES_AMOUNT = 16;
namespace NormalizationGPU
{
__device__ void findLocalMinMax(double *devAttributes, double *mins, double *maxes)
{
int thisThreadStart = threadIdx.x * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
const int nextThreadStart = (threadIdx.x + 1) * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
double localMin = devAttributes[thisThreadStart];
double localMax = localMin;
__syncthreads();
for (int row = thisThreadStart; row < nextThreadStart; ++row)
{
auto value = devAttributes[row];
if (value < localMin)
{
localMin = value;
}
if (value > localMax)
{
localMax = value;
}
}
mins[threadIdx.x] = localMin;
maxes[threadIdx.x] = localMax;
}
__device__ void findMinMax(double *min, double *max, double *localMin, double *localMax)
{
if (threadIdx.x == 0)
{
*min = localMin[0];
*max = localMax[0];
}
__syncthreads();
for (int i = 0; i < blockDim.x; ++i)
{
auto localMinValue = localMin[i];
if (*min > localMinValue)
{
*min = localMinValue;
}
auto localMaxValue = localMax[i];
if (*max < localMaxValue)
{
*max = localMaxValue;
}
}
}
__device__ void transformValues(double *devAttributes, double *min, double *max)
{
int thisThreadStart = threadIdx.x * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
const int nextThreadStart = (threadIdx.x + 1) * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
double diff = *max - *min;
for (int row = thisThreadStart; row < nextThreadStart; ++row)
{
devAttributes[row] = (devAttributes[row] - *min) / diff;
}
}
__global__ void normalize(double *devAttributes)
{
__shared__ double max;
__shared__ double min;
{
__shared__ double localMax[BLOCK_DIM];
__shared__ double localMin[BLOCK_DIM];
findLocalMinMax(devAttributes, localMin, localMax);
__syncthreads();
findMinMax(&min, &max, localMin, localMax);
__syncthreads();
} // scoped shared memory variable localMin and localMax to save memory
transformValues(devAttributes, &min, &max);
}
} // namespace NormalizationGPU
namespace StandarizationGPU
{
__device__ void findLocalAverage(double *devAttributes, double *averages)
{
int thisThreadStart = threadIdx.x * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
const int nextThreadStart = (threadIdx.x + 1) * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
double localAverage = 0;
for (int row = thisThreadStart; row < nextThreadStart; ++row)
{
localAverage += devAttributes[row];
}
averages[threadIdx.x] = localAverage / (nextThreadStart - thisThreadStart);
}
__device__ void findAverage(double *average, double *localAverage)
{
if (threadIdx.x == 0)
{
*average = 0;
}
__syncthreads();
atomicAdd(average, localAverage[threadIdx.x]);
__syncthreads();
if (threadIdx.x == 0)
{
*average /= blockDim.x;
}
}
__device__ void findLocalVariation(double *devAttributes, double *variations, double *average)
{
int thisThreadStart = threadIdx.x * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
const int nextThreadStart = (threadIdx.x + 1) * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
double localVariation = 0;
for (int row = thisThreadStart; row < nextThreadStart; ++row)
{
auto tmp = devAttributes[row] - *average;
localVariation += tmp * tmp;
}
variations[threadIdx.x] = localVariation;
}
__device__ void findVariation(double *variation, double *localVariations)
{
if (threadIdx.x == 0)
{
*variation = 0;
}
__syncthreads();
atomicAdd(variation, localVariations[threadIdx.x]);
__syncthreads();
if (threadIdx.x == 0)
{
*variation /= ROWS_AMOUNT;
*variation = sqrt(*variation);
}
}
__device__ void transformValues(double *devAttributes, double *average, double *variation)
{
int thisThreadStart = threadIdx.x * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
const int nextThreadStart = (threadIdx.x + 1) * ROWS_AMOUNT / blockDim.x + blockIdx.x * ROWS_AMOUNT;
for (int row = thisThreadStart; row < nextThreadStart; ++row)
{
devAttributes[row] = (devAttributes[row] - *average) / *variation;
}
}
__global__ void standarize(double *devAttributes)
{
__shared__ double average;
{
__shared__ double localAverage[BLOCK_DIM];
findLocalAverage(devAttributes, localAverage);
__syncthreads();
findAverage(&average, localAverage);
__syncthreads();
} // scoped shared memory variable localAverage to save memory
__shared__ double variation;
{
__shared__ double localVariation[BLOCK_DIM];
findLocalVariation(devAttributes, localVariation, &average);
__syncthreads();
findVariation(&variation, localVariation);
__syncthreads();
} // scoped shared memory variable localVariation to save memory
transformValues(devAttributes, &average, &variation);
}
} // namespace StandarizationGPU
} // namespace
void Scalers::normalize(vector<double> &attributesValues)
{
double *attributes = attributesValues.data();
double *devAttributes = nullptr;
HANDLE_ERROR(cudaMalloc(&devAttributes, attributesValues.size() * sizeof(double)));
HANDLE_ERROR(cudaMemcpy(devAttributes, attributes, attributesValues.size() * sizeof(double), cudaMemcpyHostToDevice));
NormalizationGPU::normalize<<<ATTRIBUTES_AMOUNT, BLOCK_DIM, (2 + BLOCK_DIM * 2) * sizeof(double)>>>(devAttributes);
HANDLE_ERROR(cudaMemcpy(attributes, devAttributes, attributesValues.size() * sizeof(double), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaFree(devAttributes));
}
void Scalers::standarize(vector<double> &attributesValues)
{
double *attributes = attributesValues.data();
double *devAttributes = nullptr;
HANDLE_ERROR(cudaMalloc(&devAttributes, attributesValues.size() * sizeof(double)));
HANDLE_ERROR(cudaMemcpy(devAttributes, attributes, attributesValues.size() * sizeof(double), cudaMemcpyHostToDevice));
StandarizationGPU::standarize<<<ATTRIBUTES_AMOUNT, BLOCK_DIM, (2 + BLOCK_DIM) * sizeof(double)>>>(devAttributes);
HANDLE_ERROR(cudaMemcpy(attributes, devAttributes, attributesValues.size() * sizeof(double), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaFree(devAttributes));
}
|
e81bf3f0412d19c4d7e8fda1c1ffb8ec4344cbed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_unpack_yuy2_y16_cuda(const uint8_t * src, uint8_t *dst, int superPixCount)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (i >= superPixCount)
return;
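// Each YUY2 super-pixel (Y0 U0 Y1 V0) carries two luma samples; emit them as
// 16-bit values by zeroing the first byte and copying the Y byte into the second.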
for (; i < superPixCount; i += stride) {
int idx = i * 4;
dst[idx] = 0;
dst[idx + 1] = src[idx + 0];
dst[idx + 2] = 0;
dst[idx + 3] = src[idx + 2];
}
} | e81bf3f0412d19c4d7e8fda1c1ffb8ec4344cbed.cu | #include "includes.h"
__global__ void kernel_unpack_yuy2_y16_cuda(const uint8_t * src, uint8_t *dst, int superPixCount)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (i >= superPixCount)
return;
for (; i < superPixCount; i += stride) {
int idx = i * 4;
dst[idx] = 0;
dst[idx + 1] = src[idx + 0];
dst[idx + 2] = 0;
dst[idx + 3] = src[idx + 2];
}
} |
8af96b27ffb66c50dce58e2a7953d1290e6d7077.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helper.h"
#include "cuda_settings.h"
__global__ void collide(int X, int Y, int Z, float *cells_d, float *fuerza_d, int current) {
float w[19] = {(2./36.),(2./36.),(2./36.),(2./36.),(2./36.),(2./36.),
(1./36.),(1./36.),(1./36.),(1./36.),(1./36.),(1./36.),
(1./36.),(1./36.),(1./36.),(1./36.),(1./36.),(1./36.),
(12./36.)};
float e_x[19] = {1.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, -1.0f, -1.0f, -1.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
float e_y[19] = {0.0f, 0.0f, 1.0f, -1.0f, 0.0f, 0.0f, 1.0f, -1.0f, 0.0f, 0.0f, 1.0f, -1.0f, 0.0f, 0.0f, 1.0f, 1.0f, -1.0f, -1.0f, 0.0f};
float e_z[19] = {0.0f, 0.0f, 0.0f, 0.0f, 1.0f, -1.0f, 0.0f, 0.0f, 1.0f, -1.0f, 0.0f, 0.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 0.0f};
const float cs = 0.57735026919f; // 1/sqrt(3)
const float omega = 1.0;
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
if (i < X && j < Y && k < Z) {
// collision step
float rho = 0.0, u_x=0.0, u_y=0.0, u_z=0.0;
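// Accumulate macroscopic density (rho) and momentum from the 19 D3Q19
// distribution functions; the velocity is then corrected by half the body
// force before the collision and forcing terms are applied below.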
for (int l = 0; l < 19; l++) {
const float fi = CELLS_D(current, i, j, k, l);
rho += fi;
u_x += e_x[l]*fi;
u_y += e_y[l]*fi;
u_z += e_z[l]*fi;
}
u_x = (u_x + (FUERZA_D(i, j, k, 0))*(1./2.))/rho;
u_y = (u_y + (FUERZA_D(i, j, k, 1))*(1./2.))/rho;
u_z = (u_z + (FUERZA_D(i, j, k, 2))*(1./2.))/rho;
for (int l = 0; l < 19; l++) {
const float tmp = (e_x[l]*u_x + e_y[l]*u_y + e_z[l]*u_z);
// Funcin de equilibrio
float feq = w[l] * rho * ( 1.0 -
((3.0/2.0) * (u_x*u_x + u_y*u_y + u_z*u_z)) +
(3.0 * tmp) +
((9.0/2.0) * tmp*tmp ) );
// Force term for each direction i
float v1[3]={0.0,0.0,0.0};
v1[0]=(e_x[l]-u_x)/(cs*cs);
v1[1]=(e_y[l]-u_y)/(cs*cs);
v1[2]=(e_z[l]-u_z)/(cs*cs);
v1[0]=v1[0]+(tmp*e_x[l])/(cs*cs*cs*cs);
v1[1]=v1[1]+(tmp*e_y[l])/(cs*cs*cs*cs);
v1[2]=v1[2]+(tmp*e_z[l])/(cs*cs*cs*cs);
float Fi=0.0, tf=0.0;
tf = (v1[0]*FUERZA_D(i, j, k, 0) + v1[1]*FUERZA_D(i, j, k, 1) + v1[2]*FUERZA_D(i, j, k, 2));
Fi = (1.0-(omega/(2.0)))*w[l]*tf;
CELLS_D(current, i, j, k, l) = CELLS_D(current, i, j, k, l) - omega*(CELLS_D(current, i, j, k, l) - feq) + Fi;
}
}
}
void collide_wrapper(int X, int Y, int Z, float *cells_d, float *fuerza_d, int current) {
//X*Y*Z = 9261;
//Maximum number of threads per block: 1024
dim3 grid_size;
grid_size.x = GRID_SIZE_X;
grid_size.y = GRID_SIZE_Y;
grid_size.z = GRID_SIZE_Z;
dim3 block_size;
// 1000 threads per block
block_size.x = BLOCK_SIZE_X;
block_size.y = BLOCK_SIZE_Y;
block_size.z = BLOCK_SIZE_Z;
//Launch kernel
hipLaunchKernelGGL(( collide), dim3(grid_size), dim3(block_size), 0, 0, X, Y, Z, cells_d, fuerza_d, current);
}
| 8af96b27ffb66c50dce58e2a7953d1290e6d7077.cu | #include "helper.h"
#include "cuda_settings.h"
__global__ void collide(int X, int Y, int Z, float *cells_d, float *fuerza_d, int current) {
float w[19] = {(2./36.),(2./36.),(2./36.),(2./36.),(2./36.),(2./36.),
(1./36.),(1./36.),(1./36.),(1./36.),(1./36.),(1./36.),
(1./36.),(1./36.),(1./36.),(1./36.),(1./36.),(1./36.),
(12./36.)};
float e_x[19] = {1.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, -1.0f, -1.0f, -1.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
float e_y[19] = {0.0f, 0.0f, 1.0f, -1.0f, 0.0f, 0.0f, 1.0f, -1.0f, 0.0f, 0.0f, 1.0f, -1.0f, 0.0f, 0.0f, 1.0f, 1.0f, -1.0f, -1.0f, 0.0f};
float e_z[19] = {0.0f, 0.0f, 0.0f, 0.0f, 1.0f, -1.0f, 0.0f, 0.0f, 1.0f, -1.0f, 0.0f, 0.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 0.0f};
const float cs = 0.57735026919f; // 1/sqrt(3)
const float omega = 1.0;
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
if (i < X && j < Y && k < Z) {
// collision step
float rho = 0.0, u_x=0.0, u_y=0.0, u_z=0.0;
for (int l = 0; l < 19; l++) {
const float fi = CELLS_D(current, i, j, k, l);
rho += fi;
u_x += e_x[l]*fi;
u_y += e_y[l]*fi;
u_z += e_z[l]*fi;
}
u_x = (u_x + (FUERZA_D(i, j, k, 0))*(1./2.))/rho;
u_y = (u_y + (FUERZA_D(i, j, k, 1))*(1./2.))/rho;
u_z = (u_z + (FUERZA_D(i, j, k, 2))*(1./2.))/rho;
for (int l = 0; l < 19; l++) {
const float tmp = (e_x[l]*u_x + e_y[l]*u_y + e_z[l]*u_z);
// Equilibrium distribution function
float feq = w[l] * rho * ( 1.0 -
((3.0/2.0) * (u_x*u_x + u_y*u_y + u_z*u_z)) +
(3.0 * tmp) +
((9.0/2.0) * tmp*tmp ) );
// Force term for each direction i
float v1[3]={0.0,0.0,0.0};
v1[0]=(e_x[l]-u_x)/(cs*cs);
v1[1]=(e_y[l]-u_y)/(cs*cs);
v1[2]=(e_z[l]-u_z)/(cs*cs);
v1[0]=v1[0]+(tmp*e_x[l])/(cs*cs*cs*cs);
v1[1]=v1[1]+(tmp*e_y[l])/(cs*cs*cs*cs);
v1[2]=v1[2]+(tmp*e_z[l])/(cs*cs*cs*cs);
float Fi=0.0, tf=0.0;
tf = (v1[0]*FUERZA_D(i, j, k, 0) + v1[1]*FUERZA_D(i, j, k, 1) + v1[2]*FUERZA_D(i, j, k, 2));
Fi = (1.0-(omega/(2.0)))*w[l]*tf;
CELLS_D(current, i, j, k, l) = CELLS_D(current, i, j, k, l) - omega*(CELLS_D(current, i, j, k, l) - feq) + Fi;
}
}
}
void collide_wrapper(int X, int Y, int Z, float *cells_d, float *fuerza_d, int current) {
//X*Y*Z = 9261;
//Maximum number of threads per block: 1024
dim3 grid_size;
grid_size.x = GRID_SIZE_X;
grid_size.y = GRID_SIZE_Y;
grid_size.z = GRID_SIZE_Z;
dim3 block_size;
// 1000 threads per blocks
block_size.x = BLOCK_SIZE_X;
block_size.y = BLOCK_SIZE_Y;
block_size.z = BLOCK_SIZE_Z;
//Launch kernel
collide<<<grid_size, block_size>>>(X, Y, Z, cells_d, fuerza_d, current);
}
|
2c4bccf680b76f7bd5f761a0b4b369aa07cb28cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define _DEVICE_VARS_TEST_
#include "GpuTestController.cuh"
#define WARP_SIZE 32
#define MODE_MASK 0x1
#define OP_MASK 0xE
#define DST_MASK 0x70
#define SRC_MASK 0x1F80
#define MODE_SHIFT 0
#define OP_SHIFT 1
#define DST_SHIFT 4
#define SRC_SHIFT 7
#define REG_COUNT 8
#define OP_SUM 0
#define OP_DIFF 1
#define OP_PROD 2
#define OP_DIV 3
#define OP_MOD 4
#define OP_COS 5
#define OP_EXP 6
#define OP_LOG 7
#define OP_CODE_COUNT 8
#define OpCodeCount 7
#define OP_NO 199
#define MAXTHREADS 256
__global__ void kLearnerTest(int val,
int learnOffset,
int pointOffset,
_learner* dLearnerMatrix,
_learnerBid* dLearnerBidMatrix,
_point* dPointMatrix,
int learnerCount,
int pointCount)
{
int pointId = threadIdx.x + blockIdx.x * blockDim.x + pointOffset; // + (blockIdx.x*gridDim.x);
int learnerId = threadIdx.y + blockIdx.y* blockDim.y + learnOffset;// + blockIdx.y * blockDim.y;
if (learnerId < TOTAL_LEARNERS && pointId < TOTAL_POINTS)
{
int id = threadIdx.x * blockDim.y + threadIdx.y;
//_learner *shared_learner = &dLearnerMatrix[(threadIdx.y + blockIdx.y*blockDim.y) * LEARNER_LENGTH];
//_point *feature = &dPointMatrix[(threadIdx.x + blockIdx.x *blockDim.x)*NUM_FEATURES];
_learner *shared_learner = &dLearnerMatrix[learnerId*LEARNER_LENGTH];
_point *feature = &dPointMatrix[pointId*NUM_FEATURES];
//_learner *shared_learner = &dLearnerMatrix[];
//_point *feature = &dPointMatrix[0];
__shared__ _learnerBid registers[MAXTHREADS][8];
registers[id][0] =0;
registers[id][1] =0;
registers[id][2] =0;
registers[id][3] =0;
registers[id][4] =0;
registers[id][5] =0;
registers[id][6] =0;
registers[id][7] =0;
//short progsize = shared_learner[0];
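// shared_learner[0] holds the program length; each following element is a
// packed instruction: bit 0 selects the source mode (feature vs. register),
// bits 1-3 the opcode, bits 4-6 the destination register and bits 7-12 the
// source index (see the MODE/OP/DST/SRC masks and shifts defined above).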
for (int i=1;i<=shared_learner[0];i++)
{
_learnerBid* dst = ®isters[id][((shared_learner[i] & DST_MASK) >> DST_SHIFT)];
_learnerBid srcVal;
if (1 == ((shared_learner[i] & MODE_MASK) >> MODE_SHIFT ) %2) {
// srcVal = dPointMatrix[threadIdx.x* NUM_FEATURES + ((shared_learner[i] & SRC_MASK) >> SRC_SHIFT) % NUM_FEATURES ];
srcVal = feature[((shared_learner[i] & SRC_MASK) >> SRC_SHIFT) % NUM_FEATURES ];
}else{
srcVal = registers[id][(((shared_learner[i] & SRC_MASK) >> SRC_SHIFT) % REG_COUNT)];
}
switch ( ((shared_learner[i] & OP_MASK) >> OP_SHIFT) % OP_CODE_COUNT){
case OP_SUM:
(*dst) += srcVal;
break;
case OP_DIFF:
(*dst) -= srcVal;
break;
case OP_PROD:
(*dst) *= srcVal;
break;
case OP_DIV:
(*dst) /= srcVal;
break;
case OP_MOD:
(*dst) = fmod((*dst), srcVal);
break;
case OP_COS:
(*dst) = cos(srcVal);
break;
case OP_EXP:
(*dst) = expf(srcVal);
break;
case OP_LOG:
(*dst) = logf(fabs(srcVal));
break;
}
if(isfinite((*dst)) == 0)
(*dst) = 0;
}
dLearnerBidMatrix[ threadIdx.y * blockDim.x + blockIdx.x*blockDim.x + threadIdx.x] = 1 / (1+exp(-registers[id][0]));
// dLearnerBidMatrix[ threadIdx.y * blockDim.x + blockIdx.x*blockDim.x + threadIdx.x] = val+1;
}
}
__global__ void kEval2(int val,
int learnOffset,
int pointOffset,
_learner* dLearnerMatrix,
_learnerBid* dLearnerBidMatrix,
_point* dPointMatrix,
int learnerCount,
int pointCount)
{
int pointId = threadIdx.x + blockIdx.x * blockDim.x + pointOffset ;
int learnerId = threadIdx.y + blockIdx.y* blockDim.y + learnOffset ;
// int learnerId = threadIdx.y + blockIdx.y * blockDim.y ;
if (learnerId < TOTAL_LEARNERS && pointId < TOTAL_POINTS)
{
// dLearnerBidMatrix[ learnerId*TOTAL_POINTS + pointId] = dPointMatrix[pointId * NUM_FEATURES + learnerId] ; // + min(LEARNER_LENGTH-1, pointId)];// blockIdx.x * gridDim.y + blockIdx.y + 1;
// dLearnerBidMatrix[ learnerId*TOTAL_POINTS + pointId] = 1 ;//learnOffset;//blockIdx.x * gridDim.y + blockIdx.y + 1;
dLearnerBidMatrix[ threadIdx.y * blockDim.x + threadIdx.x] = threadIdx.x;//blockIdx.x * gridDim.y + blockIdx.y + 1;
}
}
__host__
void TestLearners( int learnerCount,
int learnerLength,
_learner* hLearnerMatrix,
_learnerBid* hLearnerBidMatrix,
int trainingSetSize,
_point* hTrainingSet ,
int numFeatures
)
{
int bytesize_singlePoint = sizeof(_point) * numFeatures;
int bytesize_learner = sizeof(_learner) * learnerLength;
//////////////////////////
// Memory Allocations
//////////////////////////
int streamCount = 2;
_point* dTrainingSet;
_learner* dLearnerMatrix;
_learnerBid* dBidMatrix[streamCount];
int learnersPerChunk =1 ;
int pointsPerChunk = 512;
cutilSafeCall( hipMalloc( (void**) &dTrainingSet, bytesize_singlePoint * trainingSetSize ));
cutilSafeCall( hipMalloc( (void**) &dLearnerMatrix, bytesize_learner * learnerCount ));
hipStream_t* stream = new hipStream_t[streamCount ];
for(int i=0; i < streamCount; i++)
{
cutilSafeCall( hipMalloc( (void**) &dBidMatrix[i], sizeof(_learnerBid) * learnersPerChunk* pointsPerChunk ));
hipStreamCreate(&stream[i]);
}
// cutilSafeCall( hipMalloc( (void**) &dBidMatrix[1], sizeof(_learnerBid) * learnersPerChunk* pointsPerChunk ));
hipMemset(dLearnerMatrix,0,bytesize_learner * learnerCount );
hipMemset(dBidMatrix[0],0,sizeof(_learnerBid) * learnersPerChunk * pointsPerChunk );
cutilSafeCall( hipMemcpyToSymbol( TOTAL_LEARNERS, &learnerCount,sizeof(short)));
cutilSafeCall( hipMemcpyToSymbol( TOTAL_POINTS, &trainingSetSize,sizeof(short)));
cutilSafeCall( hipMemcpyToSymbol( NUM_FEATURES, &numFeatures,sizeof(short)));
cutilSafeCall( hipMemcpyToSymbol( LEARNER_LENGTH, &learnerLength,sizeof(short)));
int learnerChunkCount = (learnerCount-1)/ learnersPerChunk+1;
int pointChunkCount = (trainingSetSize-1) / pointsPerChunk+1;
int pointsPerBlock = 256;
int learnersPerBlock = min(MAXTHREADS/pointsPerBlock , learnerCount);
int threadsPerBlock_x = pointsPerBlock ;
int threadsPerBlock_y = learnersPerBlock;
int blocksPerGrid_x = ((pointsPerChunk-1)/pointsPerBlock+1);
int blocksPerGrid_y = ((learnersPerChunk-1)/learnersPerBlock+1);
dim3 GRID (blocksPerGrid_x,blocksPerGrid_y);
dim3 BLOCK (threadsPerBlock_x,threadsPerBlock_y);
// printf(" ### lpC:%d lCC:%d ppC:%d pCC:%d PS:%d BPGx%d BPGy:%d\n" , learnersPerChunk , learnerChunkCount, pointsPerChunk, pointChunkCount , trainingSetSize, blocksPerGrid_x ,blocksPerGrid_y );
cutilSafeCall(hipMemcpy (dLearnerMatrix, hLearnerMatrix, learnerCount* bytesize_learner, hipMemcpyHostToDevice));
cutilSafeCall(hipMemcpy (dTrainingSet, hTrainingSet, trainingSetSize* bytesize_singlePoint, hipMemcpyHostToDevice));
//
int streamA = 0;
int streamB = 1;
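// Walk the learner x point grid in chunks: the outer loop covers learner
// chunks, the inner loop takes point chunks in groups of streamCount so each
// stream's kernel launch can overlap with the async copy-back of its results.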
for(int chunkId_y=0; chunkId_y< learnerChunkCount;chunkId_y ++)
{
for(int chunkId_x=0; chunkId_x< pointChunkCount;chunkId_x +=streamCount)
{
//kLearnerTest<<<GRID,BLOCK,0,stream[streamId]>>>(chunkId_x,chunkId_y ,chunkId_x*pointsPerChunk,dLearnerMatrix,dBidMatrix[0], dTrainingSet, 1, trainingSetSize);
for(int streamId=0; streamId < streamCount; streamId++)
{
if(chunkId_x+streamId < pointChunkCount){
hipLaunchKernelGGL(( kLearnerTest), dim3(GRID),dim3(BLOCK),0,stream[streamId], chunkId_x+streamId,chunkId_y ,(chunkId_x+streamId)*pointsPerChunk,dLearnerMatrix,dBidMatrix[streamId], dTrainingSet, 1, trainingSetSize);
}
}
for(int streamId=0; streamId < streamCount; streamId++)
{
if(chunkId_x+streamId < pointChunkCount){
cutilSafeCall(hipMemcpyAsync (hLearnerBidMatrix + chunkId_y*trainingSetSize + (chunkId_x+streamId)* pointsPerChunk , dBidMatrix[streamId], pointsPerChunk * sizeof(_learnerBid), hipMemcpyDeviceToHost,stream[streamId]));
}
}
}
// int offset = chunkId_y * learnersPerChunk * trainingSetSize;
//cutilSafeCall(hipMemcpyAsync (hLearnerBidMatrix + chunkId_y*trainingSetSize , dBidMatrix[0], trainingSetSize * sizeof(_learnerBid), hipMemcpyDeviceToHost,stream[streamB]));
}
hipDeviceSynchronize();
cutilSafeCall( hipFree( dLearnerMatrix));
cutilSafeCall( hipFree( dBidMatrix[0]));
//cutilSafeCall( hipFree( dBidMatrix[1]));
cutilSafeCall( hipFree( dTrainingSet));
for(int i=0; i < streamCount; i++)
{
hipStreamDestroy(stream[i]);
}
}
| 2c4bccf680b76f7bd5f761a0b4b369aa07cb28cd.cu | #define _DEVICE_VARS_TEST_
#include "GpuTestController.cuh"
#define WARP_SIZE 32
#define MODE_MASK 0x1
#define OP_MASK 0xE
#define DST_MASK 0x70
#define SRC_MASK 0x1F80
#define MODE_SHIFT 0
#define OP_SHIFT 1
#define DST_SHIFT 4
#define SRC_SHIFT 7
#define REG_COUNT 8
#define OP_SUM 0
#define OP_DIFF 1
#define OP_PROD 2
#define OP_DIV 3
#define OP_MOD 4
#define OP_COS 5
#define OP_EXP 6
#define OP_LOG 7
#define OP_CODE_COUNT 8
#define OpCodeCount 7
#define OP_NO 199
#define MAXTHREADS 256
__global__ void kLearnerTest(int val,
int learnOffset,
int pointOffset,
_learner* dLearnerMatrix,
_learnerBid* dLearnerBidMatrix,
_point* dPointMatrix,
int learnerCount,
int pointCount)
{
int pointId = threadIdx.x + blockIdx.x * blockDim.x + pointOffset; // + (blockIdx.x*gridDim.x);
int learnerId = threadIdx.y + blockIdx.y* blockDim.y + learnOffset;// + blockIdx.y * blockDim.y;
if (learnerId < TOTAL_LEARNERS && pointId < TOTAL_POINTS)
{
int id = threadIdx.x * blockDim.y + threadIdx.y;
//_learner *shared_learner = &dLearnerMatrix[(threadIdx.y + blockIdx.y*blockDim.y) * LEARNER_LENGTH];
//_point *feature = &dPointMatrix[(threadIdx.x + blockIdx.x *blockDim.x)*NUM_FEATURES];
_learner *shared_learner = &dLearnerMatrix[learnerId*LEARNER_LENGTH];
_point *feature = &dPointMatrix[pointId*NUM_FEATURES];
//_learner *shared_learner = &dLearnerMatrix[];
//_point *feature = &dPointMatrix[0];
__shared__ _learnerBid registers[MAXTHREADS][8];
registers[id][0] =0;
registers[id][1] =0;
registers[id][2] =0;
registers[id][3] =0;
registers[id][4] =0;
registers[id][5] =0;
registers[id][6] =0;
registers[id][7] =0;
//short progsize = shared_learner[0];
for (int i=1;i<=shared_learner[0];i++)
{
_learnerBid* dst = ®isters[id][((shared_learner[i] & DST_MASK) >> DST_SHIFT)];
_learnerBid srcVal;
if (1 == ((shared_learner[i] & MODE_MASK) >> MODE_SHIFT ) %2) {
// srcVal = dPointMatrix[threadIdx.x* NUM_FEATURES + ((shared_learner[i] & SRC_MASK) >> SRC_SHIFT) % NUM_FEATURES ];
srcVal = feature[((shared_learner[i] & SRC_MASK) >> SRC_SHIFT) % NUM_FEATURES ];
}else{
srcVal = registers[id][(((shared_learner[i] & SRC_MASK) >> SRC_SHIFT) % REG_COUNT)];
}
switch ( ((shared_learner[i] & OP_MASK) >> OP_SHIFT) % OP_CODE_COUNT){
case OP_SUM:
(*dst) += srcVal;
break;
case OP_DIFF:
(*dst) -= srcVal;
break;
case OP_PROD:
(*dst) *= srcVal;
break;
case OP_DIV:
(*dst) /= srcVal;
break;
case OP_MOD:
(*dst) = fmod((*dst), srcVal);
break;
case OP_COS:
(*dst) = cos(srcVal);
break;
case OP_EXP:
(*dst) = expf(srcVal);
break;
case OP_LOG:
(*dst) = logf(fabs(srcVal));
break;
}
if(isfinite((*dst)) == 0)
(*dst) = 0;
}
dLearnerBidMatrix[ threadIdx.y * blockDim.x + blockIdx.x*blockDim.x + threadIdx.x] = 1 / (1+exp(-registers[id][0]));
// dLearnerBidMatrix[ threadIdx.y * blockDim.x + blockIdx.x*blockDim.x + threadIdx.x] = val+1;
}
}
__global__ void kEval2(int val,
int learnOffset,
int pointOffset,
_learner* dLearnerMatrix,
_learnerBid* dLearnerBidMatrix,
_point* dPointMatrix,
int learnerCount,
int pointCount)
{
int pointId = threadIdx.x + blockIdx.x * blockDim.x + pointOffset ;
int learnerId = threadIdx.y + blockIdx.y* blockDim.y + learnOffset ;
// int learnerId = threadIdx.y + blockIdx.y * blockDim.y ;
if (learnerId < TOTAL_LEARNERS && pointId < TOTAL_POINTS)
{
// dLearnerBidMatrix[ learnerId*TOTAL_POINTS + pointId] = dPointMatrix[pointId * NUM_FEATURES + learnerId] ; // + min(LEARNER_LENGTH-1, pointId)];// blockIdx.x * gridDim.y + blockIdx.y + 1;
// dLearnerBidMatrix[ learnerId*TOTAL_POINTS + pointId] = 1 ;//learnOffset;//blockIdx.x * gridDim.y + blockIdx.y + 1;
dLearnerBidMatrix[ threadIdx.y * blockDim.x + threadIdx.x] = threadIdx.x;//blockIdx.x * gridDim.y + blockIdx.y + 1;
}
}
__host__
void TestLearners( int learnerCount,
int learnerLength,
_learner* hLearnerMatrix,
_learnerBid* hLearnerBidMatrix,
int trainingSetSize,
_point* hTrainingSet ,
int numFeatures
)
{
int bytesize_singlePoint = sizeof(_point) * numFeatures;
int bytesize_learner = sizeof(_learner) * learnerLength;
//////////////////////////
// Memory Allocations
//////////////////////////
int streamCount = 2;
_point* dTrainingSet;
_learner* dLearnerMatrix;
_learnerBid* dBidMatrix[streamCount];
int learnersPerChunk =1 ;
int pointsPerChunk = 512;
cutilSafeCall( cudaMalloc( (void**) &dTrainingSet, bytesize_singlePoint * trainingSetSize ));
cutilSafeCall( cudaMalloc( (void**) &dLearnerMatrix, bytesize_learner * learnerCount ));
cudaStream_t* stream = new cudaStream_t[streamCount ];
for(int i=0; i < streamCount; i++)
{
cutilSafeCall( cudaMalloc( (void**) &dBidMatrix[i], sizeof(_learnerBid) * learnersPerChunk* pointsPerChunk ));
cudaStreamCreate(&stream[i]);
}
// cutilSafeCall( cudaMalloc( (void**) &dBidMatrix[1], sizeof(_learnerBid) * learnersPerChunk* pointsPerChunk ));
cudaMemset(dLearnerMatrix,0,bytesize_learner * learnerCount );
cudaMemset(dBidMatrix[0],0,sizeof(_learnerBid) * learnersPerChunk * pointsPerChunk );
cutilSafeCall( cudaMemcpyToSymbol( TOTAL_LEARNERS, &learnerCount,sizeof(short)));
cutilSafeCall( cudaMemcpyToSymbol( TOTAL_POINTS, &trainingSetSize,sizeof(short)));
cutilSafeCall( cudaMemcpyToSymbol( NUM_FEATURES, &numFeatures,sizeof(short)));
cutilSafeCall( cudaMemcpyToSymbol( LEARNER_LENGTH, &learnerLength,sizeof(short)));
int learnerChunkCount = (learnerCount-1)/ learnersPerChunk+1;
int pointChunkCount = (trainingSetSize-1) / pointsPerChunk+1;
int pointsPerBlock = 256;
int learnersPerBlock = min(MAXTHREADS/pointsPerBlock , learnerCount);
int threadsPerBlock_x = pointsPerBlock ;
int threadsPerBlock_y = learnersPerBlock;
int blocksPerGrid_x = ((pointsPerChunk-1)/pointsPerBlock+1);
int blocksPerGrid_y = ((learnersPerChunk-1)/learnersPerBlock+1);
dim3 GRID (blocksPerGrid_x,blocksPerGrid_y);
dim3 BLOCK (threadsPerBlock_x,threadsPerBlock_y);
// printf(" ### lpC:%d lCC:%d ppC:%d pCC:%d PS:%d BPGx%d BPGy:%d\n" , learnersPerChunk , learnerChunkCount, pointsPerChunk, pointChunkCount , trainingSetSize, blocksPerGrid_x ,blocksPerGrid_y );
cutilSafeCall(cudaMemcpy (dLearnerMatrix, hLearnerMatrix, learnerCount* bytesize_learner, cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpy (dTrainingSet, hTrainingSet, trainingSetSize* bytesize_singlePoint, cudaMemcpyHostToDevice));
//
int streamA = 0;
int streamB = 1;
for(int chunkId_y=0; chunkId_y< learnerChunkCount;chunkId_y ++)
{
for(int chunkId_x=0; chunkId_x< pointChunkCount;chunkId_x +=streamCount)
{
//kLearnerTest<<<GRID,BLOCK,0,stream[streamId]>>>(chunkId_x,chunkId_y ,chunkId_x*pointsPerChunk,dLearnerMatrix,dBidMatrix[0], dTrainingSet, 1, trainingSetSize);
for(int streamId=0; streamId < streamCount; streamId++)
{
if(chunkId_x+streamId < pointChunkCount){
kLearnerTest<<<GRID,BLOCK,0,stream[streamId]>>>(chunkId_x+streamId,chunkId_y ,(chunkId_x+streamId)*pointsPerChunk,dLearnerMatrix,dBidMatrix[streamId], dTrainingSet, 1, trainingSetSize);
}
}
for(int streamId=0; streamId < streamCount; streamId++)
{
if(chunkId_x+streamId < pointChunkCount){
cutilSafeCall(cudaMemcpyAsync (hLearnerBidMatrix + chunkId_y*trainingSetSize + (chunkId_x+streamId)* pointsPerChunk , dBidMatrix[streamId], pointsPerChunk * sizeof(_learnerBid), cudaMemcpyDeviceToHost,stream[streamId]));
}
}
}
// int offset = chunkId_y * learnersPerChunk * trainingSetSize;
//cutilSafeCall(cudaMemcpyAsync (hLearnerBidMatrix + chunkId_y*trainingSetSize , dBidMatrix[0], trainingSetSize * sizeof(_learnerBid), cudaMemcpyDeviceToHost,stream[streamB]));
}
cudaDeviceSynchronize();
cutilSafeCall( cudaFree( dLearnerMatrix));
cutilSafeCall( cudaFree( dBidMatrix[0]));
//cutilSafeCall( cudaFree( dBidMatrix[1]));
cutilSafeCall( cudaFree( dTrainingSet));
for(int i=0; i < streamCount; i++)
{
cudaStreamDestroy(stream[i]);
}
}
|
97b538f16f9f1233366555f7628c0e1ed52cb416.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <windows.h>
#include <d2d1.h>
#include <d2d1helper.h>
#pragma comment(lib, "d2d1")
//*****double buffering*****
#define SCREEN_WIDTH 1920
#define SCREEN_HEIGHT 1000
D2D1_RECT_U display_area;
ID2D1Bitmap *image_container = NULL;
unsigned int *dev_image_data, image_data[SCREEN_WIDTH * SCREEN_HEIGHT];
float *dev_zbuffer;
typedef struct Vec3f {
float x, y, z;
};
//**************************************
ID2D1Factory* pD2DFactory = NULL;
ID2D1HwndRenderTarget* pRT = NULL;
#define HIBA_00 TEXT("Error:Program initialisation process.")
HINSTANCE hInstGlob;
int SajatiCmdShow;
char szClassName[] = "WindowsApp";
HWND Form1; //Windows handler
LRESULT CALLBACK WndProc0(HWND, UINT, WPARAM, LPARAM);
void D2D_drawing(ID2D1HwndRenderTarget* pRT);
//*****double buffering*****
void create_main_buffer(void);
void CUDA_cleanup_main_buffer(void);
__global__ void CUDA_CleanUp_Zbuffer(float *zpuffer);
void swap_main_buffer(void);
//**************************************
//*****Drawing algorithms*****
__device__ void CUDA_SetPixel_Zbuffer(int x1, int y1, int z1, int color, unsigned int *puffer, float *zpuffer);
__device__ void CUDA_DrawLine_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int color, unsigned int *puffer, float *zpuffer);
__device__ void CUDA_FillTriangle_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int x3, int y3, int z3, int color, unsigned int *puffer, float *zpuffer);
//**************************************
//*********************************
//The main entry point of our program
//*********************************
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, PSTR szCmdLine, int iCmdShow)
{
static TCHAR szAppName[] = TEXT("StdWinClassName");
HWND hwnd;
MSG msg;
WNDCLASS wndclass0;
SajatiCmdShow = iCmdShow;
hInstGlob = hInstance;
//*********************************
//Preparing Windows class
//*********************************
wndclass0.style = CS_HREDRAW | CS_VREDRAW;
wndclass0.lpfnWndProc = WndProc0;
wndclass0.cbClsExtra = 0;
wndclass0.cbWndExtra = 0;
wndclass0.hInstance = hInstance;
wndclass0.hIcon = LoadIcon(NULL, IDI_APPLICATION);
wndclass0.hCursor = LoadCursor(NULL, IDC_ARROW);
wndclass0.hbrBackground = (HBRUSH)GetStockObject(LTGRAY_BRUSH);
wndclass0.lpszMenuName = NULL;
wndclass0.lpszClassName = TEXT("WIN0");
//*********************************
//Registering our windows class
//*********************************
if (!RegisterClass(&wndclass0))
{
MessageBox(NULL, HIBA_00, TEXT("Program Start"), MB_ICONERROR);
return 0;
}
//*********************************
//Creating the window
//*********************************
Form1 = CreateWindow(TEXT("WIN0"),
TEXT("CUDA - DIRECT2D"),
(WS_OVERLAPPED | WS_SYSMENU | WS_THICKFRAME | WS_MAXIMIZEBOX | WS_MINIMIZEBOX),
50,
50,
SCREEN_WIDTH,
SCREEN_HEIGHT,
NULL,
NULL,
hInstance,
NULL);
//*********************************
//Displaying the window
//*********************************
ShowWindow(Form1, SajatiCmdShow);
UpdateWindow(Form1);
//*********************************
//Activating the message processing for our window
//*********************************
while (GetMessage(&msg, NULL, 0, 0))
{
TranslateMessage(&msg);
DispatchMessage(&msg);
}
return msg.wParam;
}
//*********************************
//The window's callback function: handling events
//*********************************
LRESULT CALLBACK WndProc0(HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam)
{
HDC hdc;
PAINTSTRUCT ps;
switch (message)
{
//*********************************
//When creating the window
//*********************************
case WM_CREATE:
D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED, &pD2DFactory);
pD2DFactory->CreateHwndRenderTarget(
D2D1::RenderTargetProperties(),
D2D1::HwndRenderTargetProperties(
hwnd, D2D1::SizeU(SCREEN_WIDTH, SCREEN_HEIGHT)),
&pRT);
create_main_buffer();
hipMalloc((void**)&dev_image_data, SCREEN_WIDTH * SCREEN_HEIGHT * sizeof(unsigned int));
hipMalloc((void**)&dev_zbuffer, SCREEN_WIDTH * SCREEN_HEIGHT * sizeof(float));
return 0;
//*********************************
//to eliminate color flickering
//*********************************
case WM_ERASEBKGND:
return (LRESULT)1;
//*********************************
//Repainting the client area of the window
//*********************************
case WM_PAINT:
hdc = BeginPaint(hwnd, &ps);
EndPaint(hwnd, &ps);
D2D_drawing(pRT);
return 0;
//*********************************
//Closing the window, freeing resources
//*********************************
case WM_CLOSE:
pRT->Release();
pD2DFactory->Release();
hipFree(dev_image_data);
hipFree(dev_zbuffer);
DestroyWindow(hwnd);
return 0;
//*********************************
//Destroying the window
//*********************************
case WM_DESTROY:
PostQuitMessage(0);
return 0;
}
return DefWindowProc(hwnd, message, wParam, lParam);
}
void D2D_drawing(ID2D1HwndRenderTarget* pRT)
{
CUDA_cleanup_main_buffer();
hipLaunchKernelGGL(( CUDA_CleanUp_Zbuffer) , dim3(((SCREEN_WIDTH*SCREEN_HEIGHT) + 384 - 1) / 384), dim3(384) , 0, 0, dev_zbuffer);
hipDeviceSynchronize();
//render_objects<<<blocks,threads >>>(dev_image_data,dev_zbuffer);
hipDeviceSynchronize();
hipMemcpy(image_data, dev_image_data, SCREEN_WIDTH * SCREEN_HEIGHT * sizeof(unsigned int), hipMemcpyDeviceToHost);
swap_main_buffer();
}
void create_main_buffer(void)
{
pRT->CreateBitmap(D2D1::SizeU(SCREEN_WIDTH, SCREEN_HEIGHT),
D2D1::BitmapProperties(D2D1::PixelFormat(DXGI_FORMAT_B8G8R8A8_UNORM,
D2D1_ALPHA_MODE_IGNORE)), &image_container);
}
void CUDA_cleanup_main_buffer(void)
{
hipMemset(dev_image_data, 255, SCREEN_HEIGHT*SCREEN_WIDTH * sizeof(unsigned int));
}
__global__ void CUDA_CleanUp_Zbuffer(float *zpuffer)
{
int i;
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (i = index; i < SCREEN_HEIGHT*SCREEN_WIDTH; i += stride)
{
zpuffer[i] = 999999;
}
}
void swap_main_buffer(void)
{
display_area.left = 0;
display_area.top = 0;
display_area.right = SCREEN_WIDTH;
display_area.bottom = SCREEN_HEIGHT;
image_container->CopyFromMemory(&display_area, image_data, SCREEN_WIDTH * sizeof(unsigned int));
pRT->BeginDraw();
pRT->DrawBitmap(image_container, D2D1::RectF(0.0f, 0.0f, SCREEN_WIDTH, SCREEN_HEIGHT), 1.0f, D2D1_BITMAP_INTERPOLATION_MODE_NEAREST_NEIGHBOR, NULL);
pRT->EndDraw();
}
__device__ void CUDA_SetPixel_Zbuffer(int x1, int y1, int z1, int color, unsigned int *puffer, float *zpuffer)
{
int offset = (y1 * SCREEN_WIDTH) + x1;
if (zpuffer[offset] > z1)
{
zpuffer[offset] = z1;
puffer[offset] = color;
}
}
__device__ void CUDA_DrawLine_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int color, unsigned int *puffer, float *zpuffer)
{
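// Bresenham-style line rasterisation with depth: steep lines are transposed
// (flip), endpoints are swapped so x increases, and z is interpolated
// linearly between z1 and z2 along the major axis before the z-buffer test.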
float Pz;
bool flip = false;
int swap, offset;
if (abs(x2 - x1) < 2 && abs(y2 - y1) < 2) {
puffer[(y2*SCREEN_WIDTH) + x2] = color; return;
}
if (abs(x1 - x2) < abs(y1 - y2))
{
swap = x1;
x1 = y1;
y1 = swap;
swap = x2;
x2 = y2;
y2 = swap;
flip = true;
}
if (x1 > x2)
{
swap = x1;
x1 = x2;
x2 = swap;
swap = y1;
y1 = y2;
y2 = swap;
}
int dx = x2 - x1;
int dy = y2 - y1;
int marker1 = abs(dy) * 2;
int marker2 = 0;
int y = y1, x;
for (x = x1; x <= x2; ++x)
{
if (z1 == z2) Pz = z1;
else
{
int s1 = abs(x2 - x1);
int s2 = abs(z1 - z2);
Pz = (float)z2 + (float)((((float)x - (float)x1) / (float)s1) * (float)s2);
}
if (flip)
{
offset = (x * SCREEN_WIDTH);
if (zpuffer[offset + y] > Pz)
{
zpuffer[offset + y] = Pz;
puffer[offset + y] = color;
}
}
else
{
offset = (y * SCREEN_WIDTH);
if (zpuffer[offset + x] > Pz)
{
zpuffer[offset + x] = Pz;
puffer[offset + x] = color;
}
}
marker2 += marker1;
if (marker2 > dx)
{
y += (y2 > y1 ? 1 : -1);
marker2 -= dx * 2;
}
}
}
__device__ void CUDA_FillTriangle_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int x3, int y3, int z3, int color, unsigned int *puffer, float *zpuffer)
{
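// Scanline triangle fill: sort the vertices by y, walk each row between the
// two active edges, and use the barycentric coordinates (helper_vector /
// interpolate) to interpolate the depth used for the per-pixel z-buffer test.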
int Ax, Ay, Bx, By, i, j, depth_value;
int swapx, swapy, offset;
Vec3f interpolate, helper_vector;
if (y1 == y2 && y1 == y3) return;
if (y1 > y2)
{
swapx = x1;
swapy = y1;
x1 = x2;
y1 = y2;
x2 = swapx;
y2 = swapy;
}
if (y1 > y3)
{
swapx = x1;
swapy = y1;
x1 = x3;
y1 = y3;
x3 = swapx;
y3 = swapy;
}
if (y2 > y3)
{
swapx = x3;
swapy = y3;
x3 = x2;
y3 = y2;
x2 = swapx;
y2 = swapy;
}
int t_height = y3 - y1;
for (i = 0; i < t_height; ++i)
{
bool lower_part = i > y2 - y1 || y2 == y1;
int part_height = lower_part ? y3 - y2 : y2 - y1;
float alpha = (float)i / t_height;
float beta = (float)(i - (lower_part ? y2 - y1 : 0)) / part_height;
Ax = x1 + (x3 - x1)*alpha;
Ay = y1 + (y3 - y1)*alpha;
Bx = lower_part ? x2 + (x3 - x2)*beta : x1 + (x2 - x1)*beta;
By = lower_part ? y2 + (y3 - y2)*beta : y1 + (y2 - y1)*beta;
if (Ax > Bx)
{
swapx = Ax;
swapy = Ay;
Ax = Bx;
Ay = By;
Bx = swapx;
By = swapy;
}
offset = (y1 + i)*SCREEN_WIDTH;
for (j = Ax; j <= Bx; ++j)
{
helper_vector.x = (x2 - x1) * (y1 - (y1 + i)) - (x1 - j) * (y2 - y1);
helper_vector.y = (x1 - j) * (y3 - y1) - (x3 - x1) * (y1 - (y1 + i));
helper_vector.z = (x3 - x1) * (y2 - y1) - (x2 - x1) * (y3 - y1);
if (abs((int)helper_vector.z) < 1) { interpolate.x = -1; interpolate.y = 0; interpolate.z = 0; }
else
{
interpolate.x = 1.f - (helper_vector.x + helper_vector.y) / helper_vector.z;
interpolate.y = helper_vector.y / helper_vector.z;
interpolate.z = helper_vector.x / helper_vector.z;
}
if (interpolate.x < 0 || interpolate.y < 0 || interpolate.z < 0) continue;
depth_value = (z1*interpolate.x) + (z2*interpolate.y) + (z3*interpolate.z);
if (zpuffer[offset + j] > depth_value)
{
zpuffer[offset + j] = depth_value;
puffer[offset + j] = color;
}
}
}
} | 97b538f16f9f1233366555f7628c0e1ed52cb416.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <windows.h>
#include <d2d1.h>
#include <d2d1helper.h>
#pragma comment(lib, "d2d1")
//*****double buffering*****
#define SCREEN_WIDTH 1920
#define SCREEN_HEIGHT 1000
D2D1_RECT_U display_area;
ID2D1Bitmap *image_container = NULL;
unsigned int *dev_image_data, image_data[SCREEN_WIDTH * SCREEN_HEIGHT];
float *dev_zbuffer;
typedef struct Vec3f {
float x, y, z;
};
//**************************************
ID2D1Factory* pD2DFactory = NULL;
ID2D1HwndRenderTarget* pRT = NULL;
#define HIBA_00 TEXT("Error:Program initialisation process.")
HINSTANCE hInstGlob;
int SajatiCmdShow;
char szClassName[] = "WindowsApp";
HWND Form1; //Windows handler
LRESULT CALLBACK WndProc0(HWND, UINT, WPARAM, LPARAM);
void D2D_drawing(ID2D1HwndRenderTarget* pRT);
//*****double buffering*****
void create_main_buffer(void);
void CUDA_cleanup_main_buffer(void);
__global__ void CUDA_CleanUp_Zbuffer(float *zpuffer);
void swap_main_buffer(void);
//**************************************
//*****Drawing algorithms*****
__device__ void CUDA_SetPixel_Zbuffer(int x1, int y1, int z1, int color, unsigned int *puffer, float *zpuffer);
__device__ void CUDA_DrawLine_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int color, unsigned int *puffer, float *zpuffer);
__device__ void CUDA_FillTriangle_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int x3, int y3, int z3, int color, unsigned int *puffer, float *zpuffer);
//**************************************
//*********************************
//The main entry point of our program
//*********************************
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, PSTR szCmdLine, int iCmdShow)
{
static TCHAR szAppName[] = TEXT("StdWinClassName");
HWND hwnd;
MSG msg;
WNDCLASS wndclass0;
SajatiCmdShow = iCmdShow;
hInstGlob = hInstance;
//*********************************
//Preparing Windows class
//*********************************
wndclass0.style = CS_HREDRAW | CS_VREDRAW;
wndclass0.lpfnWndProc = WndProc0;
wndclass0.cbClsExtra = 0;
wndclass0.cbWndExtra = 0;
wndclass0.hInstance = hInstance;
wndclass0.hIcon = LoadIcon(NULL, IDI_APPLICATION);
wndclass0.hCursor = LoadCursor(NULL, IDC_ARROW);
wndclass0.hbrBackground = (HBRUSH)GetStockObject(LTGRAY_BRUSH);
wndclass0.lpszMenuName = NULL;
wndclass0.lpszClassName = TEXT("WIN0");
//*********************************
//Registering our windows class
//*********************************
if (!RegisterClass(&wndclass0))
{
MessageBox(NULL, HIBA_00, TEXT("Program Start"), MB_ICONERROR);
return 0;
}
//*********************************
//Creating the window
//*********************************
Form1 = CreateWindow(TEXT("WIN0"),
TEXT("CUDA - DIRECT2D"),
(WS_OVERLAPPED | WS_SYSMENU | WS_THICKFRAME | WS_MAXIMIZEBOX | WS_MINIMIZEBOX),
50,
50,
SCREEN_WIDTH,
SCREEN_HEIGHT,
NULL,
NULL,
hInstance,
NULL);
//*********************************
//Displaying the window
//*********************************
ShowWindow(Form1, SajatiCmdShow);
UpdateWindow(Form1);
//*********************************
//Activating the message processing for our window
//*********************************
while (GetMessage(&msg, NULL, 0, 0))
{
TranslateMessage(&msg);
DispatchMessage(&msg);
}
return msg.wParam;
}
//*********************************
//The window's callback function: handling events
//*********************************
LRESULT CALLBACK WndProc0(HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam)
{
HDC hdc;
PAINTSTRUCT ps;
switch (message)
{
//*********************************
//When creating the window
//*********************************
case WM_CREATE:
D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED, &pD2DFactory);
pD2DFactory->CreateHwndRenderTarget(
D2D1::RenderTargetProperties(),
D2D1::HwndRenderTargetProperties(
hwnd, D2D1::SizeU(SCREEN_WIDTH, SCREEN_HEIGHT)),
&pRT);
create_main_buffer();
cudaMalloc((void**)&dev_image_data, SCREEN_WIDTH * SCREEN_HEIGHT * sizeof(unsigned int));
cudaMalloc((void**)&dev_zbuffer, SCREEN_WIDTH * SCREEN_HEIGHT * sizeof(float));
return 0;
//*********************************
//to eliminate color flickering
//*********************************
case WM_ERASEBKGND:
return (LRESULT)1;
//*********************************
//Repainting the client area of the window
//*********************************
case WM_PAINT:
hdc = BeginPaint(hwnd, &ps);
EndPaint(hwnd, &ps);
D2D_drawing(pRT);
return 0;
//*********************************
//Closing the window, freeing resources
//*********************************
case WM_CLOSE:
pRT->Release();
pD2DFactory->Release();
cudaFree(dev_image_data);
cudaFree(dev_zbuffer);
DestroyWindow(hwnd);
return 0;
//*********************************
//Destroying the window
//*********************************
case WM_DESTROY:
PostQuitMessage(0);
return 0;
}
return DefWindowProc(hwnd, message, wParam, lParam);
}
void D2D_drawing(ID2D1HwndRenderTarget* pRT)
{
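// per-frame pipeline: clear the colour and depth buffers on the GPU, run the render
// kernel (currently commented out), then copy the frame back and present it via Direct2D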
CUDA_cleanup_main_buffer();
CUDA_CleanUp_Zbuffer <<< ((SCREEN_WIDTH*SCREEN_HEIGHT) + 384 - 1) / 384, 384 >>> (dev_zbuffer);
cudaDeviceSynchronize();
//render_objects<<<blocks,threads >>>(dev_image_data,dev_zbuffer);
cudaDeviceSynchronize();
cudaMemcpy(image_data, dev_image_data, SCREEN_WIDTH * SCREEN_HEIGHT * sizeof(unsigned int), cudaMemcpyDeviceToHost);
swap_main_buffer();
}
void create_main_buffer(void)
{
pRT->CreateBitmap(D2D1::SizeU(SCREEN_WIDTH, SCREEN_HEIGHT),
D2D1::BitmapProperties(D2D1::PixelFormat(DXGI_FORMAT_B8G8R8A8_UNORM,
D2D1_ALPHA_MODE_IGNORE)), &image_container);
}
void CUDA_cleanup_main_buffer(void)
{
cudaMemset(dev_image_data, 255, SCREEN_HEIGHT*SCREEN_WIDTH * sizeof(unsigned int));
}
__global__ void CUDA_CleanUp_Zbuffer(float *zpuffer)
{
int i;
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (i = index; i < SCREEN_HEIGHT*SCREEN_WIDTH; i += stride)
{
zpuffer[i] = 999999;
}
}
void swap_main_buffer(void)
{
display_area.left = 0;
display_area.top = 0;
display_area.right = SCREEN_WIDTH;
display_area.bottom = SCREEN_HEIGHT;
image_container->CopyFromMemory(&display_area, image_data, SCREEN_WIDTH * sizeof(unsigned int));
pRT->BeginDraw();
pRT->DrawBitmap(image_container, D2D1::RectF(0.0f, 0.0f, SCREEN_WIDTH, SCREEN_HEIGHT), 1.0f, D2D1_BITMAP_INTERPOLATION_MODE_NEAREST_NEIGHBOR, NULL);
pRT->EndDraw();
}
__device__ void CUDA_SetPixel_Zbuffer(int x1, int y1, int z1, int color, unsigned int *puffer, float *zpuffer)
{
int offset = (y1 * SCREEN_WIDTH) + x1;
if (zpuffer[offset] > z1)
{
zpuffer[offset] = z1;
puffer[offset] = color;
}
}
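// Bresenham-style line rasterizer: steep lines are transposed, endpoints are ordered by x,
// the depth value is interpolated along the major axis and a pixel is written only where
// it passes the z-buffer test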
__device__ void CUDA_DrawLine_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int color, unsigned int *puffer, float *zpuffer)
{
float Pz;
bool flip = false;
int swap, offset;
if (abs(x2 - x1) < 2 && abs(y2 - y1) < 2) {
puffer[(y2*SCREEN_WIDTH) + x2] = color; return;
}
if (abs(x1 - x2) < abs(y1 - y2))
{
swap = x1;
x1 = y1;
y1 = swap;
swap = x2;
x2 = y2;
y2 = swap;
flip = true;
}
if (x1 > x2)
{
swap = x1;
x1 = x2;
x2 = swap;
swap = y1;
y1 = y2;
y2 = swap;
}
int dx = x2 - x1;
int dy = y2 - y1;
int marker1 = abs(dy) * 2;
int marker2 = 0;
int y = y1, x;
for (x = x1; x <= x2; ++x)
{
if (z1 == z2) Pz = z1;
else
{
int s1 = abs(x2 - x1);
int s2 = abs(z1 - z2);
Pz = (float)z2 + (float)((((float)x - (float)x1) / (float)s1) * (float)s2);
}
if (flip)
{
offset = (x * SCREEN_WIDTH);
if (zpuffer[offset + y] > Pz)
{
zpuffer[offset + y] = Pz;
puffer[offset + y] = color;
}
}
else
{
offset = (y * SCREEN_WIDTH);
if (zpuffer[offset + x] > Pz)
{
zpuffer[offset + x] = Pz;
puffer[offset + x] = color;
}
}
marker2 += marker1;
if (marker2 > dx)
{
y += (y2 > y1 ? 1 : -1);
marker2 -= dx * 2;
}
}
}
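// scanline triangle fill: vertices are sorted by y, the triangle is split into an upper and
// a lower half at the middle vertex, and every horizontal span is filled with a per-pixel
// barycentric depth interpolation against the z-buffer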
__device__ void CUDA_FillTriangle_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int x3, int y3, int z3, int color, unsigned int *puffer, float *zpuffer)
{
int Ax, Ay, Bx, By, i, j, depth_value;
int swapx, swapy, offset;
Vec3f interpolate, helper_vector;
if (y1 == y2 && y1 == y3) return;
if (y1 > y2)
{
swapx = x1;
swapy = y1;
x1 = x2;
y1 = y2;
x2 = swapx;
y2 = swapy;
}
if (y1 > y3)
{
swapx = x1;
swapy = y1;
x1 = x3;
y1 = y3;
x3 = swapx;
y3 = swapy;
}
if (y2 > y3)
{
swapx = x3;
swapy = y3;
x3 = x2;
y3 = y2;
x2 = swapx;
y2 = swapy;
}
int t_height = y3 - y1;
for (i = 0; i < t_height; ++i)
{
bool lower_part = i > y2 - y1 || y2 == y1;
int part_height = lower_part ? y3 - y2 : y2 - y1;
float alpha = (float)i / t_height;
float beta = (float)(i - (lower_part ? y2 - y1 : 0)) / part_height;
Ax = x1 + (x3 - x1)*alpha;
Ay = y1 + (y3 - y1)*alpha;
Bx = lower_part ? x2 + (x3 - x2)*beta : x1 + (x2 - x1)*beta;
By = lower_part ? y2 + (y3 - y2)*beta : y1 + (y2 - y1)*beta;
if (Ax > Bx)
{
swapx = Ax;
swapy = Ay;
Ax = Bx;
Ay = By;
Bx = swapx;
By = swapy;
}
offset = (y1 + i)*SCREEN_WIDTH;
for (j = Ax; j <= Bx; ++j)
{
helper_vector.x = (x2 - x1) * (y1 - (y1 + i)) - (x1 - j) * (y2 - y1);
helper_vector.y = (x1 - j) * (y3 - y1) - (x3 - x1) * (y1 - (y1 + i));
helper_vector.z = (x3 - x1) * (y2 - y1) - (x2 - x1) * (y3 - y1);
if (abs((int)helper_vector.z) < 1) { interpolate.x = -1; interpolate.y = 0; interpolate.z = 0; }
else
{
interpolate.x = 1.f - (helper_vector.x + helper_vector.y) / helper_vector.z;
interpolate.y = helper_vector.y / helper_vector.z;
interpolate.z = helper_vector.x / helper_vector.z;
}
if (interpolate.x < 0 || interpolate.y < 0 || interpolate.z < 0) continue;
depth_value = (z1*interpolate.x) + (z2*interpolate.y) + (z3*interpolate.z);
if (zpuffer[offset + j] > depth_value)
{
zpuffer[offset + j] = depth_value;
puffer[offset + j] = color;
}
}
}
} |
0485374c8a00147126679803f9229f97f74a5830.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/convolution/chanwise/bwd_filter.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "./kern.cuh"
#include "kern_helper_hip.cuh"
#include "hip/hip_fp16.h"
#include "src/cuda/cub/util_ptx.cuh"
#include "src/cuda/fp16_help.cuh"
const uint32_t WARP_SIZE = 32, BATCH_UNROLL = 4;
using namespace megdnn;
using namespace cuda;
using namespace convolution;
using namespace chanwise;
namespace {
/*!
* \brief compute grad w.r.t. filter
*
* block dim: out_id * kern_id
 * threads with the same out_id compute the grad for the corresponding kernel element
* \tparam nr_thpf number of threads for one element in the filter; must be
* power of 2;
*/
template <typename T, uint32_t nr_thpf>
__global__ void kern_bwd_filter_float(
T* flt_grad, const T* src, const T* dst_grad, Param param) {
const uint32_t N = param.batch, IC = param.src_chl, IH = param.src_h,
IW = param.src_w, CHL_MUL = param.chl_mul, FH = param.flt_h,
FW = param.flt_w, PH = param.pad_h, PW = param.pad_w,
SH = param.stride_h, SW = param.stride_w, OH = param.out_h,
OW = param.out_w, SRC_BATCH_STRIDE = IC * IH * IW,
DST_BATCH_STRIDE = IC * CHL_MUL * OH * OW,
BLKDIM_X = blockDim.x / nr_thpf, THREADID_X = threadIdx.x / nr_thpf,
OUT_IDX = blockIdx.x * BLKDIM_X + THREADID_X;
uint32_t ic, chl_mul, fh, fw;
{
uint32_t i = OUT_IDX;
i = div_mod(i, FW, fw);
i = div_mod(i, FH, fh);
i = div_mod(i, CHL_MUL, chl_mul);
ic = i;
}
if (ic >= IC) {
return;
}
src += ic * IH * IW;
dst_grad += (ic * CHL_MUL + chl_mul) * OH * OW;
const uint32_t oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH,
oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH),
ow_lo = max(int32_t(PW - fw + SW - 1), 0) / SW,
ow_hi = min((IW - 1 + PW - fw) / SW + 1, OW), oblk_h = oh_hi - oh_lo,
oblk_w = ow_hi - ow_lo,
oblk_tot = oblk_h * oblk_w * ((N + BATCH_UNROLL - 1) / BATCH_UNROLL),
tid = threadIdx.x % nr_thpf;
if (IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1 || ow_lo >= ow_hi) {
if (!tid)
flt_grad[OUT_IDX] = 0;
return;
}
T sum(0);
for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) {
uint32_t n, oh, ow;
n = div_mod(div_mod(oblk_idx, oblk_w, ow), oblk_h, oh) * BATCH_UNROLL;
oh += oh_lo;
ow += ow_lo;
uint32_t ih = oh * SH - PH + fh, iw = ow * SW - PW + fw,
soff = ih * IW + iw + n * SRC_BATCH_STRIDE,
doff = oh * OW + ow + n * DST_BATCH_STRIDE;
#pragma unroll
for (uint32_t i = 0; i < BATCH_UNROLL; ++i) {
if (!i || n + i < N) {
sum += src[soff] * dst_grad[doff];
}
soff += SRC_BATCH_STRIDE;
doff += DST_BATCH_STRIDE;
}
}
if (nr_thpf == 1) {
flt_grad[OUT_IDX] = sum;
} else {
// reduce all sums in a block
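        // tree reduction in shared memory: a full __syncthreads() while data still crosses
        // warp boundaries, cub::WARP_SYNC once everything stays within one warp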
extern __shared__ uint8_t shared_storage[];
volatile T* thread_sum = reinterpret_cast<T*>(shared_storage);
thread_sum += THREADID_X * nr_thpf;
thread_sum[tid] = sum;
#pragma unroll
for (uint32_t i = nr_thpf / 2; i; i >>= 1) {
bool cond = nr_thpf >= i * 2 && tid < i;
if (i >= WARP_SIZE) {
__syncthreads();
} else {
cub::WARP_SYNC(0xffffffff);
}
if (cond) {
T v0 = thread_sum[tid], v1 = v0 + thread_sum[tid + i];
thread_sum[tid] = v1;
}
}
if (!tid) {
flt_grad[OUT_IDX] = thread_sum[0];
}
}
}
#if TORCH_HIP_VERSION >= 9000
template <typename T, uint32_t nr_thpf>
__global__ void kern_bwd_filter_hf(
__half* flt_grad, const __half* src, const __half* dst_grad, Param param) {
const uint32_t N = param.batch, IC = param.src_chl, IH = param.src_h,
IW = param.src_w, CHL_MUL = param.chl_mul, FH = param.flt_h,
FW = param.flt_w, PH = param.pad_h, PW = param.pad_w,
SH = param.stride_h, SW = param.stride_w, OH = param.out_h,
OW = param.out_w, SRC_BATCH_STRIDE = IC * IH * IW,
DST_BATCH_STRIDE = IC * CHL_MUL * OH * OW,
BLKDIM_X = (blockDim.x / nr_thpf) * 2,
THREADID_X = (threadIdx.x / nr_thpf) * 2,
OUT_IDX = blockIdx.x * BLKDIM_X + THREADID_X,
LAST_IDX = FH * FW * CHL_MUL * IC, tid = threadIdx.x % nr_thpf;
__half2 sum2{0.0, 0.0};
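    // vectorized path: OUT_IDX and OUT_IDX + 1 fall into the same filter row, so both
    // partial sums are accumulated together as one __half2; otherwise fall back to the
    // two scalar passes in the else branch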
if (OUT_IDX % FW != FW - 1) {
uint32_t ic, chl_mul, fh, fw;
{
uint32_t i = OUT_IDX;
i = div_mod(i, FW, fw);
i = div_mod(i, FH, fh);
i = div_mod(i, CHL_MUL, chl_mul);
ic = i;
}
if (ic >= IC) {
return;
}
src += ic * IH * IW;
dst_grad += (ic * CHL_MUL + chl_mul) * OH * OW;
const uint32_t oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH,
oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH),
ow_lox = max(int32_t(PW - fw + SW - 1), 0) / SW,
ow_loy = max(int32_t(PW - fw + SW - 2), 0) / SW,
ow_hix = min((IW - 1 + PW - fw) / SW + 1, OW),
ow_hiy = min((IW - 2 + PW - fw) / SW + 1, OW),
oblk_h = oh_hi - oh_lo, oblk_wx = ow_hix - ow_lox,
oblk_wy = ow_hiy - ow_loy;
if (IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1) {
if (!tid) {
flt_grad[OUT_IDX] = 0;
flt_grad[OUT_IDX + 1] = 0;
}
return;
}
if (ow_lox >= ow_hix) {
if (!tid)
flt_grad[OUT_IDX] = 0;
}
if (IW + PW < fw + 2 || ow_loy >= ow_hiy) {
if (!tid)
flt_grad[OUT_IDX + 1] = 0;
if (ow_lox >= ow_hix)
return;
}
sum2.x = 0.0;
sum2.y = 0.0;
__half2 src2{0.0, 0.0};
__half2 dst2{0.0, 0.0};
const uint32_t oblk_w = max(ow_hix, ow_hiy) - min(ow_lox, ow_loy),
oblk_tot = oblk_h * oblk_w *
((N + BATCH_UNROLL - 1) / BATCH_UNROLL);
for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) {
uint32_t n_x, n_y, oh, ow_x, ow_y;
n_x = div_mod(div_mod(oblk_idx, oblk_wx, ow_x), oblk_h, oh) * BATCH_UNROLL;
n_y = div_mod(div_mod(oblk_idx, oblk_wy, ow_y), oblk_h, oh) * BATCH_UNROLL;
oh += oh_lo;
ow_x += ow_lox;
ow_y += ow_loy;
uint32_t ih = oh * SH - PH + fh, iw_x = ow_x * SW - PW + fw,
iw_y = ow_y * SW - PW + fw + 1,
soff_x = ih * IW + iw_x + n_x * SRC_BATCH_STRIDE,
soff_y = ih * IW + iw_y + n_y * SRC_BATCH_STRIDE,
doff_x = oh * OW + ow_x + n_x * DST_BATCH_STRIDE,
doff_y = oh * OW + ow_y + n_y * DST_BATCH_STRIDE;
#pragma unroll
for (uint32_t i = 0; i < BATCH_UNROLL; ++i) {
if (!i || n_x + i < N || n_y + i < N) {
src2.x = 0.0;
src2.y = 0.0;
dst2.x = 0.0;
dst2.y = 0.0;
if (n_x + i < N && ow_x < ow_hix) {
src2.x = src[soff_x];
dst2.x = dst_grad[doff_x];
}
if (n_y + i < N && ow_y < ow_hiy) {
src2.y = src[soff_y];
dst2.y = dst_grad[doff_y];
}
sum2 = fma2(src2, dst2, sum2);
}
soff_x += SRC_BATCH_STRIDE;
soff_y += SRC_BATCH_STRIDE;
doff_x += DST_BATCH_STRIDE;
doff_y += DST_BATCH_STRIDE;
}
}
} else {
for (size_t offset = 0; offset < 2; ++offset) {
uint32_t ic, chl_mul, fh, fw;
{
uint32_t i = OUT_IDX + offset;
i = div_mod(i, FW, fw);
i = div_mod(i, FH, fh);
i = div_mod(i, CHL_MUL, chl_mul);
ic = i;
}
if (ic >= IC) {
if (offset == 0)
return;
else
break;
}
const uint32_t oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH,
oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH),
ow_lo = max(int32_t(PW - fw + SW - 1), 0) / SW,
ow_hi = min((IW - 1 + PW - fw) / SW + 1, OW),
oblk_h = oh_hi - oh_lo, oblk_w = ow_hi - ow_lo,
oblk_tot = oblk_h * oblk_w *
((N + BATCH_UNROLL - 1) / BATCH_UNROLL);
if (IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1 ||
ow_lo >= ow_hi) {
if (!tid)
flt_grad[OUT_IDX + offset] = 0;
continue;
}
__half sum(0.0);
for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) {
uint32_t n, oh, ow;
n = div_mod(div_mod(oblk_idx, oblk_w, ow), oblk_h, oh) * BATCH_UNROLL;
oh += oh_lo;
ow += ow_lo;
uint32_t ih = oh * SH - PH + fh, iw = ow * SW - PW + fw,
soff = ic * IH * IW + ih * IW + iw + n * SRC_BATCH_STRIDE,
doff = (ic * CHL_MUL + chl_mul) * OH * OW + oh * OW + ow +
n * DST_BATCH_STRIDE;
#pragma unroll
for (uint32_t i = 0; i < BATCH_UNROLL; ++i) {
if (!i || n + i < N) {
sum = fma(src[soff], dst_grad[doff], sum);
}
soff += SRC_BATCH_STRIDE;
doff += DST_BATCH_STRIDE;
}
}
if (!offset)
sum2.x = sum;
if (offset)
sum2.y = sum;
}
}
if (nr_thpf == 1) {
flt_grad[OUT_IDX] = sum2.x;
if (OUT_IDX != LAST_IDX)
flt_grad[OUT_IDX + 1] = sum2.y;
} else {
extern __shared__ uint8_t shared_storage[];
__half2* thread_sum = reinterpret_cast<__half2*>(shared_storage);
thread_sum += THREADID_X * nr_thpf / 2;
thread_sum[tid] = sum2;
#pragma unroll
for (uint32_t i = nr_thpf / 2; i; i >>= 1) {
bool cond = nr_thpf >= i * 2 && tid < i;
if (i >= WARP_SIZE) {
__syncthreads();
} else {
cub::WARP_SYNC(0xffffffff);
}
if (cond) {
__half2 one = {1.0, 1.0};
__half2 v0 = thread_sum[tid], v1 = fma2(v0, one, thread_sum[tid + i]);
thread_sum[tid] = v1;
}
}
if (!tid) {
flt_grad[OUT_IDX] = thread_sum[0].x;
if (OUT_IDX != LAST_IDX)
flt_grad[OUT_IDX + 1] = thread_sum[0].y;
}
}
}
#endif
#define GET_KERN(func, type) \
FixFunction<type> f_struct; \
switch (_p) { \
case 1 << 10: \
f_struct.f = func<type, 1 << 10>; \
break; \
case 1 << 9: \
f_struct.f = func<type, 1 << 9>; \
break; \
case 1 << 8: \
f_struct.f = func<type, 1 << 8>; \
break; \
case 1 << 7: \
f_struct.f = func<type, 1 << 7>; \
break; \
case 1 << 6: \
f_struct.f = func<type, 1 << 6>; \
break; \
case 1 << 5: \
f_struct.f = func<type, 1 << 5>; \
break; \
case 1 << 4: \
f_struct.f = func<type, 1 << 4>; \
break; \
case 1 << 3: \
f_struct.f = func<type, 1 << 3>; \
break; \
case 1 << 2: \
f_struct.f = func<type, 1 << 2>; \
break; \
case 1 << 1: \
f_struct.f = func<type, 1 << 1>; \
break; \
case 1 << 0: \
f_struct.f = func<type, 1 << 0>; \
break; \
default: \
megdnn_assert(false, "DO NOT IMP CASE FUNCTION!!"); \
} \
return f_struct;
template <typename T>
struct FixFunction {
void (*f)(T*, const T*, const T*, Param);
};
template <typename T>
FixFunction<T> get_kern(const uint32_t& _p);
template <>
FixFunction<float> get_kern<float>(const uint32_t& _p) {
GET_KERN(kern_bwd_filter_float, float);
}
#if TORCH_HIP_VERSION >= 9000
template <>
FixFunction<__half> get_kern<__half>(const uint32_t& _p) {
GET_KERN(kern_bwd_filter_hf, __half);
}
#endif
template <>
FixFunction<dt_float16> get_kern<dt_float16>(const uint32_t& _p) {
GET_KERN(kern_bwd_filter_float, dt_float16);
}
#undef GET_KERN
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace convolution {
namespace chanwise {
template <typename T>
void run_bwd_filter(
T* filter_grad, const T* src, const T* dst_grad, const Param& param,
hipStream_t stream) {
void (*kern)(T*, const T*, const T*, Param) = NULL;
uint32_t nr_thread = query_blocksize_for_kernel(get_kern<T>(1024).f),
nr_thpf = ::min(
nr_thread, std::max<uint32_t>(
1, param.out_h * param.out_w * param.batch /
(BATCH_UNROLL * 16)));
// find nearest power-of-2 of nr_thpf
do {
#define CK(_n) \
if (nr_thpf >= _n) { \
kern = get_kern<T>(_n).f; \
nr_thpf = _n; \
break; \
}
CK(1 << 10);
CK(1 << 9);
CK(1 << 8);
CK(1 << 7);
CK(1 << 6);
CK(1 << 5);
CK(1 << 4);
CK(1 << 3);
CK(1 << 2);
CK(1 << 1);
CK(1 << 0);
#undef CK
} while (0);
megdnn_assert(kern);
nr_thread = query_blocksize_for_kernel(kern);
uint32_t nr_flt_per_blk = nr_thread / nr_thpf;
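    // shrink the number of filter elements per block until the block's thread count is a
    // multiple of the warp size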
while (nr_flt_per_blk * nr_thpf % WARP_SIZE)
--nr_flt_per_blk;
megdnn_assert(nr_flt_per_blk);
int nr_block = DIVUP(
param.flt_h * param.flt_w * param.src_chl * param.chl_mul, nr_flt_per_blk);
nr_thread = nr_flt_per_blk * nr_thpf;
uint32_t shared = nr_thread * 2 * sizeof(T);
hipLaunchKernelGGL(( kern), dim3(nr_block), dim3(nr_thread), shared, stream, filter_grad, src, dst_grad, param);
after_kernel_launch();
}
template void run_bwd_filter(
float*, const float*, const float*, const Param&, hipStream_t);
#if TORCH_HIP_VERSION >= 9000
template void run_bwd_filter(
__half*, const __half*, const __half*, const Param&, hipStream_t);
#endif
template void run_bwd_filter(
dt_float16*, const dt_float16*, const dt_float16*, const Param&, hipStream_t);
} // namespace chanwise
} // namespace convolution
} // namespace cuda
} // namespace megdnn
// vim: syntax=cuda.doxygen
| 0485374c8a00147126679803f9229f97f74a5830.cu | /**
* \file dnn/src/cuda/convolution/chanwise/bwd_filter.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "./kern.cuh"
#include "./kern_helper.cuh"
#include "cuda_fp16.h"
#include "src/cuda/cub/util_ptx.cuh"
#include "src/cuda/fp16_help.cuh"
const uint32_t WARP_SIZE = 32, BATCH_UNROLL = 4;
using namespace megdnn;
using namespace cuda;
using namespace convolution;
using namespace chanwise;
namespace {
/*!
* \brief compute grad w.r.t. filter
*
* block dim: out_id * kern_id
 * threads with the same out_id compute the grad for the corresponding kernel element
* \tparam nr_thpf number of threads for one element in the filter; must be
* power of 2;
*/
template <typename T, uint32_t nr_thpf>
__global__ void kern_bwd_filter_float(
T* flt_grad, const T* src, const T* dst_grad, Param param) {
const uint32_t N = param.batch, IC = param.src_chl, IH = param.src_h,
IW = param.src_w, CHL_MUL = param.chl_mul, FH = param.flt_h,
FW = param.flt_w, PH = param.pad_h, PW = param.pad_w,
SH = param.stride_h, SW = param.stride_w, OH = param.out_h,
OW = param.out_w, SRC_BATCH_STRIDE = IC * IH * IW,
DST_BATCH_STRIDE = IC * CHL_MUL * OH * OW,
BLKDIM_X = blockDim.x / nr_thpf, THREADID_X = threadIdx.x / nr_thpf,
OUT_IDX = blockIdx.x * BLKDIM_X + THREADID_X;
uint32_t ic, chl_mul, fh, fw;
{
uint32_t i = OUT_IDX;
i = div_mod(i, FW, fw);
i = div_mod(i, FH, fh);
i = div_mod(i, CHL_MUL, chl_mul);
ic = i;
}
if (ic >= IC) {
return;
}
src += ic * IH * IW;
dst_grad += (ic * CHL_MUL + chl_mul) * OH * OW;
const uint32_t oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH,
oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH),
ow_lo = max(int32_t(PW - fw + SW - 1), 0) / SW,
ow_hi = min((IW - 1 + PW - fw) / SW + 1, OW), oblk_h = oh_hi - oh_lo,
oblk_w = ow_hi - ow_lo,
oblk_tot = oblk_h * oblk_w * ((N + BATCH_UNROLL - 1) / BATCH_UNROLL),
tid = threadIdx.x % nr_thpf;
if (IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1 || ow_lo >= ow_hi) {
if (!tid)
flt_grad[OUT_IDX] = 0;
return;
}
T sum(0);
for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) {
uint32_t n, oh, ow;
n = div_mod(div_mod(oblk_idx, oblk_w, ow), oblk_h, oh) * BATCH_UNROLL;
oh += oh_lo;
ow += ow_lo;
uint32_t ih = oh * SH - PH + fh, iw = ow * SW - PW + fw,
soff = ih * IW + iw + n * SRC_BATCH_STRIDE,
doff = oh * OW + ow + n * DST_BATCH_STRIDE;
#pragma unroll
for (uint32_t i = 0; i < BATCH_UNROLL; ++i) {
if (!i || n + i < N) {
sum += src[soff] * dst_grad[doff];
}
soff += SRC_BATCH_STRIDE;
doff += DST_BATCH_STRIDE;
}
}
if (nr_thpf == 1) {
flt_grad[OUT_IDX] = sum;
} else {
// reduce all sums in a block
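        // tree reduction in shared memory: a full __syncthreads() while data still crosses
        // warp boundaries, cub::WARP_SYNC once everything stays within one warp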
extern __shared__ uint8_t shared_storage[];
volatile T* thread_sum = reinterpret_cast<T*>(shared_storage);
thread_sum += THREADID_X * nr_thpf;
thread_sum[tid] = sum;
#pragma unroll
for (uint32_t i = nr_thpf / 2; i; i >>= 1) {
bool cond = nr_thpf >= i * 2 && tid < i;
if (i >= WARP_SIZE) {
__syncthreads();
} else {
cub::WARP_SYNC(0xffffffff);
}
if (cond) {
T v0 = thread_sum[tid], v1 = v0 + thread_sum[tid + i];
thread_sum[tid] = v1;
}
}
if (!tid) {
flt_grad[OUT_IDX] = thread_sum[0];
}
}
}
#if CUDA_VERSION >= 9000
template <typename T, uint32_t nr_thpf>
__global__ void kern_bwd_filter_hf(
__half* flt_grad, const __half* src, const __half* dst_grad, Param param) {
const uint32_t N = param.batch, IC = param.src_chl, IH = param.src_h,
IW = param.src_w, CHL_MUL = param.chl_mul, FH = param.flt_h,
FW = param.flt_w, PH = param.pad_h, PW = param.pad_w,
SH = param.stride_h, SW = param.stride_w, OH = param.out_h,
OW = param.out_w, SRC_BATCH_STRIDE = IC * IH * IW,
DST_BATCH_STRIDE = IC * CHL_MUL * OH * OW,
BLKDIM_X = (blockDim.x / nr_thpf) * 2,
THREADID_X = (threadIdx.x / nr_thpf) * 2,
OUT_IDX = blockIdx.x * BLKDIM_X + THREADID_X,
LAST_IDX = FH * FW * CHL_MUL * IC, tid = threadIdx.x % nr_thpf;
__half2 sum2{0.0, 0.0};
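    // vectorized path: OUT_IDX and OUT_IDX + 1 fall into the same filter row, so both
    // partial sums are accumulated together as one __half2; otherwise fall back to the
    // two scalar passes in the else branch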
if (OUT_IDX % FW != FW - 1) {
uint32_t ic, chl_mul, fh, fw;
{
uint32_t i = OUT_IDX;
i = div_mod(i, FW, fw);
i = div_mod(i, FH, fh);
i = div_mod(i, CHL_MUL, chl_mul);
ic = i;
}
if (ic >= IC) {
return;
}
src += ic * IH * IW;
dst_grad += (ic * CHL_MUL + chl_mul) * OH * OW;
const uint32_t oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH,
oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH),
ow_lox = max(int32_t(PW - fw + SW - 1), 0) / SW,
ow_loy = max(int32_t(PW - fw + SW - 2), 0) / SW,
ow_hix = min((IW - 1 + PW - fw) / SW + 1, OW),
ow_hiy = min((IW - 2 + PW - fw) / SW + 1, OW),
oblk_h = oh_hi - oh_lo, oblk_wx = ow_hix - ow_lox,
oblk_wy = ow_hiy - ow_loy;
if (IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1) {
if (!tid) {
flt_grad[OUT_IDX] = 0;
flt_grad[OUT_IDX + 1] = 0;
}
return;
}
if (ow_lox >= ow_hix) {
if (!tid)
flt_grad[OUT_IDX] = 0;
}
if (IW + PW < fw + 2 || ow_loy >= ow_hiy) {
if (!tid)
flt_grad[OUT_IDX + 1] = 0;
if (ow_lox >= ow_hix)
return;
}
sum2.x = 0.0;
sum2.y = 0.0;
__half2 src2{0.0, 0.0};
__half2 dst2{0.0, 0.0};
const uint32_t oblk_w = max(ow_hix, ow_hiy) - min(ow_lox, ow_loy),
oblk_tot = oblk_h * oblk_w *
((N + BATCH_UNROLL - 1) / BATCH_UNROLL);
for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) {
uint32_t n_x, n_y, oh, ow_x, ow_y;
n_x = div_mod(div_mod(oblk_idx, oblk_wx, ow_x), oblk_h, oh) * BATCH_UNROLL;
n_y = div_mod(div_mod(oblk_idx, oblk_wy, ow_y), oblk_h, oh) * BATCH_UNROLL;
oh += oh_lo;
ow_x += ow_lox;
ow_y += ow_loy;
uint32_t ih = oh * SH - PH + fh, iw_x = ow_x * SW - PW + fw,
iw_y = ow_y * SW - PW + fw + 1,
soff_x = ih * IW + iw_x + n_x * SRC_BATCH_STRIDE,
soff_y = ih * IW + iw_y + n_y * SRC_BATCH_STRIDE,
doff_x = oh * OW + ow_x + n_x * DST_BATCH_STRIDE,
doff_y = oh * OW + ow_y + n_y * DST_BATCH_STRIDE;
#pragma unroll
for (uint32_t i = 0; i < BATCH_UNROLL; ++i) {
if (!i || n_x + i < N || n_y + i < N) {
src2.x = 0.0;
src2.y = 0.0;
dst2.x = 0.0;
dst2.y = 0.0;
if (n_x + i < N && ow_x < ow_hix) {
src2.x = src[soff_x];
dst2.x = dst_grad[doff_x];
}
if (n_y + i < N && ow_y < ow_hiy) {
src2.y = src[soff_y];
dst2.y = dst_grad[doff_y];
}
sum2 = fma2(src2, dst2, sum2);
}
soff_x += SRC_BATCH_STRIDE;
soff_y += SRC_BATCH_STRIDE;
doff_x += DST_BATCH_STRIDE;
doff_y += DST_BATCH_STRIDE;
}
}
} else {
for (size_t offset = 0; offset < 2; ++offset) {
uint32_t ic, chl_mul, fh, fw;
{
uint32_t i = OUT_IDX + offset;
i = div_mod(i, FW, fw);
i = div_mod(i, FH, fh);
i = div_mod(i, CHL_MUL, chl_mul);
ic = i;
}
if (ic >= IC) {
if (offset == 0)
return;
else
break;
}
const uint32_t oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH,
oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH),
ow_lo = max(int32_t(PW - fw + SW - 1), 0) / SW,
ow_hi = min((IW - 1 + PW - fw) / SW + 1, OW),
oblk_h = oh_hi - oh_lo, oblk_w = ow_hi - ow_lo,
oblk_tot = oblk_h * oblk_w *
((N + BATCH_UNROLL - 1) / BATCH_UNROLL);
if (IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1 ||
ow_lo >= ow_hi) {
if (!tid)
flt_grad[OUT_IDX + offset] = 0;
continue;
}
__half sum(0.0);
for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) {
uint32_t n, oh, ow;
n = div_mod(div_mod(oblk_idx, oblk_w, ow), oblk_h, oh) * BATCH_UNROLL;
oh += oh_lo;
ow += ow_lo;
uint32_t ih = oh * SH - PH + fh, iw = ow * SW - PW + fw,
soff = ic * IH * IW + ih * IW + iw + n * SRC_BATCH_STRIDE,
doff = (ic * CHL_MUL + chl_mul) * OH * OW + oh * OW + ow +
n * DST_BATCH_STRIDE;
#pragma unroll
for (uint32_t i = 0; i < BATCH_UNROLL; ++i) {
if (!i || n + i < N) {
sum = fma(src[soff], dst_grad[doff], sum);
}
soff += SRC_BATCH_STRIDE;
doff += DST_BATCH_STRIDE;
}
}
if (!offset)
sum2.x = sum;
if (offset)
sum2.y = sum;
}
}
if (nr_thpf == 1) {
flt_grad[OUT_IDX] = sum2.x;
if (OUT_IDX != LAST_IDX)
flt_grad[OUT_IDX + 1] = sum2.y;
} else {
extern __shared__ uint8_t shared_storage[];
__half2* thread_sum = reinterpret_cast<__half2*>(shared_storage);
thread_sum += THREADID_X * nr_thpf / 2;
thread_sum[tid] = sum2;
#pragma unroll
for (uint32_t i = nr_thpf / 2; i; i >>= 1) {
bool cond = nr_thpf >= i * 2 && tid < i;
if (i >= WARP_SIZE) {
__syncthreads();
} else {
cub::WARP_SYNC(0xffffffff);
}
if (cond) {
__half2 one = {1.0, 1.0};
__half2 v0 = thread_sum[tid], v1 = fma2(v0, one, thread_sum[tid + i]);
thread_sum[tid] = v1;
}
}
if (!tid) {
flt_grad[OUT_IDX] = thread_sum[0].x;
if (OUT_IDX != LAST_IDX)
flt_grad[OUT_IDX + 1] = thread_sum[0].y;
}
}
}
#endif
#define GET_KERN(func, type) \
FixFunction<type> f_struct; \
switch (_p) { \
case 1 << 10: \
f_struct.f = func<type, 1 << 10>; \
break; \
case 1 << 9: \
f_struct.f = func<type, 1 << 9>; \
break; \
case 1 << 8: \
f_struct.f = func<type, 1 << 8>; \
break; \
case 1 << 7: \
f_struct.f = func<type, 1 << 7>; \
break; \
case 1 << 6: \
f_struct.f = func<type, 1 << 6>; \
break; \
case 1 << 5: \
f_struct.f = func<type, 1 << 5>; \
break; \
case 1 << 4: \
f_struct.f = func<type, 1 << 4>; \
break; \
case 1 << 3: \
f_struct.f = func<type, 1 << 3>; \
break; \
case 1 << 2: \
f_struct.f = func<type, 1 << 2>; \
break; \
case 1 << 1: \
f_struct.f = func<type, 1 << 1>; \
break; \
case 1 << 0: \
f_struct.f = func<type, 1 << 0>; \
break; \
default: \
megdnn_assert(false, "DO NOT IMP CASE FUNCTION!!"); \
} \
return f_struct;
template <typename T>
struct FixFunction {
void (*f)(T*, const T*, const T*, Param);
};
template <typename T>
FixFunction<T> get_kern(const uint32_t& _p);
template <>
FixFunction<float> get_kern<float>(const uint32_t& _p) {
GET_KERN(kern_bwd_filter_float, float);
}
#if CUDA_VERSION >= 9000
template <>
FixFunction<__half> get_kern<__half>(const uint32_t& _p) {
GET_KERN(kern_bwd_filter_hf, __half);
}
#endif
template <>
FixFunction<dt_float16> get_kern<dt_float16>(const uint32_t& _p) {
GET_KERN(kern_bwd_filter_float, dt_float16);
}
#undef GET_KERN
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace convolution {
namespace chanwise {
template <typename T>
void run_bwd_filter(
T* filter_grad, const T* src, const T* dst_grad, const Param& param,
cudaStream_t stream) {
void (*kern)(T*, const T*, const T*, Param) = NULL;
uint32_t nr_thread = query_blocksize_for_kernel(get_kern<T>(1024).f),
nr_thpf = std::min(
nr_thread, std::max<uint32_t>(
1, param.out_h * param.out_w * param.batch /
(BATCH_UNROLL * 16)));
// find nearest power-of-2 of nr_thpf
do {
#define CK(_n) \
if (nr_thpf >= _n) { \
kern = get_kern<T>(_n).f; \
nr_thpf = _n; \
break; \
}
CK(1 << 10);
CK(1 << 9);
CK(1 << 8);
CK(1 << 7);
CK(1 << 6);
CK(1 << 5);
CK(1 << 4);
CK(1 << 3);
CK(1 << 2);
CK(1 << 1);
CK(1 << 0);
#undef CK
} while (0);
megdnn_assert(kern);
nr_thread = query_blocksize_for_kernel(kern);
uint32_t nr_flt_per_blk = nr_thread / nr_thpf;
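    // shrink the number of filter elements per block until the block's thread count is a
    // multiple of the warp size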
while (nr_flt_per_blk * nr_thpf % WARP_SIZE)
--nr_flt_per_blk;
megdnn_assert(nr_flt_per_blk);
int nr_block = DIVUP(
param.flt_h * param.flt_w * param.src_chl * param.chl_mul, nr_flt_per_blk);
nr_thread = nr_flt_per_blk * nr_thpf;
uint32_t shared = nr_thread * 2 * sizeof(T);
kern<<<nr_block, nr_thread, shared, stream>>>(filter_grad, src, dst_grad, param);
after_kernel_launch();
}
template void run_bwd_filter(
float*, const float*, const float*, const Param&, cudaStream_t);
#if CUDA_VERSION >= 9000
template void run_bwd_filter(
__half*, const __half*, const __half*, const Param&, cudaStream_t);
#endif
template void run_bwd_filter(
dt_float16*, const dt_float16*, const dt_float16*, const Param&, cudaStream_t);
} // namespace chanwise
} // namespace convolution
} // namespace cuda
} // namespace megdnn
// vim: syntax=cuda.doxygen
|
9dd73284430661c803caf8ac2f41a23745a53f01.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <math.h>
#include <helper_cuda.h>
#include <complex>
#include "../src/spreadinterp.h"
#include "../finufft/utils.h"
using namespace std;
int main(int argc, char* argv[])
{
int nf1, nf2, nf3;
FLT sigma = 2.0;
int N1, N2, N3, M;
if (argc<5) {
fprintf(stderr,"Usage: interp3d [method [nupts_distr [nf1 nf2 nf3 [M [tol [sort]]]]]]\n");
fprintf(stderr,"Details --\n");
fprintf(stderr,"method 1: nupts driven\n");
fprintf(stderr,"method 2: sub-problem\n");
return 1;
}
double w;
int method;
sscanf(argv[1],"%d",&method);
int nupts_distribute;
sscanf(argv[2],"%d",&nupts_distribute);
sscanf(argv[3],"%lf",&w); nf1 = (int)w; // so can read 1e6 right!
sscanf(argv[4],"%lf",&w); nf2 = (int)w; // so can read 1e6 right!
sscanf(argv[5],"%lf",&w); nf3 = (int)w; // so can read 1e6 right!
N1 = (int) nf1/sigma;
N2 = (int) nf2/sigma;
N3 = (int) nf3/sigma;
M = N1*N2*N3;// let density always be 1
if(argc>6){
sscanf(argv[6],"%lf",&w); M = (int)w; // so can read 1e6 right!
if(M == 0) M=N1*N2*N3;
}
FLT tol=1e-6;
if(argc>7){
sscanf(argv[7],"%lf",&w); tol = (FLT)w; // so can read 1e6 right!
}
int sort=1;
if(argc>8){
sscanf(argv[8],"%d",&sort);
}
int ier;
int ns=::ceil(-log10(tol/10.0));
int maxsubprobsize;
cout<<scientific<<setprecision(3);
FLT *x, *y, *z;
CPX *c, *fw;
hipHostMalloc(&x, M*sizeof(FLT));
hipHostMalloc(&y, M*sizeof(FLT));
hipHostMalloc(&z, M*sizeof(FLT));
hipHostMalloc(&c, M*sizeof(CPX));
hipHostMalloc(&fw,nf1*nf2*nf3*sizeof(CPX));
switch(nupts_distribute){
// Making data
case 1: //uniform
{
for (int i = 0; i < M; i++) {
x[i] = RESCALE(M_PI*randm11(), nf1, 1);// x in [-pi,pi)
y[i] = RESCALE(M_PI*randm11(), nf2, 1);
z[i] = RESCALE(M_PI*randm11(), nf3, 1);
//cout << x[i] << "," << y[i] << "," << z[i] << endl;
}
maxsubprobsize = 65536;
}
break;
case 2: // concentrate on a small region
{
for (int i = 0; i < M; i++) {
x[i] = RESCALE(M_PI*rand01()/(nf1*2/32), nf1, 1);// x in [-pi,pi)
y[i] = RESCALE(M_PI*rand01()/(nf1*2/32), nf2, 1);
z[i] = RESCALE(M_PI*rand01()/(nf3*2/32), nf3, 1);
}
maxsubprobsize = 1024;
}
break;
default:
cerr<<"error: nupts distr should be 1,2" << endl;
return 1;
}
for(int i=0; i<nf1*nf2*nf3; i++){
fw[i].real(1.0);
fw[i].imag(0.0);
}
int dim=3;
cufinufft_plan dplan;
ier = cufinufft_default_opts(type2, dim, dplan.opts);
if(ier != 0 ){
cout<<"error: cufinufft_default_opts"<<endl;
return 0;
}
ier = setup_spreader_for_nufft(dplan.spopts, tol, dplan.opts);
dplan.opts.upsampfac=sigma;
dplan.opts.gpu_method=method;
dplan.opts.gpu_kerevalmeth=1;
dplan.opts.gpu_sort=sort;
dplan.spopts.pirange=0;
if(dplan.opts.gpu_method == 2)
{
dplan.opts.gpu_binsizex=16;
dplan.opts.gpu_binsizey=16;
dplan.opts.gpu_binsizez=2;
dplan.opts.gpu_maxsubprobsize=maxsubprobsize;
}
if(dplan.opts.gpu_method == 1)
{
dplan.opts.gpu_binsizex=16;
dplan.opts.gpu_binsizey=8;
dplan.opts.gpu_binsizez=4;
}
CNTime timer;
/*warm up gpu*/
char *a;
timer.restart();
checkCudaErrors(hipMalloc(&a,1));
#ifdef TIME
cout<<"[time ]"<< " (warm up) First cudamalloc call " << timer.elapsedsec() <<" s"<<endl<<endl;
#endif
#ifdef INFO
cout<<"[info ] Interpolating ["<<nf1<<"x"<<nf2<<"x"<<nf3<<
"] uniform points to "<<M<<"nupts"<<endl;
#endif
timer.restart();
ier = cufinufft_interp3d(N1, N2, N3, nf1, nf2, nf3, fw, M, x, y, z, c, tol,
&dplan);
if(ier != 0 ){
cout<<"error: cnufftinterp3d"<<endl;
return 0;
}
FLT t=timer.elapsedsec();
printf("[Method %d] %ld U pts to #%d NU pts in %.3g s (\t%.3g U pts/s)\n",
dplan.opts.gpu_method,nf1*nf2*nf3,M,t,M/t);
#ifdef RESULT
cout<<"[result-input]"<<endl;
for(int j=0; j<10; j++){
printf(" (%2.3g,%2.3g)",c[j].real(),c[j].imag() );
cout<<endl;
}
cout<<endl;
#endif
hipHostFree(x);
hipHostFree(y);
hipHostFree(z);
hipHostFree(c);
hipHostFree(fw);
return 0;
}
| 9dd73284430661c803caf8ac2f41a23745a53f01.cu | #include <iostream>
#include <iomanip>
#include <math.h>
#include <helper_cuda.h>
#include <complex>
#include "../src/spreadinterp.h"
#include "../finufft/utils.h"
using namespace std;
int main(int argc, char* argv[])
{
int nf1, nf2, nf3;
FLT sigma = 2.0;
int N1, N2, N3, M;
if (argc<5) {
fprintf(stderr,"Usage: interp3d [method [nupts_distr [nf1 nf2 nf3 [M [tol [sort]]]]]]\n");
fprintf(stderr,"Details --\n");
fprintf(stderr,"method 1: nupts driven\n");
fprintf(stderr,"method 2: sub-problem\n");
return 1;
}
double w;
int method;
sscanf(argv[1],"%d",&method);
int nupts_distribute;
sscanf(argv[2],"%d",&nupts_distribute);
sscanf(argv[3],"%lf",&w); nf1 = (int)w; // so can read 1e6 right!
sscanf(argv[4],"%lf",&w); nf2 = (int)w; // so can read 1e6 right!
sscanf(argv[5],"%lf",&w); nf3 = (int)w; // so can read 1e6 right!
N1 = (int) nf1/sigma;
N2 = (int) nf2/sigma;
N3 = (int) nf3/sigma;
M = N1*N2*N3;// let density always be 1
if(argc>6){
sscanf(argv[6],"%lf",&w); M = (int)w; // so can read 1e6 right!
if(M == 0) M=N1*N2*N3;
}
FLT tol=1e-6;
if(argc>7){
sscanf(argv[7],"%lf",&w); tol = (FLT)w; // so can read 1e6 right!
}
int sort=1;
if(argc>8){
sscanf(argv[8],"%d",&sort);
}
int ier;
int ns=std::ceil(-log10(tol/10.0));
int maxsubprobsize;
cout<<scientific<<setprecision(3);
FLT *x, *y, *z;
CPX *c, *fw;
cudaMallocHost(&x, M*sizeof(FLT));
cudaMallocHost(&y, M*sizeof(FLT));
cudaMallocHost(&z, M*sizeof(FLT));
cudaMallocHost(&c, M*sizeof(CPX));
cudaMallocHost(&fw,nf1*nf2*nf3*sizeof(CPX));
switch(nupts_distribute){
// Making data
case 1: //uniform
{
for (int i = 0; i < M; i++) {
x[i] = RESCALE(M_PI*randm11(), nf1, 1);// x in [-pi,pi)
y[i] = RESCALE(M_PI*randm11(), nf2, 1);
z[i] = RESCALE(M_PI*randm11(), nf3, 1);
//cout << x[i] << "," << y[i] << "," << z[i] << endl;
}
maxsubprobsize = 65536;
}
break;
case 2: // concentrate on a small region
{
for (int i = 0; i < M; i++) {
x[i] = RESCALE(M_PI*rand01()/(nf1*2/32), nf1, 1);// x in [-pi,pi)
y[i] = RESCALE(M_PI*rand01()/(nf1*2/32), nf2, 1);
z[i] = RESCALE(M_PI*rand01()/(nf3*2/32), nf3, 1);
}
maxsubprobsize = 1024;
}
break;
default:
cerr<<"error: nupts distr should be 1,2" << endl;
return 1;
}
for(int i=0; i<nf1*nf2*nf3; i++){
fw[i].real(1.0);
fw[i].imag(0.0);
}
int dim=3;
cufinufft_plan dplan;
ier = cufinufft_default_opts(type2, dim, dplan.opts);
if(ier != 0 ){
cout<<"error: cufinufft_default_opts"<<endl;
return 0;
}
ier = setup_spreader_for_nufft(dplan.spopts, tol, dplan.opts);
dplan.opts.upsampfac=sigma;
dplan.opts.gpu_method=method;
dplan.opts.gpu_kerevalmeth=1;
dplan.opts.gpu_sort=sort;
dplan.spopts.pirange=0;
if(dplan.opts.gpu_method == 2)
{
dplan.opts.gpu_binsizex=16;
dplan.opts.gpu_binsizey=16;
dplan.opts.gpu_binsizez=2;
dplan.opts.gpu_maxsubprobsize=maxsubprobsize;
}
if(dplan.opts.gpu_method == 1)
{
dplan.opts.gpu_binsizex=16;
dplan.opts.gpu_binsizey=8;
dplan.opts.gpu_binsizez=4;
}
CNTime timer;
/*warm up gpu*/
char *a;
timer.restart();
checkCudaErrors(cudaMalloc(&a,1));
#ifdef TIME
cout<<"[time ]"<< " (warm up) First cudamalloc call " << timer.elapsedsec() <<" s"<<endl<<endl;
#endif
#ifdef INFO
cout<<"[info ] Interpolating ["<<nf1<<"x"<<nf2<<"x"<<nf3<<
"] uniform points to "<<M<<"nupts"<<endl;
#endif
timer.restart();
ier = cufinufft_interp3d(N1, N2, N3, nf1, nf2, nf3, fw, M, x, y, z, c, tol,
&dplan);
if(ier != 0 ){
cout<<"error: cnufftinterp3d"<<endl;
return 0;
}
FLT t=timer.elapsedsec();
printf("[Method %d] %ld U pts to #%d NU pts in %.3g s (\t%.3g U pts/s)\n",
dplan.opts.gpu_method,nf1*nf2*nf3,M,t,M/t);
#ifdef RESULT
cout<<"[result-input]"<<endl;
for(int j=0; j<10; j++){
printf(" (%2.3g,%2.3g)",c[j].real(),c[j].imag() );
cout<<endl;
}
cout<<endl;
#endif
cudaFreeHost(x);
cudaFreeHost(y);
cudaFreeHost(z);
cudaFreeHost(c);
cudaFreeHost(fw);
return 0;
}
|
d1d6ae657973198250538923a9145185bde55868.hip | // !!! This is a file automatically generated by hipify!!!
/*
 * sum_squares_7.cu  Uses tree-based addition to parallelize the summation.
*
* @author chenyang li
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <time.h>
#define DATA_SIZE 1024 * 1024
#define THREAD_NUM 256
#define BLOCK_NUM 32
int data[DATA_SIZE];
int clockRate;
/* 0-9 */
void generateNumbers(int *numbers, int size) {
int i;
for (i = 0; i < size; i++) {
numbers[i] = rand() % 10;
}
}
/* Print GPU device information */
void printDeviceProps(const hipDeviceProp_t *prop) {
printf("Device Name: %s\n", prop->name);
printf("totalGlobalMem: %ld\n", prop->totalGlobalMem);
printf("sharedMemPerBlock: %d\n", prop->sharedMemPerBlock);
printf("regsPerBlock: %d\n", prop->regsPerBlock);
printf("warpSize: %d\n", prop->warpSize);
printf("memPitch: %d\n", prop->memPitch);
printf("maxThreadPerBlock: %d\n", prop->maxThreadsPerBlock);
printf("maxThreadsDim[0-2]: %d %d %d\n", prop->maxThreadsDim[0], prop->maxThreadsDim[1], prop->maxThreadsDim[2]);
printf("maxGridSize[0-2]: %d %d %d\n", prop->maxGridSize[0], prop->maxGridSize[1], prop->maxGridSize[2]);
printf("totalConstMem: %d\n", prop->totalConstMem);
printf("major: %d & minor: %d\n", prop->major, prop->minor);
printf("clockRate: %d\n", prop->clockRate); clockRate = prop->clockRate;
printf("textureAlignment: %d\n", prop->textureAlignment);
printf("deviceOverlap: %d\n", prop->deviceOverlap);
printf("multiProcessorCount: %d\n", prop->multiProcessorCount);
}
/* CUDA initialization */
bool initCUDA() {
int count, i;
hipDeviceProp_t prop;
hipGetDeviceCount(&count);
if (0 == count) {
fprintf(stderr, "There is no device.\n");
return false;
}
for (i = 0; i < count; i++) {
if (hipGetDeviceProperties(&prop, i) == hipSuccess) {
if (prop.major >= 1) {
break;
}
}
}
if (i == count) {
fprintf(stderr, "There is no device.\n");
return false;
}
hipSetDevice(i);
printDeviceProps(&prop);
return true;
}
/* Compute the maximum elapsed time (earliest block start to latest block end) */
clock_t findMaxTimeUsed(const clock_t *time) {
int i;
clock_t min_start = time[0], max_end = time[BLOCK_NUM];
for (i = 0; i < BLOCK_NUM; i++) {
if (time[i] < min_start) {
min_start = time[i];
}
if (time[i + BLOCK_NUM] > max_end) {
max_end = time[i + BLOCK_NUM];
}
}
return max_end - min_start;
}
/* Compute the sum of squares (the __global__ function runs on the GPU) */
__global__ static void sumOfSquares(int *numbers, int *sub_sum, clock_t *time) {
int i;
extern __shared__ int shared[];
const int block_id = blockIdx.x;
const int thread_id = threadIdx.x;
// define the stride and the mask used by the tree reduction
int offset, mask;
if (thread_id == 0) {
time[block_id] = clock();
}
shared[thread_id] = 0;
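// grid-stride loop: each thread accumulates the squares of every (BLOCK_NUM * THREAD_NUM)-th
// element into its own shared-memory slot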
for (i = block_id * THREAD_NUM + thread_id; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM) {
shared[thread_id] += numbers[i] * numbers[i];
}
if (thread_id == 0) {
time[block_id + BLOCK_NUM] = clock();
}
__syncthreads();
/* Parallel (tree) addition section */
offset = 1;
mask = 1;
while (offset < THREAD_NUM) {
// note: & has lower precedence than ==, so the parentheses are required
if ((thread_id & mask) == 0 && thread_id + offset < THREAD_NUM) {
shared[thread_id] += shared[thread_id + offset];
}
offset += offset;
mask += offset;
// every reduction round requires one synchronization of all threads
__syncthreads();
}
sub_sum[block_id] = shared[0];
}
int main(void) {
if (!initCUDA()) {
return 0;
}
int *gpudata;
int i, sum;
int sub_sum[BLOCK_NUM], *gpu_sub_sum;
clock_t time_used[BLOCK_NUM * 2], *gpu_time_used;
generateNumbers(data, DATA_SIZE);
hipMalloc((void**)&gpudata, sizeof(int) * DATA_SIZE);
hipMalloc((void**)&gpu_sub_sum, sizeof(int) * BLOCK_NUM);
hipMalloc((void**)&gpu_time_used, sizeof(clock_t) * BLOCK_NUM * 2);
hipMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, hipMemcpyHostToDevice);
sumOfSquares << < BLOCK_NUM, THREAD_NUM, sizeof(int) * THREAD_NUM >> > (gpudata, gpu_sub_sum, gpu_time_used);
hipMemcpy(time_used, gpu_time_used, sizeof(clock_t) * BLOCK_NUM * 2, hipMemcpyDeviceToHost);
hipMemcpy(sub_sum, gpu_sub_sum, sizeof(int) * BLOCK_NUM, hipMemcpyDeviceToHost);
sum = 0;
for (i = 0; i < BLOCK_NUM; i++) {
sum += sub_sum[i];
}
hipFree(gpudata);
hipFree(gpu_sub_sum);
hipFree(gpu_time_used);
clock_t max_time_used = findMaxTimeUsed(time_used);
printf("\nGPU sum is: %d, time used: %f (s)\n", sum, (float)max_time_used / (clockRate * 1000));
sum = 0;
for (i = 0; i < DATA_SIZE; i++) {
sum += data[i] * data[i];
}
printf("CPU sum is: %d\n", sum);
printf("Memory bandwidth: %f (MB/s)\n", ((float)(DATA_SIZE * sizeof(int) / 1024 / 1024)) / ((float)max_time_used / (clockRate * 1000)));
system("pause");
// return 0;
} | d1d6ae657973198250538923a9145185bde55868.cu | /*
 * sum_squares_7.cu  Uses tree-based addition to parallelize the summation.
*
* @author chenyang li
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <time.h>
#define DATA_SIZE 1024 * 1024
#define THREAD_NUM 256
#define BLOCK_NUM 32
int data[DATA_SIZE];
int clockRate;
/* Generate random integers in 0-9 */
void generateNumbers(int *numbers, int size) {
int i;
for (i = 0; i < size; i++) {
numbers[i] = rand() % 10;
}
}
/* Print GPU device information */
void printDeviceProps(const cudaDeviceProp *prop) {
printf("Device Name: %s\n", prop->name);
printf("totalGlobalMem: %ld\n", prop->totalGlobalMem);
printf("sharedMemPerBlock: %d\n", prop->sharedMemPerBlock);
printf("regsPerBlock: %d\n", prop->regsPerBlock);
printf("warpSize: %d\n", prop->warpSize);
printf("memPitch: %d\n", prop->memPitch);
printf("maxThreadPerBlock: %d\n", prop->maxThreadsPerBlock);
printf("maxThreadsDim[0-2]: %d %d %d\n", prop->maxThreadsDim[0], prop->maxThreadsDim[1], prop->maxThreadsDim[2]);
printf("maxGridSize[0-2]: %d %d %d\n", prop->maxGridSize[0], prop->maxGridSize[1], prop->maxGridSize[2]);
printf("totalConstMem: %d\n", prop->totalConstMem);
printf("major: %d & minor: %d\n", prop->major, prop->minor);
printf("clockRate: %d\n", prop->clockRate); clockRate = prop->clockRate;
printf("textureAlignment: %d\n", prop->textureAlignment);
printf("deviceOverlap: %d\n", prop->deviceOverlap);
printf("multiProcessorCount: %d\n", prop->multiProcessorCount);
}
/* CUDA initialization */
bool initCUDA() {
int count, i;
cudaDeviceProp prop;
cudaGetDeviceCount(&count);
if (0 == count) {
fprintf(stderr, "There is no device.\n");
return false;
}
for (i = 0; i < count; i++) {
if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
if (prop.major >= 1) {
break;
}
}
}
if (i == count) {
fprintf(stderr, "There is no device.\n");
return false;
}
cudaSetDevice(i);
printDeviceProps(&prop);
return true;
}
/* Compute the maximum elapsed time (earliest block start to latest block end) */
clock_t findMaxTimeUsed(const clock_t *time) {
int i;
clock_t min_start = time[0], max_end = time[BLOCK_NUM];
for (i = 0; i < BLOCK_NUM; i++) {
if (time[i] < min_start) {
min_start = time[i];
}
if (time[i + BLOCK_NUM] > max_end) {
max_end = time[i + BLOCK_NUM];
}
}
return max_end - min_start;
}
/* Compute the sum of squares (the __global__ function runs on the GPU) */
__global__ static void sumOfSquares(int *numbers, int *sub_sum, clock_t *time) {
int i;
extern __shared__ int shared[];
const int block_id = blockIdx.x;
const int thread_id = threadIdx.x;
// define the stride and the mask used by the tree reduction
int offset, mask;
if (thread_id == 0) {
time[block_id] = clock();
}
shared[thread_id] = 0;
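// grid-stride loop: each thread accumulates the squares of every (BLOCK_NUM * THREAD_NUM)-th
// element into its own shared-memory slot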
for (i = block_id * THREAD_NUM + thread_id; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM) {
shared[thread_id] += numbers[i] * numbers[i];
}
if (thread_id == 0) {
time[block_id + BLOCK_NUM] = clock();
}
__syncthreads();
/* Parallel (tree) addition section */
offset = 1;
mask = 1;
while (offset < THREAD_NUM) {
// note: & has lower precedence than ==, so the parentheses are required
if ((thread_id & mask) == 0 && thread_id + offset < THREAD_NUM) {
shared[thread_id] += shared[thread_id + offset];
}
offset += offset;
mask += offset;
// every reduction round requires one synchronization of all threads
__syncthreads();
}
sub_sum[block_id] = shared[0];
}
int main(void) {
if (!initCUDA()) {
return 0;
}
int *gpudata;
int i, sum;
int sub_sum[BLOCK_NUM], *gpu_sub_sum;
clock_t time_used[BLOCK_NUM * 2], *gpu_time_used;
generateNumbers(data, DATA_SIZE);
cudaMalloc((void**)&gpudata, sizeof(int) * DATA_SIZE);
cudaMalloc((void**)&gpu_sub_sum, sizeof(int) * BLOCK_NUM);
cudaMalloc((void**)&gpu_time_used, sizeof(clock_t) * BLOCK_NUM * 2);
cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice);
sumOfSquares << < BLOCK_NUM, THREAD_NUM, sizeof(int) * THREAD_NUM >> > (gpudata, gpu_sub_sum, gpu_time_used);
cudaMemcpy(time_used, gpu_time_used, sizeof(clock_t) * BLOCK_NUM * 2, cudaMemcpyDeviceToHost);
cudaMemcpy(sub_sum, gpu_sub_sum, sizeof(int) * BLOCK_NUM, cudaMemcpyDeviceToHost);
sum = 0;
for (i = 0; i < BLOCK_NUM; i++) {
sum += sub_sum[i];
}
cudaFree(gpudata);
cudaFree(gpu_sub_sum);
cudaFree(gpu_time_used);
clock_t max_time_used = findMaxTimeUsed(time_used);
printf("\nGPU sum is: %d, time used: %f (s)\n", sum, (float)max_time_used / (clockRate * 1000));
sum = 0;
for (i = 0; i < DATA_SIZE; i++) {
sum += data[i] * data[i];
}
printf("CPU sum is: %d\n", sum);
printf("Memory bandwidth: %f (MB/s)\n", ((float)(DATA_SIZE * sizeof(int) / 1024 / 1024)) / ((float)max_time_used / (clockRate * 1000)));
system("pause");
// return 0;
} |
cef40595c36a23ea12c79eba5def06f550208900.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_force_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void DihedralForceKernel(int dihedral_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d,
const int *ipn, const float *pk, const float *gamc, const float *gams,
const float *pn, VECTOR *frc) {
int dihedral_i = blockDim.x * blockIdx.x + threadIdx.x;
if (dihedral_i < dihedral_numbers) {
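    // one thread per dihedral: build the two plane normals r1 and r2, recover the torsion
    // angle phi, evaluate dE/dphi and scatter the resulting forces onto the four atoms
    // with atomicAdd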
int atom_i = atom_a[dihedral_i];
int atom_j = atom_b[dihedral_i];
int atom_k = atom_c[dihedral_i];
int atom_l = atom_d[dihedral_i];
int temp_ipn = ipn[dihedral_i];
float temp_pk = pk[dihedral_i];
float temp_pn = pn[dihedral_i];
float temp_gamc = gamc[dihedral_i];
float temp_gams = gams[dihedral_i];
VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
VECTOR drkl = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_l], scaler[0]);
VECTOR r1 = drij ^ drkj;
VECTOR r2 = drkl ^ drkj;
float r1_1 = rnorm3df(r1.x, r1.y, r1.z);
float r2_1 = rnorm3df(r2.x, r2.y, r2.z);
float r1_2 = r1_1 * r1_1;
float r2_2 = r2_1 * r2_1;
float r1_1_r2_1 = r1_1 * r2_1;
float phi = r1 * r2 * r1_1_r2_1;
phi = fmaxf(-0.999999, fminf(phi, 0.999999));
phi = acosf(phi);
float sign = (r2 ^ r1) * drkj;
copysignf(phi, sign);
phi = CONSTANT_Pi - phi;
float nphi = temp_pn * phi;
float cos_phi = cosf(phi);
float sin_phi = sinf(phi);
float cos_nphi = cosf(nphi);
float sin_nphi = sinf(nphi);
float dE_dphi;
if (fabsf(sin_phi) < 1e-6) {
temp_ipn *= temp_ipn % 2; // (((temp_ipn - 1) & 1) ^ 1)
dE_dphi = temp_gamc * (temp_pn - temp_ipn + temp_ipn * cos_phi);
} else {
dE_dphi = temp_pn * (temp_gamc * sin_nphi - temp_gams * cos_nphi) / sin_phi;
}
VECTOR dphi_dr1 = r1_1_r2_1 * r2 + cos_phi * r1_2 * r1;
VECTOR dphi_dr2 = r1_1_r2_1 * r1 + cos_phi * r2_2 * r2;
VECTOR dE_dri = dE_dphi * drkj ^ dphi_dr1;
VECTOR dE_drl = dE_dphi * dphi_dr2 ^ drkj;
VECTOR dE_drj_part = dE_dphi * ((drij ^ dphi_dr1) + (drkl ^ dphi_dr2));
VECTOR fi = dE_dri;
VECTOR fj = dE_drj_part - dE_dri;
VECTOR fk = -dE_drl - dE_drj_part;
VECTOR fl = dE_drl;
atomicAdd(&frc[atom_i].x, fi.x);
atomicAdd(&frc[atom_i].y, fi.y);
atomicAdd(&frc[atom_i].z, fi.z);
atomicAdd(&frc[atom_j].x, fj.x);
atomicAdd(&frc[atom_j].y, fj.y);
atomicAdd(&frc[atom_j].z, fj.z);
atomicAdd(&frc[atom_k].x, fk.x);
atomicAdd(&frc[atom_k].y, fk.y);
atomicAdd(&frc[atom_k].z, fk.z);
atomicAdd(&frc[atom_l].x, fl.x);
atomicAdd(&frc[atom_l].y, fl.y);
atomicAdd(&frc[atom_l].z, fl.z);
}
}
void DihedralForce(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *frc_f, hipStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(dihedral_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
hipLaunchKernelGGL(( DihedralForceKernel), dim3(block_per_grid), dim3(thread_per_block), 0, stream,
dihedral_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, gams, pn, frc);
return;
}
void DihedralForce(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *frc_f, hipStream_t stream);
| cef40595c36a23ea12c79eba5def06f550208900.cu | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_force_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void DihedralForceKernel(int dihedral_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d,
const int *ipn, const float *pk, const float *gamc, const float *gams,
const float *pn, VECTOR *frc) {
int dihedral_i = blockDim.x * blockIdx.x + threadIdx.x;
if (dihedral_i < dihedral_numbers) {
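    // one thread per dihedral: build the two plane normals r1 and r2, recover the torsion
    // angle phi, evaluate dE/dphi and scatter the resulting forces onto the four atoms
    // with atomicAdd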
int atom_i = atom_a[dihedral_i];
int atom_j = atom_b[dihedral_i];
int atom_k = atom_c[dihedral_i];
int atom_l = atom_d[dihedral_i];
int temp_ipn = ipn[dihedral_i];
float temp_pk = pk[dihedral_i];
float temp_pn = pn[dihedral_i];
float temp_gamc = gamc[dihedral_i];
float temp_gams = gams[dihedral_i];
VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
VECTOR drkl = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_l], scaler[0]);
VECTOR r1 = drij ^ drkj;
VECTOR r2 = drkl ^ drkj;
float r1_1 = rnorm3df(r1.x, r1.y, r1.z);
float r2_1 = rnorm3df(r2.x, r2.y, r2.z);
float r1_2 = r1_1 * r1_1;
float r2_2 = r2_1 * r2_1;
float r1_1_r2_1 = r1_1 * r2_1;
float phi = r1 * r2 * r1_1_r2_1;
phi = fmaxf(-0.999999, fminf(phi, 0.999999));
phi = acosf(phi);
float sign = (r2 ^ r1) * drkj;
copysignf(phi, sign);
phi = CONSTANT_Pi - phi;
float nphi = temp_pn * phi;
float cos_phi = cosf(phi);
float sin_phi = sinf(phi);
float cos_nphi = cosf(nphi);
float sin_nphi = sinf(nphi);
float dE_dphi;
if (fabsf(sin_phi) < 1e-6) {
temp_ipn *= temp_ipn % 2; // (((temp_ipn - 1) & 1) ^ 1)
dE_dphi = temp_gamc * (temp_pn - temp_ipn + temp_ipn * cos_phi);
} else {
dE_dphi = temp_pn * (temp_gamc * sin_nphi - temp_gams * cos_nphi) / sin_phi;
}
VECTOR dphi_dr1 = r1_1_r2_1 * r2 + cos_phi * r1_2 * r1;
VECTOR dphi_dr2 = r1_1_r2_1 * r1 + cos_phi * r2_2 * r2;
VECTOR dE_dri = dE_dphi * drkj ^ dphi_dr1;
VECTOR dE_drl = dE_dphi * dphi_dr2 ^ drkj;
VECTOR dE_drj_part = dE_dphi * ((drij ^ dphi_dr1) + (drkl ^ dphi_dr2));
VECTOR fi = dE_dri;
VECTOR fj = dE_drj_part - dE_dri;
VECTOR fk = -dE_drl - dE_drj_part;
VECTOR fl = dE_drl;
atomicAdd(&frc[atom_i].x, fi.x);
atomicAdd(&frc[atom_i].y, fi.y);
atomicAdd(&frc[atom_i].z, fi.z);
atomicAdd(&frc[atom_j].x, fj.x);
atomicAdd(&frc[atom_j].y, fj.y);
atomicAdd(&frc[atom_j].z, fj.z);
atomicAdd(&frc[atom_k].x, fk.x);
atomicAdd(&frc[atom_k].y, fk.y);
atomicAdd(&frc[atom_k].z, fk.z);
atomicAdd(&frc[atom_l].x, fl.x);
atomicAdd(&frc[atom_l].y, fl.y);
atomicAdd(&frc[atom_l].z, fl.z);
}
}
void DihedralForce(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *frc_f, cudaStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(dihedral_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
DihedralForceKernel<<<block_per_grid, thread_per_block, 0, stream>>>(
dihedral_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, gams, pn, frc);
return;
}
void DihedralForce(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *frc_f, cudaStream_t stream);
|
293f23e113282729b4118a5c8662d0f350b19f90.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
    dsymv.cu is generated from zhemv.cu; for real precisions the Hermitian and symmetric
    kernels coincide, so only the names change and MAGMA_D_CONJ is dropped.
dsymv_kernel_U (upper) in dsymv_upper.cu is very similar to
dsymv_kernel_L (lower) in dsymv.cu; diff the two files to compare.
@generated from magmablas/zhemv.cu, normal z -> d, Thu Oct 8 23:05:33 2020
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_d.h"
#define PRECISION_d
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
__global__ void
dsymv_kernel_L(
int n,
double const * __restrict__ A, int lda,
double const * __restrict__ x, int incx,
double * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
double psum, psum_t;
double total = MAGMA_D_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ double sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ double sx_blk[NB_X]; // for x[ blk ]
__shared__ double sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
double rA[4];
double psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_D_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_D_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= blk_ind*lda; // A is A(blk_ind + tx2, ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=0; jj < blk; ++jj) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
if ( ty == 0 ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_D_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end dsymv_kernel_L
/***************************************************************************//**
Lower case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ] + beta*y
[ (A31*x1 + A32*x2 + A33*x3) ]
*******************************************************************************/
__global__ void
dsymv_kernel_L_sum(
int n,
double alpha,
int lda,
double beta,
double * __restrict__ y, int incy,
double const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind + blk*lda;
double Ax = MAGMA_D_ZERO;
for (int j = blk; j < blocks; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
/***************************************************************************//**
Purpose
-------
magmablas_dsymv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha DOUBLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA DOUBLE PRECISION array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda be a multiple of 16; otherwise performance
deteriorates because the memory accesses are not fully coalesced.
@param[in]
dx DOUBLE PRECISION array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta DOUBLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy DOUBLE PRECISION array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) DOUBLE PRECISION array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ),
where NB_X = 64.
@param[in]
queue magma_queue_t.
Queue to execute in.
MAGMA implements dsymv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
magmablas_dsymv_work requires users to provide a workspace, while
magmablas_dsymv is a wrapper routine that allocates the workspace inside the
routine and provides the same interface as cublas.
If users need to call dsymv frequently, we suggest using
magmablas_dsymv_work instead of magmablas_dsymv, as the overhead of
allocating and freeing device memory in magmablas_dsymv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_dsymv_work(
magma_uplo_t uplo, magma_int_t n,
double alpha,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_const_ptr dx, magma_int_t incx,
double beta,
magmaDouble_ptr dy, magma_int_t incy,
magmaDouble_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_dsymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwmin = ldda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_D_EQUAL(alpha, MAGMA_D_ZERO) && MAGMA_D_EQUAL(beta, MAGMA_D_ONE) ) )
return info;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
dim3 threads_sum( NB_X, 1, 1 );
if ( upper ) {
hipLaunchKernelGGL(( dsymv_kernel_U), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dA, ldda, dx, incx, dwork);
hipLaunchKernelGGL(( dsymv_kernel_U_sum), dim3(grid), dim3(threads_sum), 0, queue->cuda_stream() ,
n, alpha, ldda, beta, dy, incy, dwork);
}
else {
hipLaunchKernelGGL(( dsymv_kernel_L), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dA, ldda, dx, incx, dwork);
hipLaunchKernelGGL(( dsymv_kernel_L_sum), dim3(grid), dim3(threads_sum), 0, queue->cuda_stream() ,
n, alpha, ldda, beta, dy, incy, dwork);
}
return info;
}
// end magmablas_dsymv_work
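// -----------------------------------------------------------------------------
// Editor's sketch (not part of the original MAGMA source): a minimal host-side
// call sequence for magmablas_dsymv_work, illustrating the workspace rule from
// the documentation above (lwork >= ldda * ceil(n/NB_X), NB_X = 64). It only
// uses routines already present in this file (magma_ceildiv, magma_dmalloc,
// magma_free). The function name is hypothetical and dA, dx, dy are assumed to
// be valid device arrays.
static void example_dsymv_work_usage(
    magma_int_t n,
    magmaDouble_const_ptr dA, magma_int_t ldda,
    magmaDouble_const_ptr dx,
    magmaDouble_ptr dy,
    magma_queue_t queue )
{
    magma_int_t blocks = magma_ceildiv( n, NB_X );   // number of 64-row block rows
    magma_int_t lwork  = ldda*blocks;                // minimum workspace size
    magmaDouble_ptr dwork;
    magma_dmalloc( &dwork, lwork );                  // allocate once; reuse across repeated calls
    // y := 1.0*A*x + 0.0*y, referencing the lower triangle of A
    magmablas_dsymv_work( MagmaLower, n, 1.0, dA, ldda, dx, 1, 0.0, dy, 1,
                          dwork, lwork, queue );
    magma_free( dwork );                             // free after the last call
}
// -----------------------------------------------------------------------------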
/***************************************************************************//**
Purpose
-------
magmablas_dsymv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha DOUBLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA DOUBLE PRECISION array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda be a multiple of 16; otherwise performance
deteriorates because the memory accesses are not fully coalesced.
@param[in]
dx DOUBLE PRECISION array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta DOUBLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy DOUBLE PRECISION array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_dsymv(
magma_uplo_t uplo, magma_int_t n,
double alpha,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_const_ptr dx, magma_int_t incx,
double beta,
magmaDouble_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_dsymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_D_EQUAL(alpha, MAGMA_D_ZERO) && MAGMA_D_EQUAL(beta, MAGMA_D_ONE) ) )
return info;
magmaDouble_ptr dwork;
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwork = ldda*blocks;
magma_dmalloc( &dwork, lwork );
if ( dwork == NULL ) {
info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return info;
}
magmablas_dsymv_work( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy,
dwork, lwork, queue );
magma_free( dwork );
return info;
}
// end magmablas_dsymv
| 293f23e113282729b4118a5c8662d0f350b19f90.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
In complex precision, hemv is nearly identical to symv: just change names and drop the CONJ macro; in this real-precision file the two routines coincide (both become dsymv.cu).
dsymv_kernel_U (upper) in dsymv_upper.cu is very similar to
dsymv_kernel_L (lower) in dsymv.cu; diff the two files to compare.
@generated from magmablas/zhemv.cu, normal z -> d, Thu Oct 8 23:05:33 2020
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_d.h"
#define PRECISION_d
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Lower case, compute block multiply, work = A*x, for any size n:
[ (A11*x1) (A21^H*x2) (A31^H*x3) ] [ A11 A21^H A31^H ] [ x1 ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ] = [ A21 A22 A32^H ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
Previously:
[ (A11*x1) --- ]
work = [ (A21^H*x2) (A21*x1 + A22*x2) --- ]
[ (A31^H*x3) (A32^H*x3) (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because that has dimension blocks*NB by blocks,
where blocks*NB >= n, and it can be that blocks*NB > lda, so it won't fit in
lda*blocks space. This is why it used to need lwork = lda*(blocks + 1).
*******************************************************************************/
__global__ void
dsymv_kernel_L(
int n,
double const * __restrict__ A, int lda,
double const * __restrict__ x, int incx,
double * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
double psum, psum_t;
double total = MAGMA_D_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ double sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ double sx_blk[NB_X]; // for x[ blk ]
__shared__ double sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks left of diag
double rA[4];
double psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_D_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 ) {
sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += MAGMA_D_CONJ( sA32(ty2*4 + j, tx2) ) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to leftmost 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= blk_ind*lda; // A is A(blk_ind + tx2, ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1); // A is A(blk_ind + partial-1, 4*ty), the bottom-most valid row
}
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj left of diagonal, in block row blk
for (int jj=0; jj < blk; ++jj) {
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// since this block is left of diagonal, x must have all NB rows
if ( ty == 0 ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it has all NB columns,
// and block of x must have all NB rows.
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A_{blk,jj}^H * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_D_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, jj*NB_x + (k+1)*NB_X/4 + 4*ty), # tx or partial
}
// already at next 64x64 block
// A is A(blk_ind + tx#, (jj+1)*NB_x + 4*ty), # tx or partial
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it must have all NB rows
if ( ty4 < 4 ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end dsymv_kernel_L
/***************************************************************************//**
Lower case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1) (A21^H*x2) (A31^H*x3) ]
work = [ --- (A21*x1 + A22*x2) (A32^H*x3) ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
[ (A11*x1) + (A21^H*x2) + (A31^H*x3) ]
y = alpha*[ (A21*x1 + A22*x2) + (A32^H*x3) ] + beta*y
[ (A31*x1 + A32*x2 + A33*x3) ]
*******************************************************************************/
__global__ void
dsymv_kernel_L_sum(
int n,
double alpha,
int lda,
double beta,
double * __restrict__ y, int incy,
double const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
int blocks = gridDim.x;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind + blk*lda;
double Ax = MAGMA_D_ZERO;
for (int j = blk; j < blocks; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
/***************************************************************************//**
Purpose
-------
magmablas_dsymv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha DOUBLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA DOUBLE PRECISION array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda be a multiple of 16; otherwise performance
deteriorates because the memory accesses are not fully coalesced.
@param[in]
dx DOUBLE PRECISION array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta DOUBLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy DOUBLE PRECISION array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) DOUBLE PRECISION array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ),
where NB_X = 64.
@param[in]
queue magma_queue_t.
Queue to execute in.
MAGMA implements dsymv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
magmablas_dsymv_work requires users to provide a workspace, while
magmablas_dsymv is a wrapper routine that allocates the workspace inside the
routine and provides the same interface as cublas.
If users need to call dsymv frequently, we suggest using
magmablas_dsymv_work instead of magmablas_dsymv, as the overhead of
allocating and freeing device memory in magmablas_dsymv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_dsymv_work(
magma_uplo_t uplo, magma_int_t n,
double alpha,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_const_ptr dx, magma_int_t incx,
double beta,
magmaDouble_ptr dy, magma_int_t incy,
magmaDouble_ptr dwork, magma_int_t lwork,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_dsymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwmin = ldda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_D_EQUAL(alpha, MAGMA_D_ZERO) && MAGMA_D_EQUAL(beta, MAGMA_D_ONE) ) )
return info;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
dim3 threads_sum( NB_X, 1, 1 );
if ( upper ) {
dsymv_kernel_U<<< grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dx, incx, dwork);
dsymv_kernel_U_sum<<< grid, threads_sum, 0, queue->cuda_stream() >>>
(n, alpha, ldda, beta, dy, incy, dwork);
}
else {
dsymv_kernel_L<<< grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dx, incx, dwork);
dsymv_kernel_L_sum<<< grid, threads_sum, 0, queue->cuda_stream() >>>
(n, alpha, ldda, beta, dy, incy, dwork);
}
return info;
}
// end magmablas_dsymv_work
/***************************************************************************//**
Purpose
-------
magmablas_dsymv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha DOUBLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA DOUBLE PRECISION array of DIMENSION ( LDDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the first dimension of A as declared
in the calling (sub) program. LDDA must be at least
max( 1, n ).
It is recommended that ldda be a multiple of 16; otherwise performance
deteriorates because the memory accesses are not fully coalesced.
@param[in]
dx DOUBLE PRECISION array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta DOUBLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in,out]
dy DOUBLE PRECISION array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_hemv
*******************************************************************************/
extern "C"
magma_int_t
magmablas_dsymv(
magma_uplo_t uplo, magma_int_t n,
double alpha,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_const_ptr dx, magma_int_t incx,
double beta,
magmaDouble_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_dsymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
bool upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( ldda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_D_EQUAL(alpha, MAGMA_D_ZERO) && MAGMA_D_EQUAL(beta, MAGMA_D_ONE) ) )
return info;
magmaDouble_ptr dwork;
magma_int_t blocks = magma_ceildiv( n, NB_X );
magma_int_t lwork = ldda*blocks;
magma_dmalloc( &dwork, lwork );
if ( dwork == NULL ) {
info = MAGMA_ERR_DEVICE_ALLOC;
magma_xerbla( __func__, -(info) );
return info;
}
magmablas_dsymv_work( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy,
dwork, lwork, queue );
magma_free( dwork );
return info;
}
// end magmablas_dsymv
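// -----------------------------------------------------------------------------
// Editor's sketch (not part of the original MAGMA source): for an occasional
// call, the wrapper above can be used directly; it allocates and frees the
// workspace internally, at the cost documented above (~10 Gflop/s for n around
// 10000). The function name is hypothetical and dA, dx, dy are assumed to be
// valid device arrays.
static void example_dsymv_usage(
    magma_int_t n,
    magmaDouble_const_ptr dA, magma_int_t ldda,
    magmaDouble_const_ptr dx,
    magmaDouble_ptr dy,
    magma_queue_t queue )
{
    // y := 2.0*A*x + 1.0*y, referencing the lower triangle of A
    magmablas_dsymv( MagmaLower, n, 2.0, dA, ldda, dx, 1, 1.0, dy, 1, queue );
}
// -----------------------------------------------------------------------------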
|
2e2140cca7c422dfd6ec3cb7dbb57cb1c59a3c79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float* var_5,float* var_6,float var_7,float var_8,float var_9,float var_10) {
if (comp <= (var_3 - var_4)) {
for (int i=0; i < var_1; ++i) {
comp = +1.3954E35f + -0.0f;
for (int i=0; i < var_2; ++i) {
var_5[i] = +1.7694E35f;
comp += var_5[i] - +1.9550E-43f * +1.6932E-43f - (-1.4210E-35f / (var_7 / -0.0f));
var_6[i] = +1.6805E-35f;
comp += var_6[i] + var_8 / -0.0f * var_9 - var_10;
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float* tmp_6 = initPointer( atof(argv[6]) );
float* tmp_7 = initPointer( atof(argv[7]) );
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11);
hipDeviceSynchronize();
return 0;
}
| 2e2140cca7c422dfd6ec3cb7dbb57cb1c59a3c79.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float* var_5,float* var_6,float var_7,float var_8,float var_9,float var_10) {
if (comp <= (var_3 - var_4)) {
for (int i=0; i < var_1; ++i) {
comp = +1.3954E35f + -0.0f;
for (int i=0; i < var_2; ++i) {
var_5[i] = +1.7694E35f;
comp += var_5[i] - +1.9550E-43f * +1.6932E-43f - (-1.4210E-35f / (var_7 / -0.0f));
var_6[i] = +1.6805E-35f;
comp += var_6[i] + var_8 / -0.0f * var_9 - var_10;
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float* tmp_6 = initPointer( atof(argv[6]) );
float* tmp_7 = initPointer( atof(argv[7]) );
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11);
cudaDeviceSynchronize();
return 0;
}
|
1798e00fa39d3204297a73bf4bca5edcf33d0429.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
template <typename T, typename C, typename U>
__global__ void
awkward_RegularArray_getitem_jagged_expand(T* multistarts,
C* multistops,
const U* singleoffsets,
int64_t regularsize,
int64_t regularlength,
uint64_t invocation_index,
uint64_t* err_code) {
if (err_code[0] == NO_ERROR) {
int64_t thread_id = (blockIdx.x * blockDim.x + threadIdx.x) / regularsize;
int64_t thready_id = (blockIdx.x * blockDim.x + threadIdx.x) % regularsize;
if (thread_id < regularlength) {
multistarts[(thread_id * regularsize) + thready_id] =
singleoffsets[thready_id];
multistops[(thread_id * regularsize) + thready_id] =
singleoffsets[thready_id + 1];
}
}
}
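// -----------------------------------------------------------------------------
// Editor's sketch (not part of the original awkward-cuda source): a possible
// host-side launch. Each thread handles one (row, column) pair of the
// regularlength x regularsize index grid, so a 1-D launch needs at least
// regularlength * regularsize threads. The wrapper name, the block size of 128,
// and the hipStream_t parameter are illustrative assumptions.
template <typename T, typename C, typename U>
void launch_jagged_expand(T* multistarts, C* multistops, const U* singleoffsets,
                          int64_t regularsize, int64_t regularlength,
                          uint64_t invocation_index, uint64_t* err_code,
                          hipStream_t stream) {
  int64_t total = regularlength * regularsize;   // one thread per output slot
  int block = 128;
  int grid = (int)((total + block - 1) / block);
  hipLaunchKernelGGL(
      HIP_KERNEL_NAME(awkward_RegularArray_getitem_jagged_expand<T, C, U>),
      dim3(grid), dim3(block), 0, stream,
      multistarts, multistops, singleoffsets,
      regularsize, regularlength, invocation_index, err_code);
}
// -----------------------------------------------------------------------------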
| 1798e00fa39d3204297a73bf4bca5edcf33d0429.cu | // BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
template <typename T, typename C, typename U>
__global__ void
awkward_RegularArray_getitem_jagged_expand(T* multistarts,
C* multistops,
const U* singleoffsets,
int64_t regularsize,
int64_t regularlength,
uint64_t invocation_index,
uint64_t* err_code) {
if (err_code[0] == NO_ERROR) {
int64_t thread_id = (blockIdx.x * blockDim.x + threadIdx.x) / regularsize;
int64_t thready_id = (blockIdx.x * blockDim.x + threadIdx.x) % regularsize;
if (thread_id < regularlength) {
multistarts[(thread_id * regularsize) + thready_id] =
singleoffsets[thready_id];
multistops[(thread_id * regularsize) + thready_id] =
singleoffsets[thready_id + 1];
}
}
}
|
fea11c59c44fe40c5c8a6464d12be666b53544ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//******************************************************************************
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "utilities_api.h"
//==============================================================================
// srtCopy
hipError_t srtCopy(
const void* x, const srtTensorDescriptor* xDesc,
void* out, const srtTensorDescriptor* oDesc,
hipStream_t stream
) {
return hipErrorNotSupported;
}
//==============================================================================
// device kernel
__device__ int64_t globalElapsed;
__global__ void cudaDelayStream_kernel(int64_t count) {
clock_t start = clock64();
clock_t elapsed = 0;
while(elapsed < count) {
elapsed = clock64() - start;
}
globalElapsed = elapsed;
}
//------------------------------------------------------------------------------
// Swift importable C functions
hipError_t srtDelayStream(double seconds, hipStream_t stream) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
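// prop.clockRate is reported in kHz, so seconds * clockRate * 1000 converts the
// requested delay into the number of GPU clock ticks the kernel busy-waits for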
int64_t count = seconds * prop.clockRate * 1000;
hipLaunchKernelGGL(( cudaDelayStream_kernel), dim3(1), dim3(1), 0, stream, count);
return hipSuccess;
}
| fea11c59c44fe40c5c8a6464d12be666b53544ea.cu | //******************************************************************************
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "utilities_api.h"
//==============================================================================
// srtCopy
cudaError_t srtCopy(
const void* x, const srtTensorDescriptor* xDesc,
void* out, const srtTensorDescriptor* oDesc,
cudaStream_t stream
) {
return cudaErrorNotSupported;
}
//==============================================================================
// device kernel
__device__ int64_t globalElapsed;
__global__ void cudaDelayStream_kernel(int64_t count) {
clock_t start = clock64();
clock_t elapsed = 0;
while(elapsed < count) {
elapsed = clock64() - start;
}
globalElapsed = elapsed;
}
//------------------------------------------------------------------------------
// Swift importable C functions
cudaError_t srtDelayStream(double seconds, cudaStream_t stream) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
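// prop.clockRate is reported in kHz, so seconds * clockRate * 1000 converts the
// requested delay into the number of GPU clock ticks the kernel busy-waits for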
int64_t count = seconds * prop.clockRate * 1000;
cudaDelayStream_kernel<<<1, 1, 0, stream>>>(count);
return cudaSuccess;
}
|
ec5037dcf479161928afcb137107343d04fe29db.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <time.h>
#include <cstring>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#define gpuErrchk(ans) { gpu_assert((ans), __FILE__, __LINE__); }
inline void gpu_assert(hipError_t code, const char *file, int line, bool abort = true)
{
if(code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void compare_arrays(int *a, int *b, int size)
{
for(int i = 0; i < size; i++)
{
if(a[i] != b[i])
{
printf("Arrays are different :-(\n");
return;
}
}
printf("Arrays are the same :-)\n");
}
__global__ void sum_array_gpu(int *a, int *b, int *c, int *d, int size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < size) {
d[gid] = a[gid] + b[gid] + c[gid];
}
}
void sum_array_cpu(int *a, int *b, int *c, int *d, int size) {
for (int i = 0; i < size; i++) {
d[i] = a[i] + b[i] + c[i];
}
}
int main(int argc, char *argv[]) {
int size = 1 << 22;
printf("size: %d \n", size);
//int block_size = atoi(argv[1]);
int block_size = 64;
int NO_BYTES = size * sizeof(int);
// host pointers
int *h_a, *h_b, *h_c, *h_d, *gpu_results;
h_a = (int *)malloc(NO_BYTES);
h_b = (int *)malloc(NO_BYTES);
h_c = (int *)malloc(NO_BYTES);
gpu_results = (int *)malloc(NO_BYTES);
h_d = (int *)malloc(NO_BYTES);
// initialize host pointer
time_t t;
srand((unsigned)time(&t));
for (int i = 0; i < size; i++) {
h_a[i] = (int)(rand() & 0xFF);
h_b[i] = (int)(rand() & 0xFF);
h_c[i] = (int)(rand() & 0xFF);
}
// summation in CPU
clock_t cpu_start, cpu_end;
cpu_start = clock();
sum_array_cpu(h_a, h_b, h_c, h_d, size);
cpu_end = clock();
memset(gpu_results, 0, NO_BYTES);
// device pointer
int *d_a, *d_b, *d_c, *d_d;
gpuErrchk(hipMalloc((int **)&d_a, NO_BYTES));
gpuErrchk(hipMalloc((int **)&d_b, NO_BYTES));
gpuErrchk(hipMalloc((int **)&d_c, NO_BYTES));
gpuErrchk(hipMalloc((int **)&d_d, NO_BYTES));
clock_t htod_start, htod_end;
htod_start = clock();
gpuErrchk(hipMemcpy(d_a, h_a, NO_BYTES, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_b, h_b, NO_BYTES, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_c, h_c, NO_BYTES, hipMemcpyHostToDevice));
htod_end = clock();
// launching the grid
dim3 block(block_size);
dim3 grid((size + block.x - 1) / block.x);
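// with size = 1 << 22 and block_size = 64 this launches 65,536 blocks of 64 threads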
// measure GPU execution time
clock_t gpu_start, gpu_end;
gpu_start = clock();
hipLaunchKernelGGL(( sum_array_gpu), dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c, d_d, size);
gpuErrchk(hipDeviceSynchronize());
gpu_end = clock();
// memory transfer back to host
clock_t dtoh_start, dtoh_end;
dtoh_start = clock();
gpuErrchk(hipMemcpy(gpu_results, d_d, NO_BYTES, hipMemcpyDeviceToHost));
dtoh_end = clock();
// array comparison
compare_arrays(gpu_results, h_d, size);
printf("Sum array CPU execution time : % 4.6f \n",
(double)((double)(cpu_end - cpu_start) / CLOCKS_PER_SEC));
printf("Sum array GPU execution time : % 4.6f \n",
(double)((double)(gpu_end - gpu_start) / CLOCKS_PER_SEC));
printf("htod mem transfer time : % 4.6f \n",
(double)((double)(htod_end - htod_start) / CLOCKS_PER_SEC));
printf("dtoh mem transfer time : % 4.6f \n",
(double)((double)(dtoh_end - dtoh_start) / CLOCKS_PER_SEC));
printf("Sum array GPU total execution time : % 4.6f \n",
(double)((double)(dtoh_end - htod_start) / CLOCKS_PER_SEC));
gpuErrchk(hipFree(d_a));
gpuErrchk(hipFree(d_b));
gpuErrchk(hipFree(d_c));
gpuErrchk(hipFree(d_d));
free(h_a);
free(h_b);
free(h_c);
free(h_d);
free(gpu_results);
hipDeviceReset();
return 0;
}
| ec5037dcf479161928afcb137107343d04fe29db.cu | #include <stdio.h>
#include <time.h>
#include <cstring>
#include "cuda.h"
#include "cuda_runtime.h"
#define gpuErrchk(ans) { gpu_assert((ans), __FILE__, __LINE__); }
inline void gpu_assert(cudaError_t code, const char *file, int line, bool abort = true)
{
if(code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void compare_arrays(int *a, int *b, int size)
{
for(int i = 0; i < size; i++)
{
if(a[i] != b[i])
{
printf("Arrays are different :-(\n");
return;
}
}
printf("Arrays are the same :-)\n");
}
__global__ void sum_array_gpu(int *a, int *b, int *c, int *d, int size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < size) {
d[gid] = a[gid] + b[gid] + c[gid];
}
}
void sum_array_cpu(int *a, int *b, int *c, int *d, int size) {
for (int i = 0; i < size; i++) {
d[i] = a[i] + b[i] + c[i];
}
}
int main(int argc, char *argv[]) {
int size = 1 << 22;
printf("size: %d \n", size);
//int block_size = atoi(argv[1]);
int block_size = 64;
int NO_BYTES = size * sizeof(int);
// host pointers
int *h_a, *h_b, *h_c, *h_d, *gpu_results;
h_a = (int *)malloc(NO_BYTES);
h_b = (int *)malloc(NO_BYTES);
h_c = (int *)malloc(NO_BYTES);
gpu_results = (int *)malloc(NO_BYTES);
h_d = (int *)malloc(NO_BYTES);
// initialize host pointer
time_t t;
srand((unsigned)time(&t));
for (int i = 0; i < size; i++) {
h_a[i] = (int)(rand() & 0xFF);
h_b[i] = (int)(rand() & 0xFF);
h_c[i] = (int)(rand() & 0xFF);
}
// summation in CPU
clock_t cpu_start, cpu_end;
cpu_start = clock();
sum_array_cpu(h_a, h_b, h_c, h_d, size);
cpu_end = clock();
memset(gpu_results, 0, NO_BYTES);
// device pointers
int *d_a, *d_b, *d_c, *d_d;
gpuErrchk(cudaMalloc((int **)&d_a, NO_BYTES));
gpuErrchk(cudaMalloc((int **)&d_b, NO_BYTES));
gpuErrchk(cudaMalloc((int **)&d_c, NO_BYTES));
gpuErrchk(cudaMalloc((int **)&d_d, NO_BYTES));
clock_t htod_start, htod_end;
htod_start = clock();
gpuErrchk(cudaMemcpy(d_a, h_a, NO_BYTES, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_b, h_b, NO_BYTES, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_c, h_c, NO_BYTES, cudaMemcpyHostToDevice));
htod_end = clock();
// launching the grid
dim3 block(block_size);
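// round the grid size up so a final partial block covers the tail when size is not a multiple of block.x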
dim3 grid((size + block.x - 1) / block.x);
// measuring execution time on the GPU
clock_t gpu_start, gpu_end;
gpu_start = clock();
sum_array_gpu<<<grid, block>>>(d_a, d_b, d_c, d_d, size);
gpuErrchk(cudaDeviceSynchronize());
gpu_end = clock();
// memory transfer back to host
clock_t dtoh_start, dtoh_end;
dtoh_start = clock();
gpuErrchk(cudaMemcpy(gpu_results, d_d, NO_BYTES, cudaMemcpyDeviceToHost));
dtoh_end = clock();
// array comparison
compare_arrays(gpu_results, h_d, size);
printf("Sum array CPU execution time : % 4.6f \n",
(double)((double)(cpu_end - cpu_start) / CLOCKS_PER_SEC));
printf("Sum array GPU execution time : % 4.6f \n",
(double)((double)(gpu_end - gpu_start) / CLOCKS_PER_SEC));
printf("htod mem transfer time : % 4.6f \n",
(double)((double)(htod_end - htod_start) / CLOCKS_PER_SEC));
printf("dtoh mem transfer time : % 4.6f \n",
(double)((double)(dtoh_end - dtoh_start) / CLOCKS_PER_SEC));
printf("Sum array GPU total execution time : % 4.6f \n",
(double)((double)(dtoh_end - htod_start) / CLOCKS_PER_SEC));
gpuErrchk(cudaFree(d_a));
gpuErrchk(cudaFree(d_b));
gpuErrchk(cudaFree(d_c));
gpuErrchk(cudaFree(d_d));
free(h_a);
free(h_b);
free(h_c);
free(h_d);
free(gpu_results);
cudaDeviceReset();
return 0;
}
|
d45d1ad3b8712bd6a23d9f71039cdf8f71a643c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void hypterm (double * flux_0, double * flux_1, double * flux_2, double * flux_3, double * flux_4, double * cons_1, double * cons_2, double * cons_3, double * cons_4, double * q_1, double * q_2, double * q_3, double * q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
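//Adjacent blocks overlap by 8 threads in i (a 4-point stencil halo on each side); j0 is offset by 4 to skip the lower boundary layer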
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i-8);
int i = i0 + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j)+4;
int j = j0 + (int)(threadIdx.y);
//Declarations
double reg_cons_1_m4=0, reg_cons_1_m3=0, reg_cons_1_m2=0, reg_cons_1_m1=0, __shared__ sh_cons_1_c0[24][16], reg_cons_1_p1=0, reg_cons_1_p2=0, reg_cons_1_p3=0, reg_cons_1_p4=0;
double reg_cons_2_m4=0, reg_cons_2_m3=0, reg_cons_2_m2=0, reg_cons_2_m1=0, __shared__ sh_cons_2_c0[24][16], reg_cons_2_p1=0, reg_cons_2_p2=0, reg_cons_2_p3=0, reg_cons_2_p4=0;
double reg_cons_3_m4=0, reg_cons_3_m3=0, reg_cons_3_m2=0, reg_cons_3_m1=0, __shared__ sh_cons_3_c0[24][16], reg_cons_3_p1=0, reg_cons_3_p2=0, reg_cons_3_p3=0, reg_cons_3_p4=0;
double reg_cons_4_m4=0, reg_cons_4_m3=0, reg_cons_4_m2=0, reg_cons_4_m1=0, __shared__ sh_cons_4_c0[24][16], reg_cons_4_p1=0, reg_cons_4_p2=0, reg_cons_4_p3=0, reg_cons_4_p4=0;
double __shared__ sh_q_1_c0[24][16];
double __shared__ sh_q_2_c0[24][16];
double reg_q_3_m4=0, reg_q_3_m3=0, reg_q_3_m2=0, reg_q_3_m1=0, reg_q_3_c0=0, reg_q_3_p1=0, reg_q_3_p2=0, reg_q_3_p3=0, reg_q_3_p4=0;
double reg_q_4_m4=0, reg_q_4_m3=0, reg_q_4_m2=0, reg_q_4_m1=0, __shared__ sh_q_4_c0[24][16], reg_q_4_p1=0, reg_q_4_p2=0, reg_q_4_p3=0, reg_q_4_p4=0;
//Value Initialization
if (j <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
reg_cons_1_m4 = cons_1[0 + j*N + i];
reg_cons_1_m3 = cons_1[1*M*N + j*N + i];
reg_cons_1_m2 = cons_1[2*M*N + j*N + i];
reg_cons_1_m1 = cons_1[3*M*N + j*N + i];
sh_cons_1_c0[j-j0+4][i-i0] = cons_1[4*M*N + j*N + i];
reg_cons_1_p1 = cons_1[5*M*N + j*N + i];
reg_cons_1_p2 = cons_1[6*M*N + j*N + i];
reg_cons_1_p3 = cons_1[7*M*N + j*N + i];
reg_cons_2_m4 = cons_2[0 + j*N + i];
reg_cons_2_m3 = cons_2[1*M*N + j*N + i];
reg_cons_2_m2 = cons_2[2*M*N + j*N + i];
reg_cons_2_m1 = cons_2[3*M*N + j*N + i];
sh_cons_2_c0[j-j0+4][i-i0] = cons_2[4*M*N + j*N + i];
reg_cons_2_p1 = cons_2[5*M*N + j*N + i];
reg_cons_2_p2 = cons_2[6*M*N + j*N + i];
reg_cons_2_p3 = cons_2[7*M*N + j*N + i];
reg_cons_3_m4 = cons_3[0 + j*N + i];
reg_cons_3_m3 = cons_3[1*M*N + j*N + i];
reg_cons_3_m2 = cons_3[2*M*N + j*N + i];
reg_cons_3_m1 = cons_3[3*M*N + j*N + i];
sh_cons_3_c0[j-j0+4][i-i0] = cons_3[4*M*N + j*N + i];
reg_cons_3_p1 = cons_3[5*M*N + j*N + i];
reg_cons_3_p2 = cons_3[6*M*N + j*N + i];
reg_cons_3_p3 = cons_3[7*M*N + j*N + i];
reg_cons_4_m4 = cons_4[0 + j*N + i];
reg_cons_4_m3 = cons_4[1*M*N + j*N + i];
reg_cons_4_m2 = cons_4[2*M*N + j*N + i];
reg_cons_4_m1 = cons_4[3*M*N + j*N + i];
sh_cons_4_c0[j-j0+4][i-i0] = cons_4[4*M*N + j*N + i];
reg_cons_4_p1 = cons_4[5*M*N + j*N + i];
reg_cons_4_p2 = cons_4[6*M*N + j*N + i];
reg_cons_4_p3 = cons_4[7*M*N + j*N + i];
reg_q_3_m4 = q_3[0 + j*N + i];
reg_q_3_m3 = q_3[1*M*N + j*N + i];
reg_q_3_m2 = q_3[2*M*N + j*N + i];
reg_q_3_m1 = q_3[3*M*N + j*N + i];
reg_q_3_c0 = q_3[4*M*N + j*N + i];
reg_q_3_p1 = q_3[5*M*N + j*N + i];
reg_q_3_p2 = q_3[6*M*N + j*N + i];
reg_q_3_p3 = q_3[7*M*N + j*N + i];
reg_q_4_m4 = q_4[0 + j*N + i];
reg_q_4_m3 = q_4[1*M*N + j*N + i];
reg_q_4_m2 = q_4[2*M*N + j*N + i];
reg_q_4_m1 = q_4[3*M*N + j*N + i];
sh_q_4_c0[j-j0+4][i-i0] = q_4[4*M*N + j*N + i];
reg_q_4_p1 = q_4[5*M*N + j*N + i];
reg_q_4_p2 = q_4[6*M*N + j*N + i];
reg_q_4_p3 = q_4[7*M*N + j*N + i];
}
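//Threads with threadIdx.y < 8 additionally stage the j-direction halo (4 rows below and 4 rows above the 16-row tile) of the central plane into shared memory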
if(threadIdx.y < 4) {
int jj = (j-4);
if (jj <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
sh_cons_1_c0[jj-j0+4][i-i0] = cons_1[4*M*N + jj*N + i];
sh_cons_2_c0[jj-j0+4][i-i0] = cons_2[4*M*N + jj*N + i];
sh_cons_3_c0[jj-j0+4][i-i0] = cons_3[4*M*N + jj*N + i];
sh_cons_4_c0[jj-j0+4][i-i0] = cons_4[4*M*N + jj*N + i];
sh_q_4_c0[jj-j0+4][i-i0] = q_4[4*M*N + jj*N + i];
}
} else if(threadIdx.y < 8) {
int jj = (j-4)+16;
if (jj <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
sh_cons_1_c0[jj-j0+4][i-i0] = cons_1[4*M*N + jj*N + i];
sh_cons_2_c0[jj-j0+4][i-i0] = cons_2[4*M*N + jj*N + i];
sh_cons_3_c0[jj-j0+4][i-i0] = cons_3[4*M*N + jj*N + i];
sh_cons_4_c0[jj-j0+4][i-i0] = cons_4[4*M*N + jj*N + i];
sh_q_4_c0[jj-j0+4][i-i0] = q_4[4*M*N + jj*N + i];
}
}
//Rest of the computation
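//March along k with a sliding 9-plane window: the k-4..k+4 planes live in per-thread registers while the central k-plane (plus its j-halo) is staged in shared memory for the i/j stencils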
for (int k=4; k<=L-5; ++k) {
//Fetch new plane
if (j <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
reg_cons_1_p4 = cons_1[(k+4)*M*N + j*N + i];
reg_cons_2_p4 = cons_2[(k+4)*M*N + j*N + i];
reg_cons_3_p4 = cons_3[(k+4)*M*N + j*N + i];
reg_cons_4_p4 = cons_4[(k+4)*M*N + j*N + i];
sh_q_1_c0[j-j0+4][i-i0] = q_1[k*M*N + j*N + i];
sh_q_2_c0[j-j0+4][i-i0] = q_2[k*M*N + j*N + i];
reg_q_3_p4 = q_3[(k+4)*M*N + j*N + i];
reg_q_4_p4 = q_4[(k+4)*M*N + j*N + i];
}
if(threadIdx.y < 4) {
int jj = (j-4);
if (jj <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
sh_cons_1_c0[jj-j0+4][i-i0] = cons_1[k*M*N + jj*N + i];
sh_cons_2_c0[jj-j0+4][i-i0] = cons_2[k*M*N + jj*N + i];
sh_cons_3_c0[jj-j0+4][i-i0] = cons_3[k*M*N + jj*N + i];
sh_cons_4_c0[jj-j0+4][i-i0] = cons_4[k*M*N + jj*N + i];
}
} else if(threadIdx.y < 8) {
int jj = (j-4)+16;
if (jj <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
sh_cons_1_c0[jj-j0+4][i-i0] = cons_1[k*M*N + jj*N + i];
sh_cons_2_c0[jj-j0+4][i-i0] = cons_2[k*M*N + jj*N + i];
sh_cons_3_c0[jj-j0+4][i-i0] = cons_3[k*M*N + jj*N + i];
sh_cons_4_c0[jj-j0+4][i-i0] = cons_4[k*M*N + jj*N + i];
}
} else if(threadIdx.y < 12) {
int jj = (j-12);
if (jj <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
sh_q_1_c0[jj-j0+4][i-i0] = q_1[k*M*N + jj*N + i];
sh_q_2_c0[jj-j0+4][i-i0] = q_2[k*M*N + jj*N + i];
sh_q_4_c0[jj-j0+4][i-i0] = q_4[k*M*N + jj*N + i];
}
} else {
int jj = (j-12)+16;
if (jj <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
sh_q_1_c0[jj-j0+4][i-i0] = q_1[k*M*N + jj*N + i];
sh_q_2_c0[jj-j0+4][i-i0] = q_2[k*M*N + jj*N + i];
sh_q_4_c0[jj-j0+4][i-i0] = q_4[k*M*N + jj*N + i];
}
}
__syncthreads ();
double r0,r1,r2,r3,r4;
if ((j <= M-5) & i >= max (i0+4, 4) & i <= min (i0+blockdim_i-5, N-5)) {
r0=flux_0[k*M*N + j*N + i], r1=flux_1[k*M*N + j*N + i], r2=flux_2[k*M*N + j*N + i], r3=flux_3[k*M*N + j*N + i], r4 = flux_4[k*M*N + j*N + i];
// double r0=0.0f,r1=0.0f,r2=0.0f,r3=0.0f,r4=0.0f;
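//Each direction uses a 4-neighbour central difference with weights 0.8, 0.2, 0.038, 0.0035 (truncated 8th-order finite-difference coefficients) scaled by the inverse grid spacing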
r0 -= (((((0.8f * (sh_cons_1_c0[j-j0+4][i-i0+1] - sh_cons_1_c0[j-j0+4][i-i0-1])) - (0.2f * (sh_cons_1_c0[j-j0+4][i-i0+2] - sh_cons_1_c0[j-j0+4][i-i0-2]))) + (0.038f * (sh_cons_1_c0[j-j0+4][i-i0+3] - sh_cons_1_c0[j-j0+4][i-i0-3]))) - (0.0035f * (sh_cons_1_c0[j-j0+4][i-i0+4] - sh_cons_1_c0[j-j0+4][i-i0-4]))) * dxinv0);
r0 -= (((((0.8f * (sh_cons_2_c0[j-j0+4+1][i-i0] - sh_cons_2_c0[j-j0+4-1][i-i0])) - (0.2f * (sh_cons_2_c0[j-j0+4+2][i-i0] - sh_cons_2_c0[j-j0+4-2][i-i0]))) + (0.038f * (sh_cons_2_c0[j-j0+4+3][i-i0] - sh_cons_2_c0[j-j0+4-3][i-i0]))) - (0.0035f * (sh_cons_2_c0[j-j0+4+4][i-i0] - sh_cons_2_c0[j-j0+4-4][i-i0]))) * dxinv1);
r0 -= (((((0.8f * (reg_cons_3_p1 - reg_cons_3_m1)) - (0.2f * (reg_cons_3_p2 - reg_cons_3_m2))) + (0.038f * (reg_cons_3_p3 - reg_cons_3_m3))) - (0.0035f * (reg_cons_3_p4 - reg_cons_3_m4))) * dxinv2);
flux_0[k*M*N + j*N + i] = r0;
r1 -= (((((0.8f * (((sh_cons_1_c0[j-j0+4][i-i0+1] * sh_q_1_c0[j-j0+4][i-i0+1]) - (sh_cons_1_c0[j-j0+4][i-i0-1] * sh_q_1_c0[j-j0+4][i-i0-1])) + (sh_q_4_c0[j-j0+4][i-i0+1] - sh_q_4_c0[j-j0+4][i-i0-1]))) - (0.2f * (((sh_cons_1_c0[j-j0+4][i-i0+2] * sh_q_1_c0[j-j0+4][i-i0+2]) - (sh_cons_1_c0[j-j0+4][i-i0-2] * sh_q_1_c0[j-j0+4][i-i0-2])) + (sh_q_4_c0[j-j0+4][i-i0+2] - sh_q_4_c0[j-j0+4][i-i0-2])))) + (0.038f * (((sh_cons_1_c0[j-j0+4][i-i0+3] * sh_q_1_c0[j-j0+4][i-i0+3]) - (sh_cons_1_c0[j-j0+4][i-i0-3] * sh_q_1_c0[j-j0+4][i-i0-3])) + (sh_q_4_c0[j-j0+4][i-i0+3] - sh_q_4_c0[j-j0+4][i-i0-3])))) - (0.0035f * (((sh_cons_1_c0[j-j0+4][i-i0+4] * sh_q_1_c0[j-j0+4][i-i0+4]) - (sh_cons_1_c0[j-j0+4][i-i0-4] * sh_q_1_c0[j-j0+4][i-i0-4])) + (sh_q_4_c0[j-j0+4][i-i0+4] - sh_q_4_c0[j-j0+4][i-i0-4])))) * dxinv0);
r1 -= (((((0.8f * ((sh_cons_1_c0[j-j0+4+1][i-i0] * sh_q_2_c0[j-j0+4+1][i-i0]) - (sh_cons_1_c0[j-j0+4-1][i-i0] * sh_q_2_c0[j-j0+4-1][i-i0]))) - (0.2f * ((sh_cons_1_c0[j-j0+4+2][i-i0] * sh_q_2_c0[j-j0+4+2][i-i0]) - (sh_cons_1_c0[j-j0+4-2][i-i0] * sh_q_2_c0[j-j0+4-2][i-i0])))) + (0.038f * ((sh_cons_1_c0[j-j0+4+3][i-i0] * sh_q_2_c0[j-j0+4+3][i-i0]) - (sh_cons_1_c0[j-j0+4-3][i-i0] * sh_q_2_c0[j-j0+4-3][i-i0])))) - (0.0035f * ((sh_cons_1_c0[j-j0+4+4][i-i0] * sh_q_2_c0[j-j0+4+4][i-i0]) - (sh_cons_1_c0[j-j0+4-4][i-i0] * sh_q_2_c0[j-j0+4-4][i-i0])))) * dxinv1);
r1 -= (((((0.8f * ((reg_cons_1_p1 * reg_q_3_p1) - (reg_cons_1_m1 * reg_q_3_m1))) - (0.2f * ((reg_cons_1_p2 * reg_q_3_p2) - (reg_cons_1_m2 * reg_q_3_m2)))) + (0.038f * ((reg_cons_1_p3 * reg_q_3_p3) - (reg_cons_1_m3 * reg_q_3_m3)))) - (0.0035f * ((reg_cons_1_p4 * reg_q_3_p4) - (reg_cons_1_m4 * reg_q_3_m4)))) * dxinv2);
flux_1[k*M*N + j*N + i] = r1;
r2 -= (((((0.8f * ((sh_cons_2_c0[j-j0+4][i-i0+1] * sh_q_1_c0[j-j0+4][i-i0+1]) - (sh_cons_2_c0[j-j0+4][i-i0-1] * sh_q_1_c0[j-j0+4][i-i0-1]))) - (0.2f * ((sh_cons_2_c0[j-j0+4][i-i0+2] * sh_q_1_c0[j-j0+4][i-i0+2]) - (sh_cons_2_c0[j-j0+4][i-i0-2] * sh_q_1_c0[j-j0+4][i-i0-2])))) + (0.038f * ((sh_cons_2_c0[j-j0+4][i-i0+3] * sh_q_1_c0[j-j0+4][i-i0+3]) - (sh_cons_2_c0[j-j0+4][i-i0-3] * sh_q_1_c0[j-j0+4][i-i0-3])))) - (0.0035f * ((sh_cons_2_c0[j-j0+4][i-i0+4] * sh_q_1_c0[j-j0+4][i-i0+4]) - (sh_cons_2_c0[j-j0+4][i-i0-4] * sh_q_1_c0[j-j0+4][i-i0-4])))) * dxinv0);
r2 -= (((((0.8f * (((sh_cons_2_c0[j-j0+4+1][i-i0] * sh_q_2_c0[j-j0+4+1][i-i0]) - (sh_cons_2_c0[j-j0+4-1][i-i0] * sh_q_2_c0[j-j0+4-1][i-i0])) + (sh_q_4_c0[j-j0+4+1][i-i0] - sh_q_4_c0[j-j0+4-1][i-i0]))) - (0.2f * (((sh_cons_2_c0[j-j0+4+2][i-i0] * sh_q_2_c0[j-j0+4+2][i-i0]) - (sh_cons_2_c0[j-j0+4-2][i-i0] * sh_q_2_c0[j-j0+4-2][i-i0])) + (sh_q_4_c0[j-j0+4+2][i-i0] - sh_q_4_c0[j-j0+4-2][i-i0])))) + (0.038f * (((sh_cons_2_c0[j-j0+4+3][i-i0] * sh_q_2_c0[j-j0+4+3][i-i0]) - (sh_cons_2_c0[j-j0+4-3][i-i0] * sh_q_2_c0[j-j0+4-3][i-i0])) + (sh_q_4_c0[j-j0+4+3][i-i0] - sh_q_4_c0[j-j0+4-3][i-i0])))) - (0.0035f * (((sh_cons_2_c0[j-j0+4+4][i-i0] * sh_q_2_c0[j-j0+4+4][i-i0]) - (sh_cons_2_c0[j-j0+4-4][i-i0] * sh_q_2_c0[j-j0+4-4][i-i0])) + (sh_q_4_c0[j-j0+4+4][i-i0] - sh_q_4_c0[j-j0+4-4][i-i0])))) * dxinv1);
r2 -= (((((0.8f * ((reg_cons_2_p1 * reg_q_3_p1) - (reg_cons_2_m1 * reg_q_3_m1))) - (0.2f * ((reg_cons_2_p2 * reg_q_3_p2) - (reg_cons_2_m2 * reg_q_3_m2)))) + (0.038f * ((reg_cons_2_p3 * reg_q_3_p3) - (reg_cons_2_m3 * reg_q_3_m3)))) - (0.0035f * ((reg_cons_2_p4 * reg_q_3_p4) - (reg_cons_2_m4 * reg_q_3_m4)))) * dxinv2);
flux_2[k*M*N + j*N + i] = r2;
r3 -= (((((0.8f * ((sh_cons_3_c0[j-j0+4][i-i0+1] * sh_q_1_c0[j-j0+4][i-i0+1]) - (sh_cons_3_c0[j-j0+4][i-i0-1] * sh_q_1_c0[j-j0+4][i-i0-1]))) - (0.2f * ((sh_cons_3_c0[j-j0+4][i-i0+2] * sh_q_1_c0[j-j0+4][i-i0+2]) - (sh_cons_3_c0[j-j0+4][i-i0-2] * sh_q_1_c0[j-j0+4][i-i0-2])))) + (0.038f * ((sh_cons_3_c0[j-j0+4][i-i0+3] * sh_q_1_c0[j-j0+4][i-i0+3]) - (sh_cons_3_c0[j-j0+4][i-i0-3] * sh_q_1_c0[j-j0+4][i-i0-3])))) - (0.0035f * ((sh_cons_3_c0[j-j0+4][i-i0+4] * sh_q_1_c0[j-j0+4][i-i0+4]) - (sh_cons_3_c0[j-j0+4][i-i0-4] * sh_q_1_c0[j-j0+4][i-i0-4])))) * dxinv0);
r3 -= (((((0.8f * ((sh_cons_3_c0[j-j0+4+1][i-i0] * sh_q_2_c0[j-j0+4+1][i-i0]) - (sh_cons_3_c0[j-j0+4-1][i-i0] * sh_q_2_c0[j-j0+4-1][i-i0]))) - (0.2f * ((sh_cons_3_c0[j-j0+4+2][i-i0] * sh_q_2_c0[j-j0+4+2][i-i0]) - (sh_cons_3_c0[j-j0+4-2][i-i0] * sh_q_2_c0[j-j0+4-2][i-i0])))) + (0.038f * ((sh_cons_3_c0[j-j0+4+3][i-i0] * sh_q_2_c0[j-j0+4+3][i-i0]) - (sh_cons_3_c0[j-j0+4-3][i-i0] * sh_q_2_c0[j-j0+4-3][i-i0])))) - (0.0035f * ((sh_cons_3_c0[j-j0+4+4][i-i0] * sh_q_2_c0[j-j0+4+4][i-i0]) - (sh_cons_3_c0[j-j0+4-4][i-i0] * sh_q_2_c0[j-j0+4-4][i-i0])))) * dxinv1);
r3 -= (((((0.8f * (((reg_cons_3_p1 * reg_q_3_p1) - (reg_cons_3_m1 * reg_q_3_m1)) + (reg_q_4_p1 - reg_q_4_m1))) - (0.2f * (((reg_cons_3_p2 * reg_q_3_p2) - (reg_cons_3_m2 * reg_q_3_m2)) + (reg_q_4_p2 - reg_q_4_m2)))) + (0.038f * (((reg_cons_3_p3 * reg_q_3_p3) - (reg_cons_3_m3 * reg_q_3_m3)) + (reg_q_4_p3 - reg_q_4_m3)))) - (0.0035f * (((reg_cons_3_p4 * reg_q_3_p4) - (reg_cons_3_m4 * reg_q_3_m4)) + (reg_q_4_p4 - reg_q_4_m4)))) * dxinv2);
flux_3[k*M*N + j*N + i] = r3;
r4 -= (((((0.8f * (((sh_cons_4_c0[j-j0+4][i-i0+1] * sh_q_1_c0[j-j0+4][i-i0+1]) - (sh_cons_4_c0[j-j0+4][i-i0-1] * sh_q_1_c0[j-j0+4][i-i0-1])) + ((sh_q_4_c0[j-j0+4][i-i0+1] * sh_q_1_c0[j-j0+4][i-i0+1]) - (sh_q_4_c0[j-j0+4][i-i0-1] * sh_q_1_c0[j-j0+4][i-i0-1])))) - (0.2f * (((sh_cons_4_c0[j-j0+4][i-i0+2] * sh_q_1_c0[j-j0+4][i-i0+2]) - (sh_cons_4_c0[j-j0+4][i-i0-2] * sh_q_1_c0[j-j0+4][i-i0-2])) + ((sh_q_4_c0[j-j0+4][i-i0+2] * sh_q_1_c0[j-j0+4][i-i0+2]) - (sh_q_4_c0[j-j0+4][i-i0-2] * sh_q_1_c0[j-j0+4][i-i0-2]))))) + (0.038f * (((sh_cons_4_c0[j-j0+4][i-i0+3] * sh_q_1_c0[j-j0+4][i-i0+3]) - (sh_cons_4_c0[j-j0+4][i-i0-3] * sh_q_1_c0[j-j0+4][i-i0-3])) + ((sh_q_4_c0[j-j0+4][i-i0+3] * sh_q_1_c0[j-j0+4][i-i0+3]) - (sh_q_4_c0[j-j0+4][i-i0-3] * sh_q_1_c0[j-j0+4][i-i0-3]))))) - (0.0035f * (((sh_cons_4_c0[j-j0+4][i-i0+4] * sh_q_1_c0[j-j0+4][i-i0+4]) - (sh_cons_4_c0[j-j0+4][i-i0-4] * sh_q_1_c0[j-j0+4][i-i0-4])) + ((sh_q_4_c0[j-j0+4][i-i0+4] * sh_q_1_c0[j-j0+4][i-i0+4]) - (sh_q_4_c0[j-j0+4][i-i0-4] * sh_q_1_c0[j-j0+4][i-i0-4]))))) * dxinv0);
r4 -= (((((0.8f * (((sh_cons_4_c0[j-j0+4+1][i-i0] * sh_q_2_c0[j-j0+4+1][i-i0]) - (sh_cons_4_c0[j-j0+4-1][i-i0] * sh_q_2_c0[j-j0+4-1][i-i0])) + ((sh_q_4_c0[j-j0+4+1][i-i0] * sh_q_2_c0[j-j0+4+1][i-i0]) - (sh_q_4_c0[j-j0+4-1][i-i0] * sh_q_2_c0[j-j0+4-1][i-i0])))) - (0.2f * (((sh_cons_4_c0[j-j0+4+2][i-i0] * sh_q_2_c0[j-j0+4+2][i-i0]) - (sh_cons_4_c0[j-j0+4-2][i-i0] * sh_q_2_c0[j-j0+4-2][i-i0])) + ((sh_q_4_c0[j-j0+4+2][i-i0] * sh_q_2_c0[j-j0+4+2][i-i0]) - (sh_q_4_c0[j-j0+4-2][i-i0] * sh_q_2_c0[j-j0+4-2][i-i0]))))) + (0.038f * (((sh_cons_4_c0[j-j0+4+3][i-i0] * sh_q_2_c0[j-j0+4+3][i-i0]) - (sh_cons_4_c0[j-j0+4-3][i-i0] * sh_q_2_c0[j-j0+4-3][i-i0])) + ((sh_q_4_c0[j-j0+4+3][i-i0] * sh_q_2_c0[j-j0+4+3][i-i0]) - (sh_q_4_c0[j-j0+4-3][i-i0] * sh_q_2_c0[j-j0+4-3][i-i0]))))) - (0.0035f * (((sh_cons_4_c0[j-j0+4+4][i-i0] * sh_q_2_c0[j-j0+4+4][i-i0]) - (sh_cons_4_c0[j-j0+4-4][i-i0] * sh_q_2_c0[j-j0+4-4][i-i0])) + ((sh_q_4_c0[j-j0+4+4][i-i0] * sh_q_2_c0[j-j0+4+4][i-i0]) - (sh_q_4_c0[j-j0+4-4][i-i0] * sh_q_2_c0[j-j0+4-4][i-i0]))))) * dxinv1);
r4 -= (((((0.8f * (((reg_cons_4_p1 * reg_q_3_p1) - (reg_cons_4_m1 * reg_q_3_m1)) + ((reg_q_4_p1 * reg_q_3_p1) - (reg_q_4_m1 * reg_q_3_m1)))) - (0.2f * (((reg_cons_4_p2 * reg_q_3_p2) - (reg_cons_4_m2 * reg_q_3_m2)) + ((reg_q_4_p2 * reg_q_3_p2) - (reg_q_4_m2 * reg_q_3_m2))))) + (0.038f * (((reg_cons_4_p3 * reg_q_3_p3) - (reg_cons_4_m3 * reg_q_3_m3)) + ((reg_q_4_p3 * reg_q_3_p3) - (reg_q_4_m3 * reg_q_3_m3))))) - (0.0035f * (((reg_cons_4_p4 * reg_q_3_p4) - (reg_cons_4_m4 * reg_q_3_m4)) + ((reg_q_4_p4 * reg_q_3_p4) - (reg_q_4_m4 * reg_q_3_m4))))) * dxinv2);
flux_4[k*M*N + j*N + i] = r4;
}
__syncthreads ();
//Value rotation
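//Shift the register/shared-memory pipeline down by one plane so the next iteration only has to fetch the new k+4 plane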
if (j <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
reg_cons_1_m4 = reg_cons_1_m3;
reg_cons_1_m3 = reg_cons_1_m2;
reg_cons_1_m2 = reg_cons_1_m1;
reg_cons_1_m1 = sh_cons_1_c0[j-j0+4][i-i0];
sh_cons_1_c0[j-j0+4][i-i0] = reg_cons_1_p1;
reg_cons_1_p1 = reg_cons_1_p2;
reg_cons_1_p2 = reg_cons_1_p3;
reg_cons_1_p3 = reg_cons_1_p4;
reg_cons_2_m4 = reg_cons_2_m3;
reg_cons_2_m3 = reg_cons_2_m2;
reg_cons_2_m2 = reg_cons_2_m1;
reg_cons_2_m1 = sh_cons_2_c0[j-j0+4][i-i0];
sh_cons_2_c0[j-j0+4][i-i0] = reg_cons_2_p1;
reg_cons_2_p1 = reg_cons_2_p2;
reg_cons_2_p2 = reg_cons_2_p3;
reg_cons_2_p3 = reg_cons_2_p4;
reg_cons_3_m4 = reg_cons_3_m3;
reg_cons_3_m3 = reg_cons_3_m2;
reg_cons_3_m2 = reg_cons_3_m1;
reg_cons_3_m1 = sh_cons_3_c0[j-j0+4][i-i0];
sh_cons_3_c0[j-j0+4][i-i0] = reg_cons_3_p1;
reg_cons_3_p1 = reg_cons_3_p2;
reg_cons_3_p2 = reg_cons_3_p3;
reg_cons_3_p3 = reg_cons_3_p4;
reg_cons_4_m4 = reg_cons_4_m3;
reg_cons_4_m3 = reg_cons_4_m2;
reg_cons_4_m2 = reg_cons_4_m1;
reg_cons_4_m1 = sh_cons_4_c0[j-j0+4][i-i0];
sh_cons_4_c0[j-j0+4][i-i0] = reg_cons_4_p1;
reg_cons_4_p1 = reg_cons_4_p2;
reg_cons_4_p2 = reg_cons_4_p3;
reg_cons_4_p3 = reg_cons_4_p4;
reg_q_3_m4 = reg_q_3_m3;
reg_q_3_m3 = reg_q_3_m2;
reg_q_3_m2 = reg_q_3_m1;
reg_q_3_m1 = reg_q_3_c0;
reg_q_3_c0 = reg_q_3_p1;
reg_q_3_p1 = reg_q_3_p2;
reg_q_3_p2 = reg_q_3_p3;
reg_q_3_p3 = reg_q_3_p4;
reg_q_4_m4 = reg_q_4_m3;
reg_q_4_m3 = reg_q_4_m2;
reg_q_4_m2 = reg_q_4_m1;
reg_q_4_m1 = sh_q_4_c0[j-j0+4][i-i0];
sh_q_4_c0[j-j0+4][i-i0] = reg_q_4_p1;
reg_q_4_p1 = reg_q_4_p2;
reg_q_4_p2 = reg_q_4_p3;
reg_q_4_p3 = reg_q_4_p4;
}
}
}
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
hipMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
hipMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_1;
hipMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
hipMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_2;
hipMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
hipMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_3;
hipMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
hipMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_4;
hipMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
hipMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_1;
hipMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
hipMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_2;
hipMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
hipMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_3;
hipMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
hipMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_4;
hipMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
hipMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_1;
hipMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
hipMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_2;
hipMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
hipMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_3;
hipMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
hipMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_4;
hipMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
hipMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
dim3 blockconfig_1 (16, 16, 1);
dim3 gridconfig_1 (ceil(N, blockconfig_1.x-8), ceil(M-8, blockconfig_1.y), 1);
hipLaunchKernelGGL(( hypterm) , dim3(gridconfig_1), dim3(blockconfig_1), 0, 0, flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
hipMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
//Free allocated memory
hipFree (flux_0);
hipFree (flux_1);
hipFree (flux_2);
hipFree (flux_3);
hipFree (flux_4);
hipFree (cons_1);
hipFree (cons_2);
hipFree (cons_3);
hipFree (cons_4);
hipFree (q_1);
hipFree (q_2);
hipFree (q_3);
hipFree (q_4);
}
| d45d1ad3b8712bd6a23d9f71039cdf8f71a643c0.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void hypterm (double * flux_0, double * flux_1, double * flux_2, double * flux_3, double * flux_4, double * cons_1, double * cons_2, double * cons_3, double * cons_4, double * q_1, double * q_2, double * q_3, double * q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
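//Adjacent blocks overlap by 8 threads in i (a 4-point stencil halo on each side); j0 is offset by 4 to skip the lower boundary layer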
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i-8);
int i = i0 + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j)+4;
int j = j0 + (int)(threadIdx.y);
//Declarations
double reg_cons_1_m4=0, reg_cons_1_m3=0, reg_cons_1_m2=0, reg_cons_1_m1=0, __shared__ sh_cons_1_c0[24][16], reg_cons_1_p1=0, reg_cons_1_p2=0, reg_cons_1_p3=0, reg_cons_1_p4=0;
double reg_cons_2_m4=0, reg_cons_2_m3=0, reg_cons_2_m2=0, reg_cons_2_m1=0, __shared__ sh_cons_2_c0[24][16], reg_cons_2_p1=0, reg_cons_2_p2=0, reg_cons_2_p3=0, reg_cons_2_p4=0;
double reg_cons_3_m4=0, reg_cons_3_m3=0, reg_cons_3_m2=0, reg_cons_3_m1=0, __shared__ sh_cons_3_c0[24][16], reg_cons_3_p1=0, reg_cons_3_p2=0, reg_cons_3_p3=0, reg_cons_3_p4=0;
double reg_cons_4_m4=0, reg_cons_4_m3=0, reg_cons_4_m2=0, reg_cons_4_m1=0, __shared__ sh_cons_4_c0[24][16], reg_cons_4_p1=0, reg_cons_4_p2=0, reg_cons_4_p3=0, reg_cons_4_p4=0;
double __shared__ sh_q_1_c0[24][16];
double __shared__ sh_q_2_c0[24][16];
double reg_q_3_m4=0, reg_q_3_m3=0, reg_q_3_m2=0, reg_q_3_m1=0, reg_q_3_c0=0, reg_q_3_p1=0, reg_q_3_p2=0, reg_q_3_p3=0, reg_q_3_p4=0;
double reg_q_4_m4=0, reg_q_4_m3=0, reg_q_4_m2=0, reg_q_4_m1=0, __shared__ sh_q_4_c0[24][16], reg_q_4_p1=0, reg_q_4_p2=0, reg_q_4_p3=0, reg_q_4_p4=0;
//Value Initialization
if (j <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
reg_cons_1_m4 = cons_1[0 + j*N + i];
reg_cons_1_m3 = cons_1[1*M*N + j*N + i];
reg_cons_1_m2 = cons_1[2*M*N + j*N + i];
reg_cons_1_m1 = cons_1[3*M*N + j*N + i];
sh_cons_1_c0[j-j0+4][i-i0] = cons_1[4*M*N + j*N + i];
reg_cons_1_p1 = cons_1[5*M*N + j*N + i];
reg_cons_1_p2 = cons_1[6*M*N + j*N + i];
reg_cons_1_p3 = cons_1[7*M*N + j*N + i];
reg_cons_2_m4 = cons_2[0 + j*N + i];
reg_cons_2_m3 = cons_2[1*M*N + j*N + i];
reg_cons_2_m2 = cons_2[2*M*N + j*N + i];
reg_cons_2_m1 = cons_2[3*M*N + j*N + i];
sh_cons_2_c0[j-j0+4][i-i0] = cons_2[4*M*N + j*N + i];
reg_cons_2_p1 = cons_2[5*M*N + j*N + i];
reg_cons_2_p2 = cons_2[6*M*N + j*N + i];
reg_cons_2_p3 = cons_2[7*M*N + j*N + i];
reg_cons_3_m4 = cons_3[0 + j*N + i];
reg_cons_3_m3 = cons_3[1*M*N + j*N + i];
reg_cons_3_m2 = cons_3[2*M*N + j*N + i];
reg_cons_3_m1 = cons_3[3*M*N + j*N + i];
sh_cons_3_c0[j-j0+4][i-i0] = cons_3[4*M*N + j*N + i];
reg_cons_3_p1 = cons_3[5*M*N + j*N + i];
reg_cons_3_p2 = cons_3[6*M*N + j*N + i];
reg_cons_3_p3 = cons_3[7*M*N + j*N + i];
reg_cons_4_m4 = cons_4[0 + j*N + i];
reg_cons_4_m3 = cons_4[1*M*N + j*N + i];
reg_cons_4_m2 = cons_4[2*M*N + j*N + i];
reg_cons_4_m1 = cons_4[3*M*N + j*N + i];
sh_cons_4_c0[j-j0+4][i-i0] = cons_4[4*M*N + j*N + i];
reg_cons_4_p1 = cons_4[5*M*N + j*N + i];
reg_cons_4_p2 = cons_4[6*M*N + j*N + i];
reg_cons_4_p3 = cons_4[7*M*N + j*N + i];
reg_q_3_m4 = q_3[0 + j*N + i];
reg_q_3_m3 = q_3[1*M*N + j*N + i];
reg_q_3_m2 = q_3[2*M*N + j*N + i];
reg_q_3_m1 = q_3[3*M*N + j*N + i];
reg_q_3_c0 = q_3[4*M*N + j*N + i];
reg_q_3_p1 = q_3[5*M*N + j*N + i];
reg_q_3_p2 = q_3[6*M*N + j*N + i];
reg_q_3_p3 = q_3[7*M*N + j*N + i];
reg_q_4_m4 = q_4[0 + j*N + i];
reg_q_4_m3 = q_4[1*M*N + j*N + i];
reg_q_4_m2 = q_4[2*M*N + j*N + i];
reg_q_4_m1 = q_4[3*M*N + j*N + i];
sh_q_4_c0[j-j0+4][i-i0] = q_4[4*M*N + j*N + i];
reg_q_4_p1 = q_4[5*M*N + j*N + i];
reg_q_4_p2 = q_4[6*M*N + j*N + i];
reg_q_4_p3 = q_4[7*M*N + j*N + i];
}
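//Threads with threadIdx.y < 8 additionally stage the j-direction halo (4 rows below and 4 rows above the 16-row tile) of the central plane into shared memory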
if(threadIdx.y < 4) {
int jj = (j-4);
if (jj <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
sh_cons_1_c0[jj-j0+4][i-i0] = cons_1[4*M*N + jj*N + i];
sh_cons_2_c0[jj-j0+4][i-i0] = cons_2[4*M*N + jj*N + i];
sh_cons_3_c0[jj-j0+4][i-i0] = cons_3[4*M*N + jj*N + i];
sh_cons_4_c0[jj-j0+4][i-i0] = cons_4[4*M*N + jj*N + i];
sh_q_4_c0[jj-j0+4][i-i0] = q_4[4*M*N + jj*N + i];
}
} else if(threadIdx.y < 8) {
int jj = (j-4)+16;
if (jj <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
sh_cons_1_c0[jj-j0+4][i-i0] = cons_1[4*M*N + jj*N + i];
sh_cons_2_c0[jj-j0+4][i-i0] = cons_2[4*M*N + jj*N + i];
sh_cons_3_c0[jj-j0+4][i-i0] = cons_3[4*M*N + jj*N + i];
sh_cons_4_c0[jj-j0+4][i-i0] = cons_4[4*M*N + jj*N + i];
sh_q_4_c0[jj-j0+4][i-i0] = q_4[4*M*N + jj*N + i];
}
}
//Rest of the computation
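//March along k with a sliding 9-plane window: the k-4..k+4 planes live in per-thread registers while the central k-plane (plus its j-halo) is staged in shared memory for the i/j stencils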
for (int k=4; k<=L-5; ++k) {
//Fetch new plane
if (j <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
reg_cons_1_p4 = cons_1[(k+4)*M*N + j*N + i];
reg_cons_2_p4 = cons_2[(k+4)*M*N + j*N + i];
reg_cons_3_p4 = cons_3[(k+4)*M*N + j*N + i];
reg_cons_4_p4 = cons_4[(k+4)*M*N + j*N + i];
sh_q_1_c0[j-j0+4][i-i0] = q_1[k*M*N + j*N + i];
sh_q_2_c0[j-j0+4][i-i0] = q_2[k*M*N + j*N + i];
reg_q_3_p4 = q_3[(k+4)*M*N + j*N + i];
reg_q_4_p4 = q_4[(k+4)*M*N + j*N + i];
}
if(threadIdx.y < 4) {
int jj = (j-4);
if (jj <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
sh_cons_1_c0[jj-j0+4][i-i0] = cons_1[k*M*N + jj*N + i];
sh_cons_2_c0[jj-j0+4][i-i0] = cons_2[k*M*N + jj*N + i];
sh_cons_3_c0[jj-j0+4][i-i0] = cons_3[k*M*N + jj*N + i];
sh_cons_4_c0[jj-j0+4][i-i0] = cons_4[k*M*N + jj*N + i];
}
} else if(threadIdx.y < 8) {
int jj = (j-4)+16;
if (jj <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
sh_cons_1_c0[jj-j0+4][i-i0] = cons_1[k*M*N + jj*N + i];
sh_cons_2_c0[jj-j0+4][i-i0] = cons_2[k*M*N + jj*N + i];
sh_cons_3_c0[jj-j0+4][i-i0] = cons_3[k*M*N + jj*N + i];
sh_cons_4_c0[jj-j0+4][i-i0] = cons_4[k*M*N + jj*N + i];
}
} else if(threadIdx.y < 12) {
int jj = (j-12);
if (jj <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
sh_q_1_c0[jj-j0+4][i-i0] = q_1[k*M*N + jj*N + i];
sh_q_2_c0[jj-j0+4][i-i0] = q_2[k*M*N + jj*N + i];
sh_q_4_c0[jj-j0+4][i-i0] = q_4[k*M*N + jj*N + i];
}
} else {
int jj = (j-12)+16;
if (jj <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
sh_q_1_c0[jj-j0+4][i-i0] = q_1[k*M*N + jj*N + i];
sh_q_2_c0[jj-j0+4][i-i0] = q_2[k*M*N + jj*N + i];
sh_q_4_c0[jj-j0+4][i-i0] = q_4[k*M*N + jj*N + i];
}
}
__syncthreads ();
double r0,r1,r2,r3,r4;
if ((j <= M-5) & i >= max (i0+4, 4) & i <= min (i0+blockdim_i-5, N-5)) {
r0=flux_0[k*M*N + j*N + i], r1=flux_1[k*M*N + j*N + i], r2=flux_2[k*M*N + j*N + i], r3=flux_3[k*M*N + j*N + i], r4 = flux_4[k*M*N + j*N + i];
// double r0=0.0f,r1=0.0f,r2=0.0f,r3=0.0f,r4=0.0f;
r0 -= (((((0.8f * (sh_cons_1_c0[j-j0+4][i-i0+1] - sh_cons_1_c0[j-j0+4][i-i0-1])) - (0.2f * (sh_cons_1_c0[j-j0+4][i-i0+2] - sh_cons_1_c0[j-j0+4][i-i0-2]))) + (0.038f * (sh_cons_1_c0[j-j0+4][i-i0+3] - sh_cons_1_c0[j-j0+4][i-i0-3]))) - (0.0035f * (sh_cons_1_c0[j-j0+4][i-i0+4] - sh_cons_1_c0[j-j0+4][i-i0-4]))) * dxinv0);
r0 -= (((((0.8f * (sh_cons_2_c0[j-j0+4+1][i-i0] - sh_cons_2_c0[j-j0+4-1][i-i0])) - (0.2f * (sh_cons_2_c0[j-j0+4+2][i-i0] - sh_cons_2_c0[j-j0+4-2][i-i0]))) + (0.038f * (sh_cons_2_c0[j-j0+4+3][i-i0] - sh_cons_2_c0[j-j0+4-3][i-i0]))) - (0.0035f * (sh_cons_2_c0[j-j0+4+4][i-i0] - sh_cons_2_c0[j-j0+4-4][i-i0]))) * dxinv1);
r0 -= (((((0.8f * (reg_cons_3_p1 - reg_cons_3_m1)) - (0.2f * (reg_cons_3_p2 - reg_cons_3_m2))) + (0.038f * (reg_cons_3_p3 - reg_cons_3_m3))) - (0.0035f * (reg_cons_3_p4 - reg_cons_3_m4))) * dxinv2);
flux_0[k*M*N + j*N + i] = r0;
r1 -= (((((0.8f * (((sh_cons_1_c0[j-j0+4][i-i0+1] * sh_q_1_c0[j-j0+4][i-i0+1]) - (sh_cons_1_c0[j-j0+4][i-i0-1] * sh_q_1_c0[j-j0+4][i-i0-1])) + (sh_q_4_c0[j-j0+4][i-i0+1] - sh_q_4_c0[j-j0+4][i-i0-1]))) - (0.2f * (((sh_cons_1_c0[j-j0+4][i-i0+2] * sh_q_1_c0[j-j0+4][i-i0+2]) - (sh_cons_1_c0[j-j0+4][i-i0-2] * sh_q_1_c0[j-j0+4][i-i0-2])) + (sh_q_4_c0[j-j0+4][i-i0+2] - sh_q_4_c0[j-j0+4][i-i0-2])))) + (0.038f * (((sh_cons_1_c0[j-j0+4][i-i0+3] * sh_q_1_c0[j-j0+4][i-i0+3]) - (sh_cons_1_c0[j-j0+4][i-i0-3] * sh_q_1_c0[j-j0+4][i-i0-3])) + (sh_q_4_c0[j-j0+4][i-i0+3] - sh_q_4_c0[j-j0+4][i-i0-3])))) - (0.0035f * (((sh_cons_1_c0[j-j0+4][i-i0+4] * sh_q_1_c0[j-j0+4][i-i0+4]) - (sh_cons_1_c0[j-j0+4][i-i0-4] * sh_q_1_c0[j-j0+4][i-i0-4])) + (sh_q_4_c0[j-j0+4][i-i0+4] - sh_q_4_c0[j-j0+4][i-i0-4])))) * dxinv0);
r1 -= (((((0.8f * ((sh_cons_1_c0[j-j0+4+1][i-i0] * sh_q_2_c0[j-j0+4+1][i-i0]) - (sh_cons_1_c0[j-j0+4-1][i-i0] * sh_q_2_c0[j-j0+4-1][i-i0]))) - (0.2f * ((sh_cons_1_c0[j-j0+4+2][i-i0] * sh_q_2_c0[j-j0+4+2][i-i0]) - (sh_cons_1_c0[j-j0+4-2][i-i0] * sh_q_2_c0[j-j0+4-2][i-i0])))) + (0.038f * ((sh_cons_1_c0[j-j0+4+3][i-i0] * sh_q_2_c0[j-j0+4+3][i-i0]) - (sh_cons_1_c0[j-j0+4-3][i-i0] * sh_q_2_c0[j-j0+4-3][i-i0])))) - (0.0035f * ((sh_cons_1_c0[j-j0+4+4][i-i0] * sh_q_2_c0[j-j0+4+4][i-i0]) - (sh_cons_1_c0[j-j0+4-4][i-i0] * sh_q_2_c0[j-j0+4-4][i-i0])))) * dxinv1);
r1 -= (((((0.8f * ((reg_cons_1_p1 * reg_q_3_p1) - (reg_cons_1_m1 * reg_q_3_m1))) - (0.2f * ((reg_cons_1_p2 * reg_q_3_p2) - (reg_cons_1_m2 * reg_q_3_m2)))) + (0.038f * ((reg_cons_1_p3 * reg_q_3_p3) - (reg_cons_1_m3 * reg_q_3_m3)))) - (0.0035f * ((reg_cons_1_p4 * reg_q_3_p4) - (reg_cons_1_m4 * reg_q_3_m4)))) * dxinv2);
flux_1[k*M*N + j*N + i] = r1;
r2 -= (((((0.8f * ((sh_cons_2_c0[j-j0+4][i-i0+1] * sh_q_1_c0[j-j0+4][i-i0+1]) - (sh_cons_2_c0[j-j0+4][i-i0-1] * sh_q_1_c0[j-j0+4][i-i0-1]))) - (0.2f * ((sh_cons_2_c0[j-j0+4][i-i0+2] * sh_q_1_c0[j-j0+4][i-i0+2]) - (sh_cons_2_c0[j-j0+4][i-i0-2] * sh_q_1_c0[j-j0+4][i-i0-2])))) + (0.038f * ((sh_cons_2_c0[j-j0+4][i-i0+3] * sh_q_1_c0[j-j0+4][i-i0+3]) - (sh_cons_2_c0[j-j0+4][i-i0-3] * sh_q_1_c0[j-j0+4][i-i0-3])))) - (0.0035f * ((sh_cons_2_c0[j-j0+4][i-i0+4] * sh_q_1_c0[j-j0+4][i-i0+4]) - (sh_cons_2_c0[j-j0+4][i-i0-4] * sh_q_1_c0[j-j0+4][i-i0-4])))) * dxinv0);
r2 -= (((((0.8f * (((sh_cons_2_c0[j-j0+4+1][i-i0] * sh_q_2_c0[j-j0+4+1][i-i0]) - (sh_cons_2_c0[j-j0+4-1][i-i0] * sh_q_2_c0[j-j0+4-1][i-i0])) + (sh_q_4_c0[j-j0+4+1][i-i0] - sh_q_4_c0[j-j0+4-1][i-i0]))) - (0.2f * (((sh_cons_2_c0[j-j0+4+2][i-i0] * sh_q_2_c0[j-j0+4+2][i-i0]) - (sh_cons_2_c0[j-j0+4-2][i-i0] * sh_q_2_c0[j-j0+4-2][i-i0])) + (sh_q_4_c0[j-j0+4+2][i-i0] - sh_q_4_c0[j-j0+4-2][i-i0])))) + (0.038f * (((sh_cons_2_c0[j-j0+4+3][i-i0] * sh_q_2_c0[j-j0+4+3][i-i0]) - (sh_cons_2_c0[j-j0+4-3][i-i0] * sh_q_2_c0[j-j0+4-3][i-i0])) + (sh_q_4_c0[j-j0+4+3][i-i0] - sh_q_4_c0[j-j0+4-3][i-i0])))) - (0.0035f * (((sh_cons_2_c0[j-j0+4+4][i-i0] * sh_q_2_c0[j-j0+4+4][i-i0]) - (sh_cons_2_c0[j-j0+4-4][i-i0] * sh_q_2_c0[j-j0+4-4][i-i0])) + (sh_q_4_c0[j-j0+4+4][i-i0] - sh_q_4_c0[j-j0+4-4][i-i0])))) * dxinv1);
r2 -= (((((0.8f * ((reg_cons_2_p1 * reg_q_3_p1) - (reg_cons_2_m1 * reg_q_3_m1))) - (0.2f * ((reg_cons_2_p2 * reg_q_3_p2) - (reg_cons_2_m2 * reg_q_3_m2)))) + (0.038f * ((reg_cons_2_p3 * reg_q_3_p3) - (reg_cons_2_m3 * reg_q_3_m3)))) - (0.0035f * ((reg_cons_2_p4 * reg_q_3_p4) - (reg_cons_2_m4 * reg_q_3_m4)))) * dxinv2);
flux_2[k*M*N + j*N + i] = r2;
r3 -= (((((0.8f * ((sh_cons_3_c0[j-j0+4][i-i0+1] * sh_q_1_c0[j-j0+4][i-i0+1]) - (sh_cons_3_c0[j-j0+4][i-i0-1] * sh_q_1_c0[j-j0+4][i-i0-1]))) - (0.2f * ((sh_cons_3_c0[j-j0+4][i-i0+2] * sh_q_1_c0[j-j0+4][i-i0+2]) - (sh_cons_3_c0[j-j0+4][i-i0-2] * sh_q_1_c0[j-j0+4][i-i0-2])))) + (0.038f * ((sh_cons_3_c0[j-j0+4][i-i0+3] * sh_q_1_c0[j-j0+4][i-i0+3]) - (sh_cons_3_c0[j-j0+4][i-i0-3] * sh_q_1_c0[j-j0+4][i-i0-3])))) - (0.0035f * ((sh_cons_3_c0[j-j0+4][i-i0+4] * sh_q_1_c0[j-j0+4][i-i0+4]) - (sh_cons_3_c0[j-j0+4][i-i0-4] * sh_q_1_c0[j-j0+4][i-i0-4])))) * dxinv0);
r3 -= (((((0.8f * ((sh_cons_3_c0[j-j0+4+1][i-i0] * sh_q_2_c0[j-j0+4+1][i-i0]) - (sh_cons_3_c0[j-j0+4-1][i-i0] * sh_q_2_c0[j-j0+4-1][i-i0]))) - (0.2f * ((sh_cons_3_c0[j-j0+4+2][i-i0] * sh_q_2_c0[j-j0+4+2][i-i0]) - (sh_cons_3_c0[j-j0+4-2][i-i0] * sh_q_2_c0[j-j0+4-2][i-i0])))) + (0.038f * ((sh_cons_3_c0[j-j0+4+3][i-i0] * sh_q_2_c0[j-j0+4+3][i-i0]) - (sh_cons_3_c0[j-j0+4-3][i-i0] * sh_q_2_c0[j-j0+4-3][i-i0])))) - (0.0035f * ((sh_cons_3_c0[j-j0+4+4][i-i0] * sh_q_2_c0[j-j0+4+4][i-i0]) - (sh_cons_3_c0[j-j0+4-4][i-i0] * sh_q_2_c0[j-j0+4-4][i-i0])))) * dxinv1);
r3 -= (((((0.8f * (((reg_cons_3_p1 * reg_q_3_p1) - (reg_cons_3_m1 * reg_q_3_m1)) + (reg_q_4_p1 - reg_q_4_m1))) - (0.2f * (((reg_cons_3_p2 * reg_q_3_p2) - (reg_cons_3_m2 * reg_q_3_m2)) + (reg_q_4_p2 - reg_q_4_m2)))) + (0.038f * (((reg_cons_3_p3 * reg_q_3_p3) - (reg_cons_3_m3 * reg_q_3_m3)) + (reg_q_4_p3 - reg_q_4_m3)))) - (0.0035f * (((reg_cons_3_p4 * reg_q_3_p4) - (reg_cons_3_m4 * reg_q_3_m4)) + (reg_q_4_p4 - reg_q_4_m4)))) * dxinv2);
flux_3[k*M*N + j*N + i] = r3;
r4 -= (((((0.8f * (((sh_cons_4_c0[j-j0+4][i-i0+1] * sh_q_1_c0[j-j0+4][i-i0+1]) - (sh_cons_4_c0[j-j0+4][i-i0-1] * sh_q_1_c0[j-j0+4][i-i0-1])) + ((sh_q_4_c0[j-j0+4][i-i0+1] * sh_q_1_c0[j-j0+4][i-i0+1]) - (sh_q_4_c0[j-j0+4][i-i0-1] * sh_q_1_c0[j-j0+4][i-i0-1])))) - (0.2f * (((sh_cons_4_c0[j-j0+4][i-i0+2] * sh_q_1_c0[j-j0+4][i-i0+2]) - (sh_cons_4_c0[j-j0+4][i-i0-2] * sh_q_1_c0[j-j0+4][i-i0-2])) + ((sh_q_4_c0[j-j0+4][i-i0+2] * sh_q_1_c0[j-j0+4][i-i0+2]) - (sh_q_4_c0[j-j0+4][i-i0-2] * sh_q_1_c0[j-j0+4][i-i0-2]))))) + (0.038f * (((sh_cons_4_c0[j-j0+4][i-i0+3] * sh_q_1_c0[j-j0+4][i-i0+3]) - (sh_cons_4_c0[j-j0+4][i-i0-3] * sh_q_1_c0[j-j0+4][i-i0-3])) + ((sh_q_4_c0[j-j0+4][i-i0+3] * sh_q_1_c0[j-j0+4][i-i0+3]) - (sh_q_4_c0[j-j0+4][i-i0-3] * sh_q_1_c0[j-j0+4][i-i0-3]))))) - (0.0035f * (((sh_cons_4_c0[j-j0+4][i-i0+4] * sh_q_1_c0[j-j0+4][i-i0+4]) - (sh_cons_4_c0[j-j0+4][i-i0-4] * sh_q_1_c0[j-j0+4][i-i0-4])) + ((sh_q_4_c0[j-j0+4][i-i0+4] * sh_q_1_c0[j-j0+4][i-i0+4]) - (sh_q_4_c0[j-j0+4][i-i0-4] * sh_q_1_c0[j-j0+4][i-i0-4]))))) * dxinv0);
r4 -= (((((0.8f * (((sh_cons_4_c0[j-j0+4+1][i-i0] * sh_q_2_c0[j-j0+4+1][i-i0]) - (sh_cons_4_c0[j-j0+4-1][i-i0] * sh_q_2_c0[j-j0+4-1][i-i0])) + ((sh_q_4_c0[j-j0+4+1][i-i0] * sh_q_2_c0[j-j0+4+1][i-i0]) - (sh_q_4_c0[j-j0+4-1][i-i0] * sh_q_2_c0[j-j0+4-1][i-i0])))) - (0.2f * (((sh_cons_4_c0[j-j0+4+2][i-i0] * sh_q_2_c0[j-j0+4+2][i-i0]) - (sh_cons_4_c0[j-j0+4-2][i-i0] * sh_q_2_c0[j-j0+4-2][i-i0])) + ((sh_q_4_c0[j-j0+4+2][i-i0] * sh_q_2_c0[j-j0+4+2][i-i0]) - (sh_q_4_c0[j-j0+4-2][i-i0] * sh_q_2_c0[j-j0+4-2][i-i0]))))) + (0.038f * (((sh_cons_4_c0[j-j0+4+3][i-i0] * sh_q_2_c0[j-j0+4+3][i-i0]) - (sh_cons_4_c0[j-j0+4-3][i-i0] * sh_q_2_c0[j-j0+4-3][i-i0])) + ((sh_q_4_c0[j-j0+4+3][i-i0] * sh_q_2_c0[j-j0+4+3][i-i0]) - (sh_q_4_c0[j-j0+4-3][i-i0] * sh_q_2_c0[j-j0+4-3][i-i0]))))) - (0.0035f * (((sh_cons_4_c0[j-j0+4+4][i-i0] * sh_q_2_c0[j-j0+4+4][i-i0]) - (sh_cons_4_c0[j-j0+4-4][i-i0] * sh_q_2_c0[j-j0+4-4][i-i0])) + ((sh_q_4_c0[j-j0+4+4][i-i0] * sh_q_2_c0[j-j0+4+4][i-i0]) - (sh_q_4_c0[j-j0+4-4][i-i0] * sh_q_2_c0[j-j0+4-4][i-i0]))))) * dxinv1);
r4 -= (((((0.8f * (((reg_cons_4_p1 * reg_q_3_p1) - (reg_cons_4_m1 * reg_q_3_m1)) + ((reg_q_4_p1 * reg_q_3_p1) - (reg_q_4_m1 * reg_q_3_m1)))) - (0.2f * (((reg_cons_4_p2 * reg_q_3_p2) - (reg_cons_4_m2 * reg_q_3_m2)) + ((reg_q_4_p2 * reg_q_3_p2) - (reg_q_4_m2 * reg_q_3_m2))))) + (0.038f * (((reg_cons_4_p3 * reg_q_3_p3) - (reg_cons_4_m3 * reg_q_3_m3)) + ((reg_q_4_p3 * reg_q_3_p3) - (reg_q_4_m3 * reg_q_3_m3))))) - (0.0035f * (((reg_cons_4_p4 * reg_q_3_p4) - (reg_cons_4_m4 * reg_q_3_m4)) + ((reg_q_4_p4 * reg_q_3_p4) - (reg_q_4_m4 * reg_q_3_m4))))) * dxinv2);
flux_4[k*M*N + j*N + i] = r4;
}
__syncthreads ();
//Value rotation
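//Shift the register/shared-memory pipeline down by one plane so the next iteration only has to fetch the new k+4 plane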
if (j <= M-1 & i <= min (i0+blockdim_i-1, N-1)) {
reg_cons_1_m4 = reg_cons_1_m3;
reg_cons_1_m3 = reg_cons_1_m2;
reg_cons_1_m2 = reg_cons_1_m1;
reg_cons_1_m1 = sh_cons_1_c0[j-j0+4][i-i0];
sh_cons_1_c0[j-j0+4][i-i0] = reg_cons_1_p1;
reg_cons_1_p1 = reg_cons_1_p2;
reg_cons_1_p2 = reg_cons_1_p3;
reg_cons_1_p3 = reg_cons_1_p4;
reg_cons_2_m4 = reg_cons_2_m3;
reg_cons_2_m3 = reg_cons_2_m2;
reg_cons_2_m2 = reg_cons_2_m1;
reg_cons_2_m1 = sh_cons_2_c0[j-j0+4][i-i0];
sh_cons_2_c0[j-j0+4][i-i0] = reg_cons_2_p1;
reg_cons_2_p1 = reg_cons_2_p2;
reg_cons_2_p2 = reg_cons_2_p3;
reg_cons_2_p3 = reg_cons_2_p4;
reg_cons_3_m4 = reg_cons_3_m3;
reg_cons_3_m3 = reg_cons_3_m2;
reg_cons_3_m2 = reg_cons_3_m1;
reg_cons_3_m1 = sh_cons_3_c0[j-j0+4][i-i0];
sh_cons_3_c0[j-j0+4][i-i0] = reg_cons_3_p1;
reg_cons_3_p1 = reg_cons_3_p2;
reg_cons_3_p2 = reg_cons_3_p3;
reg_cons_3_p3 = reg_cons_3_p4;
reg_cons_4_m4 = reg_cons_4_m3;
reg_cons_4_m3 = reg_cons_4_m2;
reg_cons_4_m2 = reg_cons_4_m1;
reg_cons_4_m1 = sh_cons_4_c0[j-j0+4][i-i0];
sh_cons_4_c0[j-j0+4][i-i0] = reg_cons_4_p1;
reg_cons_4_p1 = reg_cons_4_p2;
reg_cons_4_p2 = reg_cons_4_p3;
reg_cons_4_p3 = reg_cons_4_p4;
reg_q_3_m4 = reg_q_3_m3;
reg_q_3_m3 = reg_q_3_m2;
reg_q_3_m2 = reg_q_3_m1;
reg_q_3_m1 = reg_q_3_c0;
reg_q_3_c0 = reg_q_3_p1;
reg_q_3_p1 = reg_q_3_p2;
reg_q_3_p2 = reg_q_3_p3;
reg_q_3_p3 = reg_q_3_p4;
reg_q_4_m4 = reg_q_4_m3;
reg_q_4_m3 = reg_q_4_m2;
reg_q_4_m2 = reg_q_4_m1;
reg_q_4_m1 = sh_q_4_c0[j-j0+4][i-i0];
sh_q_4_c0[j-j0+4][i-i0] = reg_q_4_p1;
reg_q_4_p1 = reg_q_4_p2;
reg_q_4_p2 = reg_q_4_p3;
reg_q_4_p3 = reg_q_4_p4;
}
}
}
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
cudaMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
cudaMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_1;
cudaMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
cudaMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_2;
cudaMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
cudaMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_3;
cudaMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
cudaMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_4;
cudaMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
cudaMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_1;
cudaMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
cudaMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_2;
cudaMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
cudaMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_3;
cudaMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
cudaMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_4;
cudaMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
cudaMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_1;
cudaMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
cudaMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_2;
cudaMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
cudaMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_3;
cudaMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
cudaMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_4;
cudaMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
cudaMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
dim3 blockconfig_1 (16, 16, 1);
dim3 gridconfig_1 (ceil(N, blockconfig_1.x-8), ceil(M-8, blockconfig_1.y), 1);
hypterm <<<gridconfig_1, blockconfig_1>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
cudaMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
//Free allocated memory
cudaFree (flux_0);
cudaFree (flux_1);
cudaFree (flux_2);
cudaFree (flux_3);
cudaFree (flux_4);
cudaFree (cons_1);
cudaFree (cons_2);
cudaFree (cons_3);
cudaFree (cons_4);
cudaFree (q_1);
cudaFree (q_2);
cudaFree (q_3);
cudaFree (q_4);
}
|
224336a27fd93f6b0f82592c5cd3b9e89597fe59.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <list>
#include <math.h>
#include <stdlib.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include "MD.h"
using namespace std;
template <class T, class posVecType>
inline T distance(const posVecType* position, const int i, const int j);
template <class T>
inline void insertInOrder(std::list<T>& currDist, std::list<int>& currList,
const int j, const T distIJ, const int maxNeighbors);
template <class T, class posVecType>
inline int buildNeighborList(const int nAtom, const posVecType* position,
int* neighborList);
template <class T>
inline int populateNeighborList(std::list<T>& currDist,
std::list<int>& currList, const int j, const int nAtom,
int* neighborList);
// ****************************************************************************
// Function: checkResults
//
// Purpose:
// Check device results against cpu results -- this is the CPU equivalent of
// the md kernel, so the device forces can be validated against a host recomputation.
//
// Arguments:
// d_force: forces calculated on the device
// position: positions of atoms
// neighList: atom neighbor list
// nAtom: number of atoms
// Returns: true if results match, false otherwise
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ****************************************************************************
template <class T, class forceVecType, class posVecType>
bool checkResults(forceVecType* d_force, posVecType *position,
int *neighList, int nAtom)
{
T max_error = 0;
for (int i = 0; i < nAtom; i++)
{
posVecType ipos = position[i];
forceVecType f = {0.f, 0.f, 0.f, 0.f};
int j = 0;
while (j < maxNeighbors)
{
int jidx = neighList[j*nAtom + i];
posVecType jpos = position[jidx];
// Calculate distance
T delx = ipos.x - jpos.x;
T dely = ipos.y - jpos.y;
T delz = ipos.z - jpos.z;
T r2inv = delx*delx + dely*dely + delz*delz;
// If distance is less than cutoff, calculate force
if (r2inv < cutsq) {
r2inv = 1.0f/r2inv;
T r6inv = r2inv * r2inv * r2inv;
T force = r2inv*r6inv*(lj1*r6inv - lj2);
f.x += delx * force;
f.y += dely * force;
f.z += delz * force;
}
j++;
}
// Check the maximum error when the floating-point results don't exactly match
assert(isnan(d_force[i].x) == false);
assert(isnan(d_force[i].y) == false);
assert(isnan(d_force[i].z) == false);
T fxe = fabs(f.x - d_force[i].x);
T fye = fabs(f.y - d_force[i].y);
T fze = fabs(f.z - d_force[i].z);
if (fxe > max_error) max_error = fxe;
if (fye > max_error) max_error = fye;
if (fze > max_error) max_error = fze;
}
std::cout << "Max error between host and device: " << max_error <<"\n";
return true;
}
__global__
void md ( const POSVECTYPE* position, FORCEVECTYPE* force, const int* neighborList,
const int nAtom, const int maxNeighbors,
const FPTYPE lj1_t, const FPTYPE lj2_t, const FPTYPE cutsq_t )
{
const uint idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= nAtom) return;
POSVECTYPE ipos = position[idx];
FORCEVECTYPE f = {0.0f, 0.0f, 0.0f, 0.0f};
int j = 0;
while (j < maxNeighbors)
{
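// The neighbor list is stored neighbor-major (j*nAtom + idx), so this load is coalesced across consecutive threads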
int jidx = neighborList[j*nAtom + idx];
// Uncoalesced read
POSVECTYPE jpos = position[jidx];
// Calculate distance
FPTYPE delx = ipos.x - jpos.x;
FPTYPE dely = ipos.y - jpos.y;
FPTYPE delz = ipos.z - jpos.z;
FPTYPE r2inv = delx*delx + dely*dely + delz*delz;
// If distance is less than cutoff, calculate force
if (r2inv < cutsq_t)
{
r2inv = (FPTYPE)1.0 / r2inv;
FPTYPE r6inv = r2inv * r2inv * r2inv;
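// Lennard-Jones 12-6 force term divided by r (lj1_t and lj2_t fold in the potential constants from MD.h); multiplying by the raw displacements below yields the force components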
FPTYPE forceC = r2inv * r6inv * (lj1_t * r6inv - lj2_t);
f.x += delx * forceC;
f.y += dely * forceC;
f.z += delz * forceC;
}
j++;
}
// store the results
force[idx] = f;
}
int main(int argc, char** argv)
{
if (argc != 3) {
printf("usage: %s <class size> <iteration>", argv[0]);
return 1;
}
// Problem Parameters
int sizeClass = atoi(argv[1]);
int iteration = atoi(argv[2]);
const int probSizes[] = { 12288, 24576, 36864, 73728 };
assert(sizeClass >= 0 && sizeClass < 4);
assert(iteration >= 0);
int nAtom = probSizes[sizeClass];
// Allocate problem data on host
POSVECTYPE* position;
FORCEVECTYPE* h_force;
int* neighborList;
size_t localSize = 256;
size_t globalSize = nAtom;
position = (POSVECTYPE*) malloc(nAtom * sizeof(POSVECTYPE));
h_force = (FORCEVECTYPE*) malloc(nAtom * sizeof(FORCEVECTYPE));
neighborList = (int*) malloc(maxNeighbors * nAtom * sizeof(int));
cout << "Initializing test problem (this can take several "
"minutes for large problems).\n ";
// Seed random number generator
srand48(8650341L);
// Initialize positions -- random distribution in cubic domain
for (int i = 0; i < nAtom; i++)
{
position[i].x = (drand48() * domainEdge);
position[i].y = (drand48() * domainEdge);
position[i].z = (drand48() * domainEdge);
}
cout << "Finished.\n";
int totalPairs = buildNeighborList<FPTYPE, POSVECTYPE>(nAtom, position, neighborList);
cout << totalPairs << " of " << nAtom*maxNeighbors <<
" pairs within cutoff distance = " <<
100.0 * ((double)totalPairs / (nAtom*maxNeighbors)) << " %\n";
// see MD.h
FPTYPE lj1_t = (FPTYPE) lj1;
FPTYPE lj2_t = (FPTYPE) lj2;
FPTYPE cutsq_t = (FPTYPE) cutsq;
POSVECTYPE* d_position;
FORCEVECTYPE* d_force;
int* d_neighborList;
hipMalloc((void**)&d_force, nAtom * sizeof(FORCEVECTYPE));
hipMalloc((void**)&d_position, nAtom * sizeof(POSVECTYPE));
hipMalloc((void**)&d_neighborList, nAtom * maxNeighbors * sizeof(int));
hipMemcpy(d_position, position, nAtom * sizeof(POSVECTYPE), hipMemcpyHostToDevice);
hipMemcpy(d_neighborList, neighborList, nAtom * maxNeighbors * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( md) , dim3(dim3((globalSize+localSize-1) / localSize)), dim3(dim3(localSize)) , 0, 0,
d_position, d_force, d_neighborList,
nAtom, maxNeighbors, lj1_t, lj2_t, cutsq_t);
hipMemcpy(h_force, d_force, nAtom * sizeof(FORCEVECTYPE), hipMemcpyDeviceToHost);
cout << "Performing Correctness Check (may take several minutes)\n";
checkResults<FPTYPE, FORCEVECTYPE, POSVECTYPE>(h_force, position, neighborList, nAtom);
for (int j = 0; j < iteration; j++)
{
//Launch Kernels
hipLaunchKernelGGL(( md) , dim3(dim3((globalSize+localSize-1) / localSize)), dim3(dim3(localSize)) , 0, 0,
d_position, d_force, d_neighborList, nAtom, maxNeighbors, lj1_t, lj2_t, cutsq_t);
}
hipDeviceSynchronize();
hipFree(d_position);
hipFree(d_force);
hipFree(d_neighborList);
free(position);
free(h_force);
free(neighborList);
return 0;
}
// ********************************************************
// Function: distance
//
// Purpose:
// Calculates distance squared between two atoms
//
// Arguments:
// position: atom position information
// i, j: indexes of the two atoms
//
// Returns: the computed distance
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ********************************************************
template <class T, class posVecType>
inline T distance(const posVecType* position, const int i, const int j)
{
posVecType ipos = position[i];
posVecType jpos = position[j];
T delx = ipos.x - jpos.x;
T dely = ipos.y - jpos.y;
T delz = ipos.z - jpos.z;
T r2inv = delx * delx + dely * dely + delz * delz;
return r2inv;
}
// ********************************************************
// Function: insertInOrder
//
// Purpose:
// Adds atom j to current neighbor list and distance list
// if its distance is low enough.
//
// Arguments:
// currDist: distance between current atom and each of its neighbors in the
// current list, sorted in ascending order
// currList: neighbor list for current atom, sorted by distance in asc. order
// j: atom to insert into neighbor list
// distIJ: distance between current atom and atom J
// maxNeighbors: max length of neighbor list
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ********************************************************
template <class T>
inline void insertInOrder(list<T>& currDist, list<int>& currList,
const int j, const T distIJ, const int maxNeighbors)
{
typename list<T>::iterator it;
typename list<int>::iterator it2;
it2 = currList.begin();
T currMax = currDist.back();
if (distIJ > currMax) return;
for (it=currDist.begin(); it!=currDist.end(); it++)
{
if (distIJ < (*it))
{
// Insert into appropriate place in list
currDist.insert(it,distIJ);
currList.insert(it2, j);
// Trim end of list
currList.resize(maxNeighbors);
currDist.resize(maxNeighbors);
return;
}
it2++;
}
}
// ********************************************************
// Function: buildNeighborList
//
// Purpose:
// Builds the neighbor list structure for all atoms for GPU coalesced reads
// and counts the number of pairs within the cutoff distance, so
// the benchmark gets an accurate FLOPS count
//
// Arguments:
// nAtom: total number of atoms
// position: pointer to the atom's position information
// neighborList: pointer to neighbor list data structure
//
// Returns: number of pairs of atoms within cutoff distance
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ********************************************************
template <class T, class posVecType>
inline int buildNeighborList(const int nAtom, const posVecType* position,
int* neighborList)
{
int totalPairs = 0;
// Build Neighbor List
// Find the nearest N atoms to each other atom, where N = maxNeighbors
for (int i = 0; i < nAtom; i++)
{
// Current neighbor list for atom i, initialized to -1
list<int> currList(maxNeighbors, -1);
// Distance to those neighbors. We're populating this with the
// closest neighbors, so initialize to FLT_MAX
list<T> currDist(maxNeighbors, FLT_MAX);
for (int j = 0; j < nAtom; j++)
{
if (i == j) continue; // An atom cannot be its own neighbor
// Calculate distance and insert in order into the current lists
T distIJ = distance<T, posVecType>(position, i, j);
insertInOrder<T>(currDist, currList, j, distIJ, maxNeighbors);
}
// We should now have the closest maxNeighbors neighbors and their
// distances to atom i. Populate the neighbor list data structure
// for GPU coalesced reads.
// The populate method returns how many of the maxNeighbors closest
// neighbors are within the cutoff distance. This will be used to
// calculate GFLOPS later.
totalPairs += populateNeighborList<T>(currDist, currList, i, nAtom,
neighborList);
}
return totalPairs;
}
// ********************************************************
// Function: populateNeighborList
//
// Purpose:
// Populates the neighbor list structure for a *single* atom for
// GPU coalesced reads and counts the number of pairs within the cutoff
// distance, (for current atom) so the benchmark gets an accurate FLOPS count
//
// Arguments:
// currDist: distance between current atom and each of its maxNeighbors
// neighbors
// currList: current list of neighbors
// i: current atom
// nAtom: total number of atoms
// neighborList: pointer to neighbor list data structure
//
// Returns: number of pairs of atoms within cutoff distance
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ********************************************************
template <class T>
inline int populateNeighborList(list<T>& currDist,
list<int>& currList, const int i, const int nAtom,
int* neighborList)
{
int idx = 0;
int validPairs = 0; // Pairs of atoms closer together than the cutoff
// Iterate across distance and neighbor list
typename list<T>::iterator distanceIter = currDist.begin();
for (list<int>::iterator neighborIter = currList.begin();
neighborIter != currList.end(); neighborIter++)
{
// Populate packed neighbor list
neighborList[(idx * nAtom) + i] = *neighborIter;
// If the distance is less than cutoff, increment valid counter
if (*distanceIter < cutsq)
validPairs++;
// Increment idx and distance iterator
idx++;
distanceIter++;
}
return validPairs;
}
| 224336a27fd93f6b0f82592c5cd3b9e89597fe59.cu | #include <cassert>
#include <cfloat>
#include <list>
#include <math.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>
#include "MD.h"
using namespace std;
template <class T, class posVecType>
inline T distance(const posVecType* position, const int i, const int j);
template <class T>
inline void insertInOrder(std::list<T>& currDist, std::list<int>& currList,
const int j, const T distIJ, const int maxNeighbors);
template <class T, class posVecType>
inline int buildNeighborList(const int nAtom, const posVecType* position,
int* neighborList);
template <class T>
inline int populateNeighborList(std::list<T>& currDist,
std::list<int>& currList, const int j, const int nAtom,
int* neighborList);
// ****************************************************************************
// Function: checkResults
//
// Purpose:
// Check device results against cpu results -- this is the CPU equivalent of
// the md kernel, so the device forces can be validated against a host recomputation.
//
// Arguments:
// d_force: forces calculated on the device
// position: positions of atoms
// neighList: atom neighbor list
// nAtom: number of atoms
// Returns: true if results match, false otherwise
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ****************************************************************************
template <class T, class forceVecType, class posVecType>
bool checkResults(forceVecType* d_force, posVecType *position,
int *neighList, int nAtom)
{
T max_error = 0;
for (int i = 0; i < nAtom; i++)
{
posVecType ipos = position[i];
forceVecType f = {0.f, 0.f, 0.f, 0.f};
int j = 0;
while (j < maxNeighbors)
{
int jidx = neighList[j*nAtom + i];
posVecType jpos = position[jidx];
// Calculate distance
T delx = ipos.x - jpos.x;
T dely = ipos.y - jpos.y;
T delz = ipos.z - jpos.z;
T r2inv = delx*delx + dely*dely + delz*delz;
// If distance is less than cutoff, calculate force
if (r2inv < cutsq) {
r2inv = 1.0f/r2inv;
T r6inv = r2inv * r2inv * r2inv;
T force = r2inv*r6inv*(lj1*r6inv - lj2);
f.x += delx * force;
f.y += dely * force;
f.z += delz * force;
}
j++;
}
        // Check the maximum error when the floating-point results don't exactly match
assert(isnan(d_force[i].x) == false);
assert(isnan(d_force[i].y) == false);
assert(isnan(d_force[i].z) == false);
T fxe = fabs(f.x - d_force[i].x);
T fye = fabs(f.y - d_force[i].y);
T fze = fabs(f.z - d_force[i].z);
if (fxe > max_error) max_error = fxe;
if (fye > max_error) max_error = fye;
if (fze > max_error) max_error = fze;
}
std::cout << "Max error between host and device: " << max_error <<"\n";
return true;
}
__global__
void md ( const POSVECTYPE* position, FORCEVECTYPE* force, const int* neighborList,
const int nAtom, const int maxNeighbors,
const FPTYPE lj1_t, const FPTYPE lj2_t, const FPTYPE cutsq_t )
{
const uint idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= nAtom) return;
POSVECTYPE ipos = position[idx];
FORCEVECTYPE f = {0.0f, 0.0f, 0.0f, 0.0f};
int j = 0;
while (j < maxNeighbors)
{
int jidx = neighborList[j*nAtom + idx];
// Uncoalesced read
POSVECTYPE jpos = position[jidx];
// Calculate distance
FPTYPE delx = ipos.x - jpos.x;
FPTYPE dely = ipos.y - jpos.y;
FPTYPE delz = ipos.z - jpos.z;
FPTYPE r2inv = delx*delx + dely*dely + delz*delz;
// If distance is less than cutoff, calculate force
if (r2inv < cutsq_t)
{
r2inv = (FPTYPE)1.0 / r2inv;
FPTYPE r6inv = r2inv * r2inv * r2inv;
FPTYPE forceC = r2inv * r6inv * (lj1_t * r6inv - lj2_t);
f.x += delx * forceC;
f.y += dely * forceC;
f.z += delz * forceC;
}
j++;
}
// store the results
force[idx] = f;
}
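// Note (added for clarity): checkResults() above and the md kernel use the same
// Lennard-Jones pair term,
//     force = (1/r^2) * (1/r^6) * (lj1/r^6 - lj2)
// which is the force magnitude divided by r, so f += del * force accumulates the
// Cartesian components directly. Assuming the conventional definitions in MD.h
// (not shown here), lj1 = 48*eps*sigma^12 and lj2 = 24*eps*sigma^6, this is
//     F(r)/r = 48*eps*sigma^12/r^14 - 24*eps*sigma^6/r^8 = -(dU/dr)/r
// for U(r) = 4*eps*((sigma/r)^12 - (sigma/r)^6).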
int main(int argc, char** argv)
{
if (argc != 3) {
printf("usage: %s <class size> <iteration>", argv[0]);
return 1;
}
// Problem Parameters
int sizeClass = atoi(argv[1]);
int iteration = atoi(argv[2]);
const int probSizes[] = { 12288, 24576, 36864, 73728 };
assert(sizeClass >= 0 && sizeClass < 4);
assert(iteration >= 0);
int nAtom = probSizes[sizeClass];
// Allocate problem data on host
POSVECTYPE* position;
FORCEVECTYPE* h_force;
int* neighborList;
size_t localSize = 256;
size_t globalSize = nAtom;
position = (POSVECTYPE*) malloc(nAtom * sizeof(POSVECTYPE));
h_force = (FORCEVECTYPE*) malloc(nAtom * sizeof(FORCEVECTYPE));
neighborList = (int*) malloc(maxNeighbors * nAtom * sizeof(int));
cout << "Initializing test problem (this can take several "
"minutes for large problems).\n ";
// Seed random number generator
srand48(8650341L);
// Initialize positions -- random distribution in cubic domain
for (int i = 0; i < nAtom; i++)
{
position[i].x = (drand48() * domainEdge);
position[i].y = (drand48() * domainEdge);
position[i].z = (drand48() * domainEdge);
}
cout << "Finished.\n";
int totalPairs = buildNeighborList<FPTYPE, POSVECTYPE>(nAtom, position, neighborList);
cout << totalPairs << " of " << nAtom*maxNeighbors <<
" pairs within cutoff distance = " <<
100.0 * ((double)totalPairs / (nAtom*maxNeighbors)) << " %\n";
// see MD.h
FPTYPE lj1_t = (FPTYPE) lj1;
FPTYPE lj2_t = (FPTYPE) lj2;
FPTYPE cutsq_t = (FPTYPE) cutsq;
POSVECTYPE* d_position;
FORCEVECTYPE* d_force;
int* d_neighborList;
cudaMalloc((void**)&d_force, nAtom * sizeof(FORCEVECTYPE));
cudaMalloc((void**)&d_position, nAtom * sizeof(POSVECTYPE));
cudaMalloc((void**)&d_neighborList, nAtom * maxNeighbors * sizeof(int));
cudaMemcpy(d_position, position, nAtom * sizeof(POSVECTYPE), cudaMemcpyHostToDevice);
cudaMemcpy(d_neighborList, neighborList, nAtom * maxNeighbors * sizeof(int), cudaMemcpyHostToDevice);
md <<< dim3((globalSize+localSize-1) / localSize), dim3(localSize) >>> (
d_position, d_force, d_neighborList,
nAtom, maxNeighbors, lj1_t, lj2_t, cutsq_t);
cudaMemcpy(h_force, d_force, nAtom * sizeof(FORCEVECTYPE), cudaMemcpyDeviceToHost);
cout << "Performing Correctness Check (may take several minutes)\n";
checkResults<FPTYPE, FORCEVECTYPE, POSVECTYPE>(h_force, position, neighborList, nAtom);
for (int j = 0; j < iteration; j++)
{
//Launch Kernels
md <<< dim3((globalSize+localSize-1) / localSize), dim3(localSize) >>> (
d_position, d_force, d_neighborList, nAtom, maxNeighbors, lj1_t, lj2_t, cutsq_t);
}
cudaDeviceSynchronize();
cudaFree(d_position);
cudaFree(d_force);
cudaFree(d_neighborList);
free(position);
free(h_force);
free(neighborList);
return 0;
}
// ********************************************************
// Function: distance
//
// Purpose:
// Calculates distance squared between two atoms
//
// Arguments:
// position: atom position information
// i, j: indexes of the two atoms
//
// Returns: the computed distance
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ********************************************************
template <class T, class posVecType>
inline T distance(const posVecType* position, const int i, const int j)
{
posVecType ipos = position[i];
posVecType jpos = position[j];
T delx = ipos.x - jpos.x;
T dely = ipos.y - jpos.y;
T delz = ipos.z - jpos.z;
T r2inv = delx * delx + dely * dely + delz * delz;
return r2inv;
}
// ********************************************************
// Function: insertInOrder
//
// Purpose:
// Adds atom j to current neighbor list and distance list
//  if its distance is low enough.
//
// Arguments:
// currDist: distance between current atom and each of its neighbors in the
// current list, sorted in ascending order
// currList: neighbor list for current atom, sorted by distance in asc. order
// j: atom to insert into neighbor list
// distIJ: distance between current atom and atom J
// maxNeighbors: max length of neighbor list
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ********************************************************
template <class T>
inline void insertInOrder(list<T>& currDist, list<int>& currList,
const int j, const T distIJ, const int maxNeighbors)
{
typename list<T>::iterator it;
typename list<int>::iterator it2;
it2 = currList.begin();
T currMax = currDist.back();
if (distIJ > currMax) return;
for (it=currDist.begin(); it!=currDist.end(); it++)
{
if (distIJ < (*it))
{
// Insert into appropriate place in list
currDist.insert(it,distIJ);
currList.insert(it2, j);
// Trim end of list
currList.resize(maxNeighbors);
currDist.resize(maxNeighbors);
return;
}
it2++;
}
}
// ********************************************************
// Function: buildNeighborList
//
// Purpose:
// Builds the neighbor list structure for all atoms for GPU coalesced reads
// and counts the number of pairs within the cutoff distance, so
// the benchmark gets an accurate FLOPS count
//
// Arguments:
// nAtom: total number of atoms
// position: pointer to the atom's position information
// neighborList: pointer to neighbor list data structure
//
// Returns: number of pairs of atoms within cutoff distance
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ********************************************************
template <class T, class posVecType>
inline int buildNeighborList(const int nAtom, const posVecType* position,
int* neighborList)
{
int totalPairs = 0;
// Build Neighbor List
// Find the nearest N atoms to each other atom, where N = maxNeighbors
for (int i = 0; i < nAtom; i++)
{
// Current neighbor list for atom i, initialized to -1
list<int> currList(maxNeighbors, -1);
// Distance to those neighbors. We're populating this with the
// closest neighbors, so initialize to FLT_MAX
list<T> currDist(maxNeighbors, FLT_MAX);
for (int j = 0; j < nAtom; j++)
{
if (i == j) continue; // An atom cannot be its own neighbor
// Calculate distance and insert in order into the current lists
T distIJ = distance<T, posVecType>(position, i, j);
insertInOrder<T>(currDist, currList, j, distIJ, maxNeighbors);
}
// We should now have the closest maxNeighbors neighbors and their
// distances to atom i. Populate the neighbor list data structure
// for GPU coalesced reads.
// The populate method returns how many of the maxNeighbors closest
// neighbors are within the cutoff distance. This will be used to
// calculate GFLOPS later.
totalPairs += populateNeighborList<T>(currDist, currList, i, nAtom,
neighborList);
}
return totalPairs;
}
// ********************************************************
// Function: populateNeighborList
//
// Purpose:
//  Populates the neighbor list structure for a *single* atom for
//  GPU coalesced reads and counts the number of pairs (for the current atom)
//  within the cutoff distance, so the benchmark gets an accurate FLOPS count
//
// Arguments:
// currDist: distance between current atom and each of its maxNeighbors
// neighbors
// currList: current list of neighbors
// i: current atom
// nAtom: total number of atoms
// neighborList: pointer to neighbor list data structure
//
// Returns: number of pairs of atoms within cutoff distance
//
// Programmer: Kyle Spafford
// Creation: July 26, 2010
//
// Modifications:
//
// ********************************************************
template <class T>
inline int populateNeighborList(list<T>& currDist,
list<int>& currList, const int i, const int nAtom,
int* neighborList)
{
int idx = 0;
int validPairs = 0; // Pairs of atoms closer together than the cutoff
// Iterate across distance and neighbor list
typename list<T>::iterator distanceIter = currDist.begin();
for (list<int>::iterator neighborIter = currList.begin();
neighborIter != currList.end(); neighborIter++)
{
// Populate packed neighbor list
neighborList[(idx * nAtom) + i] = *neighborIter;
// If the distance is less than cutoff, increment valid counter
if (*distanceIter < cutsq)
validPairs++;
// Increment idx and distance iterator
idx++;
distanceIter++;
}
return validPairs;
}
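// Layout note (added for clarity): the "packed" neighbor list written above is
// stored column-major, i.e. neighbor slot j of atom i lives at
// neighborList[j*nAtom + i]. In the md kernel each thread idx reads
// neighborList[j*nAtom + idx] for a fixed j, so consecutive threads touch
// consecutive addresses and the load coalesces. For example:
//   neighborList[0*nAtom + 2] = 7;   // atom 2's closest neighbor is atom 7
//   neighborList[1*nAtom + 2] = 9;   // atom 2's second-closest neighbor is atom 9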
|
194948f1e96d57993cb826ad022a624931069f26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define THREADS 64
// from http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void device_global(unsigned int *input_array, int num_elements) {
int my_index = blockIdx.x * blockDim.x + threadIdx.x;
// all threads write a value to the array
input_array[my_index] = my_index - (my_index%2);
__syncthreads(); // all initial values are written
// all threads grab a value from the array
// we know this will always be in bounds
int new_index = input_array[my_index];
__syncthreads(); // all values are read
// use the values to write to the array, a write-write race
input_array[new_index] = my_index;
}
int main(void) {
// how big our array for interfacing with the GPU will be
int num_elements = THREADS;
int num_bytes = sizeof(unsigned int) * num_elements;
// pointers for the interfacing arrays
unsigned int *host_array = 0;
unsigned int *device_array = 0;
// malloc for host and device
host_array = (unsigned int*) malloc(num_bytes);
hipMalloc((void **) &device_array, num_bytes);
// check the mallocs
if (host_array == 0) {
printf("Unable to allocate memory on host");
return 1;
}
if (device_array == 0) {
printf("Unable to allocate memory on device");
return 1;
}
// set host array values
for (int i = 0; i<num_elements; i++) {
host_array[i] = 777777;
}
// copy them to the GPU
hipMemcpy(device_array, host_array, num_bytes, hipMemcpyHostToDevice);
// define block and grid sizes
int block_size = THREADS;
int grid_size = (num_elements + block_size - 1) / block_size;
// run GPU code
hipLaunchKernelGGL(( device_global), dim3(grid_size), dim3(block_size), 0, 0, device_array, num_elements);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// copy output to host
hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost);
// print any information
for (int i=0; i<num_elements; i++) {
printf("%6u, ", host_array[i]);
if (i%10 == 9) {
printf(" \n");
}
}
printf("\n");
// free memory
free(host_array);
hipFree(device_array);
}
| 194948f1e96d57993cb826ad022a624931069f26.cu | #include <stdio.h>
#define THREADS 64
// from http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void device_global(unsigned int *input_array, int num_elements) {
int my_index = blockIdx.x * blockDim.x + threadIdx.x;
// all threads write a value to the array
input_array[my_index] = my_index - (my_index%2);
__syncthreads(); // all initial values are written
// all threads grab a value from the array
// we know this will always be in bounds
int new_index = input_array[my_index];
__syncthreads(); // all values are read
// use the values to write to the array, a write-write race
input_array[new_index] = my_index;
}
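// Illustrative sketch (added for clarity; not called by main below): the kernel
// above deliberately contains a write-write race, because several threads can
// compute the same new_index. One way to make the scattered write well-defined
// is to route it through an atomic exchange into a separate buffer. The kernel
// name and the output_array parameter are assumptions introduced only for this
// sketch; it assumes input_array already holds indices in [0, num_elements).
__global__ void device_global_no_race(const unsigned int *input_array,
                                       unsigned int *output_array,
                                       int num_elements) {
    int my_index = blockIdx.x * blockDim.x + threadIdx.x;
    if (my_index >= num_elements) return;
    // read the scatter target (written by the first phase of the kernel above)
    int new_index = input_array[my_index];
    // atomicExch serializes conflicting writes to the same word: which thread
    // wins is still unspecified, but race-detection tools no longer flag a hazard
    atomicExch(&output_array[new_index], (unsigned int) my_index);
}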
int main(void) {
// how big our array for interfacing with the GPU will be
int num_elements = THREADS;
int num_bytes = sizeof(unsigned int) * num_elements;
// pointers for the interfacing arrays
unsigned int *host_array = 0;
unsigned int *device_array = 0;
// malloc for host and device
host_array = (unsigned int*) malloc(num_bytes);
cudaMalloc((void **) &device_array, num_bytes);
// check the mallocs
if (host_array == 0) {
printf("Unable to allocate memory on host");
return 1;
}
if (device_array == 0) {
printf("Unable to allocate memory on device");
return 1;
}
// set host array values
for (int i = 0; i<num_elements; i++) {
host_array[i] = 777777;
}
// copy them to the GPU
cudaMemcpy(device_array, host_array, num_bytes, cudaMemcpyHostToDevice);
// define block and grid sizes
int block_size = THREADS;
int grid_size = (num_elements + block_size - 1) / block_size;
// run GPU code
device_global<<<grid_size, block_size>>>(device_array, num_elements);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// copy output to host
cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);
// print any information
for (int i=0; i<num_elements; i++) {
printf("%6u, ", host_array[i]);
if (i%10 == 9) {
printf(" \n");
}
}
printf("\n");
// free memory
free(host_array);
cudaFree(device_array);
}
|
865bf91fd202846ebdeadca2d69136f769d8aa18.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "common.h"
void checkResult(float *hostRef, float *gpuRef, const int N) {
double epsilon = 1.0E-8;
bool match = true;
for (int i = 0; i < N; ++i) {
if (abs(hostRef[i] - gpuRef[i]) > epsilon) {
match = false;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i);
break;
}
}
if (match == true) {
printf("Arrays match.\n\n");
}
}
void initialData(float *data, int size) {
// Generate different seed for random number
time_t t;
srand((unsigned int)time(&t));
for (int i = 0; i < size; ++i) {
data[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
for (int i = 0; i < N; ++i) {
C[i] = A[i] + B[i];
}
return;
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] + B[i];
return;
}
int main(int argc, char *argv[]) {
printf("%s Starting...\n", argv[0]);
// set up device
hipSetDevice(0);
// set up data size of vectors
int nElem = 32;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A, nElem);
initialData(h_B, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// malloc device global memory
float *d_A, *d_B, *d_C;
hipMalloc(&d_A, nBytes);
hipMalloc(&d_B, nBytes);
hipMalloc(&d_C, nBytes);
// transfer data from host to device
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
// invoke kernel at host side
dim3 block(1);
dim3 grid((nElem + block.x - 1) / block.x);
hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C);
printf("Execution configuration <<<%d, %d>>>\n", grid.x, block.x);
// copy kernel result back to host side
hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
// add vector at host side for result checks
sumArraysOnHost(h_A, h_B, hostRef, nElem);
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return 0;
}
| 865bf91fd202846ebdeadca2d69136f769d8aa18.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include "common.h"
void checkResult(float *hostRef, float *gpuRef, const int N) {
double epsilon = 1.0E-8;
bool match = true;
for (int i = 0; i < N; ++i) {
if (abs(hostRef[i] - gpuRef[i]) > epsilon) {
match = false;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i);
break;
}
}
if (match == true) {
printf("Arrays match.\n\n");
}
}
void initialData(float *data, int size) {
// Generate different seed for random number
time_t t;
srand((unsigned int)time(&t));
for (int i = 0; i < size; ++i) {
data[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
for (int i = 0; i < N; ++i) {
C[i] = A[i] + B[i];
}
return;
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] + B[i];
return;
}
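// Illustrative sketch (added for clarity; not used by main below): with
// nElem = 32 and block(1) every launched thread is in range, but in the general
// case the last block may contain excess threads, so the usual idiom adds a
// bounds check. The name sumArraysOnGPUGuarded is an assumption introduced only
// for this sketch.
__global__ void sumArraysOnGPUGuarded(float *A, float *B, float *C, const int N) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // threads with i >= N simply fall through and do nothing
    if (i < N) {
        C[i] = A[i] + B[i];
    }
}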
int main(int argc, char *argv[]) {
printf("%s Starting...\n", argv[0]);
// set up device
cudaSetDevice(0);
// set up data size of vectors
int nElem = 32;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A, nElem);
initialData(h_B, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// malloc device global memory
float *d_A, *d_B, *d_C;
cudaMalloc(&d_A, nBytes);
cudaMalloc(&d_B, nBytes);
cudaMalloc(&d_C, nBytes);
// transfer data from host to device
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
// invoke kernel at host side
dim3 block(1);
dim3 grid((nElem + block.x - 1) / block.x);
sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_C);
printf("Execution configuration <<<%d, %d>>>\n", grid.x, block.x);
// copy kernel result back to host side
cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// add vector at host side for result checks
sumArraysOnHost(h_A, h_B, hostRef, nElem);
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return 0;
}
|
2f5365b5347cc10ff439292a1facba2b8c701cc4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: sbarr
#include "PPPMForceGPU.cuh"
#include "TextureTools.h"
#include <iostream>
using namespace std;
#include <assert.h>
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#ifdef __DEVICE_EMULATION__
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC
#endif
// There are several functions here that are dependent on precision:
// __scalar2int_rd is __float2int_rd in single, __double2int_rd in double
// CUFFTCOMPLEX is hipfftComplex in single, hipfftDoubleComplex in double
// CUFFTEXEC is hipfftExecC2C in single, hipfftExecZ2Z in double
#ifdef SINGLE_PRECISION
#define __scalar2int_rd __float2int_rd
#define CUFFTEXEC hipfftExecC2C
#else
#define __scalar2int_rd __double2int_rd
#define CUFFTEXEC hipfftExecZ2Z
#endif
#define MAX_BLOCK_DIM_SIZE 65535
// Constant memory for gridpoint weighting
#define CONSTANT_SIZE 2048
//! The developer has chosen not to document this variable
__device__ __constant__ Scalar GPU_rho_coeff[CONSTANT_SIZE];
/*! \file PPPMForceGPU.cu
\brief Defines GPU kernel code for calculating the Fourier space forces for the Coulomb interaction. Used by PPPMForceComputeGPU.
*/
//! Texture for reading particle positions
scalar4_tex_t pdata_pos_tex;
//! Texture for reading charge parameters
scalar_tex_t pdata_charge_tex;
//! GPU implementation of sinc(x)==sin(x)/x
__device__ Scalar gpu_sinc(Scalar x)
{
Scalar sinc = 0;
//! Coefficients of a power expansion of sin(x)/x
const Scalar sinc_coeff[] = {Scalar(1.0), Scalar(-1.0/6.0), Scalar(1.0/120.0),
Scalar(-1.0/5040.0),Scalar(1.0/362880.0),
Scalar(-1.0/39916800.0)};
if (x*x <= Scalar(1.0))
{
Scalar term = Scalar(1.0);
for (unsigned int i = 0; i < 6; ++i)
{
sinc += sinc_coeff[i] * term;
term *= x*x;
}
}
else
{
sinc = fast::sin(x)/x;
}
return sinc;
}
#ifndef SINGLE_PRECISION
//! atomicAdd function for double-precision floating point numbers
/*! This function is only used when hoomd is compiled for double precision on the GPU.
\param address Address to write the double to
\param val Value to add to address
*/
static __device__ inline double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
//! The developer has chosen not to document this function
__device__ inline void AddToGridpoint(int X, int Y, int Z, CUFFTCOMPLEX* array, Scalar value, int Ny, int Nz)
{
atomicAdd(&array[Z + Nz * (Y + Ny * X)].x, value);
}
//! The developer has chosen not to document this function
extern "C" __global__
void assign_charges_to_grid_kernel(const unsigned int N,
const Scalar4 *d_pos,
const Scalar *d_charge,
BoxDim box,
CUFFTCOMPLEX *rho_real_space,
int Nx,
int Ny,
int Nz,
int order,
unsigned int *d_group_members,
unsigned int group_size)
{
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
//get particle information
Scalar qi = texFetchScalar(d_charge, pdata_charge_tex, idx);
if(fabs(qi) > Scalar(0.0)) {
Scalar4 postypei = texFetchScalar4(d_pos, pdata_pos_tex, idx);
Scalar3 posi = make_scalar3(postypei.x,postypei.y,postypei.z);
//calculate dx, dy, dz for the charge density grid:
Scalar V_cell = box.getVolume()/(Scalar)(Nx*Ny*Nz);
//normalize position to gridsize:
Scalar3 pos_frac = box.makeFraction(posi);
pos_frac.x *= (Scalar)Nx;
pos_frac.y *= (Scalar)Ny;
pos_frac.z *= (Scalar)Nz;
Scalar shift, shiftone, x0, y0, z0, dx, dy, dz;
int nlower, nupper, mx, my, mz, nxi, nyi, nzi;
nlower = -(order-1)/2;
nupper = order/2;
if (order % 2)
{
shift =Scalar(0.5);
shiftone = Scalar(0.0);
}
else
{
shift = Scalar(0.0);
shiftone = Scalar(0.5);
}
nxi = __scalar2int_rd(pos_frac.x + shift);
nyi = __scalar2int_rd(pos_frac.y + shift);
nzi = __scalar2int_rd(pos_frac.z + shift);
if (nxi < 0 || nxi >= Nx || nyi < 0 || nyi >= Ny || nzi < 0 || nzi >= Nz)
{
// ignore
return;
}
dx = shiftone+(Scalar)nxi-pos_frac.x;
dy = shiftone+(Scalar)nyi-pos_frac.y;
dz = shiftone+(Scalar)nzi-pos_frac.z;
int n,m,l,k;
Scalar result;
int mult_fact = 2*order+1;
x0 = qi / V_cell;
for (n = nlower; n <= nupper; n++) {
mx = n+nxi;
if(mx >= Nx) mx -= Nx;
if(mx < 0) mx += Nx;
result = Scalar(0.0);
for (k = order-1; k >= 0; k--) {
result = GPU_rho_coeff[n-nlower + k*mult_fact] + result * dx;
}
y0 = x0*result;
for (m = nlower; m <= nupper; m++) {
my = m+nyi;
if(my >= Ny) my -= Ny;
if(my < 0) my += Ny;
result = Scalar(0.0);
for (k = order-1; k >= 0; k--) {
result = GPU_rho_coeff[m-nlower + k*mult_fact] + result * dy;
}
z0 = y0*result;
for (l = nlower; l <= nupper; l++) {
mz = l+nzi;
if(mz >= Nz) mz -= Nz;
if(mz < 0) mz += Nz;
result = Scalar(0.0);
for (k = order-1; k >= 0; k--) {
result = GPU_rho_coeff[l-nlower + k*mult_fact] + result * dz;
}
AddToGridpoint(mx, my, mz, rho_real_space, z0*result, Ny, Nz);
}
}
}
}
}
}
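// Note (added for clarity): the inner k-loops above evaluate the per-axis
// charge-assignment weight by Horner's rule. With c_k = GPU_rho_coeff[n-nlower + k*(2*order+1)],
// running k from order-1 down to 0 in result = c_k + result*dx yields
//     W_n(dx) = c_0 + c_1*dx + c_2*dx^2 + ... + c_{order-1}*dx^(order-1),
// one polynomial per grid-point offset n in [nlower, nupper]. The product of the
// x, y and z weights, scaled by q_i/V_cell, is what AddToGridpoint() atomically
// accumulates into the charge mesh.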
//! The developer has chosen not to document this function
extern "C" __global__
void combined_green_e_kernel(CUFFTCOMPLEX* E_x,
CUFFTCOMPLEX* E_y,
CUFFTCOMPLEX* E_z,
Scalar3* k_vec,
CUFFTCOMPLEX* rho,
int Nx,
int Ny,
int Nz,
Scalar* green_function)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < Nx * Ny * Nz)
{
Scalar3 k_vec_local = k_vec[idx];
CUFFTCOMPLEX E_x_local, E_y_local, E_z_local;
Scalar scale_times_green = green_function[idx] / ((Scalar)(Nx*Ny*Nz));
CUFFTCOMPLEX rho_local = rho[idx];
rho_local.x *= scale_times_green;
rho_local.y *= scale_times_green;
E_x_local.x = k_vec_local.x * rho_local.y;
E_x_local.y = -k_vec_local.x * rho_local.x;
E_y_local.x = k_vec_local.y * rho_local.y;
E_y_local.y = -k_vec_local.y * rho_local.x;
E_z_local.x = k_vec_local.z * rho_local.y;
E_z_local.y = -k_vec_local.z * rho_local.x;
E_x[idx] = E_x_local;
E_y[idx] = E_y_local;
E_z[idx] = E_z_local;
}
}
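// Note (added for clarity): in Fourier space the kernel above forms
//     E(k) = -i * k * G_hat(k) * rho_hat(k) / (Nx*Ny*Nz),
// where the 1/(Nx*Ny*Nz) factor undoes the unnormalized FFT. Writing the scaled
// density as a + i*b, multiplication by -i*k gives
//     -i*k*(a + i*b) = k*b - i*k*a,
// which is exactly the component-wise assignment E_x.x = k.x*rho.y and
// E_x.y = -k.x*rho.x (and likewise for y and z) performed above.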
//! The developer has chosen not to document this function
__global__ void set_gpu_field_kernel(CUFFTCOMPLEX* E_x,
CUFFTCOMPLEX* E_y,
CUFFTCOMPLEX* E_z,
Scalar3* Electric_field,
int Nx,
int Ny,
int Nz)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < Nx * Ny * Nz)
{
Scalar3 local_field;
local_field.x = E_x[tid].x;
local_field.y = E_y[tid].x;
local_field.z = E_z[tid].x;
Electric_field[tid] = local_field;
}
}
//! The developer has chosen not to document this function
__global__
void zero_forces(Scalar4 *d_force, Scalar *d_virial, const unsigned int virial_pitch, const unsigned int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
d_force[idx] = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
for (unsigned int i = 0; i < 6; i++)
d_virial[i*virial_pitch+idx] = Scalar(0.0);
}
}
//! The developer has chosen not to document this function
extern "C" __global__
void calculate_forces_kernel(Scalar4 *d_force,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar *d_charge,
BoxDim box,
Scalar3 *E_field,
int Nx,
int Ny,
int Nz,
int order,
unsigned int *d_group_members,
unsigned int group_size)
{
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
//get particle information
Scalar qi = texFetchScalar(d_charge, pdata_charge_tex, idx);
if(fabs(qi) > Scalar(0.0)) {
Scalar4 posi = texFetchScalar4(d_pos, pdata_pos_tex, idx);
//calculate dx, dy, dz for the charge density grid:
Scalar V_cell = box.getVolume()/(Scalar)(Nx*Ny*Nz);
//normalize position to gridsize:
Scalar3 pos_frac = box.makeFraction(make_scalar3(posi.x, posi.y, posi.z));
pos_frac.x *= (Scalar)Nx;
pos_frac.y *= (Scalar)Ny;
pos_frac.z *= (Scalar)Nz;
Scalar shift, shiftone, x0, y0, z0, dx, dy, dz;
int nlower, nupper, mx, my, mz, nxi, nyi, nzi;
nlower = -(order-1)/2;
nupper = order/2;
Scalar4 local_force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
if(order % 2)
{
shift =Scalar(0.5);
shiftone = Scalar(0.0);
}
else
{
shift = Scalar(0.0);
shiftone = Scalar(0.5);
}
nxi = __scalar2int_rd(pos_frac.x + shift);
nyi = __scalar2int_rd(pos_frac.y + shift);
nzi = __scalar2int_rd(pos_frac.z + shift);
if (nxi < 0 || nxi >= Nx || nyi < 0 || nyi >= Ny || nzi < 0 || nzi >= Nz)
{
// ignore
return;
}
dx = shiftone+(Scalar)nxi-pos_frac.x;
dy = shiftone+(Scalar)nyi-pos_frac.y;
dz = shiftone+(Scalar)nzi-pos_frac.z;
int n,m,l,k;
Scalar result;
int mult_fact = 2*order+1;
for (n = nlower; n <= nupper; n++) {
mx = n+nxi;
if(mx >= Nx) mx -= Nx;
if(mx < 0) mx += Nx;
result = Scalar(0.0);
for (k = order-1; k >= 0; k--) {
result = GPU_rho_coeff[n-nlower + k*mult_fact] + result * dx;
}
x0 = result;
for (m = nlower; m <= nupper; m++) {
my = m+nyi;
if(my >= Ny) my -= Ny;
if(my < 0) my += Ny;
result = Scalar(0.0);
for (k = order-1; k >= 0; k--) {
result = GPU_rho_coeff[m-nlower + k*mult_fact] + result * dy;
}
y0 = x0*result;
for (l = nlower; l <= nupper; l++) {
mz = l+nzi;
if(mz >= Nz) mz -= Nz;
if(mz < 0) mz += Nz;
result = Scalar(0.0);
for (k = order-1; k >= 0; k--) {
result = GPU_rho_coeff[l-nlower + k*mult_fact] + result * dz;
}
z0 = y0*result;
Scalar local_field_x = E_field[mz + Nz * (my + Ny * mx)].x;
Scalar local_field_y = E_field[mz + Nz * (my + Ny * mx)].y;
Scalar local_field_z = E_field[mz + Nz * (my + Ny * mx)].z;
local_force.x += qi*z0*local_field_x;
local_force.y += qi*z0*local_field_y;
local_force.z += qi*z0*local_field_z;
}
}
}
d_force[idx] = local_force;
}
}
}
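// Note (added for clarity): gpu_compute_pppm_forces() below drives the whole
// reciprocal-space PPPM solve:
//   1. upload the charge-assignment coefficients and zero the mesh
//   2. assign_charges_to_grid_kernel  -- spread charges onto the mesh, one thread per particle
//   3. forward FFT of the mesh charge density
//   4. combined_green_e_kernel        -- multiply by the influence function and by -i*k
//   5. three inverse FFTs, after which set_gpu_field_kernel packs the real parts into a Scalar3 field
//   6. calculate_forces_kernel        -- interpolate the field back onto the particles (F = q*E)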
hipError_t gpu_compute_pppm_forces(Scalar4 *d_force,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar *d_charge,
const BoxDim& box,
int Nx,
int Ny,
int Nz,
int order,
Scalar *CPU_rho_coeff,
CUFFTCOMPLEX *GPU_rho_real_space,
hipfftHandle plan,
CUFFTCOMPLEX *GPU_E_x,
CUFFTCOMPLEX *GPU_E_y,
CUFFTCOMPLEX *GPU_E_z,
Scalar3 *GPU_k_vec,
Scalar *GPU_green_hat,
Scalar3 *E_field,
unsigned int *d_group_members,
unsigned int group_size,
int block_size,
const unsigned int compute_capability)
{
hipMemcpyToSymbol(GPU_rho_coeff, &(CPU_rho_coeff[0]), order * (2*order+1) * sizeof(Scalar));
// setup the grid to run the kernel with one thread per particle in the group
dim3 grid( (int)ceil((double)group_size / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
// setup the grid to run the kernel with one thread per particle in the group
dim3 P_grid( (int)ceil((double)group_size / (double)block_size), 1, 1);
dim3 P_threads(block_size, 1, 1);
// setup the grid to run the kernel with one thread per grid point
dim3 N_grid( (int)ceil((double)Nx*Ny*Nz / (double)block_size), 1, 1);
dim3 N_threads(block_size, 1, 1);
// bind the textures on pre sm35 arches
if (compute_capability < 350)
{
hipError_t error = hipBindTexture(0, pdata_pos_tex, d_pos, sizeof(Scalar4)*N);
if (error != hipSuccess)
return error;
error = hipBindTexture(0, pdata_charge_tex, d_charge, sizeof(Scalar) * N);
if (error != hipSuccess)
return error;
}
// set the grid charge to zero
hipMemset(GPU_rho_real_space, 0, sizeof(CUFFTCOMPLEX)*Nx*Ny*Nz);
// run the kernels
// assign charges to the grid points, one thread per particles
hipLaunchKernelGGL(( assign_charges_to_grid_kernel) , dim3(P_grid), dim3(P_threads) , 0, 0, N,
d_pos,
d_charge,
box,
GPU_rho_real_space,
Nx,
Ny,
Nz,
order,
d_group_members,
group_size);
hipDeviceSynchronize();
// FFT
CUFFTEXEC(plan, GPU_rho_real_space, GPU_rho_real_space, HIPFFT_FORWARD);
hipDeviceSynchronize();
// multiply Green's function to get E field, one thread per grid point
hipLaunchKernelGGL(( combined_green_e_kernel) , dim3(N_grid), dim3(N_threads) , 0, 0, GPU_E_x,
GPU_E_y,
GPU_E_z,
GPU_k_vec,
GPU_rho_real_space,
Nx,
Ny,
Nz,
GPU_green_hat);
hipDeviceSynchronize();
// FFT
CUFFTEXEC(plan, GPU_E_x, GPU_E_x, HIPFFT_BACKWARD);
CUFFTEXEC(plan, GPU_E_y, GPU_E_y, HIPFFT_BACKWARD);
CUFFTEXEC(plan, GPU_E_z, GPU_E_z, HIPFFT_BACKWARD);
hipDeviceSynchronize();
hipLaunchKernelGGL(( set_gpu_field_kernel) , dim3(N_grid), dim3(N_threads) , 0, 0, GPU_E_x, GPU_E_y, GPU_E_z, E_field, Nx, Ny, Nz);
hipDeviceSynchronize();
//calculate forces on particles, one thread per particles
hipLaunchKernelGGL(( calculate_forces_kernel) , dim3(P_grid), dim3(P_threads) , 0, 0, d_force,
N,
d_pos,
d_charge,
box,
E_field,
Nx,
Ny,
Nz,
order,
d_group_members,
group_size);
return hipSuccess;
}
//! The developer has chosen not to document this function
__global__ void calculate_thermo_quantities_kernel(CUFFTCOMPLEX* rho,
Scalar* green_function,
Scalar* energy_sum,
Scalar* v_xx,
Scalar* v_xy,
Scalar* v_xz,
Scalar* v_yy,
Scalar* v_yz,
Scalar* v_zz,
Scalar* vg,
int Nx,
int Ny,
int Nz)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < Nx * Ny * Nz)
{
Scalar energy = green_function[idx]*(rho[idx].x*rho[idx].x + rho[idx].y*rho[idx].y);
v_xx[idx] = energy*vg[ 6*idx];
v_xy[idx] = energy*vg[1+6*idx];
v_xz[idx] = energy*vg[2+6*idx];
v_yy[idx] = energy*vg[3+6*idx];
v_yz[idx] = energy*vg[4+6*idx];
v_zz[idx] = energy*vg[5+6*idx];
energy_sum[idx] = energy;
}
}
//! The developer has chosen not to document this function
bool isPow2(unsigned int x)
{
return ((x&(x-1))==0);
}
//! The developer has chosen not to document this function
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
template<class T>
struct SharedMemory //!< Used to speed up the sum over grid points, in this case "T" is a placeholder for the data type
{
//! used to get shared memory for data type T*
__device__ inline operator T*()
{
extern __shared__ T __smem[];
return (T*)__smem;
}
    //! used to get shared memory for data type const T*
    __device__ inline operator const T*() const
{
extern __shared__ T __smem[];
return (T*)__smem;
}
};
//! The developer has chosen not to document this function
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void
reduce6(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int idx = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
T mySum;
mySum = Scalar(0.0);
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
mySum += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n) {
mySum += g_idata[i+blockSize];
}
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[idx] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (idx < 256) { sdata[idx] = mySum = mySum + sdata[idx + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (idx < 128) { sdata[idx] = mySum = mySum + sdata[idx + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (idx < 64) { sdata[idx] = mySum = mySum + sdata[idx + 64]; } __syncthreads(); }
#ifndef __DEVICE_EMULATION__
if (idx < 32)
#endif
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile T* smem = sdata;
if (blockSize >= 64) { smem[idx] = mySum = mySum + smem[idx + 32]; EMUSYNC; }
if (blockSize >= 32) { smem[idx] = mySum = mySum + smem[idx + 16]; EMUSYNC; }
if (blockSize >= 16) { smem[idx] = mySum = mySum + smem[idx + 8]; EMUSYNC; }
if (blockSize >= 8) { smem[idx] = mySum = mySum + smem[idx + 4]; EMUSYNC; }
if (blockSize >= 4) { smem[idx] = mySum = mySum + smem[idx + 2]; EMUSYNC; }
if (blockSize >= 2) { smem[idx] = mySum = mySum + smem[idx + 1]; EMUSYNC; }
}
// write result for this block to global mem
if (idx == 0) {
g_odata[blockIdx.x] = sdata[0];
}
}
//! The developer has chosen not to document this function
template <class T> void reduce(int size, int threads, int blocks, T *d_idata, T *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
if (isPow2(size))
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 256:
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 128:
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 64:
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 32:
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 16:
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 8:
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 4:
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 2:
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 1:
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
}
}
else
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 256:
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 128:
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 64:
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 32:
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 16:
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 8:
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 4:
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 2:
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
case 1:
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break;
}
}
}
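// Usage sketch (added for clarity; d_values and d_partial are illustrative names):
// summing n device Scalars into one partial sum per block with the dispatcher above:
//
//   int threads = (n < maxThreads*2) ? nextPow2((n + 1) / 2) : maxThreads;
//   int blocks  = (n + threads*2 - 1) / (threads*2);
//   reduce<Scalar>(n, threads, blocks, d_values, d_partial);
//
// d_partial then holds one partial sum per block; Scalar_reduce() below keeps
// reducing the partial sums (and finally copies back) until a single value remains.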
//! The developer has chosen not to document this function
void gpu_compute_pppm_thermo(int Nx,
int Ny,
int Nz,
CUFFTCOMPLEX *GPU_rho_real_space,
Scalar *GPU_vg,
Scalar *GPU_green_hat,
Scalar *o_data,
Scalar *energy_sum,
Scalar *v_xx,
Scalar *v_xy,
Scalar *v_xz,
Scalar *v_yy,
Scalar *v_yz,
Scalar *v_zz,
Scalar *pppm_virial_energy,
int block_size)
{
// setup the grid to run the kernel with one thread per grid point
dim3 N_grid( (int)ceil((double)Nx*Ny*Nz / (double)block_size), 1, 1);
dim3 N_threads(block_size, 1, 1);
hipLaunchKernelGGL(( calculate_thermo_quantities_kernel) , dim3(N_grid), dim3(N_threads) , 0, 0, GPU_rho_real_space,
GPU_green_hat,
energy_sum,
v_xx,
v_xy,
v_xz,
v_yy,
v_yz,
v_zz,
GPU_vg,
Nx,
Ny,
Nz);
hipDeviceSynchronize();
int n = Nx*Ny*Nz;
hipMemset(o_data, 0, sizeof(Scalar)*Nx*Ny*Nz);
pppm_virial_energy[0] = Scalar_reduce(energy_sum, o_data, n);
hipMemset(o_data, 0, sizeof(Scalar)*Nx*Ny*Nz);
pppm_virial_energy[1] = Scalar_reduce(v_xx, o_data, n);
hipMemset(o_data, 0, sizeof(Scalar)*Nx*Ny*Nz);
pppm_virial_energy[2] = Scalar_reduce(v_xy, o_data, n);
hipMemset(o_data, 0, sizeof(Scalar)*Nx*Ny*Nz);
pppm_virial_energy[3] = Scalar_reduce(v_xz, o_data, n);
hipMemset(o_data, 0, sizeof(Scalar)*Nx*Ny*Nz);
pppm_virial_energy[4] = Scalar_reduce(v_yy, o_data, n);
hipMemset(o_data, 0, sizeof(Scalar)*Nx*Ny*Nz);
pppm_virial_energy[5] = Scalar_reduce(v_yz, o_data, n);
hipMemset(o_data, 0, sizeof(Scalar)*Nx*Ny*Nz);
pppm_virial_energy[6] = Scalar_reduce(v_zz, o_data, n);
}
//! The developer has chosen not to document this function
Scalar Scalar_reduce(Scalar* i_data, Scalar* o_data, int n) {
Scalar gpu_result = 0.0;
int threads, blocks, maxBlocks = 64, maxThreads = 256, cpuFinalThreshold = 1;
bool needReadBack = true;
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
blocks = MIN(maxBlocks, blocks);
if (blocks == 1) cpuFinalThreshold = 1;
int maxNumBlocks = MIN( n / maxThreads, MAX_BLOCK_DIM_SIZE);
reduce<Scalar>(n, threads, blocks, i_data, o_data);
int s=blocks;
while(s > cpuFinalThreshold)
{
threads = 0;
blocks = 0;
threads = (s < maxThreads*2) ? nextPow2((s + 1)/ 2) : maxThreads;
blocks = (s + (threads * 2 - 1)) / (threads * 2);
blocks = MIN(maxBlocks, blocks);
reduce<Scalar>(s, threads, blocks, o_data, o_data);
hipDeviceSynchronize();
s = (s + (threads*2-1)) / (threads*2);
}
if (s > 1)
{
Scalar* h_odata = (Scalar *) malloc(maxNumBlocks*sizeof(Scalar));
hipMemcpy( h_odata, o_data, s * sizeof(Scalar), hipMemcpyDeviceToHost);
for(int i=0; i < s; i++)
{
gpu_result += h_odata[i];
}
needReadBack = false;
free(h_odata);
}
if (needReadBack) hipMemcpy( &gpu_result, o_data, sizeof(Scalar), hipMemcpyDeviceToHost);
return gpu_result;
}
//! The developer has chosen not to document this function
__global__ void reset_kvec_green_hat_kernel(BoxDim box,
Scalar3 b1,
Scalar3 b2,
Scalar3 b3,
int Nx,
int Ny,
int Nz,
int order,
Scalar kappa,
Scalar3* kvec_array,
Scalar* green_hat,
Scalar* vg,
int nbx,
int nby,
int nbz,
Scalar* gf_b)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if( idx < Nx*Ny*Nz) {
int N2 = Ny*Nz;
int xn = idx/N2;
int yn = (idx - xn*N2)/Nz;
int zn = (idx - xn*N2 - yn*Nz);
Scalar3 j;
Scalar kappa2 = kappa*kappa;
j.x = xn > Nx/2 ? (Scalar)(xn - Nx) : (Scalar)xn;
j.y = yn > Ny/2 ? (Scalar)(yn - Ny) : (Scalar)yn;
j.z = zn > Nz/2 ? (Scalar)(zn - Nz) : (Scalar)zn;
Scalar3 k = j.x * b1 + j.y * b2 + j.z * b3;
kvec_array[idx] = k;
Scalar sqk = dot(k,k);
// omit DC term
if(idx == 0) {
vg[0+6*idx] = 0.0f;
vg[1+6*idx] = 0.0f;
vg[2+6*idx] = 0.0f;
vg[3+6*idx] = 0.0f;
vg[4+6*idx] = 0.0f;
vg[5+6*idx] = 0.0f;
}
else {
Scalar vterm = (-Scalar(2.0)/sqk - Scalar(0.5)/kappa2);
vg[0+6*idx] = Scalar(1.0)+vterm*kvec_array[idx].x*kvec_array[idx].x;
vg[1+6*idx] = vterm*kvec_array[idx].x*kvec_array[idx].y;
vg[2+6*idx] = vterm*kvec_array[idx].x*kvec_array[idx].z;
vg[3+6*idx] = Scalar(1.0)+vterm*kvec_array[idx].y*kvec_array[idx].y;
vg[4+6*idx] = vterm*kvec_array[idx].y*kvec_array[idx].z;
vg[5+6*idx] = Scalar(1.0)+vterm*kvec_array[idx].z*kvec_array[idx].z;
}
Scalar3 kH = Scalar(2.0*M_PI)*make_scalar3(Scalar(1.0)/(Scalar)Nx,
Scalar(1.0)/(Scalar)Ny,
Scalar(1.0)/(Scalar)Nz);
int ix, iy, iz;
Scalar snx, sny, snz, snx2, sny2, snz2;
Scalar argx, argy, argz, wx, wy, wz, sx, sy, sz, qx, qy, qz;
Scalar sum1, dot1, dot2;
Scalar numerator, denominator;
snz = fast::sin(Scalar(0.5)*j.z*kH.z);
snz2 = snz*snz;
sny = fast::sin(Scalar(0.5)*j.y*kH.y);
sny2 = sny*sny;
snx = fast::sin(Scalar(0.5)*j.x*kH.x);
snx2 = snx*snx;
int l;
sz = sy = sx = Scalar(0.0);
for (l = order-1; l >= 0; l--) {
sx = gf_b[l] + sx*snx2;
sy = gf_b[l] + sy*sny2;
sz = gf_b[l] + sz*snz2;
}
denominator = sx*sy*sz;
denominator *= denominator;
Scalar3 kn, kn1, kn2, kn3;
Scalar arg_gauss, gauss;
Scalar W;
if (sqk != 0.0f) {
numerator = Scalar(12.5663706)/sqk;
sum1 = 0;
for (ix = -nbx; ix <= nbx; ix++) {
qx = (j.x+(Scalar)(Nx*ix));
kn1 = b1 * qx;
argx = Scalar(0.5)*qx*kH.x;
Scalar wxs = gpu_sinc(argx);
wx = Scalar(1.0);
for (int iorder = 0; iorder < order; ++iorder)
wx *= wxs;
for (iy = -nby; iy <= nby; iy++) {
qy = (j.y+(Scalar)(Ny*iy));
kn2 = b2 * qy;
argy = Scalar(0.5)*qy*kH.y;
Scalar wys = gpu_sinc(argy);
wy = Scalar(1.0);
for (int iorder = 0; iorder < order; ++iorder)
wy *= wys;
for (iz = -nbz; iz <= nbz; iz++) {
qz = (j.z+(Scalar)(Nz*iz));
kn3 = b3 * qz;
kn = kn1+kn2+kn3;
argz = Scalar(0.5)*qz*kH.z;
Scalar wzs = gpu_sinc(argz);
wz = Scalar(1.0);
for (int iorder = 0; iorder < order; ++iorder)
wz *= wzs;
dot1 = dot(kn,k);
dot2 = dot(kn,kn);
arg_gauss = Scalar(0.25)*dot2/kappa2;
gauss = fast::exp(-arg_gauss);
W = wx*wy*wz;
sum1 += (dot1/dot2) * gauss * W*W;
}
}
}
green_hat[idx] = numerator*sum1/denominator;
} else green_hat[idx] = Scalar(0.0);
}
}
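// Note (added for clarity): for each reciprocal-lattice vector k the kernel
// above evaluates what is conventionally called the optimal influence function
// (Hockney & Eastwood), as read off from the code:
//     G_hat(k) = (4*pi/|k|^2) * sum_m [ (k . k_m)/|k_m|^2 * W(k_m)^2 * exp(-|k_m|^2/(4*kappa^2)) ]
//                / ( D_x * D_y * D_z )^2,
// where k_m runs over the aliased images of k within +/- (nbx, nby, nbz)
// Brillouin zones, W(k_m) is the product of per-axis sinc factors raised to the
// assignment order (the Fourier transform of the charge-assignment window), and
// D_x, D_y, D_z are the closed-form alias sums evaluated as polynomials in sin^2
// from the gf_b coefficients. The literal 12.5663706 is 4*pi.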
//! The developer has chosen not to document this function
hipError_t reset_kvec_green_hat(const BoxDim& box,
int Nx,
int Ny,
int Nz,
int nbx,
int nby,
int nbz,
int order,
Scalar kappa,
Scalar3 *kvec,
Scalar *green_hat,
Scalar *vg,
Scalar *gf_b,
int block_size)
{
// compute reciprocal lattice vectors
Scalar3 a1 = box.getLatticeVector(0);
Scalar3 a2 = box.getLatticeVector(1);
Scalar3 a3 = box.getLatticeVector(2);
Scalar V_box = box.getVolume();
Scalar3 b1 = Scalar(2.0*M_PI)*make_scalar3(a2.y*a3.z-a2.z*a3.y, a2.z*a3.x-a2.x*a3.z, a2.x*a3.y-a2.y*a3.x)/V_box;
Scalar3 b2 = Scalar(2.0*M_PI)*make_scalar3(a3.y*a1.z-a3.z*a1.y, a3.z*a1.x-a3.x*a1.z, a3.x*a1.y-a3.y*a1.x)/V_box;
Scalar3 b3 = Scalar(2.0*M_PI)*make_scalar3(a1.y*a2.z-a1.z*a2.y, a1.z*a2.x-a1.x*a2.z, a1.x*a2.y-a1.y*a2.x)/V_box;
dim3 grid( (int)ceil((double)Nx*Ny*Nz / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
hipLaunchKernelGGL(( reset_kvec_green_hat_kernel) , dim3(grid), dim3(threads) , 0, 0, box, b1, b2, b3, Nx, Ny, Nz, order, kappa, kvec, green_hat, vg, nbx, nby, nbz, gf_b);
return hipSuccess;
}
//! The developer has chosen not to document this function
__global__ void gpu_fix_exclusions_kernel(Scalar4 *d_force,
Scalar *d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar *d_charge,
const BoxDim box,
const unsigned int *d_n_neigh,
const unsigned int *d_nlist,
const Index2D nli,
Scalar kappa,
unsigned int *d_group_members,
unsigned int group_size)
{
// start by identifying which particle we are to handle
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
const Scalar sqrtpi = sqrtf(M_PI);
unsigned int n_neigh = d_n_neigh[idx];
Scalar4 postypei = texFetchScalar4(d_pos, pdata_pos_tex, idx);
Scalar3 posi = make_scalar3(postypei.x, postypei.y, postypei.z);
Scalar qi = texFetchScalar(d_charge, pdata_charge_tex, idx);
// initialize the force to 0
Scalar4 force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
Scalar virial[6];
for (unsigned int i = 0; i < 6; i++)
virial[i] = Scalar(0.0);
unsigned int cur_j = 0;
// prefetch neighbor index
unsigned int next_j = d_nlist[nli(idx, 0)];
for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++)
{
{
// read the current neighbor index (MEM TRANSFER: 4 bytes)
// prefetch the next value and set the current one
cur_j = next_j;
if (neigh_idx+1 < n_neigh)
next_j = d_nlist[nli(idx, neigh_idx+1)];
// get the neighbor's position (MEM TRANSFER: 16 bytes)
Scalar4 postypej = texFetchScalar4(d_pos, pdata_pos_tex, cur_j);
Scalar3 posj = make_scalar3(postypej.x, postypej.y, postypej.z);
Scalar qj = texFetchScalar(d_charge, pdata_charge_tex, cur_j);
// calculate dr (with periodic boundary conditions) (FLOPS: 3)
Scalar3 dx = posi - posj;
// apply periodic boundary conditions: (FLOPS 12)
dx = box.minImage(dx);
            // calculate r squared (FLOPS: 5)
Scalar rsq = dot(dx,dx);
Scalar r = sqrtf(rsq);
Scalar qiqj = qi * qj;
Scalar erffac = ::erf(kappa * r) / r;
Scalar force_divr = qiqj * (-Scalar(2.0) * exp(-rsq * kappa * kappa) * kappa / (sqrtpi * rsq) + erffac / rsq);
Scalar pair_eng = qiqj * erffac;
Scalar force_div2r = Scalar(0.5) * force_divr;
virial[0] += dx.x * dx.x * force_div2r;
virial[1] += dx.x * dx.y * force_div2r;
virial[2] += dx.x * dx.z * force_div2r;
virial[3] += dx.y * dx.y * force_div2r;
virial[4] += dx.y * dx.z * force_div2r;
virial[5] += dx.z * dx.z * force_div2r;
force.x += dx.x * force_divr;
force.y += dx.y * force_divr;
force.z += dx.z * force_divr;
force.w += pair_eng;
}
}
force.w *= Scalar(0.5);
d_force[idx].x -= force.x;
d_force[idx].y -= force.y;
d_force[idx].z -= force.z;
d_force[idx].w -= force.w;
for (unsigned int i = 0; i < 6; i++)
d_virial[i*virial_pitch+idx] = - virial[i];
}
}
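// Note (added for clarity): the reciprocal-space sum implicitly includes every
// pair, so pairs on the exclusion list must have the smooth, erf-screened part
// of their interaction removed again. For each excluded pair the kernel above
// accumulates
//     U_ij     = q_i*q_j * erf(kappa*r)/r
//     F_ij / r = q_i*q_j * ( erf(kappa*r)/r^3 - (2*kappa/sqrt(pi)) * exp(-kappa^2*r^2)/r^2 )
// and subtracts the result (with the usual 1/2 energy convention) from the
// force, energy and virial produced by the mesh part.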
//! The developer has chosen not to document this function
hipError_t fix_exclusions(Scalar4 *d_force,
Scalar *d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar *d_charge,
const BoxDim& box,
const unsigned int *d_n_ex,
const unsigned int *d_exlist,
const Index2D nex,
Scalar kappa,
unsigned int *d_group_members,
unsigned int group_size,
int block_size,
const unsigned int compute_capability)
{
dim3 grid( (int)ceil((double)group_size / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
// bind the textures on pre sm35 arches
if (compute_capability < 350)
{
hipError_t error = hipBindTexture(0, pdata_pos_tex, d_pos, sizeof(Scalar4)*N);
if (error != hipSuccess)
return error;
error = hipBindTexture(0, pdata_charge_tex, d_charge, sizeof(Scalar) * N);
if (error != hipSuccess)
return error;
}
hipLaunchKernelGGL(( gpu_fix_exclusions_kernel) , dim3(grid), dim3(threads) , 0, 0, d_force,
d_virial,
virial_pitch,
N,
d_pos,
d_charge,
box,
d_n_ex,
d_exlist,
nex,
kappa,
d_group_members,
group_size);
return hipSuccess;
}
| 2f5365b5347cc10ff439292a1facba2b8c701cc4.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: sbarr
#include "PPPMForceGPU.cuh"
#include "TextureTools.h"
#include <iostream>
using namespace std;
#include <assert.h>
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#ifdef __DEVICE_EMULATION__
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC
#endif
// There are several functions here that are dependent on precision:
// __scalar2int_rd is __float2int_rd in single, __double2int_rd in double
// CUFFTCOMPLEX is cufftComplex in single, cufftDoubleComplex in double
// CUFFTEXEC is cufftExecC2C in single, cufftExecZ2Z in double
#ifdef SINGLE_PRECISION
#define __scalar2int_rd __float2int_rd
#define CUFFTEXEC cufftExecC2C
#else
#define __scalar2int_rd __double2int_rd
#define CUFFTEXEC cufftExecZ2Z
#endif
#define MAX_BLOCK_DIM_SIZE 65535
// Constant memory for gridpoint weighting
#define CONSTANT_SIZE 2048
//! The developer has chosen not to document this variable
__device__ __constant__ Scalar GPU_rho_coeff[CONSTANT_SIZE];
/*! \file PPPMForceGPU.cu
\brief Defines GPU kernel code for calculating the Fourier space forces for the Coulomb interaction. Used by PPPMForceComputeGPU.
*/
//! Texture for reading particle positions
scalar4_tex_t pdata_pos_tex;
//! Texture for reading charge parameters
scalar_tex_t pdata_charge_tex;
//! GPU implementation of sinc(x)==sin(x)/x
__device__ Scalar gpu_sinc(Scalar x)
{
Scalar sinc = 0;
//! Coefficients of a power expansion of sin(x)/x
const Scalar sinc_coeff[] = {Scalar(1.0), Scalar(-1.0/6.0), Scalar(1.0/120.0),
Scalar(-1.0/5040.0),Scalar(1.0/362880.0),
Scalar(-1.0/39916800.0)};
if (x*x <= Scalar(1.0))
{
Scalar term = Scalar(1.0);
for (unsigned int i = 0; i < 6; ++i)
{
sinc += sinc_coeff[i] * term;
term *= x*x;
}
}
else
{
sinc = fast::sin(x)/x;
}
return sinc;
}
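// Note (added for clarity): the coefficients above are the Taylor series of
//     sinc(x) = sin(x)/x = 1 - x^2/3! + x^4/5! - x^6/7! + x^8/9! - x^10/11! + ...
// i.e. 1, -1/6, 1/120, -1/5040, 1/362880, -1/39916800. The truncated series is
// only used for |x| <= 1, where the first neglected term x^12/13! is below
// ~1.6e-10, so the approximation is exact to single precision; for |x| > 1 the
// exact sin(x)/x is evaluated instead.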
#ifndef SINGLE_PRECISION
//! atomicAdd function for double-precision floating point numbers
/*! This function is only used when hoomd is compiled for double precision on the GPU.
\param address Address to write the double to
\param val Value to add to address
*/
static __device__ inline double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
//! The developer has chosen not to document this function
__device__ inline void AddToGridpoint(int X, int Y, int Z, CUFFTCOMPLEX* array, Scalar value, int Ny, int Nz)
{
atomicAdd(&array[Z + Nz * (Y + Ny * X)].x, value);
}
//! The developer has chosen not to document this function
extern "C" __global__
void assign_charges_to_grid_kernel(const unsigned int N,
const Scalar4 *d_pos,
const Scalar *d_charge,
BoxDim box,
CUFFTCOMPLEX *rho_real_space,
int Nx,
int Ny,
int Nz,
int order,
unsigned int *d_group_members,
unsigned int group_size)
{
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
//get particle information
Scalar qi = texFetchScalar(d_charge, pdata_charge_tex, idx);
if(fabs(qi) > Scalar(0.0)) {
Scalar4 postypei = texFetchScalar4(d_pos, pdata_pos_tex, idx);
Scalar3 posi = make_scalar3(postypei.x,postypei.y,postypei.z);
//calculate dx, dy, dz for the charge density grid:
Scalar V_cell = box.getVolume()/(Scalar)(Nx*Ny*Nz);
//normalize position to gridsize:
Scalar3 pos_frac = box.makeFraction(posi);
pos_frac.x *= (Scalar)Nx;
pos_frac.y *= (Scalar)Ny;
pos_frac.z *= (Scalar)Nz;
Scalar shift, shiftone, x0, y0, z0, dx, dy, dz;
int nlower, nupper, mx, my, mz, nxi, nyi, nzi;
nlower = -(order-1)/2;
nupper = order/2;
if (order % 2)
{
shift =Scalar(0.5);
shiftone = Scalar(0.0);
}
else
{
shift = Scalar(0.0);
shiftone = Scalar(0.5);
}
nxi = __scalar2int_rd(pos_frac.x + shift);
nyi = __scalar2int_rd(pos_frac.y + shift);
nzi = __scalar2int_rd(pos_frac.z + shift);
if (nxi < 0 || nxi >= Nx || nyi < 0 || nyi >= Ny || nzi < 0 || nzi >= Nz)
{
// ignore
return;
}
dx = shiftone+(Scalar)nxi-pos_frac.x;
dy = shiftone+(Scalar)nyi-pos_frac.y;
dz = shiftone+(Scalar)nzi-pos_frac.z;
int n,m,l,k;
Scalar result;
int mult_fact = 2*order+1;
x0 = qi / V_cell;
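// Spread the charge over the surrounding order^3 grid points. The per-axis
// weights are polynomials in dx, dy, dz whose coefficients are stored in
// GPU_rho_coeff; each inner k-loop evaluates one polynomial via Horner's rule.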
for (n = nlower; n <= nupper; n++) {
mx = n+nxi;
if(mx >= Nx) mx -= Nx;
if(mx < 0) mx += Nx;
result = Scalar(0.0);
for (k = order-1; k >= 0; k--) {
result = GPU_rho_coeff[n-nlower + k*mult_fact] + result * dx;
}
y0 = x0*result;
for (m = nlower; m <= nupper; m++) {
my = m+nyi;
if(my >= Ny) my -= Ny;
if(my < 0) my += Ny;
result = Scalar(0.0);
for (k = order-1; k >= 0; k--) {
result = GPU_rho_coeff[m-nlower + k*mult_fact] + result * dy;
}
z0 = y0*result;
for (l = nlower; l <= nupper; l++) {
mz = l+nzi;
if(mz >= Nz) mz -= Nz;
if(mz < 0) mz += Nz;
result = Scalar(0.0);
for (k = order-1; k >= 0; k--) {
result = GPU_rho_coeff[l-nlower + k*mult_fact] + result * dz;
}
AddToGridpoint(mx, my, mz, rho_real_space, z0*result, Ny, Nz);
}
}
}
}
}
}
//! The developer has chosen not to document this function
extern "C" __global__
void combined_green_e_kernel(CUFFTCOMPLEX* E_x,
CUFFTCOMPLEX* E_y,
CUFFTCOMPLEX* E_z,
Scalar3* k_vec,
CUFFTCOMPLEX* rho,
int Nx,
int Ny,
int Nz,
Scalar* green_function)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < Nx * Ny * Nz)
{
Scalar3 k_vec_local = k_vec[idx];
CUFFTCOMPLEX E_x_local, E_y_local, E_z_local;
Scalar scale_times_green = green_function[idx] / ((Scalar)(Nx*Ny*Nz));
CUFFTCOMPLEX rho_local = rho[idx];
rho_local.x *= scale_times_green;
rho_local.y *= scale_times_green;
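// E(k) = -i k G(k) rho(k): multiplying the complex value (x + i y) by -i k
// gives (k y) - i (k x), which is what the component-wise assignments below
// implement.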
E_x_local.x = k_vec_local.x * rho_local.y;
E_x_local.y = -k_vec_local.x * rho_local.x;
E_y_local.x = k_vec_local.y * rho_local.y;
E_y_local.y = -k_vec_local.y * rho_local.x;
E_z_local.x = k_vec_local.z * rho_local.y;
E_z_local.y = -k_vec_local.z * rho_local.x;
E_x[idx] = E_x_local;
E_y[idx] = E_y_local;
E_z[idx] = E_z_local;
}
}
//! The developer has chosen not to document this function
__global__ void set_gpu_field_kernel(CUFFTCOMPLEX* E_x,
CUFFTCOMPLEX* E_y,
CUFFTCOMPLEX* E_z,
Scalar3* Electric_field,
int Nx,
int Ny,
int Nz)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < Nx * Ny * Nz)
{
Scalar3 local_field;
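// After the inverse FFT the field is real to numerical precision, so only
// the real part of each complex component is kept.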
local_field.x = E_x[tid].x;
local_field.y = E_y[tid].x;
local_field.z = E_z[tid].x;
Electric_field[tid] = local_field;
}
}
//! The developer has chosen not to document this function
__global__
void zero_forces(Scalar4 *d_force, Scalar *d_virial, const unsigned int virial_pitch, const unsigned int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
d_force[idx] = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
for (unsigned int i = 0; i < 6; i++)
d_virial[i*virial_pitch+idx] = Scalar(0.0);
}
}
//! The developer has chosen not to document this function
extern "C" __global__
void calculate_forces_kernel(Scalar4 *d_force,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar *d_charge,
BoxDim box,
Scalar3 *E_field,
int Nx,
int Ny,
int Nz,
int order,
unsigned int *d_group_members,
unsigned int group_size)
{
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
//get particle information
Scalar qi = texFetchScalar(d_charge, pdata_charge_tex, idx);
if(fabs(qi) > Scalar(0.0)) {
Scalar4 posi = texFetchScalar4(d_pos, pdata_pos_tex, idx);
//calculate dx, dy, dz for the charge density grid:
Scalar V_cell = box.getVolume()/(Scalar)(Nx*Ny*Nz);
//normalize position to gridsize:
Scalar3 pos_frac = box.makeFraction(make_scalar3(posi.x, posi.y, posi.z));
pos_frac.x *= (Scalar)Nx;
pos_frac.y *= (Scalar)Ny;
pos_frac.z *= (Scalar)Nz;
Scalar shift, shiftone, x0, y0, z0, dx, dy, dz;
int nlower, nupper, mx, my, mz, nxi, nyi, nzi;
nlower = -(order-1)/2;
nupper = order/2;
Scalar4 local_force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
if(order % 2)
{
shift =Scalar(0.5);
shiftone = Scalar(0.0);
}
else
{
shift = Scalar(0.0);
shiftone = Scalar(0.5);
}
nxi = __scalar2int_rd(pos_frac.x + shift);
nyi = __scalar2int_rd(pos_frac.y + shift);
nzi = __scalar2int_rd(pos_frac.z + shift);
if (nxi < 0 || nxi >= Nx || nyi < 0 || nyi >= Ny || nzi < 0 || nzi >= Nz)
{
// ignore
return;
}
dx = shiftone+(Scalar)nxi-pos_frac.x;
dy = shiftone+(Scalar)nyi-pos_frac.y;
dz = shiftone+(Scalar)nzi-pos_frac.z;
int n,m,l,k;
Scalar result;
int mult_fact = 2*order+1;
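// Interpolate the grid electric field back to the particle position using
// the same assignment polynomials as in the charge spreading step; the
// force is then F = q * E.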
for (n = nlower; n <= nupper; n++) {
mx = n+nxi;
if(mx >= Nx) mx -= Nx;
if(mx < 0) mx += Nx;
result = Scalar(0.0);
for (k = order-1; k >= 0; k--) {
result = GPU_rho_coeff[n-nlower + k*mult_fact] + result * dx;
}
x0 = result;
for (m = nlower; m <= nupper; m++) {
my = m+nyi;
if(my >= Ny) my -= Ny;
if(my < 0) my += Ny;
result = Scalar(0.0);
for (k = order-1; k >= 0; k--) {
result = GPU_rho_coeff[m-nlower + k*mult_fact] + result * dy;
}
y0 = x0*result;
for (l = nlower; l <= nupper; l++) {
mz = l+nzi;
if(mz >= Nz) mz -= Nz;
if(mz < 0) mz += Nz;
result = Scalar(0.0);
for (k = order-1; k >= 0; k--) {
result = GPU_rho_coeff[l-nlower + k*mult_fact] + result * dz;
}
z0 = y0*result;
Scalar local_field_x = E_field[mz + Nz * (my + Ny * mx)].x;
Scalar local_field_y = E_field[mz + Nz * (my + Ny * mx)].y;
Scalar local_field_z = E_field[mz + Nz * (my + Ny * mx)].z;
local_force.x += qi*z0*local_field_x;
local_force.y += qi*z0*local_field_y;
local_force.z += qi*z0*local_field_z;
}
}
}
d_force[idx] = local_force;
}
}
}
cudaError_t gpu_compute_pppm_forces(Scalar4 *d_force,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar *d_charge,
const BoxDim& box,
int Nx,
int Ny,
int Nz,
int order,
Scalar *CPU_rho_coeff,
CUFFTCOMPLEX *GPU_rho_real_space,
cufftHandle plan,
CUFFTCOMPLEX *GPU_E_x,
CUFFTCOMPLEX *GPU_E_y,
CUFFTCOMPLEX *GPU_E_z,
Scalar3 *GPU_k_vec,
Scalar *GPU_green_hat,
Scalar3 *E_field,
unsigned int *d_group_members,
unsigned int group_size,
int block_size,
const unsigned int compute_capability)
{
cudaMemcpyToSymbol(GPU_rho_coeff, &(CPU_rho_coeff[0]), order * (2*order+1) * sizeof(Scalar));
// setup the grid to run the kernel with one thread per particle in the group
dim3 grid( (int)ceil((double)group_size / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
// setup the grid to run the kernel with one thread per particle in the group
dim3 P_grid( (int)ceil((double)group_size / (double)block_size), 1, 1);
dim3 P_threads(block_size, 1, 1);
// setup the grid to run the kernel with one thread per grid point
dim3 N_grid( (int)ceil((double)Nx*Ny*Nz / (double)block_size), 1, 1);
dim3 N_threads(block_size, 1, 1);
// bind the textures on pre sm35 arches
if (compute_capability < 350)
{
cudaError_t error = cudaBindTexture(0, pdata_pos_tex, d_pos, sizeof(Scalar4)*N);
if (error != cudaSuccess)
return error;
error = cudaBindTexture(0, pdata_charge_tex, d_charge, sizeof(Scalar) * N);
if (error != cudaSuccess)
return error;
}
// set the grid charge to zero
cudaMemset(GPU_rho_real_space, 0, sizeof(CUFFTCOMPLEX)*Nx*Ny*Nz);
// run the kernels
// assign charges to the grid points, one thread per particle
assign_charges_to_grid_kernel <<< P_grid, P_threads >>> (N,
d_pos,
d_charge,
box,
GPU_rho_real_space,
Nx,
Ny,
Nz,
order,
d_group_members,
group_size);
cudaThreadSynchronize();
// FFT
CUFFTEXEC(plan, GPU_rho_real_space, GPU_rho_real_space, CUFFT_FORWARD);
cudaThreadSynchronize();
// multiply Green's function to get E field, one thread per grid point
combined_green_e_kernel <<< N_grid, N_threads >>> (GPU_E_x,
GPU_E_y,
GPU_E_z,
GPU_k_vec,
GPU_rho_real_space,
Nx,
Ny,
Nz,
GPU_green_hat);
cudaThreadSynchronize();
// FFT
CUFFTEXEC(plan, GPU_E_x, GPU_E_x, CUFFT_INVERSE);
CUFFTEXEC(plan, GPU_E_y, GPU_E_y, CUFFT_INVERSE);
CUFFTEXEC(plan, GPU_E_z, GPU_E_z, CUFFT_INVERSE);
cudaThreadSynchronize();
set_gpu_field_kernel <<< N_grid, N_threads >>> (GPU_E_x, GPU_E_y, GPU_E_z, E_field, Nx, Ny, Nz);
cudaThreadSynchronize();
// calculate forces on particles, one thread per particle
calculate_forces_kernel <<< P_grid, P_threads >>>(d_force,
N,
d_pos,
d_charge,
box,
E_field,
Nx,
Ny,
Nz,
order,
d_group_members,
group_size);
return cudaSuccess;
}
//! The developer has chosen not to document this function
__global__ void calculate_thermo_quantities_kernel(CUFFTCOMPLEX* rho,
Scalar* green_function,
Scalar* energy_sum,
Scalar* v_xx,
Scalar* v_xy,
Scalar* v_xz,
Scalar* v_yy,
Scalar* v_yz,
Scalar* v_zz,
Scalar* vg,
int Nx,
int Ny,
int Nz)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < Nx * Ny * Nz)
{
Scalar energy = green_function[idx]*(rho[idx].x*rho[idx].x + rho[idx].y*rho[idx].y);
v_xx[idx] = energy*vg[ 6*idx];
v_xy[idx] = energy*vg[1+6*idx];
v_xz[idx] = energy*vg[2+6*idx];
v_yy[idx] = energy*vg[3+6*idx];
v_yz[idx] = energy*vg[4+6*idx];
v_zz[idx] = energy*vg[5+6*idx];
energy_sum[idx] = energy;
}
}
//! The developer has chosen not to document this function
bool isPow2(unsigned int x)
{
return ((x&(x-1))==0);
}
//! The developer has chosen not to document this function
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
template<class T>
struct SharedMemory //!< Used to speed up the sum over grid points, in this case "T" is a placeholder for the data type
{
//! used to get shared memory for data type T*
__device__ inline operator T*()
{
extern __shared__ T __smem[];
return (T*)__smem;
}
//! used to get shared memory for data type T* (const overload)
__device__ inline operator const T*() const
{
extern __shared__ T __smem[];
return (T*)__smem;
}
};
//! The developer has chosen not to document this function
template <class T, unsigned int blockSize, bool nIsPow2>
__global__ void
reduce6(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int idx = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
T mySum;
mySum = Scalar(0.0);
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
mySum += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n) {
mySum += g_idata[i+blockSize];
}
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[idx] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (idx < 256) { sdata[idx] = mySum = mySum + sdata[idx + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (idx < 128) { sdata[idx] = mySum = mySum + sdata[idx + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (idx < 64) { sdata[idx] = mySum = mySum + sdata[idx + 64]; } __syncthreads(); }
#ifndef __DEVICE_EMULATION__
if (idx < 32)
#endif
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile T* smem = sdata;
if (blockSize >= 64) { smem[idx] = mySum = mySum + smem[idx + 32]; EMUSYNC; }
if (blockSize >= 32) { smem[idx] = mySum = mySum + smem[idx + 16]; EMUSYNC; }
if (blockSize >= 16) { smem[idx] = mySum = mySum + smem[idx + 8]; EMUSYNC; }
if (blockSize >= 8) { smem[idx] = mySum = mySum + smem[idx + 4]; EMUSYNC; }
if (blockSize >= 4) { smem[idx] = mySum = mySum + smem[idx + 2]; EMUSYNC; }
if (blockSize >= 2) { smem[idx] = mySum = mySum + smem[idx + 1]; EMUSYNC; }
}
// write result for this block to global mem
if (idx == 0) {
g_odata[blockIdx.x] = sdata[0];
}
}
//! The developer has chosen not to document this function
template <class T> void reduce(int size, int threads, int blocks, T *d_idata, T *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
if (isPow2(size))
{
switch (threads)
{
case 512:
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 256:
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 128:
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 64:
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 32:
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 16:
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 8:
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 4:
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 2:
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 1:
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
}
}
else
{
switch (threads)
{
case 512:
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 256:
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 128:
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 64:
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 32:
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 16:
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 8:
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 4:
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 2:
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
case 1:
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break;
}
}
}
//! The developer has chosen not to document this function
void gpu_compute_pppm_thermo(int Nx,
int Ny,
int Nz,
CUFFTCOMPLEX *GPU_rho_real_space,
Scalar *GPU_vg,
Scalar *GPU_green_hat,
Scalar *o_data,
Scalar *energy_sum,
Scalar *v_xx,
Scalar *v_xy,
Scalar *v_xz,
Scalar *v_yy,
Scalar *v_yz,
Scalar *v_zz,
Scalar *pppm_virial_energy,
int block_size)
{
// setup the grid to run the kernel with one thread per grid point
dim3 N_grid( (int)ceil((double)Nx*Ny*Nz / (double)block_size), 1, 1);
dim3 N_threads(block_size, 1, 1);
calculate_thermo_quantities_kernel <<< N_grid, N_threads >>> (GPU_rho_real_space,
GPU_green_hat,
energy_sum,
v_xx,
v_xy,
v_xz,
v_yy,
v_yz,
v_zz,
GPU_vg,
Nx,
Ny,
Nz);
cudaThreadSynchronize();
int n = Nx*Ny*Nz;
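// Reduce the per-grid-point energy and the six virial components down to
// scalars; o_data is reused as scratch space for each reduction.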
cudaMemset(o_data, 0, sizeof(Scalar)*Nx*Ny*Nz);
pppm_virial_energy[0] = Scalar_reduce(energy_sum, o_data, n);
cudaMemset(o_data, 0, sizeof(Scalar)*Nx*Ny*Nz);
pppm_virial_energy[1] = Scalar_reduce(v_xx, o_data, n);
cudaMemset(o_data, 0, sizeof(Scalar)*Nx*Ny*Nz);
pppm_virial_energy[2] = Scalar_reduce(v_xy, o_data, n);
cudaMemset(o_data, 0, sizeof(Scalar)*Nx*Ny*Nz);
pppm_virial_energy[3] = Scalar_reduce(v_xz, o_data, n);
cudaMemset(o_data, 0, sizeof(Scalar)*Nx*Ny*Nz);
pppm_virial_energy[4] = Scalar_reduce(v_yy, o_data, n);
cudaMemset(o_data, 0, sizeof(Scalar)*Nx*Ny*Nz);
pppm_virial_energy[5] = Scalar_reduce(v_yz, o_data, n);
cudaMemset(o_data, 0, sizeof(Scalar)*Nx*Ny*Nz);
pppm_virial_energy[6] = Scalar_reduce(v_zz, o_data, n);
}
//! The developer has chosen not to document this function
Scalar Scalar_reduce(Scalar* i_data, Scalar* o_data, int n) {
Scalar gpu_result = 0.0;
int threads, blocks, maxBlocks = 64, maxThreads = 256, cpuFinalThreshold = 1;
bool needReadBack = true;
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
blocks = MIN(maxBlocks, blocks);
if (blocks == 1) cpuFinalThreshold = 1;
int maxNumBlocks = MIN( n / maxThreads, MAX_BLOCK_DIM_SIZE);
reduce<Scalar>(n, threads, blocks, i_data, o_data);
int s=blocks;
while(s > cpuFinalThreshold)
{
threads = 0;
blocks = 0;
threads = (s < maxThreads*2) ? nextPow2((s + 1)/ 2) : maxThreads;
blocks = (s + (threads * 2 - 1)) / (threads * 2);
blocks = MIN(maxBlocks, blocks);
reduce<Scalar>(s, threads, blocks, o_data, o_data);
cudaThreadSynchronize();
s = (s + (threads*2-1)) / (threads*2);
}
if (s > 1)
{
Scalar* h_odata = (Scalar *) malloc(maxNumBlocks*sizeof(Scalar));
cudaMemcpy( h_odata, o_data, s * sizeof(Scalar), cudaMemcpyDeviceToHost);
for(int i=0; i < s; i++)
{
gpu_result += h_odata[i];
}
needReadBack = false;
free(h_odata);
}
if (needReadBack) cudaMemcpy( &gpu_result, o_data, sizeof(Scalar), cudaMemcpyDeviceToHost);
return gpu_result;
}
//! The developer has chosen not to document this function
__global__ void reset_kvec_green_hat_kernel(BoxDim box,
Scalar3 b1,
Scalar3 b2,
Scalar3 b3,
int Nx,
int Ny,
int Nz,
int order,
Scalar kappa,
Scalar3* kvec_array,
Scalar* green_hat,
Scalar* vg,
int nbx,
int nby,
int nbz,
Scalar* gf_b)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if( idx < Nx*Ny*Nz) {
int N2 = Ny*Nz;
int xn = idx/N2;
int yn = (idx - xn*N2)/Nz;
int zn = (idx - xn*N2 - yn*Nz);
Scalar3 j;
Scalar kappa2 = kappa*kappa;
j.x = xn > Nx/2 ? (Scalar)(xn - Nx) : (Scalar)xn;
j.y = yn > Ny/2 ? (Scalar)(yn - Ny) : (Scalar)yn;
j.z = zn > Nz/2 ? (Scalar)(zn - Nz) : (Scalar)zn;
Scalar3 k = j.x * b1 + j.y * b2 + j.z * b3;
kvec_array[idx] = k;
Scalar sqk = dot(k,k);
// omit DC term
if(idx == 0) {
vg[0+6*idx] = 0.0f;
vg[1+6*idx] = 0.0f;
vg[2+6*idx] = 0.0f;
vg[3+6*idx] = 0.0f;
vg[4+6*idx] = 0.0f;
vg[5+6*idx] = 0.0f;
}
else {
Scalar vterm = (-Scalar(2.0)/sqk - Scalar(0.5)/kappa2);
vg[0+6*idx] = Scalar(1.0)+vterm*kvec_array[idx].x*kvec_array[idx].x;
vg[1+6*idx] = vterm*kvec_array[idx].x*kvec_array[idx].y;
vg[2+6*idx] = vterm*kvec_array[idx].x*kvec_array[idx].z;
vg[3+6*idx] = Scalar(1.0)+vterm*kvec_array[idx].y*kvec_array[idx].y;
vg[4+6*idx] = vterm*kvec_array[idx].y*kvec_array[idx].z;
vg[5+6*idx] = Scalar(1.0)+vterm*kvec_array[idx].z*kvec_array[idx].z;
}
Scalar3 kH = Scalar(2.0*M_PI)*make_scalar3(Scalar(1.0)/(Scalar)Nx,
Scalar(1.0)/(Scalar)Ny,
Scalar(1.0)/(Scalar)Nz);
int ix, iy, iz;
Scalar snx, sny, snz, snx2, sny2, snz2;
Scalar argx, argy, argz, wx, wy, wz, sx, sy, sz, qx, qy, qz;
Scalar sum1, dot1, dot2;
Scalar numerator, denominator;
snz = fast::sin(Scalar(0.5)*j.z*kH.z);
snz2 = snz*snz;
sny = fast::sin(Scalar(0.5)*j.y*kH.y);
sny2 = sny*sny;
snx = fast::sin(Scalar(0.5)*j.x*kH.x);
snx2 = snx*snx;
int l;
sz = sy = sx = Scalar(0.0);
for (l = order-1; l >= 0; l--) {
sx = gf_b[l] + sx*snx2;
sy = gf_b[l] + sy*sny2;
sz = gf_b[l] + sz*snz2;
}
denominator = sx*sy*sz;
denominator *= denominator;
Scalar3 kn, kn1, kn2, kn3;
Scalar arg_gauss, gauss;
Scalar W;
if (sqk != 0.0f) {
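// 12.5663706 is 4*pi: the numerator of the optimal influence function is
// 4*pi/k^2.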
numerator = Scalar(12.5663706)/sqk;
sum1 = 0;
for (ix = -nbx; ix <= nbx; ix++) {
qx = (j.x+(Scalar)(Nx*ix));
kn1 = b1 * qx;
argx = Scalar(0.5)*qx*kH.x;
Scalar wxs = gpu_sinc(argx);
wx = Scalar(1.0);
for (int iorder = 0; iorder < order; ++iorder)
wx *= wxs;
for (iy = -nby; iy <= nby; iy++) {
qy = (j.y+(Scalar)(Ny*iy));
kn2 = b2 * qy;
argy = Scalar(0.5)*qy*kH.y;
Scalar wys = gpu_sinc(argy);
wy = Scalar(1.0);
for (int iorder = 0; iorder < order; ++iorder)
wy *= wys;
for (iz = -nbz; iz <= nbz; iz++) {
qz = (j.z+(Scalar)(Nz*iz));
kn3 = b3 * qz;
kn = kn1+kn2+kn3;
argz = Scalar(0.5)*qz*kH.z;
Scalar wzs = gpu_sinc(argz);
wz = Scalar(1.0);
for (int iorder = 0; iorder < order; ++iorder)
wz *= wzs;
dot1 = dot(kn,k);
dot2 = dot(kn,kn);
arg_gauss = Scalar(0.25)*dot2/kappa2;
gauss = fast::exp(-arg_gauss);
W = wx*wy*wz;
sum1 += (dot1/dot2) * gauss * W*W;
}
}
}
green_hat[idx] = numerator*sum1/denominator;
} else green_hat[idx] = Scalar(0.0);
}
}
//! The developer has chosen not to document this function
cudaError_t reset_kvec_green_hat(const BoxDim& box,
int Nx,
int Ny,
int Nz,
int nbx,
int nby,
int nbz,
int order,
Scalar kappa,
Scalar3 *kvec,
Scalar *green_hat,
Scalar *vg,
Scalar *gf_b,
int block_size)
{
// compute reciprocal lattice vectors
Scalar3 a1 = box.getLatticeVector(0);
Scalar3 a2 = box.getLatticeVector(1);
Scalar3 a3 = box.getLatticeVector(2);
Scalar V_box = box.getVolume();
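// b_i = 2*pi * (a_j x a_k) / V, so that dot(a_i, b_j) = 2*pi*delta_ij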
Scalar3 b1 = Scalar(2.0*M_PI)*make_scalar3(a2.y*a3.z-a2.z*a3.y, a2.z*a3.x-a2.x*a3.z, a2.x*a3.y-a2.y*a3.x)/V_box;
Scalar3 b2 = Scalar(2.0*M_PI)*make_scalar3(a3.y*a1.z-a3.z*a1.y, a3.z*a1.x-a3.x*a1.z, a3.x*a1.y-a3.y*a1.x)/V_box;
Scalar3 b3 = Scalar(2.0*M_PI)*make_scalar3(a1.y*a2.z-a1.z*a2.y, a1.z*a2.x-a1.x*a2.z, a1.x*a2.y-a1.y*a2.x)/V_box;
dim3 grid( (int)ceil((double)Nx*Ny*Nz / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
reset_kvec_green_hat_kernel <<< grid, threads >>> (box, b1, b2, b3, Nx, Ny, Nz, order, kappa, kvec, green_hat, vg, nbx, nby, nbz, gf_b);
return cudaSuccess;
}
//! The developer has chosen not to document this function
__global__ void gpu_fix_exclusions_kernel(Scalar4 *d_force,
Scalar *d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar *d_charge,
const BoxDim box,
const unsigned int *d_n_neigh,
const unsigned int *d_nlist,
const Index2D nli,
Scalar kappa,
unsigned int *d_group_members,
unsigned int group_size)
{
// start by identifying which particle we are to handle
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
const Scalar sqrtpi = sqrtf(M_PI);
unsigned int n_neigh = d_n_neigh[idx];
Scalar4 postypei = texFetchScalar4(d_pos, pdata_pos_tex, idx);
Scalar3 posi = make_scalar3(postypei.x, postypei.y, postypei.z);
Scalar qi = texFetchScalar(d_charge, pdata_charge_tex, idx);
// initialize the force to 0
Scalar4 force = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
Scalar virial[6];
for (unsigned int i = 0; i < 6; i++)
virial[i] = Scalar(0.0);
unsigned int cur_j = 0;
// prefetch neighbor index
unsigned int next_j = d_nlist[nli(idx, 0)];
for (int neigh_idx = 0; neigh_idx < n_neigh; neigh_idx++)
{
{
// read the current neighbor index (MEM TRANSFER: 4 bytes)
// prefetch the next value and set the current one
cur_j = next_j;
if (neigh_idx+1 < n_neigh)
next_j = d_nlist[nli(idx, neigh_idx+1)];
// get the neighbor's position (MEM TRANSFER: 16 bytes)
Scalar4 postypej = texFetchScalar4(d_pos, pdata_pos_tex, cur_j);
Scalar3 posj = make_scalar3(postypej.x, postypej.y, postypej.z);
Scalar qj = texFetchScalar(d_charge, pdata_charge_tex, cur_j);
// calculate dr (with periodic boundary conditions) (FLOPS: 3)
Scalar3 dx = posi - posj;
// apply periodic boundary conditions: (FLOPS 12)
dx = box.minImage(dx);
// calculate r squared (FLOPS: 5)
Scalar rsq = dot(dx,dx);
Scalar r = sqrtf(rsq);
Scalar qiqj = qi * qj;
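// Compute the erf() (reciprocal-space) part of the Ewald interaction for
// this excluded pair; it is subtracted from the particle's force and energy
// below so that excluded pairs do not pick up a spurious k-space
// contribution.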
Scalar erffac = ::erf(kappa * r) / r;
Scalar force_divr = qiqj * (-Scalar(2.0) * exp(-rsq * kappa * kappa) * kappa / (sqrtpi * rsq) + erffac / rsq);
Scalar pair_eng = qiqj * erffac;
Scalar force_div2r = Scalar(0.5) * force_divr;
virial[0] += dx.x * dx.x * force_div2r;
virial[1] += dx.x * dx.y * force_div2r;
virial[2] += dx.x * dx.z * force_div2r;
virial[3] += dx.y * dx.y * force_div2r;
virial[4] += dx.y * dx.z * force_div2r;
virial[5] += dx.z * dx.z * force_div2r;
force.x += dx.x * force_divr;
force.y += dx.y * force_divr;
force.z += dx.z * force_divr;
force.w += pair_eng;
}
}
force.w *= Scalar(0.5);
d_force[idx].x -= force.x;
d_force[idx].y -= force.y;
d_force[idx].z -= force.z;
d_force[idx].w -= force.w;
for (unsigned int i = 0; i < 6; i++)
d_virial[i*virial_pitch+idx] = - virial[i];
}
}
//! The developer has chosen not to document this function
cudaError_t fix_exclusions(Scalar4 *d_force,
Scalar *d_virial,
const unsigned int virial_pitch,
const unsigned int N,
const Scalar4 *d_pos,
const Scalar *d_charge,
const BoxDim& box,
const unsigned int *d_n_ex,
const unsigned int *d_exlist,
const Index2D nex,
Scalar kappa,
unsigned int *d_group_members,
unsigned int group_size,
int block_size,
const unsigned int compute_capability)
{
dim3 grid( (int)ceil((double)group_size / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
// bind the textures on pre sm35 arches
if (compute_capability < 350)
{
cudaError_t error = cudaBindTexture(0, pdata_pos_tex, d_pos, sizeof(Scalar4)*N);
if (error != cudaSuccess)
return error;
error = cudaBindTexture(0, pdata_charge_tex, d_charge, sizeof(Scalar) * N);
if (error != cudaSuccess)
return error;
}
gpu_fix_exclusions_kernel <<< grid, threads >>> (d_force,
d_virial,
virial_pitch,
N,
d_pos,
d_charge,
box,
d_n_ex,
d_exlist,
nex,
kappa,
d_group_members,
group_size);
return cudaSuccess;
}
|
467eb1f7dc7879124072397f656138246549bfbf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2019, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/matrix/ell_kernels.hpp"
#include <array>
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/math.hpp>
#include <ginkgo/core/base/types.hpp>
#include <ginkgo/core/matrix/csr.hpp>
#include <ginkgo/core/matrix/dense.hpp>
#include "core/matrix/dense_kernels.hpp"
#include "core/synthesizer/implementation_selection.hpp"
#include "cuda/base/cusparse_bindings.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/atomic.cuh"
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/components/format_conversion.cuh"
#include "cuda/components/prefix_sum.cuh"
#include "cuda/components/reduction.cuh"
#include "cuda/components/zero_array.hpp"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The ELL matrix format namespace.
*
* @ingroup ell
*/
namespace ell {
constexpr int default_block_size = 512;
// TODO: num_threads_per_core and ratio are parameters that should be tuned
/**
* num_threads_per_core is the oversubscribing parameter. There are
* `num_threads_per_core` threads assigned to each physical core.
*/
constexpr int num_threads_per_core = 4;
/**
* ratio is the parameter to decide when to use threads to do reduction on each
* row. (#cols/#rows > ratio)
*/
constexpr double ratio = 1e-2;
/**
* A compile-time list of sub-warp sizes for which the spmv kernels should be
* compiled.
* 0 is a special case where it uses a sub-warp size of 32 in
* combination with atomic_adds.
*/
using compiled_kernels = syn::value_list<int, 0, 1, 2, 4, 8, 16, 32>;
namespace kernel {
namespace {
template <int subwarp_size, bool atomic, typename ValueType, typename IndexType,
typename Closure>
__device__ void spmv_kernel(const size_type num_rows,
const ValueType *__restrict__ val,
const IndexType *__restrict__ col,
const size_type stride,
const size_type num_stored_elements_per_row,
const ValueType *__restrict__ b,
const size_type b_stride, ValueType *__restrict__ c,
const size_type c_stride, Closure op)
{
const auto tidx =
static_cast<IndexType>(blockDim.x) * blockIdx.x + threadIdx.x;
const auto nwarps_per_row =
gridDim.x * blockDim.x / num_rows / subwarp_size;
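// x is the row this thread works on, warp_id selects which of the
// nwarps_per_row subwarps assigned to that row it belongs to, and
// [y_start, y_end) is the slice of stored elements that subwarp traverses
// in steps of subwarp_size.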
const auto x = tidx / subwarp_size / nwarps_per_row;
const auto warp_id = tidx / subwarp_size % nwarps_per_row;
const auto y_start = tidx % subwarp_size +
num_stored_elements_per_row * warp_id / nwarps_per_row;
const auto y_end =
num_stored_elements_per_row * (warp_id + 1) / nwarps_per_row;
if (x < num_rows) {
const auto tile_block =
group::tiled_partition<subwarp_size>(group::this_thread_block());
ValueType temp = zero<ValueType>();
const auto column_id = blockIdx.y;
for (IndexType idx = y_start; idx < y_end; idx += subwarp_size) {
const auto ind = x + idx * stride;
const auto col_idx = col[ind];
if (col_idx < idx) {
break;
} else {
temp += val[ind] * b[col_idx * b_stride + column_id];
}
}
const auto answer = reduce(
tile_block, temp, [](ValueType x, ValueType y) { return x + y; });
if (tile_block.thread_rank() == 0) {
if (atomic) {
atomic_add(&(c[x * c_stride + column_id]),
op(answer, c[x * c_stride + column_id]));
} else {
c[x * c_stride + column_id] =
op(answer, c[x * c_stride + column_id]);
}
}
}
}
template <int subwarp_size, bool atomic = false, typename ValueType,
typename IndexType>
__global__ __launch_bounds__(default_block_size) void spmv(
const size_type num_rows, const ValueType *__restrict__ val,
const IndexType *__restrict__ col, const size_type stride,
const size_type num_stored_elements_per_row,
const ValueType *__restrict__ b, const size_type b_stride,
ValueType *__restrict__ c, const size_type c_stride)
{
spmv_kernel<subwarp_size, atomic>(
num_rows, val, col, stride, num_stored_elements_per_row, b, b_stride, c,
c_stride, [](const ValueType &x, const ValueType &y) { return x; });
}
template <int subwarp_size, bool atomic = false, typename ValueType,
typename IndexType>
__global__ __launch_bounds__(default_block_size) void spmv(
const size_type num_rows, const ValueType *__restrict__ alpha,
const ValueType *__restrict__ val, const IndexType *__restrict__ col,
const size_type stride, const size_type num_stored_elements_per_row,
const ValueType *__restrict__ b, const size_type b_stride,
const ValueType *__restrict__ beta, ValueType *__restrict__ c,
const size_type c_stride)
{
const ValueType alpha_val = alpha[0];
const ValueType beta_val = beta[0];
// Because atomic operations modify the values of c during the computation,
// the full alpha * a * b + beta * c update cannot be applied directly.
// Thus, the cuda kernel only computes alpha * a * b when it uses atomic
// operations; the beta * c contribution is applied by the caller, which
// scales c by beta beforehand.
if (atomic) {
spmv_kernel<subwarp_size, atomic>(
num_rows, val, col, stride, num_stored_elements_per_row, b,
b_stride, c, c_stride,
[&alpha_val](const ValueType &x, const ValueType &y) {
return alpha_val * x;
});
} else {
spmv_kernel<subwarp_size, atomic>(
num_rows, val, col, stride, num_stored_elements_per_row, b,
b_stride, c, c_stride,
[&alpha_val, &beta_val](const ValueType &x, const ValueType &y) {
return alpha_val * x + beta_val * y;
});
}
}
} // namespace
} // namespace kernel
namespace {
template <int info, typename ValueType, typename IndexType>
void abstract_spmv(syn::value_list<int, info>, int nwarps_per_row,
const matrix::Ell<ValueType, IndexType> *a,
const matrix::Dense<ValueType> *b,
matrix::Dense<ValueType> *c,
const matrix::Dense<ValueType> *alpha = nullptr,
const matrix::Dense<ValueType> *beta = nullptr)
{
const auto nrows = a->get_size()[0];
constexpr int subwarp_size = (info == 0) ? 32 : info;
constexpr bool atomic = (info == 0);
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(
ceildiv(nrows * subwarp_size * nwarps_per_row, block_size.x),
b->get_size()[1], 1);
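// grid.x covers the rows (subwarp_size * nwarps_per_row threads per row),
// grid.y iterates over the right-hand-side columns of b.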
if (alpha == nullptr && beta == nullptr) {
hipLaunchKernelGGL(( kernel::spmv<subwarp_size, atomic>), dim3(grid_size), dim3(block_size), 0, 0,
nrows, as_cuda_type(a->get_const_values()), a->get_const_col_idxs(),
a->get_stride(), a->get_num_stored_elements_per_row(),
as_cuda_type(b->get_const_values()), b->get_stride(),
as_cuda_type(c->get_values()), c->get_stride());
} else if (alpha != nullptr && beta != nullptr) {
hipLaunchKernelGGL(( kernel::spmv<subwarp_size, atomic>), dim3(grid_size), dim3(block_size), 0, 0,
nrows, as_cuda_type(alpha->get_const_values()),
as_cuda_type(a->get_const_values()), a->get_const_col_idxs(),
a->get_stride(), a->get_num_stored_elements_per_row(),
as_cuda_type(b->get_const_values()), b->get_stride(),
as_cuda_type(beta->get_const_values()),
as_cuda_type(c->get_values()), c->get_stride());
} else {
GKO_KERNEL_NOT_FOUND;
}
}
GKO_ENABLE_IMPLEMENTATION_SELECTION(select_abstract_spmv, abstract_spmv);
template <typename ValueType, typename IndexType>
std::array<int, 3> compute_subwarp_size_and_atomicity(
std::shared_ptr<const CudaExecutor> exec,
const matrix::Ell<ValueType, IndexType> *a)
{
int subwarp_size = 1;
int atomic = 0;
int nwarps_per_row = 1;
const auto nrows = a->get_size()[0];
const auto ell_ncols = a->get_num_stored_elements_per_row();
const auto nwarps = exec->get_num_cores_per_sm() / cuda_config::warp_size *
exec->get_num_multiprocessor() * num_threads_per_core;
// Use multiple threads to perform the reduction on each row when the matrix
// is wide.
// To keep every thread busy, pick the largest power of 2 that is at most 32
// and at most ell_ncols as the subwarp size. If the subwarp_size reaches 32,
// more than one warp may be assigned to the same row; atomic adds are then
// used to handle several warps writing to the same position. The number of
// warps per row is chosen according to the number of warps available on the
// GPU.
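// Example: for ell_ncols = 20 the loop below stops at subwarp_size = 16
// (since 32 > 20); for ell_ncols >= 32 the subwarp size becomes 32, and for
// even wider rows (ell_ncols >= 64) with enough warps available,
// nwarps_per_row may exceed 1, which enables the atomic code path.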
if (static_cast<double>(ell_ncols) / nrows > ratio) {
while (subwarp_size < 32 && (subwarp_size << 1) <= ell_ncols) {
subwarp_size <<= 1;
}
if (subwarp_size == 32) {
nwarps_per_row =
::min(ell_ncols / cuda_config::warp_size, nwarps / nrows);
nwarps_per_row = ::max(nwarps_per_row, 1);
}
if (nwarps_per_row > 1) {
atomic = 1;
}
}
return {subwarp_size, atomic, nwarps_per_row};
}
} // namespace
template <typename ValueType, typename IndexType>
void spmv(std::shared_ptr<const CudaExecutor> exec,
const matrix::Ell<ValueType, IndexType> *a,
const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *c)
{
const auto data = compute_subwarp_size_and_atomicity(exec, a);
const int subwarp_size = std::get<0>(data);
const int atomic = std::get<1>(data);
const int nwarps_per_row = std::get<2>(data);
/**
* info is the parameter for selecting the cuda kernel.
* for info == 0, it uses the kernel by 32 threads with atomic operation
* for other value, it uses the kernel without atomic_add
*/
const int info = (!atomic) * subwarp_size;
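// The atomic kernel only accumulates partial sums into c, so c has to be
// cleared first.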
if (atomic) {
zero_array(c->get_num_stored_elements(), c->get_values());
}
select_abstract_spmv(
compiled_kernels(),
[&info](int compiled_info) { return info == compiled_info; },
syn::value_list<int>(), syn::type_list<>(), nwarps_per_row, a, b, c);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(GKO_DECLARE_ELL_SPMV_KERNEL);
template <typename ValueType, typename IndexType>
void advanced_spmv(std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<ValueType> *alpha,
const matrix::Ell<ValueType, IndexType> *a,
const matrix::Dense<ValueType> *b,
const matrix::Dense<ValueType> *beta,
matrix::Dense<ValueType> *c)
{
const auto data = compute_subwarp_size_and_atomicity(exec, a);
const int subwarp_size = std::get<0>(data);
const int atomic = std::get<1>(data);
const int nwarps_per_row = std::get<2>(data);
/**
* info is the parameter for selecting the cuda kernel.
* for info == 0, it uses the kernel by 32 threads with atomic operation
* for other value, it uses the kernel without atomic_add
*/
const int info = (!atomic) * subwarp_size;
if (atomic) {
dense::scale(exec, beta, c);
}
select_abstract_spmv(
compiled_kernels(),
[&info](int compiled_info) { return info == compiled_info; },
syn::value_list<int>(), syn::type_list<>(), nwarps_per_row, a, b, c,
alpha, beta);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_ELL_ADVANCED_SPMV_KERNEL);
namespace kernel {
template <typename ValueType>
__global__
__launch_bounds__(cuda_config::max_block_size) void initialize_zero_dense(
size_type num_rows, size_type num_cols, size_type stride,
ValueType *__restrict__ result)
{
const auto tidx_x = threadIdx.x + blockDim.x * blockIdx.x;
const auto tidx_y = threadIdx.y + blockDim.y * blockIdx.y;
if (tidx_x < num_cols && tidx_y < num_rows) {
result[tidx_y * stride + tidx_x] = zero<ValueType>();
}
}
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void fill_in_dense(
size_type num_rows, size_type nnz, size_type source_stride,
const IndexType *__restrict__ col_idxs,
const ValueType *__restrict__ values, size_type result_stride,
ValueType *__restrict__ result)
{
const auto tidx = threadIdx.x + blockIdx.x * blockDim.x;
if (tidx < num_rows) {
for (auto col = 0; col < nnz; col++) {
result[tidx * result_stride +
col_idxs[tidx + col * source_stride]] +=
values[tidx + col * source_stride];
}
}
}
} // namespace kernel
template <typename ValueType, typename IndexType>
void convert_to_dense(std::shared_ptr<const CudaExecutor> exec,
matrix::Dense<ValueType> *result,
const matrix::Ell<ValueType, IndexType> *source)
{
const auto num_rows = result->get_size()[0];
const auto num_cols = result->get_size()[1];
const auto result_stride = result->get_stride();
const auto col_idxs = source->get_const_col_idxs();
const auto vals = source->get_const_values();
const auto source_stride = source->get_stride();
const dim3 block_size(cuda_config::warp_size,
cuda_config::max_block_size / cuda_config::warp_size,
1);
const dim3 init_grid_dim(ceildiv(result_stride, block_size.x),
ceildiv(num_rows, block_size.y), 1);
hipLaunchKernelGGL(( kernel::initialize_zero_dense), dim3(init_grid_dim), dim3(block_size), 0, 0,
num_rows, num_cols, result_stride, as_cuda_type(result->get_values()));
const auto grid_dim = ceildiv(num_rows, default_block_size);
hipLaunchKernelGGL(( kernel::fill_in_dense), dim3(grid_dim), dim3(default_block_size), 0, 0,
num_rows, source->get_num_stored_elements_per_row(), source_stride,
as_cuda_type(col_idxs), as_cuda_type(vals), result_stride,
as_cuda_type(result->get_values()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_ELL_CONVERT_TO_DENSE_KERNEL);
namespace kernel {
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void count_nnz_per_row(
size_type num_rows, size_type max_nnz_per_row, size_type stride,
const ValueType *__restrict__ values, IndexType *__restrict__ result)
{
constexpr auto warp_size = cuda_config::warp_size;
const auto tidx = threadIdx.x + blockIdx.x * blockDim.x;
const auto row_idx = tidx / warp_size;
if (row_idx < num_rows) {
IndexType part_result{};
for (auto i = threadIdx.x % warp_size; i < max_nnz_per_row;
i += warp_size) {
if (values[stride * i + row_idx] != zero<ValueType>()) {
part_result += 1;
}
}
auto warp_tile =
group::tiled_partition<warp_size>(group::this_thread_block());
result[row_idx] = reduce(
warp_tile, part_result,
[](const size_type &a, const size_type &b) { return a + b; });
}
}
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void fill_in_csr(
size_type num_rows, size_type max_nnz_per_row, size_type stride,
const ValueType *__restrict__ source_values,
const IndexType *__restrict__ source_col_idxs,
IndexType *__restrict__ result_row_ptrs,
IndexType *__restrict__ result_col_idxs,
ValueType *__restrict__ result_values)
{
const auto tidx = threadIdx.x + blockDim.x * blockIdx.x;
if (tidx < num_rows) {
auto write_to = result_row_ptrs[tidx];
for (auto i = 0; i < max_nnz_per_row; i++) {
const auto source_idx = tidx + stride * i;
if (source_values[source_idx] != zero<ValueType>()) {
result_values[write_to] = source_values[source_idx];
result_col_idxs[write_to] = source_col_idxs[source_idx];
write_to++;
}
}
}
}
} // namespace kernel
template <typename ValueType, typename IndexType>
void convert_to_csr(std::shared_ptr<const CudaExecutor> exec,
matrix::Csr<ValueType, IndexType> *result,
const matrix::Ell<ValueType, IndexType> *source)
{
auto num_rows = result->get_size()[0];
auto row_ptrs = result->get_row_ptrs();
auto col_idxs = result->get_col_idxs();
auto values = result->get_values();
const auto stride = source->get_stride();
const auto max_nnz_per_row = source->get_num_stored_elements_per_row();
constexpr auto rows_per_block =
ceildiv(default_block_size, cuda_config::warp_size);
const auto grid_dim_nnz = ceildiv(source->get_size()[0], rows_per_block);
hipLaunchKernelGGL(( kernel::count_nnz_per_row), dim3(grid_dim_nnz), dim3(default_block_size), 0, 0,
num_rows, max_nnz_per_row, stride,
as_cuda_type(source->get_const_values()), as_cuda_type(row_ptrs));
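// Turn the per-row nonzero counts into CSR row pointers with a two-kernel
// prefix sum: a block-local scan followed by propagation of the per-block
// totals stored in add_values.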
size_type grid_dim = ceildiv(num_rows + 1, default_block_size);
auto add_values = Array<IndexType>(exec, grid_dim);
hipLaunchKernelGGL(( start_prefix_sum<default_block_size>)
, dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows + 1, as_cuda_type(row_ptrs),
as_cuda_type(add_values.get_data()));
hipLaunchKernelGGL(( finalize_prefix_sum<default_block_size>), dim3(grid_dim), dim3(default_block_size), 0, 0,
num_rows + 1, as_cuda_type(row_ptrs),
as_cuda_type(add_values.get_const_data()));
hipLaunchKernelGGL(( kernel::fill_in_csr), dim3(grid_dim), dim3(default_block_size), 0, 0,
num_rows, max_nnz_per_row, stride,
as_cuda_type(source->get_const_values()),
as_cuda_type(source->get_const_col_idxs()), as_cuda_type(row_ptrs),
as_cuda_type(col_idxs), as_cuda_type(values));
add_values.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_ELL_CONVERT_TO_CSR_KERNEL);
template <typename ValueType, typename IndexType>
void count_nonzeros(std::shared_ptr<const CudaExecutor> exec,
const matrix::Ell<ValueType, IndexType> *source,
size_type *result)
{
const auto num_rows = source->get_size()[0];
auto nnz_per_row = Array<size_type>(exec, num_rows);
calculate_nonzeros_per_row(exec, source, &nnz_per_row);
*result = reduce_add_array(exec, num_rows, nnz_per_row.get_const_data());
nnz_per_row.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_ELL_COUNT_NONZEROS_KERNEL);
template <typename ValueType, typename IndexType>
void calculate_nonzeros_per_row(std::shared_ptr<const CudaExecutor> exec,
const matrix::Ell<ValueType, IndexType> *source,
Array<size_type> *result)
{
const auto num_rows = source->get_size()[0];
const auto max_nnz_per_row = source->get_num_stored_elements_per_row();
const auto stride = source->get_stride();
const auto values = source->get_const_values();
const auto warp_size = cuda_config::warp_size;
const auto grid_dim = ceildiv(num_rows * warp_size, default_block_size);
hipLaunchKernelGGL(( kernel::count_nnz_per_row), dim3(grid_dim), dim3(default_block_size), 0, 0,
num_rows, max_nnz_per_row, stride, as_cuda_type(values),
as_cuda_type(result->get_data()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_ELL_CALCULATE_NONZEROS_PER_ROW_KERNEL);
} // namespace ell
} // namespace cuda
} // namespace kernels
} // namespace gko
| 467eb1f7dc7879124072397f656138246549bfbf.cu | /*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2019, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/matrix/ell_kernels.hpp"
#include <array>
#include <ginkgo/core/base/exception_helpers.hpp>
#include <ginkgo/core/base/math.hpp>
#include <ginkgo/core/base/types.hpp>
#include <ginkgo/core/matrix/csr.hpp>
#include <ginkgo/core/matrix/dense.hpp>
#include "core/matrix/dense_kernels.hpp"
#include "core/synthesizer/implementation_selection.hpp"
#include "cuda/base/cusparse_bindings.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/atomic.cuh"
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/components/format_conversion.cuh"
#include "cuda/components/prefix_sum.cuh"
#include "cuda/components/reduction.cuh"
#include "cuda/components/zero_array.hpp"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The ELL matrix format namespace.
*
* @ingroup ell
*/
namespace ell {
constexpr int default_block_size = 512;
// TODO: num_threads_per_core and ratio are parameters that should be tuned
/**
* num_threads_per_core is the oversubscribing parameter. There are
* `num_threads_per_core` threads assigned to each physical core.
*/
constexpr int num_threads_per_core = 4;
/**
* ratio is the parameter to decide when to use threads to do reduction on each
* row. (#cols/#rows > ratio)
*/
constexpr double ratio = 1e-2;
/**
* A compile-time list of sub-warp sizes for which the spmv kernels should be
* compiled.
* 0 is a special case where it uses a sub-warp size of 32 in
* combination with atomic_adds.
*/
using compiled_kernels = syn::value_list<int, 0, 1, 2, 4, 8, 16, 32>;
namespace kernel {
namespace {
template <int subwarp_size, bool atomic, typename ValueType, typename IndexType,
typename Closure>
__device__ void spmv_kernel(const size_type num_rows,
const ValueType *__restrict__ val,
const IndexType *__restrict__ col,
const size_type stride,
const size_type num_stored_elements_per_row,
const ValueType *__restrict__ b,
const size_type b_stride, ValueType *__restrict__ c,
const size_type c_stride, Closure op)
{
const auto tidx =
static_cast<IndexType>(blockDim.x) * blockIdx.x + threadIdx.x;
const auto nwarps_per_row =
gridDim.x * blockDim.x / num_rows / subwarp_size;
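// x is the row this thread works on, warp_id selects which of the
// nwarps_per_row subwarps assigned to that row it belongs to, and
// [y_start, y_end) is the slice of stored elements that subwarp traverses
// in steps of subwarp_size.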
const auto x = tidx / subwarp_size / nwarps_per_row;
const auto warp_id = tidx / subwarp_size % nwarps_per_row;
const auto y_start = tidx % subwarp_size +
num_stored_elements_per_row * warp_id / nwarps_per_row;
const auto y_end =
num_stored_elements_per_row * (warp_id + 1) / nwarps_per_row;
if (x < num_rows) {
const auto tile_block =
group::tiled_partition<subwarp_size>(group::this_thread_block());
ValueType temp = zero<ValueType>();
const auto column_id = blockIdx.y;
for (IndexType idx = y_start; idx < y_end; idx += subwarp_size) {
const auto ind = x + idx * stride;
const auto col_idx = col[ind];
if (col_idx < idx) {
break;
} else {
temp += val[ind] * b[col_idx * b_stride + column_id];
}
}
const auto answer = reduce(
tile_block, temp, [](ValueType x, ValueType y) { return x + y; });
if (tile_block.thread_rank() == 0) {
if (atomic) {
atomic_add(&(c[x * c_stride + column_id]),
op(answer, c[x * c_stride + column_id]));
} else {
c[x * c_stride + column_id] =
op(answer, c[x * c_stride + column_id]);
}
}
}
}
template <int subwarp_size, bool atomic = false, typename ValueType,
typename IndexType>
__global__ __launch_bounds__(default_block_size) void spmv(
const size_type num_rows, const ValueType *__restrict__ val,
const IndexType *__restrict__ col, const size_type stride,
const size_type num_stored_elements_per_row,
const ValueType *__restrict__ b, const size_type b_stride,
ValueType *__restrict__ c, const size_type c_stride)
{
spmv_kernel<subwarp_size, atomic>(
num_rows, val, col, stride, num_stored_elements_per_row, b, b_stride, c,
c_stride, [](const ValueType &x, const ValueType &y) { return x; });
}
template <int subwarp_size, bool atomic = false, typename ValueType,
typename IndexType>
__global__ __launch_bounds__(default_block_size) void spmv(
const size_type num_rows, const ValueType *__restrict__ alpha,
const ValueType *__restrict__ val, const IndexType *__restrict__ col,
const size_type stride, const size_type num_stored_elements_per_row,
const ValueType *__restrict__ b, const size_type b_stride,
const ValueType *__restrict__ beta, ValueType *__restrict__ c,
const size_type c_stride)
{
const ValueType alpha_val = alpha[0];
const ValueType beta_val = beta[0];
// Because atomic operations modify the values of c during the computation,
// the full alpha * a * b + beta * c update cannot be applied directly.
// Thus, the cuda kernel only computes alpha * a * b when it uses atomic
// operations; the beta * c contribution is applied by the caller, which
// scales c by beta beforehand.
if (atomic) {
spmv_kernel<subwarp_size, atomic>(
num_rows, val, col, stride, num_stored_elements_per_row, b,
b_stride, c, c_stride,
[&alpha_val](const ValueType &x, const ValueType &y) {
return alpha_val * x;
});
} else {
spmv_kernel<subwarp_size, atomic>(
num_rows, val, col, stride, num_stored_elements_per_row, b,
b_stride, c, c_stride,
[&alpha_val, &beta_val](const ValueType &x, const ValueType &y) {
return alpha_val * x + beta_val * y;
});
}
}
} // namespace
} // namespace kernel
namespace {
template <int info, typename ValueType, typename IndexType>
void abstract_spmv(syn::value_list<int, info>, int nwarps_per_row,
const matrix::Ell<ValueType, IndexType> *a,
const matrix::Dense<ValueType> *b,
matrix::Dense<ValueType> *c,
const matrix::Dense<ValueType> *alpha = nullptr,
const matrix::Dense<ValueType> *beta = nullptr)
{
const auto nrows = a->get_size()[0];
constexpr int subwarp_size = (info == 0) ? 32 : info;
constexpr bool atomic = (info == 0);
const dim3 block_size(default_block_size, 1, 1);
const dim3 grid_size(
ceildiv(nrows * subwarp_size * nwarps_per_row, block_size.x),
b->get_size()[1], 1);
if (alpha == nullptr && beta == nullptr) {
kernel::spmv<subwarp_size, atomic><<<grid_size, block_size, 0, 0>>>(
nrows, as_cuda_type(a->get_const_values()), a->get_const_col_idxs(),
a->get_stride(), a->get_num_stored_elements_per_row(),
as_cuda_type(b->get_const_values()), b->get_stride(),
as_cuda_type(c->get_values()), c->get_stride());
} else if (alpha != nullptr && beta != nullptr) {
kernel::spmv<subwarp_size, atomic><<<grid_size, block_size, 0, 0>>>(
nrows, as_cuda_type(alpha->get_const_values()),
as_cuda_type(a->get_const_values()), a->get_const_col_idxs(),
a->get_stride(), a->get_num_stored_elements_per_row(),
as_cuda_type(b->get_const_values()), b->get_stride(),
as_cuda_type(beta->get_const_values()),
as_cuda_type(c->get_values()), c->get_stride());
} else {
GKO_KERNEL_NOT_FOUND;
}
}
GKO_ENABLE_IMPLEMENTATION_SELECTION(select_abstract_spmv, abstract_spmv);
template <typename ValueType, typename IndexType>
std::array<int, 3> compute_subwarp_size_and_atomicity(
std::shared_ptr<const CudaExecutor> exec,
const matrix::Ell<ValueType, IndexType> *a)
{
int subwarp_size = 1;
int atomic = 0;
int nwarps_per_row = 1;
const auto nrows = a->get_size()[0];
const auto ell_ncols = a->get_num_stored_elements_per_row();
const auto nwarps = exec->get_num_cores_per_sm() / cuda_config::warp_size *
exec->get_num_multiprocessor() * num_threads_per_core;
// Use multiple threads to perform the reduction on each row when the matrix
// is wide.
// To keep every thread busy, pick the largest power of 2 that is at most 32
// and at most ell_ncols as the subwarp size. If the subwarp_size reaches 32,
// more than one warp may be assigned to the same row; atomic adds are then
// used to handle several warps writing to the same position. The number of
// warps per row is chosen according to the number of warps available on the
// GPU.
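// Example: for ell_ncols = 20 the loop below stops at subwarp_size = 16
// (since 32 > 20); for ell_ncols >= 32 the subwarp size becomes 32, and for
// even wider rows (ell_ncols >= 64) with enough warps available,
// nwarps_per_row may exceed 1, which enables the atomic code path.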
if (static_cast<double>(ell_ncols) / nrows > ratio) {
while (subwarp_size < 32 && (subwarp_size << 1) <= ell_ncols) {
subwarp_size <<= 1;
}
if (subwarp_size == 32) {
nwarps_per_row =
std::min(ell_ncols / cuda_config::warp_size, nwarps / nrows);
nwarps_per_row = std::max(nwarps_per_row, 1);
}
if (nwarps_per_row > 1) {
atomic = 1;
}
}
return {subwarp_size, atomic, nwarps_per_row};
}
} // namespace
template <typename ValueType, typename IndexType>
void spmv(std::shared_ptr<const CudaExecutor> exec,
const matrix::Ell<ValueType, IndexType> *a,
const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *c)
{
const auto data = compute_subwarp_size_and_atomicity(exec, a);
const int subwarp_size = std::get<0>(data);
const int atomic = std::get<1>(data);
const int nwarps_per_row = std::get<2>(data);
/**
* info is the parameter for selecting the cuda kernel.
* for info == 0, it uses the kernel by 32 threads with atomic operation
* for other value, it uses the kernel without atomic_add
*/
const int info = (!atomic) * subwarp_size;
if (atomic) {
zero_array(c->get_num_stored_elements(), c->get_values());
}
select_abstract_spmv(
compiled_kernels(),
[&info](int compiled_info) { return info == compiled_info; },
syn::value_list<int>(), syn::type_list<>(), nwarps_per_row, a, b, c);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(GKO_DECLARE_ELL_SPMV_KERNEL);
template <typename ValueType, typename IndexType>
void advanced_spmv(std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<ValueType> *alpha,
const matrix::Ell<ValueType, IndexType> *a,
const matrix::Dense<ValueType> *b,
const matrix::Dense<ValueType> *beta,
matrix::Dense<ValueType> *c)
{
const auto data = compute_subwarp_size_and_atomicity(exec, a);
const int subwarp_size = std::get<0>(data);
const int atomic = std::get<1>(data);
const int nwarps_per_row = std::get<2>(data);
/**
* info is the parameter for selecting the cuda kernel.
* for info == 0, it uses the kernel by 32 threads with atomic operation
* for other value, it uses the kernel without atomic_add
*/
const int info = (!atomic) * subwarp_size;
if (atomic) {
dense::scale(exec, beta, c);
}
select_abstract_spmv(
compiled_kernels(),
[&info](int compiled_info) { return info == compiled_info; },
syn::value_list<int>(), syn::type_list<>(), nwarps_per_row, a, b, c,
alpha, beta);
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_ELL_ADVANCED_SPMV_KERNEL);
namespace kernel {
template <typename ValueType>
__global__
__launch_bounds__(cuda_config::max_block_size) void initialize_zero_dense(
size_type num_rows, size_type num_cols, size_type stride,
ValueType *__restrict__ result)
{
const auto tidx_x = threadIdx.x + blockDim.x * blockIdx.x;
const auto tidx_y = threadIdx.y + blockDim.y * blockIdx.y;
if (tidx_x < num_cols && tidx_y < num_rows) {
result[tidx_y * stride + tidx_x] = zero<ValueType>();
}
}
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void fill_in_dense(
size_type num_rows, size_type nnz, size_type source_stride,
const IndexType *__restrict__ col_idxs,
const ValueType *__restrict__ values, size_type result_stride,
ValueType *__restrict__ result)
{
const auto tidx = threadIdx.x + blockIdx.x * blockDim.x;
if (tidx < num_rows) {
for (auto col = 0; col < nnz; col++) {
result[tidx * result_stride +
col_idxs[tidx + col * source_stride]] +=
values[tidx + col * source_stride];
}
}
}
} // namespace kernel
template <typename ValueType, typename IndexType>
void convert_to_dense(std::shared_ptr<const CudaExecutor> exec,
matrix::Dense<ValueType> *result,
const matrix::Ell<ValueType, IndexType> *source)
{
const auto num_rows = result->get_size()[0];
const auto num_cols = result->get_size()[1];
const auto result_stride = result->get_stride();
const auto col_idxs = source->get_const_col_idxs();
const auto vals = source->get_const_values();
const auto source_stride = source->get_stride();
const dim3 block_size(cuda_config::warp_size,
cuda_config::max_block_size / cuda_config::warp_size,
1);
const dim3 init_grid_dim(ceildiv(result_stride, block_size.x),
ceildiv(num_rows, block_size.y), 1);
kernel::initialize_zero_dense<<<init_grid_dim, block_size>>>(
num_rows, num_cols, result_stride, as_cuda_type(result->get_values()));
const auto grid_dim = ceildiv(num_rows, default_block_size);
kernel::fill_in_dense<<<grid_dim, default_block_size>>>(
num_rows, source->get_num_stored_elements_per_row(), source_stride,
as_cuda_type(col_idxs), as_cuda_type(vals), result_stride,
as_cuda_type(result->get_values()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_ELL_CONVERT_TO_DENSE_KERNEL);
namespace kernel {
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void count_nnz_per_row(
size_type num_rows, size_type max_nnz_per_row, size_type stride,
const ValueType *__restrict__ values, IndexType *__restrict__ result)
{
constexpr auto warp_size = cuda_config::warp_size;
const auto tidx = threadIdx.x + blockIdx.x * blockDim.x;
const auto row_idx = tidx / warp_size;
if (row_idx < num_rows) {
IndexType part_result{};
for (auto i = threadIdx.x % warp_size; i < max_nnz_per_row;
i += warp_size) {
if (values[stride * i + row_idx] != zero<ValueType>()) {
part_result += 1;
}
}
auto warp_tile =
group::tiled_partition<warp_size>(group::this_thread_block());
result[row_idx] = reduce(
warp_tile, part_result,
[](const size_type &a, const size_type &b) { return a + b; });
}
}
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void fill_in_csr(
size_type num_rows, size_type max_nnz_per_row, size_type stride,
const ValueType *__restrict__ source_values,
const IndexType *__restrict__ source_col_idxs,
IndexType *__restrict__ result_row_ptrs,
IndexType *__restrict__ result_col_idxs,
ValueType *__restrict__ result_values)
{
const auto tidx = threadIdx.x + blockDim.x * blockIdx.x;
if (tidx < num_rows) {
auto write_to = result_row_ptrs[tidx];
for (auto i = 0; i < max_nnz_per_row; i++) {
const auto source_idx = tidx + stride * i;
if (source_values[source_idx] != zero<ValueType>()) {
result_values[write_to] = source_values[source_idx];
result_col_idxs[write_to] = source_col_idxs[source_idx];
write_to++;
}
}
}
}
} // namespace kernel
template <typename ValueType, typename IndexType>
void convert_to_csr(std::shared_ptr<const CudaExecutor> exec,
matrix::Csr<ValueType, IndexType> *result,
const matrix::Ell<ValueType, IndexType> *source)
{
auto num_rows = result->get_size()[0];
auto row_ptrs = result->get_row_ptrs();
auto col_idxs = result->get_col_idxs();
auto values = result->get_values();
const auto stride = source->get_stride();
const auto max_nnz_per_row = source->get_num_stored_elements_per_row();
constexpr auto rows_per_block =
ceildiv(default_block_size, cuda_config::warp_size);
const auto grid_dim_nnz = ceildiv(source->get_size()[0], rows_per_block);
kernel::count_nnz_per_row<<<grid_dim_nnz, default_block_size>>>(
num_rows, max_nnz_per_row, stride,
as_cuda_type(source->get_const_values()), as_cuda_type(row_ptrs));
size_type grid_dim = ceildiv(num_rows + 1, default_block_size);
auto add_values = Array<IndexType>(exec, grid_dim);
start_prefix_sum<default_block_size>
<<<grid_dim, default_block_size>>>(num_rows + 1, as_cuda_type(row_ptrs),
as_cuda_type(add_values.get_data()));
finalize_prefix_sum<default_block_size><<<grid_dim, default_block_size>>>(
num_rows + 1, as_cuda_type(row_ptrs),
as_cuda_type(add_values.get_const_data()));
kernel::fill_in_csr<<<grid_dim, default_block_size>>>(
num_rows, max_nnz_per_row, stride,
as_cuda_type(source->get_const_values()),
as_cuda_type(source->get_const_col_idxs()), as_cuda_type(row_ptrs),
as_cuda_type(col_idxs), as_cuda_type(values));
add_values.clear();
}
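// Illustrative example (not part of the original source): for a 3-row ELL
// matrix whose rows hold 2, 0 and 3 stored nonzeros, count_nnz_per_row writes
// the per-row counts [2, 0, 3] into row_ptrs; assuming the two prefix-sum
// kernels implement an exclusive scan over num_rows + 1 entries, row_ptrs
// becomes [0, 2, 2, 5], and fill_in_csr then copies each nonzero of row r
// into result_values/result_col_idxs starting at offset row_ptrs[r].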
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_ELL_CONVERT_TO_CSR_KERNEL);
template <typename ValueType, typename IndexType>
void count_nonzeros(std::shared_ptr<const CudaExecutor> exec,
const matrix::Ell<ValueType, IndexType> *source,
size_type *result)
{
const auto num_rows = source->get_size()[0];
auto nnz_per_row = Array<size_type>(exec, num_rows);
calculate_nonzeros_per_row(exec, source, &nnz_per_row);
*result = reduce_add_array(exec, num_rows, nnz_per_row.get_const_data());
nnz_per_row.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_ELL_COUNT_NONZEROS_KERNEL);
template <typename ValueType, typename IndexType>
void calculate_nonzeros_per_row(std::shared_ptr<const CudaExecutor> exec,
const matrix::Ell<ValueType, IndexType> *source,
Array<size_type> *result)
{
const auto num_rows = source->get_size()[0];
const auto max_nnz_per_row = source->get_num_stored_elements_per_row();
const auto stride = source->get_stride();
const auto values = source->get_const_values();
const auto warp_size = cuda_config::warp_size;
const auto grid_dim = ceildiv(num_rows * warp_size, default_block_size);
kernel::count_nnz_per_row<<<grid_dim, default_block_size>>>(
num_rows, max_nnz_per_row, stride, as_cuda_type(values),
as_cuda_type(result->get_data()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_ELL_CALCULATE_NONZEROS_PER_ROW_KERNEL);
} // namespace ell
} // namespace cuda
} // namespace kernels
} // namespace gko
|
cd9e3f52751ad17660c167709e648cc4f81f8000.hip | // !!! This is a file automatically generated by hipify!!!
#include "mycuda.h"
#include "mycuda_public.h"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
constexpr int WARP_SIZE = 32;
/* perform first level of reduction */
template <unsigned int blockSize>
__global__ void ReduceAddKernel(const int* __restrict__ g_idata, int* __restrict__ g_odata, unsigned int n)
{
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
unsigned int gridSize2 = blockSize * gridDim.x * 2;
/* we reduce multiple elements per thread. The number is determined by gridSize.
** More blocks will result in a larger gridSize and therefore fewer elements per thread */
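/* Concrete numbers (hypothetical): with blockSize = 256 and gridDim.x = 64,
** gridSize2 = 32768, so each thread adds elements i and i + 256 and then
** jumps ahead by 32768 until i reaches n. */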
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
int mySum = 0;
while (i < n) {
mySum += g_idata[i];
if ((i + blockSize) < n)
mySum += g_idata[i + blockSize];
i += gridSize2;
}
/* Reduce within warp using __shfl_down_sync */
for (int offset = min(blockSize, WARP_SIZE) / 2; offset > 0; offset /= 2)
mySum += __shfl_down_sync(0xffffffff, mySum, offset);
if ((tid % WARP_SIZE) == 0) /* each warp puts its local sum into shared memory */
sdata[tid / WARP_SIZE] = mySum;
__syncthreads();
/* Reduce shared memory using __shfl_down_sync */
const unsigned int size_share_memory = (blockSize / WARP_SIZE) > 0 ? (blockSize / WARP_SIZE) : 1; /* size_share_memory <= 1024/32=32 */
const unsigned int mask_ballot = __ballot_sync(0xffffffff, tid < size_share_memory);
if (tid < size_share_memory) {
mySum = sdata[tid];
for (int offset = size_share_memory / 2; offset > 0; offset /= 2)
mySum += __shfl_down_sync(mask_ballot, mySum, offset);
}
/* write result for this block to global mem */
if (tid == 0)
g_odata[blockIdx.x] = mySum;
}
/* Reduce the array `dev_in` of size `size` to the array `dev_out` of size `blocks` */
void ReduceAdd(int size, int threads, int blocks, const int* dev_in, int* dev_out)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = ((threads / WARP_SIZE) + 1) * sizeof(int);
switch (threads) {
case 1024:
hipLaunchKernelGGL(( ReduceAddKernel<1024>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, dev_in, dev_out, size);
break;
case 512:
hipLaunchKernelGGL(( ReduceAddKernel<512>) , dim3(dimGrid), dim3(dimBlock), smemSize, 0, dev_in, dev_out, size);
break;
case 256:
hipLaunchKernelGGL(( ReduceAddKernel<256>) , dim3(dimGrid), dim3(dimBlock), smemSize, 0, dev_in, dev_out, size);
break;
case 128:
hipLaunchKernelGGL(( ReduceAddKernel<128>) , dim3(dimGrid), dim3(dimBlock), smemSize, 0, dev_in, dev_out, size);
break;
case 64:
hipLaunchKernelGGL(( ReduceAddKernel<64>) , dim3(dimGrid), dim3(dimBlock), smemSize, 0, dev_in, dev_out, size);
break;
case 32:
hipLaunchKernelGGL(( ReduceAddKernel<32>) , dim3(dimGrid), dim3(dimBlock), smemSize, 0, dev_in, dev_out, size);
break;
case 16:
hipLaunchKernelGGL(( ReduceAddKernel<16>) , dim3(dimGrid), dim3(dimBlock), smemSize, 0, dev_in, dev_out, size);
break;
case 8:
hipLaunchKernelGGL(( ReduceAddKernel<8>) , dim3(dimGrid), dim3(dimBlock), smemSize, 0, dev_in, dev_out, size);
break;
case 4:
hipLaunchKernelGGL(( ReduceAddKernel<4>) , dim3(dimGrid), dim3(dimBlock), smemSize, 0, dev_in, dev_out, size);
break;
case 2:
hipLaunchKernelGGL(( ReduceAddKernel<2>) , dim3(dimGrid), dim3(dimBlock), smemSize, 0, dev_in, dev_out, size);
break;
case 1:
hipLaunchKernelGGL(( ReduceAddKernel<1>) , dim3(dimGrid), dim3(dimBlock), smemSize, 0, dev_in, dev_out, size);
break;
}
}
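/* Usage sketch (illustrative only; `dev_in` is a hypothetical device pointer):
**
** int n = 1 << 20, threads = 0, blocks = 0;
** cuda::getNumBlocksAndThreads(n, 64, 256, blocks, threads);
** cuda::ArrayInt dev_partial(blocks);
** ReduceAdd(n, threads, blocks, dev_in, dev_partial.data());
**
** cuda::Sum below wraps exactly this pattern and keeps folding the per-block
** partial sums until a single value remains. */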
unsigned int cuda::nextPow2(unsigned int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
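/* Example values: nextPow2(1) == 1, nextPow2(5) == 8, nextPow2(64) == 64,
** nextPow2(100) == 128; the initial decrement keeps inputs that are already
** powers of two unchanged. */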
/* Compute numbers of blocks and threads for reduction algorithms */
void cuda::getNumBlocksAndThreads(int n, int maxBlocks, int maxThreads, int& blocks, int& threads) {
threads = (n < maxThreads * 2) ? nextPow2((n + 1) / 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
if (blocks > maxBlocks)
blocks = maxBlocks;
}
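/* Example (hypothetical sizes): n = 1000000 with maxThreads = 256 and
** maxBlocks = 64 yields threads = 256 and blocks = min(ceil(1000000 / 512), 64)
** = 64; a small input such as n = 300 yields threads = nextPow2(150) = 256
** and blocks = 1. */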
void cuda::Sum(const int* dev_in, int* dev_out, size_t size_in) /* size_in should be positive */
{
int maxThreads = 256;
int maxBlocks = 64;
int numBlocks = 0;
int numThreads = 0;
getNumBlocksAndThreads((int)size_in, maxBlocks, maxThreads, numBlocks, numThreads);
cuda::ArrayInt dev_c(numBlocks), dev_tmp(numBlocks);
ReduceAdd((int)size_in, numThreads, numBlocks, dev_in, dev_c.data());
#ifdef _DEBUG
CheckLastError();
DeviceSynchronize();
#endif
int s = numBlocks;
while (s > 1) {
int threads = 0, blocks = 0;
getNumBlocksAndThreads(s, maxBlocks, maxThreads, blocks, threads);
Memcpy(dev_tmp, dev_c, s);
ReduceAdd(s, threads, blocks, dev_tmp.data(), dev_c.data());
#ifdef _DEBUG
CheckLastError();
DeviceSynchronize();
#endif
s = (s + (threads * 2 - 1)) / (threads * 2);
}
Memcpy(dev_out, dev_c.data(), sizeof(int), hipMemcpyDeviceToDevice);
}
int cuda::Sum(const int* dev_in, size_t size_in)
{
cuda::ArrayInt dev_out(1);
Sum(dev_in, dev_out.data(), size_in);
int sum;
cuda::Memcpy(&sum, dev_out.data(), sizeof(int), hipMemcpyDeviceToHost);
return sum;
} | cd9e3f52751ad17660c167709e648cc4f81f8000.cu | #include "mycuda.h"
#include "mycuda_public.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
constexpr int WARP_SIZE = 32;
/* perform first level of reduction */
template <unsigned int blockSize>
__global__ void ReduceAddKernel(const int* __restrict__ g_idata, int* __restrict__ g_odata, unsigned int n)
{
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
unsigned int gridSize2 = blockSize * gridDim.x * 2;
/* we reduce multiple elements per thread. The number is determined by gridSize.
** More blocks will result in a larger gridSize and therefore fewer elements per thread */
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
int mySum = 0;
while (i < n) {
mySum += g_idata[i];
if ((i + blockSize) < n)
mySum += g_idata[i + blockSize];
i += gridSize2;
}
/* Reduce within warp using __shfl_down_sync */
for (int offset = min(blockSize, WARP_SIZE) / 2; offset > 0; offset /= 2)
mySum += __shfl_down_sync(0xffffffff, mySum, offset);
if ((tid % WARP_SIZE) == 0) /* each warp puts its local sum into shared memory */
sdata[tid / WARP_SIZE] = mySum;
__syncthreads();
/* Reduce shared memory using __shfl_down_sync */
const unsigned int size_share_memory = (blockSize / WARP_SIZE) > 0 ? (blockSize / WARP_SIZE) : 1; /* size_share_memory <= 1024/32=32 */
const unsigned int mask_ballot = __ballot_sync(0xffffffff, tid < size_share_memory);
if (tid < size_share_memory) {
mySum = sdata[tid];
for (int offset = size_share_memory / 2; offset > 0; offset /= 2)
mySum += __shfl_down_sync(mask_ballot, mySum, offset);
}
/* write result for this block to global mem */
if (tid == 0)
g_odata[blockIdx.x] = mySum;
}
/* Reduce the array `dev_in` of size `size` to the array `dev_out` of size `blocks` */
void ReduceAdd(int size, int threads, int blocks, const int* dev_in, int* dev_out)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = ((threads / WARP_SIZE) + 1) * sizeof(int);
switch (threads) {
case 1024:
ReduceAddKernel<1024><<<dimGrid, dimBlock, smemSize>>>(dev_in, dev_out, size);
break;
case 512:
ReduceAddKernel<512> <<<dimGrid, dimBlock, smemSize>>>(dev_in, dev_out, size);
break;
case 256:
ReduceAddKernel<256> <<<dimGrid, dimBlock, smemSize>>>(dev_in, dev_out, size);
break;
case 128:
ReduceAddKernel<128> <<<dimGrid, dimBlock, smemSize>>>(dev_in, dev_out, size);
break;
case 64:
ReduceAddKernel<64> <<<dimGrid, dimBlock, smemSize>>>(dev_in, dev_out, size);
break;
case 32:
ReduceAddKernel<32> <<<dimGrid, dimBlock, smemSize>>>(dev_in, dev_out, size);
break;
case 16:
ReduceAddKernel<16> <<<dimGrid, dimBlock, smemSize>>>(dev_in, dev_out, size);
break;
case 8:
ReduceAddKernel<8> <<<dimGrid, dimBlock, smemSize>>>(dev_in, dev_out, size);
break;
case 4:
ReduceAddKernel<4> <<<dimGrid, dimBlock, smemSize>>>(dev_in, dev_out, size);
break;
case 2:
ReduceAddKernel<2> <<<dimGrid, dimBlock, smemSize>>>(dev_in, dev_out, size);
break;
case 1:
ReduceAddKernel<1> <<<dimGrid, dimBlock, smemSize>>>(dev_in, dev_out, size);
break;
}
}
unsigned int cuda::nextPow2(unsigned int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* Compute numbers of blocks and threads for reduction algorithms */
void cuda::getNumBlocksAndThreads(int n, int maxBlocks, int maxThreads, int& blocks, int& threads) {
threads = (n < maxThreads * 2) ? nextPow2((n + 1) / 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
if (blocks > maxBlocks)
blocks = maxBlocks;
}
void cuda::Sum(const int* dev_in, int* dev_out, size_t size_in) /* size_in should be positive */
{
int maxThreads = 256;
int maxBlocks = 64;
int numBlocks = 0;
int numThreads = 0;
getNumBlocksAndThreads((int)size_in, maxBlocks, maxThreads, numBlocks, numThreads);
cuda::ArrayInt dev_c(numBlocks), dev_tmp(numBlocks);
ReduceAdd((int)size_in, numThreads, numBlocks, dev_in, dev_c.data());
#ifdef _DEBUG
CheckLastError();
DeviceSynchronize();
#endif
int s = numBlocks;
while (s > 1) {
int threads = 0, blocks = 0;
getNumBlocksAndThreads(s, maxBlocks, maxThreads, blocks, threads);
Memcpy(dev_tmp, dev_c, s);
ReduceAdd(s, threads, blocks, dev_tmp.data(), dev_c.data());
#ifdef _DEBUG
CheckLastError();
DeviceSynchronize();
#endif
s = (s + (threads * 2 - 1)) / (threads * 2);
}
Memcpy(dev_out, dev_c.data(), sizeof(int), cudaMemcpyDeviceToDevice);
}
int cuda::Sum(const int* dev_in, size_t size_in)
{
cuda::ArrayInt dev_out(1);
Sum(dev_in, dev_out.data(), size_in);
int sum;
cuda::Memcpy(&sum, dev_out.data(), sizeof(int), cudaMemcpyDeviceToHost);
return sum;
} |
0a06b992ad1fd9542154677bf1bd2cef98d550b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cstring>
#include <time.h>
__global__ void calculate_unique_3d_idx(int * input, int size)
{
int tid = (threadIdx.z * blockDim.y * blockDim.x) + (threadIdx.y * blockDim.x) + threadIdx.x;
int block_id = blockIdx.x + (blockIdx.y * gridDim.x) + (blockIdx.z * gridDim.x * gridDim.y);
int global_index = block_id * blockDim.x * blockDim.y * blockDim.z + tid;
printf ("tid: %d, block_id : %d, global_index : %d, value: %d \n", tid, block_id, global_index, input[global_index]);
}
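// Worked example (hypothetical thread): with block(2, 2, 2) and grid(2, 2, 2),
// the thread at threadIdx = (1, 0, 1) in block blockIdx = (1, 1, 0) gets
// tid = 1*2*2 + 0*2 + 1 = 5, block_id = 1 + 1*2 + 0*2*2 = 3 and
// global_index = 3*8 + 5 = 29.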
int main()
{
int size = 64;
int byte_size = sizeof(int) * size;
int * h_data;
h_data = (int *) malloc(byte_size);
time_t t;
srand((unsigned) time(&t));
for (int i =0; i < size; i++) {
h_data[i] = (int) (rand() & 0xff);
}
int * d_data;
hipMalloc((void **)&d_data, byte_size);
hipMemcpy(d_data, h_data, byte_size, hipMemcpyHostToDevice);
int nx, ny, nz;
nx = 4; ny = 4; nz = 4;
dim3 block(2, 2, 2);
dim3 grid(nx/block.x, ny/block.y, nz/block.z);
hipLaunchKernelGGL(( calculate_unique_3d_idx), dim3(grid), dim3(block), 0, 0, d_data, size);
hipDeviceSynchronize();
hipFree(d_data);
free(h_data);
hipDeviceReset();
return 0;
}
| 0a06b992ad1fd9542154677bf1bd2cef98d550b5.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cstring>
#include <time.h>
__global__ void calculate_unique_3d_idx(int * input, int size)
{
int tid = (threadIdx.z * blockDim.y * blockDim.x) + (threadIdx.y * blockDim.x) + threadIdx.x;
int block_id = blockIdx.x + (blockIdx.y * gridDim.x) + (blockIdx.z * gridDim.x * gridDim.y);
int global_index = block_id * blockDim.x * blockDim.y * blockDim.z + tid;
printf ("tid: %d, block_id : %d, global_index : %d, value: %d \n", tid, block_id, global_index, input[global_index]);
}
int main()
{
int size = 64;
int byte_size = sizeof(int) * size;
int * h_data;
h_data = (int *) malloc(byte_size);
time_t t;
srand((unsigned) time(&t));
for (int i =0; i < size; i++) {
h_data[i] = (int) (rand() & 0xff);
}
int * d_data;
cudaMalloc((void **)&d_data, byte_size);
cudaMemcpy(d_data, h_data, byte_size, cudaMemcpyHostToDevice);
int nx, ny, nz;
nx = 4; ny = 4; nz = 4;
dim3 block(2, 2, 2);
dim3 grid(nx/block.x, ny/block.y, nz/block.z);
calculate_unique_3d_idx<<<grid, block>>>(d_data, size);
cudaDeviceSynchronize();
cudaFree(d_data);
free(h_data);
cudaDeviceReset();
return 0;
}
|
09d8a874c5fda3989a2129d67bbb0add9f51a803.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 32
#define BLOCK_SIZE 32
__global__ void add_kernel(int *X, int *Y, int *Z){
int i = threadIdx.x;
int j = threadIdx.y;
int index = i*N+j;
Z[index] = X[index] + Y[index];
}
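// Index example (illustrative): with N = 32 the thread at threadIdx = (3, 5)
// writes Z[3*32 + 5] = Z[101]; blockIdx is not used, so every launched block
// computes the same N*N elements.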
int main()
{
int n;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
printf("input positive integer n: ");
scanf("%d", &n);
int X[N*N];
int Y[N*N];
for(int i=0; i<N; i++){
for(int j=0; j<N; j++){
X[i*N+j] = 0;
Y[i*N+j] = 1;
}
}
int Z[N*N];
int *d_X, *d_Y, *d_Z;
hipMalloc((void**) &d_X, (N*N)*sizeof(int));
hipMalloc((void**) &d_Y, (N*N)*sizeof(int));
hipMalloc((void**) &d_Z, (N*N)*sizeof(int));
hipMemcpy(d_X, &X, (N*N)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_Y, &Y, (N*N)*sizeof(int), hipMemcpyHostToDevice);
dim3 dimGrid(2, 1);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
hipEventRecord(start);
hipLaunchKernelGGL(( add_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_X, d_Y, d_Z);
hipEventRecord(stop);
hipMemcpy(&Z, d_Z, (N*N)*sizeof(int), hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
hipFree(d_X);
hipFree(d_Y);
hipFree(d_Z);
printf("%f ms\n", milliseconds);
for(int i=0; i<N; i++){
for(int j=0; j<N; j++){
printf("%d ", Z[i*N+j]);
}
printf("\n");
}
} | 09d8a874c5fda3989a2129d67bbb0add9f51a803.cu | #include <stdio.h>
#define N 32
#define BLOCK_SIZE 32
__global__ void add_kernel(int *X, int *Y, int *Z){
int i = threadIdx.x;
int j = threadIdx.y;
int index = i*N+j;
Z[index] = X[index] + Y[index];
}
int main()
{
int n;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("input positive integer n: ");
scanf("%d", &n);
int X[N*N];
int Y[N*N];
for(int i=0; i<N; i++){
for(int j=0; j<N; j++){
X[i*N+j] = 0;
Y[i*N+j] = 1;
}
}
int Z[N*N];
int *d_X, *d_Y, *d_Z;
cudaMalloc((void**) &d_X, (N*N)*sizeof(int));
cudaMalloc((void**) &d_Y, (N*N)*sizeof(int));
cudaMalloc((void**) &d_Z, (N*N)*sizeof(int));
cudaMemcpy(d_X, &X, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_Y, &Y, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
dim3 dimGrid(2, 1);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
cudaEventRecord(start);
add_kernel<<<dimGrid, dimBlock>>>(d_X, d_Y, d_Z);
cudaEventRecord(stop);
cudaMemcpy(&Z, d_Z, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cudaFree(d_X);
cudaFree(d_Y);
cudaFree(d_Z);
printf("%f ms\n", milliseconds);
for(int i=0; i<N; i++){
for(int j=0; j<N; j++){
printf("%d ", Z[i*N+j]);
}
printf("\n");
}
} |
9a67b4ccb3342384384fbfb08915cbf409d8bf41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
__global__ void BondAtomEnergyCudaKernel(const int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
const VECTOR *scaler, const int *atom_a, const int *atom_b,
const float *bond_k, const float *bond_r0, float *atom_ene) {
int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
if (bond_i < bond_numbers) {
int atom_i = atom_a[bond_i];
int atom_j = atom_b[bond_i];
float k = bond_k[bond_i];
float r0 = bond_r0[bond_i];
VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
float r1 = norm3df(dr.x, dr.y, dr.z);
float tempf = r1 - r0;
atomicAdd(&atom_ene[atom_i], k * tempf * tempf);
}
}
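// The kernel above evaluates the harmonic bond energy k * (|r_i - r_j| - r0)^2
// for each bond and accumulates it into atom_ene[atom_a[bond]] with atomicAdd,
// since several bonds may share the same first atom.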
void BondAtomEnergy(int bond_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *atom_ene,
hipStream_t stream) {
hipLaunchKernelGGL(( Reset_List), dim3(ceilf(static_cast<float>(atom_numbers) / 128)), dim3(128), 0, stream, atom_numbers, atom_ene, 0.);
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(bond_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
hipLaunchKernelGGL(( BondAtomEnergyCudaKernel), dim3(block_per_grid), dim3(thread_per_block), 0, stream, bond_numbers, uint_crd, scaler, atom_a,
atom_b, bond_k, bond_r0, atom_ene);
return;
}
void BondAtomEnergy(int bond_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *atom_ene, hipStream_t stream);
| 9a67b4ccb3342384384fbfb08915cbf409d8bf41.cu | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
__global__ void BondAtomEnergyCudaKernel(const int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
const VECTOR *scaler, const int *atom_a, const int *atom_b,
const float *bond_k, const float *bond_r0, float *atom_ene) {
int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
if (bond_i < bond_numbers) {
int atom_i = atom_a[bond_i];
int atom_j = atom_b[bond_i];
float k = bond_k[bond_i];
float r0 = bond_r0[bond_i];
VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
float r1 = norm3df(dr.x, dr.y, dr.z);
float tempf = r1 - r0;
atomicAdd(&atom_ene[atom_i], k * tempf * tempf);
}
}
void BondAtomEnergy(int bond_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *atom_ene,
cudaStream_t stream) {
Reset_List<<<ceilf(static_cast<float>(atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, atom_ene, 0.);
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(bond_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
BondAtomEnergyCudaKernel<<<block_per_grid, thread_per_block, 0, stream>>>(bond_numbers, uint_crd, scaler, atom_a,
atom_b, bond_k, bond_r0, atom_ene);
return;
}
void BondAtomEnergy(int bond_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *atom_ene, cudaStream_t stream);
|
5eebc15dc487ae2f891453cb3520e667251aae41.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/kernel/cuda_graph_support.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
#include <hip/hip_runtime.h>
#if TORCH_HIP_VERSION >= 11000
#include <cuda_bf16.h>
#endif // TORCH_HIP_VERSION >= 11000
#include "oneflow/core/device/cuda_pseudo_bfloat16.h"
namespace oneflow {
namespace {
template<typename T, typename U>
__global__ void FusedCastScaleGpu(const int64_t n, const T scale_val, const U* in,
const T* scale_by_ptr, T* out) {
const T scale = *scale_by_ptr * scale_val;
CUDA_1D_KERNEL_LOOP(i, n) { out[i] = static_cast<T>(in[i]) * scale; }
}
template<>
__global__ void FusedCastScaleGpu<float, half>(const int64_t n, const float scale_val,
const half* in, const float* scale_by_ptr,
float* out) {
const float scale = *scale_by_ptr * scale_val;
const int64_t n_2 = n / 2;
const auto* in_2 = reinterpret_cast<const half2*>(in);
auto* out_2 = reinterpret_cast<float2*>(out);
CUDA_1D_KERNEL_LOOP(i, n_2) {
float2 f2 = __half22float2(in_2[i]);
f2.x *= scale;
f2.y *= scale;
out_2[i] = f2;
}
if (n % 2 == 1 && blockIdx.x == 0 && threadIdx.x == 0) {
out[n - 1] = __half2float(in[n - 1]) * scale;
}
}
template<>
__global__ void FusedCastScaleGpu<half, float>(const int64_t n, const half scale_val,
const float* in, const half* scale_by_ptr,
half* out) {
const half scale = *scale_by_ptr * scale_val;
const half2 scale_h2 = __half2half2(scale);
const int64_t n_2 = n / 2;
const auto* in_2 = reinterpret_cast<const float2*>(in);
auto* out_h2 = reinterpret_cast<half2*>(out);
CUDA_1D_KERNEL_LOOP(i, n_2) {
half2 in_h2 = __float22half2_rn(in_2[i]);
out_h2[i] = __hmul2(in_h2, scale_h2);
}
if (n % 2 == 1 && blockIdx.x == 0 && threadIdx.x == 0) {
out[n - 1] = __float2half(in[n - 1]) * scale;
}
}
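// Packing example (illustrative): for n = 1001 the float2/half2 paths above
// process n_2 = 500 element pairs in the grid-stride loop, and the single
// leftover element n - 1 is converted by thread 0 of block 0 in the scalar
// tail branch.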
#if TORCH_HIP_VERSION >= 11000 && __CUDA_ARCH__ >= 800
template<>
__global__ void FusedCastScaleGpu<float, nv_bfloat16>(const int64_t n, const float scale_val,
const nv_bfloat16* in,
const float* scale_by_ptr, float* out) {
const float scale = *scale_by_ptr * scale_val;
const int64_t n_2 = n / 2;
const auto* in_2 = reinterpret_cast<const nv_bfloat162*>(in);
auto* out_2 = reinterpret_cast<float2*>(out);
CUDA_1D_KERNEL_LOOP(i, n_2) {
float2 f2 = __bfloat1622float2(in_2[i]);
f2.x *= scale;
f2.y *= scale;
out_2[i] = f2;
}
if (n % 2 == 1 && blockIdx.x == 0 && threadIdx.x == 0) {
out[n - 1] = __bfloat162float(in[n - 1]) * scale;
}
}
template<>
__global__ void FusedCastScaleGpu<nv_bfloat16, float>(const int64_t n, const nv_bfloat16 scale_val,
const float* in,
const nv_bfloat16* scale_by_ptr,
nv_bfloat16* out) {
const nv_bfloat16 scale = *scale_by_ptr * scale_val;
const nv_bfloat162 scale_h2 = __bfloat162bfloat162(scale);
const int64_t n_2 = n / 2;
const auto* in_2 = reinterpret_cast<const float2*>(in);
auto* out_h2 = reinterpret_cast<nv_bfloat162*>(out);
CUDA_1D_KERNEL_LOOP(i, n_2) {
nv_bfloat162 in_h2 = __float22bfloat162_rn(in_2[i]);
out_h2[i] = __hmul2(in_h2, scale_h2);
}
if (n % 2 == 1 && blockIdx.x == 0 && threadIdx.x == 0) {
out[n - 1] = __float2bfloat16(in[n - 1]) * scale;
}
}
#endif
template<typename T, typename U>
class FusedCastScaleGpuKernel final : public user_op::OpKernel, public user_op::CudaGraphSupport {
public:
FusedCastScaleGpuKernel() = default;
~FusedCastScaleGpuKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* scale_by_tensor = ctx->Tensor4ArgNameAndIndex("scale_by_tensor", 0);
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
const int64_t n = x->shape_view().elem_cnt();
const double scale = ctx->Attr<double>("scale");
const bool use_pack =
(x->data_type() == DataType::kFloat
&& (y->data_type() == DataType::kFloat16 || y->data_type() == DataType::kBFloat16))
|| (y->data_type() == DataType::kFloat
&& (x->data_type() == DataType::kFloat16 || x->data_type() == DataType::kBFloat16));
const int64_t launch_n = use_pack ? RoundUp(n, 2) / 2 : n;
hipLaunchKernelGGL(( FusedCastScaleGpu<T, U>), dim3(BlocksNum4ThreadsNum(launch_n)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->stream()->As<ep::CudaStream>()->cuda_stream(),
n, static_cast<T>(scale), x->dptr<U>(), scale_by_tensor->dptr<T>(), y->mut_dptr<T>());
};
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
} // namespace
#define REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(x_type, y_type) \
REGISTER_USER_KERNEL("fused_cast_scale") \
.SetCreateFn<FusedCastScaleGpuKernel<y_type, x_type>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("y", 0) == GetDataType<y_type>::value) \
&& (user_op::HobDataType("x", 0) == GetDataType<x_type>::value));
REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(half, float);
REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(half, double);
REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(float, half);
REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(float, double);
REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(double, half);
REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(double, float);
#if TORCH_HIP_VERSION >= 11000
REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(nv_bfloat16, float);
REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(float, nv_bfloat16);
#endif
#undef REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL
} // namespace oneflow
| 5eebc15dc487ae2f891453cb3520e667251aae41.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/kernel/cuda_graph_support.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
#include <cuda.h>
#if CUDA_VERSION >= 11000
#include <cuda_bf16.h>
#endif // CUDA_VERSION >= 11000
#include "oneflow/core/device/cuda_pseudo_bfloat16.h"
namespace oneflow {
namespace {
template<typename T, typename U>
__global__ void FusedCastScaleGpu(const int64_t n, const T scale_val, const U* in,
const T* scale_by_ptr, T* out) {
const T scale = *scale_by_ptr * scale_val;
CUDA_1D_KERNEL_LOOP(i, n) { out[i] = static_cast<T>(in[i]) * scale; }
}
template<>
__global__ void FusedCastScaleGpu<float, half>(const int64_t n, const float scale_val,
const half* in, const float* scale_by_ptr,
float* out) {
const float scale = *scale_by_ptr * scale_val;
const int64_t n_2 = n / 2;
const auto* in_2 = reinterpret_cast<const half2*>(in);
auto* out_2 = reinterpret_cast<float2*>(out);
CUDA_1D_KERNEL_LOOP(i, n_2) {
float2 f2 = __half22float2(in_2[i]);
f2.x *= scale;
f2.y *= scale;
out_2[i] = f2;
}
if (n % 2 == 1 && blockIdx.x == 0 && threadIdx.x == 0) {
out[n - 1] = __half2float(in[n - 1]) * scale;
}
}
template<>
__global__ void FusedCastScaleGpu<half, float>(const int64_t n, const half scale_val,
const float* in, const half* scale_by_ptr,
half* out) {
const half scale = *scale_by_ptr * scale_val;
const half2 scale_h2 = __half2half2(scale);
const int64_t n_2 = n / 2;
const auto* in_2 = reinterpret_cast<const float2*>(in);
auto* out_h2 = reinterpret_cast<half2*>(out);
CUDA_1D_KERNEL_LOOP(i, n_2) {
half2 in_h2 = __float22half2_rn(in_2[i]);
out_h2[i] = __hmul2(in_h2, scale_h2);
}
if (n % 2 == 1 && blockIdx.x == 0 && threadIdx.x == 0) {
out[n - 1] = __float2half(in[n - 1]) * scale;
}
}
#if CUDA_VERSION >= 11000 && __CUDA_ARCH__ >= 800
template<>
__global__ void FusedCastScaleGpu<float, nv_bfloat16>(const int64_t n, const float scale_val,
const nv_bfloat16* in,
const float* scale_by_ptr, float* out) {
const float scale = *scale_by_ptr * scale_val;
const int64_t n_2 = n / 2;
const auto* in_2 = reinterpret_cast<const nv_bfloat162*>(in);
auto* out_2 = reinterpret_cast<float2*>(out);
CUDA_1D_KERNEL_LOOP(i, n_2) {
float2 f2 = __bfloat1622float2(in_2[i]);
f2.x *= scale;
f2.y *= scale;
out_2[i] = f2;
}
if (n % 2 == 1 && blockIdx.x == 0 && threadIdx.x == 0) {
out[n - 1] = __bfloat162float(in[n - 1]) * scale;
}
}
template<>
__global__ void FusedCastScaleGpu<nv_bfloat16, float>(const int64_t n, const nv_bfloat16 scale_val,
const float* in,
const nv_bfloat16* scale_by_ptr,
nv_bfloat16* out) {
const nv_bfloat16 scale = *scale_by_ptr * scale_val;
const nv_bfloat162 scale_h2 = __bfloat162bfloat162(scale);
const int64_t n_2 = n / 2;
const auto* in_2 = reinterpret_cast<const float2*>(in);
auto* out_h2 = reinterpret_cast<nv_bfloat162*>(out);
CUDA_1D_KERNEL_LOOP(i, n_2) {
nv_bfloat162 in_h2 = __float22bfloat162_rn(in_2[i]);
out_h2[i] = __hmul2(in_h2, scale_h2);
}
if (n % 2 == 1 && blockIdx.x == 0 && threadIdx.x == 0) {
out[n - 1] = __float2bfloat16(in[n - 1]) * scale;
}
}
#endif
template<typename T, typename U>
class FusedCastScaleGpuKernel final : public user_op::OpKernel, public user_op::CudaGraphSupport {
public:
FusedCastScaleGpuKernel() = default;
~FusedCastScaleGpuKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* scale_by_tensor = ctx->Tensor4ArgNameAndIndex("scale_by_tensor", 0);
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
const int64_t n = x->shape_view().elem_cnt();
const double scale = ctx->Attr<double>("scale");
const bool use_pack =
(x->data_type() == DataType::kFloat
&& (y->data_type() == DataType::kFloat16 || y->data_type() == DataType::kBFloat16))
|| (y->data_type() == DataType::kFloat
&& (x->data_type() == DataType::kFloat16 || x->data_type() == DataType::kBFloat16));
const int64_t launch_n = use_pack ? RoundUp(n, 2) / 2 : n;
FusedCastScaleGpu<T, U><<<BlocksNum4ThreadsNum(launch_n), kCudaThreadsNumPerBlock, 0,
ctx->stream()->As<ep::CudaStream>()->cuda_stream()>>>(
n, static_cast<T>(scale), x->dptr<U>(), scale_by_tensor->dptr<T>(), y->mut_dptr<T>());
};
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
} // namespace
#define REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(x_type, y_type) \
REGISTER_USER_KERNEL("fused_cast_scale") \
.SetCreateFn<FusedCastScaleGpuKernel<y_type, x_type>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("y", 0) == GetDataType<y_type>::value) \
&& (user_op::HobDataType("x", 0) == GetDataType<x_type>::value));
REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(half, float);
REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(half, double);
REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(float, half);
REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(float, double);
REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(double, half);
REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(double, float);
#if CUDA_VERSION >= 11000
REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(nv_bfloat16, float);
REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL(float, nv_bfloat16);
#endif
#undef REGISTER_FUSED_CAST_SCALE_CUDA_KERNEL
} // namespace oneflow
|
b413c08f88f6fba4c98020ef8e13f583fe9a40b6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <utils.h>
#include "hipcub/hipcub.hpp"
#include <thrust/extrema.h>
#include "common/cumlHandle.hpp"
#include <common/device_buffer.hpp>
#include <common/host_buffer.hpp>
#include "memory.h"
#include <fstream>
template<class T>
TemporaryMemory<T>::TemporaryMemory(const ML::cumlHandle_impl& handle, int N, int Ncols, int maxstr, int n_unique, int n_bins, const int split_algo):ml_handle(handle)
{
//Assign Stream from cumlHandle
stream = ml_handle.getStream();
int n_hist_elements = n_unique * n_bins;
h_hist = new MLCommon::host_buffer<int>(handle.getHostAllocator(), stream, n_hist_elements);
d_hist = new MLCommon::device_buffer<int>(handle.getDeviceAllocator(), stream, n_hist_elements);
nrowsleftright = new MLCommon::host_buffer<int>(handle.getHostAllocator(), stream, 2);
int extra_elements = Ncols;
int quantile_elements = (split_algo == ML::SPLIT_ALGO::GLOBAL_QUANTILE) ? extra_elements : 1;
temp_data = new MLCommon::device_buffer<T>(handle.getDeviceAllocator(), stream, N * extra_elements);
totalmem += n_hist_elements * sizeof(int) + N * extra_elements * sizeof(T);
if (split_algo == ML::SPLIT_ALGO::GLOBAL_QUANTILE) {
d_quantile = new MLCommon::device_buffer<T>(handle.getDeviceAllocator(), stream, n_bins * quantile_elements);
d_temp_sampledcolumn = new MLCommon::device_buffer<T>(handle.getDeviceAllocator(), stream, N * extra_elements);
totalmem += (n_bins + N) * extra_elements * sizeof(T);
}
sampledlabels = new MLCommon::device_buffer<int>(handle.getDeviceAllocator(), stream, N);
totalmem += N*sizeof(int);
//Allocate Temporary for split functions
d_num_selected_out = new MLCommon::device_buffer<int>(handle.getDeviceAllocator(), stream, 1);
d_flags_left = new MLCommon::device_buffer<char>(handle.getDeviceAllocator(), stream, N);
d_flags_right = new MLCommon::device_buffer<char>(handle.getDeviceAllocator(), stream, N);
temprowids = new MLCommon::device_buffer<int>(handle.getDeviceAllocator(), stream, N);
question_value = new MLCommon::device_buffer<T>(handle.getDeviceAllocator(), stream, 1);
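// Descriptive note: the DeviceSelect::Flagged call below follows the usual CUB
// size-query idiom -- this first invocation is only used to have CUB write the
// required scratch size into split_temp_storage_bytes, and a buffer of that
// size is allocated on the next line for the Flagged calls issued later.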
hipcub::DeviceSelect::Flagged(d_split_temp_storage, split_temp_storage_bytes, temprowids->data(), d_flags_left->data(), temprowids->data(), d_num_selected_out->data(), N);
d_split_temp_storage = new MLCommon::device_buffer<char>(handle.getDeviceAllocator(), stream, split_temp_storage_bytes);
totalmem += split_temp_storage_bytes + (N + 1)*sizeof(int) + 2*N*sizeof(char) + sizeof(T);
h_histout = new MLCommon::host_buffer<int>(handle.getHostAllocator(), stream, n_hist_elements * Ncols);
d_globalminmax = new MLCommon::device_buffer<T>(handle.getDeviceAllocator(), stream, Ncols * 2);
d_histout = new MLCommon::device_buffer<int>(handle.getDeviceAllocator(), stream, n_hist_elements * Ncols);
d_colids = new MLCommon::device_buffer<int>(handle.getDeviceAllocator(), stream, Ncols);
totalmem += (n_hist_elements * sizeof(int) + sizeof(int) + 2*sizeof(T))* Ncols;
}
template<class T>
void TemporaryMemory<T>::print_info()
{
std::cout <<" Inside the print_info function \n" << std::flush;
std::cout << " Total temporary memory usage--> "<< ((double)totalmem/ (1024*1024)) << " MB" << std::endl;
return;
}
template<class T>
TemporaryMemory<T>::~TemporaryMemory()
{
h_hist->release(stream);
d_hist->release(stream);
nrowsleftright->release(stream);
temp_data->release(stream);
delete h_hist;
delete d_hist;
delete temp_data;
if (d_quantile != nullptr) {
d_quantile->release(stream);
delete d_quantile;
}
if (d_temp_sampledcolumn != nullptr) {
d_temp_sampledcolumn->release(stream);
delete d_temp_sampledcolumn;
}
sampledlabels->release(stream);
d_split_temp_storage->release(stream);
d_num_selected_out->release(stream);
d_flags_left->release(stream);
d_flags_right->release(stream);
temprowids->release(stream);
question_value->release(stream);
h_histout->release(stream);
delete sampledlabels;
delete d_split_temp_storage;
delete d_num_selected_out;
delete d_flags_left;
delete d_flags_right;
delete temprowids;
delete question_value;
delete h_histout;
d_globalminmax->release(stream);
d_histout->release(stream);
d_colids->release(stream);
delete d_globalminmax;
delete d_histout;
delete d_colids;
}
| b413c08f88f6fba4c98020ef8e13f583fe9a40b6.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <utils.h>
#include "cub/cub.cuh"
#include <thrust/extrema.h>
#include "common/cumlHandle.hpp"
#include <common/device_buffer.hpp>
#include <common/host_buffer.hpp>
#include "memory.h"
#include <fstream>
template<class T>
TemporaryMemory<T>::TemporaryMemory(const ML::cumlHandle_impl& handle, int N, int Ncols, int maxstr, int n_unique, int n_bins, const int split_algo):ml_handle(handle)
{
//Assign Stream from cumlHandle
stream = ml_handle.getStream();
int n_hist_elements = n_unique * n_bins;
h_hist = new MLCommon::host_buffer<int>(handle.getHostAllocator(), stream, n_hist_elements);
d_hist = new MLCommon::device_buffer<int>(handle.getDeviceAllocator(), stream, n_hist_elements);
nrowsleftright = new MLCommon::host_buffer<int>(handle.getHostAllocator(), stream, 2);
int extra_elements = Ncols;
int quantile_elements = (split_algo == ML::SPLIT_ALGO::GLOBAL_QUANTILE) ? extra_elements : 1;
temp_data = new MLCommon::device_buffer<T>(handle.getDeviceAllocator(), stream, N * extra_elements);
totalmem += n_hist_elements * sizeof(int) + N * extra_elements * sizeof(T);
if (split_algo == ML::SPLIT_ALGO::GLOBAL_QUANTILE) {
d_quantile = new MLCommon::device_buffer<T>(handle.getDeviceAllocator(), stream, n_bins * quantile_elements);
d_temp_sampledcolumn = new MLCommon::device_buffer<T>(handle.getDeviceAllocator(), stream, N * extra_elements);
totalmem += (n_bins + N) * extra_elements * sizeof(T);
}
sampledlabels = new MLCommon::device_buffer<int>(handle.getDeviceAllocator(), stream, N);
totalmem += N*sizeof(int);
//Allocate Temporary for split functions
d_num_selected_out = new MLCommon::device_buffer<int>(handle.getDeviceAllocator(), stream, 1);
d_flags_left = new MLCommon::device_buffer<char>(handle.getDeviceAllocator(), stream, N);
d_flags_right = new MLCommon::device_buffer<char>(handle.getDeviceAllocator(), stream, N);
temprowids = new MLCommon::device_buffer<int>(handle.getDeviceAllocator(), stream, N);
question_value = new MLCommon::device_buffer<T>(handle.getDeviceAllocator(), stream, 1);
cub::DeviceSelect::Flagged(d_split_temp_storage, split_temp_storage_bytes, temprowids->data(), d_flags_left->data(), temprowids->data(), d_num_selected_out->data(), N);
d_split_temp_storage = new MLCommon::device_buffer<char>(handle.getDeviceAllocator(), stream, split_temp_storage_bytes);
totalmem += split_temp_storage_bytes + (N + 1)*sizeof(int) + 2*N*sizeof(char) + sizeof(T);
h_histout = new MLCommon::host_buffer<int>(handle.getHostAllocator(), stream, n_hist_elements * Ncols);
d_globalminmax = new MLCommon::device_buffer<T>(handle.getDeviceAllocator(), stream, Ncols * 2);
d_histout = new MLCommon::device_buffer<int>(handle.getDeviceAllocator(), stream, n_hist_elements * Ncols);
d_colids = new MLCommon::device_buffer<int>(handle.getDeviceAllocator(), stream, Ncols);
totalmem += (n_hist_elements * sizeof(int) + sizeof(int) + 2*sizeof(T))* Ncols;
}
template<class T>
void TemporaryMemory<T>::print_info()
{
std::cout <<" Inside the print_info function \n" << std::flush;
std::cout << " Total temporary memory usage--> "<< ((double)totalmem/ (1024*1024)) << " MB" << std::endl;
return;
}
template<class T>
TemporaryMemory<T>::~TemporaryMemory()
{
h_hist->release(stream);
d_hist->release(stream);
nrowsleftright->release(stream);
temp_data->release(stream);
delete h_hist;
delete d_hist;
delete temp_data;
if (d_quantile != nullptr) {
d_quantile->release(stream);
delete d_quantile;
}
if (d_temp_sampledcolumn != nullptr) {
d_temp_sampledcolumn->release(stream);
delete d_temp_sampledcolumn;
}
sampledlabels->release(stream);
d_split_temp_storage->release(stream);
d_num_selected_out->release(stream);
d_flags_left->release(stream);
d_flags_right->release(stream);
temprowids->release(stream);
question_value->release(stream);
h_histout->release(stream);
delete sampledlabels;
delete d_split_temp_storage;
delete d_num_selected_out;
delete d_flags_left;
delete d_flags_right;
delete temprowids;
delete question_value;
delete h_histout;
d_globalminmax->release(stream);
d_histout->release(stream);
d_colids->release(stream);
delete d_globalminmax;
delete d_histout;
delete d_colids;
}
|
86a7503b439afe905f513c329dfb9593935084b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
template <typename scalar_t>
static __global__ void fused_bias_act_kernel(scalar_t* out, const scalar_t* p_x, const scalar_t* p_b, const scalar_t* p_ref,
int act, int grad, scalar_t alpha, scalar_t scale, int loop_x, int size_x, int step_b, int size_b, int use_bias, int use_ref) {
int xi = blockIdx.x * loop_x * blockDim.x + threadIdx.x;
scalar_t zero = 0.0;
for (int loop_idx = 0; loop_idx < loop_x && xi < size_x; loop_idx++, xi += blockDim.x) {
scalar_t x = p_x[xi];
if (use_bias) {
x += p_b[(xi / step_b) % size_b];
}
scalar_t ref = use_ref ? p_ref[xi] : zero;
scalar_t y;
switch (act * 10 + grad) {
default:
case 10: y = x; break;
case 11: y = x; break;
case 12: y = 0.0; break;
case 30: y = (x > 0.0) ? x : x * alpha; break;
case 31: y = (ref > 0.0) ? x : x * alpha; break;
case 32: y = 0.0; break;
}
out[xi] = y * scale;
}
}
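// Dispatch example (illustrative): act = 3 with grad = 0 selects case 30, the
// leaky-ReLU forward pass y = x > 0 ? x : alpha * x; act = 3 with grad = 1
// selects case 31, which gates the incoming gradient x by the sign of the
// saved forward reference `ref` instead.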
torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, int act, int grad, float alpha, float scale) {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
auto x = input.contiguous();
auto b = bias.contiguous();
auto ref = refer.contiguous();
int use_bias = b.numel() ? 1 : 0;
int use_ref = ref.numel() ? 1 : 0;
int size_x = x.numel();
int size_b = b.numel();
int step_b = 1;
for (int i = 1 + 1; i < x.dim(); i++) {
step_b *= x.size(i);
}
int loop_x = 4;
int block_size = 4 * 32;
int grid_size = (size_x - 1) / (loop_x * block_size) + 1;
auto y = torch::empty_like(x);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "fused_bias_act_kernel", [&] {
hipLaunchKernelGGL(( fused_bias_act_kernel<scalar_t>), dim3(grid_size), dim3(block_size), 0, stream,
y.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
b.data_ptr<scalar_t>(),
ref.data_ptr<scalar_t>(),
act,
grad,
alpha,
scale,
loop_x,
size_x,
step_b,
size_b,
use_bias,
use_ref
);
});
return y;
} | 86a7503b439afe905f513c329dfb9593935084b0.cu |
template <typename scalar_t>
static __global__ void fused_bias_act_kernel(scalar_t* out, const scalar_t* p_x, const scalar_t* p_b, const scalar_t* p_ref,
int act, int grad, scalar_t alpha, scalar_t scale, int loop_x, int size_x, int step_b, int size_b, int use_bias, int use_ref) {
int xi = blockIdx.x * loop_x * blockDim.x + threadIdx.x;
scalar_t zero = 0.0;
for (int loop_idx = 0; loop_idx < loop_x && xi < size_x; loop_idx++, xi += blockDim.x) {
scalar_t x = p_x[xi];
if (use_bias) {
x += p_b[(xi / step_b) % size_b];
}
scalar_t ref = use_ref ? p_ref[xi] : zero;
scalar_t y;
switch (act * 10 + grad) {
default:
case 10: y = x; break;
case 11: y = x; break;
case 12: y = 0.0; break;
case 30: y = (x > 0.0) ? x : x * alpha; break;
case 31: y = (ref > 0.0) ? x : x * alpha; break;
case 32: y = 0.0; break;
}
out[xi] = y * scale;
}
}
torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, int act, int grad, float alpha, float scale) {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
auto x = input.contiguous();
auto b = bias.contiguous();
auto ref = refer.contiguous();
int use_bias = b.numel() ? 1 : 0;
int use_ref = ref.numel() ? 1 : 0;
int size_x = x.numel();
int size_b = b.numel();
int step_b = 1;
for (int i = 1 + 1; i < x.dim(); i++) {
step_b *= x.size(i);
}
int loop_x = 4;
int block_size = 4 * 32;
int grid_size = (size_x - 1) / (loop_x * block_size) + 1;
auto y = torch::empty_like(x);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "fused_bias_act_kernel", [&] {
fused_bias_act_kernel<scalar_t><<<grid_size, block_size, 0, stream>>>(
y.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
b.data_ptr<scalar_t>(),
ref.data_ptr<scalar_t>(),
act,
grad,
alpha,
scale,
loop_x,
size_x,
step_b,
size_b,
use_bias,
use_ref
);
});
return y;
} |
270f396eb9893e94263a9dd5961dcee6d935c24f.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <algorithm>
#include <chrono>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
//////////////////////////////
// forward
//////////////////////////////
__global__ void kernal_fp32_MaxPooling_Forward(
float const *x_buf,
float *y_buf,
int filter_h_size,
int filter_w_size,
int input_w_size,
int input_h_size,
int output_w_size,
int output_h_size,
int c_size,
int frame_size,
int frame_stride
)
{
int frame_base = threadIdx.x;
int frame_step = blockDim.x;
int x = blockIdx.y * blockDim.y + threadIdx.y;
int y = blockIdx.x;
int c = blockIdx.z * blockDim.z + threadIdx.z;
if (y >= output_h_size || x >= output_w_size) {
return;
}
for ( int frame = frame_base; frame < frame_size; frame += frame_step ) {
// Search for the maximum value in the pooling window
float max_val = -1.0e7f;
for (int fy = 0; fy < filter_h_size; ++fy) {
int iy = y * filter_h_size + fy;
if ( iy < input_h_size ) {
for (int fx = 0; fx < filter_w_size; ++fx) {
int ix = x * filter_w_size + fx;
if ( ix < input_w_size ) {
float sig = x_buf[((c * input_h_size + iy) * input_w_size + ix) * frame_stride + frame];
max_val = max(max_val, sig);
}
}
}
}
//
y_buf[((c * output_h_size + y) * output_w_size + x) * frame_stride + frame] = max_val;
}
}
BBCU_DLL_EXPORT int bbcu_fp32_MaxPooling_Forward
(
float const * dev_x_buf,
float* dev_y_buf,
int filter_h_size,
int filter_w_size,
int input_w_size,
int input_h_size,
int output_w_size,
int output_h_size,
int c_size,
int frame_size,
int frame_stride,
hipStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 block(32, 32, 1);
dim3 grid;
grid.x = output_h_size;
grid.y = (output_w_size + (block.y-1)) / block.y;
grid.z = c_size;
block.x = min(block.x, frame_size);
block.y = min(block.y, output_w_size);
hipLaunchKernelGGL(( kernal_fp32_MaxPooling_Forward), dim3(grid), dim3(block), 0, streamId,
dev_x_buf,
dev_y_buf,
filter_h_size,
filter_w_size,
input_w_size,
input_h_size,
output_w_size,
output_h_size,
c_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
//////////////////////////////
// backward
//////////////////////////////
__global__ void kernal_fp32_MaxPooling_Backward(
float const *x_buf,
float const *y_buf,
float const *dy_buf,
float *dx_buf,
int filter_h_size,
int filter_w_size,
int input_w_size,
int input_h_size,
int output_w_size,
int output_h_size,
int c_size,
int frame_size,
int frame_stride
)
{
int frame_base = threadIdx.x;
int frame_step = blockDim.x;
int x = blockIdx.y * blockDim.y + threadIdx.y;
int y = blockIdx.x;
int c = blockIdx.z * blockDim.z + threadIdx.z;
if (y >= output_h_size || x >= output_w_size) {
return;
}
// Propagate the gradient only where the maximum value occurred
for ( int frame = frame_base; frame < frame_size; frame += frame_step ) {
float out_sig = y_buf[((c * output_h_size + y) * output_w_size + x) * frame_stride + frame];
float grad = dy_buf[((c * output_h_size + y) * output_w_size + x) * frame_stride + frame];
for (int fy = 0; fy < filter_h_size; ++fy) {
int iy = y * filter_h_size + fy;
if ( iy < input_h_size ) {
for (int fx = 0; fx < filter_w_size; ++fx) {
int ix = x * filter_w_size + fx;
if ( ix < input_w_size ) {
float in_sig = x_buf[((c * input_h_size + iy) * input_w_size + ix) * frame_stride + frame];
dx_buf[((c * input_h_size + iy) * input_w_size + ix) * frame_stride + frame] = (in_sig == out_sig) ? grad : 0;
}
}
}
}
}
}
BBCU_DLL_EXPORT int bbcu_fp32_MaxPooling_Backward
(
float const *dev_x_buf,
float const *dev_y_buf,
float const *dev_dy_buf,
float *dev_dx_buf,
int filter_h_size,
int filter_w_size,
int input_w_size,
int input_h_size,
int output_w_size,
int output_h_size,
int c_size,
int frame_size,
int frame_stride,
hipStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 block(32, 32, 1);
dim3 grid;
grid.x = output_h_size;
grid.y = (output_w_size + (block.y-1)) / block.y;
grid.z = c_size;
block.x = min(block.x, frame_size);
block.y = min(block.y, output_w_size);
hipLaunchKernelGGL(( kernal_fp32_MaxPooling_Backward), dim3(grid), dim3(block), 0, streamId,
dev_x_buf,
dev_y_buf,
dev_dy_buf,
dev_dx_buf,
filter_h_size,
filter_w_size,
input_w_size,
input_h_size,
output_w_size,
output_h_size,
c_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
| 270f396eb9893e94263a9dd5961dcee6d935c24f.cu | #include <iostream>
#include <algorithm>
#include <chrono>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
//////////////////////////////
// forward
//////////////////////////////
__global__ void kernal_fp32_MaxPooling_Forward(
float const *x_buf,
float *y_buf,
int filter_h_size,
int filter_w_size,
int input_w_size,
int input_h_size,
int output_w_size,
int output_h_size,
int c_size,
int frame_size,
int frame_stride
)
{
int frame_base = threadIdx.x;
int frame_step = blockDim.x;
int x = blockIdx.y * blockDim.y + threadIdx.y;
int y = blockIdx.x;
int c = blockIdx.z * blockDim.z + threadIdx.z;
if (y >= output_h_size || x >= output_w_size) {
return;
}
for ( int frame = frame_base; frame < frame_size; frame += frame_step ) {
// Search for the maximum value in the pooling window
float max_val = -1.0e7f;
for (int fy = 0; fy < filter_h_size; ++fy) {
int iy = y * filter_h_size + fy;
if ( iy < input_h_size ) {
for (int fx = 0; fx < filter_w_size; ++fx) {
int ix = x * filter_w_size + fx;
if ( ix < input_w_size ) {
float sig = x_buf[((c * input_h_size + iy) * input_w_size + ix) * frame_stride + frame];
max_val = max(max_val, sig);
}
}
}
}
// write the output
y_buf[((c * output_h_size + y) * output_w_size + x) * frame_stride + frame] = max_val;
}
}
BBCU_DLL_EXPORT int bbcu_fp32_MaxPooling_Forward
(
float const * dev_x_buf,
float* dev_y_buf,
int filter_h_size,
int filter_w_size,
int input_w_size,
int input_h_size,
int output_w_size,
int output_h_size,
int c_size,
int frame_size,
int frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 block(32, 32, 1);
dim3 grid;
grid.x = output_h_size;
grid.y = (output_w_size + (block.y-1)) / block.y;
grid.z = c_size;
block.x = min(block.x, frame_size);
block.y = min(block.y, output_w_size);
kernal_fp32_MaxPooling_Forward<<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
filter_h_size,
filter_w_size,
input_w_size,
input_h_size,
output_w_size,
output_h_size,
c_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
//////////////////////////////
// backward
//////////////////////////////
__global__ void kernal_fp32_MaxPooling_Backward(
float const *x_buf,
float const *y_buf,
float const *dy_buf,
float *dx_buf,
int filter_h_size,
int filter_w_size,
int input_w_size,
int input_h_size,
int output_w_size,
int output_h_size,
int c_size,
int frame_size,
int frame_stride
)
{
int frame_base = threadIdx.x;
int frame_step = blockDim.x;
int x = blockIdx.y * blockDim.y + threadIdx.y;
int y = blockIdx.x;
int c = blockIdx.z * blockDim.z + threadIdx.z;
if (y >= output_h_size || x >= output_w_size) {
return;
}
// propagate the gradient only where the input equals the pooled maximum
for ( int frame = frame_base; frame < frame_size; frame += frame_step ) {
float out_sig = y_buf[((c * output_h_size + y) * output_w_size + x) * frame_stride + frame];
float grad = dy_buf[((c * output_h_size + y) * output_w_size + x) * frame_stride + frame];
for (int fy = 0; fy < filter_h_size; ++fy) {
int iy = y * filter_h_size + fy;
if ( iy < input_h_size ) {
for (int fx = 0; fx < filter_w_size; ++fx) {
int ix = x * filter_w_size + fx;
if ( ix < input_w_size ) {
float in_sig = x_buf[((c * input_h_size + iy) * input_w_size + ix) * frame_stride + frame];
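// Route the gradient back to every input pixel whose value equals the pooled maximum;
// if several inputs tie for the max, each of them receives the full gradient (no splitting).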
dx_buf[((c * input_h_size + iy) * input_w_size + ix) * frame_stride + frame] = (in_sig == out_sig) ? grad : 0;
}
}
}
}
}
}
BBCU_DLL_EXPORT int bbcu_fp32_MaxPooling_Backward
(
float const *dev_x_buf,
float const *dev_y_buf,
float const *dev_dy_buf,
float *dev_dx_buf,
int filter_h_size,
int filter_w_size,
int input_w_size,
int input_h_size,
int output_w_size,
int output_h_size,
int c_size,
int frame_size,
int frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 block(32, 32, 1);
dim3 grid;
grid.x = output_h_size;
grid.y = (output_w_size + (block.y-1)) / block.y;
grid.z = c_size;
block.x = min(block.x, frame_size);
block.y = min(block.y, output_w_size);
kernal_fp32_MaxPooling_Backward<<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
dev_dy_buf,
dev_dx_buf,
filter_h_size,
filter_w_size,
input_w_size,
input_h_size,
output_w_size,
output_h_size,
c_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
|
ea14b6d5fb2bdf23542dbfad203122df29121e3a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO: __global__
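// Naive (Hillis-Steele) scan step: on pass d each element adds the value located
// 2^(d-1) slots to its left, so after ilog2ceil(n) passes the buffer holds an
// inclusive prefix sum. Worked example for [3, 1, 7, 0]:
//   offset 1 -> [3, 4, 8, 7], offset 2 -> [3, 4, 11, 11] (inclusive scan).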
__global__ void scan(int N, int power, int *dev_oDataArray, int *dev_iDataArray) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) return;
dev_oDataArray[index] = (index >= power) ? dev_iDataArray[index - power] + dev_iDataArray[index] : dev_iDataArray[index];
}
__global__ void kernExclusiveFromInclusive(int N, int *dev_oDataArray, int *dev_iDataArray) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) return;
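// Shift the inclusive scan right by one and write the identity (0) at index 0,
// turning it into an exclusive scan.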
dev_oDataArray[index] = (index == 0) ? 0 : dev_iDataArray[index - 1];
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// Defining the configuration of the kernel
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
dim3 threadsPerBlock(blockSize);
int size = n * sizeof(int);
// Creating array buffers on the device memory
int *dev_oDataArray, *dev_iDataArray;
hipMalloc((void**)&dev_oDataArray, size);
checkCUDAError("hipMalloc dev_oDataArray failed!");
hipMalloc((void**)&dev_iDataArray, size);
checkCUDAError("hipMalloc dev_iDataArray failed!");
// Copying array buffers from Host to Device
hipMemcpy(dev_iDataArray, idata, size, hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy dev_iDataArray failed!");
timer().startGpuTimer();
// TODO
int dimension = ilog2ceil(n);
for (int d = 1; d <= dimension; ++d) {
// Power of 2^(d-1)
int power = 1 << (d - 1);
hipLaunchKernelGGL(( scan) , dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, n, power, dev_oDataArray, dev_iDataArray);
checkCUDAError("scan kernel failed!");
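// Ping-pong the two device buffers so the freshly written result becomes the input of the next pass.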
std::swap(dev_oDataArray, dev_iDataArray);
}
// Convert the output data array from inclusive to exclusive
hipLaunchKernelGGL(( kernExclusiveFromInclusive) , dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, n, dev_oDataArray, dev_iDataArray);
timer().endGpuTimer();
//Copying array buffers from Device to Host
hipMemcpy(odata, dev_oDataArray, size, hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy odata failed!");
// Freeing the device memory
hipFree(dev_oDataArray);
hipFree(dev_iDataArray);
}
}
}
| ea14b6d5fb2bdf23542dbfad203122df29121e3a.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO: __global__
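// Naive (Hillis-Steele) scan step: on pass d each element adds the value located
// 2^(d-1) slots to its left, so after ilog2ceil(n) passes the buffer holds an
// inclusive prefix sum. Worked example for [3, 1, 7, 0]:
//   offset 1 -> [3, 4, 8, 7], offset 2 -> [3, 4, 11, 11] (inclusive scan).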
__global__ void scan(int N, int power, int *dev_oDataArray, int *dev_iDataArray) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) return;
dev_oDataArray[index] = (index >= power) ? dev_iDataArray[index - power] + dev_iDataArray[index] : dev_iDataArray[index];
}
__global__ void kernExclusiveFromInclusive(int N, int *dev_oDataArray, int *dev_iDataArray) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) return;
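// Shift the inclusive scan right by one and write the identity (0) at index 0,
// turning it into an exclusive scan.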
dev_oDataArray[index] = (index == 0) ? 0 : dev_iDataArray[index - 1];
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// Defining the configuration of the kernel
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
dim3 threadsPerBlock(blockSize);
int size = n * sizeof(int);
// Creating array buffers on the device memory
int *dev_oDataArray, *dev_iDataArray;
cudaMalloc((void**)&dev_oDataArray, size);
checkCUDAError("cudaMalloc dev_oDataArray failed!");
cudaMalloc((void**)&dev_iDataArray, size);
checkCUDAError("cudaMalloc dev_iDataArray failed!");
// Copying array buffers from Host to Device
cudaMemcpy(dev_iDataArray, idata, size, cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy dev_iDataArray failed!");
timer().startGpuTimer();
// TODO
int dimension = ilog2ceil(n);
for (int d = 1; d <= dimension; ++d) {
// Power of 2^(d-1)
int power = 1 << (d - 1);
scan <<<fullBlocksPerGrid, threadsPerBlock>>> (n, power, dev_oDataArray, dev_iDataArray);
checkCUDAError("scan kernel failed!");
std::swap(dev_oDataArray, dev_iDataArray);
}
// Convert the output data array from inclusive to exclusive
kernExclusiveFromInclusive<<<fullBlocksPerGrid, threadsPerBlock>>>(n, dev_oDataArray, dev_iDataArray);
timer().endGpuTimer();
//Copying array buffers from Device to Host
cudaMemcpy(odata, dev_oDataArray, size, cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy odata failed!");
// Freeing the device memory
cudaFree(dev_oDataArray);
cudaFree(dev_iDataArray);
}
}
}
|
3a4a8806d578a957ced7fd7ded140c9283c57b65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <iostream>
#include <cmath>
#include <memory>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
#include "process_split.h"
#include "border_conditions.h"
#include "send_recv_mpi.h"
TimersArray timers;
int rank = -1;
int N, K;
int dx, dy, dz;
double hx, hy, hz, tau;
const int LAYERS = 3;
cell_info_t params;
__host__ __device__ double phi_func(double x, double y, double z) {
return sin(3 * x) * cos(2 * y) * sin(z);
}
__host__ __device__ double u_func(double x, double y, double z, double t) {
return cos(sqrt(14.0) * t) * phi_func(x, y, z);
}
__host__ __device__ double f_func(double x, double y, double z, double t) {
return 0;
}
/*
__host__ __device__ double phi_func(double x, double y, double z) {
return sin(3 * x) * cos(2 * y) * sin(z);
}
__host__ __device__ double u_func(double x, double y, double z, double t) {
return ( 1 + pow(t, 3.0) ) * phi_func(x, y, z);
}
__host__ __device__ double f_func(double x, double y, double z, double t) {
return ( 6 * t + 14 * ( 1 + pow(t, 3.0) ) ) * phi_func(x, y, z);
}
*/
struct EstimateError {
double mse;
double max;
EstimateError() : mse(0), max(0) {}
};
__global__ void cuda_task_init(double *data, cell_info_t params)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
if (i >= params.dx || j >= params.dy || k >= params.dz)
return;
double x = (params.i_min + i - 1) * params.hx;
double y = (params.j_min + j - 1) * params.hy;
double z = (params.k_min + k - 1) * params.hz;
data[index(i, j, k, params)] = phi_func(x, y, z);
}
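// Explicit second-order (leapfrog) update of the wave equation:
//   u^{n+1} = 2*u^n - u^{n-1} + tau^2 * (laplace(u^n) + f),
// with a Taylor start-up step at n == 1 that assumes a zero initial time derivative
// (which holds for the analytic solution u = cos(sqrt(14)*t) * phi).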
__global__ void cuda_task_iter(double *p_next, double *p_curr, double *p_prev, int n, cell_info_t params)
{
int i = blockDim.x * blockIdx.x + threadIdx.x + 1;
int j = blockDim.y * blockIdx.y + threadIdx.y + 1;
int k = blockDim.z * blockIdx.z + threadIdx.z + 1;
// skip the right halo (exchange) region and out-of-bounds indices
if (i >= params.dx - 1 || j >= params.dy - 1 || k >= params.dz - 1)
return;
bool is_first[ndim], is_last[ndim];
unpack_fl_mask(is_first, is_last, params.fl_mask);
char border_conditions[ndim];
unpack_bc_mask(border_conditions, params.bc_mask);
// skip the boundary regions
if (is_first[0] && i == 1 || (border_conditions[0] != 2) && is_last[0] && i == params.dx - 2)
return;
if (is_first[1] && j == 1 || (border_conditions[1] != 2) && is_last[1] && j == params.dy - 2)
return;
if (is_first[2] && k == 1 || (border_conditions[2] != 2) && is_last[2] && k == params.dz - 2)
return;
int p = index(i, j, k, params);
double x = (params.i_min + i - 1) * params.hx;
double y = (params.j_min + j - 1) * params.hy;
double z = (params.k_min + k - 1) * params.hz;
if (n == 1) {
// fill for t = t1;
double f_value = f_func(x, y, z, 0);
p_next[p] = p_curr[p] + 0.5 * params.tau * params.tau * \
( laplace(p_curr, i, j, k, params) + f_value );
} else {
// fill for all remaining t;
double f_value = f_func(x, y, z, (n - 1) * params.tau);
p_next[p] = 2 * p_curr[p] - p_prev[p] + params.tau * params.tau * \
( laplace(p_curr, i, j, k, params) + f_value );
}
}
__global__ void cuda_mse_error(double *err, const double *data, cell_info_t params, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x + 1;
int j = blockDim.y * blockIdx.y + threadIdx.y + 1;
int k = blockDim.z * blockIdx.z + threadIdx.z + 1;
// skip the right halo (exchange) region and out-of-bounds indices
if (i >= params.dx - 1 || j >= params.dy - 1 || k >= params.dz - 1)
return;
int p = (i - 1) + (j - 1) * (params.dx - 2) + (k - 1) * (params.dx - 2) * (params.dy - 2);
double x = (params.i_min + i - 1) * params.hx;
double y = (params.j_min + j - 1) * params.hy;
double z = (params.k_min + k - 1) * params.hz;
double u_true = u_func(x, y, z, n * params.tau);
err[p] = pow(data[index(i, j, k, params)] - u_true, 2.0);
}
__global__ void cuda_max_error(double *err, const double *data, cell_info_t params, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x + 1;
int j = blockDim.y * blockIdx.y + threadIdx.y + 1;
int k = blockDim.z * blockIdx.z + threadIdx.z + 1;
// skip the right halo (exchange) region and out-of-bounds indices
if (i >= params.dx - 1 || j >= params.dy - 1 || k >= params.dz - 1)
return;
int p = (i - 1) + (j - 1) * (params.dx - 2) + (k - 1) * (params.dx - 2) * (params.dy - 2);
double x = (params.i_min + i - 1) * params.hx;
double y = (params.j_min + j - 1) * params.hy;
double z = (params.k_min + k - 1) * params.hz;
double u_true = u_func(x, y, z, n * params.tau);
double value = data[index(i, j, k, params)] - u_true;
err[p] = (value < 0) ? -value : value;
}
int main(int argc, char **argv)
{
// initialize MPI and determine the rank of the process
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
timers.total.start();
timers.init.start();
N = (argc > 1) ? std::atoi(argv[1]) : 256;
K = (argc > 2) ? std::atoi(argv[2]) : 20;
const int nproc = (argc > 3) ? std::atoi(argv[3]) : 1;
const bool is_compute = (argc > 4) ? std::atoi(argv[4]) : 0;
hx = hy = hz = 16 * M_PI / (N - 1);
tau = 0.004;
char border_conditions[ndim];
border_conditions[0] = 1;
border_conditions[1] = 2;
border_conditions[2] = 1;
// number of processes along each axis of the grid
int dims[ndim];
split_processes_by_grid(dims, ndim, nproc);
// the grid is periodic (needed to set up the boundary conditions)
int periods[ndim];
for (int d = 0; d < ndim; d++)
periods[d] = 1;
// number of grid nodes per process along each axis
int nodes[ndim];
for (int d = 0; d < ndim; d++) {
nodes[d] = (int) ceil(N / (double) dims[d]);
if (!nodes[d]) {
std::cerr << "[ERROR] Invalid grid split" << std::endl;
return 1;
}
}
// distribute the processes evenly across the GPUs
int num_cuda_devices;
hipGetDeviceCount(&num_cuda_devices);
hipSetDevice(rank % num_cuda_devices);
// int curr_cuda_device;
// hipGetDevice(&curr_cuda_device);
// std::cout << rank << ' ' << curr_cuda_device << ' ' << std::endl;
// print information about the decomposition
if (!rank) {
std::cout << N << ' ' << K << ' ' << nproc << std::endl;
for (int d = 0; d < ndim; d++) {
std::cout << "axis" << d << '\t'
<< dims[d] << '\t' << nodes[d] << std::endl;
}
std::cout << "Number of cuda devices: " << num_cuda_devices << std::endl;
}
// create the Cartesian topology
MPI_Comm comm_cart;
MPI_Cart_create(MPI_COMM_WORLD, ndim, dims, periods, 0, &comm_cart);
// coordinates of the process in the Cartesian grid
int coords[ndim];
MPI_Cart_coords(comm_cart, rank, ndim, coords);
// compute the neighbours of the process along each axis
int rank_prev[ndim], rank_next[ndim];
for (int d = 0; d < ndim; d++) {
MPI_Cart_shift(comm_cart, d, +1, &rank_prev[d], &rank_next[d]);
}
// flags indicating whether the process is the first and/or the last one along each axis
bool is_first[ndim], is_last[ndim];
for (int d = 0; d < ndim; d++) {
is_first[d] = (!coords[d]);
is_last[d] = (coords[d] == dims[d] - 1);
}
// minimum and maximum working indices
const int i_min = coords[0] * nodes[0], i_max = std::min(N, (coords[0] + 1) * nodes[0]) - 1;
const int j_min = coords[1] * nodes[1], j_max = std::min(N, (coords[1] + 1) * nodes[1]) - 1;
const int k_min = coords[2] * nodes[2], k_max = std::min(N, (coords[2] + 1) * nodes[2]) - 1;
// width of the region in index space
// besides the working cells we also store exchange (halo) regions, two per axis
dx = i_max - i_min + 1 + 2;
dy = j_max - j_min + 1 + 2;
dz = k_max - k_min + 1 + 2;
params.dx = dx;
params.dy = dy;
params.dz = dz;
params.hx = hx;
params.hy = hy;
params.hz = hz;
params.tau = tau;
params.i_min = i_min;
params.j_min = j_min;
params.k_min = k_min;
params.fl_mask = pack_fl_mask(is_first, is_last);
params.bc_mask = pack_bc_mask(border_conditions);
// error accounting
EstimateError error_cumm, error_curr, error_proc;
hipError_t err;
// allocate memory on the GPU
double *u_data[LAYERS], *u_error;
for (int p = 0; p < LAYERS; p++)
hipMalloc((void **) &u_data[p], sizeof(double) * dx * dy * dz);
hipMalloc((void **) &u_error, sizeof(double) * (dx - 2) * (dy - 2) * (dz - 2));
err = hipGetLastError();
if (err != hipSuccess) {
printf("Memory GPU allocation failed.\n");
printf("Error: %s\n", hipGetErrorString(err));
}
timers.init.pause();
// start timing
MPITimer timer;
timer.start();
// define the GPU decomposition (halo regions are filled but not computed)
dim3 threads(8, 8, 8);
dim3 blocks(split(dx - 2, threads.x), split(dy - 2, threads.y), split(dz - 2, threads.z));
// fill for t = t0
{
dim3 blocks(split(dx, threads.x), split(dy, threads.y), split(dz, threads.z));
hipLaunchKernelGGL(( cuda_task_init) , dim3(blocks), dim3(threads) , 0, 0, u_data[0], params);
}
err = hipGetLastError();
if (err != hipSuccess) {
printf("Function cuda_task_init has failed.\n");
printf("Error: %s\n", hipGetErrorString(err));
}
hipDeviceSynchronize();
if (is_compute) {
TimerScopePauseCallback callback(timer);
error_curr.mse = 0;
error_curr.max = 0;
hipLaunchKernelGGL(( cuda_mse_error) , dim3(blocks), dim3(threads) , 0, 0, u_error, u_data[0], params, 0);
hipDeviceSynchronize();
error_proc.mse = thrust::reduce(
thrust::device,
u_error, u_error + (dx - 2) * (dy - 2) * (dz - 2),
0.0, thrust::plus<double>()
);
hipLaunchKernelGGL(( cuda_max_error) , dim3(blocks), dim3(threads) , 0, 0, u_error, u_data[0], params, 0);
hipDeviceSynchronize();
error_proc.max = thrust::reduce(
thrust::device,
u_error, u_error + (dx - 2) * (dy - 2) * (dz - 2),
0.0, thrust::maximum<double>()
);
MPI_Reduce(&error_proc.mse, &error_curr.mse, 1, MPI_DOUBLE, MPI_SUM, 0, comm_cart);
MPI_Reduce(&error_proc.max, &error_curr.max, 1, MPI_DOUBLE, MPI_MAX, 0, comm_cart);
if (!rank) {
error_curr.mse /= pow(N, 3);
error_cumm.mse += error_curr.mse;
if (error_curr.max > error_cumm.max)
error_cumm.max = error_curr.max;
}
}
if (!rank) {
printf("[iter %03d]", 0);
if (is_compute)
printf(" RMSE = %.6f; MAX = %.6f;", sqrt(error_curr.mse), error_curr.max);
printf(" Time = %.6f sec.\n", timer.delta());
}
// fill for the remaining time steps t
for (int n = 1; n < K; n++) {
hipLaunchKernelGGL(( cuda_task_iter) , dim3(blocks), dim3(threads) , 0, 0,
u_data[n % LAYERS], u_data[(n - 1) % LAYERS], u_data[(n - 2) % LAYERS], n, params);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
printf("Function cuda_task_iter has failed. Current iteration: %d\n", n);
printf("Error: %s\n", hipGetErrorString(err));
}
// exchange halo regions between processes along the X axis
send_recv_forward_x (u_data[n % LAYERS], comm_cart, rank_prev[0], rank_next[0], is_first[0], is_last[0]);
send_recv_backward_x(u_data[n % LAYERS], comm_cart, rank_prev[0], rank_next[0], is_first[0], is_last[0]);
// exchange halo regions between processes along the Y axis
send_recv_forward_y (u_data[n % LAYERS], comm_cart, rank_prev[1], rank_next[1], is_first[1], is_last[1]);
send_recv_backward_y(u_data[n % LAYERS], comm_cart, rank_prev[1], rank_next[1], is_first[1], is_last[1]);
// exchange halo regions between processes along the Z axis
send_recv_forward_z (u_data[n % LAYERS], comm_cart, rank_prev[2], rank_next[2], is_first[2], is_last[2]);
send_recv_backward_z(u_data[n % LAYERS], comm_cart, rank_prev[2], rank_next[2], is_first[2], is_last[2]);
hipDeviceSynchronize();
{
CudaScopeTimerCallback cb(&timers.copy);
switch (border_conditions[0]) {
case 1: border_condition_1st_x(u_data[n % LAYERS], is_first[0], is_last[0]); break;
case 2: border_condition_2nd_x(u_data[n % LAYERS], is_first[0], is_last[0]); break;
default: ;
}
switch (border_conditions[1]) {
case 1: border_condition_1st_y(u_data[n % LAYERS], is_first[1], is_last[1]); break;
case 2: border_condition_2nd_y(u_data[n % LAYERS], is_first[1], is_last[1]); break;
default: ;
}
switch (border_conditions[2]) {
case 1: border_condition_1st_z(u_data[n % LAYERS], is_first[2], is_last[2]); break;
case 2: border_condition_2nd_z(u_data[n % LAYERS], is_first[2], is_last[2]); break;
default: ;
}
}
hipDeviceSynchronize();
if (is_compute) {
TimerScopePauseCallback callback(timer);
error_curr.mse = 0;
error_curr.max = 0;
hipLaunchKernelGGL(( cuda_mse_error) , dim3(blocks), dim3(threads) , 0, 0, u_error, u_data[n % LAYERS], params, n);
hipDeviceSynchronize();
error_proc.mse = thrust::reduce(
thrust::device,
u_error, u_error + (dx - 2) * (dy - 2) * (dz - 2),
0.0, thrust::plus<double>()
);
hipLaunchKernelGGL(( cuda_max_error) , dim3(blocks), dim3(threads) , 0, 0, u_error, u_data[n % LAYERS], params, n);
hipDeviceSynchronize();
error_proc.max = thrust::reduce(
thrust::device,
u_error, u_error + (dx - 2) * (dy - 2) * (dz - 2),
0.0, thrust::maximum<double>()
);
MPI_Reduce(&error_proc.mse, &error_curr.mse, 1, MPI_DOUBLE, MPI_SUM, 0, comm_cart);
MPI_Reduce(&error_proc.max, &error_curr.max, 1, MPI_DOUBLE, MPI_MAX, 0, comm_cart);
if (!rank) {
error_curr.mse /= pow(N, 3);
error_cumm.mse += error_curr.mse;
if (error_curr.max > error_cumm.max)
error_cumm.max = error_curr.max;
}
}
if (!rank) {
printf("[iter %03d]", n);
if (is_compute)
printf(" RMSE = %.6f; MAX = %.6f;", sqrt(error_curr.mse), error_curr.max);
printf(" Time = %.6f sec.\n", timer.delta());
}
}
timer.pause();
if (!rank) {
if (is_compute)
printf("Final RMSE = %.6f; MAX = %.6f\n", sqrt(error_cumm.mse / K), error_cumm.max);
printf("Task elapsed in: %.6f sec.\n", timer.delta());
}
timers.free.start();
// free the memory
for (int p = 0; p < LAYERS; p++)
hipFree(u_data[p]);
hipFree(u_error);
timers.free.pause();
timers.total.pause();
MPI_Finalize();
if (!rank) {
printf("\n");
printf("Time total: %.6f\n", timers.total.delta());
printf("Time init: %.6f\n", timers.init.delta());
printf("Time logic: %.6f\n", timer.delta());
printf("Time sendrecv: %.6f\n", timers.sendrecv.delta());
printf("Time copy: %.6f\n", timers.copy);
printf("Time free: %.6f\n", timers.free.delta());
}
return 0;
}
| 3a4a8806d578a957ced7fd7ded140c9283c57b65.cu | #include <cstdio>
#include <iostream>
#include <cmath>
#include <memory>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
#include "process_split.h"
#include "border_conditions.h"
#include "send_recv_mpi.h"
TimersArray timers;
int rank = -1;
int N, K;
int dx, dy, dz;
double hx, hy, hz, tau;
const int LAYERS = 3;
cell_info_t params;
__host__ __device__ double phi_func(double x, double y, double z) {
return sin(3 * x) * cos(2 * y) * sin(z);
}
__host__ __device__ double u_func(double x, double y, double z, double t) {
return cos(sqrt(14.0) * t) * phi_func(x, y, z);
}
__host__ __device__ double f_func(double x, double y, double z, double t) {
return 0;
}
/*
__host__ __device__ double phi_func(double x, double y, double z) {
return sin(3 * x) * cos(2 * y) * sin(z);
}
__host__ __device__ double u_func(double x, double y, double z, double t) {
return ( 1 + pow(t, 3.0) ) * phi_func(x, y, z);
}
__host__ __device__ double f_func(double x, double y, double z, double t) {
return ( 6 * t + 14 * ( 1 + pow(t, 3.0) ) ) * phi_func(x, y, z);
}
*/
struct EstimateError {
double mse;
double max;
EstimateError() : mse(0), max(0) {}
};
__global__ void cuda_task_init(double *data, cell_info_t params)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
if (i >= params.dx || j >= params.dy || k >= params.dz)
return;
double x = (params.i_min + i - 1) * params.hx;
double y = (params.j_min + j - 1) * params.hy;
double z = (params.k_min + k - 1) * params.hz;
data[index(i, j, k, params)] = phi_func(x, y, z);
}
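// Explicit second-order (leapfrog) update of the wave equation:
//   u^{n+1} = 2*u^n - u^{n-1} + tau^2 * (laplace(u^n) + f),
// with a Taylor start-up step at n == 1 that assumes a zero initial time derivative
// (which holds for the analytic solution u = cos(sqrt(14)*t) * phi).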
__global__ void cuda_task_iter(double *p_next, double *p_curr, double *p_prev, int n, cell_info_t params)
{
int i = blockDim.x * blockIdx.x + threadIdx.x + 1;
int j = blockDim.y * blockIdx.y + threadIdx.y + 1;
int k = blockDim.z * blockIdx.z + threadIdx.z + 1;
// skip the right halo (exchange) region and out-of-bounds indices
if (i >= params.dx - 1 || j >= params.dy - 1 || k >= params.dz - 1)
return;
bool is_first[ndim], is_last[ndim];
unpack_fl_mask(is_first, is_last, params.fl_mask);
char border_conditions[ndim];
unpack_bc_mask(border_conditions, params.bc_mask);
// skip the boundary regions
if (is_first[0] && i == 1 || (border_conditions[0] != 2) && is_last[0] && i == params.dx - 2)
return;
if (is_first[1] && j == 1 || (border_conditions[1] != 2) && is_last[1] && j == params.dy - 2)
return;
if (is_first[2] && k == 1 || (border_conditions[2] != 2) && is_last[2] && k == params.dz - 2)
return;
int p = index(i, j, k, params);
double x = (params.i_min + i - 1) * params.hx;
double y = (params.j_min + j - 1) * params.hy;
double z = (params.k_min + k - 1) * params.hz;
if (n == 1) {
// fill for t = t1;
double f_value = f_func(x, y, z, 0);
p_next[p] = p_curr[p] + 0.5 * params.tau * params.tau * \
( laplace(p_curr, i, j, k, params) + f_value );
} else {
// fill for all remaining t;
double f_value = f_func(x, y, z, (n - 1) * params.tau);
p_next[p] = 2 * p_curr[p] - p_prev[p] + params.tau * params.tau * \
( laplace(p_curr, i, j, k, params) + f_value );
}
}
__global__ void cuda_mse_error(double *err, const double *data, cell_info_t params, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x + 1;
int j = blockDim.y * blockIdx.y + threadIdx.y + 1;
int k = blockDim.z * blockIdx.z + threadIdx.z + 1;
// skip the right halo (exchange) region and out-of-bounds indices
if (i >= params.dx - 1 || j >= params.dy - 1 || k >= params.dz - 1)
return;
int p = (i - 1) + (j - 1) * (params.dx - 2) + (k - 1) * (params.dx - 2) * (params.dy - 2);
double x = (params.i_min + i - 1) * params.hx;
double y = (params.j_min + j - 1) * params.hy;
double z = (params.k_min + k - 1) * params.hz;
double u_true = u_func(x, y, z, n * params.tau);
err[p] = pow(data[index(i, j, k, params)] - u_true, 2.0);
}
__global__ void cuda_max_error(double *err, const double *data, cell_info_t params, int n)
{
int i = blockDim.x * blockIdx.x + threadIdx.x + 1;
int j = blockDim.y * blockIdx.y + threadIdx.y + 1;
int k = blockDim.z * blockIdx.z + threadIdx.z + 1;
// skip the right halo (exchange) region and out-of-bounds indices
if (i >= params.dx - 1 || j >= params.dy - 1 || k >= params.dz - 1)
return;
int p = (i - 1) + (j - 1) * (params.dx - 2) + (k - 1) * (params.dx - 2) * (params.dy - 2);
double x = (params.i_min + i - 1) * params.hx;
double y = (params.j_min + j - 1) * params.hy;
double z = (params.k_min + k - 1) * params.hz;
double u_true = u_func(x, y, z, n * params.tau);
double value = data[index(i, j, k, params)] - u_true;
err[p] = (value < 0) ? -value : value;
}
int main(int argc, char **argv)
{
// initialize MPI and determine the rank of the process
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
timers.total.start();
timers.init.start();
N = (argc > 1) ? std::atoi(argv[1]) : 256;
K = (argc > 2) ? std::atoi(argv[2]) : 20;
const int nproc = (argc > 3) ? std::atoi(argv[3]) : 1;
const bool is_compute = (argc > 4) ? std::atoi(argv[4]) : 0;
hx = hy = hz = 16 * M_PI / (N - 1);
tau = 0.004;
char border_conditions[ndim];
border_conditions[0] = 1;
border_conditions[1] = 2;
border_conditions[2] = 1;
// number of processes along each axis of the grid
int dims[ndim];
split_processes_by_grid(dims, ndim, nproc);
// the grid is periodic (needed to set up the boundary conditions)
int periods[ndim];
for (int d = 0; d < ndim; d++)
periods[d] = 1;
// number of grid nodes per process along each axis
int nodes[ndim];
for (int d = 0; d < ndim; d++) {
nodes[d] = (int) ceil(N / (double) dims[d]);
if (!nodes[d]) {
std::cerr << "[ERROR] Invalid grid split" << std::endl;
return 1;
}
}
// distribute the processes evenly across the GPUs
int num_cuda_devices;
cudaGetDeviceCount(&num_cuda_devices);
cudaSetDevice(rank % num_cuda_devices);
// int curr_cuda_device;
// cudaGetDevice(&curr_cuda_device);
// std::cout << rank << ' ' << curr_cuda_device << ' ' << std::endl;
// print information about the decomposition
if (!rank) {
std::cout << N << ' ' << K << ' ' << nproc << std::endl;
for (int d = 0; d < ndim; d++) {
std::cout << "axis" << d << '\t'
<< dims[d] << '\t' << nodes[d] << std::endl;
}
std::cout << "Number of cuda devices: " << num_cuda_devices << std::endl;
}
// create the Cartesian topology
MPI_Comm comm_cart;
MPI_Cart_create(MPI_COMM_WORLD, ndim, dims, periods, 0, &comm_cart);
// coordinates of the process in the Cartesian grid
int coords[ndim];
MPI_Cart_coords(comm_cart, rank, ndim, coords);
// compute the neighbours of the process along each axis
int rank_prev[ndim], rank_next[ndim];
for (int d = 0; d < ndim; d++) {
MPI_Cart_shift(comm_cart, d, +1, &rank_prev[d], &rank_next[d]);
}
// flags indicating whether the process is the first and/or the last one along each axis
bool is_first[ndim], is_last[ndim];
for (int d = 0; d < ndim; d++) {
is_first[d] = (!coords[d]);
is_last[d] = (coords[d] == dims[d] - 1);
}
// minimum and maximum working indices
const int i_min = coords[0] * nodes[0], i_max = std::min(N, (coords[0] + 1) * nodes[0]) - 1;
const int j_min = coords[1] * nodes[1], j_max = std::min(N, (coords[1] + 1) * nodes[1]) - 1;
const int k_min = coords[2] * nodes[2], k_max = std::min(N, (coords[2] + 1) * nodes[2]) - 1;
// width of the region in index space
// besides the working cells we also store exchange (halo) regions, two per axis
dx = i_max - i_min + 1 + 2;
dy = j_max - j_min + 1 + 2;
dz = k_max - k_min + 1 + 2;
params.dx = dx;
params.dy = dy;
params.dz = dz;
params.hx = hx;
params.hy = hy;
params.hz = hz;
params.tau = tau;
params.i_min = i_min;
params.j_min = j_min;
params.k_min = k_min;
params.fl_mask = pack_fl_mask(is_first, is_last);
params.bc_mask = pack_bc_mask(border_conditions);
// error accounting
EstimateError error_cumm, error_curr, error_proc;
cudaError_t err;
// allocate memory on the GPU
double *u_data[LAYERS], *u_error;
for (int p = 0; p < LAYERS; p++)
cudaMalloc((void **) &u_data[p], sizeof(double) * dx * dy * dz);
cudaMalloc((void **) &u_error, sizeof(double) * (dx - 2) * (dy - 2) * (dz - 2));
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("Memory GPU allocation failed.\n");
printf("Error: %s\n", cudaGetErrorString(err));
}
timers.init.pause();
// start timing
MPITimer timer;
timer.start();
// define the GPU decomposition (halo regions are filled but not computed)
dim3 threads(8, 8, 8);
dim3 blocks(split(dx - 2, threads.x), split(dy - 2, threads.y), split(dz - 2, threads.z));
// fill for t = t0
{
dim3 blocks(split(dx, threads.x), split(dy, threads.y), split(dz, threads.z));
cuda_task_init <<< blocks, threads >>> (u_data[0], params);
}
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("Function cuda_task_init has failed.\n");
printf("Error: %s\n", cudaGetErrorString(err));
}
cudaDeviceSynchronize();
if (is_compute) {
TimerScopePauseCallback callback(timer);
error_curr.mse = 0;
error_curr.max = 0;
cuda_mse_error <<< blocks, threads >>> (u_error, u_data[0], params, 0);
cudaDeviceSynchronize();
error_proc.mse = thrust::reduce(
thrust::device,
u_error, u_error + (dx - 2) * (dy - 2) * (dz - 2),
0.0, thrust::plus<double>()
);
cuda_max_error <<< blocks, threads >>> (u_error, u_data[0], params, 0);
cudaDeviceSynchronize();
error_proc.max = thrust::reduce(
thrust::device,
u_error, u_error + (dx - 2) * (dy - 2) * (dz - 2),
0.0, thrust::maximum<double>()
);
MPI_Reduce(&error_proc.mse, &error_curr.mse, 1, MPI_DOUBLE, MPI_SUM, 0, comm_cart);
MPI_Reduce(&error_proc.max, &error_curr.max, 1, MPI_DOUBLE, MPI_MAX, 0, comm_cart);
if (!rank) {
error_curr.mse /= pow(N, 3);
error_cumm.mse += error_curr.mse;
if (error_curr.max > error_cumm.max)
error_cumm.max = error_curr.max;
}
}
if (!rank) {
printf("[iter %03d]", 0);
if (is_compute)
printf(" RMSE = %.6f; MAX = %.6f;", sqrt(error_curr.mse), error_curr.max);
printf(" Time = %.6f sec.\n", timer.delta());
}
// fill for the remaining time steps t
for (int n = 1; n < K; n++) {
cuda_task_iter <<< blocks, threads >>> (
u_data[n % LAYERS], u_data[(n - 1) % LAYERS], u_data[(n - 2) % LAYERS], n, params);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("Function cuda_task_iter has failed. Current iteration: %d\n", n);
printf("Error: %s\n", cudaGetErrorString(err));
}
// exchange halo regions between processes along the X axis
send_recv_forward_x (u_data[n % LAYERS], comm_cart, rank_prev[0], rank_next[0], is_first[0], is_last[0]);
send_recv_backward_x(u_data[n % LAYERS], comm_cart, rank_prev[0], rank_next[0], is_first[0], is_last[0]);
// exchange halo regions between processes along the Y axis
send_recv_forward_y (u_data[n % LAYERS], comm_cart, rank_prev[1], rank_next[1], is_first[1], is_last[1]);
send_recv_backward_y(u_data[n % LAYERS], comm_cart, rank_prev[1], rank_next[1], is_first[1], is_last[1]);
// exchange halo regions between processes along the Z axis
send_recv_forward_z (u_data[n % LAYERS], comm_cart, rank_prev[2], rank_next[2], is_first[2], is_last[2]);
send_recv_backward_z(u_data[n % LAYERS], comm_cart, rank_prev[2], rank_next[2], is_first[2], is_last[2]);
cudaDeviceSynchronize();
{
CudaScopeTimerCallback cb(&timers.copy);
switch (border_conditions[0]) {
case 1: border_condition_1st_x(u_data[n % LAYERS], is_first[0], is_last[0]); break;
case 2: border_condition_2nd_x(u_data[n % LAYERS], is_first[0], is_last[0]); break;
default: ;
}
switch (border_conditions[1]) {
case 1: border_condition_1st_y(u_data[n % LAYERS], is_first[1], is_last[1]); break;
case 2: border_condition_2nd_y(u_data[n % LAYERS], is_first[1], is_last[1]); break;
default: ;
}
switch (border_conditions[2]) {
case 1: border_condition_1st_z(u_data[n % LAYERS], is_first[2], is_last[2]); break;
case 2: border_condition_2nd_z(u_data[n % LAYERS], is_first[2], is_last[2]); break;
default: ;
}
}
cudaDeviceSynchronize();
if (is_compute) {
TimerScopePauseCallback callback(timer);
error_curr.mse = 0;
error_curr.max = 0;
cuda_mse_error <<< blocks, threads >>> (u_error, u_data[n % LAYERS], params, n);
cudaDeviceSynchronize();
error_proc.mse = thrust::reduce(
thrust::device,
u_error, u_error + (dx - 2) * (dy - 2) * (dz - 2),
0.0, thrust::plus<double>()
);
cuda_max_error <<< blocks, threads >>> (u_error, u_data[n % LAYERS], params, n);
cudaDeviceSynchronize();
error_proc.max = thrust::reduce(
thrust::device,
u_error, u_error + (dx - 2) * (dy - 2) * (dz - 2),
0.0, thrust::maximum<double>()
);
MPI_Reduce(&error_proc.mse, &error_curr.mse, 1, MPI_DOUBLE, MPI_SUM, 0, comm_cart);
MPI_Reduce(&error_proc.max, &error_curr.max, 1, MPI_DOUBLE, MPI_MAX, 0, comm_cart);
if (!rank) {
error_curr.mse /= pow(N, 3);
error_cumm.mse += error_curr.mse;
if (error_curr.max > error_cumm.max)
error_cumm.max = error_curr.max;
}
}
if (!rank) {
printf("[iter %03d]", n);
if (is_compute)
printf(" RMSE = %.6f; MAX = %.6f;", sqrt(error_curr.mse), error_curr.max);
printf(" Time = %.6f sec.\n", timer.delta());
}
}
timer.pause();
if (!rank) {
if (is_compute)
printf("Final RMSE = %.6f; MAX = %.6f\n", sqrt(error_cumm.mse / K), error_cumm.max);
printf("Task elapsed in: %.6f sec.\n", timer.delta());
}
timers.free.start();
// free the memory
for (int p = 0; p < LAYERS; p++)
cudaFree(u_data[p]);
cudaFree(u_error);
timers.free.pause();
timers.total.pause();
MPI_Finalize();
if (!rank) {
printf("\n");
printf("Time total: %.6f\n", timers.total.delta());
printf("Time init: %.6f\n", timers.init.delta());
printf("Time logic: %.6f\n", timer.delta());
printf("Time sendrecv: %.6f\n", timers.sendrecv.delta());
printf("Time copy: %.6f\n", timers.copy);
printf("Time free: %.6f\n", timers.free.delta());
}
return 0;
}
|
63d99b53bdcba7fc1cb0c7086e147992ea30d3c5.hip | // !!! This is a file automatically generated by hipify!!!
// SPH.cpp : This file contains the "main" function. Program execution begins and ends here.
//
#include <iostream>
#include <fstream>
#include "glm/glm.hpp"
#include "CUDA_compute.h"
#include "include/input.h"
#include <ctime>
void outputAsCSV(ParticleManager &pm, int index) {
auto str = std::to_string(index / 10);
if (index % 10 == 0) {
std::ofstream out("simulation" + str + ".csv");
out << "x,y,z,v_x,v_y,v_z" << std::endl;
for (auto &flow : pm.fluid) {
auto &p = flow.second.p;
auto &v = flow.second.v;
out << p.x << "," << p.y << "," << p.z <<
"," << v.x << "," << v.y << "," << v.z << std::endl;
}
for (auto &flow : pm.solid) {
auto &p = flow.second.p;
auto &v = flow.second.v;
out << p.x << "," << p.y << "," << p.z <<
"," << v.x << "," << v.y << "," << v.z << std::endl;
}
out.close();
}
/* vec3 res(0.0f);
float x = 0.0f, y = 0.0f, z = 0.0f;
for (auto &par : pm.fluid) {
auto &gp= par.second.gradpre;
x = ::min(x, gp.x);
y = ::min(y, gp.y);
z = ::min(z, gp.z);
}
res = res / (float)pm.fluid.size();
/ std::cout << x << " " << y << " " << z << std::endl;*/
}
//void sampleTest() {
// Grid grid(100, 100, 200, 0.005);
// Cube sp(vec3(-0.4f, -0.4f, 0.0f), vec3(.83f, .0f, .0f), vec3(.0f, .83f, .0f), vec3(.0f, .0f, 0.80f));
// auto vec = grid.sampleSurface(sp, 0.08, 20, 1.0845);
// std::ofstream out("cube_surface.csv");
// out << "x,y,z" << std::endl;
// for (auto &pos : vec)
// out << pos.x << "," << pos.y << "," << pos.z << std::endl;
// out.close();
// std::vector<vec3> seeds{ vec[0] };
// vec = grid.sampleVolume([](vec3 p) {return true; }, sp, seeds, 0.08, 20);
// out.open("cube_inner.csv", std::ios::out);
// out << "x,y,z" << std::endl;
// for (auto &pos : vec)
// out << pos.x << "," << pos.y << "," << pos.z << std::endl;
// out.close();
//}
void output(ParticleManager &pm, std::string path, int type) {
std::ofstream out(path);
out << "x,y,z\n";
if (type == 0)
for (auto &par : pm.p_solid) {
auto pos = par.p;
out << pos.x << "," << pos.y << "," << pos.z << "\n";
}
if (type == 1)
for (auto &par : pm.p_fluid) {
auto pos = par.p;
out << pos.x << "," << pos.y << "," << pos.z << "\n";
}
if (type == 2)
for (auto &par : pm.p_air) {
auto pos = par.p;
out << pos.x << "," << pos.y << "," << pos.z << "\n";
}
out.close();
}
int main()
{
ParticleManager pm(0.008);
unsigned int step;
std::string path;
std::cout << "Please specify the path to input file: ";
std::cin >> path;
std::cout << "Please specify the number of steps: ";
std::cin >> step;
parseInputFile(path, pm);
output(pm, "air.csv", 2);
output(pm, "water.csv", 1);
output(pm, "fluid.csv", 0);
auto points = cudaCompute(pm, step);
//sampleTest();
return 0;
}
| 63d99b53bdcba7fc1cb0c7086e147992ea30d3c5.cu | // SPH.cpp : 此文件包含 "main" 函数。程序执行将在此处开始并结束。
//
#include <iostream>
#include <fstream>
#include "glm/glm.hpp"
#include "CUDA_compute.h"
#include "include/input.h"
#include <ctime>
void outputAsCSV(ParticleManager &pm, int index) {
auto str = std::to_string(index / 10);
if (index % 10 == 0) {
std::ofstream out("simulation" + str + ".csv");
out << "x,y,z,v_x,v_y,v_z" << std::endl;
for (auto &flow : pm.fluid) {
auto &p = flow.second.p;
auto &v = flow.second.v;
out << p.x << "," << p.y << "," << p.z <<
"," << v.x << "," << v.y << "," << v.z << std::endl;
}
for (auto &flow : pm.solid) {
auto &p = flow.second.p;
auto &v = flow.second.v;
out << p.x << "," << p.y << "," << p.z <<
"," << v.x << "," << v.y << "," << v.z << std::endl;
}
out.close();
}
/* vec3 res(0.0f);
float x = 0.0f, y = 0.0f, z = 0.0f;
for (auto &par : pm.fluid) {
auto &gp= par.second.gradpre;
x = std::min(x, gp.x);
y = std::min(y, gp.y);
z = std::min(z, gp.z);
}
res = res / (float)pm.fluid.size();
/ std::cout << x << " " << y << " " << z << std::endl;*/
}
//void sampleTest() {
// Grid grid(100, 100, 200, 0.005);
// Cube sp(vec3(-0.4f, -0.4f, 0.0f), vec3(.83f, .0f, .0f), vec3(.0f, .83f, .0f), vec3(.0f, .0f, 0.80f));
// auto vec = grid.sampleSurface(sp, 0.08, 20, 1.0845);
// std::ofstream out("cube_surface.csv");
// out << "x,y,z" << std::endl;
// for (auto &pos : vec)
// out << pos.x << "," << pos.y << "," << pos.z << std::endl;
// out.close();
// std::vector<vec3> seeds{ vec[0] };
// vec = grid.sampleVolume([](vec3 p) {return true; }, sp, seeds, 0.08, 20);
// out.open("cube_inner.csv", std::ios::out);
// out << "x,y,z" << std::endl;
// for (auto &pos : vec)
// out << pos.x << "," << pos.y << "," << pos.z << std::endl;
// out.close();
//}
void output(ParticleManager &pm, std::string path, int type) {
std::ofstream out(path);
out << "x,y,z\n";
if (type == 0)
for (auto &par : pm.p_solid) {
auto pos = par.p;
out << pos.x << "," << pos.y << "," << pos.z << "\n";
}
if (type == 1)
for (auto &par : pm.p_fluid) {
auto pos = par.p;
out << pos.x << "," << pos.y << "," << pos.z << "\n";
}
if (type == 2)
for (auto &par : pm.p_air) {
auto pos = par.p;
out << pos.x << "," << pos.y << "," << pos.z << "\n";
}
out.close();
}
int main()
{
ParticleManager pm(0.008);
unsigned int step;
std::string path;
std::cout << "Please specify the path to input file: ";
std::cin >> path;
std::cout << "Please specify the number of steps: ";
std::cin >> step;
parseInputFile(path, pm);
output(pm, "air.csv", 2);
output(pm, "water.csv", 1);
output(pm, "fluid.csv", 0);
auto points = cudaCompute(pm, step);
//sampleTest();
return 0;
}
|
0999e59edc7e06cda85e9ece7d8b4de30cfc49fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Skylar Sang & Matthew Rhie
ECSE 4740
Spring 2020
Guidance for CUDA, hiprand, and error checking by Nvidia Developer Roger Allen
Project adapted from the book Ray Tracing in One Weekend by Peter Shirley
*/
#include <iostream>
#include <time.h>
#include <float.h>
#include <hiprand/hiprand_kernel.h>
#include "vec3.h"
#include "ray.h"
#include "sphere.h"
#include "hitable_list.h"
#include "camera.h"
#include "material.h"
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
struct ints3 {
int x;
int y;
int z;
};
void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) {
if (result) {
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
hipDeviceReset();
exit(99);
}
}
// Function for calculating collisions and color for each pixel
__device__ vec3 write_color(const ray& r, hitable **world, hiprandState_t *local_rand_state) {
ray cur_ray = r;
vec3 cur_attenuation = vec3(1.0,1.0,1.0);
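// Iterative form of the book's recursive ray colouring: follow up to 50 bounces,
// multiplying the accumulated attenuation at each scatter, and fall through to the
// sky gradient when nothing is hit.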
for(int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) {
ray scattered;
vec3 attenuation;
if(rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, local_rand_state)) {
cur_attenuation *= attenuation;
cur_ray = scattered;
}
else {
return vec3(0.0,0.0,0.0);
}
}
else {
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f*(unit_direction.y() + 1.0f);
vec3 c = (1.0f-t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
}
return vec3(0.0,0.0,0.0); // exceeded recursion
}
__global__ void rand_init(hiprandState_t *rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
hiprand_init(1984, 0, 0, rand_state);
}
}
__global__ void render_init(int max_x, int max_y, hiprandState_t *rand_state) {
int pixel_index = blockIdx.x*blockDim.x + threadIdx.x;
if (pixel_index >= max_x*max_y) return;
//Each thread gets same seed, a different sequence number, no offset
hiprand_init(19+pixel_index, 0, 0, &rand_state[pixel_index]);
}
__global__ void render(ints3 *fb, int max_x, int max_y, int ns, camera **cam, hitable **world, hiprandState_t *rand_state) {
int pixel_index = blockIdx.x*blockDim.x + threadIdx.x;
if (pixel_index >= max_x*max_y) return;
int j = pixel_index / max_x;
int i = pixel_index - j*max_x;
hiprandState_t local_rand_state = rand_state[pixel_index];
vec3 col(0,0,0);
for(int s=0; s < ns; s++) {
float u = float(i + hiprand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + hiprand_uniform(&local_rand_state)) / float(max_y);
ray r = (*cam)->get_ray(u, v, &local_rand_state);
col += write_color(r, world, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
col /= float(ns);
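// Gamma-correct with gamma = 2 (output = sqrt of the averaged linear colour).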
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
ints3 color_int;
color_int.x = int(255.99*col.x());
color_int.y = int(255.99*col.y());
color_int.z = int(255.99*col.z());
fb[pixel_index] = color_int;
}
#define RND (hiprand_uniform(&local_rand_state))
// Kernel for initializing world
__global__ void random_scene(hitable **d_list, hitable **d_world, camera **d_camera, int nx, int ny, hiprandState_t *rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
hiprandState_t local_rand_state = *rand_state;
d_list[0] = new sphere(vec3(0,-1000.0,-1), 1000,
new lambertian(vec3(0.5, 0.5, 0.5)));
int i = 1;
for(int a = -11; a < 11; a++) {
for(int b = -11; b < 11; b++) {
float choose_mat = RND;
vec3 center(a+RND,0.2,b+RND);
if(choose_mat < 0.8f) {
d_list[i++] = new sphere(center, 0.2,
new lambertian(vec3(RND*RND, RND*RND, RND*RND)));
}
else if(choose_mat < 0.95f) {
d_list[i++] = new sphere(center, 0.2,
new metal(vec3(0.5f*(1.0f+RND), 0.5f*(1.0f+RND), 0.5f*(1.0f+RND)), 0.5f*RND));
}
else {
d_list[i++] = new sphere(center, 0.2, new dielectric(1.5));
}
}
}
d_list[i++] = new sphere(vec3(0, 1,0), 1.0, new dielectric(1.5));
d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1)));
d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0));
*rand_state = local_rand_state;
*d_world = new hitable_list(d_list, 22*22+1+3);
vec3 lookfrom(13,2,3);
vec3 lookat(0,0,0);
float dist_to_focus = 10.0; // (lookfrom-lookat).length();
float aperture = 0.1;
*d_camera = new camera(lookfrom,
lookat,
vec3(0,1,0),
30.0,
float(nx)/float(ny),
aperture,
dist_to_focus);
}
}
__global__ void free_world(hitable **d_list, hitable **d_world, camera **d_camera) {
for(int i=0; i < 22*22+1+3; i++) {
delete ((sphere *)d_list[i])->mat_ptr;
delete d_list[i];
}
delete *d_world;
delete *d_camera;
}
int main(int argc, char** argv) {
int width = 1200;
int height = 800;
int samples = 10;
int num_pixels = width*height;
size_t threadsPerBlock = atoi(argv[1]);
size_t blocks = (num_pixels + threadsPerBlock - 1) / threadsPerBlock; // round up so every pixel gets a thread
// allocate FB
std::cerr << "Rendering a " << width << "x" << height << " image with " << samples << " samples per pixel ";
std::cerr << "in " << blocks << "blocks with " << threadsPerBlock << " threads.\n";
ints3 *frame;
checkCudaErrors(hipMallocManaged((void **)&frame, num_pixels*sizeof(ints3)));
// allocate random state
hiprandState_t *d_rand_state;
checkCudaErrors(hipMalloc((void **)&d_rand_state, num_pixels*sizeof(hiprandState_t)));
hiprandState_t *d_rand_state2;
checkCudaErrors(hipMalloc((void **)&d_rand_state2, 1*sizeof(hiprandState_t)));
// Initialize random number generator for material/size randomness of the spheres
hipLaunchKernelGGL(( rand_init), dim3(1),dim3(1), 0, 0, d_rand_state2);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// Initialize world with 3D hitable objects
hitable **d_list;
int num_hitables = 22*22+1+3;
checkCudaErrors(hipMalloc((void **)&d_list, num_hitables*sizeof(hitable *)));
hitable **d_world;
checkCudaErrors(hipMalloc((void **)&d_world, sizeof(hitable *)));
camera **d_camera;
checkCudaErrors(hipMalloc((void **)&d_camera, sizeof(camera *)));
hipLaunchKernelGGL(( random_scene), dim3(1),dim3(1), 0, 0, d_list, d_world, d_camera, width, height, d_rand_state2);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
clock_t start, stop;
start = clock();
// Initialize Random Number Generators for each pixel/thread
hipLaunchKernelGGL(( render_init), dim3(blocks), dim3(threadsPerBlock), 0, 0, width, height, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// Run the ray tracing algorithm
hipLaunchKernelGGL(( render), dim3(blocks), dim3(threadsPerBlock), 0, 0, frame, width, height, samples, d_camera, d_world, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
// Output frame to std output > ppm file
std::cout << "P3\n" << width << " " << height << "\n255\n";
for (int i = num_pixels-1; i >= 0; i--) {
std::cout << frame[i].x << " " << frame[i].y << " " << frame[i].z << "\n";
}
// clean up
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( free_world), dim3(1),dim3(1), 0, 0, d_list,d_world,d_camera);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(d_camera));
checkCudaErrors(hipFree(d_world));
checkCudaErrors(hipFree(d_list));
checkCudaErrors(hipFree(d_rand_state));
checkCudaErrors(hipFree(frame));
hipDeviceReset();
}
| 0999e59edc7e06cda85e9ece7d8b4de30cfc49fe.cu | /*
Skylar Sang & Matthew Rhie
ECSE 4740
Spring 2020
Guidance for CUDA, curand, and error checking by Nvidia Developer Roger Allen
Project adapted from the book Ray Tracing in One Weekend by Peter Shirley
*/
#include <iostream>
#include <time.h>
#include <float.h>
#include <curand_kernel.h>
#include "vec3.h"
#include "ray.h"
#include "sphere.h"
#include "hitable_list.h"
#include "camera.h"
#include "material.h"
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
struct ints3 {
int x;
int y;
int z;
};
void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) {
if (result) {
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
cudaDeviceReset();
exit(99);
}
}
// Function for calculating collisions and color for each pixel
__device__ vec3 write_color(const ray& r, hitable **world, curandState *local_rand_state) {
ray cur_ray = r;
vec3 cur_attenuation = vec3(1.0,1.0,1.0);
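// Iterative form of the book's recursive ray colouring: follow up to 50 bounces,
// multiplying the accumulated attenuation at each scatter, and fall through to the
// sky gradient when nothing is hit.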
for(int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) {
ray scattered;
vec3 attenuation;
if(rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, local_rand_state)) {
cur_attenuation *= attenuation;
cur_ray = scattered;
}
else {
return vec3(0.0,0.0,0.0);
}
}
else {
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f*(unit_direction.y() + 1.0f);
vec3 c = (1.0f-t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
}
return vec3(0.0,0.0,0.0); // exceeded recursion
}
__global__ void rand_init(curandState *rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
curand_init(1984, 0, 0, rand_state);
}
}
__global__ void render_init(int max_x, int max_y, curandState *rand_state) {
int pixel_index = blockIdx.x*blockDim.x + threadIdx.x;
if (pixel_index >= max_x*max_y) return;
//Each thread gets same seed, a different sequence number, no offset
curand_init(19+pixel_index, 0, 0, &rand_state[pixel_index]);
}
__global__ void render(ints3 *fb, int max_x, int max_y, int ns, camera **cam, hitable **world, curandState *rand_state) {
int pixel_index = blockIdx.x*blockDim.x + threadIdx.x;
if (pixel_index >= max_x*max_y) return;
int j = pixel_index / max_x;
int i = pixel_index - j*max_x;
curandState local_rand_state = rand_state[pixel_index];
vec3 col(0,0,0);
for(int s=0; s < ns; s++) {
float u = float(i + curand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + curand_uniform(&local_rand_state)) / float(max_y);
ray r = (*cam)->get_ray(u, v, &local_rand_state);
col += write_color(r, world, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
col /= float(ns);
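// Gamma-correct with gamma = 2 (output = sqrt of the averaged linear colour).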
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
ints3 color_int;
color_int.x = int(255.99*col.x());
color_int.y = int(255.99*col.y());
color_int.z = int(255.99*col.z());
fb[pixel_index] = color_int;
}
#define RND (curand_uniform(&local_rand_state))
// Kernel for initializing world
__global__ void random_scene(hitable **d_list, hitable **d_world, camera **d_camera, int nx, int ny, curandState *rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
curandState local_rand_state = *rand_state;
d_list[0] = new sphere(vec3(0,-1000.0,-1), 1000,
new lambertian(vec3(0.5, 0.5, 0.5)));
int i = 1;
for(int a = -11; a < 11; a++) {
for(int b = -11; b < 11; b++) {
float choose_mat = RND;
vec3 center(a+RND,0.2,b+RND);
if(choose_mat < 0.8f) {
d_list[i++] = new sphere(center, 0.2,
new lambertian(vec3(RND*RND, RND*RND, RND*RND)));
}
else if(choose_mat < 0.95f) {
d_list[i++] = new sphere(center, 0.2,
new metal(vec3(0.5f*(1.0f+RND), 0.5f*(1.0f+RND), 0.5f*(1.0f+RND)), 0.5f*RND));
}
else {
d_list[i++] = new sphere(center, 0.2, new dielectric(1.5));
}
}
}
d_list[i++] = new sphere(vec3(0, 1,0), 1.0, new dielectric(1.5));
d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1)));
d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0));
*rand_state = local_rand_state;
*d_world = new hitable_list(d_list, 22*22+1+3);
vec3 lookfrom(13,2,3);
vec3 lookat(0,0,0);
float dist_to_focus = 10.0; // (lookfrom-lookat).length();
float aperture = 0.1;
*d_camera = new camera(lookfrom,
lookat,
vec3(0,1,0),
30.0,
float(nx)/float(ny),
aperture,
dist_to_focus);
}
}
__global__ void free_world(hitable **d_list, hitable **d_world, camera **d_camera) {
for(int i=0; i < 22*22+1+3; i++) {
delete ((sphere *)d_list[i])->mat_ptr;
delete d_list[i];
}
delete *d_world;
delete *d_camera;
}
int main(int argc, char** argv) {
int width = 1200;
int height = 800;
int samples = 10;
int num_pixels = width*height;
size_t threadsPerBlock = atoi(argv[1]);
size_t blocks = (num_pixels + threadsPerBlock - 1) / threadsPerBlock; // round up so every pixel gets a thread
// allocate FB
std::cerr << "Rendering a " << width << "x" << height << " image with " << samples << " samples per pixel ";
std::cerr << "in " << blocks << "blocks with " << threadsPerBlock << " threads.\n";
ints3 *frame;
checkCudaErrors(cudaMallocManaged((void **)&frame, num_pixels*sizeof(ints3)));
// allocate random state
curandState *d_rand_state;
checkCudaErrors(cudaMalloc((void **)&d_rand_state, num_pixels*sizeof(curandState)));
curandState *d_rand_state2;
checkCudaErrors(cudaMalloc((void **)&d_rand_state2, 1*sizeof(curandState)));
// Initialize random number generator for material/size randomness of the spheres
rand_init<<<1,1>>>(d_rand_state2);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// Initialize world with 3D hitable objects
hitable **d_list;
int num_hitables = 22*22+1+3;
checkCudaErrors(cudaMalloc((void **)&d_list, num_hitables*sizeof(hitable *)));
hitable **d_world;
checkCudaErrors(cudaMalloc((void **)&d_world, sizeof(hitable *)));
camera **d_camera;
checkCudaErrors(cudaMalloc((void **)&d_camera, sizeof(camera *)));
random_scene<<<1,1>>>(d_list, d_world, d_camera, width, height, d_rand_state2);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
clock_t start, stop;
start = clock();
// Initialize Random Number Generators for each pixel/thread
render_init<<<blocks, threadsPerBlock>>>(width, height, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// Run the ray tracing algorithm
render<<<blocks, threadsPerBlock>>>(frame, width, height, samples, d_camera, d_world, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
// Output frame to std output > ppm file
std::cout << "P3\n" << width << " " << height << "\n255\n";
for (int i = num_pixels-1; i >= 0; i--) {
std::cout << frame[i].x << " " << frame[i].y << " " << frame[i].z << "\n";
}
// clean up
checkCudaErrors(cudaDeviceSynchronize());
free_world<<<1,1>>>(d_list,d_world,d_camera);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_camera));
checkCudaErrors(cudaFree(d_world));
checkCudaErrors(cudaFree(d_list));
checkCudaErrors(cudaFree(d_rand_state));
checkCudaErrors(cudaFree(frame));
cudaDeviceReset();
}
|
74dcd34b6aff761e5039789804e310579938e509.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file ex_particle_OPENMP_seq.c
* @author Michael Trotter & Matt Goodrum
* @brief Particle filter implementation in C/OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define PI acos(-1)
#define BLOCK_X 16
#define BLOCK_Y 16
__constant__ double c_CDF[1000];
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
const int threads_per_block = 128;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) + tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
float elapsed_time(long long start_time, long long end_time) {
return (float) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(hipError_t e) {
if (e != hipSuccess) {
printf("\nCUDA error: %s\n", hipGetErrorString(e));
exit(1);
}
}
__device__ int findIndexSeq(double * CDF, int lengthCDF, double value)
{
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++)
{
if(CDF[x] >= value)
{
index = x;
break;
}
}
if(index == -1)
return lengthCDF-1;
return index;
}
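// Binary-search variant of the lookup above: returns (approximately) the first index whose
// CDF entry is >= value, walking back over equal entries. Kept for reference; the kernel
// below performs its own linear search over the constant-memory copy of the CDF.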
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value)
{
if(endIndex < beginIndex)
return -1;
int middleIndex;
while(endIndex > beginIndex)
{
middleIndex = beginIndex + ((endIndex-beginIndex)/2);
if(CDF[middleIndex] >= value)
{
if(middleIndex == 0)
return middleIndex;
else if(CDF[middleIndex-1] < value)
return middleIndex;
else if(CDF[middleIndex-1] == value)
{
while(CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if(CDF[middleIndex] > value)
endIndex = middleIndex-1;
else
beginIndex = middleIndex+1;
}
return -1;
}
/*****************************
* CUDA Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: Nparticles
*****************************/
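// Note: the search reads the CDF from __constant__ memory (c_CDF, declared with 1000 entries),
// so Nparticles should not exceed 1000 on this path; the CDF pointer argument is kept only to
// preserve the original signature.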
__global__ void kernel(double * arrayX, double * arrayY, double * CDF, const double* __restrict__ u, double * xj, double * yj, int Nparticles){
int block_id = blockIdx.x;// + gridDim.x * blockIdx.y;
int i = blockDim.x * block_id + threadIdx.x;
if(i < Nparticles){
int index = -1;
int x;
for(x = 0; x < Nparticles; x++){
if(c_CDF[x] >= u[i]){
index = x;
break;
}
}
if(index == -1){
index = Nparticles-1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
}
}
/**
* Takes in a double and returns an integer that approximates to that double
 * @return the value truncated toward zero when its fractional part is < .5; otherwise the truncated value plus one
*/
double roundDouble(double value){
int newValue = (int)(value);
if(value - newValue < .5)
return newValue;
else
		return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index)
{
int num = A*seed[index] + C;
seed[index] = num % M;
return fabs(seed[index]/((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index){
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2*PI*v);
double rt = -2*log(u);
return sqrt(rt)*cosine;
}
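/* Illustrative usage sketch (not in the original benchmark): each particle owns one
 * slot of the seed array, so randu/randn advance independent LCG streams per particle. */
static void rng_usage_example(void) {
	int seeds[4];
	for (int p = 0; p < 4; p++)
		seeds[p] = time(0) * p; /* same seeding scheme used in main() below */
	double u = randu(seeds, 1); /* uniform draw in [0, 1) for particle 1 */
	double g = randn(seeds, 1); /* standard normal draw via the Box-Muller transform */
	printf("example draws: u = %f, g = %f\n", u, g);
}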
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius)
{
int diameter = radius*2 - 1;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
if(distance < radius)
disk[x*diameter + y] = 1;
}
}
}
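/* For instance (illustrative), with radius = 5 -- the value particleFilter() uses
 * below -- the mask is 9x9 and the 69 positions whose distance from the center is
 * strictly less than 5 are set to 1, so the later countOnes scan yields 69. */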
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
int x, y, z;
for(z = 0; z < dimZ; z++){
for(x = 0; x < dimX; x++){
for(y = 0; y < dimY; y++){
if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, double * neighbors, int radius){
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius*2 -1;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(se[x*diameter + y]){
neighbors[neighY*2] = (int)(y - center);
neighbors[neighY*2 + 1] = (int)(x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
 * the foreground intensity and the background intensity are known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
int k;
int max_size = IszX*IszY*Nfr;
/*get object centers*/
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k-1));
yk = abs(y0 - 2*(k-1));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(k = 0; k < Nfr; k++){
I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
 * Determines the likelihood sum based on the formula: SUM( ((IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50 )
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
double calcLikelihoodSum(int * I, int * ind, int numOnes){
double likelihoodSum = 0.0;
int y;
for(y = 0; y < numOnes; y++)
likelihoodSum += (pow((double)(I[ind[y]] - 100),2) - pow((double)(I[ind[y]]-228),2))/50.0;
return likelihoodSum;
}
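/* Worked example (illustrative): a noise-free foreground pixel (I = 228) contributes
 * ((228-100)^2 - (228-228)^2)/50 = +327.68, while a background pixel (I = 100)
 * contributes (0 - (100-228)^2)/50 = -327.68, so particles covering the object
 * accumulate a larger likelihood sum. */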
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value){
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++){
if(CDF[x] >= value){
index = x;
break;
}
}
if(index == -1){
return lengthCDF-1;
}
return index;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){
int max_size = IszX*IszY*Nfr;
long long start = get_time();
//original particle centroid
double xe = roundDouble(IszY/2.0);
double ye = roundDouble(IszX/2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius*2 - 1;
int * disk = (int *)malloc(diameter*diameter*sizeof(int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
double * objxy = (double *)malloc(countOnes*2*sizeof(double));
getneighbors(disk, countOnes, objxy, radius);
long long get_neighbors = get_time();
printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
//initial weights are all equal (1/Nparticles)
double * weights = (double *)malloc(sizeof(double)*Nparticles);
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((double)(Nparticles));
}
long long get_weights = get_time();
	printf("TIME TO GET WEIGHTS TOOK: %f\n", elapsed_time(get_neighbors, get_weights));
//initial likelihood to 0.0
double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
double * xj = (double *)malloc(sizeof(double)*Nparticles);
double * yj = (double *)malloc(sizeof(double)*Nparticles);
double * CDF = (double *)malloc(sizeof(double)*Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
int * ind = (int*)malloc(sizeof(int)*countOnes);
double * u = (double *)malloc(sizeof(double)*Nparticles);
double * u_GPU;
//CUDA memory allocation
check_error(hipMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &xj_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &yj_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &u_GPU, sizeof(double)*Nparticles));
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
//double * Ik = (double *)malloc(sizeof(double)*IszX*IszY);
int indX, indY;
for(k = 1; k < Nfr; k++){
long long set_arrays = get_time();
//printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, set_arrays));
//apply motion model
//draws sample from motion model (random walk). The only prior information
		//is that the object moves about 2x as fast in one direction as in the other
for(x = 0; x < Nparticles; x++){
arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x);
arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x);
}
//particle filter likelihood
long long error = get_time();
printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
for(x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// foreground and the background image intensity distribution.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case. why? a hometask for you.
//calc ind
for(y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[y] >= max_size)
ind[y] = 0;
}
likelihood[x] = calcLikelihoodSum(I, ind, countOnes);
likelihood[x] = likelihood[x]/countOnes;
}
long long likelihood_time = get_time();
printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
}
long long exponential = get_time();
printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
double sumWeights = 0;
for(x = 0; x < Nparticles; x++){
sumWeights += weights[x];
}
long long sum_time = get_time();
printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
}
long long normalize = get_time();
printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
xe = 0;
ye = 0;
// estimate the object location by expected values
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
long long move_time = get_time();
printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
printf("%lf\n", distance);
//display(hold off for now)
//pause(hold off for now)
//resampling
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
long long cum_sum = get_time();
printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
for(x = 0; x < Nparticles; x++){
u[x] = u1 + x/((double)(Nparticles));
}
long long u_time = get_time();
printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
long long start_copy = get_time();
//CUDA memory copying from CPU memory to GPU memory
hipMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(u_GPU, u, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_CDF,CDF,sizeof(double)*Nparticles);
long long end_copy = get_time();
//Set number of threads
int num_blocks = ceil((double) Nparticles/(double) threads_per_block);
//KERNEL FUNCTION CALL
hipLaunchKernelGGL(( kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles);
hipDeviceSynchronize();
long long start_copy_back = get_time();
//CUDA memory copying back from GPU to CPU memory
hipMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost);
hipMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost);
long long end_copy_back = get_time();
printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy));
printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back));
printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back));
long long xyj_time = get_time();
printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
for(x = 0; x < Nparticles; x++){
//reassign arrayX and arrayY
arrayX[x] = xj[x];
arrayY[x] = yj[x];
weights[x] = 1/((double)(Nparticles));
}
long long reset = get_time();
printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
}
//CUDA freeing of memory
hipFree(u_GPU);
hipFree(CDF_GPU);
hipFree(yj_GPU);
hipFree(xj_GPU);
hipFree(arrayY_GPU);
hipFree(arrayX_GPU);
//free memory
free(disk);
free(objxy);
free(weights);
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(u);
free(ind);
}
int main(int argc, char * argv[]){
char* usage = "naive.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if(argc != 9)
{
printf("%s\n", usage);
return 0;
}
	//check argument delimiters
if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) {
printf( "%s\n",usage );
return 0;
}
int IszX, IszY, Nfr, Nparticles;
	//converting a string to an integer
if( sscanf( argv[2], "%d", &IszX ) == EOF ) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if( IszX <= 0 ) {
printf("dimX must be > 0\n");
return 0;
}
	//converting a string to an integer
if( sscanf( argv[4], "%d", &IszY ) == EOF ) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if( IszY <= 0 ) {
printf("dimY must be > 0\n");
return 0;
}
	//converting a string to an integer
if( sscanf( argv[6], "%d", &Nfr ) == EOF ) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if( Nfr <= 0 ) {
printf("number of frames must be > 0\n");
return 0;
}
	//converting a string to an integer
if( sscanf( argv[8], "%d", &Nparticles ) == EOF ) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if( Nparticles <= 0 ) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *)malloc(sizeof(int)*Nparticles);
int i;
for(i = 0; i < Nparticles; i++)
seed[i] = time(0)*i;
//malloc matrix
int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
| 74dcd34b6aff761e5039789804e310579938e509.cu | /**
* @file ex_particle_OPENMP_seq.c
* @author Michael Trotter & Matt Goodrum
* @brief Particle filter implementation in C/OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#include <time.h> // for time(), used in main() to seed the per-particle RNG streams
#define PI acos(-1)
#define BLOCK_X 16
#define BLOCK_Y 16
__constant__ double c_CDF[1000]; // constant-memory copy of the resampling CDF; assumes Nparticles <= 1000
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
const int threads_per_block = 128;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) + tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
float elapsed_time(long long start_time, long long end_time) {
return (float) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
__device__ int findIndexSeq(double * CDF, int lengthCDF, double value)
{
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++)
{
if(CDF[x] >= value)
{
index = x;
break;
}
}
if(index == -1)
return lengthCDF-1;
return index;
}
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value)
{
if(endIndex < beginIndex)
return -1;
int middleIndex;
while(endIndex > beginIndex)
{
middleIndex = beginIndex + ((endIndex-beginIndex)/2);
if(CDF[middleIndex] >= value)
{
if(middleIndex == 0)
return middleIndex;
else if(CDF[middleIndex-1] < value)
return middleIndex;
else if(CDF[middleIndex-1] == value)
{
while(CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if(CDF[middleIndex] > value)
endIndex = middleIndex-1;
else
beginIndex = middleIndex+1;
}
return -1;
}
/*****************************
* CUDA Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: Nparticles
*****************************/
__global__ void kernel(double * arrayX, double * arrayY, double * CDF, const double* __restrict__ u, double * xj, double * yj, int Nparticles){
int block_id = blockIdx.x;// + gridDim.x * blockIdx.y;
int i = blockDim.x * block_id + threadIdx.x;
if(i < Nparticles){
int index = -1;
int x;
for(x = 0; x < Nparticles; x++){
if(c_CDF[x] >= u[i]){
index = x;
break;
}
}
if(index == -1){
index = Nparticles-1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
}
}
/**
* Takes in a double and returns an integer that approximates to that double
 * @return the value truncated toward zero when its fractional part is < .5; otherwise the truncated value plus one
*/
double roundDouble(double value){
int newValue = (int)(value);
if(value - newValue < .5)
return newValue;
else
		return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index)
{
int num = A*seed[index] + C;
seed[index] = num % M;
return fabs(seed[index]/((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index){
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2*PI*v);
double rt = -2*log(u);
return sqrt(rt)*cosine;
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius)
{
int diameter = radius*2 - 1;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
if(distance < radius)
disk[x*diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
int x, y, z;
for(z = 0; z < dimZ; z++){
for(x = 0; x < dimX; x++){
for(y = 0; y < dimY; y++){
if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, double * neighbors, int radius){
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius*2 -1;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(se[x*diameter + y]){
neighbors[neighY*2] = (int)(y - center);
neighbors[neighY*2 + 1] = (int)(x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
 * the foreground intensity and the background intensity are known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
int k;
int max_size = IszX*IszY*Nfr;
/*get object centers*/
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k-1));
yk = abs(y0 - 2*(k-1));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(k = 0; k < Nfr; k++){
I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
 * Determines the likelihood sum based on the formula: SUM( ((IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50 )
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
double calcLikelihoodSum(int * I, int * ind, int numOnes){
double likelihoodSum = 0.0;
int y;
for(y = 0; y < numOnes; y++)
likelihoodSum += (pow((double)(I[ind[y]] - 100),2) - pow((double)(I[ind[y]]-228),2))/50.0;
return likelihoodSum;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value){
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++){
if(CDF[x] >= value){
index = x;
break;
}
}
if(index == -1){
return lengthCDF-1;
}
return index;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){
int max_size = IszX*IszY*Nfr;
long long start = get_time();
//original particle centroid
double xe = roundDouble(IszY/2.0);
double ye = roundDouble(IszX/2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius*2 - 1;
int * disk = (int *)malloc(diameter*diameter*sizeof(int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
double * objxy = (double *)malloc(countOnes*2*sizeof(double));
getneighbors(disk, countOnes, objxy, radius);
long long get_neighbors = get_time();
printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
//initial weights are all equal (1/Nparticles)
double * weights = (double *)malloc(sizeof(double)*Nparticles);
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((double)(Nparticles));
}
long long get_weights = get_time();
	printf("TIME TO GET WEIGHTS TOOK: %f\n", elapsed_time(get_neighbors, get_weights));
//initial likelihood to 0.0
double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
double * xj = (double *)malloc(sizeof(double)*Nparticles);
double * yj = (double *)malloc(sizeof(double)*Nparticles);
double * CDF = (double *)malloc(sizeof(double)*Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
int * ind = (int*)malloc(sizeof(int)*countOnes);
double * u = (double *)malloc(sizeof(double)*Nparticles);
double * u_GPU;
//CUDA memory allocation
check_error(cudaMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &xj_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &yj_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &u_GPU, sizeof(double)*Nparticles));
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
//double * Ik = (double *)malloc(sizeof(double)*IszX*IszY);
int indX, indY;
for(k = 1; k < Nfr; k++){
long long set_arrays = get_time();
//printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, set_arrays));
//apply motion model
//draws sample from motion model (random walk). The only prior information
		//is that the object moves about 2x as fast in one direction as in the other
for(x = 0; x < Nparticles; x++){
arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x);
arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x);
}
//particle filter likelihood
long long error = get_time();
printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
for(x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// foreground and the background image intensity distribution.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case. why? a hometask for you.
//calc ind
for(y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[y] >= max_size)
ind[y] = 0;
}
likelihood[x] = calcLikelihoodSum(I, ind, countOnes);
likelihood[x] = likelihood[x]/countOnes;
}
long long likelihood_time = get_time();
printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
}
long long exponential = get_time();
printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
double sumWeights = 0;
for(x = 0; x < Nparticles; x++){
sumWeights += weights[x];
}
long long sum_time = get_time();
printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
}
long long normalize = get_time();
printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
xe = 0;
ye = 0;
// estimate the object location by expected values
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
long long move_time = get_time();
printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
printf("%lf\n", distance);
//display(hold off for now)
//pause(hold off for now)
//resampling
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
long long cum_sum = get_time();
printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
for(x = 0; x < Nparticles; x++){
u[x] = u1 + x/((double)(Nparticles));
}
long long u_time = get_time();
printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
long long start_copy = get_time();
//CUDA memory copying from CPU memory to GPU memory
cudaMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(u_GPU, u, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_CDF,CDF,sizeof(double)*Nparticles);
long long end_copy = get_time();
//Set number of threads
int num_blocks = ceil((double) Nparticles/(double) threads_per_block);
//KERNEL FUNCTION CALL
kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles);
		cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated; this is the equivalent call
long long start_copy_back = get_time();
//CUDA memory copying back from GPU to CPU memory
cudaMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost);
cudaMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost);
long long end_copy_back = get_time();
printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy));
printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back));
printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back));
long long xyj_time = get_time();
printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
for(x = 0; x < Nparticles; x++){
//reassign arrayX and arrayY
arrayX[x] = xj[x];
arrayY[x] = yj[x];
weights[x] = 1/((double)(Nparticles));
}
long long reset = get_time();
printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
}
//CUDA freeing of memory
cudaFree(u_GPU);
cudaFree(CDF_GPU);
cudaFree(yj_GPU);
cudaFree(xj_GPU);
cudaFree(arrayY_GPU);
cudaFree(arrayX_GPU);
//free memory
free(disk);
free(objxy);
free(weights);
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(u);
free(ind);
}
int main(int argc, char * argv[]){
char* usage = "naive.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if(argc != 9)
{
printf("%s\n", usage);
return 0;
}
	//check argument delimiters
if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) {
printf( "%s\n",usage );
return 0;
}
int IszX, IszY, Nfr, Nparticles;
	//converting a string to an integer
if( sscanf( argv[2], "%d", &IszX ) == EOF ) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if( IszX <= 0 ) {
printf("dimX must be > 0\n");
return 0;
}
	//converting a string to an integer
if( sscanf( argv[4], "%d", &IszY ) == EOF ) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if( IszY <= 0 ) {
printf("dimY must be > 0\n");
return 0;
}
	//converting a string to an integer
if( sscanf( argv[6], "%d", &Nfr ) == EOF ) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if( Nfr <= 0 ) {
printf("number of frames must be > 0\n");
return 0;
}
	//converting a string to an integer
if( sscanf( argv[8], "%d", &Nparticles ) == EOF ) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if( Nparticles <= 0 ) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *)malloc(sizeof(int)*Nparticles);
int i;
for(i = 0; i < Nparticles; i++)
seed[i] = time(0)*i;
//malloc matrix
int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
|
979fc0ae91200737e1f04579955bdfa45d4ce6f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <vector>
#include <chrono>
using namespace std::chrono;
using std::cout;
using std::generate;
using std::vector;
__global__ void matrixMul(const int *a, const int *b, int *c, int N) {
// Compute each thread's global row and column index
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Iterate over row, and down column
c[row * N + col] = 0;
for (int k = 0; k < N; k++) {
// Accumulate results for a single element
c[row * N + col] += a[row * N + k] * b[k * N + col];
}
}
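// Worked example of the indexing above (illustrative): with 32x32 thread blocks,
// the thread at (threadIdx.x = 3, threadIdx.y = 5) in block (blockIdx.x = 2,
// blockIdx.y = 1) computes the single element c[row * N + col] with
// row = 1*32 + 5 = 37 and col = 2*32 + 3 = 67, i.e. one output element per thread.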
// Check result on the CPU
void verify_result(vector<int> &a, vector<int> &b, vector<int> &c, int N) {
// For every row...
for (int i = 0; i < N; i++) {
// For every column...
for (int j = 0; j < N; j++) {
// For every element in the row-column pair
int tmp = 0;
for (int k = 0; k < N; k++) {
// Accumulate the partial results
tmp += a[i * N + k] * b[k * N + j];
}
// Check against the CPU result
assert(tmp == c[i * N + j]);
}
}
}
int main() {
auto main_start = high_resolution_clock::now();
int N = 1 << 13;
// Size (in bytes) of matrix
size_t bytes = N * N * sizeof(int);
auto start = high_resolution_clock::now();
// Host vectors
vector<int> h_a(N * N);
vector<int> h_b(N * N);
vector<int> h_c(N * N);
// Initialize matrices
generate(h_a.begin(), h_a.end(), []() { return rand() % 100; });
generate(h_b.begin(), h_b.end(), []() { return rand() % 100; });
// Allocate device memory
int *d_a, *d_b, *d_c;
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
// Copy data to the device
hipMemcpy(d_a, h_a.data(), bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b.data(), bytes, hipMemcpyHostToDevice);
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
cout << "Data transfer time from CPU - GPU : " << duration.count() << "\n";
// Threads per CTA dimension
int THREADS = 32;
// Blocks per grid dimension (assumes THREADS divides N evenly)
int BLOCKS = N / THREADS;
// Use dim3 structs for block and grid dimensions
dim3 threads(THREADS, THREADS);
dim3 blocks(BLOCKS, BLOCKS);
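  // Illustrative guard (not in the original): the kernel has no bounds check, so the
  // grid covers the matrix exactly only when THREADS divides N evenly.
  assert(N % THREADS == 0);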
// Launch kernel
start = high_resolution_clock::now();
  hipLaunchKernelGGL(( matrixMul), dim3(blocks), dim3(threads), 0, 0, d_a, d_b, d_c, N);
  hipDeviceSynchronize(); // wait for the kernel: the launch is asynchronous, so timing it alone only measures launch overhead
  stop = high_resolution_clock::now();
  duration = duration_cast<microseconds>(stop - start);
  cout << "Kernel execution time : " << duration.count() << "\n";
// Copy back to the host
start = high_resolution_clock::now();
hipMemcpy(h_c.data(), d_c, bytes, hipMemcpyDeviceToHost);
stop = high_resolution_clock::now();
duration = duration_cast<microseconds>(stop - start);
cout << "Data transfer time from GPU - CPU : " << duration.count() << "\n";
// Check result
// verify_result(h_a, h_b, h_c, N);
// Free memory on device
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
auto main_stop = high_resolution_clock::now();
duration = duration_cast<microseconds>(main_stop - main_start);
cout << "Total time : " << duration.count() << "\n";
cout << "COMPLETED SUCCESSFULLY\n";
return 0;
} | 979fc0ae91200737e1f04579955bdfa45d4ce6f8.cu | #include <algorithm>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <vector>
#include <chrono>
using namespace std::chrono;
using std::cout;
using std::generate;
using std::vector;
__global__ void matrixMul(const int *a, const int *b, int *c, int N) {
// Compute each thread's global row and column index
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Iterate over row, and down column
c[row * N + col] = 0;
for (int k = 0; k < N; k++) {
// Accumulate results for a single element
c[row * N + col] += a[row * N + k] * b[k * N + col];
}
}
// Check result on the CPU
void verify_result(vector<int> &a, vector<int> &b, vector<int> &c, int N) {
// For every row...
for (int i = 0; i < N; i++) {
// For every column...
for (int j = 0; j < N; j++) {
// For every element in the row-column pair
int tmp = 0;
for (int k = 0; k < N; k++) {
// Accumulate the partial results
tmp += a[i * N + k] * b[k * N + j];
}
// Check against the CPU result
assert(tmp == c[i * N + j]);
}
}
}
int main() {
auto main_start = high_resolution_clock::now();
int N = 1 << 13;
// Size (in bytes) of matrix
size_t bytes = N * N * sizeof(int);
auto start = high_resolution_clock::now();
// Host vectors
vector<int> h_a(N * N);
vector<int> h_b(N * N);
vector<int> h_c(N * N);
// Initialize matrices
generate(h_a.begin(), h_a.end(), []() { return rand() % 100; });
generate(h_b.begin(), h_b.end(), []() { return rand() % 100; });
// Allocate device memory
int *d_a, *d_b, *d_c;
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
// Copy data to the device
cudaMemcpy(d_a, h_a.data(), bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b.data(), bytes, cudaMemcpyHostToDevice);
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
cout << "Data transfer time from CPU - GPU : " << duration.count() << "\n";
// Threads per CTA dimension
int THREADS = 32;
// Blocks per grid dimension (assumes THREADS divides N evenly)
int BLOCKS = N / THREADS;
// Use dim3 structs for block and grid dimensions
dim3 threads(THREADS, THREADS);
dim3 blocks(BLOCKS, BLOCKS);
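  // Illustrative guard (not in the original): the kernel has no bounds check, so the
  // grid covers the matrix exactly only when THREADS divides N evenly.
  assert(N % THREADS == 0);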
// Launch kernel
start = high_resolution_clock::now();
  matrixMul<<<blocks, threads>>>(d_a, d_b, d_c, N);
  cudaDeviceSynchronize(); // wait for the kernel: the launch is asynchronous, so timing it alone only measures launch overhead
  stop = high_resolution_clock::now();
  duration = duration_cast<microseconds>(stop - start);
  cout << "CUDA kernel execution time : " << duration.count() << "\n";
// Copy back to the host
start = high_resolution_clock::now();
cudaMemcpy(h_c.data(), d_c, bytes, cudaMemcpyDeviceToHost);
stop = high_resolution_clock::now();
duration = duration_cast<microseconds>(stop - start);
cout << "Data transfer time from GPU - CPU : " << duration.count() << "\n";
// Check result
// verify_result(h_a, h_b, h_c, N);
// Free memory on device
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
auto main_stop = high_resolution_clock::now();
duration = duration_cast<microseconds>(main_stop - main_start);
cout << "Total time : " << duration.count() << "\n";
cout << "COMPLETED SUCCESSFULLY\n";
return 0;
} |
96bb2df05aaf66fdd1c22b4990f6432a8730f564.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <assert.h>
#include <pthread.h>
#include <string>
#include <fstream>
#include <vector>
#include <map>
#include <algorithm>
#include <iostream>
#include <sstream>
#include <iterator>
#include <signal.h>
#include <unistd.h>
#include <unordered_set>
#include <random>
#include "nvbit_tool.h"
#include "nvbit.h"
#include "utils/utils.h"
#include "globals.h"
#include "injector.h"
int verbose;
__managed__ int verbose_device;
// injection parameters input filename: this file is created by the script
// that launches the error injections
std::string injInputFilename = "nvbitfi-injection-info.txt";
pthread_mutex_t mutex;
__managed__ inj_info_t inj_info;
void reset_inj_info() {
inj_info.areParamsReady = false;
inj_info.kernelName[0] = '\0';
inj_info.kernelCount = -1;
inj_info.groupID = 0; // arch state id
inj_info.instID = 0; // instruction id
inj_info.opIDSeed = 0; // destination id seed (float, 0-1)
inj_info.bitIDSeed = 0; // bit location seed (float, 0-1)
inj_info.bitFlipModel = 0; // fault model: single bit flip, all bit flip, random value
inj_info.mask = 0;
inj_info.beforeVal = 0;
inj_info.afterVal = 0;
inj_info.regNo = -1;
inj_info.opcode = NOP;
inj_info.pcOffset = 0;
inj_info.tid = -1;
inj_info.errorInjected = false;
for (int i = 0; i < NUM_DEBUG_VALS; i++) {
inj_info.debug[i] = -1;
}
}
void write_inj_info() {
assert(fout.good());
for (int i = 0; i < NUM_INST_GROUPS; i++) {
fout << " grp " << i << ": " << counters[NUM_ISA_INSTRUCTIONS + i];
}
fout << std::endl;
fout << "mask: 0x" << std::hex << inj_info.mask << std::endl;
fout << "beforeVal: 0x" << inj_info.beforeVal << ";";
fout << "afterVal: 0x" << inj_info.afterVal << std::endl;
fout << "regNo: " << std::dec << inj_info.regNo << std::endl;
fout << "opcode: " << instTypeNames[inj_info.opcode] << std::endl;
fout << "pcOffset: 0x" << std::hex << inj_info.pcOffset << std::endl;
fout << "tid: " << std::dec << inj_info.tid << std::endl;
}
// for debugging
void print_inj_info() {
assert(fout.good());
fout << "kernelName=" << inj_info.kernelName << std::endl;
fout << "kernelCount=" << inj_info.kernelCount << std::endl;
fout << "groupID=" << inj_info.groupID << std::endl;
fout << "bitFlipModel=" << inj_info.bitFlipModel << std::endl;
fout << "instID=" << inj_info.instID << std::endl;
fout << "opIDSeed=" << inj_info.opIDSeed << std::endl;
fout << "bitIDSeed=" << inj_info.bitIDSeed << std::endl;
}
// Parse error injection site info from a file. This should be done on host side.
void parse_params(std::string filename) {
static bool parse_flag = false; // file will be parsed only once - performance enhancement
if (!parse_flag) {
parse_flag = true;
reset_inj_info();
std::ifstream ifs(filename.c_str(), std::ifstream::in);
if (ifs.is_open()) {
ifs >> inj_info.groupID; // arch state id
assert(inj_info.groupID >= 0 && inj_info.groupID < NUM_INST_GROUPS); // ensure that the value is in the expected range
ifs >> inj_info.bitFlipModel; // fault model: single bit flip, all bit flip, random value
assert(inj_info.bitFlipModel < NUM_BFM_TYPES); // ensure that the value is in the expected range
ifs >> inj_info.kernelName;
ifs >> inj_info.kernelCount;
ifs >> inj_info.instID; // instruction id
ifs >> inj_info.opIDSeed; // destination id seed (float, 0-1 for inst injections and 0-256 for reg)
assert(inj_info.opIDSeed >= 0 && inj_info.opIDSeed < 1.01); // ensure that the value is in the expected range
ifs >> inj_info.bitIDSeed; // bit location seed (float, 0-1)
assert(inj_info.bitIDSeed >= 0 && inj_info.bitIDSeed < 1.01); // ensure that the value is in the expected range
} else {
printf(" File %s does not exist!", filename.c_str());
printf(
" This file should contain enough information about the fault site to perform an error injection run: ");
			printf(
					"(1) arch state id, (2) bit flip model, (3) kernel name, (4) kernel count, (5) instruction id, (6) seed to select destination id, (7) seed to select bit location.\n");
assert(false);
}
ifs.close();
if (verbose)
print_inj_info();
}
}
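/* Illustrative example (hypothetical values) of nvbitfi-injection-info.txt, in the
 * whitespace-separated order parsed above:
 *
 *   2                     groupID (instruction group to target)
 *   0                     bitFlipModel (fault model id)
 *   _Z9someKernelPfS_i    kernel name (mangled, no spaces)
 *   1                     kernelCount (dynamic launch index of that kernel)
 *   153                   instID (dynamic instruction index)
 *   0.37                  opIDSeed in [0, 1)
 *   0.82                  bitIDSeed in [0, 1)
 */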
int get_maxregs(hipFunction_t func) {
int maxregs = -1;
hipFuncGetAttribute(&maxregs, hipFuncAttributeNumRegs, func);
return maxregs;
}
// custom signal handler such that we don't miss the injection information.
void INThandler(int sig) {
signal(sig, SIG_IGN); // disable Ctrl-C
fout << "ERROR FAIL Detected Signal SIGKILL\n";
write_inj_info();
exit(-1);
}
/* nvbit_at_init() is executed as soon as the nvbit tool is loaded. We typically
* do initializations in this call. In this case for instance we get some
* environment variables values which we use as input arguments to the tool */
// DO NOT USE UVM (__managed__) variables in this function
void nvbit_at_init() {
/* just make sure all managed variables are allocated on GPU */
setenv("CUDA_MANAGED_FORCE_DEVICE_ALLOC", "1", 1);
/* we get some environment variables that are going to be use to selectively
* instrument (within a interval of kernel indexes and instructions). By
* default we instrument everything. */
if (getenv("TOOL_VERBOSE")) {
verbose = atoi(getenv("TOOL_VERBOSE"));
} else {
verbose = 0;
}
// GET_VAR_INT(verbose, "TOOL_VERBOSE", 0, "Enable verbosity inside the tool (1, 2, 3,..)");
initInstTypeNameMap();
signal(SIGINT, INThandler); // install Ctrl-C handler
open_output_file(injOutputFilename);
if (verbose)
printf("nvbit_at_init:end\n");
}
/**
 * Generate a new uniformly distributed random value in [0, 1) for the operand id seed
*/
void generate_new_random_value() {
if (inj_info.bitFlipModel == FLEXGRIP_RELATIVE_FU || inj_info.bitFlipModel == FLEXGRIP_RELATIVE_PIPELINE) {
std::random_device rd; //Will be used to obtain a seed for the random number engine
std::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd()
std::uniform_real_distribution<float> dis(0.0f, 1.0f);
inj_info.opIDSeed = dis(gen);
}
}
/* Set used to avoid re-instrumenting the same functions multiple times */
std::unordered_set<hipFunction_t> already_instrumented;
void instrument_function_if_needed(hipCtx_t ctx, hipFunction_t func) {
parse_params(injInputFilename.c_str()); // injParams are updated based on injection seed file
hipDeviceSynchronize();
verbose_device = verbose;
hipDeviceSynchronize();
/* Get related functions of the kernel (device function that can be
* called by the kernel) */
std::vector<hipFunction_t> related_functions = nvbit_get_related_functions(ctx,
func);
/* add kernel itself to the related function vector */
related_functions.push_back(func);
/* iterate on function */
for (auto f : related_functions) {
/* "recording" function was instrumented, if set insertion failed
* we have already encountered this function */
if (!already_instrumented.insert(f).second) {
continue;
}
std::string kname = removeSpaces(nvbit_get_func_name(ctx, f));
if (strcmp(inj_info.kernelName, kname.c_str()) == 0) { // this is the kernel selected for injection
assert(fout.good()); // ensure that the log file is good.
/* Get the vector of instruction composing the loaded CUFunction "f" */
const std::vector<Instr *> &instrs = nvbit_get_instrs(ctx, f);
/* If verbose we print function name and number of" static" instructions
*/
if (verbose) {
printf("inspecting %s - num instrs %ld\n",
nvbit_get_func_name(ctx, f), instrs.size());
}
int maxregs = get_maxregs(f);
fout << "inspecting: " << kname << "\nnum_static_instrs: "
<< instrs.size() << "\nmaxregs: " << maxregs << "("
<< maxregs << ")" << std::endl;
/* We iterate on the vector of instruction */
for (auto i : instrs) {
std::string opcode = i->getOpcode();
std::string instType = extractInstType(opcode);
// printf("extracted instType: %s\n", instType.c_str());
// printf("index of instType: %d\n", instTypeNameMap[instType]);
// Tokenize the instruction
std::vector<std::string> tokens;
std::string buf; // a buffer string
std::stringstream ss(i->getSass()); // Insert the string into a stream
while (ss >> buf)
tokens.push_back(buf);
int destGPRNum = -1;
int numDestGPRs = 0;
int destPRNum1 = -1;
int destPRNum2 = -1;
int instGrpNum = getOpGroupNum(instTypeNameMap[instType]);
if (tokens.size() > 0 && instGrpNum != G_NODEST) { // an actual instruction that writes to either a GPR or PR register
if (verbose)
printf("num tokens = %ld ", tokens.size());
int start = 1; // first token is opcode string
if (tokens[0].find('@') != std::string::npos) { // predicated instruction, ignore first token
start = 2; // first token is predicate and 2nd token is opcode
}
// Parse the first operand - this is the first destination
int regnum1 = -1;
int regnum2 = -1;
int regtype = extractRegNo(tokens[start], regnum1);
if (regtype == 0) { // GPR reg
destGPRNum = regnum1;
numDestGPRs = (instGrpNum == G_FP64) ? 2 : 1;
int sz = extractSize(opcode);
if (sz != 0) { // for LD, IMMA, HMMA
numDestGPRs = sz / 32;
}
int regtype2 = extractRegNo(tokens[start + 1], regnum2);
// the following is probably not possible in Pascal ISA
if (regtype2 == 1) { // PR reg, it looks like this instruction has two destination registers
destPRNum1 = regnum2;
}
}
if (regtype == 1) {
destPRNum1 = regnum1;
if (instGrpNum != G_PR) { // this is not a PR-only instruction.
int regtype2 = extractRegNo(tokens[start + 1],
regnum2);
if (regtype2 == 0) { // a GPR reg, it looks like this instruction has two destination registers
destGPRNum = regnum2;
numDestGPRs = (instGrpNum == G_FP64) ? 2 : 1;
}
} else { // check if the 2nd reg is a PR dest
if (tokens.size() > 5) { // this seems like the instruction that has 2 PR destinations
int regtype2 = extractRegNo(tokens[start + 1],
regnum2);
if (regtype2 == 1) { // a PR reg, it looks like this instruction has two destination registers
destPRNum2 = regnum2;
}
}
}
}
if (verbose)
printf(
"offset = 0x%x, opcode_info=%d, instType=%s, opcode=%s, numDestGPRs=%d, destGPRNum=%d, destPRNum1=%d, destPRNum2=%d, instruction: %s\n",
i->getOffset(), instTypeNameMap[instType],
instType.c_str(), opcode.c_str(), numDestGPRs,
destGPRNum, destPRNum1, destPRNum2,
i->getSass());
}
//Power law error model
generate_new_random_value();
nvbit_insert_call(i, "inject_error", IPOINT_AFTER);
nvbit_add_call_arg_const_val64(i, (uint64_t) &inj_info);
nvbit_add_call_arg_const_val64(i, (uint64_t) counters);
nvbit_add_call_arg_const_val64(i, (uint64_t) &verbose_device);
nvbit_add_call_arg_const_val32(i, i->getOffset()); // offset (for pc) info
nvbit_add_call_arg_const_val32(i, instTypeNameMap[instType]); // opcode info
nvbit_add_call_arg_const_val32(i, instGrpNum); // instruction group info
nvbit_add_call_arg_const_val32(i, destGPRNum); // destination GPR register number
if (destGPRNum != -1) {
nvbit_add_call_arg_reg_val(i, destGPRNum); // destination GPR register val
} else {
nvbit_add_call_arg_const_val32(i, (unsigned int) -1); // destination GPR register val
}
nvbit_add_call_arg_const_val32(i, numDestGPRs); // number of destination GPR registers
if (isGPInst(instGrpNum) && inj_info.groupID == G_GP) { // PR register numbers should be -1, if the injection model is G_GP. This way we will never inject errors into them
nvbit_add_call_arg_const_val32(i, (unsigned int) -1); // first destination PR register number
nvbit_add_call_arg_const_val32(i, (unsigned int) -1); // second destination PR register number
} else {
nvbit_add_call_arg_const_val32(i, destPRNum1); // first destination PR register number
nvbit_add_call_arg_const_val32(i, destPRNum2); // second destination PR register number
}
nvbit_add_call_arg_const_val32(i, maxregs); // max regs used by the inst info
}
} else {
const std::vector<Instr *> &instrs = nvbit_get_instrs(ctx, f);
if (verbose)
printf(
":::NVBit-inject-error; NOT inspecting: %s; %d, %d, num_static_instrs: %ld; maxregs: %d:::",
kname.c_str(), kernel_id, inj_info.kernelCount,
instrs.size(), get_maxregs(f));
}
}
}
/* This call-back is triggered every time a CUDA event is encountered.
* Here, we identify CUDA kernel launch events and reset the "counter" before
 * the kernel is launched, and print the counter after the kernel has completed
* (we make sure it has completed by using hipDeviceSynchronize()). To
* selectively run either the original or instrumented kernel we used
* nvbit_enable_instrumented() before launching the kernel. */
void nvbit_at_cuda_event(hipCtx_t ctx, int is_exit, nvbit_api_cuda_t cbid,
const char *name, void *params, hipError_t *pStatus) {
/* Identify all the possible CUDA launch events */
if (cbid == API_CUDA_cuLaunch || cbid == API_CUDA_cuLaunchKernel_ptsz
|| cbid == API_CUDA_cuLaunchGrid
|| cbid == API_CUDA_cuLaunchGridAsync
|| cbid == API_CUDA_cuLaunchKernel) {
/* cast params to cuLaunch_params since if we are here we know these are
* the right parameters type */
cuLaunch_params * p = (cuLaunch_params *) params;
if (!is_exit) {
pthread_mutex_lock(&mutex);
instrument_function_if_needed(ctx, p->f);
init_counters();
hipDeviceSynchronize();
parse_params(injInputFilename); // injParams are updated based on injection seed file
// print_inj_info();
inj_info.errorInjected = false;
inj_info.areParamsReady = (inj_info.kernelCount == kernel_id); // areParamsReady = true for the selected kernel
if (verbose)
                inj_info.debug[NUM_DEBUG_VALS - 1] = -1; // set debug flag to check whether the instrumented kernel was executed
if (verbose)
printf(
"setting areParamsReady=%d, inj_info.kernelCount=%d, kernel_id=%d\n",
inj_info.areParamsReady, inj_info.kernelCount,
kernel_id);
hipDeviceSynchronize();
nvbit_enable_instrumented(ctx, p->f, inj_info.areParamsReady); // should we run the un-instrumented code?
// nvbit_enable_instrumented(ctx, p->f, false); // for debugging
hipDeviceSynchronize();
} else {
if (verbose)
printf("is_exit\n");
hipDeviceSynchronize();
hipError_t le = hipGetLastError();
if (hipSuccess != le) {
assert(fout.good());
std::cout << "ERROR FAIL in kernel execution ("
<< hipGetErrorString(le) << "); ";
fout << "ERROR FAIL in kernel execution ("
<< hipGetErrorString(le) << "); ";
fout.flush();
exit(1); // let's exit early because no error was injected
}
std::string kname = removeSpaces(nvbit_get_func_name(ctx, p->f));
if (inj_info.areParamsReady) {
inj_info.areParamsReady = false;
int num_ctas = 0;
if (cbid == API_CUDA_cuLaunchKernel_ptsz
|| cbid == API_CUDA_cuLaunchKernel) {
cuLaunchKernel_params * p2 = (cuLaunchKernel_params*) params;
num_ctas = p2->gridDimX * p2->gridDimY * p2->gridDimZ;
}
assert(fout.good());
fout << "Injection data" << std::endl;
fout << "index: " << kernel_id << std::endl;
fout << "kernel_name: " << kname << std::endl;
fout << "ctas: " << num_ctas << std::endl;
fout << "instrs: " << get_inst_count() << std::endl;
write_inj_info();
if (inj_info.opcode == NOP) {
fout << "Error not injected\n";
}
if (verbose != 0 && inj_info.debug[2] != inj_info.debug[3]) { // sanity check
fout
<< "ERROR FAIL in kernel execution; Expected reg value doesn't match; \n";
fout << "maxRegs: " << inj_info.debug[0] << ", destGPRNum: "
<< inj_info.debug[1] << ", expected_val: "
<< std::hex << inj_info.debug[2] << ", myval: "
<< inj_info.debug[3] << std::dec << "\n";
fout << std::endl;
std::cout
<< "NVBit-inject-error; ERROR FAIL in kernel execution; Expected reg value doesn't match; \n";
std::cout << "maxRegs: " << inj_info.debug[0]
<< ", destGPRNum: " << inj_info.debug[1]
<< ", expected_val: " << std::hex
<< inj_info.debug[2] << ", myval: "
<< inj_info.debug[3] << std::dec << "\n";
for (int x = 4; x < 10; x++) {
std::cout << "debug[" << x << "]: " << std::hex
<< inj_info.debug[x] << "\n";
}
std::cout << "debug[11]: " << std::hex << inj_info.debug[11]
<< "\n";
std::cout << "debug[12]: " << inj_info.debug[12] << " "
<< instTypeNames[inj_info.debug[12]] << "\n";
std::cout << "debug[13]: " << inj_info.debug[13] << "\n";
std::cout << "debug[14]: " << std::hex << inj_info.debug[14]
<< "\n";
assert(inj_info.debug[2] == inj_info.debug[3]);
// printf("\nmaxRegs: %d, destGPRNum: %d, expected_val: %x, myval: %x, myval@-1: %x, myval@+1: %x, myval with maxRegs+1: %x, myval with maxRegs-1: %x\n",
// inj_info.debug[0], inj_info.debug[1], inj_info.debug[2], inj_info.debug[3], inj_info.debug[4], inj_info.debug[5], inj_info.debug[6], inj_info.debug[7]);
}
fout.flush();
}
if (verbose)
printf(
"\n index: %d; kernel_name: %s; used_instrumented=%d; \n",
kernel_id, kname.c_str(),
inj_info.debug[NUM_DEBUG_VALS - 1]);
kernel_id++; // always increment kernel_id on kernel exit
hipDeviceSynchronize();
pthread_mutex_unlock(&mutex);
}
}
}
void nvbit_at_term() {
} // nothing to do here.
| 96bb2df05aaf66fdd1c22b4990f6432a8730f564.cu | /*
* Copyright 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <stdio.h>
#include <assert.h>
#include <pthread.h>
#include <string>
#include <fstream>
#include <vector>
#include <map>
#include <algorithm>
#include <iostream>
#include <sstream>
#include <iterator>
#include <signal.h>
#include <unistd.h>
#include <unordered_set>
#include <random>
#include "nvbit_tool.h"
#include "nvbit.h"
#include "utils/utils.h"
#include "globals.h"
#include "injector.h"
int verbose;
__managed__ int verbose_device;
// injection parameters input filename: This file is created the the script
// that launched error injections
std::string injInputFilename = "nvbitfi-injection-info.txt";
pthread_mutex_t mutex;
__managed__ inj_info_t inj_info;
void reset_inj_info() {
inj_info.areParamsReady = false;
inj_info.kernelName[0] = '\0';
inj_info.kernelCount = -1;
inj_info.groupID = 0; // arch state id
inj_info.instID = 0; // instruction id
inj_info.opIDSeed = 0; // destination id seed (float, 0-1)
inj_info.bitIDSeed = 0; // bit location seed (float, 0-1)
inj_info.bitFlipModel = 0; // fault model: single bit flip, all bit flip, random value
inj_info.mask = 0;
inj_info.beforeVal = 0;
inj_info.afterVal = 0;
inj_info.regNo = -1;
inj_info.opcode = NOP;
inj_info.pcOffset = 0;
inj_info.tid = -1;
inj_info.errorInjected = false;
for (int i = 0; i < NUM_DEBUG_VALS; i++) {
inj_info.debug[i] = -1;
}
}
void write_inj_info() {
assert(fout.good());
for (int i = 0; i < NUM_INST_GROUPS; i++) {
fout << " grp " << i << ": " << counters[NUM_ISA_INSTRUCTIONS + i];
}
fout << std::endl;
fout << "mask: 0x" << std::hex << inj_info.mask << std::endl;
fout << "beforeVal: 0x" << inj_info.beforeVal << ";";
fout << "afterVal: 0x" << inj_info.afterVal << std::endl;
fout << "regNo: " << std::dec << inj_info.regNo << std::endl;
fout << "opcode: " << instTypeNames[inj_info.opcode] << std::endl;
fout << "pcOffset: 0x" << std::hex << inj_info.pcOffset << std::endl;
fout << "tid: " << std::dec << inj_info.tid << std::endl;
}
// for debugging
void print_inj_info() {
assert(fout.good());
fout << "kernelName=" << inj_info.kernelName << std::endl;
fout << "kernelCount=" << inj_info.kernelCount << std::endl;
fout << "groupID=" << inj_info.groupID << std::endl;
fout << "bitFlipModel=" << inj_info.bitFlipModel << std::endl;
fout << "instID=" << inj_info.instID << std::endl;
fout << "opIDSeed=" << inj_info.opIDSeed << std::endl;
fout << "bitIDSeed=" << inj_info.bitIDSeed << std::endl;
}
// Parse error injection site info from a file. This should be done on host side.
void parse_params(std::string filename) {
static bool parse_flag = false; // file will be parsed only once - performance enhancement
if (!parse_flag) {
parse_flag = true;
reset_inj_info();
std::ifstream ifs(filename.c_str(), std::ifstream::in);
if (ifs.is_open()) {
ifs >> inj_info.groupID; // arch state id
assert(inj_info.groupID >= 0 && inj_info.groupID < NUM_INST_GROUPS); // ensure that the value is in the expected range
ifs >> inj_info.bitFlipModel; // fault model: single bit flip, all bit flip, random value
assert(inj_info.bitFlipModel < NUM_BFM_TYPES); // ensure that the value is in the expected range
ifs >> inj_info.kernelName;
ifs >> inj_info.kernelCount;
ifs >> inj_info.instID; // instruction id
ifs >> inj_info.opIDSeed; // destination id seed (float, 0-1 for inst injections and 0-256 for reg)
assert(inj_info.opIDSeed >= 0 && inj_info.opIDSeed < 1.01); // ensure that the value is in the expected range
ifs >> inj_info.bitIDSeed; // bit location seed (float, 0-1)
assert(inj_info.bitIDSeed >= 0 && inj_info.bitIDSeed < 1.01); // ensure that the value is in the expected range
} else {
printf(" File %s does not exist!", filename.c_str());
printf(
" This file should contain enough information about the fault site to perform an error injection run: ");
            printf(
                    "(1) arch state id, (2) bit flip model, (3) kernel name, (4) kernel count, (5) instruction id, (6) seed to select destination id, (7) seed to select bit location.\n");
assert(false);
}
ifs.close();
if (verbose)
print_inj_info();
}
}
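// Illustrative nvbitfi-injection-info.txt layout, matching the whitespace-separated
// fields parsed above in this order: groupID, bitFlipModel, kernelName, kernelCount,
// instID, opIDSeed, bitIDSeed. The concrete values below are hypothetical:
//
//   2 0 _Z9addKernelPiPKiS1_ 0 1375 0.4823 0.1196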
int get_maxregs(CUfunction func) {
int maxregs = -1;
cuFuncGetAttribute(&maxregs, CU_FUNC_ATTRIBUTE_NUM_REGS, func);
return maxregs;
}
// custom signal handler such that we don't miss the injection information.
void INThandler(int sig) {
signal(sig, SIG_IGN); // disable Ctrl-C
fout << "ERROR FAIL Detected Signal SIGKILL\n";
write_inj_info();
exit(-1);
}
/* nvbit_at_init() is executed as soon as the nvbit tool is loaded. We typically
* do initializations in this call. In this case for instance we get some
* environment variables values which we use as input arguments to the tool */
// DO NOT USE UVM (__managed__) variables in this function
void nvbit_at_init() {
/* just make sure all managed variables are allocated on GPU */
setenv("CUDA_MANAGED_FORCE_DEVICE_ALLOC", "1", 1);
/* we get some environment variables that are going to be use to selectively
* instrument (within a interval of kernel indexes and instructions). By
* default we instrument everything. */
if (getenv("TOOL_VERBOSE")) {
verbose = atoi(getenv("TOOL_VERBOSE"));
} else {
verbose = 0;
}
// GET_VAR_INT(verbose, "TOOL_VERBOSE", 0, "Enable verbosity inside the tool (1, 2, 3,..)");
initInstTypeNameMap();
signal(SIGINT, INThandler); // install Ctrl-C handler
open_output_file(injOutputFilename);
if (verbose)
printf("nvbit_at_init:end\n");
}
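// Illustrative invocation (assumption: the tool was built as injector.so; nvbitfi's run
// scripts normally set these variables). NVBit tools attach to an unmodified CUDA binary
// via LD_PRELOAD, e.g.:
//   TOOL_VERBOSE=1 LD_PRELOAD=./injector.so ./my_cuda_app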
/**
* Function to generate a real uniform random value
*/
void generate_new_random_value() {
if (inj_info.bitFlipModel == FLEXGRIP_RELATIVE_FU || inj_info.bitFlipModel == FLEXGRIP_RELATIVE_PIPELINE) {
std::random_device rd; //Will be used to obtain a seed for the random number engine
std::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd()
std::uniform_real_distribution<float> dis(0.0f, 1.0f);
inj_info.opIDSeed = dis(gen);
}
}
/* Set used to avoid re-instrumenting the same functions multiple times */
std::unordered_set<CUfunction> already_instrumented;
void instrument_function_if_needed(CUcontext ctx, CUfunction func) {
parse_params(injInputFilename.c_str()); // injParams are updated based on injection seed file
cudaDeviceSynchronize();
verbose_device = verbose;
cudaDeviceSynchronize();
/* Get related functions of the kernel (device function that can be
* called by the kernel) */
std::vector<CUfunction> related_functions = nvbit_get_related_functions(ctx,
func);
/* add kernel itself to the related function vector */
related_functions.push_back(func);
/* iterate on function */
for (auto f : related_functions) {
/* "recording" function was instrumented, if set insertion failed
* we have already encountered this function */
if (!already_instrumented.insert(f).second) {
continue;
}
std::string kname = removeSpaces(nvbit_get_func_name(ctx, f));
if (strcmp(inj_info.kernelName, kname.c_str()) == 0) { // this is the kernel selected for injection
assert(fout.good()); // ensure that the log file is good.
/* Get the vector of instruction composing the loaded CUFunction "f" */
const std::vector<Instr *> &instrs = nvbit_get_instrs(ctx, f);
        /* If verbose we print function name and number of "static" instructions
*/
if (verbose) {
printf("inspecting %s - num instrs %ld\n",
nvbit_get_func_name(ctx, f), instrs.size());
}
int maxregs = get_maxregs(f);
fout << "inspecting: " << kname << "\nnum_static_instrs: "
<< instrs.size() << "\nmaxregs: " << maxregs << "("
<< maxregs << ")" << std::endl;
/* We iterate on the vector of instruction */
for (auto i : instrs) {
std::string opcode = i->getOpcode();
std::string instType = extractInstType(opcode);
// printf("extracted instType: %s\n", instType.c_str());
// printf("index of instType: %d\n", instTypeNameMap[instType]);
// Tokenize the instruction
std::vector<std::string> tokens;
std::string buf; // a buffer string
std::stringstream ss(i->getSass()); // Insert the string into a stream
while (ss >> buf)
tokens.push_back(buf);
int destGPRNum = -1;
int numDestGPRs = 0;
int destPRNum1 = -1;
int destPRNum2 = -1;
int instGrpNum = getOpGroupNum(instTypeNameMap[instType]);
if (tokens.size() > 0 && instGrpNum != G_NODEST) { // an actual instruction that writes to either a GPR or PR register
if (verbose)
printf("num tokens = %ld ", tokens.size());
int start = 1; // first token is opcode string
if (tokens[0].find('@') != std::string::npos) { // predicated instruction, ignore first token
start = 2; // first token is predicate and 2nd token is opcode
}
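                    // Illustrative (hypothetical) SASS line: "@P0 FADD R4, R2, R5" tokenizes to
                    // {"@P0", "FADD", "R4,", "R2,", "R5"}, so start becomes 2 and tokens[start]
                    // is the destination operand R4; for the unpredicated form start stays at 1.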
// Parse the first operand - this is the first destination
int regnum1 = -1;
int regnum2 = -1;
int regtype = extractRegNo(tokens[start], regnum1);
if (regtype == 0) { // GPR reg
destGPRNum = regnum1;
numDestGPRs = (instGrpNum == G_FP64) ? 2 : 1;
int sz = extractSize(opcode);
if (sz != 0) { // for LD, IMMA, HMMA
numDestGPRs = sz / 32;
}
int regtype2 = extractRegNo(tokens[start + 1], regnum2);
// the following is probably not possible in Pascal ISA
if (regtype2 == 1) { // PR reg, it looks like this instruction has two destination registers
destPRNum1 = regnum2;
}
}
if (regtype == 1) {
destPRNum1 = regnum1;
if (instGrpNum != G_PR) { // this is not a PR-only instruction.
int regtype2 = extractRegNo(tokens[start + 1],
regnum2);
if (regtype2 == 0) { // a GPR reg, it looks like this instruction has two destination registers
destGPRNum = regnum2;
numDestGPRs = (instGrpNum == G_FP64) ? 2 : 1;
}
} else { // check if the 2nd reg is a PR dest
if (tokens.size() > 5) { // this seems like the instruction that has 2 PR destinations
int regtype2 = extractRegNo(tokens[start + 1],
regnum2);
if (regtype2 == 1) { // a PR reg, it looks like this instruction has two destination registers
destPRNum2 = regnum2;
}
}
}
}
if (verbose)
printf(
"offset = 0x%x, opcode_info=%d, instType=%s, opcode=%s, numDestGPRs=%d, destGPRNum=%d, destPRNum1=%d, destPRNum2=%d, instruction: %s\n",
i->getOffset(), instTypeNameMap[instType],
instType.c_str(), opcode.c_str(), numDestGPRs,
destGPRNum, destPRNum1, destPRNum2,
i->getSass());
}
//Power law error model
generate_new_random_value();
nvbit_insert_call(i, "inject_error", IPOINT_AFTER);
nvbit_add_call_arg_const_val64(i, (uint64_t) &inj_info);
nvbit_add_call_arg_const_val64(i, (uint64_t) counters);
nvbit_add_call_arg_const_val64(i, (uint64_t) &verbose_device);
nvbit_add_call_arg_const_val32(i, i->getOffset()); // offset (for pc) info
nvbit_add_call_arg_const_val32(i, instTypeNameMap[instType]); // opcode info
nvbit_add_call_arg_const_val32(i, instGrpNum); // instruction group info
nvbit_add_call_arg_const_val32(i, destGPRNum); // destination GPR register number
if (destGPRNum != -1) {
nvbit_add_call_arg_reg_val(i, destGPRNum); // destination GPR register val
} else {
nvbit_add_call_arg_const_val32(i, (unsigned int) -1); // destination GPR register val
}
nvbit_add_call_arg_const_val32(i, numDestGPRs); // number of destination GPR registers
if (isGPInst(instGrpNum) && inj_info.groupID == G_GP) { // PR register numbers should be -1, if the injection model is G_GP. This way we will never inject errors into them
nvbit_add_call_arg_const_val32(i, (unsigned int) -1); // first destination PR register number
nvbit_add_call_arg_const_val32(i, (unsigned int) -1); // second destination PR register number
} else {
nvbit_add_call_arg_const_val32(i, destPRNum1); // first destination PR register number
nvbit_add_call_arg_const_val32(i, destPRNum2); // second destination PR register number
}
nvbit_add_call_arg_const_val32(i, maxregs); // max regs used by the inst info
}
} else {
const std::vector<Instr *> &instrs = nvbit_get_instrs(ctx, f);
if (verbose)
printf(
":::NVBit-inject-error; NOT inspecting: %s; %d, %d, num_static_instrs: %ld; maxregs: %d:::",
kname.c_str(), kernel_id, inj_info.kernelCount,
instrs.size(), get_maxregs(f));
}
}
}
/* This call-back is triggered every time a CUDA event is encountered.
* Here, we identify CUDA kernel launch events and reset the "counter" before
 * the kernel is launched, and print the counter after the kernel has completed
* (we make sure it has completed by using cudaDeviceSynchronize()). To
* selectively run either the original or instrumented kernel we used
* nvbit_enable_instrumented() before launching the kernel. */
void nvbit_at_cuda_event(CUcontext ctx, int is_exit, nvbit_api_cuda_t cbid,
const char *name, void *params, CUresult *pStatus) {
/* Identify all the possible CUDA launch events */
if (cbid == API_CUDA_cuLaunch || cbid == API_CUDA_cuLaunchKernel_ptsz
|| cbid == API_CUDA_cuLaunchGrid
|| cbid == API_CUDA_cuLaunchGridAsync
|| cbid == API_CUDA_cuLaunchKernel) {
/* cast params to cuLaunch_params since if we are here we know these are
* the right parameters type */
cuLaunch_params * p = (cuLaunch_params *) params;
if (!is_exit) {
pthread_mutex_lock(&mutex);
instrument_function_if_needed(ctx, p->f);
init_counters();
cudaDeviceSynchronize();
parse_params(injInputFilename); // injParams are updated based on injection seed file
// print_inj_info();
inj_info.errorInjected = false;
inj_info.areParamsReady = (inj_info.kernelCount == kernel_id); // areParamsReady = true for the selected kernel
if (verbose)
                inj_info.debug[NUM_DEBUG_VALS - 1] = -1; // set debug flag to check whether the instrumented kernel was executed
if (verbose)
printf(
"setting areParamsReady=%d, inj_info.kernelCount=%d, kernel_id=%d\n",
inj_info.areParamsReady, inj_info.kernelCount,
kernel_id);
cudaDeviceSynchronize();
nvbit_enable_instrumented(ctx, p->f, inj_info.areParamsReady); // should we run the un-instrumented code?
// nvbit_enable_instrumented(ctx, p->f, false); // for debugging
cudaDeviceSynchronize();
} else {
if (verbose)
printf("is_exit\n");
cudaDeviceSynchronize();
cudaError_t le = cudaGetLastError();
if (cudaSuccess != le) {
assert(fout.good());
std::cout << "ERROR FAIL in kernel execution ("
<< cudaGetErrorString(le) << "); ";
fout << "ERROR FAIL in kernel execution ("
<< cudaGetErrorString(le) << "); ";
fout.flush();
exit(1); // let's exit early because no error was injected
}
std::string kname = removeSpaces(nvbit_get_func_name(ctx, p->f));
if (inj_info.areParamsReady) {
inj_info.areParamsReady = false;
int num_ctas = 0;
if (cbid == API_CUDA_cuLaunchKernel_ptsz
|| cbid == API_CUDA_cuLaunchKernel) {
cuLaunchKernel_params * p2 = (cuLaunchKernel_params*) params;
num_ctas = p2->gridDimX * p2->gridDimY * p2->gridDimZ;
}
assert(fout.good());
fout << "Injection data" << std::endl;
fout << "index: " << kernel_id << std::endl;
fout << "kernel_name: " << kname << std::endl;
fout << "ctas: " << num_ctas << std::endl;
fout << "instrs: " << get_inst_count() << std::endl;
write_inj_info();
if (inj_info.opcode == NOP) {
fout << "Error not injected\n";
}
if (verbose != 0 && inj_info.debug[2] != inj_info.debug[3]) { // sanity check
fout
<< "ERROR FAIL in kernel execution; Expected reg value doesn't match; \n";
fout << "maxRegs: " << inj_info.debug[0] << ", destGPRNum: "
<< inj_info.debug[1] << ", expected_val: "
<< std::hex << inj_info.debug[2] << ", myval: "
<< inj_info.debug[3] << std::dec << "\n";
fout << std::endl;
std::cout
<< "NVBit-inject-error; ERROR FAIL in kernel execution; Expected reg value doesn't match; \n";
std::cout << "maxRegs: " << inj_info.debug[0]
<< ", destGPRNum: " << inj_info.debug[1]
<< ", expected_val: " << std::hex
<< inj_info.debug[2] << ", myval: "
<< inj_info.debug[3] << std::dec << "\n";
for (int x = 4; x < 10; x++) {
std::cout << "debug[" << x << "]: " << std::hex
<< inj_info.debug[x] << "\n";
}
std::cout << "debug[11]: " << std::hex << inj_info.debug[11]
<< "\n";
std::cout << "debug[12]: " << inj_info.debug[12] << " "
<< instTypeNames[inj_info.debug[12]] << "\n";
std::cout << "debug[13]: " << inj_info.debug[13] << "\n";
std::cout << "debug[14]: " << std::hex << inj_info.debug[14]
<< "\n";
assert(inj_info.debug[2] == inj_info.debug[3]);
// printf("\nmaxRegs: %d, destGPRNum: %d, expected_val: %x, myval: %x, myval@-1: %x, myval@+1: %x, myval with maxRegs+1: %x, myval with maxRegs-1: %x\n",
// inj_info.debug[0], inj_info.debug[1], inj_info.debug[2], inj_info.debug[3], inj_info.debug[4], inj_info.debug[5], inj_info.debug[6], inj_info.debug[7]);
}
fout.flush();
}
if (verbose)
printf(
"\n index: %d; kernel_name: %s; used_instrumented=%d; \n",
kernel_id, kname.c_str(),
inj_info.debug[NUM_DEBUG_VALS - 1]);
kernel_id++; // always increment kernel_id on kernel exit
cudaDeviceSynchronize();
pthread_mutex_unlock(&mutex);
}
}
}
void nvbit_at_term() {
} // nothing to do here.
|
f7b36210f13dd3967c379f5aeb1b78ea9f19603e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* thunderstruck/tracker: SVMImpl.cu
*/
#include "SVMImpl.h"
#include "cudaApi.cuh"
namespace thunderstruck {
//#################### CUDA FUNCTIONS ####################
__device__ double cf_gaussian_kernel(double *x1, double *x2, size_t featureCount, double sigma = 0.2)
{
double squaredNorm = 0.0;
for(size_t i = 0; i < featureCount; ++i)
{
double delta = x1[i] - x2[i];
squaredNorm += delta * delta;
}
return exp(-sigma * squaredNorm);
}
__device__ double cf_linear_kernel(double *x1, double *x2, size_t featureCount)
{
double result = 0.0;
for(size_t i = 0; i < featureCount; ++i)
{
result += x1[i] * x2[i];
}
return result;
}
//#################### CUDA KERNELS ####################
__global__ void ck_calculate_gradients(double *lossValues, double *evaluationResults, int *keepSamples, size_t sampleCount, double *gradients)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < sampleCount)
{
gradients[tid] = keepSamples[tid] != 0 ? -lossValues[tid] - evaluationResults[tid] : 0.0;
}
}
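// For a linear kernel the SVM score f(x) = sum_i beta_i * <x_i, x> collapses into a single
// dot product <w, x> with w = sum_i beta_i * x_i. The kernel below accumulates exactly that
// weight vector (one thread per feature), so each later evaluation costs O(featureCount)
// instead of O(featureCount * maxSupportVectors).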
__global__ void ck_calculate_weights(int *supportVectors, double *betas, size_t maxSupportVectors, double *features, size_t featureCount, double *weights)
{
// Note: These arrays are set to the largest sizes necessary to avoid the need to manage shared memory dynamically.
__shared__ int sharedSupportVectors[200];
__shared__ double sharedBetas[200];
if(threadIdx.x < maxSupportVectors)
{
sharedSupportVectors[threadIdx.x] = supportVectors[threadIdx.x];
sharedBetas[threadIdx.x] = betas[threadIdx.x];
}
__syncthreads();
double weight = 0.0;
for(int i = 0; i < maxSupportVectors; ++i)
{
int svRef = sharedSupportVectors[i];
if(svRef != -1)
{
weight += sharedBetas[i] * features[svRef * featureCount + threadIdx.x];
}
}
weights[threadIdx.x] = weight;
}
__global__ void ck_evaluate_svm_gaussian(int *supportVectors, double *betas, size_t maxSupportVectors,
double *features, int featureCount,
double *sampleFeatures, int *keepSamples, double *sampleResults,
double sigma)
{
__shared__ volatile double sharedResults[512]; // note: the volatile is crucial or the reduction may fail
double sampleResult = 0.0;
// 1 global read
if(keepSamples[blockIdx.x] != 0)
{
// 1 global read
double sampleFeature = sampleFeatures[blockIdx.x * featureCount + threadIdx.x];
// Calculate the contribution from each support vector and add it to the final result.
// To calculate the contribution from support vector i, the strategy is to calculate
// (x[j] - x_i[j])^2 in each thread j, and then add up the results using reduction to
// find (x - x_i)^2. We do the necessary exponentiation when adding to the final result.
for(int i = 0; i < maxSupportVectors; ++i)
{
// 1 global read
int svRef = supportVectors[i];
if(svRef == -1) continue;
// 1 global read
// TODO: Pass in svRef * featureCount in an array?
float delta = sampleFeature - features[svRef * featureCount + threadIdx.x];
// 1 shared write
sharedResults[threadIdx.x] = delta * delta;
__syncthreads();
// Perform a reduction to calculate the sum of the shared results, namely |x - x_i|^2.
for(unsigned int s = featureCount / 2; s > 32; s >>= 1)
{
if(threadIdx.x < s)
{
sharedResults[threadIdx.x] += sharedResults[threadIdx.x + s];
}
__syncthreads();
}
if(threadIdx.x < 32)
{
sharedResults[threadIdx.x] += sharedResults[threadIdx.x + 32];
sharedResults[threadIdx.x] += sharedResults[threadIdx.x + 16];
sharedResults[threadIdx.x] += sharedResults[threadIdx.x + 8];
sharedResults[threadIdx.x] += sharedResults[threadIdx.x + 4];
sharedResults[threadIdx.x] += sharedResults[threadIdx.x + 2];
sharedResults[threadIdx.x] += sharedResults[threadIdx.x + 1];
}
// Add the contribution from this support vector, namely beta_i * exp(-sigma * |x - x_i|^2), to the final result.
if(threadIdx.x == 0)
{
// 1 global read, 1 shared read
sampleResult += betas[i] * __expf(-sigma * sharedResults[0]);
//sampleResult = __fma_rn(betas[i], __expf(-sigma * sharedResults[0]), sampleResult);
//sampleResult = __fmaf_rn(betas[i], __expf(-sigma * sharedResults[0]), sampleResult);
}
}
}
if(threadIdx.x == 0)
{
// 1 global write
sampleResults[blockIdx.x] = sampleResult;
}
}
__global__ void ck_evaluate_svm_linear(double *weights, double *sampleFeatures, int *keepSamples, double *sampleResults)
{
// Note: This array is set to the largest size necessary to avoid the need to manage shared memory dynamically.
__shared__ double sharedFeatureResults[512];
// Each thread block evaluates the SVM on a single sample, which is just a dot product of the form w . x, where x
// contains the features for the sample. We use a thread for each feature, and then sum the results from the various
// features at the end.
double featureResult = 0.0;
// 1 global read
if(keepSamples[blockIdx.x] != 0)
{
// 2 global reads
featureResult = weights[threadIdx.x] * sampleFeatures[blockIdx.x * blockDim.x + threadIdx.x];
}
// 1 shared write
sharedFeatureResults[threadIdx.x] = featureResult;
__syncthreads();
// Sum the results from all the feature threads and write the final result for the sample to global memory.
if(threadIdx.x == 0)
{
double sampleResult = 0.0;
// blockDim.x (e.g. 256) shared reads
for(int i = 0; i < blockDim.x; ++i)
{
sampleResult += sharedFeatureResults[i];
}
// 1 global write
sampleResults[blockIdx.x] = sampleResult;
}
}
__global__ void ck_update_gradient_values(double lambda, size_t plusIndex, size_t minusIndex,
int *supportVectors, double *gradients, size_t maxSupportVectors,
double *kernelMatrix)
{
size_t tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < maxSupportVectors && supportVectors[tid] != -1)
{
gradients[tid] -= lambda * (kernelMatrix[tid * maxSupportVectors + plusIndex] - kernelMatrix[tid * maxSupportVectors + minusIndex]);
}
}
__global__ void ck_update_kernel_matrix(double *kernelMatrix, size_t i, size_t maxSupportVectors, int *supportVectors, size_t featureCount, double *features)
{
size_t j = threadIdx.x + blockDim.x * blockIdx.x;
if(j < maxSupportVectors)
{
int svJ = supportVectors[j];
if(svJ != -1)
{
int svI = supportVectors[i];
double *featuresI = features + svI * featureCount;
double *featuresJ = features + svJ * featureCount;
#if 1
double value = cf_linear_kernel(featuresI, featuresJ, featureCount);
#else
double value = cf_gaussian_kernel(featuresI, featuresJ, featureCount);
#endif
kernelMatrix[i * maxSupportVectors + j] = value;
kernelMatrix[j * maxSupportVectors + i] = value;
}
}
}
//#################### WRAPPER FUNCTIONS ####################
void calculate_svm_gradients(double *lossValues, double *evaluationResults, int *keepSamples, size_t sampleCount, double *gradients, int threadsPerBlock)
{
int numThreads = sampleCount;
int numBlocks = (numThreads + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( ck_calculate_gradients), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, lossValues, evaluationResults, keepSamples, sampleCount, gradients);
}
void calculate_svm_weights(int *supportVectors, double *betas, size_t maxSupportVectors, double *features, size_t featureCount, double *weights)
{
hipLaunchKernelGGL(( ck_calculate_weights), dim3(1),dim3(featureCount), 0, 0, supportVectors, betas, maxSupportVectors, features, featureCount, weights);
}
void evaluate_svm_gaussian(int *supportVectors, double *betas, size_t maxSupportVectors,
double *features, size_t featureCount,
double *sampleFeatures, int *keepSamples, size_t sampleCount,
double *sampleResults, double sigma)
{
hipLaunchKernelGGL(( ck_evaluate_svm_gaussian), dim3(sampleCount),dim3(featureCount), 0, 0, supportVectors, betas, maxSupportVectors,
features, featureCount,
sampleFeatures, keepSamples, sampleResults,
sigma);
}
void evaluate_svm_linear(double *weights, size_t featureCount, double *sampleFeatures, int *keepSamples, size_t sampleCount, double *sampleResults)
{
hipLaunchKernelGGL(( ck_evaluate_svm_linear), dim3(sampleCount),dim3(featureCount), 0, 0, weights, sampleFeatures, keepSamples, sampleResults);
}
void update_gradient_values(double lambda, size_t plusIndex, size_t minusIndex,
int *supportVectors, double *gradients, size_t maxSupportVectors,
double *kernelMatrix, int threadsPerBlock)
{
int numThreads = maxSupportVectors;
int numBlocks = (numThreads + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( ck_update_gradient_values), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, lambda, plusIndex, minusIndex, supportVectors, gradients, maxSupportVectors, kernelMatrix);
}
void update_kernel_matrix(double *kernelMatrix, size_t i, size_t maxSupportVectors, int *supportVectors, size_t featureCount, double *features, int threadsPerBlock)
{
// We need to update K_ij (for all j).
int numThreads = maxSupportVectors;
int numBlocks = (numThreads + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( ck_update_kernel_matrix), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, kernelMatrix, i, maxSupportVectors, supportVectors, featureCount, features);
}
}
| f7b36210f13dd3967c379f5aeb1b78ea9f19603e.cu | /**
* thunderstruck/tracker: SVMImpl.cu
*/
#include "SVMImpl.h"
#include "cudaApi.cuh"
namespace thunderstruck {
//#################### CUDA FUNCTIONS ####################
__device__ double cf_gaussian_kernel(double *x1, double *x2, size_t featureCount, double sigma = 0.2)
{
double squaredNorm = 0.0;
for(size_t i = 0; i < featureCount; ++i)
{
double delta = x1[i] - x2[i];
squaredNorm += delta * delta;
}
return exp(-sigma * squaredNorm);
}
__device__ double cf_linear_kernel(double *x1, double *x2, size_t featureCount)
{
double result = 0.0;
for(size_t i = 0; i < featureCount; ++i)
{
result += x1[i] * x2[i];
}
return result;
}
//#################### CUDA KERNELS ####################
__global__ void ck_calculate_gradients(double *lossValues, double *evaluationResults, int *keepSamples, size_t sampleCount, double *gradients)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < sampleCount)
{
gradients[tid] = keepSamples[tid] != 0 ? -lossValues[tid] - evaluationResults[tid] : 0.0;
}
}
__global__ void ck_calculate_weights(int *supportVectors, double *betas, size_t maxSupportVectors, double *features, size_t featureCount, double *weights)
{
// Note: These arrays are set to the largest sizes necessary to avoid the need to manage shared memory dynamically.
__shared__ int sharedSupportVectors[200];
__shared__ double sharedBetas[200];
if(threadIdx.x < maxSupportVectors)
{
sharedSupportVectors[threadIdx.x] = supportVectors[threadIdx.x];
sharedBetas[threadIdx.x] = betas[threadIdx.x];
}
__syncthreads();
double weight = 0.0;
for(int i = 0; i < maxSupportVectors; ++i)
{
int svRef = sharedSupportVectors[i];
if(svRef != -1)
{
weight += sharedBetas[i] * features[svRef * featureCount + threadIdx.x];
}
}
weights[threadIdx.x] = weight;
}
__global__ void ck_evaluate_svm_gaussian(int *supportVectors, double *betas, size_t maxSupportVectors,
double *features, int featureCount,
double *sampleFeatures, int *keepSamples, double *sampleResults,
double sigma)
{
__shared__ volatile double sharedResults[512]; // note: the volatile is crucial or the reduction may fail
double sampleResult = 0.0;
// 1 global read
if(keepSamples[blockIdx.x] != 0)
{
// 1 global read
double sampleFeature = sampleFeatures[blockIdx.x * featureCount + threadIdx.x];
// Calculate the contribution from each support vector and add it to the final result.
// To calculate the contribution from support vector i, the strategy is to calculate
// (x[j] - x_i[j])^2 in each thread j, and then add up the results using reduction to
// find (x - x_i)^2. We do the necessary exponentiation when adding to the final result.
for(int i = 0; i < maxSupportVectors; ++i)
{
// 1 global read
int svRef = supportVectors[i];
if(svRef == -1) continue;
// 1 global read
// TODO: Pass in svRef * featureCount in an array?
float delta = sampleFeature - features[svRef * featureCount + threadIdx.x];
// 1 shared write
sharedResults[threadIdx.x] = delta * delta;
__syncthreads();
// Perform a reduction to calculate the sum of the shared results, namely |x - x_i|^2.
for(unsigned int s = featureCount / 2; s > 32; s >>= 1)
{
if(threadIdx.x < s)
{
sharedResults[threadIdx.x] += sharedResults[threadIdx.x + s];
}
__syncthreads();
}
if(threadIdx.x < 32)
{
sharedResults[threadIdx.x] += sharedResults[threadIdx.x + 32];
sharedResults[threadIdx.x] += sharedResults[threadIdx.x + 16];
sharedResults[threadIdx.x] += sharedResults[threadIdx.x + 8];
sharedResults[threadIdx.x] += sharedResults[threadIdx.x + 4];
sharedResults[threadIdx.x] += sharedResults[threadIdx.x + 2];
sharedResults[threadIdx.x] += sharedResults[threadIdx.x + 1];
}
// Add the contribution from this support vector, namely beta_i * exp(-sigma * |x - x_i|^2), to the final result.
if(threadIdx.x == 0)
{
// 1 global read, 1 shared read
sampleResult += betas[i] * __expf(-sigma * sharedResults[0]);
//sampleResult = __fma_rn(betas[i], __expf(-sigma * sharedResults[0]), sampleResult);
//sampleResult = __fmaf_rn(betas[i], __expf(-sigma * sharedResults[0]), sampleResult);
}
}
}
if(threadIdx.x == 0)
{
// 1 global write
sampleResults[blockIdx.x] = sampleResult;
}
}
__global__ void ck_evaluate_svm_linear(double *weights, double *sampleFeatures, int *keepSamples, double *sampleResults)
{
// Note: This array is set to the largest size necessary to avoid the need to manage shared memory dynamically.
__shared__ double sharedFeatureResults[512];
// Each thread block evaluates the SVM on a single sample, which is just a dot product of the form w . x, where x
// contains the features for the sample. We use a thread for each feature, and then sum the results from the various
// features at the end.
double featureResult = 0.0;
// 1 global read
if(keepSamples[blockIdx.x] != 0)
{
// 2 global reads
featureResult = weights[threadIdx.x] * sampleFeatures[blockIdx.x * blockDim.x + threadIdx.x];
}
// 1 shared write
sharedFeatureResults[threadIdx.x] = featureResult;
__syncthreads();
// Sum the results from all the feature threads and write the final result for the sample to global memory.
if(threadIdx.x == 0)
{
double sampleResult = 0.0;
// blockDim.x (e.g. 256) shared reads
for(int i = 0; i < blockDim.x; ++i)
{
sampleResult += sharedFeatureResults[i];
}
// 1 global write
sampleResults[blockIdx.x] = sampleResult;
}
}
__global__ void ck_update_gradient_values(double lambda, size_t plusIndex, size_t minusIndex,
int *supportVectors, double *gradients, size_t maxSupportVectors,
double *kernelMatrix)
{
size_t tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < maxSupportVectors && supportVectors[tid] != -1)
{
gradients[tid] -= lambda * (kernelMatrix[tid * maxSupportVectors + plusIndex] - kernelMatrix[tid * maxSupportVectors + minusIndex]);
}
}
__global__ void ck_update_kernel_matrix(double *kernelMatrix, size_t i, size_t maxSupportVectors, int *supportVectors, size_t featureCount, double *features)
{
size_t j = threadIdx.x + blockDim.x * blockIdx.x;
if(j < maxSupportVectors)
{
int svJ = supportVectors[j];
if(svJ != -1)
{
int svI = supportVectors[i];
double *featuresI = features + svI * featureCount;
double *featuresJ = features + svJ * featureCount;
#if 1
double value = cf_linear_kernel(featuresI, featuresJ, featureCount);
#else
double value = cf_gaussian_kernel(featuresI, featuresJ, featureCount);
#endif
kernelMatrix[i * maxSupportVectors + j] = value;
kernelMatrix[j * maxSupportVectors + i] = value;
}
}
}
//#################### WRAPPER FUNCTIONS ####################
void calculate_svm_gradients(double *lossValues, double *evaluationResults, int *keepSamples, size_t sampleCount, double *gradients, int threadsPerBlock)
{
int numThreads = sampleCount;
int numBlocks = (numThreads + threadsPerBlock - 1) / threadsPerBlock;
ck_calculate_gradients<<<numBlocks,threadsPerBlock>>>(lossValues, evaluationResults, keepSamples, sampleCount, gradients);
}
void calculate_svm_weights(int *supportVectors, double *betas, size_t maxSupportVectors, double *features, size_t featureCount, double *weights)
{
ck_calculate_weights<<<1,featureCount>>>(supportVectors, betas, maxSupportVectors, features, featureCount, weights);
}
void evaluate_svm_gaussian(int *supportVectors, double *betas, size_t maxSupportVectors,
double *features, size_t featureCount,
double *sampleFeatures, int *keepSamples, size_t sampleCount,
double *sampleResults, double sigma)
{
ck_evaluate_svm_gaussian<<<sampleCount,featureCount>>>(supportVectors, betas, maxSupportVectors,
features, featureCount,
sampleFeatures, keepSamples, sampleResults,
sigma);
}
void evaluate_svm_linear(double *weights, size_t featureCount, double *sampleFeatures, int *keepSamples, size_t sampleCount, double *sampleResults)
{
ck_evaluate_svm_linear<<<sampleCount,featureCount>>>(weights, sampleFeatures, keepSamples, sampleResults);
}
void update_gradient_values(double lambda, size_t plusIndex, size_t minusIndex,
int *supportVectors, double *gradients, size_t maxSupportVectors,
double *kernelMatrix, int threadsPerBlock)
{
int numThreads = maxSupportVectors;
int numBlocks = (numThreads + threadsPerBlock - 1) / threadsPerBlock;
ck_update_gradient_values<<<numBlocks,threadsPerBlock>>>(lambda, plusIndex, minusIndex, supportVectors, gradients, maxSupportVectors, kernelMatrix);
}
void update_kernel_matrix(double *kernelMatrix, size_t i, size_t maxSupportVectors, int *supportVectors, size_t featureCount, double *features, int threadsPerBlock)
{
// We need to update K_ij (for all j).
int numThreads = maxSupportVectors;
int numBlocks = (numThreads + threadsPerBlock - 1) / threadsPerBlock;
ck_update_kernel_matrix<<<numBlocks,threadsPerBlock>>>(kernelMatrix, i, maxSupportVectors, supportVectors, featureCount, features);
}
}
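// Minimal host-side usage sketch (illustrative only: the d_* names are hypothetical and each
// pointer is assumed to be a device/managed allocation of the stated size; the shared buffers
// above also assume maxSupportVectors <= 200 and featureCount <= 512):
//
//   calculate_svm_weights(d_supportVectors, d_betas, maxSupportVectors,
//                         d_features, featureCount, d_weights);
//   evaluate_svm_linear(d_weights, featureCount, d_sampleFeatures,
//                       d_keepSamples, sampleCount, d_sampleResults);
//   cudaDeviceSynchronize(); // kernel launches are asynchronous; sync before reading d_sampleResults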
|
f641b1e4a9257b756484175addca4e50e220d10d.hip | // !!! This is a file automatically generated by hipify!!!
//
//#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//
//hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
//
//__global__ void addKernel(int *c, const int *a, const int *b)
//{
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
//}
//
//int main()
//{
// const int arraySize = 5;
// const int a[arraySize] = { 1, 2, 3, 4, 5 };
// const int b[arraySize] = { 10, 20, 30, 40, 50 };
// int c[arraySize] = { 0 };
//
// // Add vectors in parallel.
// hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "addWithCuda failed!");
// return 1;
// }
//
// printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
// c[0], c[1], c[2], c[3], c[4]);
//
// // hipDeviceReset must be called before exiting in order for profiling and
// // tracing tools such as Nsight and Visual Profiler to show complete traces.
// cudaStatus = hipDeviceReset();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipDeviceReset failed!");
// return 1;
// }
//
// return 0;
//}
//
//// Helper function for using CUDA to add vectors in parallel.
//hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
//{
// int *dev_a = 0;
// int *dev_b = 0;
// int *dev_c = 0;
// hipError_t cudaStatus;
//
// // Choose which GPU to run on, change this on a multi-GPU system.
// cudaStatus = hipSetDevice(0);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// // Copy input vectors from host memory to GPU buffers.
// cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
//
// cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
//
// // Launch a kernel on the GPU with one thread for each element.
// addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
//
// // Check for any errors launching the kernel
// cudaStatus = hipGetLastError();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
// goto Error;
// }
//
// // hipDeviceSynchronize waits for the kernel to finish, and returns
// // any errors encountered during the launch.
// cudaStatus = hipDeviceSynchronize();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
// goto Error;
// }
//
// // Copy output vector from GPU buffer to host memory.
// cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
//
//Error:
// hipFree(dev_c);
// hipFree(dev_a);
// hipFree(dev_b);
//
// return cudaStatus;
//}
| f641b1e4a9257b756484175addca4e50e220d10d.cu | //
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//
//cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
//
//__global__ void addKernel(int *c, const int *a, const int *b)
//{
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
//}
//
//int main()
//{
// const int arraySize = 5;
// const int a[arraySize] = { 1, 2, 3, 4, 5 };
// const int b[arraySize] = { 10, 20, 30, 40, 50 };
// int c[arraySize] = { 0 };
//
// // Add vectors in parallel.
// cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "addWithCuda failed!");
// return 1;
// }
//
// printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
// c[0], c[1], c[2], c[3], c[4]);
//
// // cudaDeviceReset must be called before exiting in order for profiling and
// // tracing tools such as Nsight and Visual Profiler to show complete traces.
// cudaStatus = cudaDeviceReset();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceReset failed!");
// return 1;
// }
//
// return 0;
//}
//
//// Helper function for using CUDA to add vectors in parallel.
//cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
//{
// int *dev_a = 0;
// int *dev_b = 0;
// int *dev_c = 0;
// cudaError_t cudaStatus;
//
// // Choose which GPU to run on, change this on a multi-GPU system.
// cudaStatus = cudaSetDevice(0);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// // Copy input vectors from host memory to GPU buffers.
// cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
// cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
// // Launch a kernel on the GPU with one thread for each element.
// addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
//
// // Check for any errors launching the kernel
// cudaStatus = cudaGetLastError();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
// goto Error;
// }
//
// // cudaDeviceSynchronize waits for the kernel to finish, and returns
// // any errors encountered during the launch.
// cudaStatus = cudaDeviceSynchronize();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
// goto Error;
// }
//
// // Copy output vector from GPU buffer to host memory.
// cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
//Error:
// cudaFree(dev_c);
// cudaFree(dev_a);
// cudaFree(dev_b);
//
// return cudaStatus;
//}
|
d7b15c4f3d5b46d4092ada5068cfd9c1c8cbcaa6.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
*
 * probe a computer for basic info about processing cores and GPU
*
* compile with:
*
* nvcc probe2.cu -L/usr/local/cuda/lib64 -I/usr/local/cuda-10.2/targets/x86_64-linux/include -lcuda -lcudart
*
* (in .tcshrc, please have:)
* set path = ($path /usr/local/cuda-10.1/bin ./)
* setenv LD_LIBRARY_PATH /usr/local/lib:/usr/local/cuda-10.1/lib64
*
*******************************************************************************/
#include <stdio.h>
#include <thread>
#include "hip/hip_runtime.h"
using namespace std;
/******************************************************************************/
int main(int argc, char *argv[]){
unsigned int numCores = std::thread::hardware_concurrency();
hipError_t err;
err = hipDeviceReset();
hipDeviceProp_t prop;
int count;
err = hipGetDeviceCount(&count);
if(err != hipSuccess){
printf("problem getting device count = %s\n", hipGetErrorString(err));
return 1;
}
printf("number of GPU devices: %d\n\n", count);
  printf("********* Num CPU cores on this machine: %d *********\n\n", numCores);
for (int i = 0; i< count; i++){
printf("************ GPU Device: %d ************\n\n", i);
err = hipGetDeviceProperties(&prop, i);
if(err != hipSuccess){
printf("problem getting device properties = %s\n", hipGetErrorString(err));
return 1;
}
printf("\tName: %s\n", prop.name);
printf("\tTotal global mem: %ld\n", prop.totalGlobalMem );
printf("\tTotal constant Mem: %ld\n", prop.totalConstMem );
printf( "\tMultiprocessor count: %d\n", prop.multiProcessorCount );
printf( "\tShared mem per processor: %ld\n", prop.sharedMemPerBlock );
printf( "\tMax threads per block: %d\n", prop.maxThreadsPerBlock );
printf( "\tMax block dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0],
prop.maxThreadsDim[1],
prop.maxThreadsDim[2]);
printf( "\tMax grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0],
prop.maxGridSize[1],
prop.maxGridSize[2]);
printf("\n");
}
return 0;
}
/******************************************************************************/
| d7b15c4f3d5b46d4092ada5068cfd9c1c8cbcaa6.cu | /*******************************************************************************
*
 * probe a computer for basic info about processing cores and GPU
*
* compile with:
*
* nvcc probe2.cu -L/usr/local/cuda/lib64 -I/usr/local/cuda-10.2/targets/x86_64-linux/include -lcuda -lcudart
*
* (in .tcshrc, please have:)
* set path = ($path /usr/local/cuda-10.1/bin ./)
* setenv LD_LIBRARY_PATH /usr/local/lib:/usr/local/cuda-10.1/lib64
*
*******************************************************************************/
#include <stdio.h>
#include <thread>
#include "cuda_runtime.h"
using namespace std;
/******************************************************************************/
int main(int argc, char *argv[]){
unsigned int numCores = std::thread::hardware_concurrency();
cudaError_t err;
err = cudaDeviceReset();
cudaDeviceProp prop;
int count;
err = cudaGetDeviceCount(&count);
if(err != cudaSuccess){
printf("problem getting device count = %s\n", cudaGetErrorString(err));
return 1;
}
printf("number of GPU devices: %d\n\n", count);
  printf("********* Num CPU cores on this machine: %d *********\n\n", numCores);
for (int i = 0; i< count; i++){
printf("************ GPU Device: %d ************\n\n", i);
err = cudaGetDeviceProperties(&prop, i);
if(err != cudaSuccess){
printf("problem getting device properties = %s\n", cudaGetErrorString(err));
return 1;
}
printf("\tName: %s\n", prop.name);
printf("\tTotal global mem: %ld\n", prop.totalGlobalMem );
printf("\tTotal constant Mem: %ld\n", prop.totalConstMem );
printf( "\tMultiprocessor count: %d\n", prop.multiProcessorCount );
printf( "\tShared mem per processor: %ld\n", prop.sharedMemPerBlock );
printf( "\tMax threads per block: %d\n", prop.maxThreadsPerBlock );
printf( "\tMax block dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0],
prop.maxThreadsDim[1],
prop.maxThreadsDim[2]);
printf( "\tMax grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0],
prop.maxGridSize[1],
prop.maxGridSize[2]);
printf("\n");
}
return 0;
}
/******************************************************************************/
|
a6ef425bc8f5d9a13a607c9610eb86926c7a0eff.hip | // !!! This is a file automatically generated by hipify!!!
// cd /home/hork/cuda-workspace/CudaSHA256/Debug/files
// time ~/Dropbox/FIIT/APS/Projekt/CpuSHA256/a.out -f ../file-list
// time ../CudaSHA256 -f ../file-list
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include "sha256.cuh"
#include <dirent.h>
#include <ctype.h>
#include <sys/time.h>
#define FILE_OUTPUT "timing_report.log"
#define N 65536
#define BLOCKSIZE 256
#define M 4294967296/N
void string2ByteArray(char* input, BYTE* output)
{
uint32_t loop;
uint32_t i;
loop = 0;
i = 0;
while(input[loop] != '\0')
{
output[i++] = input[loop++];
}
}
uint32_t LitToBigEndian(uint32_t x)
{
return (((x>>24) & 0x000000ff) | ((x>>8) & 0x0000ff00) | ((x<<8) & 0x00ff0000) | ((x<<24) & 0xff000000));
}
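// Worked example: LitToBigEndian(0x11223344) == 0x44332211, i.e. this is a plain 32-bit
// byte-order (endianness) swap despite the one-directional name.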
__global__ void sha256_cuda(JOB ** jobs, uint32_t n, uint32_t j, OUT * outs) {
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t stride = blockDim.x * gridDim.x;
for (uint32_t i = index; i < n; i += stride){
SHA256_CTX ctx;
//sha256_init_23(&ctx);
WORD temp1[8];
WORD temp2[8];
jobs[i]->data2[3] = j*n+i;
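        // data2[3] is the nonce word of the second 64-byte header block (header bytes 76..79),
        // so thread i of launch j tests nonce j*n + i. With N jobs per launch (the value the
        // host passes as n), sweeping j over M launches (see the N and M defines) would cover
        // the full 32-bit nonce space; that sweep is driven by host code outside this kernel.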
sha256_transform_2(&ctx, jobs[i]->data1, temp1);
sha256_transform_1(&ctx, temp1, jobs[i]->data2, temp2);
uint32_t k;
for (k = 0; k < 8; k++)
{
jobs[i]->temp[k] = temp2[k];
}
jobs[i]->temp[8] = 0x80000000;
for (k = 9; k < 14; k++)
{
jobs[i]->temp[k] = 0;
}
jobs[i]->temp[15] = 0x00000100;
sha256_transform_2(&ctx, jobs[i]->temp, jobs[i]->digest);
uint32_t m;
uint32_t Final_Hash[8];
for (m = 0; m < 8; m++)
{
Final_Hash[7-m] = ((jobs[i]->digest[m]>>24) & 0x000000ff) | ((jobs[i]->digest[m]>>8) & 0x0000ff00) | ((jobs[i]->digest[m]<<8) & 0x00ff0000) | ((jobs[i]->digest[m]<<24) & 0xff000000);
}
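        // Final_Hash now holds the double-SHA256 digest with both its word order and the bytes
        // inside each word reversed (the usual little-endian "display" form of a block hash);
        // the word-wise check below accepts it only when every compared word is <= the
        // corresponding word of the expanded target.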
int valid = 1;
for ( m = 0; m < outs->NUM; m ++){
if(Final_Hash[m] > outs->TARGET[m])
valid = 0;
}
if(valid){
outs->NONCE = jobs[i]->data2[3];
for (m = 0; m < 8; m++)
{
outs->VALID_H[m] = Final_Hash[m];
}
}
}
}
void pre_sha256() {
// compy symbols
checkCudaErrors(hipMemcpyToSymbol(dev_k, host_k, sizeof(host_k), 0, hipMemcpyHostToDevice));
}
void runJobs(JOB ** jobs, uint32_t n, uint32_t j, OUT * outs){
uint32_t blockSize = BLOCKSIZE;
uint32_t numBlocks = (n + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( sha256_cuda) , dim3(numBlocks), dim3(blockSize) , 0, 0, jobs, n, j,outs);
//sha256_cuda <<< 1, 1 >>> (jobs, n, j, outs);
//sha256_cuda <<< 1, 16 >>> (jobs, n);
}
JOB * JOB_init(const WORD data1[], const WORD data2[], const WORD H[]) {
JOB * j;
checkCudaErrors(hipMallocManaged(&j, sizeof(JOB)));
for (uint32_t i = 0; i < 16; i++)
{
j->data1[i] = data1[i];
}
for (uint32_t i = 0; i < 16; i++)
{
j->data2[i] = data2[i];
}
for (uint32_t i = 0; i < 8; i++)
{
j->H[i] = H[i];
}
return j;
}
int main(void)
{
JOB ** jobs;
OUT * outs;
WORD buf1[8];
uint32_t i,j;
FILE* fo = fopen(FILE_OUTPUT, "w+");
////////////////////////////////
//**BitcoinAtom Block Header**//
///////////////////////////////
/*char Version[] = "2000e000";
char Prev_Hash[] = "000000000000000f5edd17eb45ea50489d171d13e5255fe1ee9e49084eeb65ab";
char Merk_Hash[] = "f896a21b7213eb5f1b8ba73b277fba850f6ad4eaf9cfa72a2a1b0986e04cdcd5";
char Time[] = "5F718F4E";
char Target[] = "1928d33c";*/
////////////////////////////////
//**BitcoinCash Block Header**//
///////////////////////////////
/*char Version[] = "20e00000";
char Prev_Hash[] = "00000000000000000150983ec2829d878c4b3c65dbb3b2b91bb68e2d5073314d";
char Merk_Hash[] = "11f642ffaf5fd182bea3c41ce8a635b2b92aa03a7c362171b777a63c5e540f89";
char Time[] = "5F6F4F19";
char Target[] = "1802f9c7";*/
////////////////////////////////
//** BitcoinV Block Header **//
///////////////////////////////
/*char Version[] = "20c00000";
char Prev_Hash[] = "00000000000000071817e9b8a491790be5835daf63933485d41752513047a94e";
char Merk_Hash[] = "bf609e249dd579d2fcc20fc4c15686964bc49fa359d056c595984cf758b2b96d";
char Time[] = "5F760D01";
char Target[] = "190c1d72";*/
////////////////////////////////
//**EmbargoCoin Block Header**//
///////////////////////////////
/*char Version[] = "00000002";
char Prev_Hash[] = "0000061e5616fa75619116059b18facaf5e31f661aab1c3548dd3cb061cc9b05";
char Merk_Hash[] = "46d2deb2ca2340bd17ef5166e24c0475ab1950fc5ef5a90defbe40467ad8afce";
char Time[] = "5F6F4951";
char Target[] = "1e0962d9";*/
////////////////////////////////
//** FreiCoin Block Header **//
///////////////////////////////
/*char Version[] = "20800000";
char Prev_Hash[] = "0000000000000116a9ff19c489f2bdba49c387d7da193015ab3aa6a222150573";
char Merk_Hash[] = "8516eb1f8561b4c954f32bd3f59cae603ba773c6925523b29fad20df9ec84a6d";
char Time[] = "5F6F474B";
char Target[] = "1a01e394";*/
////////////////////////////////
//** JouleCoin Block Header **//
///////////////////////////////
/*char Version[] = "00400004";
char Prev_Hash[] = "000000000000525e9ed757b108c9c593fb35108fb35f03fd087cfbbc2e71cddb";
char Merk_Hash[] = "641a7ffbd1a0479428f1d3f803880a86cc7ed914ec97932d780eb7ef9c69ca1b";
char Time[] = "5F6A3C6F";
char Target[] = "1b00931b";*/
////////////////////////////////
//**Kryptofranc Block Header**//
///////////////////////////////
/*char Version[] = "20000000";
char Prev_Hash[] = "0000000000000196d80d750006472b0786fa607114574330a28bc7afe7ef8e70";
char Merk_Hash[] = "914cfe3a7005c76f808781fafeab874300c514a1a886160e429283906104a3ed";
char Time[] = "5F71CD79";
char Target[] = "1a028a1e";*/
////////////////////////////////
//** ZetaCoin Block Header **//
///////////////////////////////
/*char Version[] = "00000002";
char Prev_Hash[] = "00000000000eb602457fec75d26912c30b8f6740ee26bd53b7a1235dd7847c78";
char Merk_Hash[] = "3d4874f4a1449e13b303dcd0b74eddd47c1f9b5b8edd2d9d0069163ac56f2fbe";
char Time[] = "52166E7B";
char Target[] = "1b176520";*/
////////////////////////////////
//** Bitcoin Block Header **//
///////////////////////////////
char Version[] = "37FFE000";
char Prev_Hash[] = "000000000000000000038973ac554e90636fae2995efa0d1725c00ac4e7dbc35";
char Merk_Hash[] = "1ef117d88223949d22091e9f6aa01e7f614b9c7e7a609c25808b413639151683";
char Time[] = "5F715CF2";
char Target[] = "170E92AA";
////////////Change to Little Endian///////////
fprintf(fo, "----------------------We are trying to mine a Bitcoin block----------------------\n");
fprintf(fo, "*Block header information:\n");
fprintf(fo, " +Version : %s\n", Version);
fprintf(fo, " +Previous Hash : %s\n", Prev_Hash);
fprintf(fo, " +Merkle Hash : %s\n", Merk_Hash);
fprintf(fo, " +Timestemp : %s\n", Time);
fprintf(fo, " +Target : %s\n", Target);
//Version
uint32_t Version_HEX = (uint32_t)strtol(Version, NULL, 16);
uint32_t Version_LitEndian = LitToBigEndian(Version_HEX);
//Previous hash
uint32_t Prev_Hash_Int[8];
char Prev_Hash_temp[9] = {0}; // 8 hex digits plus a null terminator for strtol
uint32_t Prev_Hash_Counter = 0;
uint32_t Prev_Hash_LitEndian[8];
for (i=0;i<8;i++){
for(j=i*8;j<i*8+8;j++){
Prev_Hash_temp[Prev_Hash_Counter] = Prev_Hash[j];
Prev_Hash_Counter++;
}
Prev_Hash_Counter=0;
Prev_Hash_Int[i] = (uint32_t)strtol(Prev_Hash_temp, NULL, 16);
Prev_Hash_LitEndian[7-i] = LitToBigEndian(Prev_Hash_Int[i]);
}
//Merkle hash
uint32_t Merk_Hash_Int[8];
char Merk_Hash_temp[9] = {0}; // 8 hex digits plus a null terminator for strtol
uint32_t Merk_Hash_Counter = 0;
uint32_t Merk_Hash_LitEndian[8];
for (i=0;i<8;i++){
for(j=i*8;j<i*8+8;j++){
Merk_Hash_temp[Merk_Hash_Counter] = Merk_Hash[j];
Merk_Hash_Counter++;
}
Merk_Hash_Counter=0;
Merk_Hash_Int[i] = (uint32_t)strtol(Merk_Hash_temp, NULL, 16);
Merk_Hash_LitEndian[7-i] = LitToBigEndian(Merk_Hash_Int[i]);
}
//Timestamp
uint32_t Time_HEX = (uint32_t)strtol(Time, NULL, 16);
uint32_t Time_LitEndian = LitToBigEndian(Time_HEX);
//Target
uint32_t Target_HEX = (uint32_t)strtol(Target, NULL, 16);
uint32_t Target_LitEndian = LitToBigEndian(Target_HEX);
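// nBits compact target encoding: the high byte is the exponent (target length in bytes),
// the low three bytes are the mantissa; the loops below expand it into a 256-bit big-endian target.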
uint32_t nbit1 = (Target_HEX >> 24)&0x000000ff;
uint32_t nbit2 = (Target_HEX >> 16)&0x000000ff;
uint32_t nbit3 = (Target_HEX >> 8)&0x000000ff;
uint32_t nbit4 = (Target_HEX)&0x000000ff;
uint32_t target_8b[32];
for( i = 0; i < 32; i++){
if(i == (32 - nbit1 + 2)) {
target_8b[i] = nbit4;
}
else if(i == (32 - nbit1 + 1)) {
target_8b[i] = nbit3;
}
else if(i == (32 - nbit1)) {
target_8b[i] = nbit2;
}
else {
target_8b[i] = 0;
}
}
uint32_t Target_32b[8];
for( i = 0; i < 8; i++){
Target_32b[i] = (target_8b[i*4]<<24)|(target_8b[i*4 + 1] << 16)|(target_8b[i*4 + 2] << 8)|(target_8b[i*4 + 3]);
}
int num_int = (32 - nbit1 + 3) / 4; // leading 32-bit words of the expanded target (covering the nBits mantissa) that the kernel compares
checkCudaErrors(hipMallocManaged(&outs, sizeof(OUT)));
outs->NUM = num_int;
for( i = 0; i < 8; i++){
outs->TARGET[i] = Target_32b[i];
}
fprintf(fo, "*Start to mine........\n");
clock_t start, end;
double cpu_time_used;
int GPU_N;
start = clock();
checkCudaErrors(hipGetDeviceCount(&GPU_N));
checkCudaErrors(hipSetDevice(GPU_N-1));
//sha256_transform_0(&ctx1, Word1, buf1);
checkCudaErrors(hipMallocManaged(&jobs, N * sizeof(JOB *)));
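// The 80-byte block header spans two 512-bit SHA-256 message blocks: Word1 carries the version,
// previous block hash and most of the merkle root; Word2 carries the rest plus the timestamp,
// nBits, a nonce placeholder in word 3, and the SHA-256 padding for a 640-bit (0x280) message.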
for (i=0; i < N; ++i){
WORD Word1[16] = {Version_LitEndian, Prev_Hash_LitEndian[0], Prev_Hash_LitEndian[1], Prev_Hash_LitEndian[2], Prev_Hash_LitEndian[3], Prev_Hash_LitEndian[4], Prev_Hash_LitEndian[5], Prev_Hash_LitEndian[6], Prev_Hash_LitEndian[7], Merk_Hash_LitEndian[0], Merk_Hash_LitEndian[1], Merk_Hash_LitEndian[2], Merk_Hash_LitEndian[3], Merk_Hash_LitEndian[4], Merk_Hash_LitEndian[5], Merk_Hash_LitEndian[6]};
WORD Word2[16] = {Merk_Hash_LitEndian[7], Time_LitEndian, Target_LitEndian, 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000280};
jobs[i] = JOB_init(Word1, Word2, buf1);
}
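// Sweep the full 32-bit nonce space as M kernel launches of N candidates each;
// the kernel derives each candidate nonce as j*N + i.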
for(j = 0; j <M; ++j){
pre_sha256();
runJobs(jobs, N, j, outs);
}
hipDeviceSynchronize();
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
fprintf(fo, "*Execution Time of 2^32 hashes on GPU : %f seconds\n", cpu_time_used);
fprintf(fo, "*A found nonce:%08x\n", outs->NONCE);
fprintf(fo, "*A valid hash: ");
for (i = 0; i < 8; i++)
{
fprintf(fo, "%08x",outs->VALID_H[i]);
}
fprintf(fo, "\n");
hipDeviceReset();
fclose(fo);
return 0;
}
| a6ef425bc8f5d9a13a607c9610eb86926c7a0eff.cu | // cd /home/hork/cuda-workspace/CudaSHA256/Debug/files
// time ~/Dropbox/FIIT/APS/Projekt/CpuSHA256/a.out -f ../file-list
// time ../CudaSHA256 -f ../file-list
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <cuda.h>
#include "sha256.cuh"
#include <dirent.h>
#include <ctype.h>
#include <sys/time.h>
#define FILE_OUTPUT "timing_report.log"
#define N 65536
#define BLOCKSIZE 256
#define M (4294967296ULL / N)
void string2ByteArray(char* input, BYTE* output)
{
uint32_t loop;
uint32_t i;
loop = 0;
i = 0;
while(input[loop] != '\0')
{
output[i++] = input[loop++];
}
}
uint32_t LitToBigEndian(uint32_t x)
{
return (((x>>24) & 0x000000ff) | ((x>>8) & 0x0000ff00) | ((x<<8) & 0x00ff0000) | ((x<<24) & 0xff000000));
}
__global__ void sha256_cuda(JOB ** jobs, uint32_t n, uint32_t j, OUT * outs) {
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t stride = blockDim.x * gridDim.x;
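// Grid-stride loop: each thread hashes one candidate nonce per iteration; the candidate
// nonce j*n + i is written into word 3 of the second header block below.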
for (uint32_t i = index; i < n; i += stride){
SHA256_CTX ctx;
//sha256_init_23(&ctx);
WORD temp1[8];
WORD temp2[8];
jobs[i]->data2[3] = j*n+i;
sha256_transform_2(&ctx, jobs[i]->data1, temp1);
sha256_transform_1(&ctx, temp1, jobs[i]->data2, temp2);
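// temp2 now holds the 256-bit result of the first SHA-256 over the 80-byte header;
// build a single padded 64-byte block from it for the second SHA-256 pass.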
uint32_t k;
for (k = 0; k < 8; k++)
{
jobs[i]->temp[k] = temp2[k];
}
jobs[i]->temp[8] = 0x80000000; // padding bit directly after the 8-word digest
for (k = 9; k < 15; k++) // zero the padding words temp[9..14]
{
jobs[i]->temp[k] = 0;
}
jobs[i]->temp[15] = 0x00000100; // message length of the second hash: 256 bits
sha256_transform_2(&ctx, jobs[i]->temp, jobs[i]->digest);
uint32_t m;
uint32_t Final_Hash[8];
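// Reverse the digest (word order and bytes within each word) into big-endian display
// order so its leading words line up with the expanded target.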
for (m = 0; m < 8; m++)
{
Final_Hash[7-m] = ((jobs[i]->digest[m]>>24) & 0x000000ff) | ((jobs[i]->digest[m]>>8) & 0x0000ff00) | ((jobs[i]->digest[m]<<8) & 0x00ff0000) | ((jobs[i]->digest[m]<<24) & 0xff000000);
}
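// Word-wise target check: reject the candidate if any compared word of the reversed hash
// exceeds the corresponding target word (a simplified stand-in for a full 256-bit compare).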
int valid = 1;
for ( m = 0; m < outs->NUM; m ++){
if(Final_Hash[m] > outs->TARGET[m])
valid = 0;
}
if(valid){
outs->NONCE = jobs[i]->data2[3];
for (m = 0; m < 8; m++)
{
outs->VALID_H[m] = Final_Hash[m];
}
}
}
}
void pre_sha256() {
// compy symbols
checkCudaErrors(cudaMemcpyToSymbol(dev_k, host_k, sizeof(host_k), 0, cudaMemcpyHostToDevice));
}
void runJobs(JOB ** jobs, uint32_t n, uint32_t j, OUT * outs){
uint32_t blockSize = BLOCKSIZE;
uint32_t numBlocks = (n + blockSize - 1) / blockSize;
sha256_cuda <<< numBlocks, blockSize >>> (jobs, n, j,outs);
//sha256_cuda <<< 1, 1 >>> (jobs, n, j, outs);
//sha256_cuda <<< 1, 16 >>> (jobs, n);
}
JOB * JOB_init(const WORD data1[], const WORD data2[], const WORD H[]) {
JOB * j;
checkCudaErrors(cudaMallocManaged(&j, sizeof(JOB)));
for (uint32_t i = 0; i < 16; i++)
{
j->data1[i] = data1[i];
}
for (uint32_t i = 0; i < 16; i++)
{
j->data2[i] = data2[i];
}
for (uint32_t i = 0; i < 8; i++)
{
j->H[i] = H[i];
}
return j;
}
int main(void)
{
JOB ** jobs;
OUT * outs;
WORD buf1[8];
uint32_t i,j;
FILE* fo = fopen(FILE_OUTPUT, "w+");
////////////////////////////////
//**BitcoinAtom Block Header**//
///////////////////////////////
/*char Version[] = "2000e000";
char Prev_Hash[] = "000000000000000f5edd17eb45ea50489d171d13e5255fe1ee9e49084eeb65ab";
char Merk_Hash[] = "f896a21b7213eb5f1b8ba73b277fba850f6ad4eaf9cfa72a2a1b0986e04cdcd5";
char Time[] = "5F718F4E";
char Target[] = "1928d33c";*/
////////////////////////////////
//**BitcoinCash Block Header**//
///////////////////////////////
/*char Version[] = "20e00000";
char Prev_Hash[] = "00000000000000000150983ec2829d878c4b3c65dbb3b2b91bb68e2d5073314d";
char Merk_Hash[] = "11f642ffaf5fd182bea3c41ce8a635b2b92aa03a7c362171b777a63c5e540f89";
char Time[] = "5F6F4F19";
char Target[] = "1802f9c7";*/
////////////////////////////////
//** BitcoinV Block Header **//
///////////////////////////////
/*char Version[] = "20c00000";
char Prev_Hash[] = "00000000000000071817e9b8a491790be5835daf63933485d41752513047a94e";
char Merk_Hash[] = "bf609e249dd579d2fcc20fc4c15686964bc49fa359d056c595984cf758b2b96d";
char Time[] = "5F760D01";
char Target[] = "190c1d72";*/
////////////////////////////////
//**EmbargoCoin Block Header**//
///////////////////////////////
/*char Version[] = "00000002";
char Prev_Hash[] = "0000061e5616fa75619116059b18facaf5e31f661aab1c3548dd3cb061cc9b05";
char Merk_Hash[] = "46d2deb2ca2340bd17ef5166e24c0475ab1950fc5ef5a90defbe40467ad8afce";
char Time[] = "5F6F4951";
char Target[] = "1e0962d9";*/
////////////////////////////////
//**EmbargoCoin Block Header**//
///////////////////////////////
/*char Version[] = "00000002";
char Prev_Hash[] = "0000061e5616fa75619116059b18facaf5e31f661aab1c3548dd3cb061cc9b05";
char Merk_Hash[] = "46d2deb2ca2340bd17ef5166e24c0475ab1950fc5ef5a90defbe40467ad8afce";
char Time[] = "5F6F4951";
char Target[] = "1e0962d9";*/
////////////////////////////////
//** FreiCoin Block Header **//
///////////////////////////////
/*char Version[] = "20800000";
char Prev_Hash[] = "0000000000000116a9ff19c489f2bdba49c387d7da193015ab3aa6a222150573";
char Merk_Hash[] = "8516eb1f8561b4c954f32bd3f59cae603ba773c6925523b29fad20df9ec84a6d";
char Time[] = "5F6F474B";
char Target[] = "1a01e394";*/
////////////////////////////////
//** JouleCoin Block Header **//
///////////////////////////////
/*char Version[] = "00400004";
char Prev_Hash[] = "000000000000525e9ed757b108c9c593fb35108fb35f03fd087cfbbc2e71cddb";
char Merk_Hash[] = "641a7ffbd1a0479428f1d3f803880a86cc7ed914ec97932d780eb7ef9c69ca1b";
char Time[] = "5F6A3C6F";
char Target[] = "1b00931b";*/
////////////////////////////////
//**Kryptofranc Block Header**//
///////////////////////////////
/*char Version[] = "20000000";
char Prev_Hash[] = "0000000000000196d80d750006472b0786fa607114574330a28bc7afe7ef8e70";
char Merk_Hash[] = "914cfe3a7005c76f808781fafeab874300c514a1a886160e429283906104a3ed";
char Time[] = "5F71CD79";
char Target[] = "1a028a1e";*/
////////////////////////////////
//** ZetaCoin Block Header **//
///////////////////////////////
/*char Version[] = "00000002";
char Prev_Hash[] = "00000000000eb602457fec75d26912c30b8f6740ee26bd53b7a1235dd7847c78";
char Merk_Hash[] = "3d4874f4a1449e13b303dcd0b74eddd47c1f9b5b8edd2d9d0069163ac56f2fbe";
char Time[] = "52166E7B";
char Target[] = "1b176520";*/
////////////////////////////////
//** Bitcoin Block Header **//
///////////////////////////////
char Version[] = "37FFE000";
char Prev_Hash[] = "000000000000000000038973ac554e90636fae2995efa0d1725c00ac4e7dbc35";
char Merk_Hash[] = "1ef117d88223949d22091e9f6aa01e7f614b9c7e7a609c25808b413639151683";
char Time[] = "5F715CF2";
char Target[] = "170E92AA";
////////////Change to Little Endian///////////
fprintf(fo, "----------------------We are trying to mine a Bitcoin block----------------------\n");
fprintf(fo, "*Block header information:\n");
fprintf(fo, " +Version : %s\n", Version);
fprintf(fo, " +Previous Hash : %s\n", Prev_Hash);
fprintf(fo, " +Merkle Hash : %s\n", Merk_Hash);
fprintf(fo, " +Timestemp : %s\n", Time);
fprintf(fo, " +Target : %s\n", Target);
//Version
uint32_t Version_HEX = (uint32_t)strtol(Version, NULL, 16);
uint32_t Version_LitEndian = LitToBigEndian(Version_HEX);
//Previous hash
uint32_t Prev_Hash_Int[8];
char Prev_Hash_temp[9] = {0}; // 8 hex digits plus a null terminator for strtol
uint32_t Prev_Hash_Counter = 0;
uint32_t Prev_Hash_LitEndian[8];
for (i=0;i<8;i++){
for(j=i*8;j<i*8+8;j++){
Prev_Hash_temp[Prev_Hash_Counter] = Prev_Hash[j];
Prev_Hash_Counter++;
}
Prev_Hash_Counter=0;
Prev_Hash_Int[i] = (uint32_t)strtol(Prev_Hash_temp, NULL, 16);
Prev_Hash_LitEndian[7-i] = LitToBigEndian(Prev_Hash_Int[i]);
}
//Merkle hash
uint32_t Merk_Hash_Int[8];
char Merk_Hash_temp[9] = {0}; // 8 hex digits plus a null terminator for strtol
uint32_t Merk_Hash_Counter = 0;
uint32_t Merk_Hash_LitEndian[8];
for (i=0;i<8;i++){
for(j=i*8;j<i*8+8;j++){
Merk_Hash_temp[Merk_Hash_Counter] = Merk_Hash[j];
Merk_Hash_Counter++;
}
Merk_Hash_Counter=0;
Merk_Hash_Int[i] = (uint32_t)strtol(Merk_Hash_temp, NULL, 16);
Merk_Hash_LitEndian[7-i] = LitToBigEndian(Merk_Hash_Int[i]);
}
//Timestamp
uint32_t Time_HEX = (uint32_t)strtol(Time, NULL, 16);
uint32_t Time_LitEndian = LitToBigEndian(Time_HEX);
//Target
uint32_t Target_HEX = (uint32_t)strtol(Target, NULL, 16);
uint32_t Target_LitEndian = LitToBigEndian(Target_HEX);
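// nBits compact target encoding: the high byte is the exponent (target length in bytes),
// the low three bytes are the mantissa; the loops below expand it into a 256-bit big-endian target.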
uint32_t nbit1 = (Target_HEX >> 24)&0x000000ff;
uint32_t nbit2 = (Target_HEX >> 16)&0x000000ff;
uint32_t nbit3 = (Target_HEX >> 8)&0x000000ff;
uint32_t nbit4 = (Target_HEX)&0x000000ff;
uint32_t target_8b[32];
for( i = 0; i < 32; i++){
if(i == (32 - nbit1 + 2)) {
target_8b[i] = nbit4;
}
else if(i == (32 - nbit1 + 1)) {
target_8b[i] = nbit3;
}
else if(i == (32 - nbit1)) {
target_8b[i] = nbit2;
}
else {
target_8b[i] = 0;
}
}
uint32_t Target_32b[8];
for( i = 0; i < 8; i++){
Target_32b[i] = (target_8b[i*4]<<24)|(target_8b[i*4 + 1] << 16)|(target_8b[i*4 + 2] << 8)|(target_8b[i*4 + 3]);
}
int num_int = (32 - nbit1 + 3) / 4; // leading 32-bit words of the expanded target (covering the nBits mantissa) that the kernel compares
checkCudaErrors(cudaMallocManaged(&outs, sizeof(OUT)));
outs->NUM = num_int;
for( i = 0; i < 8; i++){
outs->TARGET[i] = Target_32b[i];
}
fprintf(fo, "*Start to mine........\n");
clock_t start, end;
double cpu_time_used;
int GPU_N;
start = clock();
checkCudaErrors(cudaGetDeviceCount(&GPU_N));
checkCudaErrors(cudaSetDevice(GPU_N-1));
//sha256_transform_0(&ctx1, Word1, buf1);
checkCudaErrors(cudaMallocManaged(&jobs, N * sizeof(JOB *)));
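// The 80-byte block header spans two 512-bit SHA-256 message blocks: Word1 carries the version,
// previous block hash and most of the merkle root; Word2 carries the rest plus the timestamp,
// nBits, a nonce placeholder in word 3, and the SHA-256 padding for a 640-bit (0x280) message.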
for (i=0; i < N; ++i){
WORD Word1[16] = {Version_LitEndian, Prev_Hash_LitEndian[0], Prev_Hash_LitEndian[1], Prev_Hash_LitEndian[2], Prev_Hash_LitEndian[3], Prev_Hash_LitEndian[4], Prev_Hash_LitEndian[5], Prev_Hash_LitEndian[6], Prev_Hash_LitEndian[7], Merk_Hash_LitEndian[0], Merk_Hash_LitEndian[1], Merk_Hash_LitEndian[2], Merk_Hash_LitEndian[3], Merk_Hash_LitEndian[4], Merk_Hash_LitEndian[5], Merk_Hash_LitEndian[6]};
WORD Word2[16] = {Merk_Hash_LitEndian[7], Time_LitEndian, Target_LitEndian, 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000280};
jobs[i] = JOB_init(Word1, Word2, buf1);
}
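// Sweep the full 32-bit nonce space as M kernel launches of N candidates each;
// the kernel derives each candidate nonce as j*N + i.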
for(j = 0; j <M; ++j){
pre_sha256();
runJobs(jobs, N, j, outs);
}
cudaDeviceSynchronize();
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
fprintf(fo, "*Execution Time of 2^32 hashes on GPU : %f seconds\n", cpu_time_used);
fprintf(fo, "*A found nonce:%08x\n", outs->NONCE);
fprintf(fo, "*A valid hash: ");
for (i = 0; i < 8; i++)
{
fprintf(fo, "%08x",outs->VALID_H[i]);
}
fprintf(fo, "\n");
cudaDeviceReset();
fclose(fo);
return 0;
}
|
25a8f790f5600c872833b8997706c46358687869.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
| 25a8f790f5600c872833b8997706c46358687869.cu | /***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
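  // Note: 2.0 * N^3 counts one AddOp and one MultOp per inner-product term, so the
  // "Flop/s" counter reports semiring-operation throughput (here cuasr::maximum and
  // cuasr::plus) rather than classical floating-point multiply-adds.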
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
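// Note: RangeMultiplier(2)->Range(256, 4096) sweeps square problem sizes
// N = 256, 512, 1024, 2048, 4096; the harness above passes {N, N, N}, i.e. M = N = K.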
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_tn_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_tn_t_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
|
e1c9d7f5587f8feee1f483ef949b681f37d90c14.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#define row1 2 /* Number of rows of first matrix */
#define col1 3 /* Number of columns of first matrix */
#define row2 3 /* Number of rows of second matrix */
#define col2 2 /* Number of columns of second matrix */
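/* Note: despite its name, matadd computes the matrix product n = l * m,
   one thread per output element (row y = threadIdx.y, column x = threadIdx.x). */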
__global__ void matadd(int *l,int *m, int *n)
{
int x=threadIdx.x;
int y=threadIdx.y;
int k;
n[col2*y+x]=0;
for(k=0;k<col1;k++)
{
n[col2*y+x]=n[col2*y+x]+l[col1*y+k]*m[col2*k+x];
}
}
int main()
{
int a[row1][col1];
int b[row2][col2];
int c[row1][col2];
int *d,*e,*f;
int i,j;
printf("\n Enter elements of first matrix of size 2*3\n");
for(i=0;i<row1;i++)
{
for(j=0;j<col1;j++)
{
scanf("%d",&a[i][j]);
}
}
printf("\n Enter elements of second matrix of size 3*2\n");
for(i=0;i<row2;i++)
{
for(j=0;j<col2;j++)
{
scanf("%d",&b[i][j]);
}
}
hipMalloc((void **)&d,row1*col1*sizeof(int));
hipMalloc((void **)&e,row2*col2*sizeof(int));
hipMalloc((void **)&f,row1*col2*sizeof(int));
hipMemcpy(d,a,row1*col1*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(e,b,row2*col2*sizeof(int),hipMemcpyHostToDevice);
dim3 threadBlock(col2,row1);
/* Here we are defining two dimensional Grid(collection of blocks) structure. Syntax is dim3 grid(no. of columns,no. of rows) */
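/* For example, dim3 grid(4,2) (hypothetical values) would describe a grid of 4 columns x 2 rows of blocks;
   here a 1-block grid with a col2 x row1 block of threads is enough, one thread per element of c. */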
hipLaunchKernelGGL(( matadd), dim3(1),dim3(threadBlock), 0, 0, d,e,f);
hipMemcpy(c,f,row1*col2*sizeof(int),hipMemcpyDeviceToHost);
printf("\nProduct of two matrices:\n ");
for(i=0;i<row1;i++)
{
for(j=0;j<col2;j++)
{
printf("%d\t",c[i][j]);
}
printf("\n");
}
hipFree(d);
hipFree(e);
hipFree(f);
return 0;
}
| e1c9d7f5587f8feee1f483ef949b681f37d90c14.cu | #include<stdio.h>
#include<cuda.h>
#define row1 2 /* Number of rows of first matrix */
#define col1 3 /* Number of columns of first matrix */
#define row2 3 /* Number of rows of second matrix */
#define col2 2 /* Number of columns of second matrix */
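/* Note: despite its name, matadd computes the matrix product n = l * m,
   one thread per output element (row y = threadIdx.y, column x = threadIdx.x). */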
__global__ void matadd(int *l,int *m, int *n)
{
int x=threadIdx.x;
int y=threadIdx.y;
int k;
n[col2*y+x]=0;
for(k=0;k<col1;k++)
{
n[col2*y+x]=n[col2*y+x]+l[col1*y+k]*m[col2*k+x];
}
}
int main()
{
int a[row1][col1];
int b[row2][col2];
int c[row1][col2];
int *d,*e,*f;
int i,j;
printf("\n Enter elements of first matrix of size 2*3\n");
for(i=0;i<row1;i++)
{
for(j=0;j<col1;j++)
{
scanf("%d",&a[i][j]);
}
}
printf("\n Enter elements of second matrix of size 3*2\n");
for(i=0;i<row2;i++)
{
for(j=0;j<col2;j++)
{
scanf("%d",&b[i][j]);
}
}
cudaMalloc((void **)&d,row1*col1*sizeof(int));
cudaMalloc((void **)&e,row2*col2*sizeof(int));
cudaMalloc((void **)&f,row1*col2*sizeof(int));
cudaMemcpy(d,a,row1*col1*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(e,b,row2*col2*sizeof(int),cudaMemcpyHostToDevice);
dim3 threadBlock(col2,row1);
/* Here we are defining two dimensional Grid(collection of blocks) structure. Syntax is dim3 grid(no. of columns,no. of rows) */
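/* For example, dim3 grid(4,2) (hypothetical values) would describe a grid of 4 columns x 2 rows of blocks;
   here a 1-block grid with a col2 x row1 block of threads is enough, one thread per element of c. */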
matadd<<<1,threadBlock>>>(d,e,f);
cudaMemcpy(c,f,row1*col2*sizeof(int),cudaMemcpyDeviceToHost);
printf("\nProduct of two matrices:\n ");
for(i=0;i<row1;i++)
{
for(j=0;j<col2;j++)
{
printf("%d\t",c[i][j]);
}
printf("\n");
}
cudaFree(d);
cudaFree(e);
cudaFree(f);
return 0;
}
|
c43a79a7a13be4ed7a3a554bb5b4a8fa5b82652d.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdafx.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include "index_.h"
#include "object.cuh"
#include "message_processor.cuh"
#include "now.h"
#include "cuda_stream_controller.h"
__global__ void xsfl_msg_knl(int n, MessageC* A, MessageC* T) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
int bundle_id = id / MessageBucket::kRhoB;
MessageC cached_messages[5];
for (int i = id * MessageBucket::kRhoB; i < MessageBucket::kRhoB; ++i) {
MessageC m = A[i];
for (int j = 4; j >= 0; --j) {
for (int k = 0; k < 5; ++k) {
if (cached_messages[k].oid == m.oid) {
if (cached_messages[k].t < m.t) {
cached_messages[k] = m;
}
break;
}
if (cached_messages[k].oid == 0) {
cached_messages[k] = m;
break;
}
}
m.oid = __shfl_xor_sync(0xFFFFFFFF, m.oid, 1 << j);
m.eid = __shfl_xor_sync(0xFFFFFFFF, m.eid, 1 << j);
m.rho = __shfl_xor_sync(0xFFFFFFFF, m.rho, 1 << j);
m.t = __shfl_xor_sync(0xFFFFFFFF, m.t, 1 << j);
}
for (int j = 0; j < 4; ++j) {
T[m.oid * n + bundle_id] = m;
}
}
}
__global__ void clct_knl(int n, int* o_num, MessageC* R, MessageC* T) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
int bundle_id = id / MessageBucket::kRhoB;
MessageC m;
for (int i = id * n; i < id * n + n; ++i) {
if (T[i].oid == id && T[i].t > m.t) {
m = T[i];
}
if (m.oid != 0) {
for (int j = m.cid * G_Grid::kMaxObjectsPerCell; j < m.cid * G_Grid::kMaxObjectsPerCell + G_Grid::kMaxObjectsPerCell; ++j) {
if (R[j].oid == 0) R[j] = m;
}
}
}
}
MessageBucket* MessageLists::lists_[G_Grid::kCellNum];
void MessageLists::MessageCleaning(std::vector<int> lists, int message_out_num, MessageC* messages) {
int now = Now::now();
int total = 0;
int n_to_clean = std::accumulate(lists.begin(), lists.end(), 0,
[&total, now](int t, int i) -> int {
auto pm = lists_[i];
while (pm != nullptr && (now - pm->t < kTimeDelta)) {
//TODO delete if obsolete
total++;
pm = pm->p;
}
return t + total;
});
MessageC *h_buckets, *d_buckets, *d_T, *d_R, *h_R;
int n_message_out, *d_m;
hipHostMalloc(&h_buckets, sizeof(MessageC) * MessageBucket::kRhoB * n_to_clean);
    hipMalloc(&d_buckets, sizeof(MessageC) * MessageBucket::kRhoB * n_to_clean);
hipMalloc(&d_m, sizeof(int));
hipMalloc(&d_T, sizeof(MessageC) * Objects::kTotalObjectNum * n_to_clean);
hipMalloc(&d_R, sizeof(MessageC) * G_Grid::kCellNum * G_Grid::kMaxObjectsPerCell);
hipHostMalloc(&h_R, sizeof(MessageC) * G_Grid::kCellNum * G_Grid::kMaxObjectsPerCell);
int sum = 0;
for (auto i = lists.begin(); i < lists.end(); ++i) {
(*(reinterpret_cast<MessageBucket*>(h_buckets) + sum++)) = *lists_[*i];
}
hipMemcpy(d_buckets, h_buckets, sizeof(MessageC) * MessageBucket::kRhoB * n_to_clean, hipMemcpyHostToDevice);
hipSetDevice(0);
dim3 block(128);
dim3 grid(n_to_clean / 128);
auto ustream_st = CudaStreamControler::getStream();
xsfl_msg_knl << <grid, block, 0, ustream_st>> >(n_to_clean, d_buckets, d_T);
clct_knl << <dim3(Objects::kTotalObjectNum / 128), block, 0, ustream_st>> >(n_to_clean, d_m, d_R, d_T);
hipDeviceSynchronize();
    hipMemcpy(h_R, d_R, sizeof(MessageC) * MessageBucket::kRhoB * n_to_clean, hipMemcpyDeviceToHost); // copy results device -> host
    hipMemcpy(&n_message_out, d_m, sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_buckets);
hipFree(d_m);
hipFree(d_T);
hipFree(d_R);
hipHostFree(h_buckets);
hipHostFree(h_R);
}
| c43a79a7a13be4ed7a3a554bb5b4a8fa5b82652d.cu | #include "stdafx.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include "index_.h"
#include "object.cuh"
#include "message_processor.cuh"
#include "now.h"
#include "cuda_stream_controller.h"
__global__ void xsfl_msg_knl(int n, MessageC* A, MessageC* T) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
int bundle_id = id / MessageBucket::kRhoB;
MessageC cached_messages[5];
for (int i = id * MessageBucket::kRhoB; i < MessageBucket::kRhoB; ++i) {
MessageC m = A[i];
for (int j = 4; j >= 0; --j) {
for (int k = 0; k < 5; ++k) {
if (cached_messages[k].oid == m.oid) {
if (cached_messages[k].t < m.t) {
cached_messages[k] = m;
}
break;
}
if (cached_messages[k].oid == 0) {
cached_messages[k] = m;
break;
}
}
m.oid = __shfl_xor_sync(0xFFFFFFFF, m.oid, 1 << j);
m.eid = __shfl_xor_sync(0xFFFFFFFF, m.eid, 1 << j);
m.rho = __shfl_xor_sync(0xFFFFFFFF, m.rho, 1 << j);
m.t = __shfl_xor_sync(0xFFFFFFFF, m.t, 1 << j);
}
for (int j = 0; j < 4; ++j) {
T[m.oid * n + bundle_id] = m;
}
}
}
__global__ void clct_knl(int n, int* o_num, MessageC* R, MessageC* T) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
int bundle_id = id / MessageBucket::kRhoB;
MessageC m;
for (int i = id * n; i < id * n + n; ++i) {
if (T[i].oid == id && T[i].t > m.t) {
m = T[i];
}
if (m.oid != 0) {
for (int j = m.cid * G_Grid::kMaxObjectsPerCell; j < m.cid * G_Grid::kMaxObjectsPerCell + G_Grid::kMaxObjectsPerCell; ++j) {
if (R[j].oid == 0) R[j] = m;
}
}
}
}
MessageBucket* MessageLists::lists_[G_Grid::kCellNum];
void MessageLists::MessageCleaning(std::vector<int> lists, int message_out_num, MessageC* messages) {
int now = Now::now();
int total = 0;
int n_to_clean = std::accumulate(lists.begin(), lists.end(), 0,
[&total, now](int t, int i) -> int {
auto pm = lists_[i];
while (pm != nullptr && (now - pm->t < kTimeDelta)) {
//TODO delete if obsolete
total++;
pm = pm->p;
}
return t + total;
});
MessageC *h_buckets, *d_buckets, *d_T, *d_R, *h_R;
int n_message_out, *d_m;
cudaMallocHost(&h_buckets, sizeof(MessageC) * MessageBucket::kRhoB * n_to_clean);
    cudaMalloc(&d_buckets, sizeof(MessageC) * MessageBucket::kRhoB * n_to_clean);
cudaMalloc(&d_m, sizeof(int));
cudaMalloc(&d_T, sizeof(MessageC) * Objects::kTotalObjectNum * n_to_clean);
cudaMalloc(&d_R, sizeof(MessageC) * G_Grid::kCellNum * G_Grid::kMaxObjectsPerCell);
cudaMallocHost(&h_R, sizeof(MessageC) * G_Grid::kCellNum * G_Grid::kMaxObjectsPerCell);
int sum = 0;
for (auto i = lists.begin(); i < lists.end(); ++i) {
(*(reinterpret_cast<MessageBucket*>(h_buckets) + sum++)) = *lists_[*i];
}
cudaMemcpy(d_buckets, h_buckets, sizeof(MessageC) * MessageBucket::kRhoB * n_to_clean, cudaMemcpyHostToDevice);
cudaSetDevice(0);
dim3 block(128);
dim3 grid(n_to_clean / 128);
auto ustream_st = CudaStreamControler::getStream();
xsfl_msg_knl << <grid, block, 0, ustream_st>> >(n_to_clean, d_buckets, d_T);
clct_knl << <dim3(Objects::kTotalObjectNum / 128), block, 0, ustream_st>> >(n_to_clean, d_m, d_R, d_T);
cudaDeviceSynchronize();
    cudaMemcpy(h_R, d_R, sizeof(MessageC) * MessageBucket::kRhoB * n_to_clean, cudaMemcpyDeviceToHost); // copy results device -> host
    cudaMemcpy(&n_message_out, d_m, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_buckets);
cudaFree(d_m);
cudaFree(d_T);
cudaFree(d_R);
cudaFreeHost(h_buckets);
cudaFreeHost(h_R);
}
|
87d31e00ddd66d9fb491769d5011f041964db160.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
# include "option.h"
# include "performance.h"
# include <pnl/pnl_mathtools.h>
#include "performance.cuh"
/*!
* \file performance.cpp
* \brief option performance
*/
Performance :: Performance() : Option() {
Coeff_ = pnl_vect_new();
}
Performance :: Performance(Parser & pars) : Option(pars){
Coeff_ = pnl_vect_copy(pars.getVect("payoff coefficients"));
Coeff_gpu = (float*)malloc(size_*sizeof(float));
for (int i = 0; i < size_; i++)
Coeff_gpu[i] = GET(Coeff_, i);
}
Performance :: ~Performance(){
}
PnlVect* Performance :: get_Coeff(){
return Coeff_;
}
void Performance :: set_Coeff(PnlVect *Coeff) {
Coeff_ = Coeff;
}
double Performance :: payoff (const PnlMat *path) {
double sum = 0.0;
double temp_num;
double temp_deno;
    //Numerator: vector holding the d asset values at time t_i
    //Denominator: vector holding the d asset values at time t_{i-1}
PnlVect* numerateur = pnl_vect_create(size_);
PnlVect* denominateur = pnl_vect_create(size_);
for (int i=1; i<TimeSteps_+1; i++){
        //Copy the d asset values at time t_i into numerateur
        //and those at time t_{i-1} into denominateur
pnl_mat_get_col(numerateur, path, i);
pnl_mat_get_col(denominateur, path, i-1);
temp_num = pnl_vect_scalar_prod(numerateur, Coeff_);
temp_deno = pnl_vect_scalar_prod(denominateur, Coeff_);
sum = sum + temp_num/temp_deno;
}
sum = sum/(double)(TimeSteps_) - 1;
pnl_vect_free(&numerateur);
pnl_vect_free(&denominateur);
return 1+MIN(MAX(sum,0), 0.1);
}
void Performance::priceMC(
dim3 dimGrid,
dim3 dimBlock,
double &prix,
double &ic,
int N,
int samples,
float* d_path)
{
//Compute price
float* d_per_block_results_price;
hipMalloc((float**)&d_per_block_results_price, (dimGrid.x)*sizeof(float));
float* d_per_block_results_ic;
hipMalloc((float**)&d_per_block_results_ic, (dimGrid.x)*sizeof(float));
float* d_coeff;
hipMalloc((float**)&d_coeff, size_*sizeof(float));
hipMemcpy(d_coeff, Coeff_gpu, size_*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( mc_performance), dim3(dimGrid), dim3(dimBlock), 2*(dimBlock.x)*sizeof(float), 0, N, size_, samples, d_coeff, d_path, d_per_block_results_price, d_per_block_results_ic);
hipDeviceSynchronize();
float* per_block_results_price = (float*)malloc((dimGrid.x)*sizeof(float));
hipMemcpy(per_block_results_price, d_per_block_results_price, (dimGrid.x)*sizeof(float), hipMemcpyDeviceToHost);
float* per_block_results_ic = (float*)malloc((dimGrid.x)*sizeof(float));
hipMemcpy(per_block_results_ic, d_per_block_results_ic, (dimGrid.x)*sizeof(float), hipMemcpyDeviceToHost);
prix = 0.;
ic = 0.;
for (int i = 0; i < dimGrid.x; i++){
prix += per_block_results_price[i];
ic += per_block_results_ic[i];
}
hipFree(d_per_block_results_price);
hipFree(d_per_block_results_ic);
} | 87d31e00ddd66d9fb491769d5011f041964db160.cu | # include "option.h"
# include "performance.h"
# include <pnl/pnl_mathtools.h>
#include "performance.cuh"
/*!
* \file performance.cpp
* \brief option performance
*/
Performance :: Performance() : Option() {
Coeff_ = pnl_vect_new();
}
Performance :: Performance(Parser & pars) : Option(pars){
Coeff_ = pnl_vect_copy(pars.getVect("payoff coefficients"));
Coeff_gpu = (float*)malloc(size_*sizeof(float));
for (int i = 0; i < size_; i++)
Coeff_gpu[i] = GET(Coeff_, i);
}
Performance :: ~Performance(){
}
PnlVect* Performance :: get_Coeff(){
return Coeff_;
}
void Performance :: set_Coeff(PnlVect *Coeff) {
Coeff_ = Coeff;
}
double Performance :: payoff (const PnlMat *path) {
double sum = 0.0;
double temp_num;
double temp_deno;
    //Numerator: vector holding the d asset values at time t_i
    //Denominator: vector holding the d asset values at time t_{i-1}
PnlVect* numerateur = pnl_vect_create(size_);
PnlVect* denominateur = pnl_vect_create(size_);
for (int i=1; i<TimeSteps_+1; i++){
//On met les d actif au temps t_i dans numerateur
//et ceux au temps t_{i-1} dans denominateur
pnl_mat_get_col(numerateur, path, i);
pnl_mat_get_col(denominateur, path, i-1);
temp_num = pnl_vect_scalar_prod(numerateur, Coeff_);
temp_deno = pnl_vect_scalar_prod(denominateur, Coeff_);
sum = sum + temp_num/temp_deno;
}
sum = sum/(double)(TimeSteps_) - 1;
pnl_vect_free(&numerateur);
pnl_vect_free(&denominateur);
return 1+MIN(MAX(sum,0), 0.1);
}
void Performance::priceMC(
dim3 dimGrid,
dim3 dimBlock,
double &prix,
double &ic,
int N,
int samples,
float* d_path)
{
//Compute price
float* d_per_block_results_price;
cudaMalloc((float**)&d_per_block_results_price, (dimGrid.x)*sizeof(float));
float* d_per_block_results_ic;
cudaMalloc((float**)&d_per_block_results_ic, (dimGrid.x)*sizeof(float));
float* d_coeff;
cudaMalloc((float**)&d_coeff, size_*sizeof(float));
cudaMemcpy(d_coeff, Coeff_gpu, size_*sizeof(float), cudaMemcpyHostToDevice);
mc_performance<<<dimGrid, dimBlock, 2*(dimBlock.x)*sizeof(float)>>>(N, size_, samples, d_coeff, d_path, d_per_block_results_price, d_per_block_results_ic);
cudaThreadSynchronize();
float* per_block_results_price = (float*)malloc((dimGrid.x)*sizeof(float));
cudaMemcpy(per_block_results_price, d_per_block_results_price, (dimGrid.x)*sizeof(float), cudaMemcpyDeviceToHost);
float* per_block_results_ic = (float*)malloc((dimGrid.x)*sizeof(float));
cudaMemcpy(per_block_results_ic, d_per_block_results_ic, (dimGrid.x)*sizeof(float), cudaMemcpyDeviceToHost);
prix = 0.;
ic = 0.;
for (int i = 0; i < dimGrid.x; i++){
prix += per_block_results_price[i];
ic += per_block_results_ic[i];
}
cudaFree(d_per_block_results_price);
cudaFree(d_per_block_results_ic);
} |
9da7d1a1665f9889c7fb39ba98f0568830cba3b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Exhibits a bank conflict.
// Gklee and Gkleep both detect this.
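// Shared memory is split into 32 four-byte banks, so indexing the int array smem
// with a stride of 2, 4, or 8 makes 2, 4, or 8 threads of the same warp hit the
// same bank, serializing the access.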
//#include <cstdio>
#define N 32
__global__ void bc(char* __restrict__ in, char* __restrict__ out)
{
__shared__ int smem[512];
int tid = threadIdx.x;
smem[tid*2]=in[tid];
__syncthreads();
smem[tid*4]=in[tid];
__syncthreads();
smem[tid*8]=in[tid];
__syncthreads();
int x = smem[tid * 2]; // 2-way bank conflicts
int y = smem[tid * 4]; // 4-way bank conflicts
int z = smem[tid * 8]; // 8-way bank conflicts
int m = max(max(x,y),z);
out[tid] = m;
}
/*
int main()
{
char* in = (char*) malloc(N*sizeof(char));
for(int i = 0; i < N; i++)
in[i] = i;
char* din, * dout;
hipMalloc((void**) &din, N*sizeof(char));
hipMalloc((void**) &dout, N*sizeof(char));
hipMemcpy(din, in, N*sizeof(char), hipMemcpyHostToDevice);
bc<<<1,N>>>(din,dout);
hipMemcpy(in, dout, N*sizeof(char), hipMemcpyDeviceToHost);
for(int i = 0; i < N; i++)
printf("%d ", in[i]);
printf("\n");
free(in); hipFree(din); hipFree(dout);
}
*/
| 9da7d1a1665f9889c7fb39ba98f0568830cba3b6.cu | // Exhibits a bank conflict.
// Gklee and Gkleep both detect this.
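// Shared memory is split into 32 four-byte banks, so indexing the int array smem
// with a stride of 2, 4, or 8 makes 2, 4, or 8 threads of the same warp hit the
// same bank, serializing the access.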
//#include <cstdio>
#define N 32
__global__ void bc(char* __restrict__ in, char* __restrict__ out)
{
__shared__ int smem[512];
int tid = threadIdx.x;
smem[tid*2]=in[tid];
__syncthreads();
smem[tid*4]=in[tid];
__syncthreads();
smem[tid*8]=in[tid];
__syncthreads();
int x = smem[tid * 2]; // 2-way bank conflicts
int y = smem[tid * 4]; // 4-way bank conflicts
int z = smem[tid * 8]; // 8-way bank conflicts
int m = max(max(x,y),z);
out[tid] = m;
}
/*
int main()
{
char* in = (char*) malloc(N*sizeof(char));
for(int i = 0; i < N; i++)
in[i] = i;
char* din, * dout;
cudaMalloc((void**) &din, N*sizeof(char));
cudaMalloc((void**) &dout, N*sizeof(char));
cudaMemcpy(din, in, N*sizeof(char), cudaMemcpyHostToDevice);
bc<<<1,N>>>(din,dout);
cudaMemcpy(in, dout, N*sizeof(char), cudaMemcpyDeviceToHost);
for(int i = 0; i < N; i++)
printf("%d ", in[i]);
printf("\n");
free(in); cudaFree(din); cudaFree(dout);
}
*/
|
e56228f1f5ba1663faad14b1bba3288b5f958872.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
__device__ void partition_by_bit(unsigned int *values, unsigned int bit);
__global__ void radix_sort(unsigned int *values)
{
int bit;
for( bit = 0; bit < 32; ++bit )
{
partition_by_bit(values, bit);
__syncthreads();
}
}
__device__ int plus_scan(unsigned int *x)
{
unsigned int i = threadIdx.x; // id of thread executing this instance
unsigned int n = blockDim.x; // total number of threads in this block
unsigned int offset; // distance between elements to be added
for( offset = 1; offset < n; offset *= 2) {
unsigned int t;
if ( i >= offset )
t = x[i-offset];
__syncthreads();
if ( i >= offset )
x[i] = t + x[i]; // i.e., x[i] = x[i] + x[i-1]
__syncthreads();
}
return x[i];
}
__device__ void partition_by_bit(unsigned int *values, unsigned int bit)
{
unsigned int i = threadIdx.x;
unsigned int size = blockDim.x;
unsigned int x_i = values[i]; // value of integer at position i
unsigned int p_i = (x_i >> bit) & 1; // value of bit at position bit
values[i] = p_i;
__syncthreads();
unsigned int T_before = plus_scan(values);
unsigned int T_total = values[size-1];
unsigned int F_total = size - T_total;
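    // plus_scan is an inclusive scan, so T_before counts the 1-bits among threads 0..i
    // (including this one); T_total is the number of 1-bits in the block and F_total the
    // number of 0-bits. 0-bit keys stay in order at the front and 1-bit keys are packed
    // after them, keeping the radix sort stable.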
__syncthreads();
if ( p_i )
values[T_before-1 + F_total] = x_i;
else
values[i - T_before] = x_i;
}
int main ()
{
unsigned int a[1000];
int size = 1000;
srand(time(NULL));
for (int i = 0; i < 1000; i++)
{
a[i] = rand ()%1000;
}
unsigned int *dev_a;
hipMalloc(&dev_a, size * sizeof(unsigned int));
    hipMemcpy( dev_a, a, size * sizeof(unsigned int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( radix_sort), dim3(1),dim3(size), 0, 0, dev_a);
hipMemcpy( a, dev_a, size * sizeof(unsigned int), hipMemcpyDeviceToHost );
for (int i = 0; i < 1000; i++)
{
printf("%u ", a[i]);
}
printf ("\n");
}
| e56228f1f5ba1663faad14b1bba3288b5f958872.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
__device__ void partition_by_bit(unsigned int *values, unsigned int bit);
__global__ void radix_sort(unsigned int *values)
{
int bit;
for( bit = 0; bit < 32; ++bit )
{
partition_by_bit(values, bit);
__syncthreads();
}
}
__device__ int plus_scan(unsigned int *x)
{
unsigned int i = threadIdx.x; // id of thread executing this instance
unsigned int n = blockDim.x; // total number of threads in this block
unsigned int offset; // distance between elements to be added
for( offset = 1; offset < n; offset *= 2) {
unsigned int t;
if ( i >= offset )
t = x[i-offset];
__syncthreads();
if ( i >= offset )
x[i] = t + x[i]; // i.e., x[i] = x[i] + x[i-1]
__syncthreads();
}
return x[i];
}
__device__ void partition_by_bit(unsigned int *values, unsigned int bit)
{
unsigned int i = threadIdx.x;
unsigned int size = blockDim.x;
unsigned int x_i = values[i]; // value of integer at position i
unsigned int p_i = (x_i >> bit) & 1; // value of bit at position bit
values[i] = p_i;
__syncthreads();
unsigned int T_before = plus_scan(values);
unsigned int T_total = values[size-1];
unsigned int F_total = size - T_total;
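    // plus_scan is an inclusive scan, so T_before counts the 1-bits among threads 0..i
    // (including this one); T_total is the number of 1-bits in the block and F_total the
    // number of 0-bits. 0-bit keys stay in order at the front and 1-bit keys are packed
    // after them, keeping the radix sort stable.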
__syncthreads();
if ( p_i )
values[T_before-1 + F_total] = x_i;
else
values[i - T_before] = x_i;
}
int main ()
{
unsigned int a[1000];
int size = 1000;
srand(time(NULL));
for (int i = 0; i < 1000; i++)
{
a[i] = rand ()%1000;
}
unsigned int *dev_a;
cudaMalloc(&dev_a, size * sizeof(unsigned int));
cudaMemcpy( dev_a, a, size * sizeof(unsigned int), cudaMemcpyHostToDevice);
radix_sort<<<1,size>>>(dev_a);
cudaMemcpy( a, dev_a, size * sizeof(unsigned int), cudaMemcpyDeviceToHost );
for (int i = 0; i < 1000; i++)
{
printf("%u ", a[i]);
}
printf ("\n");
}
|
ae2e80167c7ee1d3ba1127ac7f09f6e9b2d3874b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* University of Pittsburgh
* Department of Computer Science
* CS1645: Introduction to HPC Systems
* Instructor: Xiaolong Cui
* This is a skeleton for implementing prefix sum using GPU, inspired
* by nvidia course of similar name.
*/
#include <stdio.h>
#include <math.h>
#include <string.h>
#define N 512
/*
* You should implement the parallel scan function here!
*/
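/*
 * Implementation note: a double-buffered Hillis-Steele scan. The input is first
 * shifted right by one element (with 0 in front), so the inclusive sweep below
 * produces an exclusive prefix sum of g_idata in g_odata. Assumes a single
 * block of n threads.
 */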
__global__ void parallel_scan(float *g_odata, float *g_idata, int n) {
g_odata[threadIdx.x] = 0.0;
extern __shared__ float temp[];
int thread_id = threadIdx.x;
int p_out = 0, p_in = 1;
temp[p_out * n + thread_id] = (thread_id > 0) ? g_idata[thread_id - 1] : 0;
__syncthreads();
for(int i = 1; i < n; i *= 2) {
p_out = 1 - p_out;
p_in = 1 - p_out;
if(thread_id >= i){
temp[p_out * n + thread_id] = temp[p_in * n + thread_id - i] + temp[p_in * n + thread_id];
}
else{
temp[p_out* n + thread_id] = temp[p_in * n + thread_id];
}
__syncthreads();
}
g_odata[thread_id] = temp[p_out * n + thread_id];
}
/*
* Fills an array a with n random floats.
*/
void random_floats(float* a, int n) {
float d;
// Comment out this line if you want consistent "random".
srand(time(NULL));
for (int i = 0; i < n; ++i) {
d = rand() % 8;
a[i] = ((rand() % 64) / (d > 0 ? d : 1));
}
}
/*
* Simple Serial implementation of exclusive scan.
*/
void serial_scan(float* out, float* in, int n) {
float total_sum = 0;
out[0] = 0;
for (int i = 1; i < n; i++) {
total_sum += in[i-1];
out[i] = out[i-1] + in[i-1];
}
if (total_sum != out[n-1]) {
printf("Warning: exceeding accuracy of float.\n");
}
}
/*
* This is a simple function that confirms that the output of the scan
* function matches that of a golden image (array).
*/
bool printError(float *gold_out, float *test_out, bool show_all) {
bool firstFail = true;
bool error = false;
float epislon = 0.1;
float diff = 0.0;
for (int i = 0; i < N; ++i) {
diff = abs(gold_out[i] - test_out[i]);
if ((diff > epislon) && firstFail) {
printf("ERROR: gold_out[%d] = %f != test_out[%d] = %f // diff = %f \n", i, gold_out[i], i, test_out[i], diff);
firstFail = show_all;
error = true;
}
}
return error;
}
int main(void) {
float *in, *out, *gold_out; // host
float *devIn, *devOut;
int size = sizeof(float) * N;
hipMalloc((void **)&devIn, size);
hipMalloc((void **)&devOut, size);
in = (float *)malloc(size);
random_floats(in, N);
out = (float *)malloc(size);
gold_out = (float *)malloc(size);
// ***********
// RUN SERIAL SCAN
// ***********
serial_scan(gold_out, in, N);
// ***********
// RUN PARALLEL SCAN
// ***********
hipLaunchKernelGGL(( parallel_scan), dim3(1), dim3(512), N * 2 * sizeof(float), 0, devOut, devIn, N);
hipDeviceSynchronize();
hipMemcpy(out, devOut, size, hipMemcpyDeviceToHost);
if (printError(gold_out, out, false)) {
printf("ERROR: The parallel scan function failed to produce proper output.\n");
} else {
printf("CONGRATS: The parallel scan function produced proper output.\n");
}
return 0;
}
| ae2e80167c7ee1d3ba1127ac7f09f6e9b2d3874b.cu | /**
* University of Pittsburgh
* Department of Computer Science
* CS1645: Introduction to HPC Systems
* Instructor: Xiaolong Cui
* This is a skeleton for implementing prefix sum using GPU, inspired
* by nvidia course of similar name.
*/
#include <stdio.h>
#include <math.h>
#include <string.h>
#define N 512
/*
* You should implement the parallel scan function here!
*/
__global__ void parallel_scan(float *g_odata, float *g_idata, int n) {
g_odata[threadIdx.x] = 0.0;
extern __shared__ float temp[];
int thread_id = threadIdx.x;
int p_out = 0, p_in = 1;
temp[p_out * n + thread_id] = (thread_id > 0) ? g_idata[thread_id - 1] : 0;
__syncthreads();
for(int i = 1; i < n; i *= 2) {
p_out = 1 - p_out;
p_in = 1 - p_out;
if(thread_id >= i){
temp[p_out * n + thread_id] = temp[p_in * n + thread_id - i] + temp[p_in * n + thread_id];
}
else{
temp[p_out* n + thread_id] = temp[p_in * n + thread_id];
}
__syncthreads();
}
g_odata[thread_id] = temp[p_out * n + thread_id];
}
/*
* Fills an array a with n random floats.
*/
void random_floats(float* a, int n) {
float d;
// Comment out this line if you want consistent "random".
srand(time(NULL));
for (int i = 0; i < n; ++i) {
d = rand() % 8;
a[i] = ((rand() % 64) / (d > 0 ? d : 1));
}
}
/*
* Simple Serial implementation of exclusive scan.
*/
void serial_scan(float* out, float* in, int n) {
float total_sum = 0;
out[0] = 0;
for (int i = 1; i < n; i++) {
total_sum += in[i-1];
out[i] = out[i-1] + in[i-1];
}
if (total_sum != out[n-1]) {
printf("Warning: exceeding accuracy of float.\n");
}
}
/*
* This is a simple function that confirms that the output of the scan
* function matches that of a golden image (array).
*/
bool printError(float *gold_out, float *test_out, bool show_all) {
bool firstFail = true;
bool error = false;
float epislon = 0.1;
float diff = 0.0;
for (int i = 0; i < N; ++i) {
diff = abs(gold_out[i] - test_out[i]);
if ((diff > epislon) && firstFail) {
printf("ERROR: gold_out[%d] = %f != test_out[%d] = %f // diff = %f \n", i, gold_out[i], i, test_out[i], diff);
firstFail = show_all;
error = true;
}
}
return error;
}
int main(void) {
float *in, *out, *gold_out; // host
float *devIn, *devOut;
int size = sizeof(float) * N;
cudaMalloc((void **)&devIn, size);
cudaMalloc((void **)&devOut, size);
in = (float *)malloc(size);
random_floats(in, N);
out = (float *)malloc(size);
gold_out = (float *)malloc(size);
// ***********
// RUN SERIAL SCAN
// ***********
serial_scan(gold_out, in, N);
// ***********
// RUN PARALLEL SCAN
// ***********
parallel_scan<<< 1, 512, N * 2 * sizeof(float)>>>(devOut, devIn, N);
cudaDeviceSynchronize();
cudaMemcpy(out, devOut, size, cudaMemcpyDeviceToHost);
if (printError(gold_out, out, false)) {
printf("ERROR: The parallel scan function failed to produce proper output.\n");
} else {
printf("CONGRATS: The parallel scan function produced proper output.\n");
}
return 0;
}
|
3a5a680df8adacdd1e0ec14d955893d32edb20f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by ss on 16-11-15.
//
#include <sys/time.h>
#include "deviceHessianOnFly.h"
#include "../constant.h"
__global__ void RBFKernel(const float_point *aSelfDot, float_point bSelfDot, float_point *dotProduct, int numOfSamples,
float gamma) {
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < numOfSamples) {
dotProduct[idx] = expf(-(aSelfDot[idx] + bSelfDot - dotProduct[idx] * 2) * gamma);
}
}
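/*
 * ReadRow builds one row of the RBF kernel matrix without forming pairwise
 * differences explicitly: row b of the data is scattered into a dense vector,
 * the CSR mat-mat product below yields the inner products a.b for every row a
 * of the chunk, and RBFKernel then applies
 *   K(a, b) = exp(-gamma * (||a||^2 - 2 * a.b + ||b||^2))
 * using the precomputed self-dot terms.
 */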
void DeviceHessianOnFly::ReadRow(int nPosofRowAtHessian, float_point *devHessianRow, int start, int end) {
const int numOfSamples = end - start;
const int *csrRowPtr = csrMat.getCSRRowPtr();
const int numOfFeatures = csrMat.getNumOfFeatures();
const int nnzA = csrRowPtr[end] - csrRowPtr[start];
const int *devARowPtr;
if (numOfSamples == csrMat.getNumOfSamples()) {
//for binary case
devARowPtr = devRowPtr;
} else {
//for multi-class case
devARowPtr = devRowPtrSplit + start;
if (start != 0)
devARowPtr++;
}
const float_point *devAVal = devVal + csrRowPtr[start];
const int *devAColInd = devColInd + csrRowPtr[start];
const int nnzB = csrRowPtr[nPosofRowAtHessian + 1] - csrRowPtr[nPosofRowAtHessian];
const float_point *devBVal = devVal + csrRowPtr[nPosofRowAtHessian];
const int *devBColInd = devColInd + csrRowPtr[nPosofRowAtHessian];
// float_point *devDenseVector;
// checkCudaErrors(hipMalloc((void **) &devDenseVector, sizeof(float_point) * numOfFeatures));
checkCudaErrors(hipMemset(devDenseVector, 0, sizeof(float_point) * numOfFeatures));
    hipsparseSsctr(handle, nnzB, devBVal, devBColInd, devDenseVector, HIPSPARSE_INDEX_BASE_ZERO);
// checkCudaErrors(hipMemset(devHessianRow,0,sizeof(float_point) * numOfSamples));
hipsparseScsrmm(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
numOfSamples, 1, numOfFeatures,
nnzA, &one, descr, devAVal, devARowPtr, devAColInd,
devDenseVector, numOfFeatures, &zero,
devHessianRow, numOfSamples);
RBFKernel << < Ceil(numOfSamples, BLOCK_SIZE), BLOCK_SIZE >> >
(devValSelfDot +
start, csrMat.csrValSelfDot[nPosofRowAtHessian], devHessianRow, numOfSamples, gamma);
// float_point *hrow = new float_point[numOfSamples];
// checkCudaErrors(
// hipMemcpy(hrow, devHessianRow, sizeof(float_point) * numOfSamples, hipMemcpyDeviceToHost));
// RBFKernelFunction function(gamma);
// float_point *hostKernel = new float_point[problem.getNumOfSamples()];
// float_point totalErr = 0;
// vector<vector<svm_node> > s = problem.v_vSamples;
// function.ComputeSparseRow(s,nPosofRowAtHessian,1,hostKernel);
// for (int i = 0; i < problem.getNumOfSamples(); ++i) {
// float_point err = fabs(hostKernel[i] - hrow[i]);
// totalErr +=err;
// printf("row %d, col %d, host %f, device %f,err %f\n",nPosofRowAtHessian, i, hostKernel[i],hrow[i],err);
// }
// printf("compute row %d, total err %f\n",nPosofRowAtHessian,totalErr);
// memcpy(devHessianRow,hostKernel,sizeof(float_point) * numOfSamples);
// delete[] hostKernel;
// checkCudaErrors(hipFree(devDenseVector));
}
bool DeviceHessianOnFly::PrecomputeHessian(const string &strHessianMatrixFileName, const string &strDiagHessianFileName,
vector<vector<float_point> > &v_v_DocVector) {
return true;
}
bool DeviceHessianOnFly::GetHessianDiag(const string &strFileName, const int &nNumofTraingSamples,
float_point *pfHessianDiag) {
for (int i = 0; i < nNumofTraingSamples; ++i) {
pfHessianDiag[i] = 1;
}
return true;
}
DeviceHessianOnFly::DeviceHessianOnFly(const SvmProblem &subProblem, float_point gamma) :
gamma(gamma), zero(0.0f), one(1.0f),
csrMat(subProblem.v_vSamples, subProblem.getNumOfFeatures()) {
hipsparseCreate(&handle);
hipsparseCreateMatDescr(&descr);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
checkCudaErrors(hipMalloc((void **) &devVal, sizeof(float_point) * csrMat.getNnz()));
checkCudaErrors(hipMalloc((void **) &devValSelfDot, sizeof(float_point) * csrMat.getNumOfSamples()));
checkCudaErrors(hipMalloc((void **) &devRowPtr, sizeof(int) * (csrMat.getNumOfSamples() + 1)));
checkCudaErrors(hipMalloc((void **) &devRowPtrSplit, sizeof(int) * (csrMat.getNumOfSamples() + 2)));
checkCudaErrors(hipMalloc((void **) &devColInd, sizeof(int) * (csrMat.getNnz())));
checkCudaErrors(hipMemcpy(devVal, csrMat.getCSRVal(), sizeof(float_point) * csrMat.getNnz(),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(devValSelfDot, csrMat.getCSRValSelfDot(),
sizeof(float_point) * subProblem.v_vSamples.size(), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(devRowPtr, csrMat.getCSRRowPtr(), sizeof(int) * (subProblem.v_vSamples.size() + 1),
hipMemcpyHostToDevice));
//nnz for samples with label +1
int nnzA = csrMat.csrRowPtr[subProblem.count[0]];
csrRowPtrSplit = vector<int>(csrMat.csrRowPtr.begin(), csrMat.csrRowPtr.begin() + subProblem.count[0] + 1);
for (int i = 0; i <= subProblem.count[1]; ++i) {
csrRowPtrSplit.push_back(csrMat.csrRowPtr[subProblem.count[0] + i] - nnzA);
}
checkCudaErrors(hipMemcpy(devRowPtrSplit, csrRowPtrSplit.data(), sizeof(int) * (subProblem.v_vSamples.size() + 2),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(devColInd, csrMat.getCSRColInd(), sizeof(int) * (csrMat.getNnz()),
hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **) &devDenseVector, sizeof(float_point) * subProblem.getNumOfFeatures()));
}
DeviceHessianOnFly::~DeviceHessianOnFly() {
checkCudaErrors(hipFree(devVal));
checkCudaErrors(hipFree(devValSelfDot));
checkCudaErrors(hipFree(devRowPtr));
checkCudaErrors(hipFree(devRowPtrSplit));
checkCudaErrors(hipFree(devColInd));
checkCudaErrors(hipFree(devDenseVector));
hipsparseDestroyMatDescr(descr);
hipsparseDestroy(handle);
}
| 3a5a680df8adacdd1e0ec14d955893d32edb20f3.cu | //
// Created by ss on 16-11-15.
//
#include <sys/time.h>
#include "deviceHessianOnFly.h"
#include "../constant.h"
__global__ void RBFKernel(const float_point *aSelfDot, float_point bSelfDot, float_point *dotProduct, int numOfSamples,
float gamma) {
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < numOfSamples) {
dotProduct[idx] = expf(-(aSelfDot[idx] + bSelfDot - dotProduct[idx] * 2) * gamma);
}
}
void DeviceHessianOnFly::ReadRow(int nPosofRowAtHessian, float_point *devHessianRow, int start, int end) {
const int numOfSamples = end - start;
const int *csrRowPtr = csrMat.getCSRRowPtr();
const int numOfFeatures = csrMat.getNumOfFeatures();
const int nnzA = csrRowPtr[end] - csrRowPtr[start];
const int *devARowPtr;
if (numOfSamples == csrMat.getNumOfSamples()) {
//for binary case
devARowPtr = devRowPtr;
} else {
//for multi-class case
devARowPtr = devRowPtrSplit + start;
if (start != 0)
devARowPtr++;
}
const float_point *devAVal = devVal + csrRowPtr[start];
const int *devAColInd = devColInd + csrRowPtr[start];
const int nnzB = csrRowPtr[nPosofRowAtHessian + 1] - csrRowPtr[nPosofRowAtHessian];
const float_point *devBVal = devVal + csrRowPtr[nPosofRowAtHessian];
const int *devBColInd = devColInd + csrRowPtr[nPosofRowAtHessian];
// float_point *devDenseVector;
// checkCudaErrors(cudaMalloc((void **) &devDenseVector, sizeof(float_point) * numOfFeatures));
checkCudaErrors(cudaMemset(devDenseVector, 0, sizeof(float_point) * numOfFeatures));
cusparseSsctr(handle, nnzB, devBVal, devBColInd, devDenseVector, CUSPARSE_INDEX_BASE_ZERO);
// checkCudaErrors(cudaMemset(devHessianRow,0,sizeof(float_point) * numOfSamples));
cusparseScsrmm(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
numOfSamples, 1, numOfFeatures,
nnzA, &one, descr, devAVal, devARowPtr, devAColInd,
devDenseVector, numOfFeatures, &zero,
devHessianRow, numOfSamples);
RBFKernel << < Ceil(numOfSamples, BLOCK_SIZE), BLOCK_SIZE >> >
(devValSelfDot +
start, csrMat.csrValSelfDot[nPosofRowAtHessian], devHessianRow, numOfSamples, gamma);
// float_point *hrow = new float_point[numOfSamples];
// checkCudaErrors(
// cudaMemcpy(hrow, devHessianRow, sizeof(float_point) * numOfSamples, cudaMemcpyDeviceToHost));
// RBFKernelFunction function(gamma);
// float_point *hostKernel = new float_point[problem.getNumOfSamples()];
// float_point totalErr = 0;
// vector<vector<svm_node> > s = problem.v_vSamples;
// function.ComputeSparseRow(s,nPosofRowAtHessian,1,hostKernel);
// for (int i = 0; i < problem.getNumOfSamples(); ++i) {
// float_point err = fabs(hostKernel[i] - hrow[i]);
// totalErr +=err;
// printf("row %d, col %d, host %f, device %f,err %f\n",nPosofRowAtHessian, i, hostKernel[i],hrow[i],err);
// }
// printf("compute row %d, total err %f\n",nPosofRowAtHessian,totalErr);
// memcpy(devHessianRow,hostKernel,sizeof(float_point) * numOfSamples);
// delete[] hostKernel;
// checkCudaErrors(cudaFree(devDenseVector));
}
bool DeviceHessianOnFly::PrecomputeHessian(const string &strHessianMatrixFileName, const string &strDiagHessianFileName,
vector<vector<float_point> > &v_v_DocVector) {
return true;
}
bool DeviceHessianOnFly::GetHessianDiag(const string &strFileName, const int &nNumofTraingSamples,
float_point *pfHessianDiag) {
for (int i = 0; i < nNumofTraingSamples; ++i) {
pfHessianDiag[i] = 1;
}
return true;
}
DeviceHessianOnFly::DeviceHessianOnFly(const SvmProblem &subProblem, float_point gamma) :
gamma(gamma), zero(0.0f), one(1.0f),
csrMat(subProblem.v_vSamples, subProblem.getNumOfFeatures()) {
cusparseCreate(&handle);
cusparseCreateMatDescr(&descr);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
checkCudaErrors(cudaMalloc((void **) &devVal, sizeof(float_point) * csrMat.getNnz()));
checkCudaErrors(cudaMalloc((void **) &devValSelfDot, sizeof(float_point) * csrMat.getNumOfSamples()));
checkCudaErrors(cudaMalloc((void **) &devRowPtr, sizeof(int) * (csrMat.getNumOfSamples() + 1)));
checkCudaErrors(cudaMalloc((void **) &devRowPtrSplit, sizeof(int) * (csrMat.getNumOfSamples() + 2)));
checkCudaErrors(cudaMalloc((void **) &devColInd, sizeof(int) * (csrMat.getNnz())));
checkCudaErrors(cudaMemcpy(devVal, csrMat.getCSRVal(), sizeof(float_point) * csrMat.getNnz(),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(devValSelfDot, csrMat.getCSRValSelfDot(),
sizeof(float_point) * subProblem.v_vSamples.size(), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(devRowPtr, csrMat.getCSRRowPtr(), sizeof(int) * (subProblem.v_vSamples.size() + 1),
cudaMemcpyHostToDevice));
//nnz for samples with label +1
int nnzA = csrMat.csrRowPtr[subProblem.count[0]];
csrRowPtrSplit = vector<int>(csrMat.csrRowPtr.begin(), csrMat.csrRowPtr.begin() + subProblem.count[0] + 1);
for (int i = 0; i <= subProblem.count[1]; ++i) {
csrRowPtrSplit.push_back(csrMat.csrRowPtr[subProblem.count[0] + i] - nnzA);
}
checkCudaErrors(cudaMemcpy(devRowPtrSplit, csrRowPtrSplit.data(), sizeof(int) * (subProblem.v_vSamples.size() + 2),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(devColInd, csrMat.getCSRColInd(), sizeof(int) * (csrMat.getNnz()),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **) &devDenseVector, sizeof(float_point) * subProblem.getNumOfFeatures()));
}
DeviceHessianOnFly::~DeviceHessianOnFly() {
checkCudaErrors(cudaFree(devVal));
checkCudaErrors(cudaFree(devValSelfDot));
checkCudaErrors(cudaFree(devRowPtr));
checkCudaErrors(cudaFree(devRowPtrSplit));
checkCudaErrors(cudaFree(devColInd));
checkCudaErrors(cudaFree(devDenseVector));
cusparseDestroyMatDescr(descr);
cusparseDestroy(handle);
}
|
6b233e78f49356b180cba4329a5296f4dcae4e21.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Modifications Copyright (c) Microsoft. */
#include "core/providers/cuda/cu_inc/common.cuh"
#include "orttraining/training_ops/cuda/nn/dropout_grad_impl.h"
#include <hiprand/hiprand_kernel.h>
#include <algorithm>
namespace onnxruntime {
namespace cuda {
template <typename T, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void DropoutGradientKernel(
const int64_t N,
const T* dY_data,
const bool* mask_data,
const float scale,
T* dX_data) {
CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
dX_data[id] = T(float(dY_data[id]) * mask_data[id] * scale);
id += NumThreadsPerBlock;
}
}
}
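// DropoutGradientKernelImpl computes dX = dY * mask / (1 - ratio). When
// ratio == 0 the kernel launch is skipped and dY is copied straight to dX
// (or left untouched if the two buffers already alias).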
template <typename T>
void DropoutGradientKernelImpl(
hipStream_t stream,
const int64_t N,
const T* dY_data,
const bool* mask_data,
const float ratio,
T* dX_data) {
if (ratio == 0.0f) {
if (dY_data != dX_data) {
CUDA_CALL_THROW(hipMemcpyAsync(dX_data, dY_data, N * sizeof(T), hipMemcpyDeviceToDevice, stream));
}
} else {
const float scale = 1.f / (1.f - ratio);
const int blocksPerGrid = static_cast<int>(CeilDiv(N, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));
hipLaunchKernelGGL(( DropoutGradientKernel<T, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>)
, dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, N, dY_data, mask_data, scale, dX_data);
}
}
#define SPECIALIZED_DROPOUT_GRAD_IMPL(T) \
template void DropoutGradientKernelImpl( \
hipStream_t stream, \
const int64_t N, \
const T* dY_data, \
const bool* mask_data, \
const float scale, \
T* dX_data);
SPECIALIZED_DROPOUT_GRAD_IMPL(float)
SPECIALIZED_DROPOUT_GRAD_IMPL(double)
SPECIALIZED_DROPOUT_GRAD_IMPL(half)
#if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__))
SPECIALIZED_DROPOUT_GRAD_IMPL(nv_bfloat16)
#endif
} // namespace cuda
} // namespace onnxruntime
| 6b233e78f49356b180cba4329a5296f4dcae4e21.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Modifications Copyright (c) Microsoft. */
#include "core/providers/cuda/cu_inc/common.cuh"
#include "orttraining/training_ops/cuda/nn/dropout_grad_impl.h"
#include <curand_kernel.h>
#include <algorithm>
namespace onnxruntime {
namespace cuda {
template <typename T, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void DropoutGradientKernel(
const int64_t N,
const T* dY_data,
const bool* mask_data,
const float scale,
T* dX_data) {
CUDA_LONG id = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
dX_data[id] = T(float(dY_data[id]) * mask_data[id] * scale);
id += NumThreadsPerBlock;
}
}
}
template <typename T>
void DropoutGradientKernelImpl(
cudaStream_t stream,
const int64_t N,
const T* dY_data,
const bool* mask_data,
const float ratio,
T* dX_data) {
if (ratio == 0.0f) {
if (dY_data != dX_data) {
CUDA_CALL_THROW(cudaMemcpyAsync(dX_data, dY_data, N * sizeof(T), cudaMemcpyDeviceToDevice, stream));
}
} else {
const float scale = 1.f / (1.f - ratio);
const int blocksPerGrid = static_cast<int>(CeilDiv(N, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));
DropoutGradientKernel<T, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>
<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(N, dY_data, mask_data, scale, dX_data);
}
}
#define SPECIALIZED_DROPOUT_GRAD_IMPL(T) \
template void DropoutGradientKernelImpl( \
cudaStream_t stream, \
const int64_t N, \
const T* dY_data, \
const bool* mask_data, \
const float scale, \
T* dX_data);
SPECIALIZED_DROPOUT_GRAD_IMPL(float)
SPECIALIZED_DROPOUT_GRAD_IMPL(double)
SPECIALIZED_DROPOUT_GRAD_IMPL(half)
#if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__))
SPECIALIZED_DROPOUT_GRAD_IMPL(nv_bfloat16)
#endif
} // namespace cuda
} // namespace onnxruntime
|
4d21fe2dcacfb96154fe0bd7e1aa02dca40e66c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int x_index = threadIdx.x + blockIdx.x*blockDim.x;
int y_index = threadIdx.y + blockIdx.y*blockDim.y;
    if (x_index >= numRows || y_index >= numCols) return; // threads past the image edge would index out of bounds
    int index = x_index*numCols + y_index;
uchar4 rgba = rgbaImage[index];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[index] = channelSum;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
int blockSideLen = 8;
const dim3 blockSize(blockSideLen, blockSideLen, 1); //TODO
const dim3 gridSize(numRows / blockSideLen + 1, numCols / blockSideLen + 1, 1); //TODO
rgba_to_greyscale << <gridSize, blockSize >> >(d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
} | 4d21fe2dcacfb96154fe0bd7e1aa02dca40e66c0.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int x_index = threadIdx.x + blockIdx.x*blockDim.x;
int y_index = threadIdx.y + blockIdx.y*blockDim.y;
    if (x_index >= numRows || y_index >= numCols) return; // threads past the image edge would index out of bounds
    int index = x_index*numCols + y_index;
uchar4 rgba = rgbaImage[index];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[index] = channelSum;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
int blockSideLen = 8;
const dim3 blockSize(blockSideLen, blockSideLen, 1); //TODO
const dim3 gridSize(numRows / blockSideLen + 1, numCols / blockSideLen + 1, 1); //TODO
rgba_to_greyscale << <gridSize, blockSize >> >(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
} |
a23f9d2f410185fec720ec6751a975f2caa4012d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Self-Organizing Maps on a cluster
* Copyright (C) 2013 Peter Wittek
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
#undef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_USE_INT128
#include <iostream>
#include <sstream>
#include <map>
#include <vector>
#include <stdio.h>
#include <rocblas.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include "somoclu.h"
#ifdef _WIN32
#define popen _popen
#define pclose _pclose
#endif
// Error handling macro
#define CUDA_CHECK(call) \
if((call) != hipSuccess) { \
hipError_t err = hipGetLastError(); \
stringstream sstm; \
sstm << "CUDA error calling \""#call"\", code is " << err; \
cuda_abort(sstm.str()); }
//Globals
hipblasHandle_t handle;
thrust::device_vector<float> deviceData;
thrust::device_vector<float> deviceDataNorms;
thrust::device_vector<float> deviceCodebook;
thrust::device_vector<float> deviceCodebookNorms;
// convert a linear index to a row index
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T, T> {
T C; // number of columns
__host__ __device__
linear_index_to_row_index(T C) : C(C) {}
__host__ __device__
T operator()(T i) {
return i / C;
}
};
// note: functor inherits from unary_function
template <typename T>
struct square : public thrust::unary_function<T, T> {
__host__ __device__
T operator()(T x) const {
return x * x;
}
};
typedef thrust::tuple<int, float> argMinType;
struct argMin : public thrust::binary_function<argMinType, argMinType, argMinType> {
__host__ __device__
argMinType operator()(const argMinType& a, const argMinType& b) const {
if (thrust::get<1>(a) < thrust::get<1>(b)) {
return a;
}
else {
return b;
}
}
};
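// normsOfRowSpace returns the *squared* L2 norm of every row of a row-major
// nRows x nColumns matrix: linear indices are mapped to row indices, each
// element is squared on the fly, and reduce_by_key sums the squares per row.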
template <typename T>
thrust::device_vector<T> normsOfRowSpace(thrust::device_vector<T> A, int nRows, int nColumns) {
// allocate storage for row sums and indices
thrust::device_vector<T> row_sums(nRows);
thrust::device_vector<int> row_indices(nRows);
// compute row sums by summing values with equal row indices
thrust::reduce_by_key
(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(nColumns)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(nColumns)) + (nRows * nColumns),
thrust::make_transform_iterator(A.begin(), square<T>()),
row_indices.begin(),
row_sums.begin(),
thrust::equal_to<int>(),
thrust::plus<T>());
return row_sums;
}
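// minsOfRowSpace is a row-wise argmin: each element is zipped with its linear
// index into an (index, value) tuple, and reduce_by_key keeps the tuple with
// the smallest value in every row. The caller recovers the SOM coordinate of
// the best matching unit from the stored linear index.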
thrust::device_vector<argMinType> minsOfRowSpace(thrust::device_vector<float> A, int nRows, int nColumns) {
// allocate storage for row sums and indices
thrust::device_vector<argMinType> row_sums(nRows);
thrust::device_vector<int> row_indices(nRows);
// compute row sums by summing values with equal row indices
thrust::reduce_by_key
(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(nColumns)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(nColumns)) + (nRows * nColumns),
thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<int>(0), A.begin())),
row_indices.begin(),
row_sums.begin(),
thrust::equal_to<int>(),
argMin());
return row_sums;
}
template <int BLOCK_DIM>
__global__ void euclidean(float *anorm2, float *bnorm2, float *M, int height, int width) {
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yStartIndex = blockIdx.y * BLOCK_DIM;
if (xIndex < width) {
float bNormForX = bnorm2[xIndex];
unsigned int yEndIndex = (yStartIndex + BLOCK_DIM < height ? yStartIndex + BLOCK_DIM : height);
for (unsigned int yIndex = yStartIndex; yIndex < yEndIndex; yIndex++) {
unsigned int index = yIndex * width + xIndex;
M[index] = anorm2[yIndex] - 2 * M[index] + bNormForX;
}
}
}
template <typename T>
void printMatrix(thrust::device_vector<T> A, int nRows, int nColumns) {
for (size_t i = 0; i < nRows; i++) {
for (size_t j = 0; j < nColumns; j++) {
std::cout << A[i * nColumns + j] << " ";
}
std::cout << "\n";
}
std::cout << "\n";
}
/** Clear the device memory and shut down CUBLAS
*
*/
void freeGpu() {
deviceData.clear();
deviceDataNorms.clear();
deviceCodebook.clear();
deviceCodebookNorms.clear();
thrust::device_vector<float>().swap(deviceData);
thrust::device_vector<float>().swap(deviceDataNorms);
thrust::device_vector<float>().swap(deviceCodebook);
thrust::device_vector<float>().swap(deviceCodebookNorms);
hipblasStatus_t status = hipblasDestroy(handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
cuda_abort("CuBLAS shutdown error");
}
}
/** Find the best matching units -- called from the map function
* @param bmus - array of best matching units
* @param map.codebook - the map.codebook to save
* @param map.nSomX - dimensions of SOM map in the x direction
* @param map.nSomY - dimensions of SOM map in the y direction
* @param map.nDimensions - dimensions of a data instance
* @param nVectorsPerRank - the number of data points assigned to this GPU
*/
void getBmusOnGpu(int *bmus, som map, int nVectorsPerRank) {
deviceCodebook = thrust::device_vector<float>(map.codebook, map.codebook + map.nSomX * map.nSomY * map.nDimensions);
deviceCodebookNorms = normsOfRowSpace<float>(deviceCodebook, map.nSomX * map.nSomY, map.nDimensions);
thrust::device_vector<float> deviceGramMatrix(map.nSomX * map.nSomY * nVectorsPerRank, 0);
//Calculate the inner products of the data vectors and the weight vectors
float alpha = 1.0f;
float beta = 0.0f;
hipblasStatus_t status = hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
map.nSomX * map.nSomY, nVectorsPerRank, map.nDimensions,
&alpha, thrust::raw_pointer_cast(&deviceCodebook[0]), map.nDimensions,
thrust::raw_pointer_cast(&deviceData[0]), map.nDimensions,
&beta, thrust::raw_pointer_cast(&deviceGramMatrix[0]), map.nSomX * map.nSomY);
if (status != HIPBLAS_STATUS_SUCCESS) {
cuda_abort("Kernel execution error.");
}
//All components of the vectorized Euclidean distance are available
// 32 is a magic number, this is the block size that works best on Tesla C2050
int BLOCK_DIM = 32;
dim3 grid((map.nSomX * map.nSomY + BLOCK_DIM - 1) / BLOCK_DIM, (nVectorsPerRank + BLOCK_DIM - 1) / BLOCK_DIM, 1);
dim3 threads(BLOCK_DIM, 1, 1);
if (BLOCK_DIM == 32) {
hipLaunchKernelGGL(( euclidean<32>) , dim3(grid), dim3(threads), 0, 0, thrust::raw_pointer_cast(&deviceDataNorms[0]),
thrust::raw_pointer_cast(&deviceCodebookNorms[0]),
thrust::raw_pointer_cast(&deviceGramMatrix[0]),
nVectorsPerRank, map.nSomX * map.nSomY);
}
//Finding minimums
thrust::host_vector<argMinType> minsOfA = minsOfRowSpace(deviceGramMatrix, nVectorsPerRank, map.nSomX * map.nSomY);
CUDA_CHECK(hipDeviceSynchronize());
//Getting back SOM coordinates from minimums
for (int i = 0; i < nVectorsPerRank; i++) {
argMinType tmp = minsOfA[i];
int somCoordinate = thrust::get<0>(tmp) % (map.nSomX * map.nSomY);
bmus[i * 2] = somCoordinate % map.nSomX;
bmus[i * 2 + 1] = somCoordinate / map.nSomX;
}
}
/** Initialize CUBLAS and device data
* @param hostData - the data in the main memory
* @param height - number of data points assigned to this GPU
* @param width - dimensions of a data instance
*/
void initializeGpu(float *hostData, int nVectorsPerRank, som map) {
/* Initialize CUBLAS */
hipblasStatus_t status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
cuda_abort("CuBLAS initialization error");
}
deviceData = thrust::device_vector<float>(hostData, hostData + nVectorsPerRank * map.nDimensions);
deviceDataNorms = normsOfRowSpace<float>(deviceData, nVectorsPerRank, map.nDimensions);
deviceCodebook = thrust::device_vector<float>(map.nSomX * map.nSomY * map.nDimensions, 0);
deviceCodebookNorms = thrust::device_vector<float>(map.nSomX * map.nSomY, 0);
}
/** Check and initialize a device attached to a node
* @param commRank - the MPI rank of this process
* @param commSize - the size of MPI comm world
*/
/// Note that this function was lifted from http://code.google.com/p/gpmr/
void setDevice(int commRank, int commSize) {
int devCount;
int deviceNum = 0;
CUDA_CHECK(hipGetDeviceCount(&devCount));
#ifdef HAVE_MPI
#ifdef _WIN32
FILE * fp = popen("hostname.exe", "r");
#else
FILE * fp = popen("/bin/hostname", "r");
#endif
char buf[1024];
if (fgets(buf, 1023, fp) == NULL) strcpy(buf, "localhost");
pclose(fp);
string host = buf;
host = host.substr(0, host.size() - 1);
strcpy(buf, host.c_str());
if (commRank == 0) {
map<string, vector<int> > hosts;
map<string, int> devCounts;
hosts[buf].push_back(0);
devCounts[buf] = devCount;
MPI_Status stat;
MPI_Request req;
for (int i = 1; i < commSize; ++i) {
MPI_Recv(buf, 1024, MPI_CHAR, i, 0, MPI_COMM_WORLD, &stat);
MPI_Recv(&devCount, 1, MPI_INT, i, 0, MPI_COMM_WORLD, &stat);
// check to make sure each process on each node reports the same number of devices.
hosts[buf].push_back(i);
if (devCounts.find(buf) != devCounts.end()) {
if (devCounts[buf] != devCount) {
printf("Error, device count mismatch %d != %d on %s\n", devCounts[buf], devCount, buf);
fflush(stdout);
}
}
else devCounts[buf] = devCount;
}
// check to make sure that we don't have more jobs on a node than we have GPUs.
for (map<string, vector<int> >::iterator it = hosts.begin(); it != hosts.end(); ++it) {
if (it->second.size() > static_cast<unsigned int>(devCounts[it->first])) {
stringstream sstm;
sstm << "Error, more jobs running on " << it->first.c_str() << " than devices - " << static_cast<int>(it->second.size()) << " jobs > " << devCounts[it->first] << " devices.";
cuda_abort(sstm.str());
}
}
// send out the device number for each process to use.
MPI_Irecv(&deviceNum, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &req);
for (map<string, vector<int> >::iterator it = hosts.begin(); it != hosts.end(); ++it) {
for (unsigned int i = 0; i < it->second.size(); ++i) {
int devID = i;
MPI_Send(&devID, 1, MPI_INT, it->second[i], 0, MPI_COMM_WORLD);
}
}
MPI_Wait(&req, &stat);
}
else {
// send out the hostname and device count for your local node, then get back the device number you should use.
MPI_Status stat;
MPI_Send(buf, strlen(buf) + 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
MPI_Send(&devCount, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
MPI_Recv(&deviceNum, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &stat);
}
MPI_Barrier(MPI_COMM_WORLD);
#endif
CUDA_CHECK(hipSetDevice(deviceNum));
}
/** One epoch on the GPU, dense variant
*/
void trainOneEpochDenseGPU(int itask, float *data, float *numerator,
float *denominator, som map,
unsigned int nVectorsPerRank, float radius,
float scale, bool compact_support, bool gaussian,
bool only_bmus, float std_coeff) {
int *bmus;
#ifdef HAVE_MPI
bmus = new int[nVectorsPerRank * 2];
#else
bmus = map.bmus;
#endif
getBmusOnGpu(bmus, map, nVectorsPerRank);
if (only_bmus) {
#ifdef HAVE_MPI
MPI_Gather(bmus, nVectorsPerRank * 2, MPI_INT, map.bmus, nVectorsPerRank * 2, MPI_INT, 0, MPI_COMM_WORLD);
delete [] bmus;
#endif
return;
}
#ifdef HAVE_MPI
float *localNumerator = new float[map.nSomY * map.nSomX * map.nDimensions];
float *localDenominator = new float[map.nSomY * map.nSomX];
#pragma omp for
for (omp_iter_t som_y = 0; som_y < map.nSomY; som_y++) {
for (unsigned int som_x = 0; som_x < map.nSomX; som_x++) {
localDenominator[som_y * map.nSomX + som_x] = 0.0;
for (unsigned int d = 0; d < map.nDimensions; d++)
localNumerator[som_y * map.nSomX * map.nDimensions + som_x * map.nDimensions + d] = 0.0;
}
}
#pragma omp parallel default(shared)
#else // not HAVE_MPI
float *localNumerator;
float localDenominator = 0;
#pragma omp parallel default(shared) private(localDenominator) private(localNumerator)
#endif
{
#ifndef HAVE_MPI
localNumerator = new float[map.nDimensions];
#endif // HAVE_MPI
#pragma omp for
for (omp_iter_t som_y = 0; som_y < map.nSomY; som_y++) {
for (unsigned int som_x = 0; som_x < map.nSomX; som_x++) {
for (unsigned int n = 0; n < nVectorsPerRank; n++) {
if (itask * nVectorsPerRank + n < map.nVectors) {
float dist = 0.0f;
if (map.gridType == "rectangular") {
if (map.mapType == "planar") {
dist = euclideanDistanceOnPlanarMap(som_x, som_y, bmus[2 * n], bmus[2 * n + 1]);
}
else if (map.mapType == "toroid") {
dist = euclideanDistanceOnToroidMap(som_x, som_y, bmus[2 * n], bmus[2 * n + 1], map.nSomX, map.nSomY);
}
}
else {
if (map.mapType == "planar") {
dist = euclideanDistanceOnHexagonalPlanarMap(som_x, som_y, bmus[2 * n], bmus[2 * n + 1]);
}
else if (map.mapType == "toroid") {
dist = euclideanDistanceOnHexagonalToroidMap(som_x, som_y, bmus[2 * n], bmus[2 * n + 1], map.nSomX, map.nSomY);
}
}
float neighbor_fuct = getWeight(dist, radius, scale, compact_support, gaussian, std_coeff);
#ifdef HAVE_MPI
for (unsigned int d = 0; d < map.nDimensions; d++) {
localNumerator[som_y * map.nSomX * map.nDimensions + som_x * map.nDimensions + d] +=
1.0f * neighbor_fuct
* (*(data + n * map.nDimensions + d));
}
localDenominator[som_y * map.nSomX + som_x] += neighbor_fuct;
#else // In this case, we can update in place
if (n == 0) {
localDenominator = neighbor_fuct;
for (unsigned int d = 0; d < map.nDimensions; d++) {
localNumerator[d] = 1.0f * neighbor_fuct
* (*(data + n * map.nDimensions + d));
}
} else {
localDenominator += neighbor_fuct;
for (unsigned int d = 0; d < map.nDimensions; d++) {
localNumerator[d] += 1.0f * neighbor_fuct
* (*(data + n * map.nDimensions + d));
}
}
#endif // HAVE_MPI
}
} // Looping over data instances
#ifndef HAVE_MPI // We update in-place
for (unsigned int d = 0; d < map.nDimensions; d++) {
if (localDenominator != 0) {
float newWeight = localNumerator[d] / localDenominator;
map.codebook[som_y * map.nSomX * map.nDimensions + som_x * map.nDimensions + d] = newWeight;
}
}
#endif
} // Looping over som_x
} // Looping over som_y
#ifndef HAVE_MPI
delete [] localNumerator;
#endif
} // OPENMP
#ifdef HAVE_MPI
MPI_Reduce(localNumerator, numerator,
map.nSomY * map.nSomX * map.nDimensions, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(localDenominator, denominator,
map.nSomY * map.nSomX, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Gather(bmus, nVectorsPerRank * 2, MPI_INT, map.bmus, nVectorsPerRank * 2, MPI_INT, 0, MPI_COMM_WORLD);
delete [] bmus;
delete [] localNumerator;
delete [] localDenominator;
#endif
}
| a23f9d2f410185fec720ec6751a975f2caa4012d.cu | /**
* Self-Organizing Maps on a cluster
* Copyright (C) 2013 Peter Wittek
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
#undef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_USE_INT128
#include <iostream>
#include <sstream>
#include <map>
#include <vector>
#include <stdio.h>
#include <cublas_v2.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include "somoclu.h"
#ifdef _WIN32
#define popen _popen
#define pclose _pclose
#endif
// Error handling macro
#define CUDA_CHECK(call) \
if((call) != cudaSuccess) { \
cudaError_t err = cudaGetLastError(); \
stringstream sstm; \
sstm << "CUDA error calling \""#call"\", code is " << err; \
cuda_abort(sstm.str()); }
//Globals
cublasHandle_t handle;
thrust::device_vector<float> deviceData;
thrust::device_vector<float> deviceDataNorms;
thrust::device_vector<float> deviceCodebook;
thrust::device_vector<float> deviceCodebookNorms;
// convert a linear index to a row index
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T, T> {
T C; // number of columns
__host__ __device__
linear_index_to_row_index(T C) : C(C) {}
__host__ __device__
T operator()(T i) {
return i / C;
}
};
// note: functor inherits from unary_function
template <typename T>
struct square : public thrust::unary_function<T, T> {
__host__ __device__
T operator()(T x) const {
return x * x;
}
};
typedef thrust::tuple<int, float> argMinType;
struct argMin : public thrust::binary_function<argMinType, argMinType, argMinType> {
__host__ __device__
argMinType operator()(const argMinType& a, const argMinType& b) const {
if (thrust::get<1>(a) < thrust::get<1>(b)) {
return a;
}
else {
return b;
}
}
};
template <typename T>
thrust::device_vector<T> normsOfRowSpace(thrust::device_vector<T> A, int nRows, int nColumns) {
// allocate storage for row sums and indices
thrust::device_vector<T> row_sums(nRows);
thrust::device_vector<int> row_indices(nRows);
// compute row sums by summing values with equal row indices
thrust::reduce_by_key
(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(nColumns)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(nColumns)) + (nRows * nColumns),
thrust::make_transform_iterator(A.begin(), square<T>()),
row_indices.begin(),
row_sums.begin(),
thrust::equal_to<int>(),
thrust::plus<T>());
return row_sums;
}
thrust::device_vector<argMinType> minsOfRowSpace(thrust::device_vector<float> A, int nRows, int nColumns) {
// allocate storage for row sums and indices
thrust::device_vector<argMinType> row_sums(nRows);
thrust::device_vector<int> row_indices(nRows);
// compute row sums by summing values with equal row indices
thrust::reduce_by_key
(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(nColumns)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(nColumns)) + (nRows * nColumns),
thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<int>(0), A.begin())),
row_indices.begin(),
row_sums.begin(),
thrust::equal_to<int>(),
argMin());
return row_sums;
}
template <int BLOCK_DIM>
__global__ void euclidean(float *anorm2, float *bnorm2, float *M, int height, int width) {
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yStartIndex = blockIdx.y * BLOCK_DIM;
if (xIndex < width) {
float bNormForX = bnorm2[xIndex];
unsigned int yEndIndex = (yStartIndex + BLOCK_DIM < height ? yStartIndex + BLOCK_DIM : height);
for (unsigned int yIndex = yStartIndex; yIndex < yEndIndex; yIndex++) {
unsigned int index = yIndex * width + xIndex;
M[index] = anorm2[yIndex] - 2 * M[index] + bNormForX;
}
}
}
template <typename T>
void printMatrix(thrust::device_vector<T> A, int nRows, int nColumns) {
for (size_t i = 0; i < nRows; i++) {
for (size_t j = 0; j < nColumns; j++) {
std::cout << A[i * nColumns + j] << " ";
}
std::cout << "\n";
}
std::cout << "\n";
}
/** Clear the device memory and shut down CUBLAS
*
*/
void freeGpu() {
deviceData.clear();
deviceDataNorms.clear();
deviceCodebook.clear();
deviceCodebookNorms.clear();
thrust::device_vector<float>().swap(deviceData);
thrust::device_vector<float>().swap(deviceDataNorms);
thrust::device_vector<float>().swap(deviceCodebook);
thrust::device_vector<float>().swap(deviceCodebookNorms);
cublasStatus_t status = cublasDestroy(handle);
if (status != CUBLAS_STATUS_SUCCESS) {
cuda_abort("CuBLAS shutdown error");
}
}
/** Find the best matching units -- called from the map function
* @param bmus - array of best matching units
* @param map.codebook - the map.codebook to save
* @param map.nSomX - dimensions of SOM map in the x direction
* @param map.nSomY - dimensions of SOM map in the y direction
* @param map.nDimensions - dimensions of a data instance
* @param nVectorsPerRank - the number of data points assigned to this GPU
*/
void getBmusOnGpu(int *bmus, som map, int nVectorsPerRank) {
deviceCodebook = thrust::device_vector<float>(map.codebook, map.codebook + map.nSomX * map.nSomY * map.nDimensions);
deviceCodebookNorms = normsOfRowSpace<float>(deviceCodebook, map.nSomX * map.nSomY, map.nDimensions);
thrust::device_vector<float> deviceGramMatrix(map.nSomX * map.nSomY * nVectorsPerRank, 0);
//Calculate the inner products of the data vectors and the weight vectors
float alpha = 1.0f;
float beta = 0.0f;
cublasStatus_t status = cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N,
map.nSomX * map.nSomY, nVectorsPerRank, map.nDimensions,
&alpha, thrust::raw_pointer_cast(&deviceCodebook[0]), map.nDimensions,
thrust::raw_pointer_cast(&deviceData[0]), map.nDimensions,
&beta, thrust::raw_pointer_cast(&deviceGramMatrix[0]), map.nSomX * map.nSomY);
if (status != CUBLAS_STATUS_SUCCESS) {
cuda_abort("Kernel execution error.");
}
//All components of the vectorized Euclidean distance are available
// 32 is a magic number, this is the block size that works best on Tesla C2050
int BLOCK_DIM = 32;
dim3 grid((map.nSomX * map.nSomY + BLOCK_DIM - 1) / BLOCK_DIM, (nVectorsPerRank + BLOCK_DIM - 1) / BLOCK_DIM, 1);
dim3 threads(BLOCK_DIM, 1, 1);
if (BLOCK_DIM == 32) {
euclidean<32> <<< grid, threads>>>(thrust::raw_pointer_cast(&deviceDataNorms[0]),
thrust::raw_pointer_cast(&deviceCodebookNorms[0]),
thrust::raw_pointer_cast(&deviceGramMatrix[0]),
nVectorsPerRank, map.nSomX * map.nSomY);
}
//Finding minimums
thrust::host_vector<argMinType> minsOfA = minsOfRowSpace(deviceGramMatrix, nVectorsPerRank, map.nSomX * map.nSomY);
CUDA_CHECK(cudaDeviceSynchronize());
//Getting back SOM coordinates from minimums
for (int i = 0; i < nVectorsPerRank; i++) {
argMinType tmp = minsOfA[i];
int somCoordinate = thrust::get<0>(tmp) % (map.nSomX * map.nSomY);
bmus[i * 2] = somCoordinate % map.nSomX;
bmus[i * 2 + 1] = somCoordinate / map.nSomX;
}
}
/** Initialize CUBLAS and device data
* @param hostData - the data in the main memory
* @param height - number of data points assigned to this GPU
* @param width - dimensions of a data instance
*/
void initializeGpu(float *hostData, int nVectorsPerRank, som map) {
/* Initialize CUBLAS */
cublasStatus_t status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS) {
cuda_abort("CuBLAS initialization error");
}
deviceData = thrust::device_vector<float>(hostData, hostData + nVectorsPerRank * map.nDimensions);
deviceDataNorms = normsOfRowSpace<float>(deviceData, nVectorsPerRank, map.nDimensions);
deviceCodebook = thrust::device_vector<float>(map.nSomX * map.nSomY * map.nDimensions, 0);
deviceCodebookNorms = thrust::device_vector<float>(map.nSomX * map.nSomY, 0);
}
/** Check and initialize a device attached to a node
* @param commRank - the MPI rank of this process
* @param commSize - the size of MPI comm world
*/
/// Note that this function was lifted from http://code.google.com/p/gpmr/
void setDevice(int commRank, int commSize) {
int devCount;
int deviceNum = 0;
CUDA_CHECK(cudaGetDeviceCount(&devCount));
#ifdef HAVE_MPI
#ifdef _WIN32
FILE * fp = popen("hostname.exe", "r");
#else
FILE * fp = popen("/bin/hostname", "r");
#endif
char buf[1024];
if (fgets(buf, 1023, fp) == NULL) strcpy(buf, "localhost");
pclose(fp);
string host = buf;
host = host.substr(0, host.size() - 1);
strcpy(buf, host.c_str());
if (commRank == 0) {
map<string, vector<int> > hosts;
map<string, int> devCounts;
hosts[buf].push_back(0);
devCounts[buf] = devCount;
MPI_Status stat;
MPI_Request req;
for (int i = 1; i < commSize; ++i) {
MPI_Recv(buf, 1024, MPI_CHAR, i, 0, MPI_COMM_WORLD, &stat);
MPI_Recv(&devCount, 1, MPI_INT, i, 0, MPI_COMM_WORLD, &stat);
// check to make sure each process on each node reports the same number of devices.
hosts[buf].push_back(i);
if (devCounts.find(buf) != devCounts.end()) {
if (devCounts[buf] != devCount) {
printf("Error, device count mismatch %d != %d on %s\n", devCounts[buf], devCount, buf);
fflush(stdout);
}
}
else devCounts[buf] = devCount;
}
// check to make sure that we don't have more jobs on a node than we have GPUs.
for (map<string, vector<int> >::iterator it = hosts.begin(); it != hosts.end(); ++it) {
if (it->second.size() > static_cast<unsigned int>(devCounts[it->first])) {
stringstream sstm;
sstm << "Error, more jobs running on " << it->first.c_str() << " than devices - " << static_cast<int>(it->second.size()) << " jobs > " << devCounts[it->first] << " devices.";
cuda_abort(sstm.str());
}
}
// send out the device number for each process to use.
MPI_Irecv(&deviceNum, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &req);
for (map<string, vector<int> >::iterator it = hosts.begin(); it != hosts.end(); ++it) {
for (unsigned int i = 0; i < it->second.size(); ++i) {
int devID = i;
MPI_Send(&devID, 1, MPI_INT, it->second[i], 0, MPI_COMM_WORLD);
}
}
MPI_Wait(&req, &stat);
}
else {
// send out the hostname and device count for your local node, then get back the device number you should use.
MPI_Status stat;
MPI_Send(buf, strlen(buf) + 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
MPI_Send(&devCount, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
MPI_Recv(&deviceNum, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &stat);
}
MPI_Barrier(MPI_COMM_WORLD);
#endif
CUDA_CHECK(cudaSetDevice(deviceNum));
}
/** One epoch on the GPU, dense variant
*/
void trainOneEpochDenseGPU(int itask, float *data, float *numerator,
float *denominator, som map,
unsigned int nVectorsPerRank, float radius,
float scale, bool compact_support, bool gaussian,
bool only_bmus, float std_coeff) {
int *bmus;
#ifdef HAVE_MPI
bmus = new int[nVectorsPerRank * 2];
#else
bmus = map.bmus;
#endif
getBmusOnGpu(bmus, map, nVectorsPerRank);
if (only_bmus) {
#ifdef HAVE_MPI
MPI_Gather(bmus, nVectorsPerRank * 2, MPI_INT, map.bmus, nVectorsPerRank * 2, MPI_INT, 0, MPI_COMM_WORLD);
delete [] bmus;
#endif
return;
}
#ifdef HAVE_MPI
float *localNumerator = new float[map.nSomY * map.nSomX * map.nDimensions];
float *localDenominator = new float[map.nSomY * map.nSomX];
#pragma omp for
for (omp_iter_t som_y = 0; som_y < map.nSomY; som_y++) {
for (unsigned int som_x = 0; som_x < map.nSomX; som_x++) {
localDenominator[som_y * map.nSomX + som_x] = 0.0;
for (unsigned int d = 0; d < map.nDimensions; d++)
localNumerator[som_y * map.nSomX * map.nDimensions + som_x * map.nDimensions + d] = 0.0;
}
}
#pragma omp parallel default(shared)
#else // not HAVE_MPI
float *localNumerator;
float localDenominator = 0;
#pragma omp parallel default(shared) private(localDenominator) private(localNumerator)
#endif
{
#ifndef HAVE_MPI
localNumerator = new float[map.nDimensions];
#endif // HAVE_MPI
#pragma omp for
for (omp_iter_t som_y = 0; som_y < map.nSomY; som_y++) {
for (unsigned int som_x = 0; som_x < map.nSomX; som_x++) {
for (unsigned int n = 0; n < nVectorsPerRank; n++) {
if (itask * nVectorsPerRank + n < map.nVectors) {
float dist = 0.0f;
if (map.gridType == "rectangular") {
if (map.mapType == "planar") {
dist = euclideanDistanceOnPlanarMap(som_x, som_y, bmus[2 * n], bmus[2 * n + 1]);
}
else if (map.mapType == "toroid") {
dist = euclideanDistanceOnToroidMap(som_x, som_y, bmus[2 * n], bmus[2 * n + 1], map.nSomX, map.nSomY);
}
}
else {
if (map.mapType == "planar") {
dist = euclideanDistanceOnHexagonalPlanarMap(som_x, som_y, bmus[2 * n], bmus[2 * n + 1]);
}
else if (map.mapType == "toroid") {
dist = euclideanDistanceOnHexagonalToroidMap(som_x, som_y, bmus[2 * n], bmus[2 * n + 1], map.nSomX, map.nSomY);
}
}
float neighbor_fuct = getWeight(dist, radius, scale, compact_support, gaussian, std_coeff);
#ifdef HAVE_MPI
for (unsigned int d = 0; d < map.nDimensions; d++) {
localNumerator[som_y * map.nSomX * map.nDimensions + som_x * map.nDimensions + d] +=
1.0f * neighbor_fuct
* (*(data + n * map.nDimensions + d));
}
localDenominator[som_y * map.nSomX + som_x] += neighbor_fuct;
#else // In this case, we can update in place
if (n == 0) {
localDenominator = neighbor_fuct;
for (unsigned int d = 0; d < map.nDimensions; d++) {
localNumerator[d] = 1.0f * neighbor_fuct
* (*(data + n * map.nDimensions + d));
}
} else {
localDenominator += neighbor_fuct;
for (unsigned int d = 0; d < map.nDimensions; d++) {
localNumerator[d] += 1.0f * neighbor_fuct
* (*(data + n * map.nDimensions + d));
}
}
#endif // HAVE_MPI
}
} // Looping over data instances
#ifndef HAVE_MPI // We update in-place
for (unsigned int d = 0; d < map.nDimensions; d++) {
if (localDenominator != 0) {
float newWeight = localNumerator[d] / localDenominator;
map.codebook[som_y * map.nSomX * map.nDimensions + som_x * map.nDimensions + d] = newWeight;
}
}
#endif
} // Looping over som_x
} // Looping over som_y
#ifndef HAVE_MPI
delete [] localNumerator;
#endif
} // OPENMP
#ifdef HAVE_MPI
MPI_Reduce(localNumerator, numerator,
map.nSomY * map.nSomX * map.nDimensions, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(localDenominator, denominator,
map.nSomY * map.nSomX, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Gather(bmus, nVectorsPerRank * 2, MPI_INT, map.bmus, nVectorsPerRank * 2, MPI_INT, 0, MPI_COMM_WORLD);
delete [] bmus;
delete [] localNumerator;
delete [] localDenominator;
#endif
}
|
4db2cdeace3b3a22ff2233f937bd506ea1aabb8e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2021, Oak Ridge National Laboratory.
* MGARD-GPU: MultiGrid Adaptive Reduction of Data Accelerated by GPUs
* Author: Jieyang Chen ([email protected])
* Date: April 2, 2021
*/
#include <chrono>
#include <iomanip>
#include <iostream>
#include <numeric>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
#include <thrust/reduce.h>
#include <vector>
#include "cuda/CompressionWorkflow.h"
#include "cuda/CommonInternal.h"
#include "cuda/MemoryManagement.h"
#include "cuda/DataRefactoring.h"
#include "cuda/LinearQuantization.h"
#include "cuda/LosslessCompression.h"
using namespace std::chrono;
namespace mgard_cuda {
template <typename T>
struct linf_norm : public thrust::binary_function<T, T, T> {
__host__ __device__ T operator()(T x, T y) { return max(abs(x), abs(y)); }
};
template <uint32_t D, typename T>
Array<1, unsigned char> compress(Handle<D, T> &handle, Array<D, T> &in_array,
enum error_bound_type type, T tol, T s) {
cudaSetDeviceHelper(handle.dev_id);
for (int i = 0; i < D; i++) {
if (handle.shapes_h[0][i] != in_array.getShape()[i]) {
      std::cout << log_err
                << "The shape of the input array does not match the shape "
                   "initialized in the handle!\n";
std::vector<size_t> empty_shape;
empty_shape.push_back(1);
Array<1, unsigned char> empty(empty_shape);
return empty;
}
}
// handle.l_target = 3;
high_resolution_clock::time_point t1, t2, start, end;
duration<double> time_span;
size_t free, total;
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9); hipMemGetInfo(&free,
// &total); printf("Mem: %f/%f\n", (double)(total-free)/1e9,
// (double)total/1e9);
T norm = (T)1.0;
if (type == REL) {
t1 = high_resolution_clock::now();
thrust::device_vector<T> v_vec(handle.dofs[0][0] * handle.dofs[1][0] *
handle.linearized_depth);
cudaMemcpy3DAsyncHelper(
handle, thrust::raw_pointer_cast(v_vec.data()),
handle.dofs[0][0] * sizeof(T), handle.dofs[0][0] * sizeof(T),
handle.dofs[1][0], in_array.get_dv(),
in_array.get_ldvs_h()[0] * sizeof(T), handle.dofs[0][0] * sizeof(T),
handle.dofs[1][0], handle.dofs[0][0] * sizeof(T), handle.dofs[1][0],
handle.linearized_depth, AUTO, 0);
handle.sync(0);
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("copy time: %.6f s \n", time_span.count());
t1 = high_resolution_clock::now();
norm = thrust::reduce(v_vec.begin(), v_vec.end(), (T)0, linf_norm<T>());
// printf("norm %f\n", norm);
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
}
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
handle.allocate_workspace();
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
start = high_resolution_clock::now();
// Decomposition
t1 = high_resolution_clock::now();
decompose<D, T>(handle, in_array.get_dv(), in_array.get_ldvs_h(),
handle.l_target);
// printf("sync_all 1\n");
handle.sync_all();
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("Decomposition time: %.6f s\n", time_span.count());
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
// print_matrix_cuda(handle.nrow, handle.ncol, handle.nfib, dv, lddv1, lddv2,
// handle.nfib);
// printf("sync_all 2\n");
handle.sync_all();
handle.free_workspace();
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
/**** refactoring ****/
// outsize = (handle.linearized_depth * handle.dofs[1][0] * handle.dofs[0][0])
// * sizeof(T); unsigned char *buffer = (unsigned char *)malloc(outsize);
// cudaMemcpy3DAsyncHelper(
// handle,
// buffer, handle.dofs[0][0] * sizeof(T), handle.dofs[0][0] * sizeof(T),
// handle.dofs[1][0], dv, lddv1 * sizeof(T), handle.dofs[0][0] *
// sizeof(T), handle.dofs[1][0], handle.dofs[0][0] * sizeof(T),
// handle.dofs[1][0], handle.linearized_depth, D2H, 0);
// Quantization
bool huffman = true;
int dict_size = handle.huff_dict_size, block_size = handle.huff_block_size;
size_t quantized_count =
handle.dofs[0][0] * handle.dofs[1][0] * handle.linearized_depth;
int *dqv;
cudaMallocHelper((void **)&dqv, (handle.dofs[0][0] * handle.dofs[1][0] *
handle.linearized_depth) *
sizeof(int));
thrust::device_vector<int> ldqvs(handle.D_padded);
ldqvs[0] = handle.dofs[0][0];
for (int i = 1; i < handle.D_padded; i++) {
ldqvs[i] = handle.dofs[i][0];
}
t1 = high_resolution_clock::now();
int *hshapes = new int[D * (handle.l_target + 2)];
for (int d = 0; d < D; d++) {
hshapes[d * (handle.l_target + 2)] = 0;
for (int l = 1; l < handle.l_target + 2; l++) {
hshapes[d * (handle.l_target + 2) + l] =
handle.dofs[d][handle.l_target + 1 - l];
}
// printf("hshapes[%d]: ", d);
// for (int l = 0; l < handle.l_target+2; l++) { printf("%d ", hshapes[d *
// (handle.l_target+2)+l]); } printf("\n");
}
int *dshapes;
cudaMallocHelper((void **)&dshapes, D * (handle.l_target + 2) * sizeof(int));
cudaMemcpyAsyncHelper(handle, dshapes, hshapes,
D * (handle.l_target + 2) * sizeof(int), H2D, 0);
size_t estimate_outlier_count = (double)handle.dofs[0][0] *
handle.dofs[1][0] * handle.linearized_depth *
1;
// printf("estimate_outlier_count: %llu\n", estimate_outlier_count);
size_t *outlier_count_d;
unsigned int *outlier_idx_d;
int *outliers;
cudaMallocHelper((void **)&outliers, estimate_outlier_count * sizeof(int));
cudaMallocHelper((void **)&outlier_count_d, sizeof(size_t));
cudaMallocHelper((void **)&outlier_idx_d,
estimate_outlier_count * sizeof(unsigned int));
size_t zero = 0, outlier_count, *outlier_idx_h;
cudaMemcpyAsyncHelper(handle, outlier_count_d, &zero, sizeof(size_t), H2D, 0);
quant_meta<T> m;
m.norm = norm;
m.s = s;
m.tol = tol;
m.dict_size = dict_size;
m.enable_lz4 = handle.enable_lz4;
m.l_target = handle.l_target;
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
levelwise_linear_quantize<D, T>(
handle, dshapes, handle.l_target, m, in_array.get_dv(),
in_array.get_ldvs_d(), dqv, thrust::raw_pointer_cast(ldqvs.data()),
huffman, handle.shapes_d[0], outlier_count_d, outlier_idx_d, outliers, 0);
cudaMemcpyAsyncHelper(handle, &outlier_count, outlier_count_d, sizeof(size_t),
D2H, 0);
// printf("outlier_count: %llu\n", outlier_count);
// printf("dqv\n");
  // print_matrix_cuda(1, quantized_count, dqv, quantized_count);
// printf("outlier_idx_d\n");
// print_matrix_cuda(1, outlier_count, outlier_idx_d, quantized_count);
// printf("outliers\n");
// print_matrix_cuda(1, outlier_count, outliers, quantized_count);
std::vector<size_t> outlier_idx;
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("Quantization time: %.6f s\n", time_span.count());
// cudaFreeHelper(dv);
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
// Huffman compression
t1 = high_resolution_clock::now();
uint64_t *hufmeta;
uint64_t *hufdata;
size_t hufmeta_size;
size_t hufdata_size;
huffman_compress<D, T, int, uint32_t, uint64_t>(
handle, dqv, quantized_count, outlier_idx, hufmeta, hufmeta_size, hufdata,
hufdata_size, block_size, dict_size, 0);
// printf("sync_all 3\n");
handle.sync_all();
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("Parallel Huffman time: %.6f s\n", time_span.count());
cudaFreeHelper(dqv);
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
// LZ4 compression
void *lz4_hufmeta;
size_t lz4_hufmeta_size;
void *lz4_hufdata;
size_t lz4_hufdata_size;
if (handle.enable_lz4) {
t1 = high_resolution_clock::now();
lz4_compress(handle, hufdata, hufdata_size / sizeof(uint64_t), lz4_hufdata,
lz4_hufdata_size, handle.lz4_block_size, 0);
// printf("sync_all 4\n");
handle.sync_all();
cudaFreeHelper(hufdata);
hufdata = (uint64_t *)lz4_hufdata;
hufdata_size = lz4_hufdata_size;
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("NVComp::LZ4 time: %.6f s\n", time_span.count());
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
}
end = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(end - start);
// printf("Overall compression time: %.6f s (%.6f GB/s)\n", time_span.count(),
// (double)(handle.dofs[0][0] * handle.dofs[1][0] *handle.linearized_depth
// *sizeof(T))/time_span.count()/1e9);
  // Output serialization
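  // Buffer layout: quant_meta header, outlier count, outlier indices, outlier
  // values, Huffman metadata size + metadata, Huffman data size + data.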
t1 = high_resolution_clock::now();
size_t outsize = 0;
outsize += sizeof(quant_meta<T>);
outsize += sizeof(size_t) + outlier_count * sizeof(size_t) +
outlier_count * sizeof(int);
outsize += sizeof(size_t) + hufmeta_size;
outsize += sizeof(size_t) + hufdata_size;
std::vector<size_t> out_shape(1);
out_shape[0] = outsize;
Array<1, unsigned char> compressed_array(out_shape);
unsigned char *buffer = compressed_array.get_dv();
// cudaMallocHostHelper((void**)&buffer, outsize);
// else cudaMallocHelper((void**)&buffer, outsize);
// unsigned char *buffer = (unsigned char *)malloc(outsize);
void *buffer_p = (void *)buffer;
// memcpy(buffer_p, &m, sizeof(quant_meta<T>));
cudaMemcpyAsyncHelper(handle, buffer_p, &m, sizeof(quant_meta<T>), AUTO, 0);
buffer_p = buffer_p + sizeof(quant_meta<T>);
cudaMemcpyAsyncHelper(handle, buffer_p, outlier_count_d, sizeof(size_t), AUTO,
0);
buffer_p = buffer_p + sizeof(size_t);
cudaMemcpyAsyncHelper(handle, buffer_p, outlier_idx_d,
outlier_count * sizeof(unsigned int), AUTO, 0);
buffer_p = buffer_p + outlier_count * sizeof(unsigned int);
cudaMemcpyAsyncHelper(handle, buffer_p, outliers, outlier_count * sizeof(int),
AUTO, 0);
buffer_p = buffer_p + outlier_count * sizeof(int);
// memcpy(buffer_p, &hufmeta_size, sizeof(size_t));
cudaMemcpyAsyncHelper(handle, buffer_p, &hufmeta_size, sizeof(size_t), AUTO,
0);
buffer_p = buffer_p + sizeof(size_t);
cudaMemcpyAsyncHelper(handle, buffer_p, hufmeta, hufmeta_size, AUTO, 0);
buffer_p = buffer_p + hufmeta_size;
// memcpy(buffer_p, &lz4_hufmeta_size, sizeof(size_t));
// buffer_p = buffer_p + sizeof(size_t);
// memcpy(buffer_p, &hufdata_size, sizeof(size_t));
cudaMemcpyAsyncHelper(handle, buffer_p, &hufdata_size, sizeof(size_t), AUTO,
0);
buffer_p = buffer_p + sizeof(size_t);
// cudaMemcpyAsyncHelper(handle, buffer_p, lz4_hufmeta, lz4_hufmeta_size, D2H,
// 0); buffer_p = buffer_p + lz4_hufmeta_size;
cudaMemcpyAsyncHelper(handle, buffer_p, hufdata, hufdata_size, AUTO, 0);
buffer_p = buffer_p + hufdata_size;
// printf("sync_all 5\n");
handle.sync_all();
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
  // printf("serialization time: %.6f\n", time_span.count());
cudaFreeHelper(outlier_count_d);
cudaFreeHelper(outlier_idx_d);
cudaFreeHelper(outliers);
cudaFreeHelper(hufmeta);
cudaFreeHelper(hufdata);
hipMemGetInfo(&free, &total);
// printf("Mem: %f/%f\n", (double)(total - free) / 1e9, (double)total / 1e9);
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
return compressed_array;
}
template <uint32_t D, typename T>
Array<D, T> decompress(Handle<D, T> &handle,
Array<1, unsigned char> &compressed_array) {
cudaSetDeviceHelper(handle.dev_id);
high_resolution_clock::time_point t1, t2, start, end;
duration<double> time_span;
size_t free, total;
quant_meta<T> m;
size_t outlier_count;
unsigned int *outlier_idx_d;
int *outliers;
void *lz4_hufmeta;
size_t lz4_hufmeta_size;
void *lz4_hufdata;
size_t lz4_hufdata_size;
uint8_t *hufmeta;
uint64_t *hufdata;
size_t hufmeta_size;
size_t hufdata_size;
size_t outsize;
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
void *data_p = compressed_array.get_dv(); //(void *)data;
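  // Deserialize in the order written by compress(): quant_meta, outlier
  // count/indices/values, Huffman metadata, then (possibly LZ4-compressed) Huffman data.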
cudaMemcpyAsyncHelper(handle, &m, data_p, sizeof(quant_meta<T>), AUTO, 0);
data_p = data_p + sizeof(quant_meta<T>);
cudaMemcpyAsyncHelper(handle, &outlier_count, data_p, sizeof(size_t), AUTO,
0);
data_p = data_p + sizeof(size_t);
handle.sync(0);
cudaMallocHelper((void **)&outlier_idx_d,
outlier_count * sizeof(unsigned int));
cudaMemcpyAsyncHelper(handle, outlier_idx_d, data_p,
outlier_count * sizeof(unsigned int), AUTO, 0);
data_p = data_p + outlier_count * sizeof(unsigned int);
cudaMallocHelper((void **)&outliers, outlier_count * sizeof(int));
cudaMemcpyAsyncHelper(handle, outliers, data_p, outlier_count * sizeof(int),
AUTO, 0);
data_p = data_p + outlier_count * sizeof(int);
cudaMemcpyAsyncHelper(handle, &hufmeta_size, data_p, sizeof(size_t), AUTO, 0);
data_p = data_p + sizeof(size_t);
handle.sync(0);
cudaMallocHelper((void **)&hufmeta, hufmeta_size);
cudaMemcpyAsyncHelper(handle, hufmeta, data_p, hufmeta_size, AUTO, 0);
data_p = data_p + hufmeta_size;
cudaMemcpyAsyncHelper(handle, &hufdata_size, data_p, sizeof(size_t), AUTO, 0);
data_p = data_p + sizeof(size_t);
handle.sync(0);
cudaMallocHelper((void **)&hufdata, hufdata_size);
cudaMemcpyAsyncHelper(handle, hufdata, data_p, hufdata_size, H2D, 0);
data_p = data_p + hufdata_size;
handle.sync(0);
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
start = high_resolution_clock::now();
if (m.enable_lz4) {
if (!handle.enable_lz4)
      printf("Warning: This data was compressed with LZ4, but the handle is "
             "configured to disable LZ4!\n");
t1 = high_resolution_clock::now();
uint64_t *lz4_decompressed_hufdata;
size_t lz4_decompressed_hufdata_size;
lz4_decompress(handle, (void *)hufdata, hufdata_size,
lz4_decompressed_hufdata, lz4_decompressed_hufdata_size, 0);
// printf("sync_all 6\n");
handle.sync_all();
cudaFreeHelper(hufdata);
hufdata = lz4_decompressed_hufdata;
hufdata_size = lz4_decompressed_hufdata_size;
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("NVComp::LZ4 time: %.6f s \n", time_span.count());
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
}
size_t quantized_count =
handle.dofs[0][0] * handle.dofs[1][0] * handle.linearized_depth;
int *dqv;
t1 = high_resolution_clock::now();
huffman_decompress<D, T, int, uint32_t, uint64_t>(
handle, (uint64_t *)hufmeta, hufmeta_size, hufdata, hufdata_size, dqv,
outsize, 0);
// printf("sync_all 7\n");
handle.sync_all();
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("Parallel Huffman time: %.6f s\n", time_span.count());
cudaFreeHelper(hufmeta);
cudaFreeHelper(hufdata);
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
thrust::device_vector<int> ldqvs(handle.D_padded);
ldqvs[0] = handle.dofs[0][0];
for (int i = 1; i < handle.D_padded; i++) {
ldqvs[i] = handle.dofs[i][0];
}
thrust::device_vector<int> shape(handle.D_padded);
for (int d = 0; d < handle.D_padded; d++) {
shape[d] = handle.dofs[d][0];
}
// T *dv;
// size_t dv_pitch;
// cudaMalloc3DHelper((void **)&dv, &dv_pitch, handle.dofs[0][0] * sizeof(T),
// handle.dofs[1][0], handle.linearized_depth);
// int lddv1 = dv_pitch / sizeof(T);
// int lddv2 = handle.dofs[1][0];
// thrust::device_vector<int> ldvs(handle.D_padded);
// ldvs[0] = lddv1;
// for (int i = 1; i < handle.D_padded; i++) { ldvs[i] = handle.dofs[i][0]; }
// std::vector<int> ldvs_h(handle.D_padded);
// ldvs_h[0] = lddv1;
// for (int i = 1; i < handle.D_padded; i++) { ldvs_h[i] = handle.dofs[i][0];
// } int * ldvs_d; cudaMallocHelper((void **)&ldvs_d, handle.D_padded *
// sizeof(int)); cudaMemcpyAsyncHelper(handle, ldvs_d, ldvs_h.data(),
// handle.D_padded * sizeof(int), H2D, 0);
std::vector<size_t> decompressed_shape(D);
for (int i = 0; i < D; i++)
decompressed_shape[i] = handle.shapes_h[0][i];
std::reverse(decompressed_shape.begin(), decompressed_shape.end());
Array<D, T> decompressed_data(decompressed_shape);
int *hshapes = new int[D * (handle.l_target + 2)];
for (int d = 0; d < D; d++) {
hshapes[d * (handle.l_target + 2)] = 0;
for (int l = 1; l < handle.l_target + 2; l++) {
hshapes[d * (handle.l_target + 2) + l] =
handle.dofs[d][handle.l_target + 1 - l];
}
// printf("hshapes[%d]: ", d);
// for (int l = 0; l < handle.l_target+2; l++) { printf("%d ", hshapes[d *
// (handle.l_target+2)+l]); } printf("\n");
}
int *dshapes;
cudaMallocHelper((void **)&dshapes, D * (handle.l_target + 2) * sizeof(int));
cudaMemcpyAsyncHelper(handle, dshapes, hshapes,
D * (handle.l_target + 2) * sizeof(int), H2D, 0);
// printf("sync_all 7.5\n");
handle.sync_all();
t1 = high_resolution_clock::now();
levelwise_linear_dequantize<D, T>(handle, dshapes, handle.l_target, m, dqv,
thrust::raw_pointer_cast(ldqvs.data()),
decompressed_data.get_dv(),
decompressed_data.get_ldvs_d(),
outlier_count, outlier_idx_d, outliers, 0);
// printf("sync_all 8\n");
handle.sync_all();
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("Dequantization time: %.6f s\n", time_span.count());
cudaFreeHelper(dqv);
cudaFreeHelper(outlier_idx_d);
cudaFreeHelper(outliers);
hipMemGetInfo(&free, &total);
// printf("Mem: %f/%f\n", (double)(total-free)/1e9, (double)total/1e9);
// printf("dv:\n");
  // print_matrix_cuda(1, quantized_count, dv, quantized_count);
/**** refactoring ****/
// cudaMemcpy3DAsyncHelper( handle,
// dv, lddv1 * sizeof(T), handle.dofs[0][0] * sizeof(T), handle.dofs[1][0],
// data, handle.dofs[0][0] * sizeof(T), handle.dofs[0][0] * sizeof(T),
// handle.dofs[1][0], handle.dofs[0][0] * sizeof(T), handle.dofs[1][0],
// handle.linearized_depth, H2D, 0);
handle.allocate_workspace();
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
t1 = high_resolution_clock::now();
recompose<D, T>(handle, decompressed_data.get_dv(),
decompressed_data.get_ldvs_h(), m.l_target);
// printf("sync_all 9\n");
handle.sync_all();
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("Recomposition time: %.6f s\n", time_span.count());
handle.free_workspace();
// printf("sync_all 10\n");
handle.sync_all();
end = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(end - start);
// printf("Overall decompression time: %.6f s (%.6f GB/s)\n",
// time_span.count(), (double)(handle.dofs[0][0] * handle.dofs[1][0]
// *handle.linearized_depth *sizeof(T))/time_span.count()/1e9);
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
// T *v;
// cudaMallocHostHelper((void **)&v, handle.dofs[0][0] * handle.dofs[1][0] *
// handle.linearized_depth * sizeof(T));
// // = (T *)malloc(handle.dofs[0][0] * handle.dofs[1][0] *
// handle.linearized_depth * sizeof(T));
// cudaMemcpy3DAsyncHelper(
// handle, v, handle.dofs[0][0] * sizeof(T), handle.dofs[0][0] *
// sizeof(T), handle.dofs[1][0], dv, lddv1 * sizeof(T), handle.dofs[0][0]
// * sizeof(T), handle.dofs[1][0], handle.dofs[0][0] * sizeof(T),
// handle.dofs[1][0], handle.linearized_depth, D2H, 0);
// cudaFreeHelper(dv);
// hipMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
return decompressed_data;
}
#define KERNELS(D, T) \
template Array<1, unsigned char> compress<D, T>( \
Handle<D, T> & handle, Array<D, T> & in_array, \
enum error_bound_type type, T tol, T s); \
template Array<D, T> decompress<D, T>( \
Handle<D, T> & handle, Array<1, unsigned char> & compressed_array);
KERNELS(1, double)
KERNELS(1, float)
KERNELS(2, double)
KERNELS(2, float)
KERNELS(3, double)
KERNELS(3, float)
KERNELS(4, double)
KERNELS(4, float)
KERNELS(5, double)
KERNELS(5, float)
#undef KERNELS
} // namespace mgard_cuda
| 4db2cdeace3b3a22ff2233f937bd506ea1aabb8e.cu | /*
* Copyright 2021, Oak Ridge National Laboratory.
* MGARD-GPU: MultiGrid Adaptive Reduction of Data Accelerated by GPUs
* Author: Jieyang Chen ([email protected])
* Date: April 2, 2021
*/
#include <chrono>
#include <iomanip>
#include <iostream>
#include <numeric>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
#include <thrust/reduce.h>
#include <vector>
#include "cuda/CompressionWorkflow.h"
#include "cuda/CommonInternal.h"
#include "cuda/MemoryManagement.h"
#include "cuda/DataRefactoring.h"
#include "cuda/LinearQuantization.h"
#include "cuda/LosslessCompression.h"
using namespace std::chrono;
namespace mgard_cuda {
template <typename T>
struct linf_norm : public thrust::binary_function<T, T, T> {
__host__ __device__ T operator()(T x, T y) { return max(abs(x), abs(y)); }
};
template <uint32_t D, typename T>
Array<1, unsigned char> compress(Handle<D, T> &handle, Array<D, T> &in_array,
enum error_bound_type type, T tol, T s) {
cudaSetDeviceHelper(handle.dev_id);
for (int i = 0; i < D; i++) {
if (handle.shapes_h[0][i] != in_array.getShape()[i]) {
      std::cout << log_err
                << "The shape of the input array does not match the shape "
                   "initialized in the handle!\n";
std::vector<size_t> empty_shape;
empty_shape.push_back(1);
Array<1, unsigned char> empty(empty_shape);
return empty;
}
}
// handle.l_target = 3;
high_resolution_clock::time_point t1, t2, start, end;
duration<double> time_span;
size_t free, total;
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9); cudaMemGetInfo(&free,
// &total); printf("Mem: %f/%f\n", (double)(total-free)/1e9,
// (double)total/1e9);
T norm = (T)1.0;
if (type == REL) {
t1 = high_resolution_clock::now();
thrust::device_vector<T> v_vec(handle.dofs[0][0] * handle.dofs[1][0] *
handle.linearized_depth);
cudaMemcpy3DAsyncHelper(
handle, thrust::raw_pointer_cast(v_vec.data()),
handle.dofs[0][0] * sizeof(T), handle.dofs[0][0] * sizeof(T),
handle.dofs[1][0], in_array.get_dv(),
in_array.get_ldvs_h()[0] * sizeof(T), handle.dofs[0][0] * sizeof(T),
handle.dofs[1][0], handle.dofs[0][0] * sizeof(T), handle.dofs[1][0],
handle.linearized_depth, AUTO, 0);
handle.sync(0);
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("copy time: %.6f s \n", time_span.count());
t1 = high_resolution_clock::now();
norm = thrust::reduce(v_vec.begin(), v_vec.end(), (T)0, linf_norm<T>());
// printf("norm %f\n", norm);
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
}
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
handle.allocate_workspace();
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
start = high_resolution_clock::now();
// Decomposition
t1 = high_resolution_clock::now();
decompose<D, T>(handle, in_array.get_dv(), in_array.get_ldvs_h(),
handle.l_target);
// printf("sync_all 1\n");
handle.sync_all();
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("Decomposition time: %.6f s\n", time_span.count());
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
// print_matrix_cuda(handle.nrow, handle.ncol, handle.nfib, dv, lddv1, lddv2,
// handle.nfib);
// printf("sync_all 2\n");
handle.sync_all();
handle.free_workspace();
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
/**** refactoring ****/
// outsize = (handle.linearized_depth * handle.dofs[1][0] * handle.dofs[0][0])
// * sizeof(T); unsigned char *buffer = (unsigned char *)malloc(outsize);
// cudaMemcpy3DAsyncHelper(
// handle,
// buffer, handle.dofs[0][0] * sizeof(T), handle.dofs[0][0] * sizeof(T),
// handle.dofs[1][0], dv, lddv1 * sizeof(T), handle.dofs[0][0] *
// sizeof(T), handle.dofs[1][0], handle.dofs[0][0] * sizeof(T),
// handle.dofs[1][0], handle.linearized_depth, D2H, 0);
// Quantization
bool huffman = true;
int dict_size = handle.huff_dict_size, block_size = handle.huff_block_size;
size_t quantized_count =
handle.dofs[0][0] * handle.dofs[1][0] * handle.linearized_depth;
int *dqv;
cudaMallocHelper((void **)&dqv, (handle.dofs[0][0] * handle.dofs[1][0] *
handle.linearized_depth) *
sizeof(int));
thrust::device_vector<int> ldqvs(handle.D_padded);
ldqvs[0] = handle.dofs[0][0];
for (int i = 1; i < handle.D_padded; i++) {
ldqvs[i] = handle.dofs[i][0];
}
t1 = high_resolution_clock::now();
int *hshapes = new int[D * (handle.l_target + 2)];
for (int d = 0; d < D; d++) {
hshapes[d * (handle.l_target + 2)] = 0;
for (int l = 1; l < handle.l_target + 2; l++) {
hshapes[d * (handle.l_target + 2) + l] =
handle.dofs[d][handle.l_target + 1 - l];
}
// printf("hshapes[%d]: ", d);
// for (int l = 0; l < handle.l_target+2; l++) { printf("%d ", hshapes[d *
// (handle.l_target+2)+l]); } printf("\n");
}
int *dshapes;
cudaMallocHelper((void **)&dshapes, D * (handle.l_target + 2) * sizeof(int));
cudaMemcpyAsyncHelper(handle, dshapes, hshapes,
D * (handle.l_target + 2) * sizeof(int), H2D, 0);
size_t estimate_outlier_count = (double)handle.dofs[0][0] *
handle.dofs[1][0] * handle.linearized_depth *
1;
// printf("estimate_outlier_count: %llu\n", estimate_outlier_count);
size_t *outlier_count_d;
unsigned int *outlier_idx_d;
int *outliers;
cudaMallocHelper((void **)&outliers, estimate_outlier_count * sizeof(int));
cudaMallocHelper((void **)&outlier_count_d, sizeof(size_t));
cudaMallocHelper((void **)&outlier_idx_d,
estimate_outlier_count * sizeof(unsigned int));
size_t zero = 0, outlier_count, *outlier_idx_h;
cudaMemcpyAsyncHelper(handle, outlier_count_d, &zero, sizeof(size_t), H2D, 0);
quant_meta<T> m;
m.norm = norm;
m.s = s;
m.tol = tol;
m.dict_size = dict_size;
m.enable_lz4 = handle.enable_lz4;
m.l_target = handle.l_target;
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
levelwise_linear_quantize<D, T>(
handle, dshapes, handle.l_target, m, in_array.get_dv(),
in_array.get_ldvs_d(), dqv, thrust::raw_pointer_cast(ldqvs.data()),
huffman, handle.shapes_d[0], outlier_count_d, outlier_idx_d, outliers, 0);
cudaMemcpyAsyncHelper(handle, &outlier_count, outlier_count_d, sizeof(size_t),
D2H, 0);
// printf("outlier_count: %llu\n", outlier_count);
// printf("dqv\n");
  // print_matrix_cuda(1, quantized_count, dqv, quantized_count);
// printf("outlier_idx_d\n");
// print_matrix_cuda(1, outlier_count, outlier_idx_d, quantized_count);
// printf("outliers\n");
// print_matrix_cuda(1, outlier_count, outliers, quantized_count);
std::vector<size_t> outlier_idx;
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("Quantization time: %.6f s\n", time_span.count());
// cudaFreeHelper(dv);
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
// Huffman compression
t1 = high_resolution_clock::now();
uint64_t *hufmeta;
uint64_t *hufdata;
size_t hufmeta_size;
size_t hufdata_size;
huffman_compress<D, T, int, uint32_t, uint64_t>(
handle, dqv, quantized_count, outlier_idx, hufmeta, hufmeta_size, hufdata,
hufdata_size, block_size, dict_size, 0);
// printf("sync_all 3\n");
handle.sync_all();
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("Parallel Huffman time: %.6f s\n", time_span.count());
cudaFreeHelper(dqv);
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
// LZ4 compression
void *lz4_hufmeta;
size_t lz4_hufmeta_size;
void *lz4_hufdata;
size_t lz4_hufdata_size;
if (handle.enable_lz4) {
t1 = high_resolution_clock::now();
lz4_compress(handle, hufdata, hufdata_size / sizeof(uint64_t), lz4_hufdata,
lz4_hufdata_size, handle.lz4_block_size, 0);
// printf("sync_all 4\n");
handle.sync_all();
cudaFreeHelper(hufdata);
hufdata = (uint64_t *)lz4_hufdata;
hufdata_size = lz4_hufdata_size;
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("NVComp::LZ4 time: %.6f s\n", time_span.count());
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
}
end = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(end - start);
// printf("Overall compression time: %.6f s (%.6f GB/s)\n", time_span.count(),
// (double)(handle.dofs[0][0] * handle.dofs[1][0] *handle.linearized_depth
// *sizeof(T))/time_span.count()/1e9);
  // Output serialization
t1 = high_resolution_clock::now();
size_t outsize = 0;
outsize += sizeof(quant_meta<T>);
outsize += sizeof(size_t) + outlier_count * sizeof(size_t) +
outlier_count * sizeof(int);
outsize += sizeof(size_t) + hufmeta_size;
outsize += sizeof(size_t) + hufdata_size;
std::vector<size_t> out_shape(1);
out_shape[0] = outsize;
Array<1, unsigned char> compressed_array(out_shape);
unsigned char *buffer = compressed_array.get_dv();
// cudaMallocHostHelper((void**)&buffer, outsize);
// else cudaMallocHelper((void**)&buffer, outsize);
// unsigned char *buffer = (unsigned char *)malloc(outsize);
void *buffer_p = (void *)buffer;
// memcpy(buffer_p, &m, sizeof(quant_meta<T>));
cudaMemcpyAsyncHelper(handle, buffer_p, &m, sizeof(quant_meta<T>), AUTO, 0);
buffer_p = buffer_p + sizeof(quant_meta<T>);
cudaMemcpyAsyncHelper(handle, buffer_p, outlier_count_d, sizeof(size_t), AUTO,
0);
buffer_p = buffer_p + sizeof(size_t);
cudaMemcpyAsyncHelper(handle, buffer_p, outlier_idx_d,
outlier_count * sizeof(unsigned int), AUTO, 0);
buffer_p = buffer_p + outlier_count * sizeof(unsigned int);
cudaMemcpyAsyncHelper(handle, buffer_p, outliers, outlier_count * sizeof(int),
AUTO, 0);
buffer_p = buffer_p + outlier_count * sizeof(int);
// memcpy(buffer_p, &hufmeta_size, sizeof(size_t));
cudaMemcpyAsyncHelper(handle, buffer_p, &hufmeta_size, sizeof(size_t), AUTO,
0);
buffer_p = buffer_p + sizeof(size_t);
cudaMemcpyAsyncHelper(handle, buffer_p, hufmeta, hufmeta_size, AUTO, 0);
buffer_p = buffer_p + hufmeta_size;
// memcpy(buffer_p, &lz4_hufmeta_size, sizeof(size_t));
// buffer_p = buffer_p + sizeof(size_t);
// memcpy(buffer_p, &hufdata_size, sizeof(size_t));
cudaMemcpyAsyncHelper(handle, buffer_p, &hufdata_size, sizeof(size_t), AUTO,
0);
buffer_p = buffer_p + sizeof(size_t);
// cudaMemcpyAsyncHelper(handle, buffer_p, lz4_hufmeta, lz4_hufmeta_size, D2H,
// 0); buffer_p = buffer_p + lz4_hufmeta_size;
cudaMemcpyAsyncHelper(handle, buffer_p, hufdata, hufdata_size, AUTO, 0);
buffer_p = buffer_p + hufdata_size;
// printf("sync_all 5\n");
handle.sync_all();
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
  // printf("serialization time: %.6f\n", time_span.count());
cudaFreeHelper(outlier_count_d);
cudaFreeHelper(outlier_idx_d);
cudaFreeHelper(outliers);
cudaFreeHelper(hufmeta);
cudaFreeHelper(hufdata);
cudaMemGetInfo(&free, &total);
// printf("Mem: %f/%f\n", (double)(total - free) / 1e9, (double)total / 1e9);
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
return compressed_array;
}
template <uint32_t D, typename T>
Array<D, T> decompress(Handle<D, T> &handle,
Array<1, unsigned char> &compressed_array) {
cudaSetDeviceHelper(handle.dev_id);
high_resolution_clock::time_point t1, t2, start, end;
duration<double> time_span;
size_t free, total;
quant_meta<T> m;
size_t outlier_count;
unsigned int *outlier_idx_d;
int *outliers;
void *lz4_hufmeta;
size_t lz4_hufmeta_size;
void *lz4_hufdata;
size_t lz4_hufdata_size;
uint8_t *hufmeta;
uint64_t *hufdata;
size_t hufmeta_size;
size_t hufdata_size;
size_t outsize;
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
void *data_p = compressed_array.get_dv(); //(void *)data;
cudaMemcpyAsyncHelper(handle, &m, data_p, sizeof(quant_meta<T>), AUTO, 0);
data_p = data_p + sizeof(quant_meta<T>);
cudaMemcpyAsyncHelper(handle, &outlier_count, data_p, sizeof(size_t), AUTO,
0);
data_p = data_p + sizeof(size_t);
handle.sync(0);
cudaMallocHelper((void **)&outlier_idx_d,
outlier_count * sizeof(unsigned int));
cudaMemcpyAsyncHelper(handle, outlier_idx_d, data_p,
outlier_count * sizeof(unsigned int), AUTO, 0);
data_p = data_p + outlier_count * sizeof(unsigned int);
cudaMallocHelper((void **)&outliers, outlier_count * sizeof(int));
cudaMemcpyAsyncHelper(handle, outliers, data_p, outlier_count * sizeof(int),
AUTO, 0);
data_p = data_p + outlier_count * sizeof(int);
cudaMemcpyAsyncHelper(handle, &hufmeta_size, data_p, sizeof(size_t), AUTO, 0);
data_p = data_p + sizeof(size_t);
handle.sync(0);
cudaMallocHelper((void **)&hufmeta, hufmeta_size);
cudaMemcpyAsyncHelper(handle, hufmeta, data_p, hufmeta_size, AUTO, 0);
data_p = data_p + hufmeta_size;
cudaMemcpyAsyncHelper(handle, &hufdata_size, data_p, sizeof(size_t), AUTO, 0);
data_p = data_p + sizeof(size_t);
handle.sync(0);
cudaMallocHelper((void **)&hufdata, hufdata_size);
cudaMemcpyAsyncHelper(handle, hufdata, data_p, hufdata_size, H2D, 0);
data_p = data_p + hufdata_size;
handle.sync(0);
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
start = high_resolution_clock::now();
if (m.enable_lz4) {
if (!handle.enable_lz4)
      printf("Warning: This data was compressed with LZ4, but the handle is "
             "configured to disable LZ4!\n");
t1 = high_resolution_clock::now();
uint64_t *lz4_decompressed_hufdata;
size_t lz4_decompressed_hufdata_size;
lz4_decompress(handle, (void *)hufdata, hufdata_size,
lz4_decompressed_hufdata, lz4_decompressed_hufdata_size, 0);
// printf("sync_all 6\n");
handle.sync_all();
cudaFreeHelper(hufdata);
hufdata = lz4_decompressed_hufdata;
hufdata_size = lz4_decompressed_hufdata_size;
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("NVComp::LZ4 time: %.6f s \n", time_span.count());
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
}
size_t quantized_count =
handle.dofs[0][0] * handle.dofs[1][0] * handle.linearized_depth;
int *dqv;
t1 = high_resolution_clock::now();
huffman_decompress<D, T, int, uint32_t, uint64_t>(
handle, (uint64_t *)hufmeta, hufmeta_size, hufdata, hufdata_size, dqv,
outsize, 0);
// printf("sync_all 7\n");
handle.sync_all();
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("Parallel Huffman time: %.6f s\n", time_span.count());
cudaFreeHelper(hufmeta);
cudaFreeHelper(hufdata);
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
thrust::device_vector<int> ldqvs(handle.D_padded);
ldqvs[0] = handle.dofs[0][0];
for (int i = 1; i < handle.D_padded; i++) {
ldqvs[i] = handle.dofs[i][0];
}
thrust::device_vector<int> shape(handle.D_padded);
for (int d = 0; d < handle.D_padded; d++) {
shape[d] = handle.dofs[d][0];
}
// T *dv;
// size_t dv_pitch;
// cudaMalloc3DHelper((void **)&dv, &dv_pitch, handle.dofs[0][0] * sizeof(T),
// handle.dofs[1][0], handle.linearized_depth);
// int lddv1 = dv_pitch / sizeof(T);
// int lddv2 = handle.dofs[1][0];
// thrust::device_vector<int> ldvs(handle.D_padded);
// ldvs[0] = lddv1;
// for (int i = 1; i < handle.D_padded; i++) { ldvs[i] = handle.dofs[i][0]; }
// std::vector<int> ldvs_h(handle.D_padded);
// ldvs_h[0] = lddv1;
// for (int i = 1; i < handle.D_padded; i++) { ldvs_h[i] = handle.dofs[i][0];
// } int * ldvs_d; cudaMallocHelper((void **)&ldvs_d, handle.D_padded *
// sizeof(int)); cudaMemcpyAsyncHelper(handle, ldvs_d, ldvs_h.data(),
// handle.D_padded * sizeof(int), H2D, 0);
std::vector<size_t> decompressed_shape(D);
for (int i = 0; i < D; i++)
decompressed_shape[i] = handle.shapes_h[0][i];
std::reverse(decompressed_shape.begin(), decompressed_shape.end());
Array<D, T> decompressed_data(decompressed_shape);
int *hshapes = new int[D * (handle.l_target + 2)];
for (int d = 0; d < D; d++) {
hshapes[d * (handle.l_target + 2)] = 0;
for (int l = 1; l < handle.l_target + 2; l++) {
hshapes[d * (handle.l_target + 2) + l] =
handle.dofs[d][handle.l_target + 1 - l];
}
// printf("hshapes[%d]: ", d);
// for (int l = 0; l < handle.l_target+2; l++) { printf("%d ", hshapes[d *
// (handle.l_target+2)+l]); } printf("\n");
}
int *dshapes;
cudaMallocHelper((void **)&dshapes, D * (handle.l_target + 2) * sizeof(int));
cudaMemcpyAsyncHelper(handle, dshapes, hshapes,
D * (handle.l_target + 2) * sizeof(int), H2D, 0);
// printf("sync_all 7.5\n");
handle.sync_all();
t1 = high_resolution_clock::now();
levelwise_linear_dequantize<D, T>(handle, dshapes, handle.l_target, m, dqv,
thrust::raw_pointer_cast(ldqvs.data()),
decompressed_data.get_dv(),
decompressed_data.get_ldvs_d(),
outlier_count, outlier_idx_d, outliers, 0);
// printf("sync_all 8\n");
handle.sync_all();
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("Dequantization time: %.6f s\n", time_span.count());
cudaFreeHelper(dqv);
cudaFreeHelper(outlier_idx_d);
cudaFreeHelper(outliers);
cudaMemGetInfo(&free, &total);
// printf("Mem: %f/%f\n", (double)(total-free)/1e9, (double)total/1e9);
// printf("dv:\n");
  // print_matrix_cuda(1, quantized_count, dv, quantized_count);
/**** refactoring ****/
// cudaMemcpy3DAsyncHelper( handle,
// dv, lddv1 * sizeof(T), handle.dofs[0][0] * sizeof(T), handle.dofs[1][0],
// data, handle.dofs[0][0] * sizeof(T), handle.dofs[0][0] * sizeof(T),
// handle.dofs[1][0], handle.dofs[0][0] * sizeof(T), handle.dofs[1][0],
// handle.linearized_depth, H2D, 0);
handle.allocate_workspace();
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
t1 = high_resolution_clock::now();
recompose<D, T>(handle, decompressed_data.get_dv(),
decompressed_data.get_ldvs_h(), m.l_target);
// printf("sync_all 9\n");
handle.sync_all();
t2 = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(t2 - t1);
// printf("Recomposition time: %.6f s\n", time_span.count());
handle.free_workspace();
// printf("sync_all 10\n");
handle.sync_all();
end = high_resolution_clock::now();
time_span = duration_cast<duration<double>>(end - start);
// printf("Overall decompression time: %.6f s (%.6f GB/s)\n",
// time_span.count(), (double)(handle.dofs[0][0] * handle.dofs[1][0]
// *handle.linearized_depth *sizeof(T))/time_span.count()/1e9);
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
// T *v;
// cudaMallocHostHelper((void **)&v, handle.dofs[0][0] * handle.dofs[1][0] *
// handle.linearized_depth * sizeof(T));
// // = (T *)malloc(handle.dofs[0][0] * handle.dofs[1][0] *
// handle.linearized_depth * sizeof(T));
// cudaMemcpy3DAsyncHelper(
// handle, v, handle.dofs[0][0] * sizeof(T), handle.dofs[0][0] *
// sizeof(T), handle.dofs[1][0], dv, lddv1 * sizeof(T), handle.dofs[0][0]
// * sizeof(T), handle.dofs[1][0], handle.dofs[0][0] * sizeof(T),
// handle.dofs[1][0], handle.linearized_depth, D2H, 0);
// cudaFreeHelper(dv);
// cudaMemGetInfo(&free, &total); printf("Mem: %f/%f\n",
// (double)(total-free)/1e9, (double)total/1e9);
return decompressed_data;
}
#define KERNELS(D, T) \
template Array<1, unsigned char> compress<D, T>( \
Handle<D, T> & handle, Array<D, T> & in_array, \
enum error_bound_type type, T tol, T s); \
template Array<D, T> decompress<D, T>( \
Handle<D, T> & handle, Array<1, unsigned char> & compressed_array);
KERNELS(1, double)
KERNELS(1, float)
KERNELS(2, double)
KERNELS(2, float)
KERNELS(3, double)
KERNELS(3, float)
KERNELS(4, double)
KERNELS(4, float)
KERNELS(5, double)
KERNELS(5, float)
#undef KERNELS
} // namespace mgard_cuda
|
bf80ca1dba70e69d5ca964fb70de7c5b931dd205.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Kernels.cuh"
#include "stdio.h"
__global__ void determineNextState(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
    //getting coordinates of the thread
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//adjusting the pitch
size_t pitchOldAdjusted = pitchOld / sizeof(int);
size_t pitchNewAdjusted = pitchNew / sizeof(int);
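    //newBoard is expected to hold the neighbour counts written by numberAliveAround; it is overwritten in place with the next cell states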
if (x < rows && y < columns)
{
int idxNew = y * pitchNewAdjusted + x;
int idxOld = y * pitchOldAdjusted + x;
        //remembering the old state
int state = board[idxOld];
int output = DEAD;
//checking if any alive condition is met
if (state == ALIVE)
{
if ((newBoard[idxNew] == 2 || newBoard[idxNew] == 3))
{
output = ALIVE;
}
}
else
{
if (newBoard[idxNew] == 3)
{
output = ALIVE;
}
}
newBoard[idxNew] = output;
}
}
__global__ void numberAliveAround(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
//calculating the thread we are on
int row = (blockIdx.x * blockDim.x) + threadIdx.x;
int column = (blockIdx.y * blockDim.y) + threadIdx.y;
    //adjusting pitch, because it's the amount of bytes and not the integer array width
size_t pitchOldAdjusted = pitchOld / sizeof(int);
size_t pitchNewAdjusted = pitchNew / sizeof(int);
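    //neighbour coordinates wrap modulo rows/columns, so the board behaves as a torus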
if (row < rows && column < columns)
{
int outputNumber = 0;
int idx = 0, xMod = 0, yMod = 0;
//over
yMod = (column - 1 + columns) % columns;
idx = yMod * pitchOldAdjusted + row;
outputNumber += board[idx];
//under
yMod = (column + 1) % columns;
idx = yMod * pitchOldAdjusted + row;
outputNumber += board[idx];
//right
xMod = (row + 1) % rows;
idx = column * pitchOldAdjusted + xMod;
outputNumber += board[idx];
//left
xMod = ((row - 1) + rows) % rows;
idx = column * pitchOldAdjusted + xMod;
outputNumber += board[idx];
//right bottom corner
xMod = (row + 1) % rows;
yMod = (column + 1) % columns;
idx = yMod * pitchOldAdjusted + xMod;
outputNumber += board[idx];
//left bottom corner
xMod = (row - 1 + rows) % rows;
yMod = (column + 1) % columns;
idx = yMod * pitchOldAdjusted + xMod;
outputNumber += board[idx];
//right upper corner
xMod = (row + 1) % rows;
yMod = (column - 1 + columns) % columns;
idx = yMod * pitchOldAdjusted + xMod;
outputNumber += board[idx];
//left upper corner
xMod = (row - 1 + rows) % rows;
yMod = (column - 1 + columns) % columns;
idx = yMod * pitchOldAdjusted + xMod;
outputNumber += board[idx];
newBoard[column * pitchNewAdjusted + row] = outputNumber;
}
}
__global__ void multiplyMatrix(int *matrixA, int *matrixB, int *matrixC, int rows, int cols, size_t pitchA, size_t pitchB, size_t pitchC)
{
int row = (blockIdx.x * blockDim.x) + threadIdx.x;
int column = (blockIdx.y * blockDim.y) + threadIdx.y;
    //adjusting pitch, because it's the amount of bytes and not the integer array width
size_t pitchAAdjusted = pitchA / sizeof(int);
size_t pitchBAdjusted = pitchB / sizeof(int);
size_t pitchCAdjusted = pitchC / sizeof(int);
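    //accumulates A*B into matrixC (assumes matrixC starts zeroed); the pitch of A in ints is used as the shared inner dimension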
if (row < rows && column < cols)
{
for (int i = 0; i < pitchAAdjusted; i++)
{
matrixC[column * pitchCAdjusted + row] += matrixA[i * pitchAAdjusted + row] * matrixB[column * pitchBAdjusted + i];
}
}
}
__global__ void determineNextStateOffset(int *board, int *newBoard, int rows, int columns, pitchesBoard pitches, int offset)
{
    //getting coordinates of the thread
    //offset because these are being called in small batches
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//adjusting the pitch
size_t pitchOldAdjusted = pitches.pitchOld / sizeof(int);
size_t pitchNewAdjusted = pitches.pitchNew / sizeof(int);
if (x < rows && y < columns)
{
int idxNew = y * pitchNewAdjusted + x + offset;
int idxOld = y * pitchOldAdjusted + x + offset;
        //remembering the old state
int state = board[idxOld];
int output = DEAD;
//checking if any alive condition is met
if (state == ALIVE)
{
if ((newBoard[idxNew] == 2 || newBoard[idxNew] == 3))
{
output = ALIVE;
}
}
else
{
if (newBoard[idxNew] == 3)
{
output = ALIVE;
}
}
newBoard[idxNew] = output;
}
}
__global__ void numberAliveAroundOffset(int *board, int *newBoard, int rows, int columns, pitchesBoard pitches, int offset)
{
//calculating the thread we are on
int row = (blockIdx.x * blockDim.x) + threadIdx.x;
int column = (blockIdx.y * blockDim.y) + threadIdx.y;
    //adjusting pitch, because it's the amount of bytes and not the integer array width
size_t pitchOldAdjusted = pitches.pitchOld / sizeof(int);
size_t pitchNewAdjusted = pitches.pitchNew / sizeof(int);
if (row < rows && column < columns)
{
int outputNumber = 0;
int idx = 0, xMod = 0, yMod = 0;
//over
yMod = (column - 1 + columns) % columns;
idx = yMod * pitchOldAdjusted + row;
outputNumber += board[idx + offset];
//under
yMod = (column + 1) % columns;
idx = yMod * pitchOldAdjusted + row;
outputNumber += board[idx + offset];
//right
xMod = (row + 1) % rows;
idx = column * pitchOldAdjusted + xMod;
outputNumber += board[idx + offset];
//left
xMod = ((row - 1) + rows) % rows;
idx = column * pitchOldAdjusted + xMod;
outputNumber += board[idx + offset];
//right bottom corner
xMod = (row + 1) % rows;
yMod = (column + 1) % columns;
idx = yMod * pitchOldAdjusted + xMod;
outputNumber += board[idx + offset];
//left bottom corner
xMod = (row - 1 + rows) % rows;
yMod = (column + 1) % columns;
idx = yMod * pitchOldAdjusted + xMod;
outputNumber += board[idx + offset];
//right upper corner
xMod = (row + 1) % rows;
yMod = (column - 1 + columns) % columns;
idx = yMod * pitchOldAdjusted + xMod;
outputNumber += board[idx + offset];
//left upper corner
xMod = (row - 1 + rows) % rows;
yMod = (column - 1 + columns) % columns;
idx = yMod * pitchOldAdjusted + xMod;
outputNumber += board[idx + offset];
newBoard[column * pitchNewAdjusted + row + offset] = outputNumber;
}
}
__global__ void multiplyMatrixOffset(int *matrixA, int *matrixB, int *matrixC, int rows, int cols, pitchesMatrix pitches, int offset)
{
int row = (blockIdx.x * blockDim.x) + threadIdx.x;
int column = (blockIdx.y * blockDim.y) + threadIdx.y;
    //adjusting pitch, because it's the amount of bytes and not the integer array width
size_t pitchAAdjusted = pitches.pitchMA / sizeof(int);
size_t pitchBAdjusted = pitches.pitchMB / sizeof(int);
size_t pitchCAdjusted = pitches.pitchMC / sizeof(int);
if (column < cols && row < rows)
{
for (int i = 0; i < pitchAAdjusted; i++)
{
matrixC[column * pitchCAdjusted + row + offset] += matrixA[i * pitchAAdjusted + row + offset] * matrixB[column * pitchBAdjusted + i + offset];
}
}
}
| bf80ca1dba70e69d5ca964fb70de7c5b931dd205.cu | #include "Kernels.cuh"
#include "stdio.h"
__global__ void determineNextState(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
    //getting coordinates of the thread
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//adjusting the pitch
size_t pitchOldAdjusted = pitchOld / sizeof(int);
size_t pitchNewAdjusted = pitchNew / sizeof(int);
if (x < rows && y < columns)
{
int idxNew = y * pitchNewAdjusted + x;
int idxOld = y * pitchOldAdjusted + x;
        //remembering the old state
int state = board[idxOld];
int output = DEAD;
//checking if any alive condition is met
if (state == ALIVE)
{
if ((newBoard[idxNew] == 2 || newBoard[idxNew] == 3))
{
output = ALIVE;
}
}
else
{
if (newBoard[idxNew] == 3)
{
output = ALIVE;
}
}
newBoard[idxNew] = output;
}
}
__global__ void numberAliveAround(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
//calculating the thread we are on
int row = (blockIdx.x * blockDim.x) + threadIdx.x;
int column = (blockIdx.y * blockDim.y) + threadIdx.y;
    //adjusting pitch, because it's the amount of bytes and not the integer array width
size_t pitchOldAdjusted = pitchOld / sizeof(int);
size_t pitchNewAdjusted = pitchNew / sizeof(int);
if (row < rows && column < columns)
{
int outputNumber = 0;
int idx = 0, xMod = 0, yMod = 0;
//over
yMod = (column - 1 + columns) % columns;
idx = yMod * pitchOldAdjusted + row;
outputNumber += board[idx];
//under
yMod = (column + 1) % columns;
idx = yMod * pitchOldAdjusted + row;
outputNumber += board[idx];
//right
xMod = (row + 1) % rows;
idx = column * pitchOldAdjusted + xMod;
outputNumber += board[idx];
//left
xMod = ((row - 1) + rows) % rows;
idx = column * pitchOldAdjusted + xMod;
outputNumber += board[idx];
//right bottom corner
xMod = (row + 1) % rows;
yMod = (column + 1) % columns;
idx = yMod * pitchOldAdjusted + xMod;
outputNumber += board[idx];
//left bottom corner
xMod = (row - 1 + rows) % rows;
yMod = (column + 1) % columns;
idx = yMod * pitchOldAdjusted + xMod;
outputNumber += board[idx];
//right upper corner
xMod = (row + 1) % rows;
yMod = (column - 1 + columns) % columns;
idx = yMod * pitchOldAdjusted + xMod;
outputNumber += board[idx];
//left upper corner
xMod = (row - 1 + rows) % rows;
yMod = (column - 1 + columns) % columns;
idx = yMod * pitchOldAdjusted + xMod;
outputNumber += board[idx];
newBoard[column * pitchNewAdjusted + row] = outputNumber;
}
}
__global__ void multiplyMatrix(int *matrixA, int *matrixB, int *matrixC, int rows, int cols, size_t pitchA, size_t pitchB, size_t pitchC)
{
int row = (blockIdx.x * blockDim.x) + threadIdx.x;
int column = (blockIdx.y * blockDim.y) + threadIdx.y;
    //adjusting pitch, because it's the amount of bytes and not the integer array width
size_t pitchAAdjusted = pitchA / sizeof(int);
size_t pitchBAdjusted = pitchB / sizeof(int);
size_t pitchCAdjusted = pitchC / sizeof(int);
if (row < rows && column < cols)
{
for (int i = 0; i < pitchAAdjusted; i++)
{
matrixC[column * pitchCAdjusted + row] += matrixA[i * pitchAAdjusted + row] * matrixB[column * pitchBAdjusted + i];
}
}
}
__global__ void determineNextStateOffset(int *board, int *newBoard, int rows, int columns, pitchesBoard pitches, int offset)
{
    //getting coordinates of the thread
    //offset because these are being called in small batches
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//adjusting the pitch
size_t pitchOldAdjusted = pitches.pitchOld / sizeof(int);
size_t pitchNewAdjusted = pitches.pitchNew / sizeof(int);
if (x < rows && y < columns)
{
int idxNew = y * pitchNewAdjusted + x + offset;
int idxOld = y * pitchOldAdjusted + x + offset;
        //remembering the old state
int state = board[idxOld];
int output = DEAD;
//checking if any alive condition is met
if (state == ALIVE)
{
if ((newBoard[idxNew] == 2 || newBoard[idxNew] == 3))
{
output = ALIVE;
}
}
else
{
if (newBoard[idxNew] == 3)
{
output = ALIVE;
}
}
newBoard[idxNew] = output;
}
}
__global__ void numberAliveAroundOffset(int *board, int *newBoard, int rows, int columns, pitchesBoard pitches, int offset)
{
//calculating the thread we are on
int row = (blockIdx.x * blockDim.x) + threadIdx.x;
int column = (blockIdx.y * blockDim.y) + threadIdx.y;
    //adjusting pitch, because it's the amount of bytes and not the integer array width
size_t pitchOldAdjusted = pitches.pitchOld / sizeof(int);
size_t pitchNewAdjusted = pitches.pitchNew / sizeof(int);
if (row < rows && column < columns)
{
int outputNumber = 0;
int idx = 0, xMod = 0, yMod = 0;
//over
yMod = (column - 1 + columns) % columns;
idx = yMod * pitchOldAdjusted + row;
outputNumber += board[idx + offset];
//under
yMod = (column + 1) % columns;
idx = yMod * pitchOldAdjusted + row;
outputNumber += board[idx + offset];
//right
xMod = (row + 1) % rows;
idx = column * pitchOldAdjusted + xMod;
outputNumber += board[idx + offset];
//left
xMod = ((row - 1) + rows) % rows;
idx = column * pitchOldAdjusted + xMod;
outputNumber += board[idx + offset];
//right bottom corner
xMod = (row + 1) % rows;
yMod = (column + 1) % columns;
idx = yMod * pitchOldAdjusted + xMod;
outputNumber += board[idx + offset];
//left bottom corner
xMod = (row - 1 + rows) % rows;
yMod = (column + 1) % columns;
idx = yMod * pitchOldAdjusted + xMod;
outputNumber += board[idx + offset];
//right upper corner
xMod = (row + 1) % rows;
yMod = (column - 1 + columns) % columns;
idx = yMod * pitchOldAdjusted + xMod;
outputNumber += board[idx + offset];
//left upper corner
xMod = (row - 1 + rows) % rows;
yMod = (column - 1 + columns) % columns;
idx = yMod * pitchOldAdjusted + xMod;
outputNumber += board[idx + offset];
newBoard[column * pitchNewAdjusted + row + offset] = outputNumber;
}
}
__global__ void multiplyMatrixOffset(int *matrixA, int *matrixB, int *matrixC, int rows, int cols, pitchesMatrix pitches, int offset)
{
int row = (blockIdx.x * blockDim.x) + threadIdx.x;
int column = (blockIdx.y * blockDim.y) + threadIdx.y;
    //adjusting pitch, because it's the amount of bytes and not the integer array width
size_t pitchAAdjusted = pitches.pitchMA / sizeof(int);
size_t pitchBAdjusted = pitches.pitchMB / sizeof(int);
size_t pitchCAdjusted = pitches.pitchMC / sizeof(int);
if (column < cols && row < rows)
{
for (int i = 0; i < pitchAAdjusted; i++)
{
matrixC[column * pitchCAdjusted + row + offset] += matrixA[i * pitchAAdjusted + row + offset] * matrixB[column * pitchBAdjusted + i + offset];
}
}
}
|
94dc74562298e22899bc0f6c532f468f7c08db43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "grayscale.h"
#include <stdio.h>
__global__ void _im2gray(const uint8_t* __restrict__ data, int size, float* __restrict__ output) {
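    // one thread per element: normalize each 8-bit value to a float in [0, 1]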
const size_t ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind<size)output[ind]= data[ind]/255.0;
}
void im2gray(uint8_t* data, int size, float* output,dim3 gridSize, dim3 blockSize) {
hipLaunchKernelGGL(( _im2gray), dim3(gridSize),dim3(blockSize), 0, 0, data,size,output);
}
| 94dc74562298e22899bc0f6c532f468f7c08db43.cu | #include "grayscale.h"
#include <stdio.h>
__global__ void _im2gray(const uint8_t* __restrict__ data, int size, float* __restrict__ output) {
const size_t ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind<size)output[ind]= data[ind]/255.0;
}
void im2gray(uint8_t* data, int size, float* output,dim3 gridSize, dim3 blockSize) {
_im2gray<<<gridSize,blockSize>>>(data,size,output);
}
|
8072f6d2f6b563815113207fbb4d4eb6db605328.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdint.h>
#include <hip/hip_runtime.h>
typedef unsigned char BYTE; // 8-bit byte
typedef unsigned int WORD; // 32-bit word, change to "long" for 16-bit machines
typedef struct {
BYTE data[64];
WORD datalen;
unsigned long long bitlen;
WORD state[5];
WORD k[4];
} SHA1_CTX;
#define ROTLEFT(a, b) ((a << b) | (a >> (32 - b)))
#define SHA1_BLOCK_SIZE 20 // SHA1 outputs a 20 byte digest
#define PAGE_SIZE 4096
__device__ void sha1_init(SHA1_CTX *ctx)
{
ctx->datalen = 0;
ctx->bitlen = 0;
ctx->state[0] = 0x67452301;
ctx->state[1] = 0xEFCDAB89;
ctx->state[2] = 0x98BADCFE;
ctx->state[3] = 0x10325476;
ctx->state[4] = 0xc3d2e1f0;
ctx->k[0] = 0x5a827999;
ctx->k[1] = 0x6ed9eba1;
ctx->k[2] = 0x8f1bbcdc;
ctx->k[3] = 0xca62c1d6;
}
__device__ void sha1_transform(SHA1_CTX *ctx, const BYTE data[])
{
WORD a, b, c, d, e, i, j, t, m[80];
for (i = 0, j = 0; i < 16; ++i, j += 4)
m[i] = (data[j] << 24) + (data[j + 1] << 16) + (data[j + 2] << 8) + (data[j + 3]);
for ( ; i < 80; ++i) {
m[i] = (m[i - 3] ^ m[i - 8] ^ m[i - 14] ^ m[i - 16]);
m[i] = (m[i] << 1) | (m[i] >> 31);
}
a = ctx->state[0];
b = ctx->state[1];
c = ctx->state[2];
d = ctx->state[3];
e = ctx->state[4];
for (i = 0; i < 20; ++i) {
t = ROTLEFT(a, 5) + ((b & c) ^ (~b & d)) + e + ctx->k[0] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
for ( ; i < 40; ++i) {
t = ROTLEFT(a, 5) + (b ^ c ^ d) + e + ctx->k[1] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
for ( ; i < 60; ++i) {
t = ROTLEFT(a, 5) + ((b & c) ^ (b & d) ^ (c & d)) + e + ctx->k[2] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
for ( ; i < 80; ++i) {
t = ROTLEFT(a, 5) + (b ^ c ^ d) + e + ctx->k[3] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
ctx->state[0] += a;
ctx->state[1] += b;
ctx->state[2] += c;
ctx->state[3] += d;
ctx->state[4] += e;
}
__device__ void sha1_update(SHA1_CTX *ctx, const BYTE data[], size_t len)
{
size_t i;
for (i = 0; i < len; ++i) {
ctx->data[ctx->datalen] = data[i];
ctx->datalen++;
if (ctx->datalen == 64) {
sha1_transform(ctx, ctx->data);
ctx->bitlen += 512;
ctx->datalen = 0;
}
}
}
__device__ void sha1_final(SHA1_CTX *ctx, BYTE hash[])
{
WORD i;
i = ctx->datalen;
// Pad whatever data is left in the buffer.
if (ctx->datalen < 56) {
ctx->data[i++] = 0x80;
while (i < 56)
ctx->data[i++] = 0x00;
}
else {
ctx->data[i++] = 0x80;
while (i < 64)
ctx->data[i++] = 0x00;
sha1_transform(ctx, ctx->data);
while (i < 7)
ctx->data[i++] = 0x00;
}
// Append to the padding the total message's length in bits and transform.
ctx->bitlen += ctx->datalen * 8;
ctx->data[63] = ctx->bitlen;
ctx->data[62] = ctx->bitlen >> 8;
ctx->data[61] = ctx->bitlen >> 16;
ctx->data[60] = ctx->bitlen >> 24;
ctx->data[59] = ctx->bitlen >> 32;
ctx->data[58] = ctx->bitlen >> 40;
ctx->data[57] = ctx->bitlen >> 48;
ctx->data[56] = ctx->bitlen >> 56;
sha1_transform(ctx, ctx->data);
// Since this implementation uses little endian byte ordering and MD uses big endian,
// reverse all the bytes when copying the final state to the output hash.
for (i = 0; i < 4; ++i) {
hash[i] = (ctx->state[0] >> (24 - i * 8)) & 0x000000ff;
hash[i + 4] = (ctx->state[1] >> (24 - i * 8)) & 0x000000ff;
hash[i + 8] = (ctx->state[2] >> (24 - i * 8)) & 0x000000ff;
hash[i + 12] = (ctx->state[3] >> (24 - i * 8)) & 0x000000ff;
hash[i + 16] = (ctx->state[4] >> (24 - i * 8)) & 0x000000ff;
}
}
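// Each thread hashes one PAGE_SIZE-byte page of text1 with SHA-1 and writes its 20-byte digest into hashval.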
extern "C" __global__
void gpusha1(unsigned char* text1, unsigned char* hashval, int text_num) {
int thx = blockIdx.x * blockDim.x + threadIdx.x;
SHA1_CTX ctx;
unsigned char text_dev[PAGE_SIZE];
unsigned char hashval_dev[SHA1_BLOCK_SIZE];
int i;
if (thx < text_num) {
for (i = 0; i < PAGE_SIZE; ++i) {
text_dev[i] = text1[i + thx*PAGE_SIZE];
}
sha1_init(&ctx);
sha1_update(&ctx, text_dev, PAGE_SIZE);
sha1_final(&ctx, hashval_dev);
for (i = 0; i < SHA1_BLOCK_SIZE; ++i) {
hashval[i + thx*SHA1_BLOCK_SIZE] = hashval_dev[i];
}
}
}
| 8072f6d2f6b563815113207fbb4d4eb6db605328.cu | #include <stdint.h>
#include <cuda.h>
typedef unsigned char BYTE; // 8-bit byte
typedef unsigned int WORD; // 32-bit word, change to "long" for 16-bit machines
typedef struct {
BYTE data[64];
WORD datalen;
unsigned long long bitlen;
WORD state[5];
WORD k[4];
} SHA1_CTX;
#define ROTLEFT(a, b) ((a << b) | (a >> (32 - b)))
#define SHA1_BLOCK_SIZE 20 // SHA1 outputs a 20 byte digest
#define PAGE_SIZE 4096
__device__ void sha1_init(SHA1_CTX *ctx)
{
ctx->datalen = 0;
ctx->bitlen = 0;
ctx->state[0] = 0x67452301;
ctx->state[1] = 0xEFCDAB89;
ctx->state[2] = 0x98BADCFE;
ctx->state[3] = 0x10325476;
ctx->state[4] = 0xc3d2e1f0;
ctx->k[0] = 0x5a827999;
ctx->k[1] = 0x6ed9eba1;
ctx->k[2] = 0x8f1bbcdc;
ctx->k[3] = 0xca62c1d6;
}
__device__ void sha1_transform(SHA1_CTX *ctx, const BYTE data[])
{
WORD a, b, c, d, e, i, j, t, m[80];
for (i = 0, j = 0; i < 16; ++i, j += 4)
m[i] = (data[j] << 24) + (data[j + 1] << 16) + (data[j + 2] << 8) + (data[j + 3]);
for ( ; i < 80; ++i) {
m[i] = (m[i - 3] ^ m[i - 8] ^ m[i - 14] ^ m[i - 16]);
m[i] = (m[i] << 1) | (m[i] >> 31);
}
a = ctx->state[0];
b = ctx->state[1];
c = ctx->state[2];
d = ctx->state[3];
e = ctx->state[4];
for (i = 0; i < 20; ++i) {
t = ROTLEFT(a, 5) + ((b & c) ^ (~b & d)) + e + ctx->k[0] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
for ( ; i < 40; ++i) {
t = ROTLEFT(a, 5) + (b ^ c ^ d) + e + ctx->k[1] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
for ( ; i < 60; ++i) {
t = ROTLEFT(a, 5) + ((b & c) ^ (b & d) ^ (c & d)) + e + ctx->k[2] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
for ( ; i < 80; ++i) {
t = ROTLEFT(a, 5) + (b ^ c ^ d) + e + ctx->k[3] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
ctx->state[0] += a;
ctx->state[1] += b;
ctx->state[2] += c;
ctx->state[3] += d;
ctx->state[4] += e;
}
__device__ void sha1_update(SHA1_CTX *ctx, const BYTE data[], size_t len)
{
size_t i;
for (i = 0; i < len; ++i) {
ctx->data[ctx->datalen] = data[i];
ctx->datalen++;
if (ctx->datalen == 64) {
sha1_transform(ctx, ctx->data);
ctx->bitlen += 512;
ctx->datalen = 0;
}
}
}
__device__ void sha1_final(SHA1_CTX *ctx, BYTE hash[])
{
WORD i;
i = ctx->datalen;
// Pad whatever data is left in the buffer.
if (ctx->datalen < 56) {
ctx->data[i++] = 0x80;
while (i < 56)
ctx->data[i++] = 0x00;
}
else {
ctx->data[i++] = 0x80;
while (i < 64)
ctx->data[i++] = 0x00;
sha1_transform(ctx, ctx->data);
// Clear the first 56 bytes so the message length is appended to a zeroed block.
for (i = 0; i < 56; ++i)
ctx->data[i] = 0x00;
}
// Append to the padding the total message's length in bits and transform.
ctx->bitlen += ctx->datalen * 8;
ctx->data[63] = ctx->bitlen;
ctx->data[62] = ctx->bitlen >> 8;
ctx->data[61] = ctx->bitlen >> 16;
ctx->data[60] = ctx->bitlen >> 24;
ctx->data[59] = ctx->bitlen >> 32;
ctx->data[58] = ctx->bitlen >> 40;
ctx->data[57] = ctx->bitlen >> 48;
ctx->data[56] = ctx->bitlen >> 56;
sha1_transform(ctx, ctx->data);
// Since this implementation uses little endian byte ordering and MD uses big endian,
// reverse all the bytes when copying the final state to the output hash.
for (i = 0; i < 4; ++i) {
hash[i] = (ctx->state[0] >> (24 - i * 8)) & 0x000000ff;
hash[i + 4] = (ctx->state[1] >> (24 - i * 8)) & 0x000000ff;
hash[i + 8] = (ctx->state[2] >> (24 - i * 8)) & 0x000000ff;
hash[i + 12] = (ctx->state[3] >> (24 - i * 8)) & 0x000000ff;
hash[i + 16] = (ctx->state[4] >> (24 - i * 8)) & 0x000000ff;
}
}
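// gpusha1: one thread per 4096-byte page. Each thread copies its page into a
// local buffer, hashes it with SHA-1, and writes its 20-byte digest into
// hashval at offset thx * SHA1_BLOCK_SIZE.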
extern "C" __global__
void gpusha1(unsigned char* text1, unsigned char* hashval, int text_num) {
int thx = blockIdx.x * blockDim.x + threadIdx.x;
SHA1_CTX ctx;
unsigned char text_dev[PAGE_SIZE];
unsigned char hashval_dev[SHA1_BLOCK_SIZE];
int i;
if (thx < text_num) {
for (i = 0; i < PAGE_SIZE; ++i) {
text_dev[i] = text1[i + thx*PAGE_SIZE];
}
sha1_init(&ctx);
sha1_update(&ctx, text_dev, PAGE_SIZE);
sha1_final(&ctx, hashval_dev);
for (i = 0; i < SHA1_BLOCK_SIZE; ++i) {
hashval[i + thx*SHA1_BLOCK_SIZE] = hashval_dev[i];
}
}
}
|
78bf06c129b9c1c5bf6ec3f91f46f0df8e37f431.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2011, T. Kroes <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
- Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "Core.cuh"
texture<short, hipTextureType3D, hipReadModeNormalizedFloat> gTexDensity;
texture<short, hipTextureType3D, hipReadModeNormalizedFloat> gTexGradientMagnitude;
texture<float, hipTextureType3D, hipReadModeElementType> gTexExtinction;
texture<float, hipTextureType1D, hipReadModeElementType> gTexOpacity;
texture<float4, hipTextureType1D, hipReadModeElementType> gTexDiffuse;
texture<float4, hipTextureType1D, hipReadModeElementType> gTexSpecular;
texture<float, hipTextureType1D, hipReadModeElementType> gTexRoughness;
texture<float4, hipTextureType1D, hipReadModeElementType> gTexEmission;
texture<uchar4, hipTextureType2D, hipReadModeNormalizedFloat> gTexRunningEstimateRgba;
hipArray* gpDensityArray = NULL;
hipArray* gpGradientMagnitudeArray = NULL;
hipArray* gpOpacityArray = NULL;
hipArray* gpDiffuseArray = NULL;
hipArray* gpSpecularArray = NULL;
hipArray* gpRoughnessArray = NULL;
hipArray* gpEmissionArray = NULL;
CD float3 gAaBbMin;
CD float3 gAaBbMax;
CD float3 gInvAaBbMin;
CD float3 gInvAaBbMax;
CD float gIntensityMin;
CD float gIntensityMax;
CD float gIntensityRange;
CD float gIntensityInvRange;
CD float gStepSize;
CD float gStepSizeShadow;
CD float gDensityScale;
CD float gGradientDelta;
CD float gInvGradientDelta;
CD float3 gGradientDeltaX;
CD float3 gGradientDeltaY;
CD float3 gGradientDeltaZ;
CD int gFilmWidth;
CD int gFilmHeight;
CD int gFilmNoPixels;
CD int gFilterWidth;
CD float gFilterWeights[10];
CD float gExposure;
CD float gInvExposure;
CD float gGamma;
CD float gInvGamma;
CD float gDenoiseEnabled;
CD float gDenoiseWindowRadius;
CD float gDenoiseInvWindowArea;
CD float gDenoiseNoise;
CD float gDenoiseWeightThreshold;
CD float gDenoiseLerpThreshold;
CD float gDenoiseLerpC;
CD float gNoIterations;
CD float gInvNoIterations;
#define TF_NO_SAMPLES 128
#define INV_TF_NO_SAMPLES 1.0f / (float)TF_NO_SAMPLES
#include "Model.cuh"
#include "View.cuh"
#include "Blur.cuh"
#include "Denoise.cuh"
#include "Estimate.cuh"
#include "Utilities.cuh"
#include "SingleScattering.cuh"
#include "NearestIntersection.cuh"
#include "SpecularBloom.cuh"
#include "ToneMap.cuh"
CCudaModel gModel;
CCudaView gRenderCanvasView;
CCudaView gNavigatorView;
void BindDensityBuffer(short* pBuffer, hipExtent Extent)
{
hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<short>();
HandleCudaError(hipMalloc3DArray(&gpDensityArray, &ChannelDesc, Extent));
hipMemcpy3DParms CopyParams = {0};
CopyParams.srcPtr = make_hipPitchedPtr(pBuffer, Extent.width * sizeof(short), Extent.width, Extent.height);
CopyParams.dstArray = gpDensityArray;
CopyParams.extent = Extent;
CopyParams.kind = hipMemcpyHostToDevice;
HandleCudaError(hipMemcpy3D(&CopyParams));
gTexDensity.normalized = true;
gTexDensity.filterMode = hipFilterModeLinear;
gTexDensity.addressMode[0] = hipAddressModeClamp;
gTexDensity.addressMode[1] = hipAddressModeClamp;
gTexDensity.addressMode[2] = hipAddressModeClamp;
HandleCudaError(hipBindTextureToArray(gTexDensity, gpDensityArray, ChannelDesc));
}
void BindGradientMagnitudeBuffer(short* pBuffer, hipExtent Extent)
{
hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<short>();
HandleCudaError(hipMalloc3DArray(&gpGradientMagnitudeArray, &ChannelDesc, Extent));
hipMemcpy3DParms CopyParams = {0};
CopyParams.srcPtr = make_hipPitchedPtr(pBuffer, Extent.width * sizeof(short), Extent.width, Extent.height);
CopyParams.dstArray = gpGradientMagnitudeArray;
CopyParams.extent = Extent;
CopyParams.kind = hipMemcpyHostToDevice;
HandleCudaError(hipMemcpy3D(&CopyParams));
gTexGradientMagnitude.normalized = true;
gTexGradientMagnitude.filterMode = hipFilterModeLinear;
gTexGradientMagnitude.addressMode[0] = hipAddressModeClamp;
gTexGradientMagnitude.addressMode[1] = hipAddressModeClamp;
gTexGradientMagnitude.addressMode[2] = hipAddressModeClamp;
HandleCudaError(hipBindTextureToArray(gTexGradientMagnitude, gpGradientMagnitudeArray, ChannelDesc));
}
void UnbindDensityBuffer(void)
{
HandleCudaError(hipFreeArray(gpDensityArray));
gpDensityArray = NULL;
HandleCudaError(hipUnbindTexture(gTexDensity));
}
void UnbindGradientMagnitudeBuffer(void)
{
HandleCudaError(hipFreeArray(gpGradientMagnitudeArray));
gpGradientMagnitudeArray = NULL;
HandleCudaError(hipUnbindTexture(gTexGradientMagnitude));
}
void BindRenderCanvasView(const CResolution2D& Resolution)
{
gRenderCanvasView.Resize(Resolution);
hipChannelFormatDesc Channel;
Channel = hipCreateChannelDesc<uchar4>();
HandleCudaError(hipBindTexture2D(0, gTexRunningEstimateRgba, gRenderCanvasView.m_EstimateRgbaLdr.GetPtr(), Channel, gRenderCanvasView.GetWidth(), gRenderCanvasView.GetHeight(), gRenderCanvasView.m_EstimateRgbaLdr.GetPitch()));
}
void ResetRenderCanvasView(void)
{
gRenderCanvasView.Reset();
}
void FreeRenderCanvasView(void)
{
gRenderCanvasView.Free();
}
unsigned char* GetDisplayEstimate(void)
{
return (unsigned char*)gRenderCanvasView.m_DisplayEstimateRgbLdr.GetPtr(0, 0);
}
void BindTransferFunctionOpacity(CTransferFunction& TransferFunctionOpacity)
{
gTexOpacity.normalized = true;
gTexOpacity.filterMode = hipFilterModeLinear;
gTexOpacity.addressMode[0] = hipAddressModeClamp;
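// Sample the opacity transfer function at TF_NO_SAMPLES evenly spaced positions
// and upload the table into a 1D texture for fast lookups in the render kernels.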
float Opacity[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
Opacity[i] = TransferFunctionOpacity.F((float)i * INV_TF_NO_SAMPLES).r;
hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<float>();
if (gpOpacityArray == NULL)
HandleCudaError(hipMallocArray(&gpOpacityArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(hipMemcpyToArray(gpOpacityArray, 0, 0, Opacity, TF_NO_SAMPLES * sizeof(float), hipMemcpyHostToDevice));
HandleCudaError(hipBindTextureToArray(gTexOpacity, gpOpacityArray, ChannelDesc));
}
void UnbindTransferFunctionOpacity(void)
{
HandleCudaError(hipFreeArray(gpOpacityArray));
gpOpacityArray = NULL;
HandleCudaError(hipUnbindTexture(gTexOpacity));
}
void BindTransferFunctionDiffuse(CTransferFunction& TransferFunctionDiffuse)
{
gTexDiffuse.normalized = true;
gTexDiffuse.filterMode = hipFilterModeLinear;
gTexDiffuse.addressMode[0] = hipAddressModeClamp;
float4 Diffuse[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
{
Diffuse[i].x = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).r;
Diffuse[i].y = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).g;
Diffuse[i].z = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).b;
}
hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<float4>();
if (gpDiffuseArray == NULL)
HandleCudaError(hipMallocArray(&gpDiffuseArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(hipMemcpyToArray(gpDiffuseArray, 0, 0, Diffuse, TF_NO_SAMPLES * sizeof(float4), hipMemcpyHostToDevice));
HandleCudaError(hipBindTextureToArray(gTexDiffuse, gpDiffuseArray, ChannelDesc));
}
void UnbindTransferFunctionDiffuse(void)
{
HandleCudaError(hipFreeArray(gpDiffuseArray));
gpDiffuseArray = NULL;
HandleCudaError(hipUnbindTexture(gTexDiffuse));
}
void BindTransferFunctionSpecular(CTransferFunction& TransferFunctionSpecular)
{
gTexSpecular.normalized = true;
gTexSpecular.filterMode = hipFilterModeLinear;
gTexSpecular.addressMode[0] = hipAddressModeClamp;
float4 Specular[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
{
Specular[i].x = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).r;
Specular[i].y = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).g;
Specular[i].z = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).b;
}
hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<float4>();
if (gpSpecularArray == NULL)
HandleCudaError(hipMallocArray(&gpSpecularArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(hipMemcpyToArray(gpSpecularArray, 0, 0, Specular, TF_NO_SAMPLES * sizeof(float4), hipMemcpyHostToDevice));
HandleCudaError(hipBindTextureToArray(gTexSpecular, gpSpecularArray, ChannelDesc));
}
void UnbindTransferFunctionSpecular(void)
{
HandleCudaError(hipFreeArray(gpSpecularArray));
gpSpecularArray = NULL;
HandleCudaError(hipUnbindTexture(gTexSpecular));
}
void BindTransferFunctionRoughness(CTransferFunction& TransferFunctionRoughness)
{
gTexRoughness.normalized = true;
gTexRoughness.filterMode = hipFilterModeLinear;
gTexRoughness.addressMode[0] = hipAddressModeClamp;
float Roughness[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
Roughness[i] = TransferFunctionRoughness.F((float)i * INV_TF_NO_SAMPLES).r;
hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<float>();
if (gpRoughnessArray == NULL)
HandleCudaError(hipMallocArray(&gpRoughnessArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(hipMemcpyToArray(gpRoughnessArray, 0, 0, Roughness, TF_NO_SAMPLES * sizeof(float), hipMemcpyHostToDevice));
HandleCudaError(hipBindTextureToArray(gTexRoughness, gpRoughnessArray, ChannelDesc));
}
void UnbindTransferFunctionRoughness(void)
{
HandleCudaError(hipFreeArray(gpRoughnessArray));
gpRoughnessArray = NULL;
HandleCudaError(hipUnbindTexture(gTexRoughness));
}
void BindTransferFunctionEmission(CTransferFunction& TransferFunctionEmission)
{
gTexEmission.normalized = true;
gTexEmission.filterMode = hipFilterModeLinear;
gTexEmission.addressMode[0] = hipAddressModeClamp;
float4 Emission[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
{
Emission[i].x = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).r;
Emission[i].y = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).g;
Emission[i].z = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).b;
}
hipChannelFormatDesc ChannelDesc = hipCreateChannelDesc<float4>();
if (gpEmissionArray == NULL)
HandleCudaError(hipMallocArray(&gpEmissionArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(hipMemcpyToArray(gpEmissionArray, 0, 0, Emission, TF_NO_SAMPLES * sizeof(float4), hipMemcpyHostToDevice));
HandleCudaError(hipBindTextureToArray(gTexEmission, gpEmissionArray, ChannelDesc));
}
void UnbindTransferFunctionEmission(void)
{
HandleCudaError(hipFreeArray(gpEmissionArray));
gpEmissionArray = NULL;
HandleCudaError(hipUnbindTexture(gTexEmission));
}
void BindConstants(CScene* pScene)
{
const float3 AaBbMin = make_float3(pScene->m_BoundingBox.GetMinP().x, pScene->m_BoundingBox.GetMinP().y, pScene->m_BoundingBox.GetMinP().z);
const float3 AaBbMax = make_float3(pScene->m_BoundingBox.GetMaxP().x, pScene->m_BoundingBox.GetMaxP().y, pScene->m_BoundingBox.GetMaxP().z);
HandleCudaError(hipMemcpyToSymbol("gAaBbMin", &AaBbMin, sizeof(float3)));
HandleCudaError(hipMemcpyToSymbol("gAaBbMax", &AaBbMax, sizeof(float3)));
const float3 InvAaBbMin = make_float3(pScene->m_BoundingBox.GetInvMinP().x, pScene->m_BoundingBox.GetInvMinP().y, pScene->m_BoundingBox.GetInvMinP().z);
const float3 InvAaBbMax = make_float3(pScene->m_BoundingBox.GetInvMaxP().x, pScene->m_BoundingBox.GetInvMaxP().y, pScene->m_BoundingBox.GetInvMaxP().z);
HandleCudaError(hipMemcpyToSymbol("gInvAaBbMin", &InvAaBbMin, sizeof(float3)));
HandleCudaError(hipMemcpyToSymbol("gInvAaBbMax", &InvAaBbMax, sizeof(float3)));
const float IntensityMin = pScene->m_IntensityRange.GetMin();
const float IntensityMax = pScene->m_IntensityRange.GetMax();
const float IntensityRange = pScene->m_IntensityRange.GetRange();
const float IntensityInvRange = 1.0f / IntensityRange;
HandleCudaError(hipMemcpyToSymbol("gIntensityMin", &IntensityMin, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gIntensityMax", &IntensityMax, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gIntensityRange", &IntensityRange, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gIntensityInvRange", &IntensityInvRange, sizeof(float)));
const float StepSize = pScene->m_StepSizeFactor * pScene->m_GradientDelta;
const float StepSizeShadow = pScene->m_StepSizeFactorShadow * pScene->m_GradientDelta;
HandleCudaError(hipMemcpyToSymbol("gStepSize", &StepSize, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gStepSizeShadow", &StepSizeShadow, sizeof(float)));
const float DensityScale = pScene->m_DensityScale;
HandleCudaError(hipMemcpyToSymbol("gDensityScale", &DensityScale, sizeof(float)));
const float GradientDelta = 1.0f * pScene->m_GradientDelta;
const float InvGradientDelta = 1.0f / GradientDelta;
const Vec3f GradientDeltaX(GradientDelta, 0.0f, 0.0f);
const Vec3f GradientDeltaY(0.0f, GradientDelta, 0.0f);
const Vec3f GradientDeltaZ(0.0f, 0.0f, GradientDelta);
HandleCudaError(hipMemcpyToSymbol("gGradientDelta", &GradientDelta, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gInvGradientDelta", &InvGradientDelta, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gGradientDeltaX", &GradientDeltaX, sizeof(Vec3f)));
HandleCudaError(hipMemcpyToSymbol("gGradientDeltaY", &GradientDeltaY, sizeof(Vec3f)));
HandleCudaError(hipMemcpyToSymbol("gGradientDeltaZ", &GradientDeltaZ, sizeof(Vec3f)));
const int FilmWidth = pScene->m_Camera.m_Film.GetWidth();
const int Filmheight = pScene->m_Camera.m_Film.GetHeight();
const int FilmNoPixels = pScene->m_Camera.m_Film.m_Resolution.GetNoElements();
HandleCudaError(hipMemcpyToSymbol("gFilmWidth", &FilmWidth, sizeof(int)));
HandleCudaError(hipMemcpyToSymbol("gFilmHeight", &Filmheight, sizeof(int)));
HandleCudaError(hipMemcpyToSymbol("gFilmNoPixels", &FilmNoPixels, sizeof(int)));
const int FilterWidth = 1;
HandleCudaError(hipMemcpyToSymbol("gFilterWidth", &FilterWidth, sizeof(int)));
const float FilterWeights[10] = { 0.11411459588254977f, 0.08176668094332218f, 0.03008028089187349f, 0.01f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };
HandleCudaError(hipMemcpyToSymbol("gFilterWeights", &FilterWeights, 10 * sizeof(float)));
const float Gamma = pScene->m_Camera.m_Film.m_Gamma;
const float InvGamma = 1.0f / Gamma;
const float Exposure = pScene->m_Camera.m_Film.m_Exposure;
const float InvExposure = 1.0f / Exposure;
HandleCudaError(hipMemcpyToSymbol("gExposure", &Exposure, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gInvExposure", &InvExposure, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gGamma", &Gamma, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gInvGamma", &InvGamma, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gDenoiseEnabled", &pScene->m_DenoiseParams.m_Enabled, sizeof(bool)));
HandleCudaError(hipMemcpyToSymbol("gDenoiseWindowRadius", &pScene->m_DenoiseParams.m_WindowRadius, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gDenoiseInvWindowArea", &pScene->m_DenoiseParams.m_InvWindowArea, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gDenoiseNoise", &pScene->m_DenoiseParams.m_Noise, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gDenoiseWeightThreshold", &pScene->m_DenoiseParams.m_WeightThreshold, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gDenoiseLerpThreshold", &pScene->m_DenoiseParams.m_LerpThreshold, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gDenoiseLerpC", &pScene->m_DenoiseParams.m_LerpC, sizeof(float)));
const float NoIterations = pScene->GetNoIterations();
const float InvNoIterations = 1.0f / __max(1.0f, NoIterations);
HandleCudaError(hipMemcpyToSymbol("gNoIterations", &NoIterations, sizeof(float)));
HandleCudaError(hipMemcpyToSymbol("gInvNoIterations", &InvNoIterations, sizeof(float)));
}
void Render(const int& Type, CScene& Scene, CTiming& RenderImage, CTiming& BlurImage, CTiming& PostProcessImage, CTiming& DenoiseImage)
{
CScene* pDevScene = NULL;
HandleCudaError(hipMalloc(&pDevScene, sizeof(CScene)));
HandleCudaError(hipMemcpy(pDevScene, &Scene, sizeof(CScene), hipMemcpyHostToDevice));
if (Scene.m_Camera.m_Focus.m_Type == 0)
Scene.m_Camera.m_Focus.m_FocalDistance = NearestIntersection(pDevScene);
HandleCudaError(hipMemcpy(pDevScene, &Scene, sizeof(CScene), hipMemcpyHostToDevice));
CCudaView* pDevView = NULL;
HandleCudaError(hipMalloc(&pDevView, sizeof(CCudaView)));
HandleCudaError(hipMemcpy(pDevView, &gRenderCanvasView, sizeof(CCudaView), hipMemcpyHostToDevice));
CCudaTimer TmrRender;
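// Type selects the rendering algorithm: 0 = single scattering, 1 = multiple scattering (currently disabled).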
switch (Type)
{
case 0:
{
SingleScattering(&Scene, pDevScene, pDevView);
break;
}
case 1:
{
// MultipleScattering(&Scene, pDevScene);
break;
}
}
RenderImage.AddDuration(TmrRender.ElapsedTime());
CCudaTimer TmrBlur;
Blur(&Scene, pDevScene, pDevView);
BlurImage.AddDuration(TmrBlur.ElapsedTime());
CCudaTimer TmrPostProcess;
Estimate(&Scene, pDevScene, pDevView);
PostProcessImage.AddDuration(TmrPostProcess.ElapsedTime());
ToneMap(&Scene, pDevScene, pDevView);
CCudaTimer TmrDenoise;
Denoise(&Scene, pDevScene, pDevView);
DenoiseImage.AddDuration(TmrDenoise.ElapsedTime());
HandleCudaError(hipFree(pDevScene));
HandleCudaError(hipFree(pDevView));
} | 78bf06c129b9c1c5bf6ec3f91f46f0df8e37f431.cu | /*
Copyright (c) 2011, T. Kroes <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
- Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "Core.cuh"
texture<short, cudaTextureType3D, cudaReadModeNormalizedFloat> gTexDensity;
texture<short, cudaTextureType3D, cudaReadModeNormalizedFloat> gTexGradientMagnitude;
texture<float, cudaTextureType3D, cudaReadModeElementType> gTexExtinction;
texture<float, cudaTextureType1D, cudaReadModeElementType> gTexOpacity;
texture<float4, cudaTextureType1D, cudaReadModeElementType> gTexDiffuse;
texture<float4, cudaTextureType1D, cudaReadModeElementType> gTexSpecular;
texture<float, cudaTextureType1D, cudaReadModeElementType> gTexRoughness;
texture<float4, cudaTextureType1D, cudaReadModeElementType> gTexEmission;
texture<uchar4, cudaTextureType2D, cudaReadModeNormalizedFloat> gTexRunningEstimateRgba;
cudaArray* gpDensityArray = NULL;
cudaArray* gpGradientMagnitudeArray = NULL;
cudaArray* gpOpacityArray = NULL;
cudaArray* gpDiffuseArray = NULL;
cudaArray* gpSpecularArray = NULL;
cudaArray* gpRoughnessArray = NULL;
cudaArray* gpEmissionArray = NULL;
CD float3 gAaBbMin;
CD float3 gAaBbMax;
CD float3 gInvAaBbMin;
CD float3 gInvAaBbMax;
CD float gIntensityMin;
CD float gIntensityMax;
CD float gIntensityRange;
CD float gIntensityInvRange;
CD float gStepSize;
CD float gStepSizeShadow;
CD float gDensityScale;
CD float gGradientDelta;
CD float gInvGradientDelta;
CD float3 gGradientDeltaX;
CD float3 gGradientDeltaY;
CD float3 gGradientDeltaZ;
CD int gFilmWidth;
CD int gFilmHeight;
CD int gFilmNoPixels;
CD int gFilterWidth;
CD float gFilterWeights[10];
CD float gExposure;
CD float gInvExposure;
CD float gGamma;
CD float gInvGamma;
CD float gDenoiseEnabled;
CD float gDenoiseWindowRadius;
CD float gDenoiseInvWindowArea;
CD float gDenoiseNoise;
CD float gDenoiseWeightThreshold;
CD float gDenoiseLerpThreshold;
CD float gDenoiseLerpC;
CD float gNoIterations;
CD float gInvNoIterations;
#define TF_NO_SAMPLES 128
#define INV_TF_NO_SAMPLES 1.0f / (float)TF_NO_SAMPLES
#include "Model.cuh"
#include "View.cuh"
#include "Blur.cuh"
#include "Denoise.cuh"
#include "Estimate.cuh"
#include "Utilities.cuh"
#include "SingleScattering.cuh"
#include "NearestIntersection.cuh"
#include "SpecularBloom.cuh"
#include "ToneMap.cuh"
CCudaModel gModel;
CCudaView gRenderCanvasView;
CCudaView gNavigatorView;
void BindDensityBuffer(short* pBuffer, cudaExtent Extent)
{
cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<short>();
HandleCudaError(cudaMalloc3DArray(&gpDensityArray, &ChannelDesc, Extent));
cudaMemcpy3DParms CopyParams = {0};
CopyParams.srcPtr = make_cudaPitchedPtr(pBuffer, Extent.width * sizeof(short), Extent.width, Extent.height);
CopyParams.dstArray = gpDensityArray;
CopyParams.extent = Extent;
CopyParams.kind = cudaMemcpyHostToDevice;
HandleCudaError(cudaMemcpy3D(&CopyParams));
gTexDensity.normalized = true;
gTexDensity.filterMode = cudaFilterModeLinear;
gTexDensity.addressMode[0] = cudaAddressModeClamp;
gTexDensity.addressMode[1] = cudaAddressModeClamp;
gTexDensity.addressMode[2] = cudaAddressModeClamp;
HandleCudaError(cudaBindTextureToArray(gTexDensity, gpDensityArray, ChannelDesc));
}
void BindGradientMagnitudeBuffer(short* pBuffer, cudaExtent Extent)
{
cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<short>();
HandleCudaError(cudaMalloc3DArray(&gpGradientMagnitudeArray, &ChannelDesc, Extent));
cudaMemcpy3DParms CopyParams = {0};
CopyParams.srcPtr = make_cudaPitchedPtr(pBuffer, Extent.width * sizeof(short), Extent.width, Extent.height);
CopyParams.dstArray = gpGradientMagnitudeArray;
CopyParams.extent = Extent;
CopyParams.kind = cudaMemcpyHostToDevice;
HandleCudaError(cudaMemcpy3D(&CopyParams));
gTexGradientMagnitude.normalized = true;
gTexGradientMagnitude.filterMode = cudaFilterModeLinear;
gTexGradientMagnitude.addressMode[0] = cudaAddressModeClamp;
gTexGradientMagnitude.addressMode[1] = cudaAddressModeClamp;
gTexGradientMagnitude.addressMode[2] = cudaAddressModeClamp;
HandleCudaError(cudaBindTextureToArray(gTexGradientMagnitude, gpGradientMagnitudeArray, ChannelDesc));
}
void UnbindDensityBuffer(void)
{
HandleCudaError(cudaFreeArray(gpDensityArray));
gpDensityArray = NULL;
HandleCudaError(cudaUnbindTexture(gTexDensity));
}
void UnbindGradientMagnitudeBuffer(void)
{
HandleCudaError(cudaFreeArray(gpGradientMagnitudeArray));
gpGradientMagnitudeArray = NULL;
HandleCudaError(cudaUnbindTexture(gTexGradientMagnitude));
}
void BindRenderCanvasView(const CResolution2D& Resolution)
{
gRenderCanvasView.Resize(Resolution);
cudaChannelFormatDesc Channel;
Channel = cudaCreateChannelDesc<uchar4>();
HandleCudaError(cudaBindTexture2D(0, gTexRunningEstimateRgba, gRenderCanvasView.m_EstimateRgbaLdr.GetPtr(), Channel, gRenderCanvasView.GetWidth(), gRenderCanvasView.GetHeight(), gRenderCanvasView.m_EstimateRgbaLdr.GetPitch()));
}
void ResetRenderCanvasView(void)
{
gRenderCanvasView.Reset();
}
void FreeRenderCanvasView(void)
{
gRenderCanvasView.Free();
}
unsigned char* GetDisplayEstimate(void)
{
return (unsigned char*)gRenderCanvasView.m_DisplayEstimateRgbLdr.GetPtr(0, 0);
}
void BindTransferFunctionOpacity(CTransferFunction& TransferFunctionOpacity)
{
gTexOpacity.normalized = true;
gTexOpacity.filterMode = cudaFilterModeLinear;
gTexOpacity.addressMode[0] = cudaAddressModeClamp;
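// Sample the opacity transfer function at TF_NO_SAMPLES evenly spaced positions
// and upload the table into a 1D texture for fast lookups in the render kernels.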
float Opacity[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
Opacity[i] = TransferFunctionOpacity.F((float)i * INV_TF_NO_SAMPLES).r;
cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<float>();
if (gpOpacityArray == NULL)
HandleCudaError(cudaMallocArray(&gpOpacityArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(cudaMemcpyToArray(gpOpacityArray, 0, 0, Opacity, TF_NO_SAMPLES * sizeof(float), cudaMemcpyHostToDevice));
HandleCudaError(cudaBindTextureToArray(gTexOpacity, gpOpacityArray, ChannelDesc));
}
void UnbindTransferFunctionOpacity(void)
{
HandleCudaError(cudaFreeArray(gpOpacityArray));
gpOpacityArray = NULL;
HandleCudaError(cudaUnbindTexture(gTexOpacity));
}
void BindTransferFunctionDiffuse(CTransferFunction& TransferFunctionDiffuse)
{
gTexDiffuse.normalized = true;
gTexDiffuse.filterMode = cudaFilterModeLinear;
gTexDiffuse.addressMode[0] = cudaAddressModeClamp;
float4 Diffuse[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
{
Diffuse[i].x = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).r;
Diffuse[i].y = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).g;
Diffuse[i].z = TransferFunctionDiffuse.F((float)i * INV_TF_NO_SAMPLES).b;
}
cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<float4>();
if (gpDiffuseArray == NULL)
HandleCudaError(cudaMallocArray(&gpDiffuseArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(cudaMemcpyToArray(gpDiffuseArray, 0, 0, Diffuse, TF_NO_SAMPLES * sizeof(float4), cudaMemcpyHostToDevice));
HandleCudaError(cudaBindTextureToArray(gTexDiffuse, gpDiffuseArray, ChannelDesc));
}
void UnbindTransferFunctionDiffuse(void)
{
HandleCudaError(cudaFreeArray(gpDiffuseArray));
gpDiffuseArray = NULL;
HandleCudaError(cudaUnbindTexture(gTexDiffuse));
}
void BindTransferFunctionSpecular(CTransferFunction& TransferFunctionSpecular)
{
gTexSpecular.normalized = true;
gTexSpecular.filterMode = cudaFilterModeLinear;
gTexSpecular.addressMode[0] = cudaAddressModeClamp;
float4 Specular[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
{
Specular[i].x = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).r;
Specular[i].y = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).g;
Specular[i].z = TransferFunctionSpecular.F((float)i * INV_TF_NO_SAMPLES).b;
}
cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<float4>();
if (gpSpecularArray == NULL)
HandleCudaError(cudaMallocArray(&gpSpecularArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(cudaMemcpyToArray(gpSpecularArray, 0, 0, Specular, TF_NO_SAMPLES * sizeof(float4), cudaMemcpyHostToDevice));
HandleCudaError(cudaBindTextureToArray(gTexSpecular, gpSpecularArray, ChannelDesc));
}
void UnbindTransferFunctionSpecular(void)
{
HandleCudaError(cudaFreeArray(gpSpecularArray));
gpSpecularArray = NULL;
HandleCudaError(cudaUnbindTexture(gTexSpecular));
}
void BindTransferFunctionRoughness(CTransferFunction& TransferFunctionRoughness)
{
gTexRoughness.normalized = true;
gTexRoughness.filterMode = cudaFilterModeLinear;
gTexRoughness.addressMode[0] = cudaAddressModeClamp;
float Roughness[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
Roughness[i] = TransferFunctionRoughness.F((float)i * INV_TF_NO_SAMPLES).r;
cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<float>();
if (gpRoughnessArray == NULL)
HandleCudaError(cudaMallocArray(&gpRoughnessArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(cudaMemcpyToArray(gpRoughnessArray, 0, 0, Roughness, TF_NO_SAMPLES * sizeof(float), cudaMemcpyHostToDevice));
HandleCudaError(cudaBindTextureToArray(gTexRoughness, gpRoughnessArray, ChannelDesc));
}
void UnbindTransferFunctionRoughness(void)
{
HandleCudaError(cudaFreeArray(gpRoughnessArray));
gpRoughnessArray = NULL;
HandleCudaError(cudaUnbindTexture(gTexRoughness));
}
void BindTransferFunctionEmission(CTransferFunction& TransferFunctionEmission)
{
gTexEmission.normalized = true;
gTexEmission.filterMode = cudaFilterModeLinear;
gTexEmission.addressMode[0] = cudaAddressModeClamp;
float4 Emission[TF_NO_SAMPLES];
for (int i = 0; i < TF_NO_SAMPLES; i++)
{
Emission[i].x = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).r;
Emission[i].y = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).g;
Emission[i].z = TransferFunctionEmission.F((float)i * INV_TF_NO_SAMPLES).b;
}
cudaChannelFormatDesc ChannelDesc = cudaCreateChannelDesc<float4>();
if (gpEmissionArray == NULL)
HandleCudaError(cudaMallocArray(&gpEmissionArray, &ChannelDesc, TF_NO_SAMPLES, 1));
HandleCudaError(cudaMemcpyToArray(gpEmissionArray, 0, 0, Emission, TF_NO_SAMPLES * sizeof(float4), cudaMemcpyHostToDevice));
HandleCudaError(cudaBindTextureToArray(gTexEmission, gpEmissionArray, ChannelDesc));
}
void UnbindTransferFunctionEmission(void)
{
HandleCudaError(cudaFreeArray(gpEmissionArray));
gpEmissionArray = NULL;
HandleCudaError(cudaUnbindTexture(gTexEmission));
}
void BindConstants(CScene* pScene)
{
const float3 AaBbMin = make_float3(pScene->m_BoundingBox.GetMinP().x, pScene->m_BoundingBox.GetMinP().y, pScene->m_BoundingBox.GetMinP().z);
const float3 AaBbMax = make_float3(pScene->m_BoundingBox.GetMaxP().x, pScene->m_BoundingBox.GetMaxP().y, pScene->m_BoundingBox.GetMaxP().z);
HandleCudaError(cudaMemcpyToSymbol("gAaBbMin", &AaBbMin, sizeof(float3)));
HandleCudaError(cudaMemcpyToSymbol("gAaBbMax", &AaBbMax, sizeof(float3)));
const float3 InvAaBbMin = make_float3(pScene->m_BoundingBox.GetInvMinP().x, pScene->m_BoundingBox.GetInvMinP().y, pScene->m_BoundingBox.GetInvMinP().z);
const float3 InvAaBbMax = make_float3(pScene->m_BoundingBox.GetInvMaxP().x, pScene->m_BoundingBox.GetInvMaxP().y, pScene->m_BoundingBox.GetInvMaxP().z);
HandleCudaError(cudaMemcpyToSymbol("gInvAaBbMin", &InvAaBbMin, sizeof(float3)));
HandleCudaError(cudaMemcpyToSymbol("gInvAaBbMax", &InvAaBbMax, sizeof(float3)));
const float IntensityMin = pScene->m_IntensityRange.GetMin();
const float IntensityMax = pScene->m_IntensityRange.GetMax();
const float IntensityRange = pScene->m_IntensityRange.GetRange();
const float IntensityInvRange = 1.0f / IntensityRange;
HandleCudaError(cudaMemcpyToSymbol("gIntensityMin", &IntensityMin, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gIntensityMax", &IntensityMax, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gIntensityRange", &IntensityRange, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gIntensityInvRange", &IntensityInvRange, sizeof(float)));
const float StepSize = pScene->m_StepSizeFactor * pScene->m_GradientDelta;
const float StepSizeShadow = pScene->m_StepSizeFactorShadow * pScene->m_GradientDelta;
HandleCudaError(cudaMemcpyToSymbol("gStepSize", &StepSize, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gStepSizeShadow", &StepSizeShadow, sizeof(float)));
const float DensityScale = pScene->m_DensityScale;
HandleCudaError(cudaMemcpyToSymbol("gDensityScale", &DensityScale, sizeof(float)));
const float GradientDelta = 1.0f * pScene->m_GradientDelta;
const float InvGradientDelta = 1.0f / GradientDelta;
const Vec3f GradientDeltaX(GradientDelta, 0.0f, 0.0f);
const Vec3f GradientDeltaY(0.0f, GradientDelta, 0.0f);
const Vec3f GradientDeltaZ(0.0f, 0.0f, GradientDelta);
HandleCudaError(cudaMemcpyToSymbol("gGradientDelta", &GradientDelta, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gInvGradientDelta", &InvGradientDelta, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gGradientDeltaX", &GradientDeltaX, sizeof(Vec3f)));
HandleCudaError(cudaMemcpyToSymbol("gGradientDeltaY", &GradientDeltaY, sizeof(Vec3f)));
HandleCudaError(cudaMemcpyToSymbol("gGradientDeltaZ", &GradientDeltaZ, sizeof(Vec3f)));
const int FilmWidth = pScene->m_Camera.m_Film.GetWidth();
const int Filmheight = pScene->m_Camera.m_Film.GetHeight();
const int FilmNoPixels = pScene->m_Camera.m_Film.m_Resolution.GetNoElements();
HandleCudaError(cudaMemcpyToSymbol("gFilmWidth", &FilmWidth, sizeof(int)));
HandleCudaError(cudaMemcpyToSymbol("gFilmHeight", &Filmheight, sizeof(int)));
HandleCudaError(cudaMemcpyToSymbol("gFilmNoPixels", &FilmNoPixels, sizeof(int)));
const int FilterWidth = 1;
HandleCudaError(cudaMemcpyToSymbol("gFilterWidth", &FilterWidth, sizeof(int)));
const float FilterWeights[10] = { 0.11411459588254977f, 0.08176668094332218f, 0.03008028089187349f, 0.01f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };
HandleCudaError(cudaMemcpyToSymbol("gFilterWeights", &FilterWeights, 10 * sizeof(float)));
const float Gamma = pScene->m_Camera.m_Film.m_Gamma;
const float InvGamma = 1.0f / Gamma;
const float Exposure = pScene->m_Camera.m_Film.m_Exposure;
const float InvExposure = 1.0f / Exposure;
HandleCudaError(cudaMemcpyToSymbol("gExposure", &Exposure, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gInvExposure", &InvExposure, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gGamma", &Gamma, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gInvGamma", &InvGamma, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gDenoiseEnabled", &pScene->m_DenoiseParams.m_Enabled, sizeof(bool)));
HandleCudaError(cudaMemcpyToSymbol("gDenoiseWindowRadius", &pScene->m_DenoiseParams.m_WindowRadius, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gDenoiseInvWindowArea", &pScene->m_DenoiseParams.m_InvWindowArea, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gDenoiseNoise", &pScene->m_DenoiseParams.m_Noise, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gDenoiseWeightThreshold", &pScene->m_DenoiseParams.m_WeightThreshold, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gDenoiseLerpThreshold", &pScene->m_DenoiseParams.m_LerpThreshold, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gDenoiseLerpC", &pScene->m_DenoiseParams.m_LerpC, sizeof(float)));
const float NoIterations = pScene->GetNoIterations();
const float InvNoIterations = 1.0f / __max(1.0f, NoIterations);
HandleCudaError(cudaMemcpyToSymbol("gNoIterations", &NoIterations, sizeof(float)));
HandleCudaError(cudaMemcpyToSymbol("gInvNoIterations", &InvNoIterations, sizeof(float)));
}
void Render(const int& Type, CScene& Scene, CTiming& RenderImage, CTiming& BlurImage, CTiming& PostProcessImage, CTiming& DenoiseImage)
{
CScene* pDevScene = NULL;
HandleCudaError(cudaMalloc(&pDevScene, sizeof(CScene)));
HandleCudaError(cudaMemcpy(pDevScene, &Scene, sizeof(CScene), cudaMemcpyHostToDevice));
if (Scene.m_Camera.m_Focus.m_Type == 0)
Scene.m_Camera.m_Focus.m_FocalDistance = NearestIntersection(pDevScene);
HandleCudaError(cudaMemcpy(pDevScene, &Scene, sizeof(CScene), cudaMemcpyHostToDevice));
CCudaView* pDevView = NULL;
HandleCudaError(cudaMalloc(&pDevView, sizeof(CCudaView)));
HandleCudaError(cudaMemcpy(pDevView, &gRenderCanvasView, sizeof(CCudaView), cudaMemcpyHostToDevice));
CCudaTimer TmrRender;
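// Type selects the rendering algorithm: 0 = single scattering, 1 = multiple scattering (currently disabled).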
switch (Type)
{
case 0:
{
SingleScattering(&Scene, pDevScene, pDevView);
break;
}
case 1:
{
// MultipleScattering(&Scene, pDevScene);
break;
}
}
RenderImage.AddDuration(TmrRender.ElapsedTime());
CCudaTimer TmrBlur;
Blur(&Scene, pDevScene, pDevView);
BlurImage.AddDuration(TmrBlur.ElapsedTime());
CCudaTimer TmrPostProcess;
Estimate(&Scene, pDevScene, pDevView);
PostProcessImage.AddDuration(TmrPostProcess.ElapsedTime());
ToneMap(&Scene, pDevScene, pDevView);
CCudaTimer TmrDenoise;
Denoise(&Scene, pDevScene, pDevView);
DenoiseImage.AddDuration(TmrDenoise.ElapsedTime());
HandleCudaError(cudaFree(pDevScene));
HandleCudaError(cudaFree(pDevView));
} |
sample1_kernel_time.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void Sample1Kernel(float *d_A, float *d_B, float *d_C) {
// Step 1. Compute this thread's CUDA thread ID
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
// Step 2. Use the CUDA thread ID to read data from global memory and compute
d_C[thread_id] = d_A[thread_id] + d_B[thread_id];
}
__host__ void Sample1Host(float *h_A, float *h_B, float *h_C, int length) {
for (int i = 0; i < length; ++i) {
h_C[i] = h_A[i] + h_B[i];
}
}
| sample1_kernel_time.cu | __global__ void Sample1Kernel(float *d_A, float *d_B, float *d_C) {
// Step 1. Compute this thread's CUDA thread ID
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
// Step 2. Use the CUDA thread ID to read data from global memory and compute
d_C[thread_id] = d_A[thread_id] + d_B[thread_id];
}
__host__ void Sample1Host(float *h_A, float *h_B, float *h_C, int length) {
for (int i = 0; i < length; ++i) {
h_C[i] = h_A[i] + h_B[i];
}
}
|
3c7446264c9669d1c60c9c3a46abfd4d9af5cbf5.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <hip/hip_runtime.h>
#include "open3d/core/CUDAUtils.h"
#include "open3d/core/Dispatch.h"
#include "open3d/core/ParallelFor.h"
#include "open3d/core/Tensor.h"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/geometry/kernel/GeometryMacros.h"
#include "open3d/t/pipelines/kernel/RGBDOdometryImpl.h"
#include "open3d/t/pipelines/kernel/RGBDOdometryJacobianImpl.h"
#include "open3d/t/pipelines/kernel/Reduction6x6Impl.cuh"
#include "open3d/t/pipelines/kernel/TransformationConverter.h"
namespace open3d {
namespace t {
namespace pipelines {
namespace kernel {
namespace odometry {
__global__ void ComputeOdometryResultPointToPlaneCUDAKernel(
NDArrayIndexer source_vertex_indexer,
NDArrayIndexer target_vertex_indexer,
NDArrayIndexer target_normal_indexer,
TransformIndexer ti,
float* global_sum,
int rows,
int cols,
const float depth_outlier_trunc,
const float depth_huber_delta) {
const int kBlockSize = 256;
__shared__ float local_sum0[kBlockSize];
__shared__ float local_sum1[kBlockSize];
__shared__ float local_sum2[kBlockSize];
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int tid = threadIdx.x + threadIdx.y * blockDim.x;
local_sum0[tid] = 0;
local_sum1[tid] = 0;
local_sum2[tid] = 0;
if (y >= rows || x >= cols) return;
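// reduction packs the 21 upper-triangular entries of JtJ, the 6 entries of Jtr,
// the Huber residual, and an inlier flag (29 floats, matching the 29-element global_sum).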
float J[6] = {0}, reduction[21 + 6 + 2];
float r = 0;
bool valid = GetJacobianPointToPlane(
x, y, depth_outlier_trunc, source_vertex_indexer,
target_vertex_indexer, target_normal_indexer, ti, J, r);
float d_huber = HuberDeriv(r, depth_huber_delta);
float r_huber = HuberLoss(r, depth_huber_delta);
// Dump J, r into JtJ and Jtr
int offset = 0;
for (int i = 0; i < 6; ++i) {
for (int j = 0; j <= i; ++j) {
reduction[offset++] = J[i] * J[j];
}
}
for (int i = 0; i < 6; ++i) {
reduction[offset++] = J[i] * d_huber;
}
reduction[offset++] = r_huber;
reduction[offset++] = valid;
// Sum reduction: JtJ(21) and Jtr(6)
for (size_t i = 0; i < 27; i += 3) {
local_sum0[tid] = valid ? reduction[i + 0] : 0;
local_sum1[tid] = valid ? reduction[i + 1] : 0;
local_sum2[tid] = valid ? reduction[i + 2] : 0;
__syncthreads();
BlockReduceSum<float, kBlockSize>(tid, local_sum0, local_sum1,
local_sum2);
if (tid == 0) {
atomicAdd(&global_sum[i + 0], local_sum0[0]);
atomicAdd(&global_sum[i + 1], local_sum1[0]);
atomicAdd(&global_sum[i + 2], local_sum2[0]);
}
__syncthreads();
}
// Sum reduction: residual(1) and inlier(1)
{
local_sum0[tid] = valid ? reduction[27] : 0;
local_sum1[tid] = valid ? reduction[28] : 0;
__syncthreads();
BlockReduceSum<float, kBlockSize>(tid, local_sum0, local_sum1);
if (tid == 0) {
atomicAdd(&global_sum[27], local_sum0[0]);
atomicAdd(&global_sum[28], local_sum1[0]);
}
__syncthreads();
}
}
void ComputeOdometryResultPointToPlaneCUDA(
const core::Tensor& source_vertex_map,
const core::Tensor& target_vertex_map,
const core::Tensor& target_normal_map,
const core::Tensor& intrinsics,
const core::Tensor& init_source_to_target,
core::Tensor& delta,
float& inlier_residual,
int& inlier_count,
const float depth_outlier_trunc,
const float depth_huber_delta) {
NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
NDArrayIndexer target_vertex_indexer(target_vertex_map, 2);
NDArrayIndexer target_normal_indexer(target_normal_map, 2);
core::Device device = source_vertex_map.GetDevice();
core::Tensor trans = init_source_to_target;
TransformIndexer ti(intrinsics, trans);
const int64_t rows = source_vertex_indexer.GetShape(0);
const int64_t cols = source_vertex_indexer.GetShape(1);
core::Tensor global_sum = core::Tensor::Zeros({29}, core::Float32, device);
float* global_sum_ptr = global_sum.GetDataPtr<float>();
const int kThreadSize = 16;
const dim3 blocks((cols + kThreadSize - 1) / kThreadSize,
(rows + kThreadSize - 1) / kThreadSize);
const dim3 threads(kThreadSize, kThreadSize);
hipLaunchKernelGGL(( ComputeOdometryResultPointToPlaneCUDAKernel), dim3(blocks), dim3(threads), 0,
core::cuda::GetStream(),
source_vertex_indexer, target_vertex_indexer, target_normal_indexer,
ti, global_sum_ptr, rows, cols, depth_outlier_trunc,
depth_huber_delta);
core::cuda::Synchronize();
DecodeAndSolve6x6(global_sum, delta, inlier_residual, inlier_count);
}
__global__ void ComputeOdometryResultIntensityCUDAKernel(
NDArrayIndexer source_depth_indexer,
NDArrayIndexer target_depth_indexer,
NDArrayIndexer source_intensity_indexer,
NDArrayIndexer target_intensity_indexer,
NDArrayIndexer target_intensity_dx_indexer,
NDArrayIndexer target_intensity_dy_indexer,
NDArrayIndexer source_vertex_indexer,
TransformIndexer ti,
float* global_sum,
int rows,
int cols,
const float depth_outlier_trunc,
const float intensity_huber_delta) {
const int kBlockSize = 256;
__shared__ float local_sum0[kBlockSize];
__shared__ float local_sum1[kBlockSize];
__shared__ float local_sum2[kBlockSize];
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int tid = threadIdx.x + threadIdx.y * blockDim.x;
local_sum0[tid] = 0;
local_sum1[tid] = 0;
local_sum2[tid] = 0;
if (y >= rows || x >= cols) return;
float J[6] = {0}, reduction[21 + 6 + 2];
float r = 0;
bool valid = GetJacobianIntensity(
x, y, depth_outlier_trunc, source_depth_indexer,
target_depth_indexer, source_intensity_indexer,
target_intensity_indexer, target_intensity_dx_indexer,
target_intensity_dy_indexer, source_vertex_indexer, ti, J, r);
float d_huber = HuberDeriv(r, intensity_huber_delta);
float r_huber = HuberLoss(r, intensity_huber_delta);
// Dump J, r into JtJ and Jtr
int offset = 0;
for (int i = 0; i < 6; ++i) {
for (int j = 0; j <= i; ++j) {
reduction[offset++] = J[i] * J[j];
}
}
for (int i = 0; i < 6; ++i) {
reduction[offset++] = J[i] * d_huber;
}
reduction[offset++] = r_huber;
reduction[offset++] = valid;
ReduceSum6x6LinearSystem<float, kBlockSize>(tid, valid, reduction,
local_sum0, local_sum1,
local_sum2, global_sum);
}
void ComputeOdometryResultIntensityCUDA(
const core::Tensor& source_depth,
const core::Tensor& target_depth,
const core::Tensor& source_intensity,
const core::Tensor& target_intensity,
const core::Tensor& target_intensity_dx,
const core::Tensor& target_intensity_dy,
const core::Tensor& source_vertex_map,
const core::Tensor& intrinsics,
const core::Tensor& init_source_to_target,
core::Tensor& delta,
float& inlier_residual,
int& inlier_count,
const float depth_outlier_trunc,
const float intensity_huber_delta) {
NDArrayIndexer source_depth_indexer(source_depth, 2);
NDArrayIndexer target_depth_indexer(target_depth, 2);
NDArrayIndexer source_intensity_indexer(source_intensity, 2);
NDArrayIndexer target_intensity_indexer(target_intensity, 2);
NDArrayIndexer target_intensity_dx_indexer(target_intensity_dx, 2);
NDArrayIndexer target_intensity_dy_indexer(target_intensity_dy, 2);
NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
core::Device device = source_vertex_map.GetDevice();
core::Tensor trans = init_source_to_target;
t::geometry::kernel::TransformIndexer ti(intrinsics, trans);
const int64_t rows = source_vertex_indexer.GetShape(0);
const int64_t cols = source_vertex_indexer.GetShape(1);
core::Tensor global_sum = core::Tensor::Zeros({29}, core::Float32, device);
float* global_sum_ptr = global_sum.GetDataPtr<float>();
const int kThreadSize = 16;
const dim3 blocks((cols + kThreadSize - 1) / kThreadSize,
(rows + kThreadSize - 1) / kThreadSize);
const dim3 threads(kThreadSize, kThreadSize);
hipLaunchKernelGGL(( ComputeOdometryResultIntensityCUDAKernel), dim3(blocks), dim3(threads), 0,
core::cuda::GetStream(),
source_depth_indexer, target_depth_indexer,
source_intensity_indexer, target_intensity_indexer,
target_intensity_dx_indexer, target_intensity_dy_indexer,
source_vertex_indexer, ti, global_sum_ptr, rows, cols,
depth_outlier_trunc, intensity_huber_delta);
core::cuda::Synchronize();
DecodeAndSolve6x6(global_sum, delta, inlier_residual, inlier_count);
}
__global__ void ComputeOdometryResultHybridCUDAKernel(
NDArrayIndexer source_depth_indexer,
NDArrayIndexer target_depth_indexer,
NDArrayIndexer source_intensity_indexer,
NDArrayIndexer target_intensity_indexer,
NDArrayIndexer target_depth_dx_indexer,
NDArrayIndexer target_depth_dy_indexer,
NDArrayIndexer target_intensity_dx_indexer,
NDArrayIndexer target_intensity_dy_indexer,
NDArrayIndexer source_vertex_indexer,
TransformIndexer ti,
float* global_sum,
int rows,
int cols,
const float depth_outlier_trunc,
const float depth_huber_delta,
const float intensity_huber_delta) {
const int kBlockSize = 256;
__shared__ float local_sum0[kBlockSize];
__shared__ float local_sum1[kBlockSize];
__shared__ float local_sum2[kBlockSize];
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int tid = threadIdx.x + threadIdx.y * blockDim.x;
local_sum0[tid] = 0;
local_sum1[tid] = 0;
local_sum2[tid] = 0;
if (y >= rows || x >= cols) return;
float J_I[6] = {0}, J_D[6] = {0}, reduction[21 + 6 + 2];
float r_I = 0, r_D = 0;
bool valid = GetJacobianHybrid(
x, y, depth_outlier_trunc, source_depth_indexer,
target_depth_indexer, source_intensity_indexer,
target_intensity_indexer, target_depth_dx_indexer,
target_depth_dy_indexer, target_intensity_dx_indexer,
target_intensity_dy_indexer, source_vertex_indexer, ti, J_I, J_D,
r_I, r_D);
float d_huber_D = HuberDeriv(r_D, depth_huber_delta);
float d_huber_I = HuberDeriv(r_I, intensity_huber_delta);
float r_huber_D = HuberLoss(r_D, depth_huber_delta);
float r_huber_I = HuberLoss(r_I, intensity_huber_delta);
// Dump J, r into JtJ and Jtr
int offset = 0;
for (int i = 0; i < 6; ++i) {
for (int j = 0; j <= i; ++j) {
reduction[offset++] = J_I[i] * J_I[j] + J_D[i] * J_D[j];
}
}
for (int i = 0; i < 6; ++i) {
reduction[offset++] = J_I[i] * d_huber_I + J_D[i] * d_huber_D;
}
reduction[offset++] = r_huber_D + r_huber_I;
reduction[offset++] = valid;
ReduceSum6x6LinearSystem<float, kBlockSize>(tid, valid, reduction,
local_sum0, local_sum1,
local_sum2, global_sum);
}
void ComputeOdometryResultHybridCUDA(const core::Tensor& source_depth,
const core::Tensor& target_depth,
const core::Tensor& source_intensity,
const core::Tensor& target_intensity,
const core::Tensor& target_depth_dx,
const core::Tensor& target_depth_dy,
const core::Tensor& target_intensity_dx,
const core::Tensor& target_intensity_dy,
const core::Tensor& source_vertex_map,
const core::Tensor& intrinsics,
const core::Tensor& init_source_to_target,
core::Tensor& delta,
float& inlier_residual,
int& inlier_count,
const float depth_outlier_trunc,
const float depth_huber_delta,
const float intensity_huber_delta) {
NDArrayIndexer source_depth_indexer(source_depth, 2);
NDArrayIndexer target_depth_indexer(target_depth, 2);
NDArrayIndexer source_intensity_indexer(source_intensity, 2);
NDArrayIndexer target_intensity_indexer(target_intensity, 2);
NDArrayIndexer target_depth_dx_indexer(target_depth_dx, 2);
NDArrayIndexer target_depth_dy_indexer(target_depth_dy, 2);
NDArrayIndexer target_intensity_dx_indexer(target_intensity_dx, 2);
NDArrayIndexer target_intensity_dy_indexer(target_intensity_dy, 2);
NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
core::Device device = source_vertex_map.GetDevice();
core::Tensor trans = init_source_to_target;
t::geometry::kernel::TransformIndexer ti(intrinsics, trans);
const int64_t rows = source_vertex_indexer.GetShape(0);
const int64_t cols = source_vertex_indexer.GetShape(1);
core::Tensor global_sum = core::Tensor::Zeros({29}, core::Float32, device);
float* global_sum_ptr = global_sum.GetDataPtr<float>();
const int kThreadSize = 16;
const dim3 blocks((cols + kThreadSize - 1) / kThreadSize,
(rows + kThreadSize - 1) / kThreadSize);
const dim3 threads(kThreadSize, kThreadSize);
hipLaunchKernelGGL(( ComputeOdometryResultHybridCUDAKernel), dim3(blocks), dim3(threads), 0,
core::cuda::GetStream(),
source_depth_indexer, target_depth_indexer,
source_intensity_indexer, target_intensity_indexer,
target_depth_dx_indexer, target_depth_dy_indexer,
target_intensity_dx_indexer, target_intensity_dy_indexer,
source_vertex_indexer, ti, global_sum_ptr, rows, cols,
depth_outlier_trunc, depth_huber_delta, intensity_huber_delta);
core::cuda::Synchronize();
DecodeAndSolve6x6(global_sum, delta, inlier_residual, inlier_count);
}
} // namespace odometry
} // namespace kernel
} // namespace pipelines
} // namespace t
} // namespace open3d
| 3c7446264c9669d1c60c9c3a46abfd4d9af5cbf5.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <cuda.h>
#include "open3d/core/CUDAUtils.h"
#include "open3d/core/Dispatch.h"
#include "open3d/core/ParallelFor.h"
#include "open3d/core/Tensor.h"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/geometry/kernel/GeometryMacros.h"
#include "open3d/t/pipelines/kernel/RGBDOdometryImpl.h"
#include "open3d/t/pipelines/kernel/RGBDOdometryJacobianImpl.h"
#include "open3d/t/pipelines/kernel/Reduction6x6Impl.cuh"
#include "open3d/t/pipelines/kernel/TransformationConverter.h"
namespace open3d {
namespace t {
namespace pipelines {
namespace kernel {
namespace odometry {
__global__ void ComputeOdometryResultPointToPlaneCUDAKernel(
NDArrayIndexer source_vertex_indexer,
NDArrayIndexer target_vertex_indexer,
NDArrayIndexer target_normal_indexer,
TransformIndexer ti,
float* global_sum,
int rows,
int cols,
const float depth_outlier_trunc,
const float depth_huber_delta) {
const int kBlockSize = 256;
__shared__ float local_sum0[kBlockSize];
__shared__ float local_sum1[kBlockSize];
__shared__ float local_sum2[kBlockSize];
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int tid = threadIdx.x + threadIdx.y * blockDim.x;
local_sum0[tid] = 0;
local_sum1[tid] = 0;
local_sum2[tid] = 0;
if (y >= rows || x >= cols) return;
float J[6] = {0}, reduction[21 + 6 + 2];
float r = 0;
bool valid = GetJacobianPointToPlane(
x, y, depth_outlier_trunc, source_vertex_indexer,
target_vertex_indexer, target_normal_indexer, ti, J, r);
float d_huber = HuberDeriv(r, depth_huber_delta);
float r_huber = HuberLoss(r, depth_huber_delta);
// Dump J, r into JtJ and Jtr
int offset = 0;
for (int i = 0; i < 6; ++i) {
for (int j = 0; j <= i; ++j) {
reduction[offset++] = J[i] * J[j];
}
}
for (int i = 0; i < 6; ++i) {
reduction[offset++] = J[i] * d_huber;
}
reduction[offset++] = r_huber;
reduction[offset++] = valid;
// Sum reduction: JtJ(21) and Jtr(6)
for (size_t i = 0; i < 27; i += 3) {
local_sum0[tid] = valid ? reduction[i + 0] : 0;
local_sum1[tid] = valid ? reduction[i + 1] : 0;
local_sum2[tid] = valid ? reduction[i + 2] : 0;
__syncthreads();
BlockReduceSum<float, kBlockSize>(tid, local_sum0, local_sum1,
local_sum2);
if (tid == 0) {
atomicAdd(&global_sum[i + 0], local_sum0[0]);
atomicAdd(&global_sum[i + 1], local_sum1[0]);
atomicAdd(&global_sum[i + 2], local_sum2[0]);
}
__syncthreads();
}
// Sum reduction: residual(1) and inlier(1)
{
local_sum0[tid] = valid ? reduction[27] : 0;
local_sum1[tid] = valid ? reduction[28] : 0;
__syncthreads();
BlockReduceSum<float, kBlockSize>(tid, local_sum0, local_sum1);
if (tid == 0) {
atomicAdd(&global_sum[27], local_sum0[0]);
atomicAdd(&global_sum[28], local_sum1[0]);
}
__syncthreads();
}
}
void ComputeOdometryResultPointToPlaneCUDA(
const core::Tensor& source_vertex_map,
const core::Tensor& target_vertex_map,
const core::Tensor& target_normal_map,
const core::Tensor& intrinsics,
const core::Tensor& init_source_to_target,
core::Tensor& delta,
float& inlier_residual,
int& inlier_count,
const float depth_outlier_trunc,
const float depth_huber_delta) {
NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
NDArrayIndexer target_vertex_indexer(target_vertex_map, 2);
NDArrayIndexer target_normal_indexer(target_normal_map, 2);
core::Device device = source_vertex_map.GetDevice();
core::Tensor trans = init_source_to_target;
TransformIndexer ti(intrinsics, trans);
const int64_t rows = source_vertex_indexer.GetShape(0);
const int64_t cols = source_vertex_indexer.GetShape(1);
core::Tensor global_sum = core::Tensor::Zeros({29}, core::Float32, device);
float* global_sum_ptr = global_sum.GetDataPtr<float>();
const int kThreadSize = 16;
const dim3 blocks((cols + kThreadSize - 1) / kThreadSize,
(rows + kThreadSize - 1) / kThreadSize);
const dim3 threads(kThreadSize, kThreadSize);
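    // One 16x16 thread block per image tile; each thread processes a single source pixel.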
ComputeOdometryResultPointToPlaneCUDAKernel<<<blocks, threads, 0,
core::cuda::GetStream()>>>(
source_vertex_indexer, target_vertex_indexer, target_normal_indexer,
ti, global_sum_ptr, rows, cols, depth_outlier_trunc,
depth_huber_delta);
core::cuda::Synchronize();
DecodeAndSolve6x6(global_sum, delta, inlier_residual, inlier_count);
}
__global__ void ComputeOdometryResultIntensityCUDAKernel(
NDArrayIndexer source_depth_indexer,
NDArrayIndexer target_depth_indexer,
NDArrayIndexer source_intensity_indexer,
NDArrayIndexer target_intensity_indexer,
NDArrayIndexer target_intensity_dx_indexer,
NDArrayIndexer target_intensity_dy_indexer,
NDArrayIndexer source_vertex_indexer,
TransformIndexer ti,
float* global_sum,
int rows,
int cols,
const float depth_outlier_trunc,
const float intensity_huber_delta) {
const int kBlockSize = 256;
__shared__ float local_sum0[kBlockSize];
__shared__ float local_sum1[kBlockSize];
__shared__ float local_sum2[kBlockSize];
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int tid = threadIdx.x + threadIdx.y * blockDim.x;
local_sum0[tid] = 0;
local_sum1[tid] = 0;
local_sum2[tid] = 0;
if (y >= rows || x >= cols) return;
float J[6] = {0}, reduction[21 + 6 + 2];
float r = 0;
bool valid = GetJacobianIntensity(
x, y, depth_outlier_trunc, source_depth_indexer,
target_depth_indexer, source_intensity_indexer,
target_intensity_indexer, target_intensity_dx_indexer,
target_intensity_dy_indexer, source_vertex_indexer, ti, J, r);
float d_huber = HuberDeriv(r, intensity_huber_delta);
float r_huber = HuberLoss(r, intensity_huber_delta);
// Dump J, r into JtJ and Jtr
int offset = 0;
for (int i = 0; i < 6; ++i) {
for (int j = 0; j <= i; ++j) {
reduction[offset++] = J[i] * J[j];
}
}
    for (int i = 0; i < 6; ++i) {
        reduction[offset++] = J[i] * d_huber;
    }
    reduction[offset++] = r_huber;
reduction[offset++] = valid;
ReduceSum6x6LinearSystem<float, kBlockSize>(tid, valid, reduction,
local_sum0, local_sum1,
local_sum2, global_sum);
}
void ComputeOdometryResultIntensityCUDA(
const core::Tensor& source_depth,
const core::Tensor& target_depth,
const core::Tensor& source_intensity,
const core::Tensor& target_intensity,
const core::Tensor& target_intensity_dx,
const core::Tensor& target_intensity_dy,
const core::Tensor& source_vertex_map,
const core::Tensor& intrinsics,
const core::Tensor& init_source_to_target,
core::Tensor& delta,
float& inlier_residual,
int& inlier_count,
const float depth_outlier_trunc,
const float intensity_huber_delta) {
NDArrayIndexer source_depth_indexer(source_depth, 2);
NDArrayIndexer target_depth_indexer(target_depth, 2);
NDArrayIndexer source_intensity_indexer(source_intensity, 2);
NDArrayIndexer target_intensity_indexer(target_intensity, 2);
NDArrayIndexer target_intensity_dx_indexer(target_intensity_dx, 2);
NDArrayIndexer target_intensity_dy_indexer(target_intensity_dy, 2);
NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
core::Device device = source_vertex_map.GetDevice();
core::Tensor trans = init_source_to_target;
t::geometry::kernel::TransformIndexer ti(intrinsics, trans);
const int64_t rows = source_vertex_indexer.GetShape(0);
const int64_t cols = source_vertex_indexer.GetShape(1);
core::Tensor global_sum = core::Tensor::Zeros({29}, core::Float32, device);
float* global_sum_ptr = global_sum.GetDataPtr<float>();
const int kThreadSize = 16;
const dim3 blocks((cols + kThreadSize - 1) / kThreadSize,
(rows + kThreadSize - 1) / kThreadSize);
const dim3 threads(kThreadSize, kThreadSize);
ComputeOdometryResultIntensityCUDAKernel<<<blocks, threads, 0,
core::cuda::GetStream()>>>(
source_depth_indexer, target_depth_indexer,
source_intensity_indexer, target_intensity_indexer,
target_intensity_dx_indexer, target_intensity_dy_indexer,
source_vertex_indexer, ti, global_sum_ptr, rows, cols,
depth_outlier_trunc, intensity_huber_delta);
core::cuda::Synchronize();
DecodeAndSolve6x6(global_sum, delta, inlier_residual, inlier_count);
}
__global__ void ComputeOdometryResultHybridCUDAKernel(
NDArrayIndexer source_depth_indexer,
NDArrayIndexer target_depth_indexer,
NDArrayIndexer source_intensity_indexer,
NDArrayIndexer target_intensity_indexer,
NDArrayIndexer target_depth_dx_indexer,
NDArrayIndexer target_depth_dy_indexer,
NDArrayIndexer target_intensity_dx_indexer,
NDArrayIndexer target_intensity_dy_indexer,
NDArrayIndexer source_vertex_indexer,
TransformIndexer ti,
float* global_sum,
int rows,
int cols,
const float depth_outlier_trunc,
const float depth_huber_delta,
const float intensity_huber_delta) {
const int kBlockSize = 256;
__shared__ float local_sum0[kBlockSize];
__shared__ float local_sum1[kBlockSize];
__shared__ float local_sum2[kBlockSize];
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int tid = threadIdx.x + threadIdx.y * blockDim.x;
local_sum0[tid] = 0;
local_sum1[tid] = 0;
local_sum2[tid] = 0;
if (y >= rows || x >= cols) return;
float J_I[6] = {0}, J_D[6] = {0}, reduction[21 + 6 + 2];
float r_I = 0, r_D = 0;
bool valid = GetJacobianHybrid(
x, y, depth_outlier_trunc, source_depth_indexer,
target_depth_indexer, source_intensity_indexer,
target_intensity_indexer, target_depth_dx_indexer,
target_depth_dy_indexer, target_intensity_dx_indexer,
target_intensity_dy_indexer, source_vertex_indexer, ti, J_I, J_D,
r_I, r_D);
float d_huber_D = HuberDeriv(r_D, depth_huber_delta);
float d_huber_I = HuberDeriv(r_I, intensity_huber_delta);
float r_huber_D = HuberLoss(r_D, depth_huber_delta);
float r_huber_I = HuberLoss(r_I, intensity_huber_delta);
// Dump J, r into JtJ and Jtr
int offset = 0;
for (int i = 0; i < 6; ++i) {
for (int j = 0; j <= i; ++j) {
reduction[offset++] = J_I[i] * J_I[j] + J_D[i] * J_D[j];
}
}
for (int i = 0; i < 6; ++i) {
reduction[offset++] = J_I[i] * d_huber_I + J_D[i] * d_huber_D;
}
reduction[offset++] = r_huber_D + r_huber_I;
reduction[offset++] = valid;
ReduceSum6x6LinearSystem<float, kBlockSize>(tid, valid, reduction,
local_sum0, local_sum1,
local_sum2, global_sum);
}
void ComputeOdometryResultHybridCUDA(const core::Tensor& source_depth,
const core::Tensor& target_depth,
const core::Tensor& source_intensity,
const core::Tensor& target_intensity,
const core::Tensor& target_depth_dx,
const core::Tensor& target_depth_dy,
const core::Tensor& target_intensity_dx,
const core::Tensor& target_intensity_dy,
const core::Tensor& source_vertex_map,
const core::Tensor& intrinsics,
const core::Tensor& init_source_to_target,
core::Tensor& delta,
float& inlier_residual,
int& inlier_count,
const float depth_outlier_trunc,
const float depth_huber_delta,
const float intensity_huber_delta) {
NDArrayIndexer source_depth_indexer(source_depth, 2);
NDArrayIndexer target_depth_indexer(target_depth, 2);
NDArrayIndexer source_intensity_indexer(source_intensity, 2);
NDArrayIndexer target_intensity_indexer(target_intensity, 2);
NDArrayIndexer target_depth_dx_indexer(target_depth_dx, 2);
NDArrayIndexer target_depth_dy_indexer(target_depth_dy, 2);
NDArrayIndexer target_intensity_dx_indexer(target_intensity_dx, 2);
NDArrayIndexer target_intensity_dy_indexer(target_intensity_dy, 2);
NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
core::Device device = source_vertex_map.GetDevice();
core::Tensor trans = init_source_to_target;
t::geometry::kernel::TransformIndexer ti(intrinsics, trans);
const int64_t rows = source_vertex_indexer.GetShape(0);
const int64_t cols = source_vertex_indexer.GetShape(1);
core::Tensor global_sum = core::Tensor::Zeros({29}, core::Float32, device);
float* global_sum_ptr = global_sum.GetDataPtr<float>();
const int kThreadSize = 16;
const dim3 blocks((cols + kThreadSize - 1) / kThreadSize,
(rows + kThreadSize - 1) / kThreadSize);
const dim3 threads(kThreadSize, kThreadSize);
ComputeOdometryResultHybridCUDAKernel<<<blocks, threads, 0,
core::cuda::GetStream()>>>(
source_depth_indexer, target_depth_indexer,
source_intensity_indexer, target_intensity_indexer,
target_depth_dx_indexer, target_depth_dy_indexer,
target_intensity_dx_indexer, target_intensity_dy_indexer,
source_vertex_indexer, ti, global_sum_ptr, rows, cols,
depth_outlier_trunc, depth_huber_delta, intensity_huber_delta);
core::cuda::Synchronize();
DecodeAndSolve6x6(global_sum, delta, inlier_residual, inlier_count);
}
} // namespace odometry
} // namespace kernel
} // namespace pipelines
} // namespace t
} // namespace open3d
|
c146882c9cade2f7d36292cf3926c1a84a7944dc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "setStatisticAtLast.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int dim = 1;
const int nwl = 1;
const float *lst = NULL;
hipMalloc(&lst, XSIZE*YSIZE);
float *stt = NULL;
hipMalloc(&stt, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
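// iXSIZE and iYSIZE are now multiples of the block dimensions, so the grid covers the whole matrix.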
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
setStatisticAtLast), dim3(gridBlock),dim3(threadBlock), 0, 0, dim,nwl,lst,stt);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
setStatisticAtLast), dim3(gridBlock),dim3(threadBlock), 0, 0, dim,nwl,lst,stt);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
setStatisticAtLast), dim3(gridBlock),dim3(threadBlock), 0, 0, dim,nwl,lst,stt);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c146882c9cade2f7d36292cf3926c1a84a7944dc.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "setStatisticAtLast.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int dim = 1;
const int nwl = 1;
const float *lst = NULL;
cudaMalloc(&lst, XSIZE*YSIZE);
float *stt = NULL;
cudaMalloc(&stt, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
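// The padded sizes are multiples of BLOCKX/BLOCKY, so gridBlock covers the full matrix.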
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
setStatisticAtLast<<<gridBlock,threadBlock>>>(dim,nwl,lst,stt);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
setStatisticAtLast<<<gridBlock,threadBlock>>>(dim,nwl,lst,stt);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
setStatisticAtLast<<<gridBlock,threadBlock>>>(dim,nwl,lst,stt);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
940dbdfe5000a375b681a0ca2eee7b29edf5a414.hip | // !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include <hipcub/hipcub.hpp>
#include <detail/masked_mult.cuh>
#include <masked_spgemm.h>
#include <spgemm.h>
#include <random>
using index_type = unsigned int;
using b_mat = std::vector<std::vector<bool>>;
std::pair<std::vector<index_type>, std::vector<index_type>> dense_to_csr(const b_mat& matrix) {
std::vector<index_type> col_index;
std::vector<index_type> row_index;
index_type size = 0;
for (const auto& row : matrix) {
row_index.push_back(size);
for (unsigned int i = 0; i < row.size(); i++) {
if (row[i]) {
col_index.push_back(i);
size++;
}
}
}
row_index.push_back(size);
return {col_index, row_index};
}
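// Illustrative example (not used by the tests): the 2x3 matrix
//   1 0 1
//   0 1 0
// yields col_index = {0, 2, 1} and row_index = {0, 2, 3}.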
b_mat matrix_generator(size_t rows, size_t cols, float density);
template <typename T>
std::vector<T> value_generator(size_t size, T min, T max) {
std::mt19937 gen;
std::uniform_int_distribution<T> urd(min, max);
std::vector<T> values;
for (auto i = 0; i < size; i++) {
values.push_back(urd(gen));
}
return values;
}
b_mat mult(const b_mat& a, const b_mat& b);
b_mat sum(const b_mat& a, const b_mat& b);
template <typename T>
std::ostream& operator<<(std::ostream& os, const thrust::device_vector<T> vec) {
for (auto i = 0; i < vec.size(); i++) {
os << vec[i] << " ";
}
return os;
}
nsparse::matrix<bool, index_type> dense_to_gpu_csr(const b_mat& matrix) {
auto m = dense_to_csr(matrix);
return nsparse::matrix<bool, index_type>(m.first, m.second, matrix.size(), matrix[0].size(),
m.second.back());
}
class NsparseCountNonZeroTest : public testing::Test {
protected:
static void eval(const b_mat& c, const b_mat& a, const b_mat& b) {
b_mat r = sum(c, mult(a, b));
auto sprsA = dense_to_csr(a);
auto sprsB = dense_to_csr(b);
auto sprsC = dense_to_csr(c);
auto sprsR = dense_to_csr(r);
nsparse::matrix<bool, index_type> A(sprsA.first, sprsA.second, a.size(), a[0].size(),
sprsA.second.back());
nsparse::matrix<bool, index_type> B(sprsB.first, sprsB.second, b.size(), b[0].size(),
sprsB.second.back());
nsparse::matrix<bool, index_type> C(sprsC.first, sprsC.second, c.size(), c[0].size(),
sprsC.second.back());
nsparse::spgemm_functor_t<bool, index_type> spgemm_functor;
auto res = spgemm_functor(C, A, B);
ASSERT_EQ(sprsR.second, res.m_row_index);
ASSERT_EQ(sprsR.first, res.m_col_index);
}
};
template <typename value_type>
void test_masked(const b_mat& a, const b_mat& b) {
const b_mat c = mult(a, b);
auto sprsA = dense_to_csr(a);
auto sprsB = dense_to_csr(b);
auto sprsC = dense_to_csr(c);
auto maxH = 1025;
constexpr value_type zero = std::numeric_limits<value_type>::max();
std::vector<value_type> a_values = value_generator<value_type>(sprsA.first.size(), 0, maxH);
std::vector<value_type> b_values = value_generator<value_type>(sprsB.first.size(), 0, maxH);
std::for_each(a_values.begin(), a_values.end(),
[](value_type& elem) { elem <<= sizeof(value_type) * 8 / 2; });
std::for_each(b_values.begin(), b_values.end(),
[](value_type& elem) { elem <<= sizeof(value_type) * 8 / 2; });
std::vector<value_type> expected_c_values(sprsC.first.size(), zero);
{
auto rows = a.size();
for (auto row = 0; row < rows; row++) {
auto c_row_begin = sprsC.second[row];
auto c_row_end = sprsC.second[row + 1];
auto a_row_begin = sprsA.second[row];
auto a_row_end = sprsA.second[row + 1];
for (auto i = a_row_begin; i < a_row_end; i++) {
auto a_col = sprsA.first[i];
auto a_value = a_values[i];
if (a_value == zero)
continue;
auto b_row_begin = sprsB.second[a_col];
auto b_row_end = sprsB.second[a_col + 1];
for (auto j = b_row_begin; j < b_row_end; j++) {
auto b_col = sprsB.first[j];
auto b_value = b_values[j];
if (b_value == zero)
continue;
value_type mult_res = ::max(a_value, b_value);
mult_res >>= sizeof(value_type) * 8 / 2;
mult_res++;
mult_res <<= sizeof(value_type) * 8 / 2;
mult_res += a_col;
auto it =
std::find(sprsC.first.begin() + c_row_begin, sprsC.first.begin() + c_row_end, b_col);
assert(it != sprsC.first.end());
auto pos = it - sprsC.first.begin();
expected_c_values[pos] = ::min(expected_c_values[pos], mult_res);
}
}
}
}
auto gpu_c = dense_to_gpu_csr(c);
auto gpu_a = dense_to_gpu_csr(a);
auto gpu_b = dense_to_gpu_csr(b);
auto mul = [] __device__(value_type lhs, value_type rhs, index_type a_col) -> value_type {
value_type mult_res = max(lhs, rhs);
mult_res >>= sizeof(value_type) * 8 / 2;
mult_res++;
mult_res <<= sizeof(value_type) * 8 / 2;
mult_res += a_col;
return mult_res;
};
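  // The add functor below emulates a 64-bit atomicMin with an atomicCAS loop so that
  // concurrent threads can combine their partial results.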
auto add = [] __device__(value_type * lhs, value_type rhs) -> void {
static_assert(sizeof(unsigned long long) == sizeof(value_type));
// atomicMin((unsigned long long*)lhs, (unsigned long long)rhs);
unsigned long long int old = (unsigned long long int)(*lhs);
unsigned long long int expected;
do {
expected = old;
old = atomicCAS((unsigned long long int*)lhs, expected,
min((unsigned long long int)rhs, expected));
} while (expected != old);
};
nsparse::masked_matrix<value_type, index_type> masked_a(gpu_a, a_values);
nsparse::masked_matrix<value_type, index_type> masked_b(gpu_b, b_values);
nsparse::masked_matrix<value_type, index_type> masked_c(gpu_c, -1);
nsparse::masked_spgemm_functor_t<value_type, index_type, zero, decltype(mul), decltype(add)>
masked_spgemm(mul, add);
masked_spgemm(masked_c, masked_a, masked_b);
ASSERT_EQ(expected_c_values, masked_c.m_values);
}
TEST_F(NsparseCountNonZeroTest, multMaskedSmall) {
size_t a = 10;
size_t b = 15;
size_t c = 20;
for (float density = 0.01; density <= 1; density += 0.1) {
test_masked<uint64_t>(matrix_generator(a, b, density), matrix_generator(b, c, density));
}
}
TEST_F(NsparseCountNonZeroTest, multMaskedMedium) {
size_t a = 500;
size_t b = 600;
size_t c = 700;
for (float density = 0.01; density <= 0.5; density += 0.1) {
test_masked<uint64_t>(matrix_generator(a, b, density), matrix_generator(b, c, density));
}
}
TEST_F(NsparseCountNonZeroTest, multMaskedBig) {
size_t a = 1000;
size_t b = 1100;
size_t c = 1200;
for (float density = 0.01; density <= 0.2; density += 0.05) {
test_masked<uint64_t>(matrix_generator(a, b, density), matrix_generator(b, c, density));
}
}
TEST_F(NsparseCountNonZeroTest, countNzSmall) {
eval(
{
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
},
{
{0, 1, 0, 0, 1, 0},
{1, 0, 1, 0, 1, 0},
{0, 0, 0, 0, 0, 0},
{0, 1, 1, 0, 0, 0},
},
{
{0, 0, 1, 0, 0},
{1, 0, 1, 0, 1},
{1, 1, 1, 1, 1},
{0, 0, 0, 0, 0},
{0, 1, 0, 0, 0},
{0, 0, 1, 1, 1},
});
}
TEST_F(NsparseCountNonZeroTest, countNzGeneratedSmall) {
size_t a = 100;
size_t b = 150;
size_t c = 200;
for (float density = 0.01; density <= 1; density += 0.01) {
eval(matrix_generator(a, c, density), matrix_generator(a, b, density),
matrix_generator(b, c, density));
}
}
TEST_F(NsparseCountNonZeroTest, countNzGeneratedMedium) {
size_t a = 500;
size_t b = 600;
size_t c = 700;
for (float density = 0.01; density <= 0.5; density += 0.01) {
eval(matrix_generator(a, c, density), matrix_generator(a, b, density),
matrix_generator(b, c, density));
}
}
TEST_F(NsparseCountNonZeroTest, countNzGeneratedBig) {
size_t a = 1000;
size_t b = 1100;
size_t c = 1200;
for (float density = 0.01; density <= 0.2; density += 0.01) {
eval(matrix_generator(a, c, density), matrix_generator(a, b, density),
matrix_generator(b, c, density));
}
}
TEST_F(NsparseCountNonZeroTest, countNzGeneratedGlobalHashTable) {
size_t a = 100;
size_t b = 500;
size_t c = 5000;
eval(matrix_generator(a, c, 0.5), matrix_generator(a, b, 0.5), matrix_generator(b, c, 0.5));
}
| 940dbdfe5000a375b681a0ca2eee7b29edf5a414.cu | #include <gtest/gtest.h>
#include <cub/cub.cuh>
#include <detail/masked_mult.cuh>
#include <masked_spgemm.h>
#include <spgemm.h>
#include <random>
using index_type = unsigned int;
using b_mat = std::vector<std::vector<bool>>;
std::pair<std::vector<index_type>, std::vector<index_type>> dense_to_csr(const b_mat& matrix) {
std::vector<index_type> col_index;
std::vector<index_type> row_index;
index_type size = 0;
for (const auto& row : matrix) {
row_index.push_back(size);
for (unsigned int i = 0; i < row.size(); i++) {
if (row[i]) {
col_index.push_back(i);
size++;
}
}
}
row_index.push_back(size);
return {col_index, row_index};
}
b_mat matrix_generator(size_t rows, size_t cols, float density);
template <typename T>
std::vector<T> value_generator(size_t size, T min, T max) {
std::mt19937 gen;
std::uniform_int_distribution<T> urd(min, max);
std::vector<T> values;
for (auto i = 0; i < size; i++) {
values.push_back(urd(gen));
}
return values;
}
b_mat mult(const b_mat& a, const b_mat& b);
b_mat sum(const b_mat& a, const b_mat& b);
template <typename T>
std::ostream& operator<<(std::ostream& os, const thrust::device_vector<T> vec) {
for (auto i = 0; i < vec.size(); i++) {
os << vec[i] << " ";
}
return os;
}
nsparse::matrix<bool, index_type> dense_to_gpu_csr(const b_mat& matrix) {
auto m = dense_to_csr(matrix);
return nsparse::matrix<bool, index_type>(m.first, m.second, matrix.size(), matrix[0].size(),
m.second.back());
}
class NsparseCountNonZeroTest : public testing::Test {
protected:
static void eval(const b_mat& c, const b_mat& a, const b_mat& b) {
b_mat r = sum(c, mult(a, b));
auto sprsA = dense_to_csr(a);
auto sprsB = dense_to_csr(b);
auto sprsC = dense_to_csr(c);
auto sprsR = dense_to_csr(r);
nsparse::matrix<bool, index_type> A(sprsA.first, sprsA.second, a.size(), a[0].size(),
sprsA.second.back());
nsparse::matrix<bool, index_type> B(sprsB.first, sprsB.second, b.size(), b[0].size(),
sprsB.second.back());
nsparse::matrix<bool, index_type> C(sprsC.first, sprsC.second, c.size(), c[0].size(),
sprsC.second.back());
nsparse::spgemm_functor_t<bool, index_type> spgemm_functor;
auto res = spgemm_functor(C, A, B);
ASSERT_EQ(sprsR.second, res.m_row_index);
ASSERT_EQ(sprsR.first, res.m_col_index);
}
};
template <typename value_type>
void test_masked(const b_mat& a, const b_mat& b) {
const b_mat c = mult(a, b);
auto sprsA = dense_to_csr(a);
auto sprsB = dense_to_csr(b);
auto sprsC = dense_to_csr(c);
auto maxH = 1025;
constexpr value_type zero = std::numeric_limits<value_type>::max();
std::vector<value_type> a_values = value_generator<value_type>(sprsA.first.size(), 0, maxH);
std::vector<value_type> b_values = value_generator<value_type>(sprsB.first.size(), 0, maxH);
std::for_each(a_values.begin(), a_values.end(),
[](value_type& elem) { elem <<= sizeof(value_type) * 8 / 2; });
std::for_each(b_values.begin(), b_values.end(),
[](value_type& elem) { elem <<= sizeof(value_type) * 8 / 2; });
std::vector<value_type> expected_c_values(sprsC.first.size(), zero);
{
auto rows = a.size();
for (auto row = 0; row < rows; row++) {
auto c_row_begin = sprsC.second[row];
auto c_row_end = sprsC.second[row + 1];
auto a_row_begin = sprsA.second[row];
auto a_row_end = sprsA.second[row + 1];
for (auto i = a_row_begin; i < a_row_end; i++) {
auto a_col = sprsA.first[i];
auto a_value = a_values[i];
if (a_value == zero)
continue;
auto b_row_begin = sprsB.second[a_col];
auto b_row_end = sprsB.second[a_col + 1];
for (auto j = b_row_begin; j < b_row_end; j++) {
auto b_col = sprsB.first[j];
auto b_value = b_values[j];
if (b_value == zero)
continue;
value_type mult_res = std::max(a_value, b_value);
mult_res >>= sizeof(value_type) * 8 / 2;
mult_res++;
mult_res <<= sizeof(value_type) * 8 / 2;
mult_res += a_col;
auto it =
std::find(sprsC.first.begin() + c_row_begin, sprsC.first.begin() + c_row_end, b_col);
assert(it != sprsC.first.end());
auto pos = it - sprsC.first.begin();
expected_c_values[pos] = std::min(expected_c_values[pos], mult_res);
}
}
}
}
auto gpu_c = dense_to_gpu_csr(c);
auto gpu_a = dense_to_gpu_csr(a);
auto gpu_b = dense_to_gpu_csr(b);
auto mul = [] __device__(value_type lhs, value_type rhs, index_type a_col) -> value_type {
value_type mult_res = max(lhs, rhs);
mult_res >>= sizeof(value_type) * 8 / 2;
mult_res++;
mult_res <<= sizeof(value_type) * 8 / 2;
mult_res += a_col;
return mult_res;
};
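  // The add functor below implements a 64-bit atomic minimum via an atomicCAS loop
  // (replacing the commented-out atomicMin call).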
auto add = [] __device__(value_type * lhs, value_type rhs) -> void {
static_assert(sizeof(unsigned long long) == sizeof(value_type));
// atomicMin((unsigned long long*)lhs, (unsigned long long)rhs);
unsigned long long int old = (unsigned long long int)(*lhs);
unsigned long long int expected;
do {
expected = old;
old = atomicCAS((unsigned long long int*)lhs, expected,
min((unsigned long long int)rhs, expected));
} while (expected != old);
};
nsparse::masked_matrix<value_type, index_type> masked_a(gpu_a, a_values);
nsparse::masked_matrix<value_type, index_type> masked_b(gpu_b, b_values);
nsparse::masked_matrix<value_type, index_type> masked_c(gpu_c, -1);
nsparse::masked_spgemm_functor_t<value_type, index_type, zero, decltype(mul), decltype(add)>
masked_spgemm(mul, add);
masked_spgemm(masked_c, masked_a, masked_b);
ASSERT_EQ(expected_c_values, masked_c.m_values);
}
TEST_F(NsparseCountNonZeroTest, multMaskedSmall) {
size_t a = 10;
size_t b = 15;
size_t c = 20;
for (float density = 0.01; density <= 1; density += 0.1) {
test_masked<uint64_t>(matrix_generator(a, b, density), matrix_generator(b, c, density));
}
}
TEST_F(NsparseCountNonZeroTest, multMaskedMedium) {
size_t a = 500;
size_t b = 600;
size_t c = 700;
for (float density = 0.01; density <= 0.5; density += 0.1) {
test_masked<uint64_t>(matrix_generator(a, b, density), matrix_generator(b, c, density));
}
}
TEST_F(NsparseCountNonZeroTest, multMaskedBig) {
size_t a = 1000;
size_t b = 1100;
size_t c = 1200;
for (float density = 0.01; density <= 0.2; density += 0.05) {
test_masked<uint64_t>(matrix_generator(a, b, density), matrix_generator(b, c, density));
}
}
TEST_F(NsparseCountNonZeroTest, countNzSmall) {
eval(
{
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
},
{
{0, 1, 0, 0, 1, 0},
{1, 0, 1, 0, 1, 0},
{0, 0, 0, 0, 0, 0},
{0, 1, 1, 0, 0, 0},
},
{
{0, 0, 1, 0, 0},
{1, 0, 1, 0, 1},
{1, 1, 1, 1, 1},
{0, 0, 0, 0, 0},
{0, 1, 0, 0, 0},
{0, 0, 1, 1, 1},
});
}
TEST_F(NsparseCountNonZeroTest, countNzGeneratedSmall) {
size_t a = 100;
size_t b = 150;
size_t c = 200;
for (float density = 0.01; density <= 1; density += 0.01) {
eval(matrix_generator(a, c, density), matrix_generator(a, b, density),
matrix_generator(b, c, density));
}
}
TEST_F(NsparseCountNonZeroTest, countNzGeneratedMedium) {
size_t a = 500;
size_t b = 600;
size_t c = 700;
for (float density = 0.01; density <= 0.5; density += 0.01) {
eval(matrix_generator(a, c, density), matrix_generator(a, b, density),
matrix_generator(b, c, density));
}
}
TEST_F(NsparseCountNonZeroTest, countNzGeneratedBig) {
size_t a = 1000;
size_t b = 1100;
size_t c = 1200;
for (float density = 0.01; density <= 0.2; density += 0.01) {
eval(matrix_generator(a, c, density), matrix_generator(a, b, density),
matrix_generator(b, c, density));
}
}
TEST_F(NsparseCountNonZeroTest, countNzGeneratedGlobalHashTable) {
size_t a = 100;
size_t b = 500;
size_t c = 5000;
eval(matrix_generator(a, c, 0.5), matrix_generator(a, b, 0.5), matrix_generator(b, c, 0.5));
}
|
ce5d5507ce95597e64577544c6e30d03f0412556.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../GaussBlurTex/gpu_blur_tex.h"
#include "../BgSub/gu_sub.h"
#include "../API/api.h"
gpu_error_t gpu_highpass( gpu_context_t *ctx )
{
}
| ce5d5507ce95597e64577544c6e30d03f0412556.cu | #include "cuda.h"
#include "../GaussBlurTex/gpu_blur_tex.h"
#include "../BgSub/gu_sub.h"
#include "../API/api.h"
gpu_error_t gpu_highpass( gpu_context_t *ctx )
{
}
|
c2c9fcad08e7fa867fb952a992269d7466383eea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/zk_conv.hpp"
#include "device_launch_parameters.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
namespace caffe {
template <typename Dtype>
__global__ void backward_bias_gpuwithgroup(const int Batchsize,
const Dtype *top_diff, const int top_rows, const int top_cols, const int top_depth,
Dtype *bias_diff,const int bias_Num){
int thread_id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (thread_id >= bias_Num) return;
const int top_diff_offset = thread_id*top_rows*top_cols;
Dtype gradient = 0;
for (int n = 0; n < Batchsize; n++) {
const int top_offset = n*top_depth*top_cols*top_rows;
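        //The inner loop is manually unrolled by 4 and assumes top_rows*top_cols is a multiple of 4.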
for (int i = 0; i < top_rows*top_cols; i+=4){
gradient += top_diff[top_offset + top_diff_offset + i];
gradient += top_diff[top_offset + top_diff_offset + i+1];
gradient += top_diff[top_offset + top_diff_offset + i+2];
gradient += top_diff[top_offset + top_diff_offset + i+3];
}
}
bias_diff[thread_id] = gradient;
}
/*The backward pass w.r.t. bottom convolves top_diff with the weights rotated by 180 degrees.
 * One thread is launched per bottom element to compute its gradient.
 */
template <typename Dtype>
__global__ void backward_Input_gpuwithgroup(const Dtype *top_diff, const int top_rows, const int top_cols, const int top_depth, const int top_group,
const Dtype *weight, const int weight_rows, const int weight_cols, const int weight_depth,
const int stride, const int pad_rows, const int pad_cols,
const int bottom_rows, const int bottom_cols, const int bottom_depth, const int bottom_group, Dtype *bottom_diff, const int bottom_Number){
int thread_id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (thread_id >= bottom_Number) return;
const int B_col = thread_id%bottom_cols;
const int B_row = (thread_id / bottom_cols) % bottom_rows;
const int B_chanel = (thread_id / bottom_cols / bottom_rows) % bottom_depth;
const int B_batch = thread_id / bottom_cols / bottom_rows / bottom_depth;
    /* bottom_group is the number of channels per input group and
    *top_group the number of channels per output group.
    *First find the group of the current channel: Nth_group = B_chanel / bottom_group;
    *from Nth_group the top_diff offset is top_offset = Nth_group*top_group*top_cols*top_rows;
    *the weight offset is kernel_offset = OD*in_group*kernel_cols*kernel_rows;
    */
int Nth_group = B_chanel / bottom_group;
int Nth_chanel = B_chanel%bottom_group;
const Dtype *top_diff_slice = top_diff + B_batch*top_depth*top_rows*top_cols + Nth_group*top_group*top_cols*top_rows;
const int weight_rotate_offset = Nth_group*top_group*weight_depth*weight_cols*weight_rows + (Nth_chanel + 1)*weight_rows*weight_cols - 1;
int top_C_start = B_col + pad_cols - weight_cols + 1;
int top_C_end = top_C_start + weight_cols;
int top_R_start = B_row + pad_rows - weight_rows + 1;
int top_R_end = top_R_start + weight_rows;
Dtype gradient = 0;
for (int top_Cindex = 0; top_Cindex < top_group; top_Cindex++){
int top_diff_slice_offset = top_Cindex*top_rows*top_cols;
int weight_offset = top_Cindex*weight_depth*weight_cols*weight_rows+weight_rotate_offset;
for (int top_R = top_R_start; top_R < top_R_end; top_R++){
for (int top_C = top_C_start; top_C < top_C_end; top_C++){
if (top_R%stride == 0 && top_C%stride == 0){
int top_r = top_R / stride;
int top_c = top_C / stride;
if (top_r >= 0 && top_r < top_rows&&top_c >= 0 && top_c < top_cols){
int topoffset = top_r*top_cols + top_c;
//std::cout << "gradient=" << thread_id << " " << top_diff_slice_offset + topoffset << " * " << weight_offset << endl;
gradient += *(top_diff_slice + top_diff_slice_offset + topoffset)*(*(weight + weight_offset));
}
}
weight_offset--;
}
}
}
bottom_diff[thread_id]=gradient;
}
///The gradient w.r.t. the weights is the convolution of the input with the output gradient.
//It is computed by launching one thread per element of the weight gradient.
/*Handles an arbitrary group count.*/
//num is the batch size.
template <typename Dtype>
__global__ void backward_weight_gpuwithgroup(const int num, const Dtype *inputdata, const int in_group, const int in_rows, const int in_cols, const int in_depth,
const Dtype *output_diff, const int out_group, const int out_rows, const int out_cols, const int out_depth,
const int stride, const int pad_rows, const int pad_cols,
const int weight_rows, const int weight_cols, const int weight_depth, Dtype *weight_diff, const int weight_Number){
int thread_id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (thread_id >= weight_Number) return;
    ///The weights are laid out as NxMxKxK: N output channels, M input channels, K kernel height/width.
    const int K_col = thread_id % weight_cols; //kernel column
    const int K_row = (thread_id / weight_cols) % weight_rows; //kernel row
    const int K_chanel = (thread_id / weight_cols / weight_rows) % weight_depth; //kernel channel, i.e. the input channel
    const int K_N = thread_id / weight_cols / weight_rows / weight_depth; //kernel index, i.e. the output channel
    /* in_group is the number of channels per input group and
    *out_group the number of channels per output group.
    *First find the group of the current output channel: Nth_group = K_N / out_group;
    *from Nth_group the inputdata offset is in_offset = Nth_group*in_group*in_cols*in_rows;
    *the weight offset is kernel_offset = OD*in_group*kernel_cols*kernel_rows;
    */
const int Nth_group = K_N / out_group;
const int input_offset = in_rows*in_cols*in_depth;
const int out_offset = out_rows*out_cols*out_depth;
//const int input_row_start = K_row*stride - pad_rows;
Dtype gradient = 0;
for (int n = 0; n < num; n++){
        const Dtype* const inputdata_slice = inputdata + n*input_offset + K_chanel*in_cols*in_rows + Nth_group*in_group*in_cols*in_rows; //input offset for the n-th sample of the batch
        const Dtype* const outputdata_slice = output_diff + n*out_offset + K_N*out_cols*out_rows; //output offset
for (int out_R = 0; out_R < out_rows; out_R++){
const int in_R = out_R*stride + K_row - pad_rows;
if (in_R >= 0 && in_R<in_rows){
for (int out_C = 0; out_C < out_cols; out_C++){
const int in_C = out_C*stride + K_col - pad_cols;
if (in_C >= 0 && in_C<in_cols){
int in_index = in_R*in_cols + in_C;
int out_index = out_R*out_cols + out_C;
gradient += inputdata_slice[in_index] * outputdata_slice[out_index];
}
}
}
}
}
weight_diff[thread_id] = gradient;
}
/*Computes the convolution with one thread per output element, without using im2col;
 *this version also supports grouped convolution. Required parameters:
 * inputdata    input data
 * in_group     number of channels per input group
 * in_rows      input image height
 * in_cols      input image width
 * in_depth     total number of input channels
 * weight       convolution weights
 * weight_rows  kernel height
 * weight_cols  kernel width
 * stride       stride
 * pad_rows     padding in height
 * pad_cols     padding in width
 * biasexist    whether a bias term is present
 * biasdata     bias data
 * out_group    number of channels per output group
 * out_rows     output image height
 * out_cols     output image width
 * out_depth    total number of output channels
 * outputdata   output data
 * out_Number   total number of output elements
 */
template <typename Dtype>
__global__ void forward_convwithgroup(const Dtype *inputdata, const int in_group, const int in_rows, const int in_cols, const int in_depth,
const Dtype *weight, const int weight_rows, const int weight_cols, const int stride, const int pad_rows, const int pad_cols, bool biasexist, const Dtype *biasdata,
const int out_group, const int out_rows, const int out_cols, const int out_depth, Dtype *outputdata, const int out_Number){
int thread_id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (thread_id >= out_Number) return;
const int OC = thread_id % out_cols;//width
const int OR = (thread_id / out_cols) % out_rows;//height
const int OD = (thread_id / out_cols / out_rows) % out_depth;//channel
const int OB = thread_id / out_cols / out_rows / out_depth;//batch size
const int input_row_start = OR * stride - pad_rows;
const int input_col_start = OC * stride - pad_cols;
const int input_row_end = input_row_start + weight_rows;
const int input_col_end = input_col_start + weight_cols;
    /* in_group is the number of channels per input group and
    *out_group the number of channels per output group.
    *First find the group of the current output channel: Nth_group = OD / out_group;
    *from Nth_group the inputdata offset is in_offset = Nth_group*in_group*in_cols*in_rows;
    *the weight offset is kernel_offset = OD*in_group*kernel_cols*kernel_rows;
    */
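    /*Illustrative example (assumed sizes, not taken from the layer): with in_depth = 4, out_depth = 6
    *and group = 2, in_group = 2 and out_group = 3; output channel OD = 4 falls in Nth_group = 4 / 3 = 1,
    *so this thread reads input channels 2..3 and the filter weights starting at 4*2*weight_rows*weight_cols.
    */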
int Nth_group = OD / out_group;
const int inputoffset = OB*in_depth*in_rows*in_cols + Nth_group*in_group*in_cols*in_rows; // +OD*in_cols*in_rows;// +input_row_start*in_cols + input_col_start;
const int kerneloffset = OD*in_group*weight_cols*weight_rows;
Dtype sum = 0.f;
//#pragma unroll
for (int W_C = 0; W_C < in_group; W_C++){
int InputOffset = inputoffset + W_C*in_rows*in_cols;
int KernelOffset = kerneloffset + W_C*weight_cols*weight_rows;
//#pragma unroll
for (int W_H = 0; W_H < weight_rows; W_H++){
int in_r = input_row_start + W_H;
//#pragma unroll
for (int W_W = 0; W_W < weight_cols; W_W++){
int in_c = input_col_start + W_W;
if (in_r >= 0 && in_r < in_rows && in_c >= 0 && in_c < in_cols) {
int input_offset = InputOffset + in_r*in_cols + in_c;
int weight_offset = KernelOffset + W_H*weight_cols + W_W;
sum += (*(inputdata + input_offset))*(*(weight + weight_offset));
}
}
}
}
if (biasexist) outputdata[thread_id] = sum + biasdata[OD];
else outputdata[thread_id] = sum;
}
// template <>
// void gpu_convwithgroup<float>(const float *inputdata, const int in_group, const int in_rows, const int in_cols, const int in_depth,
// const float *weight, const int weight_rows, const int weight_cols,
// const int stride, const int pad_rows, const int pad_cols,
// bool biasexist, const float *biasdata,
// const int out_group, const int out_rows, const int out_cols, const int out_depth, float *outputdata, const int out_Number){
// int thread_id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
// if (thread_id >= out_Number) return;
// const int OC = thread_id % out_cols;//width
// const int OR = (thread_id / out_cols) % out_rows;//height
// const int OD = (thread_id / out_cols / out_rows) % out_depth;//channel
// const int OB = thread_id / out_cols / out_rows / out_depth;//batch size
//
// const int input_row_start = OR * stride - pad_rows;
// const int input_col_start = OC * stride - pad_cols;
// const int input_row_end = input_row_start + weight_rows;
// const int input_col_end = input_col_start + weight_cols;
//
// /* in_group is the number of channels per input group and
// *out_group the number of channels per output group.
// *First find the group of the current output channel: Nth_group = OD / out_group;
// *from Nth_group the inputdata offset is in_offset = Nth_group*in_group*in_cols*in_rows;
// *the weight offset is kernel_offset = OD*in_group*kernel_cols*kernel_rows;
// */
// int Nth_group = OD / out_group;
// const int inputoffset = OB*in_depth*in_rows*in_cols + Nth_group*in_group*in_cols*in_rows; // +OD*in_cols*in_rows;// +input_row_start*in_cols + input_col_start;
// const int kerneloffset = OD*in_group*weight_cols*weight_rows;
//
// float sum = 0.f;
////#pragma unroll
// for (int W_C = 0; W_C < in_group; W_C++){
// int InputOffset = inputoffset + W_C*in_rows*in_cols;
// int KernelOffset = kerneloffset + W_C*weight_cols*weight_rows;
////#pragma unroll
// for (int W_H = 0; W_H < weight_rows; W_H++){
//
// int in_r = input_row_start + W_H;
////#pragma unroll
// for (int W_W = 0; W_W < weight_cols; W_W++){
// int in_c = input_col_start + W_W;
// if (in_r >= 0 && in_r < in_rows && in_c >= 0 && in_c < in_cols) {
// int input_offset = InputOffset + in_r*in_cols + in_c;
// int weight_offset = KernelOffset + W_H*weight_cols + W_W;
// sum += (*(inputdata + input_offset))*(*(weight + weight_offset));
// }
//
// }
// }
// }
// if (biasexist) outputdata[thread_id] = sum + biasdata[OD];
// else outputdata[thread_id] = sum;
// }
// template <>
// void gpu_convwithgroup<double>(const double *inputdata, const int in_group, const int in_rows, const int in_cols, const int in_depth,
// const double *weight, const int weight_rows, const int weight_cols, const int stride, const int pad_rows, const int pad_cols, bool biasexist, const double *biasdata,
// const int out_group, const int out_rows, const int out_cols, const int out_depth, double *outputdata, const int out_Number){
// int thread_id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
// if (thread_id >= out_Number) return;
// const int OC = thread_id % out_cols;//width
// const int OR = (thread_id / out_cols) % out_rows;//height
// const int OD = (thread_id / out_cols / out_rows) % out_depth;//channel
// const int OB = thread_id / out_cols / out_rows / out_depth;//batch size
//
// const int input_row_start = OR * stride - pad_rows;
// const int input_col_start = OC * stride - pad_cols;
// const int input_row_end = input_row_start + weight_rows;
// const int input_col_end = input_col_start + weight_cols;
//
// /* in_group is the number of channels per input group and
// *out_group the number of channels per output group.
// *First find the group of the current output channel: Nth_group = OD / out_group;
// *from Nth_group the inputdata offset is in_offset = Nth_group*in_group*in_cols*in_rows;
// *the weight offset is kernel_offset = OD*in_group*kernel_cols*kernel_rows;
// */
// int Nth_group = OD / out_group;
// const int inputoffset = OB*in_depth*in_rows*in_cols + Nth_group*in_group*in_cols*in_rows; // +OD*in_cols*in_rows;// +input_row_start*in_cols + input_col_start;
// const int kerneloffset = OD*in_group*weight_cols*weight_rows;
//
// double sum = 0.f;
////#pragma unroll
// for (int W_C = 0; W_C < in_group; W_C++){
// int InputOffset = inputoffset + W_C*in_rows*in_cols;
// int KernelOffset = kerneloffset + W_C*weight_cols*weight_rows;
////#pragma unroll
// for (int W_H = 0; W_H < weight_rows; W_H++){
//
// int in_r = input_row_start + W_H;
////#pragma unroll
// for (int W_W = 0; W_W < weight_cols; W_W++){
// int in_c = input_col_start + W_W;
// if (in_r >= 0 && in_r < in_rows && in_c >= 0 && in_c < in_cols) {
// int input_offset = InputOffset + in_r*in_cols + in_c;
// int weight_offset = KernelOffset + W_H*weight_cols + W_W;
// sum += (*(inputdata + input_offset))*(*(weight + weight_offset));
// }
//
// }
// }
// }
// if (biasexist) outputdata[thread_id] = sum + biasdata[OD];
// else outputdata[thread_id] = sum;
// };
template <typename Dtype>
void ZKConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int group = this->group_;
const Dtype* weight = this->blobs_[0]->gpu_data();
const int weight_rows = this->blobs_[0]->height();
const int weight_cols = this->blobs_[0]->width();
bool biasexist = this->bias_term_;
const Dtype *biasdata = NULL;
if (biasexist) biasdata = this->blobs_[1]->gpu_data();
int* stride_data = this->stride_.mutable_cpu_data();
const int stride = stride_data[0];
int *pad_data = this->pad_.mutable_cpu_data();
const int pad_rows = pad_data[0];
const int pad_cols = pad_data[1];
for (int i = 0; i < bottom.size(); ++i) {
        const Dtype* bottom_data = bottom[i]->gpu_data(); //input data
        vector<int> in_shape_ = bottom[i]->shape();
        const int in_channels_ = in_shape_[1]; //total number of input channels
        const int in_group = in_channels_ / group; //channels per input group
        const int in_height_ = in_shape_[2]; //input height
        const int in_width_ = in_shape_[3]; //input width
        Dtype* top_data = top[i]->mutable_gpu_data(); //output data
        const int count = top[i]->count(); //total number of output elements
        vector<int> outshape_ = top[i]->shape();
        const int outchannels_ = outshape_[1]; //total number of output channels
        const int out_group = outchannels_ / group; //channels per output group
        const int outheight_ = outshape_[2]; //output height
        const int outwidth_ = outshape_[3]; //output width
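        //Launch one thread per output element: count = N * out_channels * out_height * out_width.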
forward_convwithgroup<Dtype> << <cuda_gridsize(count), CAFFE_CUDA_NUM_THREADS >> > (bottom_data, in_group, in_height_,
in_width_, in_channels_, weight, weight_rows, weight_cols, stride, pad_rows, pad_cols, biasexist, biasdata,
out_group, outheight_, outwidth_, outchannels_, top_data, count);
}
}
template <typename Dtype>
void ZKConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int batchSize = this->num_;
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const int weight_num = this->blobs_[0]->count();
const int weight_rows = this->blobs_[0]->height();
const int weight_cols = this->blobs_[0]->width();
const int weight_depth = this->blobs_[0]->channels();
int* stride_data = this->stride_.mutable_cpu_data();
const int stride = stride_data[0];
int *pad_data = this->pad_.mutable_cpu_data();
const int pad_rows = pad_data[0];
const int pad_cols = pad_data[1];
/*const int top_rows, const int top_cols, const int top_depth,
Dtype *bias_diff, const int bias_Num*/
//Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
std::vector<int> outshape_ = top[i]->shape();
const int bias_number = this->conv_out_channels_;
        const int outchannels_ = outshape_[1]; //total number of output channels
        const int out_group = outchannels_ / group_; //channels per output group
        const int outheight_ = outshape_[2]; //output height
        const int outwidth_ = outshape_[3]; //output width
// Bias gradient, if necessary.
if (this->bias_term_ && this->param_propagate_down_[1]){
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
backward_bias_gpuwithgroup<Dtype> << <cuda_gridsize(bias_number), CAFFE_CUDA_NUM_THREADS >> >(batchSize, top_diff, outheight_,
outwidth_, outchannels_, bias_diff, bias_number);
}
if (this->param_propagate_down_[0] || propagate_down[i]){
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
const int bottom_number = bottom[i]->count();
vector<int> in_shape_ = bottom[i]->shape();
            const int in_channels_ = in_shape_[1]; //total number of input channels
            const int in_group = in_channels_ / group_; //channels per input group
            const int in_height_ = in_shape_[2]; //input height
            const int in_width_ = in_shape_[3]; //input width
if (this->param_propagate_down_[0]){
/*backward_weight_gpuwithgroup(const int num, const Dtype *inputdata, const int in_group, const int in_rows, const int in_cols, const int in_depth,
const Dtype *output_diff, const int out_group, const int out_rows, const int out_cols, const int out_depth,
const int stride, const int pad_rows, const int pad_cols,
const int weight_rows, const int weight_cols, const int weight_depth, Dtype *weight_diff, const int weight_Number)*/
backward_weight_gpuwithgroup<Dtype> << < cuda_gridsize(weight_num), CAFFE_CUDA_NUM_THREADS >> > (batchSize,
bottom_data, in_group, in_height_, in_width_, in_channels_,
top_diff, out_group, outheight_, outwidth_, outchannels_,
stride, pad_rows, pad_cols,
weight_rows, weight_cols, weight_depth, weight_diff, weight_num
);
}
/*backward_Input_gpuwithgroup(const Dtype *top_diff, const int top_rows, const int top_cols, const int top_depth, const int top_group,
const Dtype *weight, const int weight_rows, const int weight_cols, const int weight_depth,
const int stride, const int pad_rows, const int pad_cols,
const int bottom_rows, const int bottom_cols, const int bottom_depth, const int bottom_group, Dtype *bottom_diff, const int bottom_Number)*/
if (propagate_down[i]) {
backward_Input_gpuwithgroup<Dtype> << <cuda_gridsize(bottom_number), CAFFE_CUDA_NUM_THREADS >> >(top_diff, outheight_, outwidth_,
outchannels_, out_group, weight, weight_rows, weight_cols, weight_depth, stride, pad_rows, pad_rows,
in_height_, in_width_, in_channels_, in_group, bottom_diff, bottom_number);
}
}
}
}
//template <typename Dtype>
//void ZKConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
// const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
// const Dtype* weight = this->blobs_[0]->gpu_data();
// Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
// for (int i = 0; i < top.size(); ++i) {
// const Dtype* top_diff = top[i]->gpu_diff();
// // Bias gradient, if necessary.
// if (this->bias_term_ && this->param_propagate_down_[1]) {
// Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
// for (int n = 0; n < this->num_; ++n) {
// this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_);
// }
// }
// if (this->param_propagate_down_[0] || propagate_down[i]) {
// const Dtype* bottom_data = bottom[i]->gpu_data();
// Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
// for (int n = 0; n < this->num_; ++n) {
// // gradient w.r.t. weight. Note that we will accumulate diffs.
// if (this->param_propagate_down_[0]) {
// this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_,
// top_diff + n * this->top_dim_, weight_diff);
// }
// // gradient w.r.t. bottom data, if necessary.
// if (propagate_down[i]) {
// this->backward_gpu_gemm(top_diff + n * this->top_dim_, weight,
// bottom_diff + n * this->bottom_dim_);
// }
// }
// }
// }
//}
INSTANTIATE_LAYER_GPU_FUNCS(ZKConvolutionLayer);
} | c2c9fcad08e7fa867fb952a992269d7466383eea.cu | #include <vector>
#include "caffe/layers/zk_conv.hpp"
#include "device_launch_parameters.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
namespace caffe {
template <typename Dtype>
__global__ void backward_bias_gpuwithgroup(const int Batchsize,
const Dtype *top_diff, const int top_rows, const int top_cols, const int top_depth,
Dtype *bias_diff,const int bias_Num){
int thread_id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (thread_id >= bias_Num) return;
const int top_diff_offset = thread_id*top_rows*top_cols;
Dtype gradient = 0;
for (int n = 0; n < Batchsize; n++) {
const int top_offset = n*top_depth*top_cols*top_rows;
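        //The inner loop below is unrolled by 4, so top_rows*top_cols is assumed to be a multiple of 4.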
for (int i = 0; i < top_rows*top_cols; i+=4){
gradient += top_diff[top_offset + top_diff_offset + i];
gradient += top_diff[top_offset + top_diff_offset + i+1];
gradient += top_diff[top_offset + top_diff_offset + i+2];
gradient += top_diff[top_offset + top_diff_offset + i+3];
}
}
bias_diff[thread_id] = gradient;
}
/*The backward pass w.r.t. bottom convolves top_diff with the weights rotated by 180 degrees.
 * One thread is launched per bottom element to compute its gradient.
 */
template <typename Dtype>
__global__ void backward_Input_gpuwithgroup(const Dtype *top_diff, const int top_rows, const int top_cols, const int top_depth, const int top_group,
const Dtype *weight, const int weight_rows, const int weight_cols, const int weight_depth,
const int stride, const int pad_rows, const int pad_cols,
const int bottom_rows, const int bottom_cols, const int bottom_depth, const int bottom_group, Dtype *bottom_diff, const int bottom_Number){
int thread_id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (thread_id >= bottom_Number) return;
const int B_col = thread_id%bottom_cols;
const int B_row = (thread_id / bottom_cols) % bottom_rows;
const int B_chanel = (thread_id / bottom_cols / bottom_rows) % bottom_depth;
const int B_batch = thread_id / bottom_cols / bottom_rows / bottom_depth;
    /* bottom_group is the number of channels per input group and
    *top_group the number of channels per output group.
    *First find the group of the current channel: Nth_group = B_chanel / bottom_group;
    *from Nth_group the top_diff offset is top_offset = Nth_group*top_group*top_cols*top_rows;
    *the weight offset is kernel_offset = OD*in_group*kernel_cols*kernel_rows;
    */
int Nth_group = B_chanel / bottom_group;
int Nth_chanel = B_chanel%bottom_group;
const Dtype *top_diff_slice = top_diff + B_batch*top_depth*top_rows*top_cols + Nth_group*top_group*top_cols*top_rows;
const int weight_rotate_offset = Nth_group*top_group*weight_depth*weight_cols*weight_rows + (Nth_chanel + 1)*weight_rows*weight_cols - 1;
int top_C_start = B_col + pad_cols - weight_cols + 1;
int top_C_end = top_C_start + weight_cols;
int top_R_start = B_row + pad_rows - weight_rows + 1;
int top_R_end = top_R_start + weight_rows;
Dtype gradient = 0;
for (int top_Cindex = 0; top_Cindex < top_group; top_Cindex++){
int top_diff_slice_offset = top_Cindex*top_rows*top_cols;
int weight_offset = top_Cindex*weight_depth*weight_cols*weight_rows+weight_rotate_offset;
for (int top_R = top_R_start; top_R < top_R_end; top_R++){
for (int top_C = top_C_start; top_C < top_C_end; top_C++){
if (top_R%stride == 0 && top_C%stride == 0){
int top_r = top_R / stride;
int top_c = top_C / stride;
if (top_r >= 0 && top_r < top_rows&&top_c >= 0 && top_c < top_cols){
int topoffset = top_r*top_cols + top_c;
//std::cout << "gradient=" << thread_id << " " << top_diff_slice_offset + topoffset << " * " << weight_offset << endl;
gradient += *(top_diff_slice + top_diff_slice_offset + topoffset)*(*(weight + weight_offset));
}
}
weight_offset--;
}
}
}
bottom_diff[thread_id]=gradient;
}
///The gradient w.r.t. the weights is the convolution of the input with the output gradient.
//It is computed by launching one thread per element of the weight gradient.
/*Handles an arbitrary group count.*/
//num is the batch size.
template <typename Dtype>
__global__ void backward_weight_gpuwithgroup(const int num, const Dtype *inputdata, const int in_group, const int in_rows, const int in_cols, const int in_depth,
const Dtype *output_diff, const int out_group, const int out_rows, const int out_cols, const int out_depth,
const int stride, const int pad_rows, const int pad_cols,
const int weight_rows, const int weight_cols, const int weight_depth, Dtype *weight_diff, const int weight_Number){
int thread_id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (thread_id >= weight_Number) return;
    ///The weights are laid out as NxMxKxK: N output channels, M input channels, K kernel height/width.
    const int K_col = thread_id % weight_cols; //kernel column
    const int K_row = (thread_id / weight_cols) % weight_rows; //kernel row
    const int K_chanel = (thread_id / weight_cols / weight_rows) % weight_depth; //kernel channel, i.e. the input channel
    const int K_N = thread_id / weight_cols / weight_rows / weight_depth; //kernel index, i.e. the output channel
    /* in_group is the number of channels per input group and
    *out_group the number of channels per output group.
    *First find the group of the current output channel: Nth_group = K_N / out_group;
    *from Nth_group the inputdata offset is in_offset = Nth_group*in_group*in_cols*in_rows;
    *the weight offset is kernel_offset = OD*in_group*kernel_cols*kernel_rows;
    */
const int Nth_group = K_N / out_group;
const int input_offset = in_rows*in_cols*in_depth;
const int out_offset = out_rows*out_cols*out_depth;
//const int input_row_start = K_row*stride - pad_rows;
Dtype gradient = 0;
for (int n = 0; n < num; n++){
        const Dtype* const inputdata_slice = inputdata + n*input_offset + K_chanel*in_cols*in_rows + Nth_group*in_group*in_cols*in_rows; //input offset for the n-th sample of the batch
        const Dtype* const outputdata_slice = output_diff + n*out_offset + K_N*out_cols*out_rows; //output offset
for (int out_R = 0; out_R < out_rows; out_R++){
const int in_R = out_R*stride + K_row - pad_rows;
if (in_R >= 0 && in_R<in_rows){
for (int out_C = 0; out_C < out_cols; out_C++){
const int in_C = out_C*stride + K_col - pad_cols;
if (in_C >= 0 && in_C<in_cols){
int in_index = in_R*in_cols + in_C;
int out_index = out_R*out_cols + out_C;
gradient += inputdata_slice[in_index] * outputdata_slice[out_index];
}
}
}
}
}
weight_diff[thread_id] = gradient;
}
/*Computes the convolution with one thread per output element, without using im2col;
 *this version also supports grouped convolution. Required parameters:
 * inputdata    input data
 * in_group     number of channels per input group
 * in_rows      input image height
 * in_cols      input image width
 * in_depth     total number of input channels
 * weight       convolution weights
 * weight_rows  kernel height
 * weight_cols  kernel width
 * stride       stride
 * pad_rows     padding in height
 * pad_cols     padding in width
 * biasexist    whether a bias term is present
 * biasdata     bias data
 * out_group    number of channels per output group
 * out_rows     output image height
 * out_cols     output image width
 * out_depth    total number of output channels
 * outputdata   output data
 * out_Number   total number of output elements
 */
template <typename Dtype>
__global__ void forward_convwithgroup(const Dtype *inputdata, const int in_group, const int in_rows, const int in_cols, const int in_depth,
const Dtype *weight, const int weight_rows, const int weight_cols, const int stride, const int pad_rows, const int pad_cols, bool biasexist, const Dtype *biasdata,
const int out_group, const int out_rows, const int out_cols, const int out_depth, Dtype *outputdata, const int out_Number){
int thread_id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (thread_id >= out_Number) return;
const int OC = thread_id % out_cols;//width
const int OR = (thread_id / out_cols) % out_rows;//height
const int OD = (thread_id / out_cols / out_rows) % out_depth;//channel
const int OB = thread_id / out_cols / out_rows / out_depth;//batch size
const int input_row_start = OR * stride - pad_rows;
const int input_col_start = OC * stride - pad_cols;
const int input_row_end = input_row_start + weight_rows;
const int input_col_end = input_col_start + weight_cols;
	/* in_group is the number of channels per input group,
	* out_group is the number of channels per output group.
	* Computation: first determine from OD which group the current channel belongs to, Nth_group = OD/out_group;
	* from Nth_group compute the offset into inputdata, in_offset = Nth_group*in_group*in_cols*in_rows;
	* the weight offset is kernel_offset = OD*in_group*kernel_cols*kernel_rows;
	*/
int Nth_group = OD / out_group;
const int inputoffset = OB*in_depth*in_rows*in_cols + Nth_group*in_group*in_cols*in_rows; // +OD*in_cols*in_rows;// +input_row_start*in_cols + input_col_start;
const int kerneloffset = OD*in_group*weight_cols*weight_rows;
Dtype sum = 0.f;
//#pragma unroll
for (int W_C = 0; W_C < in_group; W_C++){
int InputOffset = inputoffset + W_C*in_rows*in_cols;
int KernelOffset = kerneloffset + W_C*weight_cols*weight_rows;
//#pragma unroll
for (int W_H = 0; W_H < weight_rows; W_H++){
int in_r = input_row_start + W_H;
//#pragma unroll
for (int W_W = 0; W_W < weight_cols; W_W++){
int in_c = input_col_start + W_W;
if (in_r >= 0 && in_r < in_rows && in_c >= 0 && in_c < in_cols) {
int input_offset = InputOffset + in_r*in_cols + in_c;
int weight_offset = KernelOffset + W_H*weight_cols + W_W;
sum += (*(inputdata + input_offset))*(*(weight + weight_offset));
}
}
}
}
if (biasexist) outputdata[thread_id] = sum + biasdata[OD];
else outputdata[thread_id] = sum;
}
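// [Editor's illustrative sketch; the helper below has a hypothetical name and is not used by the layer.]
// The output indexing in forward_convwithgroup assumes the usual convolution output size,
// out = (in + 2*pad - kernel)/stride + 1, which is what Caffe's shape setup normally produces.
static inline int zk_conv_out_size_sketch(int in_size, int kernel, int pad, int stride) {
	return (in_size + 2 * pad - kernel) / stride + 1;
}
// For example, a 224-wide input with a 3x3 kernel, pad 1 and stride 1 keeps its width:
// (224 + 2*1 - 3)/1 + 1 = 224.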
// template <>
// void gpu_convwithgroup<float>(const float *inputdata, const int in_group, const int in_rows, const int in_cols, const int in_depth,
// const float *weight, const int weight_rows, const int weight_cols,
// const int stride, const int pad_rows, const int pad_cols,
// bool biasexist, const float *biasdata,
// const int out_group, const int out_rows, const int out_cols, const int out_depth, float *outputdata, const int out_Number){
// int thread_id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
// if (thread_id >= out_Number) return;
// const int OC = thread_id % out_cols;//width
// const int OR = (thread_id / out_cols) % out_rows;//height
// const int OD = (thread_id / out_cols / out_rows) % out_depth;//channel
// const int OB = thread_id / out_cols / out_rows / out_depth;//batch size
//
// const int input_row_start = OR * stride - pad_rows;
// const int input_col_start = OC * stride - pad_cols;
// const int input_row_end = input_row_start + weight_rows;
// const int input_col_end = input_col_start + weight_cols;
//
//	/* in_group is the number of channels per input group,
//	* out_group is the number of channels per output group.
//	* Computation: first determine from OD which group the current channel belongs to, Nth_group = OD/out_group;
//	* from Nth_group compute the offset into inputdata, in_offset = Nth_group*in_group*in_cols*in_rows;
//	* the weight offset is kernel_offset = OD*in_group*kernel_cols*kernel_rows;
//	*/
// int Nth_group = OD / out_group;
// const int inputoffset = OB*in_depth*in_rows*in_cols + Nth_group*in_group*in_cols*in_rows; // +OD*in_cols*in_rows;// +input_row_start*in_cols + input_col_start;
// const int kerneloffset = OD*in_group*weight_cols*weight_rows;
//
// float sum = 0.f;
////#pragma unroll
// for (int W_C = 0; W_C < in_group; W_C++){
// int InputOffset = inputoffset + W_C*in_rows*in_cols;
// int KernelOffset = kerneloffset + W_C*weight_cols*weight_rows;
////#pragma unroll
// for (int W_H = 0; W_H < weight_rows; W_H++){
//
// int in_r = input_row_start + W_H;
////#pragma unroll
// for (int W_W = 0; W_W < weight_cols; W_W++){
// int in_c = input_col_start + W_W;
// if (in_r >= 0 && in_r < in_rows && in_c >= 0 && in_c < in_cols) {
// int input_offset = InputOffset + in_r*in_cols + in_c;
// int weight_offset = KernelOffset + W_H*weight_cols + W_W;
// sum += (*(inputdata + input_offset))*(*(weight + weight_offset));
// }
//
// }
// }
// }
// if (biasexist) outputdata[thread_id] = sum + biasdata[OD];
// else outputdata[thread_id] = sum;
// }
// template <>
// void gpu_convwithgroup<double>(const double *inputdata, const int in_group, const int in_rows, const int in_cols, const int in_depth,
// const double *weight, const int weight_rows, const int weight_cols, const int stride, const int pad_rows, const int pad_cols, bool biasexist, const double *biasdata,
// const int out_group, const int out_rows, const int out_cols, const int out_depth, double *outputdata, const int out_Number){
// int thread_id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
// if (thread_id >= out_Number) return;
// const int OC = thread_id % out_cols;//width
// const int OR = (thread_id / out_cols) % out_rows;//height
// const int OD = (thread_id / out_cols / out_rows) % out_depth;//channel
// const int OB = thread_id / out_cols / out_rows / out_depth;//batch size
//
// const int input_row_start = OR * stride - pad_rows;
// const int input_col_start = OC * stride - pad_cols;
// const int input_row_end = input_row_start + weight_rows;
// const int input_col_end = input_col_start + weight_cols;
//
//	/* in_group is the number of channels per input group,
//	* out_group is the number of channels per output group.
//	* Computation: first determine from OD which group the current channel belongs to, Nth_group = OD/out_group;
//	* from Nth_group compute the offset into inputdata, in_offset = Nth_group*in_group*in_cols*in_rows;
//	* the weight offset is kernel_offset = OD*in_group*kernel_cols*kernel_rows;
//	*/
// int Nth_group = OD / out_group;
// const int inputoffset = OB*in_depth*in_rows*in_cols + Nth_group*in_group*in_cols*in_rows; // +OD*in_cols*in_rows;// +input_row_start*in_cols + input_col_start;
// const int kerneloffset = OD*in_group*weight_cols*weight_rows;
//
// double sum = 0.f;
////#pragma unroll
// for (int W_C = 0; W_C < in_group; W_C++){
// int InputOffset = inputoffset + W_C*in_rows*in_cols;
// int KernelOffset = kerneloffset + W_C*weight_cols*weight_rows;
////#pragma unroll
// for (int W_H = 0; W_H < weight_rows; W_H++){
//
// int in_r = input_row_start + W_H;
////#pragma unroll
// for (int W_W = 0; W_W < weight_cols; W_W++){
// int in_c = input_col_start + W_W;
// if (in_r >= 0 && in_r < in_rows && in_c >= 0 && in_c < in_cols) {
// int input_offset = InputOffset + in_r*in_cols + in_c;
// int weight_offset = KernelOffset + W_H*weight_cols + W_W;
// sum += (*(inputdata + input_offset))*(*(weight + weight_offset));
// }
//
// }
// }
// }
// if (biasexist) outputdata[thread_id] = sum + biasdata[OD];
// else outputdata[thread_id] = sum;
// };
template <typename Dtype>
void ZKConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int group = this->group_;
const Dtype* weight = this->blobs_[0]->gpu_data();
const int weight_rows = this->blobs_[0]->height();
const int weight_cols = this->blobs_[0]->width();
bool biasexist = this->bias_term_;
const Dtype *biasdata = NULL;
if (biasexist) biasdata = this->blobs_[1]->gpu_data();
int* stride_data = this->stride_.mutable_cpu_data();
const int stride = stride_data[0];
int *pad_data = this->pad_.mutable_cpu_data();
const int pad_rows = pad_data[0];
const int pad_cols = pad_data[1];
for (int i = 0; i < bottom.size(); ++i) {
		const Dtype* bottom_data = bottom[i]->gpu_data();              // input data
		vector<int> in_shape_ = bottom[i]->shape();
		const int in_channels_ = in_shape_[1];                         // number of input channels
		const int in_group = in_channels_ / group;                     // channels per input group
		const int in_height_ = in_shape_[2];                           // input height
		const int in_width_ = in_shape_[3];                            // input width
		Dtype* top_data = top[i]->mutable_gpu_data();                  // output data
		const int count = top[i]->count();                             // total number of output elements
		vector<int> outshape_ = top[i]->shape();
		const int outchannels_ = outshape_[1];                         // number of output channels
		const int out_group = outchannels_ / group;                    // channels per output group
		const int outheight_ = outshape_[2];                           // output height
		const int outwidth_ = outshape_[3];                            // output width
forward_convwithgroup<Dtype> << <cuda_gridsize(count), CAFFE_CUDA_NUM_THREADS >> > (bottom_data, in_group, in_height_,
in_width_, in_channels_, weight, weight_rows, weight_cols, stride, pad_rows, pad_cols, biasexist, biasdata,
out_group, outheight_, outwidth_, outchannels_, top_data, count);
}
}
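// [Editor's note, hedged.] Forward_gpu launches one thread per output element: count equals
// batch * out_channels * out_height * out_width, so e.g. a 1 x 64 x 56 x 56 top blob would use
// 200704 threads, gridded by cuda_gridsize(count) with CAFFE_CUDA_NUM_THREADS threads per block.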
template <typename Dtype>
void ZKConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int batchSize = this->num_;
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const int weight_num = this->blobs_[0]->count();
const int weight_rows = this->blobs_[0]->height();
const int weight_cols = this->blobs_[0]->width();
const int weight_depth = this->blobs_[0]->channels();
int* stride_data = this->stride_.mutable_cpu_data();
const int stride = stride_data[0];
int *pad_data = this->pad_.mutable_cpu_data();
const int pad_rows = pad_data[0];
const int pad_cols = pad_data[1];
/*const int top_rows, const int top_cols, const int top_depth,
Dtype *bias_diff, const int bias_Num*/
//Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
std::vector<int> outshape_ = top[i]->shape();
const int bias_number = this->conv_out_channels_;
		const int outchannels_ = outshape_[1];                         // number of output channels
		const int out_group = outchannels_ / group_;                   // channels per output group
		const int outheight_ = outshape_[2];                           // output height
		const int outwidth_ = outshape_[3];                            // output width
// Bias gradient, if necessary.
if (this->bias_term_ && this->param_propagate_down_[1]){
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
backward_bias_gpuwithgroup<Dtype> << <cuda_gridsize(bias_number), CAFFE_CUDA_NUM_THREADS >> >(batchSize, top_diff, outheight_,
outwidth_, outchannels_, bias_diff, bias_number);
}
if (this->param_propagate_down_[0] || propagate_down[i]){
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
const int bottom_number = bottom[i]->count();
vector<int> in_shape_ = bottom[i]->shape();
			const int in_channels_ = in_shape_[1];                         // number of input channels
			const int in_group = in_channels_ / group_;                    // channels per input group
			const int in_height_ = in_shape_[2];                           // input height
			const int in_width_ = in_shape_[3];                            // input width
if (this->param_propagate_down_[0]){
/*backward_weight_gpuwithgroup(const int num, const Dtype *inputdata, const int in_group, const int in_rows, const int in_cols, const int in_depth,
const Dtype *output_diff, const int out_group, const int out_rows, const int out_cols, const int out_depth,
const int stride, const int pad_rows, const int pad_cols,
const int weight_rows, const int weight_cols, const int weight_depth, Dtype *weight_diff, const int weight_Number)*/
backward_weight_gpuwithgroup<Dtype> << < cuda_gridsize(weight_num), CAFFE_CUDA_NUM_THREADS >> > (batchSize,
bottom_data, in_group, in_height_, in_width_, in_channels_,
top_diff, out_group, outheight_, outwidth_, outchannels_,
stride, pad_rows, pad_cols,
weight_rows, weight_cols, weight_depth, weight_diff, weight_num
);
}
/*backward_Input_gpuwithgroup(const Dtype *top_diff, const int top_rows, const int top_cols, const int top_depth, const int top_group,
const Dtype *weight, const int weight_rows, const int weight_cols, const int weight_depth,
const int stride, const int pad_rows, const int pad_cols,
const int bottom_rows, const int bottom_cols, const int bottom_depth, const int bottom_group, Dtype *bottom_diff, const int bottom_Number)*/
if (propagate_down[i]) {
backward_Input_gpuwithgroup<Dtype> << <cuda_gridsize(bottom_number), CAFFE_CUDA_NUM_THREADS >> >(top_diff, outheight_, outwidth_,
					outchannels_, out_group, weight, weight_rows, weight_cols, weight_depth, stride, pad_rows, pad_cols,
in_height_, in_width_, in_channels_, in_group, bottom_diff, bottom_number);
}
}
}
}
//template <typename Dtype>
//void ZKConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
// const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
// const Dtype* weight = this->blobs_[0]->gpu_data();
// Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
// for (int i = 0; i < top.size(); ++i) {
// const Dtype* top_diff = top[i]->gpu_diff();
// // Bias gradient, if necessary.
// if (this->bias_term_ && this->param_propagate_down_[1]) {
// Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
// for (int n = 0; n < this->num_; ++n) {
// this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_);
// }
// }
// if (this->param_propagate_down_[0] || propagate_down[i]) {
// const Dtype* bottom_data = bottom[i]->gpu_data();
// Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
// for (int n = 0; n < this->num_; ++n) {
// // gradient w.r.t. weight. Note that we will accumulate diffs.
// if (this->param_propagate_down_[0]) {
// this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_,
// top_diff + n * this->top_dim_, weight_diff);
// }
// // gradient w.r.t. bottom data, if necessary.
// if (propagate_down[i]) {
// this->backward_gpu_gemm(top_diff + n * this->top_dim_, weight,
// bottom_diff + n * this->bottom_dim_);
// }
// }
// }
// }
//}
INSTANTIATE_LAYER_GPU_FUNCS(ZKConvolutionLayer);
} |
62e0e5d73baeb52ea8fa425b8429e2705808936f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> s d c
@author Stan Tomov
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 16
/******************************************************************************/
// grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd.
// lower indicates blocks in lower triangle of grid, including diagonal.
// lower blocks cover left side of matrix, including diagonal.
// upper blocks swap block indices (x,y) and shift by grid width (or width-1)
// to cover right side of matrix.
// [ A00 A01 A02 ] [ A00 . . | . . ]
// [ A10 A11 A12 ] [ A10 A11 . | . . ]
// grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ]
// [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ]
// [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ]
//
// See ztranspose_inplace_even for description of threads.
__global__ void ztranspose_inplace_odd(
int n,
magmaDoubleComplex *matrix, int lda )
{
__shared__ magmaDoubleComplex sA[ NB ][ NB+1 ];
__shared__ magmaDoubleComplex sB[ NB ][ NB+1 ];
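    // (The extra +1 column is the usual padding trick so that column-wise accesses to the shared
    //  tiles fall into different shared-memory banks and avoid bank conflicts.)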
int i = threadIdx.x;
int j = threadIdx.y;
bool lower = (blockIdx.x >= blockIdx.y);
int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1));
int jj = (lower ? blockIdx.y : (blockIdx.x + gridDim.y ));
ii *= NB;
jj *= NB;
magmaDoubleComplex *A = matrix + ii+i + (jj+j)*lda;
if ( ii == jj ) {
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sA[i][j];
}
}
else {
magmaDoubleComplex *B = matrix + jj+i + (ii+j)*lda;
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
if ( jj+i < n && ii+j < n ) {
sB[j][i] = *B;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sB[i][j];
}
if ( jj+i < n && ii+j < n ) {
*B = sA[i][j];
}
}
}
/******************************************************************************/
// grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even.
// lower indicates blocks in strictly lower triangle of grid, excluding diagonal.
// lower blocks shift up by one to cover left side of matrix including diagonal.
// upper blocks swap block indices (x,y) and shift by grid width
// to cover right side of matrix.
// [ A00 A01 ] [ A10 . | . . ]
// [ A10 A11 ] [ A20 A21 | . . ]
// grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ]
// [ A30 A31 ] [ A40 A41 | A01 A11 ]
// [ A40 A41 ]
//
// Each block is NB x NB threads.
// For non-diagonal block A, block B is symmetric block.
// Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed,
// syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j).
// Threads outside the matrix do not touch memory.
__global__ void ztranspose_inplace_even(
int n,
magmaDoubleComplex *matrix, int lda )
{
__shared__ magmaDoubleComplex sA[ NB ][ NB+1 ];
__shared__ magmaDoubleComplex sB[ NB ][ NB+1 ];
int i = threadIdx.x;
int j = threadIdx.y;
bool lower = (blockIdx.x > blockIdx.y);
int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y));
int jj = (lower ? (blockIdx.y ) : (blockIdx.x + gridDim.y));
ii *= NB;
jj *= NB;
magmaDoubleComplex *A = matrix + ii+i + (jj+j)*lda;
if ( ii == jj ) {
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sA[i][j];
}
}
else {
magmaDoubleComplex *B = matrix + jj+i + (ii+j)*lda;
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
if ( jj+i < n && ii+j < n ) {
sB[j][i] = *B;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sB[i][j];
}
if ( jj+i < n && ii+j < n ) {
*B = sA[i][j];
}
}
}
/***************************************************************************//**
Purpose
-------
ztranspose_inplace_q transposes a square N-by-N matrix in-place.
Same as ztranspose_inplace, but adds queue argument.
Arguments
---------
@param[in]
n INTEGER
The number of rows & columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The N-by-N matrix dA.
On exit, dA(j,i) = dA_original(i,j), for 0 <= i,j < N.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= N.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_transpose
*******************************************************************************/
extern "C" void
magmablas_ztranspose_inplace(
magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( ldda < n )
info = -3;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
dim3 threads( NB, NB );
int nblock = magma_ceildiv( n, NB );
// need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix.
// block assignment differs depending on whether nblock is odd or even.
if ( nblock % 2 == 1 ) {
dim3 grid( nblock, (nblock+1)/2 );
hipLaunchKernelGGL(( ztranspose_inplace_odd), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda );
}
else {
dim3 grid( nblock+1, nblock/2 );
hipLaunchKernelGGL(( ztranspose_inplace_even), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda );
}
}
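// [Editorial usage sketch, hedged -- not part of the original MAGMA source.] A typical call,
// assuming a magma_queue_t `queue` and an n-by-n device matrix dA with ldda >= n already exist:
//     magmablas_ztranspose_inplace( n, dA, ldda, queue );
//     magma_queue_sync( queue );
// Each off-diagonal NB x NB tile pair is swapped exactly once, because the launch grid only covers
// the lower-triangular half of the block grid plus the diagonal.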
| 62e0e5d73baeb52ea8fa425b8429e2705808936f.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> s d c
@author Stan Tomov
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 16
/******************************************************************************/
// grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd.
// lower indicates blocks in lower triangle of grid, including diagonal.
// lower blocks cover left side of matrix, including diagonal.
// upper blocks swap block indices (x,y) and shift by grid width (or width-1)
// to cover right side of matrix.
// [ A00 A01 A02 ] [ A00 . . | . . ]
// [ A10 A11 A12 ] [ A10 A11 . | . . ]
// grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ]
// [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ]
// [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ]
//
// See ztranspose_inplace_even for description of threads.
__global__ void ztranspose_inplace_odd(
int n,
magmaDoubleComplex *matrix, int lda )
{
__shared__ magmaDoubleComplex sA[ NB ][ NB+1 ];
__shared__ magmaDoubleComplex sB[ NB ][ NB+1 ];
int i = threadIdx.x;
int j = threadIdx.y;
bool lower = (blockIdx.x >= blockIdx.y);
int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1));
int jj = (lower ? blockIdx.y : (blockIdx.x + gridDim.y ));
ii *= NB;
jj *= NB;
magmaDoubleComplex *A = matrix + ii+i + (jj+j)*lda;
if ( ii == jj ) {
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sA[i][j];
}
}
else {
magmaDoubleComplex *B = matrix + jj+i + (ii+j)*lda;
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
if ( jj+i < n && ii+j < n ) {
sB[j][i] = *B;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sB[i][j];
}
if ( jj+i < n && ii+j < n ) {
*B = sA[i][j];
}
}
}
/******************************************************************************/
// grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even.
// lower indicates blocks in strictly lower triangle of grid, excluding diagonal.
// lower blocks shift up by one to cover left side of matrix including diagonal.
// upper blocks swap block indices (x,y) and shift by grid width
// to cover right side of matrix.
// [ A00 A01 ] [ A10 . | . . ]
// [ A10 A11 ] [ A20 A21 | . . ]
// grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ]
// [ A30 A31 ] [ A40 A41 | A01 A11 ]
// [ A40 A41 ]
//
// Each block is NB x NB threads.
// For non-diagonal block A, block B is symmetric block.
// Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed,
// syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j).
// Threads outside the matrix do not touch memory.
__global__ void ztranspose_inplace_even(
int n,
magmaDoubleComplex *matrix, int lda )
{
__shared__ magmaDoubleComplex sA[ NB ][ NB+1 ];
__shared__ magmaDoubleComplex sB[ NB ][ NB+1 ];
int i = threadIdx.x;
int j = threadIdx.y;
bool lower = (blockIdx.x > blockIdx.y);
int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y));
int jj = (lower ? (blockIdx.y ) : (blockIdx.x + gridDim.y));
ii *= NB;
jj *= NB;
magmaDoubleComplex *A = matrix + ii+i + (jj+j)*lda;
if ( ii == jj ) {
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sA[i][j];
}
}
else {
magmaDoubleComplex *B = matrix + jj+i + (ii+j)*lda;
if ( ii+i < n && jj+j < n ) {
sA[j][i] = *A;
}
if ( jj+i < n && ii+j < n ) {
sB[j][i] = *B;
}
__syncthreads();
if ( ii+i < n && jj+j < n ) {
*A = sB[i][j];
}
if ( jj+i < n && ii+j < n ) {
*B = sA[i][j];
}
}
}
/***************************************************************************//**
Purpose
-------
ztranspose_inplace_q transposes a square N-by-N matrix in-place.
Same as ztranspose_inplace, but adds queue argument.
Arguments
---------
@param[in]
n INTEGER
The number of rows & columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The N-by-N matrix dA.
On exit, dA(j,i) = dA_original(i,j), for 0 <= i,j < N.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= N.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_transpose
*******************************************************************************/
extern "C" void
magmablas_ztranspose_inplace(
magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( ldda < n )
info = -3;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
dim3 threads( NB, NB );
int nblock = magma_ceildiv( n, NB );
// need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix.
// block assignment differs depending on whether nblock is odd or even.
if ( nblock % 2 == 1 ) {
dim3 grid( nblock, (nblock+1)/2 );
ztranspose_inplace_odd<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda );
}
else {
dim3 grid( nblock+1, nblock/2 );
ztranspose_inplace_even<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda );
}
}
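// [Editorial worked example, hedged -- not part of the original MAGMA source.] Grid sizing for the
// two launch branches above, with NB = 16:
//   n = 100 -> nblock = 7 (odd)  -> grid(7, 4) = 28 blocks, matching nblock*(nblock+1)/2 = 28 tiles.
//   n = 128 -> nblock = 8 (even) -> grid(9, 4) = 36 blocks, matching nblock*(nblock+1)/2 = 36 tiles.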
|
010d6f07c4e56636a9f1360f63d36cb56c9e9aad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright 2020 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
/*
* Copyright 2018-2019 Autoware Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// headers in STL
#include <iostream>
// headers in local files
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/common.h"
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/preprocess_points_cuda.h"
namespace apollo {
namespace perception {
namespace lidar {
__global__ void make_pillar_histo_kernel(
const float* dev_points, float* dev_pillar_point_feature_in_coors,
int* pillar_count_histo, const int num_points,
const int max_points_per_pillar, const int grid_x_size,
const int grid_y_size, const int grid_z_size, const float min_x_range,
const float min_y_range, const float min_z_range, const float pillar_x_size,
const float pillar_y_size, const float pillar_z_size,
const int num_box_corners) {
int th_i = threadIdx.x + blockIdx.x * blockDim.x;
if (th_i >= num_points) {
return;
}
int y_coor = floor((dev_points[th_i * num_box_corners + 1] - min_y_range) /
pillar_y_size);
int x_coor = floor((dev_points[th_i * num_box_corners + 0] - min_x_range) /
pillar_x_size);
int z_coor = floor((dev_points[th_i * num_box_corners + 2] - min_z_range) /
pillar_z_size);
if (x_coor >= 0 && x_coor < grid_x_size && y_coor >= 0 &&
y_coor < grid_y_size && z_coor >= 0 && z_coor < grid_z_size) {
int count =
atomicAdd(&pillar_count_histo[y_coor * grid_x_size + x_coor], 1);
if (count < max_points_per_pillar) {
int ind = y_coor * grid_x_size * max_points_per_pillar * num_box_corners +
x_coor * max_points_per_pillar * num_box_corners +
count * num_box_corners;
for (int i = 0; i < num_box_corners; ++i) {
dev_pillar_point_feature_in_coors[ind + i] =
dev_points[th_i * num_box_corners + i];
}
}
}
}
__global__ void make_pillar_index_kernel(
int* dev_pillar_count_histo, int* dev_counter, int* dev_pillar_count,
int* dev_x_coors, int* dev_y_coors, float* dev_x_coors_for_sub,
float* dev_y_coors_for_sub, float* dev_num_points_per_pillar,
int* dev_sparse_pillar_map, const int max_pillars,
const int max_points_per_pillar, const int grid_x_size,
const float pillar_x_size, const float pillar_y_size,
const int num_inds_for_scan) {
int x = blockIdx.x;
int y = threadIdx.x;
int num_points_at_this_pillar = dev_pillar_count_histo[y * grid_x_size + x];
if (num_points_at_this_pillar == 0) {
return;
}
int count = atomicAdd(dev_counter, 1);
if (count < max_pillars) {
atomicAdd(dev_pillar_count, 1);
if (num_points_at_this_pillar >= max_points_per_pillar) {
dev_num_points_per_pillar[count] = max_points_per_pillar;
} else {
dev_num_points_per_pillar[count] = num_points_at_this_pillar;
}
dev_x_coors[count] = x;
dev_y_coors[count] = y;
// TODO(...): Need to be modified after making properly trained weight
// Will be modified in ver 1.1
// x_offset = self.vx / 2 + pc_range[0]
// y_offset = self.vy / 2 + pc_range[1]
// x_sub = coors_x.unsqueeze(1) * 0.16 + x_offset
// y_sub = coors_y.unsqueeze(1) * 0.16 + y_offset
// TODO(chenjiahao): offsets need to be fetched from function's
// input params and z need to be calculated after voxels are utilized
dev_x_coors_for_sub[count] = x * pillar_x_size + 0.1f;
dev_y_coors_for_sub[count] = y * pillar_y_size + -39.9f;
dev_sparse_pillar_map[y * num_inds_for_scan + x] = 1;
}
}
__global__ void make_pillar_feature_kernel(
float* dev_pillar_point_feature_in_coors, float* dev_pillar_point_feature,
int* dev_x_coors, int* dev_y_coors, float* dev_num_points_per_pillar,
const int max_points, const int num_point_feature, const int grid_x_size) {
int ith_pillar = blockIdx.x;
int num_points_at_this_pillar = dev_num_points_per_pillar[ith_pillar];
int ith_point = threadIdx.x;
if (ith_point >= num_points_at_this_pillar) {
return;
}
int x_ind = dev_x_coors[ith_pillar];
int y_ind = dev_y_coors[ith_pillar];
int pillar_ind = ith_pillar * max_points * num_point_feature +
ith_point * num_point_feature;
int coors_ind = y_ind * grid_x_size * max_points * num_point_feature +
x_ind * max_points * num_point_feature +
ith_point * num_point_feature;
for (int i = 0; i < num_point_feature; ++i) {
dev_pillar_point_feature[pillar_ind + i] =
dev_pillar_point_feature_in_coors[coors_ind + i];
}
}
__global__ void make_extra_network_input_kernel(
float* dev_x_coors_for_sub, float* dev_y_coors_for_sub,
float* dev_num_points_per_pillar, float* dev_pillar_coors,
const int max_num_points_per_pillar) {
int ith_pillar = blockIdx.x;
int ith_point = threadIdx.x;
float x = dev_x_coors_for_sub[ith_pillar];
float y = dev_y_coors_for_sub[ith_pillar];
// TODO(chenjiahao): replace '4' with hyper-parameter vars
int ind = ith_pillar * max_num_points_per_pillar * 4 + ith_point * 4;
// TODO(chenjiahao): batch idx and z need to be specified after
// voxels are utilized
dev_pillar_coors[ind + 0] = 0; // batch idx
dev_pillar_coors[ind + 1] = 0; // z
dev_pillar_coors[ind + 2] = y;
dev_pillar_coors[ind + 3] = x;
}
PreprocessPointsCuda::PreprocessPointsCuda(
const int num_threads, const int max_num_pillars,
const int max_points_per_pillar, const int num_point_feature,
const int num_inds_for_scan, const int grid_x_size, const int grid_y_size,
const int grid_z_size, const float pillar_x_size, const float pillar_y_size,
const float pillar_z_size, const float min_x_range, const float min_y_range,
const float min_z_range, const int num_box_corners)
: num_threads_(num_threads),
max_num_pillars_(max_num_pillars),
max_num_points_per_pillar_(max_points_per_pillar),
num_point_feature_(num_point_feature),
num_inds_for_scan_(num_inds_for_scan),
grid_x_size_(grid_x_size),
grid_y_size_(grid_y_size),
grid_z_size_(grid_z_size),
pillar_x_size_(pillar_x_size),
pillar_y_size_(pillar_y_size),
pillar_z_size_(pillar_z_size),
min_x_range_(min_x_range),
min_y_range_(min_y_range),
min_z_range_(min_z_range),
num_box_corners_(num_box_corners) {
GPU_CHECK(
hipMalloc(reinterpret_cast<void**>(&dev_pillar_point_feature_in_coors_),
grid_y_size_ * grid_x_size_ * max_num_points_per_pillar_ *
num_point_feature_ * sizeof(float)));
GPU_CHECK(hipMalloc(reinterpret_cast<void**>(&dev_pillar_count_histo_),
grid_y_size_ * grid_x_size_ * sizeof(int)));
GPU_CHECK(hipMalloc(reinterpret_cast<void**>(&dev_counter_), sizeof(int)));
GPU_CHECK(
hipMalloc(reinterpret_cast<void**>(&dev_pillar_count_), sizeof(int)));
GPU_CHECK(hipMalloc(reinterpret_cast<void**>(&dev_x_coors_for_sub_),
max_num_pillars_ * sizeof(float)));
GPU_CHECK(hipMalloc(reinterpret_cast<void**>(&dev_y_coors_for_sub_),
max_num_pillars_ * sizeof(float)));
}
PreprocessPointsCuda::~PreprocessPointsCuda() {
GPU_CHECK(hipFree(dev_pillar_point_feature_in_coors_));
GPU_CHECK(hipFree(dev_pillar_count_histo_));
GPU_CHECK(hipFree(dev_counter_));
GPU_CHECK(hipFree(dev_pillar_count_));
GPU_CHECK(hipFree(dev_x_coors_for_sub_));
GPU_CHECK(hipFree(dev_y_coors_for_sub_));
}
void PreprocessPointsCuda::DoPreprocessPointsCuda(
const float* dev_points, const int in_num_points, int* dev_x_coors,
int* dev_y_coors, float* dev_num_points_per_pillar,
float* dev_pillar_point_feature, float* dev_pillar_coors,
int* dev_sparse_pillar_map, int* host_pillar_count) {
GPU_CHECK(hipMemset(dev_pillar_count_histo_, 0,
grid_y_size_ * grid_x_size_ * sizeof(int)));
GPU_CHECK(hipMemset(dev_counter_, 0, sizeof(int)));
GPU_CHECK(hipMemset(dev_pillar_count_, 0, sizeof(int)));
int num_block = DIVUP(in_num_points, num_threads_);
hipLaunchKernelGGL(( make_pillar_histo_kernel), dim3(num_block), dim3(num_threads_), 0, 0,
dev_points, dev_pillar_point_feature_in_coors_, dev_pillar_count_histo_,
in_num_points, max_num_points_per_pillar_, grid_x_size_, grid_y_size_,
grid_z_size_, min_x_range_, min_y_range_, min_z_range_, pillar_x_size_,
pillar_y_size_, pillar_z_size_, num_box_corners_);
hipLaunchKernelGGL(( make_pillar_index_kernel), dim3(grid_x_size_), dim3(grid_y_size_), 0, 0,
dev_pillar_count_histo_, dev_counter_, dev_pillar_count_, dev_x_coors,
dev_y_coors, dev_x_coors_for_sub_, dev_y_coors_for_sub_,
dev_num_points_per_pillar, dev_sparse_pillar_map, max_num_pillars_,
max_num_points_per_pillar_, grid_x_size_, pillar_x_size_, pillar_y_size_,
num_inds_for_scan_);
GPU_CHECK(hipMemcpy(host_pillar_count, dev_pillar_count_, sizeof(int),
hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( make_pillar_feature_kernel), dim3(host_pillar_count[0]),
dim3(max_num_points_per_pillar_), 0, 0,
dev_pillar_point_feature_in_coors_, dev_pillar_point_feature, dev_x_coors,
dev_y_coors, dev_num_points_per_pillar, max_num_points_per_pillar_,
num_point_feature_, grid_x_size_);
hipLaunchKernelGGL(( make_extra_network_input_kernel), dim3(max_num_pillars_),
dim3(max_num_points_per_pillar_), 0, 0,
dev_x_coors_for_sub_, dev_y_coors_for_sub_, dev_num_points_per_pillar,
dev_pillar_coors, max_num_points_per_pillar_);
}
} // namespace lidar
} // namespace perception
} // namespace apollo
| 010d6f07c4e56636a9f1360f63d36cb56c9e9aad.cu | /******************************************************************************
* Copyright 2020 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
/*
* Copyright 2018-2019 Autoware Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// headers in STL
#include <iostream>
// headers in local files
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/common.h"
#include "modules/perception/lidar/lib/detection/lidar_point_pillars/preprocess_points_cuda.h"
namespace apollo {
namespace perception {
namespace lidar {
__global__ void make_pillar_histo_kernel(
const float* dev_points, float* dev_pillar_point_feature_in_coors,
int* pillar_count_histo, const int num_points,
const int max_points_per_pillar, const int grid_x_size,
const int grid_y_size, const int grid_z_size, const float min_x_range,
const float min_y_range, const float min_z_range, const float pillar_x_size,
const float pillar_y_size, const float pillar_z_size,
const int num_box_corners) {
int th_i = threadIdx.x + blockIdx.x * blockDim.x;
if (th_i >= num_points) {
return;
}
int y_coor = floor((dev_points[th_i * num_box_corners + 1] - min_y_range) /
pillar_y_size);
int x_coor = floor((dev_points[th_i * num_box_corners + 0] - min_x_range) /
pillar_x_size);
int z_coor = floor((dev_points[th_i * num_box_corners + 2] - min_z_range) /
pillar_z_size);
if (x_coor >= 0 && x_coor < grid_x_size && y_coor >= 0 &&
y_coor < grid_y_size && z_coor >= 0 && z_coor < grid_z_size) {
int count =
atomicAdd(&pillar_count_histo[y_coor * grid_x_size + x_coor], 1);
if (count < max_points_per_pillar) {
int ind = y_coor * grid_x_size * max_points_per_pillar * num_box_corners +
x_coor * max_points_per_pillar * num_box_corners +
count * num_box_corners;
for (int i = 0; i < num_box_corners; ++i) {
dev_pillar_point_feature_in_coors[ind + i] =
dev_points[th_i * num_box_corners + i];
}
}
}
}
__global__ void make_pillar_index_kernel(
int* dev_pillar_count_histo, int* dev_counter, int* dev_pillar_count,
int* dev_x_coors, int* dev_y_coors, float* dev_x_coors_for_sub,
float* dev_y_coors_for_sub, float* dev_num_points_per_pillar,
int* dev_sparse_pillar_map, const int max_pillars,
const int max_points_per_pillar, const int grid_x_size,
const float pillar_x_size, const float pillar_y_size,
const int num_inds_for_scan) {
int x = blockIdx.x;
int y = threadIdx.x;
int num_points_at_this_pillar = dev_pillar_count_histo[y * grid_x_size + x];
if (num_points_at_this_pillar == 0) {
return;
}
int count = atomicAdd(dev_counter, 1);
if (count < max_pillars) {
atomicAdd(dev_pillar_count, 1);
if (num_points_at_this_pillar >= max_points_per_pillar) {
dev_num_points_per_pillar[count] = max_points_per_pillar;
} else {
dev_num_points_per_pillar[count] = num_points_at_this_pillar;
}
dev_x_coors[count] = x;
dev_y_coors[count] = y;
// TODO(...): Need to be modified after making properly trained weight
// Will be modified in ver 1.1
// x_offset = self.vx / 2 + pc_range[0]
// y_offset = self.vy / 2 + pc_range[1]
// x_sub = coors_x.unsqueeze(1) * 0.16 + x_offset
// y_sub = coors_y.unsqueeze(1) * 0.16 + y_offset
// TODO(chenjiahao): offsets need to be fetched from function's
// input params and z need to be calculated after voxels are utilized
dev_x_coors_for_sub[count] = x * pillar_x_size + 0.1f;
dev_y_coors_for_sub[count] = y * pillar_y_size + -39.9f;
dev_sparse_pillar_map[y * num_inds_for_scan + x] = 1;
}
}
__global__ void make_pillar_feature_kernel(
float* dev_pillar_point_feature_in_coors, float* dev_pillar_point_feature,
int* dev_x_coors, int* dev_y_coors, float* dev_num_points_per_pillar,
const int max_points, const int num_point_feature, const int grid_x_size) {
int ith_pillar = blockIdx.x;
int num_points_at_this_pillar = dev_num_points_per_pillar[ith_pillar];
int ith_point = threadIdx.x;
if (ith_point >= num_points_at_this_pillar) {
return;
}
int x_ind = dev_x_coors[ith_pillar];
int y_ind = dev_y_coors[ith_pillar];
int pillar_ind = ith_pillar * max_points * num_point_feature +
ith_point * num_point_feature;
int coors_ind = y_ind * grid_x_size * max_points * num_point_feature +
x_ind * max_points * num_point_feature +
ith_point * num_point_feature;
for (int i = 0; i < num_point_feature; ++i) {
dev_pillar_point_feature[pillar_ind + i] =
dev_pillar_point_feature_in_coors[coors_ind + i];
}
}
__global__ void make_extra_network_input_kernel(
float* dev_x_coors_for_sub, float* dev_y_coors_for_sub,
float* dev_num_points_per_pillar, float* dev_pillar_coors,
const int max_num_points_per_pillar) {
int ith_pillar = blockIdx.x;
int ith_point = threadIdx.x;
float x = dev_x_coors_for_sub[ith_pillar];
float y = dev_y_coors_for_sub[ith_pillar];
// TODO(chenjiahao): replace '4' with hyper-parameter vars
int ind = ith_pillar * max_num_points_per_pillar * 4 + ith_point * 4;
// TODO(chenjiahao): batch idx and z need to be specified after
// voxels are utilized
dev_pillar_coors[ind + 0] = 0; // batch idx
dev_pillar_coors[ind + 1] = 0; // z
dev_pillar_coors[ind + 2] = y;
dev_pillar_coors[ind + 3] = x;
}
PreprocessPointsCuda::PreprocessPointsCuda(
const int num_threads, const int max_num_pillars,
const int max_points_per_pillar, const int num_point_feature,
const int num_inds_for_scan, const int grid_x_size, const int grid_y_size,
const int grid_z_size, const float pillar_x_size, const float pillar_y_size,
const float pillar_z_size, const float min_x_range, const float min_y_range,
const float min_z_range, const int num_box_corners)
: num_threads_(num_threads),
max_num_pillars_(max_num_pillars),
max_num_points_per_pillar_(max_points_per_pillar),
num_point_feature_(num_point_feature),
num_inds_for_scan_(num_inds_for_scan),
grid_x_size_(grid_x_size),
grid_y_size_(grid_y_size),
grid_z_size_(grid_z_size),
pillar_x_size_(pillar_x_size),
pillar_y_size_(pillar_y_size),
pillar_z_size_(pillar_z_size),
min_x_range_(min_x_range),
min_y_range_(min_y_range),
min_z_range_(min_z_range),
num_box_corners_(num_box_corners) {
GPU_CHECK(
cudaMalloc(reinterpret_cast<void**>(&dev_pillar_point_feature_in_coors_),
grid_y_size_ * grid_x_size_ * max_num_points_per_pillar_ *
num_point_feature_ * sizeof(float)));
GPU_CHECK(cudaMalloc(reinterpret_cast<void**>(&dev_pillar_count_histo_),
grid_y_size_ * grid_x_size_ * sizeof(int)));
GPU_CHECK(cudaMalloc(reinterpret_cast<void**>(&dev_counter_), sizeof(int)));
GPU_CHECK(
cudaMalloc(reinterpret_cast<void**>(&dev_pillar_count_), sizeof(int)));
GPU_CHECK(cudaMalloc(reinterpret_cast<void**>(&dev_x_coors_for_sub_),
max_num_pillars_ * sizeof(float)));
GPU_CHECK(cudaMalloc(reinterpret_cast<void**>(&dev_y_coors_for_sub_),
max_num_pillars_ * sizeof(float)));
}
PreprocessPointsCuda::~PreprocessPointsCuda() {
GPU_CHECK(cudaFree(dev_pillar_point_feature_in_coors_));
GPU_CHECK(cudaFree(dev_pillar_count_histo_));
GPU_CHECK(cudaFree(dev_counter_));
GPU_CHECK(cudaFree(dev_pillar_count_));
GPU_CHECK(cudaFree(dev_x_coors_for_sub_));
GPU_CHECK(cudaFree(dev_y_coors_for_sub_));
}
void PreprocessPointsCuda::DoPreprocessPointsCuda(
const float* dev_points, const int in_num_points, int* dev_x_coors,
int* dev_y_coors, float* dev_num_points_per_pillar,
float* dev_pillar_point_feature, float* dev_pillar_coors,
int* dev_sparse_pillar_map, int* host_pillar_count) {
GPU_CHECK(cudaMemset(dev_pillar_count_histo_, 0,
grid_y_size_ * grid_x_size_ * sizeof(int)));
GPU_CHECK(cudaMemset(dev_counter_, 0, sizeof(int)));
GPU_CHECK(cudaMemset(dev_pillar_count_, 0, sizeof(int)));
int num_block = DIVUP(in_num_points, num_threads_);
make_pillar_histo_kernel<<<num_block, num_threads_>>>(
dev_points, dev_pillar_point_feature_in_coors_, dev_pillar_count_histo_,
in_num_points, max_num_points_per_pillar_, grid_x_size_, grid_y_size_,
grid_z_size_, min_x_range_, min_y_range_, min_z_range_, pillar_x_size_,
pillar_y_size_, pillar_z_size_, num_box_corners_);
make_pillar_index_kernel<<<grid_x_size_, grid_y_size_>>>(
dev_pillar_count_histo_, dev_counter_, dev_pillar_count_, dev_x_coors,
dev_y_coors, dev_x_coors_for_sub_, dev_y_coors_for_sub_,
dev_num_points_per_pillar, dev_sparse_pillar_map, max_num_pillars_,
max_num_points_per_pillar_, grid_x_size_, pillar_x_size_, pillar_y_size_,
num_inds_for_scan_);
GPU_CHECK(cudaMemcpy(host_pillar_count, dev_pillar_count_, sizeof(int),
cudaMemcpyDeviceToHost));
make_pillar_feature_kernel<<<host_pillar_count[0],
max_num_points_per_pillar_>>>(
dev_pillar_point_feature_in_coors_, dev_pillar_point_feature, dev_x_coors,
dev_y_coors, dev_num_points_per_pillar, max_num_points_per_pillar_,
num_point_feature_, grid_x_size_);
make_extra_network_input_kernel<<<max_num_pillars_,
max_num_points_per_pillar_>>>(
dev_x_coors_for_sub_, dev_y_coors_for_sub_, dev_num_points_per_pillar,
dev_pillar_coors, max_num_points_per_pillar_);
}
} // namespace lidar
} // namespace perception
} // namespace apollo
|
cd506e23003c41d093be3ca0ca74bed53793d6a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "thrust/reduce.h"
#include "thrust/device_ptr.h"
#include "header.h"
#include "util.h"
#define BLOCK_DIM 512
__device__ double d_courno;
__constant__ double GAMMA = 1.4E0;
__constant__ double CV = 8.3333333333E6;
#undef SQR
#define SQR(x) (__dmul_rn((x),(x)))
__global__ void gpu_ctoprim_kernel(
global_const_t *g, // i: Application parameters
double *u, // i: u[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][5]
double *q, // o: q[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][6]
double *courno // i/o
){
int i, j, k, idx, cour_idx, loffset;
int numthreads = BLOCK_DIM;
double rhoinv, eint, c, courx, coury, courz;
cour_idx = blockIdx.x * blockDim.x + threadIdx.x;
k = cour_idx / (g->dim_g[0] * g->dim_g[1]);
j = (cour_idx / g->dim_g[0]) % g->dim_g[1];
i = cour_idx % g->dim_g[0];
idx = k*g->plane_offset_g_padded + j*g->pitch_g[0] + i;
loffset = g->comp_offset_g_padded;
// Calculate Q
if( idx < loffset ){
rhoinv = 1.0E0/u[idx]; //u(i,j,k,1) = u[0][i][j][k]
q[idx] = u[idx]; //u(i,j,k,1) = u[0][i][j][k]
q[idx+loffset] = u[idx+loffset]*rhoinv; //u(i,j,k,2) = u[1][i][j][k]
q[idx+2*loffset] = u[idx+2*loffset]*rhoinv; //u(i,j,k,3) = u[2][i][j][k]
q[idx+3*loffset] = u[idx+3*loffset]*rhoinv; //u(i,j,k,4) = u[3][i][j][k]
eint = u[idx+4*loffset]*rhoinv - 0.5E0*(SQR(q[idx+loffset]) + SQR(q[idx+2*loffset]) + SQR(q[idx+3*loffset]));
q[idx+4*loffset] = (GAMMA-1.0E0)*eint*u[idx];
q[idx+5*loffset] = eint/CV;
// Calculate new courno (excluding ng)
if( g->ng <= i && i <= g->hi[0]+g->ng &&
g->ng <= j && j <= g->hi[1]+g->ng &&
g->ng <= k && k <= g->hi[2]+g->ng ){
c = sqrt(GAMMA*q[idx+4*loffset]/q[idx]);
courx = (c+fabs(q[idx+loffset])) /g->dx[0];
coury = (c+fabs(q[idx+2*loffset]))/g->dx[1];
courz = (c+fabs(q[idx+3*loffset]))/g->dx[2];
courno[cour_idx] = MAX(courx, MAX(coury, courz));
}
else
courno[cour_idx] = -1.0; //TODO: make it minus infinity
}
}
__global__ void gpu_ctoprim_kernel(
global_const_t *g, // i: Application parameters
double *u, // i: u[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][5]
double *q // o: q[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][6]
){
int i, j, k, idx, loffset;
int numthreads = BLOCK_DIM;
double rhoinv, eint, c, courx, coury, courz;
idx = blockIdx.x * blockDim.x + threadIdx.x;
k = idx / (g->dim_g[0] * g->dim_g[1]);
j = (idx / g->dim_g[0]) % g->dim_g[1];
i = idx % g->dim_g[0];
idx = k*g->plane_offset_g_padded + j*g->pitch_g[0] + i;
loffset = g->comp_offset_g_padded;
// Calculate Q
if( idx < loffset ){
rhoinv = 1.0E0/u[idx]; //u(i,j,k,1) = u[0][i][j][k]
q[idx] = u[idx]; //u(i,j,k,1) = u[0][i][j][k]
q[idx+loffset] = u[idx+loffset]*rhoinv; //u(i,j,k,2) = u[1][i][j][k]
q[idx+2*loffset] = u[idx+2*loffset]*rhoinv; //u(i,j,k,3) = u[2][i][j][k]
q[idx+3*loffset] = u[idx+3*loffset]*rhoinv; //u(i,j,k,4) = u[3][i][j][k]
eint = u[idx+4*loffset]*rhoinv - 0.5E0*(SQR(q[idx+loffset]) + SQR(q[idx+2*loffset]) + SQR(q[idx+3*loffset]));
q[idx+4*loffset] = (GAMMA-1.0E0)*eint*u[idx];
q[idx+5*loffset] = eint/CV;
}
}
void gpu_ctoprim(
global_const_t h_const, // i: Global struct containing application parameters
global_const_t *d_const, // i: Device pointer to global struct containing application parameters
double *u_d, // i: u[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][5]
double *q_d, // o: q[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][6]
double &courno // i/o
){
int i, len;
len = h_const.dim_g[0] * h_const.dim_g[1] * h_const.dim_g[2];
int grid_dim = (len + BLOCK_DIM-1) / BLOCK_DIM;
int block_dim = BLOCK_DIM;
hipLaunchKernelGGL(( gpu_ctoprim_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, d_const, u_d, q_d, h_const.temp[0]);
// Find max & update courno
// TODO: make it minus infinity
thrust::device_ptr<double> dev_ptr(h_const.temp[0]);
courno = thrust::reduce(dev_ptr, dev_ptr + len, (double) -INFINITY, thrust::maximum<double>());
}
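// [Editor's note, hedged.] temp[0] holds one Courant estimate per cell of the ghosted grid; ghost
// cells write the -1.0 sentinel in the kernel above, so the thrust::reduce max over the whole
// buffer is effectively the max over interior cells only (any valid (c+|u|)/dx is positive).
// Callers typically turn this into a time step as dt = cfl / courno, but that policy lives
// outside this routine.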
void gpu_ctoprim(
global_const_t h_const, // i: Global struct containing application parameters
global_const_t *d_const, // i: Device pointer to global struct containing application parameters
double *u_d, // i: u[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][5]
double *q_d // o: q[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][6]
){
int i, len;
len = h_const.dim_g[0] * h_const.dim_g[1] * h_const.dim_g[2];
int grid_dim = (len + BLOCK_DIM-1) / BLOCK_DIM;
int block_dim = BLOCK_DIM;
// TODO: edit parameters
hipLaunchKernelGGL(( gpu_ctoprim_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, d_const, u_d, q_d);
}
#undef SQR
#define SQR(x) ((x)*(x))
#define u(i,j,k,l) u[l-1][i][j][k]
#define q(i,j,k,l) q[l-1][i][j][k]
#define dx(i) h.dx[i-1]
#define dxinv(i) h.dxinv[i-1]
void ctoprim (
global_const_t h,
double ****u, // i: u[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][5]
double ****q, // o: q[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][6]
double &courno // i/o
){
int i, j, k;
	double c, eint, courx, coury, courz, rhoinv;
	double courmx = -INFINITY, courmy = -INFINITY, courmz = -INFINITY;  // running maxima of the Courant number
const double GAMMA = 1.4E0;
const double CV = 8.3333333333E6;
// #pragma omp parallel for private(i, j, k, eint, rhoinv)
DO(k, h.lo[2]-h.ng, h.hi[2]+h.ng){
DO(j, h.lo[1]-h.ng, h.hi[1]+h.ng){
DO(i, h.lo[0]-h.ng, h.hi[0]+h.ng){
rhoinv = 1.0E0/u(i,j,k,1);
q(i,j,k,1) = u(i,j,k,1);
q(i,j,k,2) = u(i,j,k,2)*rhoinv;
q(i,j,k,3) = u(i,j,k,3)*rhoinv;
q(i,j,k,4) = u(i,j,k,4)*rhoinv;
eint = u(i,j,k,5)*rhoinv - 0.5E0*(SQR(q(i,j,k,2)) + SQR(q(i,j,k,3)) + SQR(q(i,j,k,4)));
q(i,j,k,5) = (GAMMA-1.0E0)*eint*u(i,j,k,1);
q(i,j,k,6) = eint/CV;
}
}
}
// #pragma omp parallel for private(i, j, k, c, courx, coury, courz) reduction(max: courmx, courmy, courmz)
DO(k, h.lo[2], h.hi[2]){
DO(j, h.lo[1], h.hi[1]){
DO(i, h.lo[0], h.hi[0]){
c = sqrt(GAMMA*q(i,j,k,5)/q(i,j,k,1));
courx = ( c+fabs(q(i,j,k,2)) ) / dx(1); // I tried to change to * dxinv(1) but the results diverge.. (max diff = 5E-8)
coury = ( c+fabs(q(i,j,k,3)) ) / dx(2);
courz = ( c+fabs(q(i,j,k,4)) ) / dx(3);
courmx = MAX( courmx, courx );
courmy = MAX( courmy, coury );
courmz = MAX( courmz, courz );
}
}
}
//
// Compute running max of Courant number over grids.
//
courno = MAX(MAX(courmx, courmy), MAX(courmz, courno));
}
void ctoprim (
global_const_t h,
double ****u, // i: u[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][5]
double ****q // o: q[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][6]
){
int i, j, k;
double c, eint, courx, coury, courz, courmx, courmy, courmz, rhoinv;
const double GAMMA = 1.4E0;
const double CV = 8.3333333333E6;
// #pragma omp parallel for private(i, j, k, eint, rhoinv)
DO(k, h.lo[2]-h.ng, h.hi[2]+h.ng){
DO(j, h.lo[1]-h.ng, h.hi[1]+h.ng){
DO(i, h.lo[0]-h.ng, h.hi[0]+h.ng){
rhoinv = 1.0E0/u(i,j,k,1);
q(i,j,k,1) = u(i,j,k,1);
q(i,j,k,2) = u(i,j,k,2)*rhoinv;
q(i,j,k,3) = u(i,j,k,3)*rhoinv;
q(i,j,k,4) = u(i,j,k,4)*rhoinv;
eint = u(i,j,k,5)*rhoinv - 0.5E0*(SQR(q(i,j,k,2)) + SQR(q(i,j,k,3)) + SQR(q(i,j,k,4)));
q(i,j,k,5) = (GAMMA-1.0E0)*eint*u(i,j,k,1);
q(i,j,k,6) = eint/CV;
}
}
}
}
#undef u
#undef q
#undef dx
| cd506e23003c41d093be3ca0ca74bed53793d6a6.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "thrust/reduce.h"
#include "thrust/device_ptr.h"
#include "header.h"
#include "util.h"
#define BLOCK_DIM 512
__device__ double d_courno;
__constant__ double GAMMA = 1.4E0;
__constant__ double CV = 8.3333333333E6;
#undef SQR
#define SQR(x) (__dmul_rn((x),(x)))
__global__ void gpu_ctoprim_kernel(
global_const_t *g, // i: Application parameters
double *u, // i: u[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][5]
double *q, // o: q[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][6]
double *courno // i/o
){
int i, j, k, idx, cour_idx, loffset;
int numthreads = BLOCK_DIM;
double rhoinv, eint, c, courx, coury, courz;
cour_idx = blockIdx.x * blockDim.x + threadIdx.x;
k = cour_idx / (g->dim_g[0] * g->dim_g[1]);
j = (cour_idx / g->dim_g[0]) % g->dim_g[1];
i = cour_idx % g->dim_g[0];
idx = k*g->plane_offset_g_padded + j*g->pitch_g[0] + i;
loffset = g->comp_offset_g_padded;
// Calculate Q
if( idx < loffset ){
rhoinv = 1.0E0/u[idx]; //u(i,j,k,1) = u[0][i][j][k]
q[idx] = u[idx]; //u(i,j,k,1) = u[0][i][j][k]
q[idx+loffset] = u[idx+loffset]*rhoinv; //u(i,j,k,2) = u[1][i][j][k]
q[idx+2*loffset] = u[idx+2*loffset]*rhoinv; //u(i,j,k,3) = u[2][i][j][k]
q[idx+3*loffset] = u[idx+3*loffset]*rhoinv; //u(i,j,k,4) = u[3][i][j][k]
eint = u[idx+4*loffset]*rhoinv - 0.5E0*(SQR(q[idx+loffset]) + SQR(q[idx+2*loffset]) + SQR(q[idx+3*loffset]));
q[idx+4*loffset] = (GAMMA-1.0E0)*eint*u[idx];
q[idx+5*loffset] = eint/CV;
// Calculate new courno (excluding ng)
if( g->ng <= i && i <= g->hi[0]+g->ng &&
g->ng <= j && j <= g->hi[1]+g->ng &&
g->ng <= k && k <= g->hi[2]+g->ng ){
c = sqrt(GAMMA*q[idx+4*loffset]/q[idx]);
courx = (c+fabs(q[idx+loffset])) /g->dx[0];
coury = (c+fabs(q[idx+2*loffset]))/g->dx[1];
courz = (c+fabs(q[idx+3*loffset]))/g->dx[2];
courno[cour_idx] = MAX(courx, MAX(coury, courz));
}
else
courno[cour_idx] = -1.0; //TODO: make it minus infinity
}
}
__global__ void gpu_ctoprim_kernel(
global_const_t *g, // i: Application parameters
double *u, // i: u[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][5]
double *q // o: q[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][6]
){
int i, j, k, idx, loffset;
int numthreads = BLOCK_DIM;
double rhoinv, eint, c, courx, coury, courz;
idx = blockIdx.x * blockDim.x + threadIdx.x;
k = idx / (g->dim_g[0] * g->dim_g[1]);
j = (idx / g->dim_g[0]) % g->dim_g[1];
i = idx % g->dim_g[0];
idx = k*g->plane_offset_g_padded + j*g->pitch_g[0] + i;
loffset = g->comp_offset_g_padded;
// Calculate Q
if( idx < loffset ){
rhoinv = 1.0E0/u[idx]; //u(i,j,k,1) = u[0][i][j][k]
q[idx] = u[idx]; //u(i,j,k,1) = u[0][i][j][k]
q[idx+loffset] = u[idx+loffset]*rhoinv; //u(i,j,k,2) = u[1][i][j][k]
q[idx+2*loffset] = u[idx+2*loffset]*rhoinv; //u(i,j,k,3) = u[2][i][j][k]
q[idx+3*loffset] = u[idx+3*loffset]*rhoinv; //u(i,j,k,4) = u[3][i][j][k]
eint = u[idx+4*loffset]*rhoinv - 0.5E0*(SQR(q[idx+loffset]) + SQR(q[idx+2*loffset]) + SQR(q[idx+3*loffset]));
q[idx+4*loffset] = (GAMMA-1.0E0)*eint*u[idx];
q[idx+5*loffset] = eint/CV;
}
}
void gpu_ctoprim(
global_const_t h_const, // i: Global struct containing application parameters
global_const_t *d_const, // i: Device pointer to global struct containing application parameters
double *u_d, // i: u[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][5]
double *q_d, // o: q[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][6]
double &courno // i/o
){
int i, len;
len = h_const.dim_g[0] * h_const.dim_g[1] * h_const.dim_g[2];
int grid_dim = (len + BLOCK_DIM-1) / BLOCK_DIM;
int block_dim = BLOCK_DIM;
gpu_ctoprim_kernel<<<grid_dim, block_dim>>>(d_const, u_d, q_d, h_const.temp[0]);
// Find max & update courno
// TODO: make it minus infinity
thrust::device_ptr<double> dev_ptr(h_const.temp[0]);
courno = thrust::reduce(dev_ptr, dev_ptr + len, (double) -INFINITY, thrust::maximum<double>());
}
void gpu_ctoprim(
global_const_t h_const, // i: Global struct containing application parameters
global_const_t *d_const, // i: Device pointer to global struct containing application parameters
double *u_d, // i: u[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][5]
double *q_d // o: q[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][6]
){
int i, len;
len = h_const.dim_g[0] * h_const.dim_g[1] * h_const.dim_g[2];
int grid_dim = (len + BLOCK_DIM-1) / BLOCK_DIM;
int block_dim = BLOCK_DIM;
// TODO: edit parameters
gpu_ctoprim_kernel<<<grid_dim, block_dim>>>(d_const, u_d, q_d);
}
#undef SQR
#define SQR(x) ((x)*(x))
#define u(i,j,k,l) u[l-1][i][j][k]
#define q(i,j,k,l) q[l-1][i][j][k]
#define dx(i) h.dx[i-1]
#define dxinv(i) h.dxinv[i-1]
void ctoprim (
global_const_t h,
double ****u, // i: u[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][5]
double ****q, // o: q[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][6]
double &courno // i/o
){
int i, j, k;
	double c, eint, courx, coury, courz, rhoinv;
	double courmx = -INFINITY, courmy = -INFINITY, courmz = -INFINITY;  // running maxima of the Courant number
const double GAMMA = 1.4E0;
const double CV = 8.3333333333E6;
// #pragma omp parallel for private(i, j, k, eint, rhoinv)
DO(k, h.lo[2]-h.ng, h.hi[2]+h.ng){
DO(j, h.lo[1]-h.ng, h.hi[1]+h.ng){
DO(i, h.lo[0]-h.ng, h.hi[0]+h.ng){
rhoinv = 1.0E0/u(i,j,k,1);
q(i,j,k,1) = u(i,j,k,1);
q(i,j,k,2) = u(i,j,k,2)*rhoinv;
q(i,j,k,3) = u(i,j,k,3)*rhoinv;
q(i,j,k,4) = u(i,j,k,4)*rhoinv;
eint = u(i,j,k,5)*rhoinv - 0.5E0*(SQR(q(i,j,k,2)) + SQR(q(i,j,k,3)) + SQR(q(i,j,k,4)));
q(i,j,k,5) = (GAMMA-1.0E0)*eint*u(i,j,k,1);
q(i,j,k,6) = eint/CV;
}
}
}
// #pragma omp parallel for private(i, j, k, c, courx, coury, courz) reduction(max: courmx, courmy, courmz)
DO(k, h.lo[2], h.hi[2]){
DO(j, h.lo[1], h.hi[1]){
DO(i, h.lo[0], h.hi[0]){
c = sqrt(GAMMA*q(i,j,k,5)/q(i,j,k,1));
courx = ( c+fabs(q(i,j,k,2)) ) / dx(1); // I tried to change to * dxinv(1) but the results diverge.. (max diff = 5E-8)
coury = ( c+fabs(q(i,j,k,3)) ) / dx(2);
courz = ( c+fabs(q(i,j,k,4)) ) / dx(3);
courmx = MAX( courmx, courx );
courmy = MAX( courmy, coury );
courmz = MAX( courmz, courz );
}
}
}
//
// Compute running max of Courant number over grids.
//
courno = MAX(MAX(courmx, courmy), MAX(courmz, courno));
}
void ctoprim (
global_const_t h,
double ****u, // i: u[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][5]
double ****q // o: q[hi[0]-lo[0]+2*ng][hi[1]-lo[1]+2*ng][hi[2]-lo[2]+2*ng][6]
){
int i, j, k;
double c, eint, courx, coury, courz, courmx, courmy, courmz, rhoinv;
const double GAMMA = 1.4E0;
const double CV = 8.3333333333E6;
// #pragma omp parallel for private(i, j, k, eint, rhoinv)
DO(k, h.lo[2]-h.ng, h.hi[2]+h.ng){
DO(j, h.lo[1]-h.ng, h.hi[1]+h.ng){
DO(i, h.lo[0]-h.ng, h.hi[0]+h.ng){
rhoinv = 1.0E0/u(i,j,k,1);
q(i,j,k,1) = u(i,j,k,1);
q(i,j,k,2) = u(i,j,k,2)*rhoinv;
q(i,j,k,3) = u(i,j,k,3)*rhoinv;
q(i,j,k,4) = u(i,j,k,4)*rhoinv;
eint = u(i,j,k,5)*rhoinv - 0.5E0*(SQR(q(i,j,k,2)) + SQR(q(i,j,k,3)) + SQR(q(i,j,k,4)));
q(i,j,k,5) = (GAMMA-1.0E0)*eint*u(i,j,k,1);
q(i,j,k,6) = eint/CV;
}
}
}
}
#undef u
#undef q
#undef dx
|
974ca2a2aa3d1ec6f5c2cef0d7c4cf26e8e22556.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <matrixmul_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
Matrix M;
Matrix N;
Matrix P;
int errorM = 0, errorN = 0;
srand(52);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
srand(time(NULL));
M = AllocateMatrix(rand() % 1024, rand() % 1024, 1);
N = AllocateMatrix(M.width, rand() % 1024, 1);
P = AllocateMatrix(M.height, N.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = NULL; //(int*)malloc(3 * sizeof(int));
unsigned int data_read = 3;
cutReadFilei(argv[1], ¶ms, &data_read, true);
if(data_read != 3){
printf("Error reading parameter file\n");
return 1;
}
M = AllocateMatrix(params[0], params[1], 0);
N = AllocateMatrix(params[1], params[2], 0);
P = AllocateMatrix(params[0], params[2], 0);
errorM = ReadFile(&M, argv[2]);
errorN = ReadFile(&N, argv[3]);
if(errorM || errorN )
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
// M * N on the device
MatrixMulOnDevice(M, N, P);
printf("GPU computation complete\n");
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(P.height, P.width, 0);
computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
printf("CPU computation complete\n");
// in this case check if the result is equivalent to the expected solution
CUTBoolean res = cutComparefe(reference.elements, P.elements, P.height*P.width, 0.001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(P, argv[4]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// printf("Matrix M: %f\n", *M.elements);
// Setup the execution configuration
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((int)ceil((double)N.width/(double)dimBlock.x), (int)ceil((double)M.height/(double)dimBlock.y));
// Launch the device computation threads!
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd);
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
// printf("Matrix P: %f\n", P.elements[0]);
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a device matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
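// Example: AllocateMatrix(16, 32, 1) returns a 16x32 host matrix filled with random values in [0, 3].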
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (3.0f * rand() / (float)RAND_MAX); // random value in [0,3], computed in float to avoid integer overflow
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size,
hipMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
// Read a floating point matrix in from file
// Returns zero if the number of elements read is
// equals M.height * M.width, and 1 otherwise
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->height*M->width;
cutReadFilef(file_name, &(M->elements), &data_read, true);
return (data_read != (M->height * M->width));
}
// Write a floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
cutWriteFilef(file_name, M.elements, M.width*M.height,
0.0001f);
}
| 974ca2a2aa3d1ec6f5c2cef0d7c4cf26e8e22556.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <matrixmul_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
Matrix M;
Matrix N;
Matrix P;
int errorM = 0, errorN = 0;
srand(52);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
srand(time(NULL));
M = AllocateMatrix(rand() % 1024, rand() % 1024, 1);
N = AllocateMatrix(M.width, rand() % 1024, 1);
P = AllocateMatrix(M.height, N.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = NULL; //(int*)malloc(3 * sizeof(int));
unsigned int data_read = 3;
cutReadFilei(argv[1], ¶ms, &data_read, true);
if(data_read != 3){
printf("Error reading parameter file\n");
return 1;
}
M = AllocateMatrix(params[0], params[1], 0);
N = AllocateMatrix(params[1], params[2], 0);
P = AllocateMatrix(params[0], params[2], 0);
errorM = ReadFile(&M, argv[2]);
errorN = ReadFile(&N, argv[3]);
if(errorM || errorN )
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
// M * N on the device
MatrixMulOnDevice(M, N, P);
printf("GPU computation complete\n");
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(P.height, P.width, 0);
computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
printf("CPU computation complete\n");
// in this case check if the result is equivalent to the expected solution
CUTBoolean res = cutComparefe(reference.elements, P.elements, P.height*P.width, 0.001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(P, argv[4]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// printf("Matrix M: %f\n", *M.elements);
// Setup the execution configuration
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((int)ceil((double)N.width/(double)dimBlock.x), (int)ceil((double)M.height/(double)dimBlock.y));
// Launch the device computation threads!
MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd);
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
// printf("Matrix P: %f\n", P.elements[0]);
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a device matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
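// Example: AllocateMatrix(16, 32, 1) returns a 16x32 host matrix filled with random values in [0, 3].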
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (3.0f * rand() / (float)RAND_MAX); // random value in [0,3], computed in float to avoid integer overflow
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size,
cudaMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
// Read a floating point matrix in from file
// Returns zero if the number of elements read is
// equals M.height * M.width, and 1 otherwise
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->height*M->width;
cutReadFilef(file_name, &(M->elements), &data_read, true);
return (data_read != (M->height * M->width));
}
// Write a floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
cutWriteFilef(file_name, M.elements, M.width*M.height,
0.0001f);
}
|
98d043bcc0134258978a91be88acaa4942cd7a86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void kernel(int *a, int *b, int *c) {
// compute the sum on the device so dev_c holds a defined value before it is copied back
*c = *a + *b;
printf("Hello from thread.\n");
}
int main(){
int host_a, host_b, host_c;
int *dev_a, *dev_b, *dev_c;
int size = sizeof (int);
hipMalloc((void**) &dev_a, size);
hipMalloc((void**) &dev_b, size);
hipMalloc((void**) &dev_c, size);
host_a = 2;
host_b = 7;
hipMemcpy(dev_a, &host_a, size, hipMemcpyHostToDevice);
hipMemcpy(dev_b, &host_b, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel) , dim3(1), dim3(1) , 0, 0, dev_a, dev_b, dev_c);
hipDeviceSynchronize();
hipMemcpy(&host_c, dev_c, size, hipMemcpyDeviceToHost);
printf("C = %d \n", host_c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
printf("Hello, CUDA! \n");
}
| 98d043bcc0134258978a91be88acaa4942cd7a86.cu | #include <stdio.h>
#include <stdlib.h>
__global__ void kernel(int *a, int *b, int *c) {
// compute the sum on the device so dev_c holds a defined value before it is copied back
*c = *a + *b;
printf("Hello from thread.\n");
}
int main(){
int host_a, host_b, host_c;
int *dev_a, *dev_b, *dev_c;
int size = sizeof (int);
cudaMalloc((void**) &dev_a, size);
cudaMalloc((void**) &dev_b, size);
cudaMalloc((void**) &dev_c, size);
host_a = 2;
host_b = 7;
cudaMemcpy(dev_a, &host_a, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, &host_b, size, cudaMemcpyHostToDevice);
kernel <<< 1, 1 >>> (dev_a, dev_b, dev_c);
cudaDeviceSynchronize();
cudaMemcpy(&host_c, dev_c, size, cudaMemcpyDeviceToHost);
printf("C = %d \n", host_c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
printf("Hello, CUDA! \n");
}
|
11f92a6163b98649d80ec8f1ab4fe1004b18d03e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This code is courtesy of, and copyright 2015,
* Tomas Oppelstrup, Livermore National Lab. Please
* do not redistribute without his approval.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <unistd.h>
#include "real.h"
#define NBLOCKS 180
#define NTHREADS 64
#include "boxsortinc.cu"
double rawtime;
void vru(double r,double *v,double *g) {
double rcut = 2.5;
double alpha = -24.0*(pow(rcut,-7.0) - 2.0*pow(rcut,-13.0));
double beta = -4.0*(pow(rcut,-12.0) - pow(rcut,-6.0));
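// alpha and beta shift the plain Lennard-Jones form so that both the potential and the force vanish at rcut, giving a smooth cutoff.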
if(r < rcut && r > 0.1) {
// Contribution to potential energy phi(r)
*v = 4.0*(pow(r,-12.0) - pow(r,-6.0)) + alpha*(r - rcut) + beta;
// Contribution to gradient 1/r * Dphi(r)/Dr
*g = 24.0*(pow(r,-8.0) - 2.0*pow(r,-14.0)) + alpha/r;
} else {
*v = 0.0;
*g = 0.0;
}
}
void vru_grnpotential(double r,double *v,double *g) {
/* % Parameters: */
const double a1 = 265.848;
const double m = 12;
const double b1 = 1.5;
const double c1 = 1.45;
const double d = 0.8;
const double a2 = 2.5;
const double b2 = 0.19;
const double c2 = 1.89;
double a1x,a2x;
static int firsttime = 1;
if(firsttime == 1) {
printf("%% Potential parameters:\n"
"%% a1 = %9.4f a2 = %9.4f m = %9.4f\n"
"%% b1 = %9.4f b2 = %9.4f d = %9.4f\n"
"%% c1 = %9.4f c2 = %9.4f\n\n",
a1,a2,m,b1,b2,d,c1,c2);
firsttime = 0;
}
/* %Formula: */
if(r < 0.1) {
*v = 0.0;
*g = 0.0;
} else if(r < c1) {
a1x = a1*exp(b1/(r-c1));
a2x = a2*exp(b2/(r-c2));
*v = (1/pow(r,m)-d)*a1x + a2x;
*g = (-m/pow(r,m+1) + (1/pow(r,m)-d)*(-b1/((r-c1)*(r-c1))))*a1x +
a2x*(-b2/((r-c2)*(r-c2)));
*g = *g/r;
} else if(r < c2) {
*v = a2*exp(b2/(r-c2));
*g = *v * (-b2/((r-c2)*(r-c2)));
*g = *g/r;
} else {
*v = 0;
*g = 0;
}
}
/* Transformation. From normal to skewed box:
1 s/k (s/k)^2
0 1 s/k
0 0 1
Inverse transformation:
1 -s/k 0
0 1 -s/k
0 0 1
*/
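/* In boxindex() below, g = 1/k and w = k/boxl: the second and third coordinates are
 first corrected for the skew of the box (the g*x terms), then all three are scaled
 by w and floored to integer box coordinates a, b, c, which are combined into the
 linear index a + k*(b + k*c). */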
/* Figure out linear index of particle */
__host__ __device__
static int boxindex(real boxl,int k,volatile vector x, real g, real w) {
//real g = 1.0/k, w = k/boxl;
int a,b,c;
a = (int) floor(x[0] * w);
b = (int) floor((x[1] - g*x[0]) * w);
c = (int) floor((x[2] - g*x[1]) * w);
return a + k*(b + k*c);
}
__global__ void makeboxno(int n,int k,real boxl,vector4 xx[],int boxno[]) {
const int pid = threadIdx.x + blockDim.x*blockIdx.x;
const int np = blockDim.x*gridDim.x;
const int tid = threadIdx.x;
const int nt = blockDim.x;
int k3 = k*k*k;
real g = 1.0/k, w = k/boxl;
volatile __shared__ struct {
vector4 xx[NTHREADS];
} shm;
int i,bi;
for(i = pid; i<n+tid; i+=np) {
__syncthreads();
shm.xx[0][tid+0*nt] = xx[i-tid][tid+0*nt];
shm.xx[0][tid+1*nt] = xx[i-tid][tid+1*nt];
shm.xx[0][tid+2*nt] = xx[i-tid][tid+2*nt];
shm.xx[0][tid+3*nt] = xx[i-tid][tid+3*nt];
__syncthreads();
bi = boxindex(boxl,k,shm.xx[tid],g,w);
bi = (k3 + (bi % k3)) % k3;
if(i < n)
boxno[i] = bi;
}
}
/* Put particles in boxes */
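/* boxem builds a linked list of particles for every box (first[]/next[], stored 1-based so
 0 means empty) and then flattens the lists into perm[]; on return first[j] is the position
 in perm[] where box j starts. */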
static void boxem(int n,real boxl,int k,vector4 xx[],int first[],int perm[]) {
int i,j,p,k3 = k*k*k;
int *next;
int bi;
real g = 1.0/k, w = k/boxl;
int *tags = (int *) alloca(sizeof(int) * n);
memset(tags,0,sizeof(int) * n);
next = (int *) alloca(n * sizeof(int));
memset(next,0,sizeof(int) * n);
memset(first,0,sizeof(int) * (k3+1));
for(i = 0; i<n; i++) {
bi = boxindex(boxl,k,xx[i],g,w);
j = bi % k3;
j = (k3+j)%k3;
next[i] = first[j];
first[j] = i+1;
}
i = 0;
for(j = 0; j<k3; j++) {
int ix = (i<n) ? i : i-n;
p = first[j]-1;
first[j] = ix; /*printf("First in box %2d is %2d. Chain is %2d",j,i,p);*/
while(p >= 0) {
tags[p] = tags[p] + 1;
perm[i] = p; /*printf("location %3d has particle %3d.\n",i,p);*/
i = i + 1;
p = next[p]-1;
/*printf(" %d",p);*/
}
/*printf("\n");*/
}
if(n != i) printf("* Serious counting error @%s:%d. i=%d n=%d k3=%d\n",
__FILE__,__LINE__,i,n,k3);
for(i = 0; i<n; i++)
if(tags[i] != 1) printf("Wrong tag: tags(%d) = %d\n",i,tags[i]);
first[k3] = 0;
}
static void forcecalc_host(int n,real boxl,
int k,int first[],int boxno[],vector4 xx1[],
vector4 vv1[],vector4 xx2[],vector4 vv2[],real dt,
double *u_p,double *w_p,double *k_p,
int npot,double rcut,real upot[],real fpot[]) {
double boxli = 1.0/boxl;
int k3 = k*k*k;
int i,j,i0,i1,j0,j1,iu,iv,b,ii;
double xi,yi,zi,fxi,fyi,fzi,dx,dy,dz;
double d2;
double vx0,vy0,vz0,vx1,vy1,vz1,kx,ky,kz;
double vr,u,rcut2 = rcut*rcut;
double utot,wtot,ktot;
utot = 0.0;
wtot = 0.0;
ktot = 0.0;
for(b = 0; b<k3; b++) {
i0 = first[b];
i1 = first[b+1];
for(i = i0; i!=i1; i++) {
xi = xx1[i][0];
yi = xx1[i][1];
zi = xx1[i][2];
ii = (int) xx1[i][3];
fxi = 0.0;
fyi = 0.0;
fzi = 0.0;
for(iv = -2; iv<=2; iv++)
for(iu = -2; iu<=2; iu++) {
j0 = (k3 + b + k*(iu + k*iv) - 2)%k3;
j1 = j0 + 5;
if(j1 >= k3) j1 = j1-k3;
j0 = first[j0];
j1 = first[j1];
if(j0 > n || j1 > n) {
printf("Crap in forcecalc_host :: n=%d j0=%d j1=%d\n",
n,j0,j1);
fflush(stdout);
exit(1);
}
if(j0 == n) j0 = 0;
if(j1 == n) j1 = 0;
for(j = j0; j!=j1; j=((j==n-1) ? 0 : j+1)) {
dx = xi - xx1[j][0];
dy = yi - xx1[j][1];
dz = zi - xx1[j][2];
dx = dx - boxl*rint(dx*boxli);
dy = dy - boxl*rint(dy*boxli);
dz = dz - boxl*rint(dz*boxli);
d2 = dx*dx + dy*dy + dz*dz;
if(d2 > 0.0 && d2 < rcut2) {
//vru(sqrt(d2),&u,&vr);
double fdx = d2/rcut2 * (npot-1);
int idx = (int) floor(fdx);
double frac = fdx-idx;
//frac = floor(256.0*frac)/256.0;
if(idx >= npot-1) {
u = 0.0;
vr = 0.0;
} else {
u = (1.0-frac)*upot[idx] + frac*upot[idx+1];
vr = (1.0-frac)*fpot[idx] + frac*fpot[idx+1];
}
fxi = fxi - vr*dx;
fyi = fyi - vr*dy;
fzi = fzi - vr*dz;
utot = utot + u;
wtot = wtot - vr*d2;
}
}
}
vx0 = vv1[i][0];
vy0 = vv1[i][1];
vz0 = vv1[i][2];
vx1 = vx0 + fxi*dt;
vy1 = vy0 + fyi*dt;
vz1 = vz0 + fzi*dt;
kx = vx0 + vx1;
ky = vy0 + vy1;
kz = vz0 + vz1;
kx = kx*kx;
ky = ky*ky;
kz = kz*kz;
ktot = ktot + (kx + ky + kz)*0.125;
vv2[i][0] = vx1;
vv2[i][1] = vy1;
vv2[i][2] = vz1;
xx2[i][0] = xi + dt*vx1;
xx2[i][1] = yi + dt*vy1;
xx2[i][2] = zi + dt*vz1;
xx2[i][3] = ii;
}
}
*u_p = utot*0.5;
*w_p = wtot*0.5;
*k_p = ktot;
}
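/* adjustx4 replicates the first[] box table (offset by n and 2n) and the particle array
 into the second and third copies allocated for them, so that forcecalc_box can index
 neighbouring box columns without modular wrap-around; the minimum-image arithmetic in
 the force loop handles the actual periodic images. */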
__global__ static void adjustx4(int n,int k3,int first[],vector4 xx[],
int startstop[]) {
const int pid = threadIdx.x + blockDim.x*blockIdx.x;
const int np = blockDim.x*gridDim.x;
int i,b,n4;
real xi;
__syncthreads();
if(threadIdx.x == 0)
startstop[blockIdx.x] = (startstop[blockIdx.x] == 0);
for(b = pid; b<k3; b+=np) {
i = first[b];
first[b+k3 ] = i+ n;
first[b+2*k3] = i+2*n;
}
n4 = 4*n;
for(b = pid; b<n4; b+=np) {
xi = xx[0][b];
xx[n ][b] = xi;
xx[2*n][b] = xi;
}
__syncthreads();
if(threadIdx.x == 0)
startstop[gridDim.x+blockIdx.x] = (startstop[gridDim.x+blockIdx.x] == 0);
}
static texture<float2,1,hipReadModeElementType> pottex;
__global__ static void
forcecalc_box(int n,real boxl,int k,int first[],int boxno[],vector4 xx1[],
vector4 vv1[],vector4 xx2[],vector4 vv2[],real dt,real ukout[],
int startstop[],real rcut,int npot) {
volatile __shared__ int offsets[32];
volatile __shared__ int j0share[NTHREADS],j1share[NTHREADS];
volatile __shared__ real xxshare[NTHREADS][4];
/*#define YYLEN (NTHREADS+160)*/
#define YYLEN (NTHREADS+160)
volatile __shared__ real yyshare[YYLEN][4];
const int pid = threadIdx.x + blockDim.x*blockIdx.x;
const int np = blockDim.x * gridDim.x;
const int tid = threadIdx.x;
const int nt = blockDim.x;
const int bid = blockIdx.x;
const int nb = gridDim.x;
const int k3 = k*k*k;
const real boxli = 1.0/boxl,rcut2 = rcut*rcut,potscale = (npot-1)/rcut2;
//const real g = 1.0/k, w = k/boxl;
real dx,dy,dz,d2,fxi,fyi,fzi;
real utot,wtot,ktot,vx0,vx1;
int i,j,j0,j1,iv,b;
__syncthreads();
if(tid == 0) startstop[bid] = (startstop[bid] == 0);
for(i = tid; i<25; i+=nt)
offsets[i] = (i/5-2)*k*k + (i%5-2)*k;
utot = 0.0; wtot = 0.0; ktot = 0.0;
for(i = pid; i<n+tid; i+=np) {
// Load i-particles into shared memory, one particle per thread
__syncthreads();
xxshare[0][tid+0*nt] = xx1[i-tid][tid+0*nt];
xxshare[0][tid+1*nt] = xx1[i-tid][tid+1*nt];
xxshare[0][tid+2*nt] = xx1[i-tid][tid+2*nt];
xxshare[0][tid+3*nt] = xx1[i-tid][tid+3*nt];
__syncthreads();
fxi = 0.0; fyi = 0.0; fzi = 0.0;
// Loop over 25 neighboring columns
b = n-1;
if(i < n) b = i;
b = boxno[b];//((boxindex(boxl,k,xxshare[tid],g,w)%k3)+k3)%k3;
for(iv = 0; iv<25; iv++) {
__syncthreads();
j0share[tid] = first[k3+b+offsets[iv]-2];
j1share[tid] = first[k3+b+offsets[iv]+2+1];
__syncthreads();
j0 = j0share[0]; j1 = j1share[nt-1];
{
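// Stage the j-particles of this 5-box strip into shared memory in chunks of YYLEN particles.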
int joff;
for(joff = 0; joff<j1-j0; joff+=YYLEN) {
int jcount = j1-j0-joff;
if(jcount > YYLEN) jcount = YYLEN;
__syncthreads();
for(j = tid; j<4*jcount; j+=nt)
yyshare[0][j] = xx1[j0+joff][j];
__syncthreads();
{
int j0loc = j0share[tid] - j0share[0];
int j1loc = j1share[tid] - j0share[0];
if(j0loc < joff) j0loc = joff;
if(j1loc > joff+jcount) j1loc = joff+jcount;
for(j = j0loc; j<j1loc; j++) {
dx = xxshare[tid][0] - yyshare[j-joff][0];
dy = xxshare[tid][1] - yyshare[j-joff][1];
dz = xxshare[tid][2] - yyshare[j-joff][2];
dx = dx - boxl*rint(dx*boxli);
dy = dy - boxl*rint(dy*boxli);
dz = dz - boxl*rint(dz*boxli);
d2 = dx*dx + dy*dy + dz*dz;
if(d2 > 0.0 && d2 < rcut2) {
float2 f = tex1D(pottex,0.5 + d2*potscale);
fxi = fxi - f.y*dx;
fyi = fyi - f.y*dy;
fzi = fzi - f.y*dz;
utot = utot + f.x;
wtot = wtot - f.y*d2;
}
}
}
}
}
}
__syncthreads();
for(j = 0; j<4; j++)
yyshare[0][tid+j*nt] = vv1[i-tid][tid+j*nt];
__syncthreads();
if(i<n) {
vx0 = yyshare[tid][0];
vx1 = vx0 + fxi*dt; vx0 = vx0 + vx1;
ktot = ktot + vx0*vx0;
yyshare[tid][0] = vx1;
xxshare[tid][0] = xxshare[tid][0] + vx1*dt;
vx0 = yyshare[tid][1];
vx1 = vx0 + fyi*dt; vx0 = vx0 + vx1;
ktot = ktot + vx0*vx0;
yyshare[tid][1] = vx1;
xxshare[tid][1] = xxshare[tid][1] + vx1*dt;
vx0 = yyshare[tid][2];
vx1 = vx0 + fzi*dt; vx0 = vx0 + vx1;
ktot = ktot + vx0*vx0;
yyshare[tid][2] = vx1;
xxshare[tid][2] = xxshare[tid][2] + vx1*dt;
}
__syncthreads();
for(j = tid; j<4*min(nt,n-(i-tid)); j+=nt) {
xx2[i-tid][j] = xxshare[0][j];
vv2[i-tid][j] = yyshare[0][j];
}
}
__syncthreads();
xxshare[0][tid+0*nt] = utot*0.5;
xxshare[0][tid+1*nt] = wtot*0.5;
xxshare[0][tid+2*nt] = ktot*0.125;
__syncthreads();
j = 1;
while(j < nt) {
i = (tid-j) | (j-1);
if(tid & j) {
xxshare[0][tid+0*nt] = xxshare[0][tid+0*nt] + xxshare[0][i+0*nt];
xxshare[0][tid+1*nt] = xxshare[0][tid+1*nt] + xxshare[0][i+1*nt];
xxshare[0][tid+2*nt] = xxshare[0][tid+2*nt] + xxshare[0][i+2*nt];
}
j = j<<1;
__syncthreads();
}
for(i = tid; i<3; i+=nt)
ukout[bid+i*nb] = xxshare[0][(i+1)*nt-1];
__syncthreads();
if(tid == 0) startstop[bid+nb] = (startstop[bid+nb] == 0);
/*bad_exit:*/ __syncthreads();
#undef YYLEN
}
double gettime(void) {
struct timeval tv;
gettimeofday(&tv,NULL);
return tv.tv_sec + 1e-6*tv.tv_usec;
}
static int devChoice = -1;
int init_graphics_card(void) {
/* Initialize graphics card */
static int inited = 0;
int devCount;
if(inited == 0) {
hipGetDeviceCount(&devCount);
if(devCount < 1) {
printf("No devices...\n");
exit(1);
}
if(devChoice < 0 || devChoice >= devCount) devChoice = devCount-1;
printf("%% Number of devices is %d. Choosing device %d!\n", devCount,devChoice);
hipSetDevice(devChoice);
inited = 1;
}
return 0;
}
int errorcheck(char s[],int nblocks,int startstop[]) {
int i,err = 0;
for(i = 0; i<2*nblocks; i++) if(startstop[i] != 1) err = err + 1;
if(err) {
printf("%s\n",s);
printf("Error running kernel, errorcount = %d, nblocks = %d\n",
err,nblocks);
printf("BLOCK: ");
for(i = 0; i<nblocks; i++)
printf("%4d",i);
printf("\nSTART: ");
for(i = 0; i<nblocks; i++)
printf("%4d",startstop[i]);
printf("\nSTOP :");
for(i = 0; i<nblocks; i++)
printf("%4d",startstop[nblocks+i]);
printf("\n");
}
return err != 0;
}
int cardtimestep_box(int n,int k,vector4 xx[],vector4 vv[],real boxl,real dt,
double *utot,double *wtot,double *ktot,
int npot,real rcut,real upot[],real fpot[],
int coord_in,int coord_out) {
static vector4 *xx1_dev,*xx2_dev,*vv1_dev,*vv2_dev;
static int *boxno1_dev,*boxno2_dev,*first_dev,*startstop_dev;
static real *uk_dev;
static int ninit = 0, npotinit = 0;
static hipChannelFormatDesc channelDesc;
static hipArray *potarray;
const int align = 32, nthreads = NTHREADS, nblocks = NBLOCKS;
int k3 = k*k*k;
int i;
real *uk = (real *) alloca(sizeof(real) * 3*nblocks);
int *startstop = (int *) alloca(sizeof(int) * 2*nblocks);
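/* Device buffers are (re)allocated only when n or the potential-table size changes;
 calling cardtimestep_box with n = -1 (as done at the end of main) just frees them. */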
if(ninit != n || npotinit != npot) {
if(ninit > 0) {
hipFree(uk_dev);
hipFree(startstop_dev);
hipFree(first_dev);
hipFree(boxno2_dev);
hipFree(boxno1_dev);
hipFree(vv2_dev);
hipFree(vv1_dev);
hipFree(xx2_dev);
hipFree(xx1_dev);
} else {
init_graphics_card();
}
if(n > 0) {
void *ptr;
hipMalloc(&ptr,(sizeof(vector4)*3*n + align-1)/align*align);
xx1_dev = (vector4 *) ptr;
hipMalloc(&ptr,(sizeof(vector4)*3*n + align-1)/align*align);
xx2_dev = (vector4 *) ptr;
hipMalloc(&ptr,(sizeof(vector4)*3*n + align-1)/align*align);
vv1_dev = (vector4 *) ptr;
hipMalloc(&ptr,(sizeof(vector4)*3*n + align-1)/align*align);
vv2_dev = (vector4 *) ptr;
hipMalloc(&ptr,(sizeof(int)*n + align-1)/align*align);
boxno1_dev = (int *) ptr;
hipMalloc(&ptr,(sizeof(int)*n + align-1)/align*align);
boxno2_dev = (int *) ptr;
hipMalloc(&ptr,(sizeof(int)*(k3+1)*3 + align-1)/align*align);
first_dev = (int *) ptr;
hipMalloc(&ptr,(sizeof(real)*3*nblocks + align-1)/align*align);
uk_dev = (real *) ptr;
hipMalloc(&ptr,(sizeof(int)*2*nblocks + align-1)/align*align);
startstop_dev = (int *) ptr;
channelDesc = hipCreateChannelDesc<float2>();
hipMallocArray(&potarray,&channelDesc,npot,1);
hipMalloc(&ptr,sizeof(float2)*npot);
{
float2 *pcopy = (float2 *) alloca(sizeof(float2)*npot);
for(i = 0; i<npot; i++) {
pcopy[i].x = upot[i];
pcopy[i].y = fpot[i];
}
hipMemcpyToArray(potarray,0,0,pcopy,npot*sizeof(float2),
hipMemcpyHostToDevice);
pottex.addressMode[0] = hipAddressModeClamp;
pottex.filterMode = hipFilterModeLinear;
pottex.normalized = false;
hipBindTextureToArray(pottex,potarray,channelDesc);
}
}
ninit = n; npotinit = npot;
}
if(n > 0) {
double t0,t1;
//printf("coord_in = %d , coord_out = %d\n",coord_in,coord_out);
if(coord_in) {
hipMemcpy(xx1_dev,xx,sizeof(vector4) * n,hipMemcpyHostToDevice);
hipMemcpy(vv1_dev,vv,sizeof(vector4) * n,hipMemcpyHostToDevice);
}
for(i = 0; i<3*nblocks; i++) uk[i] = 0.0;
hipMemcpy(uk_dev,uk,sizeof(real) * 3*nblocks, hipMemcpyHostToDevice);
for(i = 0; i<2*nblocks; i++) startstop[i] = 0;
hipMemcpy(startstop_dev,startstop,sizeof(int)*2*nblocks,
hipMemcpyHostToDevice);
hipDeviceSynchronize();
t0 = gettime();
//printf("Computing box indices\n");
hipLaunchKernelGGL(( makeboxno), dim3(nblocks),dim3(nthreads), 0, 0, n,k,boxl,xx1_dev,boxno1_dev);
/* Check box indices */ if(0) {
int *boxno = (int *) malloc(sizeof(int) * n),nerr = 0;
vector4 *xxtemp = (vector4 *) malloc(sizeof(vector4) * n);
int *tags = (int *) malloc(sizeof(int) * n);
hipMemcpy(boxno,boxno1_dev,sizeof(int)*n,hipMemcpyDeviceToHost);
hipMemcpy(xxtemp,xx1_dev,sizeof(vector4)*n,hipMemcpyDeviceToHost);
//printf("Checking box computation\n");
for(i = 0; i<n; i++) {
int bi = boxindex(boxl,k,xxtemp[i],1.0/k,k/boxl);
bi = (k3 + (bi % k3)) % k3;
if(boxno[i] != bi || bi<0 || bi>=k3) if(nerr++ < 10)
printf("boxno[%d] = %d, boxindex=%d\n",i,boxno[i],bi);
}
for(i = 0; i<n; i++) tags[i] = 0;
for(i = 0; i<n; i++) tags[(int) xxtemp[i][3]]++;
for(i = 0; i<n; i++)
if(tags[i] != 1) if(nerr++ < 10) printf("input tag error: tag[%d] = %d\n",i,tags[i]);
free(tags);
free(xxtemp);
free(boxno);
if(nerr > 5) exit(1);
}
//printf("Sorting particles\n");
rsort_card(n,k3+1,
boxno1_dev,xx1_dev,vv1_dev,
boxno2_dev,xx2_dev,vv2_dev,first_dev);
/* Check sorting */ if(0) {
int *boxno = (int *) malloc(sizeof(int) * n);
int *first = (int *) malloc(sizeof(int) * (k3+1));
vector4 *xxtemp = (vector4 *) malloc(sizeof(vector4) * n);
int *tags = (int *) malloc(sizeof(int) * n);
int nerr = 0;
hipMemcpy(boxno,boxno2_dev,sizeof(int)*n,hipMemcpyDeviceToHost);
hipMemcpy(first,first_dev,sizeof(int)*(k3+1),hipMemcpyDeviceToHost);
hipMemcpy(xxtemp,xx2_dev,sizeof(vector4)*n,hipMemcpyDeviceToHost);
//printf("Checking sorting\n");
for(i = 1; i<n; i++) {
if(boxno[i]<boxno[i-1]) if(nerr++ < 10)
printf("Sorting error: boxno[%d] = %d, boxno[%d]=%d\n",
i,boxno[i],i-1,boxno[i-1]);
}
for(i = 0; i<n; i++) tags[i] = 0;
for(i = 0; i<n; i++) tags[(int) xxtemp[i][3]]++;
for(i = 0; i<n; i++)
if(tags[i] != 1) if(nerr++ < 10) printf("tag error: tag[%d] = %d\n",i,tags[i]);
//printf("n=%d k3=%d first[0]=%d first[k3-1]=%d first[k3]=%d\n",
// n,k3,first[0],first[k3-1],first[k3]);
for(i = 0; i<k3; i++) {
int j;
for(j = first[i]; j<first[i+1]; j++)
if(boxno[j] != i) if(nerr++ < 10)
printf("first/box error: boxno[%d]=%d first[%d]=%d first[%d]=%d\n",
j,boxno[j],i,first[i],i+1,first[i+1]);
if(first[i+1] - first[i] > 15) {
printf("Very full box %d: %d\n",i,first[i+1]-first[i]);
for(j = first[i]; j<first[i+1]; j++) {
printf("particle %5d in box %4d :: %10.3f %10.3f %10.3f %10.2f\n",
j,i,xxtemp[j][0],xxtemp[j][1],xxtemp[j][2],xxtemp[j][3]);
}
exit(1);
}
}
free(tags);
free(xxtemp);
free(first);
free(boxno);
if(nerr > 0) exit(1);
}
//printf("Running adjust4x\n");
hipLaunchKernelGGL(( adjustx4), dim3(nblocks), dim3(nthreads)/*,sizeof(vector4)*(nthreads+5)*5*/, 0, 0,
n,k3,first_dev,xx2_dev,startstop_dev);
hipDeviceSynchronize();
for(i = 0; i<2*nblocks; i++) startstop[i] = 0;
hipMemcpy(startstop,startstop_dev,sizeof(int)*2*nblocks,
hipMemcpyDeviceToHost);
hipDeviceSynchronize();
if(errorcheck("KERNEL ADJUSTX4",nblocks,startstop)) { exit(1); }
t1 = gettime();
//rawtime = t1-t0;
//hipMemcpy(first_dev,first,sizeof(int) * (k3+1),hipMemcpyHostToDevice);
*utot = 0.0; *wtot = 0.0; *ktot = 0.0;
for(i = 0; i<2*nblocks; i++) startstop[i] = 0;
hipMemcpy(startstop_dev,startstop,sizeof(int)*2*nblocks,
hipMemcpyHostToDevice);
hipDeviceSynchronize();
//printf("Running force calculation\n");
t0 = gettime();
hipLaunchKernelGGL(( forcecalc_box), dim3(nblocks),dim3(nthreads), 0, 0, n,boxl,k,first_dev,boxno2_dev,
xx2_dev,vv2_dev,xx1_dev,vv1_dev,
dt,uk_dev,startstop_dev,rcut,npot);
hipDeviceSynchronize();
t1 = gettime();
rawtime = t1-t0;
//printf("Force caculation done.\n");
//printf("%120s Rawtime: %.3f ms\n","",rawtime*1e3);
hipMemcpy(startstop,startstop_dev,sizeof(int)*2*nblocks,
hipMemcpyDeviceToHost);
hipDeviceSynchronize();
if(errorcheck("KERNEL FORCECALC_BOX",nblocks,startstop)) { exit(1); }
hipMemcpy(uk,uk_dev,sizeof(real ) * 3*nblocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
if(coord_out) {
/*int nerr = 0;*/
int *tags = (int *) malloc(sizeof(int) * n);
hipMemcpy(xx,xx1_dev,sizeof(vector4) * n, hipMemcpyDeviceToHost);
hipMemcpy(vv,vv1_dev,sizeof(vector4) * n, hipMemcpyDeviceToHost);
/*
for(i = 0; i<n; i++) tags[i] = 0;
for(i = 0; i<n; i++) tags[(int) xx[i][3]]++;
for(i = 0; i<n; i++)
if(tags[i] != 1) if(nerr++ < 5) printf("force tag error (xx): tag[%d] = %d\n",i,tags[i]);
nerr = 0;
for(i = 0; i<n; i++) tags[i] = 0;
for(i = 0; i<n; i++) tags[(int) vv[i][3]]++;
for(i = 0; i<n; i++)
if(tags[i] != 1) if(nerr++ < 5) printf("force tag error (vv): tag[%d] = %d\n",i,tags[i]);
*/
free(tags);
}
for(i = 0; i<nblocks; i++) {
//if(uk[i] > *utot) *utot = uk[i];
*utot = *utot + uk[i+0*nblocks];
*wtot = *wtot + uk[i+1*nblocks];
*ktot = *ktot + uk[i+2*nblocks];
}
}
return 0;
}
void bswap(int n, int sz, void *v) {
char *p = (char *) v;
char t;
int i,k;
for(i = 0; i<n; i++)
for(k = 0; k<sz/2; k++) {
t = p[i*sz + k];
p[i*sz + k] = p[i*sz + sz-k-1];
p[i*sz + sz-k-1] = t;
}
}
void storecfg(char *fname,int n,vector *xx,int byteorder) {
double *xout = (double *) malloc(sizeof(double) * 3*n);
int i,j,len;
FILE *f;
f = fopen(fname,"w");
if(f == NULL) {
printf("Can not open file %s for writing.\n",fname);
free(xout);
return;
}
len = 3*n*sizeof(double);
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
xout[n*j+i] = xx[i][j];
if(byteorder) {
bswap(1,sizeof(int),&len);
bswap(3*n,sizeof(double),xout);
}
fwrite(&len,sizeof(int),1,f);
fwrite(xout,3*sizeof(double),n,f);
fwrite(&len,sizeof(int),1,f);
fclose(f);
free(xout);
}
int loadcfg(char *fname,vector **xx,int *byteorder) {
FILE *f;
int n,do_swap,len;
double *xin;
int i,j;
f = fopen(fname,"r");
if(f == NULL) {
printf("Can not open file %s for reading.\n",fname);
return -1;
}
fseek(f,0,SEEK_END);
len = ftell(f);
fseek(f,0,SEEK_SET);
fread(&n,sizeof(int),1,f);
if(len != (int) (n+2*sizeof(int))) {
bswap(1,sizeof(int),&n);
if(len != (int) (n+2*sizeof(int))) {
printf("Crap, unable to understand md.cfg\n");
fclose(f);
return -1;
}
do_swap = 1;
} else do_swap = 0;
n = n / (3*sizeof(double));
///printf("do_swap = %d n = %d\n",do_swap,n);
*xx = (vector *) malloc(sizeof(vector ) * n);
xin = (double *) malloc(sizeof(double) * 3*n);
fread(xin,sizeof(double)*3,n,f);
if(do_swap) bswap(3*n,sizeof(double),xin);
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
(*xx)[i][j] = xin[n*j+i];
free(xin);
fread(&len,sizeof(int),1,f);
fclose(f);
if(do_swap) bswap(1,sizeof(int),&len);
if(len != (int) (sizeof(double)*3*n)) {
printf("Crap, unable to understand file %s (stage two) %d %d\n",
fname,len,(int) (sizeof(double)*3*n));
free(xx);
return -1;
}
*byteorder = do_swap;
return n;
}
int main(int argc, char *argv[]) {
int niter,nrescale,noutput,nrestart,ncompare,nmomentum,cfgsave;
int iter0,iter;
int i,j,k,k3,n,nin;
real boxl,dt;
vector *xx,*vv,*xx0;
vector4 *xx4,*vv4,*xx4save,*vv4save;
//int *first,*perm;
double utot,wtot,ktot,p,tinst,etotlast = 0.0;;
double rho,rhoguess;
double tfixed;
double Uavg,Tavg,Pavg,Tscaleavg,msd = 0.0;
FILE *logfile;
char line[100];
int byteorder = 0,echange;
real rcut = 2.51;
int npot = 1000;
real *upot = (real *) alloca(sizeof(real)*npot);
real *fpot = (real *) alloca(sizeof(real)*npot);
int coord_in,coord_out;
if(argc >= 3 && strcmp(argv[1],"-device") == 0) {
devChoice = atoi(argv[2]);
printf("%% Command line option set tentative device number %d\n",devChoice);
}
/* Compute potantial table */
for(i = 0; i<npot; i++) {
double v,g;
double r2 = i*rcut*rcut/(npot-1);
vru(sqrt(r2),&v,&g);
upot[i] = v;
fpot[i] = g;
}
/* Load initial configuration */
n = loadcfg("md.cfg",&xx,&byteorder);
n = loadcfg("md.vel",&vv,&byteorder);
{
FILE *fp = fopen("md0.cfg","r");
if(fp == NULL) {
xx0 = (vector *) malloc(sizeof(vector) * n);
memcpy(xx0,xx,sizeof(vector)*n);
storecfg("md0.cfg",n,xx0,byteorder);
} else {
fclose(fp);
n = loadcfg("md0.cfg",&xx0,&byteorder);
}
}
{
FILE *fp = fopen("md.inp","r");
if(fp == NULL) {
printf("Cannot open input file md.inp\n");
exit(1);
}
fgets(line,sizeof(line),fp); sscanf(line+29,"%d",&nin);
fgets(line,sizeof(line),fp); sscanf(line+29,"%lf",&rho);
fgets(line,sizeof(line),fp); sscanf(line+29,"%lf",&tfixed);
fgets(line,sizeof(line),fp); sscanf(line+29,"%d",&nrescale);
fgets(line,sizeof(line),fp); sscanf(line+29,"%f",&dt);
fgets(line,sizeof(line),fp); sscanf(line+29,"%d",&niter);
fgets(line,sizeof(line),fp); sscanf(line+29,"%d",&noutput);
fgets(line,sizeof(line),fp); sscanf(line+29,"%d",&nrestart);
fgets(line,sizeof(line),fp); // potential cut off
fgets(line,sizeof(line),fp); // cubic flag
fgets(line,sizeof(line),fp); // noncubic data
fgets(line,sizeof(line),fp); // noncubic data
fgets(line,sizeof(line),fp); // noncubic data
fgets(line,sizeof(line),fp); // mpi data
fgets(line,sizeof(line),fp); // mpi data
fgets(line,sizeof(line),fp); // pot file
fgets(line,sizeof(line),fp); sscanf(line+29,"%d",&cfgsave);
boxl = pow(n/rho,1.0/3.0);
}
{
FILE *fp = fopen("md.sts","r");
iter0 = 1; Uavg = 0.0; Tavg = 0.0; Pavg = 0.0; Tscaleavg = 0.0;
if(fp == NULL) {
fp = fopen("md.sts","w");
fprintf(fp,"%12d %20.10e %20.10e %20.10e %20.10e\n",
iter0,Uavg,Tavg,Pavg,Tscaleavg);
fclose(fp);
} else {
fscanf(fp,"%d%lf%lf%lf%lf",&iter0,&Uavg,&Tavg,&Pavg,&Tscaleavg);
Uavg = Uavg * ((iter0-1) % noutput);
Tavg = Tavg * ((iter0-1) % noutput);
Pavg = Pavg * ((iter0-1) % noutput);
}
}
logfile = fopen("md.log","a");
/* Compute number of boxes to divide system into */
k = (int) floor(2*boxl/rcut);
while(k>0 && k+boxl/(4*k*k*rcut) > 2*boxl/rcut) k=k-1;
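/* This choice keeps each box at least about rcut/2 wide, so the +/-2-box neighbour
 sweep in forcecalc_box covers the full cutoff radius. */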
if(k <= 0) {
printf("Error in k, k=%d boxl=%f rcut=%f\n",k,boxl,rcut);
exit(1);
}
k3 = k*k*k;
/* Compute an estimate of the particle density */
{
double xmax = -1e20;
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
if(xx[i][j] > xmax) xmax = xx[i][j];
rhoguess = n/(xmax*xmax*xmax);
}
if(fabs(rhoguess-rho) > 1e-3)
printf("WARNING, rho and rhoguess differ with more than 1e-3.\n");
if(n != nin)
printf("WARNING, N in cfgfile and md.inp differ.\n");
ncompare = 1000000000; /* How often to compare cpu/card computations */
nmomentum = 100; /* How often to rescale momentu, (often due to single precision)*/
printf("%% MD CONFIGURATION\n"
"%% n = %7d\n"
"%% k = %7d\n"
"%% k3 = %7d\n"
"%% rho = %11.4f\n"
"%% rhoguess = %11.4f\n"
"%% boxl = %15.8f\n"
"%% dt = %15.8f\n"
"%% niter = %9d\n"
"%% cardcmp = %9d\n"
"%% momentum = %9d\n",
n,k,k3,rho,rhoguess,boxl,dt,niter,ncompare,nmomentum);
/* Allocate memory for internal data structure */
xx4save = (vector4 *) malloc(sizeof(vector4) * n);
vv4save = (vector4 *) malloc(sizeof(vector4) * n);
xx4 = (vector4 *) malloc(sizeof(vector4) * n);
vv4 = (vector4 *) malloc(sizeof(vector4) * n);
for(i = 0; i<n; i++) {
for(j = 0; j<3; j++) {
xx4[i][j] = xx[i][j];
vv4[i][j] = vv[i][j];
}
xx4[i][3] = i;
vv4[i][3] = i;
}
echange = 1;
coord_out = 1;
for(iter = iter0; iter<niter+iter0; iter++) {
double t0,t1/*,boxtime*/;
//t0 = gettime();
/* Save configuration before timestep so that
a step can be performed on the cpu, and so
that it can be dumped to file in case of
error */
if(iter % ncompare == 0) {
memcpy(xx4save,xx4,n*sizeof(vector4));
memcpy(vv4save,vv4,n*sizeof(vector4));
}
if(coord_out) coord_in = 1; else coord_in = 0;
coord_out = 0;
if(iter % noutput == 0) coord_out = 1;
if(iter % ncompare == ncompare-1 || iter % ncompare == 0) coord_out = 1;
if(iter % nmomentum == 0) coord_out = 1;
if(iter % nrestart == 0 || iter==iter0+niter-1) coord_out = 1;
t0 = gettime();
cardtimestep_box(n,k,xx4,vv4,boxl,dt,
&utot,&wtot,&ktot,
npot,rcut,upot,fpot,
coord_in,coord_out);
t1 = gettime();
if(iter % noutput == 0 || iter % ncompare == 0) {
msd = 0.0;
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
msd = msd + (xx0[(int) xx4[i][3]][j] - xx4[i][j])*(xx0[(int) xx4[i][3]][j] - xx4[i][j]);
}
utot = utot/n;
wtot = wtot/(3*n);
ktot = ktot/n;
tinst = 2.0/3.0 * ktot;
p = rho*(tinst + wtot);
msd = msd/n;
//t1 = gettime();
/* If total energy changes by more than 1% in one iteration,
that indicates a srious error. This codes dumps the state
that produced the error. */
if(0) if(echange == 0 &&
fabs(etotlast-utot-ktot)>0.01*fabs(etotlast) &&
fabs(etotlast-utot-ktot)>0.01) {
char s[80];
FILE *f;
printf("%% card: %20.10e %20.10e %20.10e %20.10e %10.3f\n",
utot,ktot,utot+ktot,p,(t1-t0)*1e3);
printf("%% Serious energy error. "
"Dumping configuration and exiting...\n");
sprintf(s,"totaldump.%d",iter);
f = fopen(s,"w");
/* Simulation parameters */
fwrite(&n,sizeof(n),1,f);
fwrite(&k,sizeof(k),1,f);
fwrite(&k3,sizeof(k3),1,f);
fwrite(&boxl,sizeof(boxl),1,f);
fwrite(&rcut,sizeof(rcut),1,f);
fwrite(&dt,sizeof(dt),1,f);
/* Input to time-step */
fwrite(xx4save,sizeof(vector4),n,f);
fwrite(vv4save,sizeof(vector4),n,f);
fwrite(xx4,sizeof(vector4),n,f);
fwrite(vv4,sizeof(vector4),n,f);
/* Output from time-step */
fwrite(xx,sizeof(vector),n,f);
fwrite(vv,sizeof(vector),n,f);
fclose(f);
break;
} else etotlast = utot + ktot;
echange = 0;
/* Output statistics */
Uavg = Uavg + utot;
Tavg = Tavg + tinst;
Pavg = Pavg + p;
Tscaleavg = Tscaleavg + tinst;
if(iter % noutput == 0) {
Uavg = Uavg / noutput;
Tavg = Tavg / noutput;
Pavg = Pavg / noutput;
printf("%12d %20.10e %20.10e %20.10e %20.10e %20.10e\n",
iter,Uavg+Tavg*1.5,Uavg,Tavg,Pavg,msd);
fprintf(logfile,
"%12d %20.10e %20.10e %20.10e %20.10e %20.10e\n",
iter,Uavg+Tavg*1.5,Uavg,Tavg,Pavg,msd);
Uavg = 0.0; Tavg = 0.0; Pavg = 0.0;
}
etotlast = utot + ktot;
if(iter % ncompare == 0) {
/* Run same timestep on cpu, and pring statistics for both card
and cpu step, for accuracy comparisons. */
printf("%% card: %12d %20.10e %20.10e %20.10e %20.10e %20.10e %10.3f\n",
iter,utot+ktot,utot,tinst,p,msd,(t1-t0)*1e3);
fprintf(logfile,
"%% card: %12d %20.10e %20.10e %20.10e %20.10e %20.10e %10.3f\n",
iter,utot+ktot,utot,tinst,p,msd,(t1-t0)*1e3);
t0 = gettime();
{
int *first = (int *) malloc(sizeof(int) * (k3+1));
int *perm = (int *) malloc(sizeof(int) * n);
int *boxno = (int *) malloc(sizeof(int) * n);
vector4 *xx4temp = (vector4 *) malloc(sizeof(vector4) * n);
vector4 *vv4temp = (vector4 *) malloc(sizeof(vector4) * n);
int jsave;
//printf("%% -- CPU check. Running boxem...\n"); fflush(stdout);
boxem(n,boxl,k,xx4save,first,perm);
//printf("%% -- boxem complete\n"); fflush(stdout);
jsave = k3;
while(first[jsave] == 0) {
first[jsave] = n; jsave = jsave-1;
}
//printf("%% -- Copying to xx4temp\n"); fflush(stdout);
for(i = 0; i<n; i++) {
for(j = 0; j<3; j++) {
xx4temp[i][j] = xx4save[perm[i]][j];
vv4temp[i][j] = vv4save[perm[i]][j];
}
xx4temp[i][3] = xx4save[perm[i]][3];
vv4temp[i][3] = xx4save[perm[i]][3];
}
//printf("%% -- Assigning to boxno\n"); fflush(stdout);
for(i = 0; i<k3; i++)
for(j = first[i]; j<first[i+1]; j++)
boxno[j] = i;
//printf("%% -- Calling forcecalc_host...\n"); fflush(stdout);
forcecalc_host(n,boxl,k,first,boxno,xx4temp,
vv4temp,xx4save,vv4save,dt,
&utot,&wtot,&ktot,npot,rcut,upot,fpot);
//printf("%% -- forcecalc_host complete\n"); fflush(stdout);
free(vv4temp);
free(xx4temp);
free(boxno);
free(perm);
free(first);
}
//printf("%% -- Computing msd\n"); fflush(stdout);
msd = 0.0;
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
msd = msd + (xx0[(int) xx4save[i][3]][j] - xx4save[i][j])*
(xx0[(int) xx4save[i][3]][j] - xx4save[i][j]);
//printf("%% -- msd calculation complete\n"); fflush(stdout);
utot = utot/n;
wtot = wtot/(3*n);
ktot = ktot/n;
tinst = 2.0/3.0 * ktot;
p = rho*(tinst + wtot);
msd = msd/n;
t1 = gettime();
printf("%% cpu: %12d %20.10e %20.10e %20.10e %20.10e %20.10e %10.3f\n",
iter,utot+ktot,utot,tinst,p,msd,(t1-t0)*1e3);
fprintf(logfile,
"%% cpu: %12d %20.10e %20.10e %20.10e %20.10e %20.10e %10.3f\n",
iter,utot+ktot,utot,tinst,p,msd,(t1-t0)*1e3);
fflush(stdout); fflush(logfile);
}
//printf("Quitting here... %s:%d\n",__FILE__,__LINE__);
//exit(1);
if(iter % nmomentum == 0) {
double mom[3] = {0.0, 0.0, 0.0};
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
mom[j] = mom[j] + vv4[i][j];
/*printf("%% Momentum is (%20.10e , %20.10e , %20.10e)\n",
mom[0],mom[1],mom[2]);*/
for(j = 0; j<3; j++) mom[j] = mom[j] / n;
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
vv4[i][j] = vv4[i][j] - mom[j];
for(i = 0; i<n; i++)
for(j = 0; j<3; j++) {
double t = boxl*floor(xx4[i][j]/boxl);
xx4[i][j] = xx4[i][j] - t;
xx0[(int) xx4[i][3]][j] = xx0[(int) xx4[i][3]][j] - t;
xx[(int) xx4[i][3]][j] = xx4[i][j];
vv[(int) vv4[i][3]][j] = vv4[i][j];
}
/*
for(j = 0; j<3; j++) mom[j] = 0.0;
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
mom[j] = mom[j] + vv[i][j];
*/
/*printf("%% Corrected (%20.10e , %20.10e , %20.10e)\n",
mom[0],mom[1],mom[2]);*/
echange = 1;
}
if(nrescale > 0 && iter % nrescale == 0) {
double alpha;
Tscaleavg = Tscaleavg / nrescale;
/* alpha = (2*tfixed - Tscaleavg)/Tscaleavg; */
alpha = 1.0 + 1.8*(tfixed - Tscaleavg)/tinst;
if(alpha < 1e-6) alpha = 1e-6;
alpha = sqrt(alpha);
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
vv4[i][j] = vv4[i][j]*alpha;
Tscaleavg = 0.0;
echange = 1;
}
if(iter % nrestart == 0 || iter==iter0+niter-1) {
char fname[80];
FILE *fp;
for(i = 0; i<n; i++)
for(j = 0; j<3; j++) {
double t = boxl*floor(xx4[i][j]/boxl);
xx4[i][j] = xx4[i][j] - t;
xx0[(int) xx4[i][3]][j] = xx0[(int) xx4[i][3]][j] - t;
xx[(int) xx4[i][3]][j] = xx4[i][j];
vv[(int) vv4[i][3]][j] = vv4[i][j];
}
fclose(logfile);
if(cfgsave == 1){
sprintf(fname,"md%09d.cfg",iter);
storecfg(fname,n,xx,byteorder);
}
if(cfgsave == 2){
sprintf(fname,"md%09d.cfg",iter);
storecfg(fname,n,xx,byteorder);
sprintf(fname,"md0_%09d.cfg",iter);
storecfg(fname,n,xx0,byteorder);
sprintf(fname,"md%09d.vel",iter);
storecfg(fname,n,vv,byteorder);
}
storecfg("md.cfg",n,xx,byteorder);
storecfg("md.vel",n,vv,byteorder);
storecfg("md0.cfg",n,xx0,byteorder);
fp = fopen("md.sts","w");
fprintf(fp,"%12d %20.10e %20.10e %20.10e %20.10e\n",
iter+1,Uavg/iter,Tavg/iter,Pavg/iter,Tscaleavg);
fclose(fp);
logfile = fopen("md.log","a");
}
}
/* Release memory allocated on graphics card */
cardtimestep_box(-1,-1,NULL,NULL,
0.0,0.0,NULL,NULL,NULL,0,0.0,NULL,NULL,0,0);
free(xx);
free(vv);
free(xx0);
free(xx4);
free(vv4);
free(xx4save);
free(vv4save);
return 0;
}
| 11f92a6163b98649d80ec8f1ab4fe1004b18d03e.cu | /*
* This code is courtesy of, and copyright 2015,
* Tomas Oppelstrup, Livermore National Lab. Please
* do not redistribute without his approval.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <unistd.h>
#include "real.h"
#define NBLOCKS 180
#define NTHREADS 64
#include "boxsortinc.cu"
double rawtime;
void vru(double r,double *v,double *g) {
double rcut = 2.5;
double alpha = -24.0*(pow(rcut,-7.0) - 2.0*pow(rcut,-13.0));
double beta = -4.0*(pow(rcut,-12.0) - pow(rcut,-6.0));
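// alpha and beta shift the plain Lennard-Jones form so that both the potential and the force vanish at rcut, giving a smooth cutoff.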
if(r < rcut && r > 0.1) {
// Contribution to potential energy phi(r)
*v = 4.0*(pow(r,-12.0) - pow(r,-6.0)) + alpha*(r - rcut) + beta;
// Contribution to gradient 1/r * Dphi(r)/Dr
*g = 24.0*(pow(r,-8.0) - 2.0*pow(r,-14.0)) + alpha/r;
} else {
*v = 0.0;
*g = 0.0;
}
}
void vru_grnpotential(double r,double *v,double *g) {
/* % Parameters: */
const double a1 = 265.848;
const double m = 12;
const double b1 = 1.5;
const double c1 = 1.45;
const double d = 0.8;
const double a2 = 2.5;
const double b2 = 0.19;
const double c2 = 1.89;
double a1x,a2x;
static int firsttime = 1;
if(firsttime == 1) {
printf("%% Potential parameters:\n"
"%% a1 = %9.4f a2 = %9.4f m = %9.4f\n"
"%% b1 = %9.4f b2 = %9.4f d = %9.4f\n"
"%% c1 = %9.4f c2 = %9.4f\n\n",
a1,a2,m,b1,b2,d,c1,c2);
firsttime = 0;
}
/* %Formula: */
if(r < 0.1) {
*v = 0.0;
*g = 0.0;
} else if(r < c1) {
a1x = a1*exp(b1/(r-c1));
a2x = a2*exp(b2/(r-c2));
*v = (1/pow(r,m)-d)*a1x + a2x;
*g = (-m/pow(r,m+1) + (1/pow(r,m)-d)*(-b1/((r-c1)*(r-c1))))*a1x +
a2x*(-b2/((r-c2)*(r-c2)));
*g = *g/r;
} else if(r < c2) {
*v = a2*exp(b2/(r-c2));
*g = *v * (-b2/((r-c2)*(r-c2)));
*g = *g/r;
} else {
*v = 0;
*g = 0;
}
}
/* Transformation. From normal to skewed box:
1 s/k (s/k)^2
0 1 s/k
0 0 1
Inverse transformation:
1 -s/k 0
0 1 -s/k
0 0 1
*/
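/* In boxindex() below, g = 1/k and w = k/boxl: the second and third coordinates are
 first corrected for the skew of the box (the g*x terms), then all three are scaled
 by w and floored to integer box coordinates a, b, c, which are combined into the
 linear index a + k*(b + k*c). */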
/* Figure out linear index of particle */
__host__ __device__
static int boxindex(real boxl,int k,volatile vector x, real g, real w) {
//real g = 1.0/k, w = k/boxl;
int a,b,c;
a = (int) floor(x[0] * w);
b = (int) floor((x[1] - g*x[0]) * w);
c = (int) floor((x[2] - g*x[1]) * w);
return a + k*(b + k*c);
}
__global__ void makeboxno(int n,int k,real boxl,vector4 xx[],int boxno[]) {
const int pid = threadIdx.x + blockDim.x*blockIdx.x;
const int np = blockDim.x*gridDim.x;
const int tid = threadIdx.x;
const int nt = blockDim.x;
int k3 = k*k*k;
real g = 1.0/k, w = k/boxl;
volatile __shared__ struct {
vector4 xx[NTHREADS];
} shm;
int i,bi;
for(i = pid; i<n+tid; i+=np) {
__syncthreads();
shm.xx[0][tid+0*nt] = xx[i-tid][tid+0*nt];
shm.xx[0][tid+1*nt] = xx[i-tid][tid+1*nt];
shm.xx[0][tid+2*nt] = xx[i-tid][tid+2*nt];
shm.xx[0][tid+3*nt] = xx[i-tid][tid+3*nt];
__syncthreads();
bi = boxindex(boxl,k,shm.xx[tid],g,w);
bi = (k3 + (bi % k3)) % k3;
if(i < n)
boxno[i] = bi;
}
}
/* Put particles in boxes */
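/* boxem builds a linked list of particles for every box (first[]/next[], stored 1-based so
 0 means empty) and then flattens the lists into perm[]; on return first[j] is the position
 in perm[] where box j starts. */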
static void boxem(int n,real boxl,int k,vector4 xx[],int first[],int perm[]) {
int i,j,p,k3 = k*k*k;
int *next;
int bi;
real g = 1.0/k, w = k/boxl;
int *tags = (int *) alloca(sizeof(int) * n);
memset(tags,0,sizeof(int) * n);
next = (int *) alloca(n * sizeof(int));
memset(next,0,sizeof(int) * n);
memset(first,0,sizeof(int) * (k3+1));
for(i = 0; i<n; i++) {
bi = boxindex(boxl,k,xx[i],g,w);
j = bi % k3;
j = (k3+j)%k3;
next[i] = first[j];
first[j] = i+1;
}
i = 0;
for(j = 0; j<k3; j++) {
int ix = (i<n) ? i : i-n;
p = first[j]-1;
first[j] = ix; /*printf("First in box %2d is %2d. Chain is %2d",j,i,p);*/
while(p >= 0) {
tags[p] = tags[p] + 1;
perm[i] = p; /*printf("location %3d has particle %3d.\n",i,p);*/
i = i + 1;
p = next[p]-1;
/*printf(" %d",p);*/
}
/*printf("\n");*/
}
if(n != i) printf("* Serious counting error @%s:%d. i=%d n=%d k3=%d\n",
__FILE__,__LINE__,i,n,k3);
for(i = 0; i<n; i++)
if(tags[i] != 1) printf("Wrong tag: tags(%d) = %d\n",i,tags[i]);
first[k3] = 0;
}
static void forcecalc_host(int n,real boxl,
int k,int first[],int boxno[],vector4 xx1[],
vector4 vv1[],vector4 xx2[],vector4 vv2[],real dt,
double *u_p,double *w_p,double *k_p,
int npot,double rcut,real upot[],real fpot[]) {
double boxli = 1.0/boxl;
int k3 = k*k*k;
int i,j,i0,i1,j0,j1,iu,iv,b,ii;
double xi,yi,zi,fxi,fyi,fzi,dx,dy,dz;
double d2;
double vx0,vy0,vz0,vx1,vy1,vz1,kx,ky,kz;
double vr,u,rcut2 = rcut*rcut;
double utot,wtot,ktot;
utot = 0.0;
wtot = 0.0;
ktot = 0.0;
for(b = 0; b<k3; b++) {
i0 = first[b];
i1 = first[b+1];
for(i = i0; i!=i1; i++) {
xi = xx1[i][0];
yi = xx1[i][1];
zi = xx1[i][2];
ii = (int) xx1[i][3];
fxi = 0.0;
fyi = 0.0;
fzi = 0.0;
for(iv = -2; iv<=2; iv++)
for(iu = -2; iu<=2; iu++) {
j0 = (k3 + b + k*(iu + k*iv) - 2)%k3;
j1 = j0 + 5;
if(j1 >= k3) j1 = j1-k3;
j0 = first[j0];
j1 = first[j1];
if(j0 > n || j1 > n) {
printf("Crap in forcecalc_host :: n=%d j0=%d j1=%d\n",
n,j0,j1);
fflush(stdout);
exit(1);
}
if(j0 == n) j0 = 0;
if(j1 == n) j1 = 0;
for(j = j0; j!=j1; j=((j==n-1) ? 0 : j+1)) {
dx = xi - xx1[j][0];
dy = yi - xx1[j][1];
dz = zi - xx1[j][2];
dx = dx - boxl*rint(dx*boxli);
dy = dy - boxl*rint(dy*boxli);
dz = dz - boxl*rint(dz*boxli);
d2 = dx*dx + dy*dy + dz*dz;
if(d2 > 0.0 && d2 < rcut2) {
//vru(sqrt(d2),&u,&vr);
double fdx = d2/rcut2 * (npot-1);
int idx = (int) floor(fdx);
double frac = fdx-idx;
//frac = floor(256.0*frac)/256.0;
if(idx >= npot-1) {
u = 0.0;
vr = 0.0;
} else {
u = (1.0-frac)*upot[idx] + frac*upot[idx+1];
vr = (1.0-frac)*fpot[idx] + frac*fpot[idx+1];
}
fxi = fxi - vr*dx;
fyi = fyi - vr*dy;
fzi = fzi - vr*dz;
utot = utot + u;
wtot = wtot - vr*d2;
}
}
}
vx0 = vv1[i][0];
vy0 = vv1[i][1];
vz0 = vv1[i][2];
vx1 = vx0 + fxi*dt;
vy1 = vy0 + fyi*dt;
vz1 = vz0 + fzi*dt;
kx = vx0 + vx1;
ky = vy0 + vy1;
kz = vz0 + vz1;
kx = kx*kx;
ky = ky*ky;
kz = kz*kz;
ktot = ktot + (kx + ky + kz)*0.125;
vv2[i][0] = vx1;
vv2[i][1] = vy1;
vv2[i][2] = vz1;
xx2[i][0] = xi + dt*vx1;
xx2[i][1] = yi + dt*vy1;
xx2[i][2] = zi + dt*vz1;
xx2[i][3] = ii;
}
}
*u_p = utot*0.5;
*w_p = wtot*0.5;
*k_p = ktot;
}
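/* adjustx4 replicates the first[] box table (offset by n and 2n) and the particle array
 into the second and third copies allocated for them, so that forcecalc_box can index
 neighbouring box columns without modular wrap-around; the minimum-image arithmetic in
 the force loop handles the actual periodic images. */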
__global__ static void adjustx4(int n,int k3,int first[],vector4 xx[],
int startstop[]) {
const int pid = threadIdx.x + blockDim.x*blockIdx.x;
const int np = blockDim.x*gridDim.x;
int i,b,n4;
real xi;
__syncthreads();
if(threadIdx.x == 0)
startstop[blockIdx.x] = (startstop[blockIdx.x] == 0);
for(b = pid; b<k3; b+=np) {
i = first[b];
first[b+k3 ] = i+ n;
first[b+2*k3] = i+2*n;
}
n4 = 4*n;
for(b = pid; b<n4; b+=np) {
xi = xx[0][b];
xx[n ][b] = xi;
xx[2*n][b] = xi;
}
__syncthreads();
if(threadIdx.x == 0)
startstop[gridDim.x+blockIdx.x] = (startstop[gridDim.x+blockIdx.x] == 0);
}
static texture<float2,1,cudaReadModeElementType> pottex;
__global__ static void
forcecalc_box(int n,real boxl,int k,int first[],int boxno[],vector4 xx1[],
vector4 vv1[],vector4 xx2[],vector4 vv2[],real dt,real ukout[],
int startstop[],real rcut,int npot) {
volatile __shared__ int offsets[32];
volatile __shared__ int j0share[NTHREADS],j1share[NTHREADS];
volatile __shared__ real xxshare[NTHREADS][4];
/*#define YYLEN (NTHREADS+160)*/
#define YYLEN (NTHREADS+160)
volatile __shared__ real yyshare[YYLEN][4];
const int pid = threadIdx.x + blockDim.x*blockIdx.x;
const int np = blockDim.x * gridDim.x;
const int tid = threadIdx.x;
const int nt = blockDim.x;
const int bid = blockIdx.x;
const int nb = gridDim.x;
const int k3 = k*k*k;
const real boxli = 1.0/boxl,rcut2 = rcut*rcut,potscale = (npot-1)/rcut2;
//const real g = 1.0/k, w = k/boxl;
real dx,dy,dz,d2,fxi,fyi,fzi;
real utot,wtot,ktot,vx0,vx1;
int i,j,j0,j1,iv,b;
__syncthreads();
if(tid == 0) startstop[bid] = (startstop[bid] == 0);
for(i = tid; i<25; i+=nt)
offsets[i] = (i/5-2)*k*k + (i%5-2)*k;
utot = 0.0; wtot = 0.0; ktot = 0.0;
for(i = pid; i<n+tid; i+=np) {
// Load i-particles into shared memory, one particle per thread
__syncthreads();
xxshare[0][tid+0*nt] = xx1[i-tid][tid+0*nt];
xxshare[0][tid+1*nt] = xx1[i-tid][tid+1*nt];
xxshare[0][tid+2*nt] = xx1[i-tid][tid+2*nt];
xxshare[0][tid+3*nt] = xx1[i-tid][tid+3*nt];
__syncthreads();
fxi = 0.0; fyi = 0.0; fzi = 0.0;
// Loop over 25 neighboring columns
b = n-1;
if(i < n) b = i;
b = boxno[b];//((boxindex(boxl,k,xxshare[tid],g,w)%k3)+k3)%k3;
for(iv = 0; iv<25; iv++) {
__syncthreads();
j0share[tid] = first[k3+b+offsets[iv]-2];
j1share[tid] = first[k3+b+offsets[iv]+2+1];
__syncthreads();
j0 = j0share[0]; j1 = j1share[nt-1];
{
int joff;
for(joff = 0; joff<j1-j0; joff+=YYLEN) {
int jcount = j1-j0-joff;
if(jcount > YYLEN) jcount = YYLEN;
__syncthreads();
for(j = tid; j<4*jcount; j+=nt)
yyshare[0][j] = xx1[j0+joff][j];
__syncthreads();
{
int j0loc = j0share[tid] - j0share[0];
int j1loc = j1share[tid] - j0share[0];
if(j0loc < joff) j0loc = joff;
if(j1loc > joff+jcount) j1loc = joff+jcount;
for(j = j0loc; j<j1loc; j++) {
dx = xxshare[tid][0] - yyshare[j-joff][0];
dy = xxshare[tid][1] - yyshare[j-joff][1];
dz = xxshare[tid][2] - yyshare[j-joff][2];
dx = dx - boxl*rint(dx*boxli);
dy = dy - boxl*rint(dy*boxli);
dz = dz - boxl*rint(dz*boxli);
d2 = dx*dx + dy*dy + dz*dz;
if(d2 > 0.0 && d2 < rcut2) {
float2 f = tex1D(pottex,0.5 + d2*potscale);
fxi = fxi - f.y*dx;
fyi = fyi - f.y*dy;
fzi = fzi - f.y*dz;
utot = utot + f.x;
wtot = wtot - f.y*d2;
}
}
}
}
}
}
__syncthreads();
for(j = 0; j<4; j++)
yyshare[0][tid+j*nt] = vv1[i-tid][tid+j*nt];
__syncthreads();
if(i<n) {
vx0 = yyshare[tid][0];
vx1 = vx0 + fxi*dt; vx0 = vx0 + vx1;
ktot = ktot + vx0*vx0;
yyshare[tid][0] = vx1;
xxshare[tid][0] = xxshare[tid][0] + vx1*dt;
vx0 = yyshare[tid][1];
vx1 = vx0 + fyi*dt; vx0 = vx0 + vx1;
ktot = ktot + vx0*vx0;
yyshare[tid][1] = vx1;
xxshare[tid][1] = xxshare[tid][1] + vx1*dt;
vx0 = yyshare[tid][2];
vx1 = vx0 + fzi*dt; vx0 = vx0 + vx1;
ktot = ktot + vx0*vx0;
yyshare[tid][2] = vx1;
xxshare[tid][2] = xxshare[tid][2] + vx1*dt;
}
__syncthreads();
for(j = tid; j<4*min(nt,n-(i-tid)); j+=nt) {
xx2[i-tid][j] = xxshare[0][j];
vv2[i-tid][j] = yyshare[0][j];
}
}
__syncthreads();
xxshare[0][tid+0*nt] = utot*0.5;
xxshare[0][tid+1*nt] = wtot*0.5;
xxshare[0][tid+2*nt] = ktot*0.125;
__syncthreads();
j = 1;
while(j < nt) {
i = (tid-j) | (j-1);
if(tid & j) {
xxshare[0][tid+0*nt] = xxshare[0][tid+0*nt] + xxshare[0][i+0*nt];
xxshare[0][tid+1*nt] = xxshare[0][tid+1*nt] + xxshare[0][i+1*nt];
xxshare[0][tid+2*nt] = xxshare[0][tid+2*nt] + xxshare[0][i+2*nt];
}
j = j<<1;
__syncthreads();
}
for(i = tid; i<3; i+=nt)
ukout[bid+i*nb] = xxshare[0][(i+1)*nt-1];
__syncthreads();
if(tid == 0) startstop[bid+nb] = (startstop[bid+nb] == 0);
/*bad_exit:*/ __syncthreads();
#undef YYLEN
}
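/* Wall-clock time in seconds, used for the kernel timings reported below. */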
double gettime(void) {
struct timeval tv;
gettimeofday(&tv,NULL);
return tv.tv_sec + 1e-6*tv.tv_usec;
}
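/* CUDA device selection: devChoice can be set with the "-device <id>"
   command-line option (see main); otherwise the last device is chosen. */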
static int devChoice = -1;
int init_graphics_card(void) {
/* Initialize graphics card */
static int inited = 0;
int devCount;
if(inited == 0) {
cudaGetDeviceCount(&devCount);
if(devCount < 1) {
printf("No devices...\n");
exit(1);
}
if(devChoice < 0 || devChoice >= devCount) devChoice = devCount-1;
printf("%% Number of devices is %d. Choosing device %d!\n", devCount,devChoice);
cudaSetDevice(devChoice);
inited = 1;
}
return 0;
}
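/* Verify the start/stop flags toggled by the kernels: every block must have
   set both its start and its stop flag, otherwise a diagnostic table is
   printed and a nonzero value is returned. */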
int errorcheck(char s[],int nblocks,int startstop[]) {
int i,err = 0;
for(i = 0; i<2*nblocks; i++) if(startstop[i] != 1) err = err + 1;
if(err) {
printf("%s\n",s);
printf("Error running kernel, errorcount = %d, nblocks = %d\n",
err,nblocks);
printf("BLOCK: ");
for(i = 0; i<nblocks; i++)
printf("%4d",i);
printf("\nSTART: ");
for(i = 0; i<nblocks; i++)
printf("%4d",startstop[i]);
printf("\nSTOP :");
for(i = 0; i<nblocks; i++)
printf("%4d",startstop[nblocks+i]);
printf("\n");
}
return err != 0;
}
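/* Host driver for one MD time step on the card.  On the first call, or when
   n or npot changes, the device buffers are (re)allocated and the potential
   table is uploaded into the texture; calling with n <= 0 only releases the
   buffers.  One step consists of: optional upload of coordinates and
   velocities (coord_in), box index computation (makeboxno), sorting by box
   (rsort_card), replication of the sorted arrays (adjustx4), the
   force/integration kernel (forcecalc_box), reduction of the per-block
   energies on the host, and an optional download of the new state
   (coord_out).  The if(0) blocks are consistency checks for debugging.

   Typical use, as in main() below:
     cardtimestep_box(n,k,xx4,vv4,boxl,dt,&utot,&wtot,&ktot,
                      npot,rcut,upot,fpot,coord_in,coord_out);
   and once at shutdown with n = -1 to free the device memory. */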
int cardtimestep_box(int n,int k,vector4 xx[],vector4 vv[],real boxl,real dt,
double *utot,double *wtot,double *ktot,
int npot,real rcut,real upot[],real fpot[],
int coord_in,int coord_out) {
static vector4 *xx1_dev,*xx2_dev,*vv1_dev,*vv2_dev;
static int *boxno1_dev,*boxno2_dev,*first_dev,*startstop_dev;
static real *uk_dev;
static int ninit = 0, npotinit = 0;
static cudaChannelFormatDesc channelDesc;
static cudaArray *potarray;
const int align = 32, nthreads = NTHREADS, nblocks = NBLOCKS;
int k3 = k*k*k;
int i;
real *uk = (real *) alloca(sizeof(real) * 3*nblocks);
int *startstop = (int *) alloca(sizeof(int) * 2*nblocks);
if(ninit != n || npotinit != npot) {
if(ninit > 0) {
cudaFree(uk_dev);
cudaFree(startstop_dev);
cudaFree(first_dev);
cudaFree(boxno2_dev);
cudaFree(boxno1_dev);
cudaFree(vv2_dev);
cudaFree(vv1_dev);
cudaFree(xx2_dev);
cudaFree(xx1_dev);
} else {
init_graphics_card();
}
if(n > 0) {
void *ptr;
cudaMalloc(&ptr,(sizeof(vector4)*3*n + align-1)/align*align);
xx1_dev = (vector4 *) ptr;
cudaMalloc(&ptr,(sizeof(vector4)*3*n + align-1)/align*align);
xx2_dev = (vector4 *) ptr;
cudaMalloc(&ptr,(sizeof(vector4)*3*n + align-1)/align*align);
vv1_dev = (vector4 *) ptr;
cudaMalloc(&ptr,(sizeof(vector4)*3*n + align-1)/align*align);
vv2_dev = (vector4 *) ptr;
cudaMalloc(&ptr,(sizeof(int)*n + align-1)/align*align);
boxno1_dev = (int *) ptr;
cudaMalloc(&ptr,(sizeof(int)*n + align-1)/align*align);
boxno2_dev = (int *) ptr;
cudaMalloc(&ptr,(sizeof(int)*(k3+1)*3 + align-1)/align*align);
first_dev = (int *) ptr;
cudaMalloc(&ptr,(sizeof(real)*3*nblocks + align-1)/align*align);
uk_dev = (real *) ptr;
cudaMalloc(&ptr,(sizeof(int)*2*nblocks + align-1)/align*align);
startstop_dev = (int *) ptr;
channelDesc = cudaCreateChannelDesc<float2>();
cudaMallocArray(&potarray,&channelDesc,npot,1);
cudaMalloc(&ptr,sizeof(float2)*npot);
{
float2 *pcopy = (float2 *) alloca(sizeof(float2)*npot);
for(i = 0; i<npot; i++) {
pcopy[i].x = upot[i];
pcopy[i].y = fpot[i];
}
cudaMemcpyToArray(potarray,0,0,pcopy,npot*sizeof(float2),
cudaMemcpyHostToDevice);
pottex.addressMode[0] = cudaAddressModeClamp;
pottex.filterMode = cudaFilterModeLinear;
pottex.normalized = false;
cudaBindTextureToArray(pottex,potarray,channelDesc);
}
}
ninit = n; npotinit = npot;
}
if(n > 0) {
double t0,t1;
//printf("coord_in = %d , coord_out = %d\n",coord_in,coord_out);
if(coord_in) {
cudaMemcpy(xx1_dev,xx,sizeof(vector4) * n,cudaMemcpyHostToDevice);
cudaMemcpy(vv1_dev,vv,sizeof(vector4) * n,cudaMemcpyHostToDevice);
}
for(i = 0; i<3*nblocks; i++) uk[i] = 0.0;
cudaMemcpy(uk_dev,uk,sizeof(real) * 3*nblocks, cudaMemcpyHostToDevice);
for(i = 0; i<2*nblocks; i++) startstop[i] = 0;
cudaMemcpy(startstop_dev,startstop,sizeof(int)*2*nblocks,
cudaMemcpyHostToDevice);
cudaThreadSynchronize();
t0 = gettime();
//printf("Computing box indices\n");
makeboxno<<<nblocks,nthreads>>>(n,k,boxl,xx1_dev,boxno1_dev);
/* Check box indices */ if(0) {
int *boxno = (int *) malloc(sizeof(int) * n),nerr = 0;
vector4 *xxtemp = (vector4 *) malloc(sizeof(vector4) * n);
int *tags = (int *) malloc(sizeof(int) * n);
cudaMemcpy(boxno,boxno1_dev,sizeof(int)*n,cudaMemcpyDeviceToHost);
cudaMemcpy(xxtemp,xx1_dev,sizeof(vector4)*n,cudaMemcpyDeviceToHost);
//printf("Checking box computation\n");
for(i = 0; i<n; i++) {
int bi = boxindex(boxl,k,xxtemp[i],1.0/k,k/boxl);
bi = (k3 + (bi % k3)) % k3;
if(boxno[i] != bi || bi<0 || bi>=k3) if(nerr++ < 10)
printf("boxno[%d] = %d, boxindex=%d\n",i,boxno[i],bi);
}
for(i = 0; i<n; i++) tags[i] = 0;
for(i = 0; i<n; i++) tags[(int) xxtemp[i][3]]++;
for(i = 0; i<n; i++)
if(tags[i] != 1) if(nerr++ < 10) printf("input tag error: tag[%d] = %d\n",i,tags[i]);
free(tags);
free(xxtemp);
free(boxno);
if(nerr > 5) exit(1);
}
//printf("Sorting particles\n");
rsort_card(n,k3+1,
boxno1_dev,xx1_dev,vv1_dev,
boxno2_dev,xx2_dev,vv2_dev,first_dev);
/* Check sorting */ if(0) {
int *boxno = (int *) malloc(sizeof(int) * n);
int *first = (int *) malloc(sizeof(int) * (k3+1));
vector4 *xxtemp = (vector4 *) malloc(sizeof(vector4) * n);
int *tags = (int *) malloc(sizeof(int) * n);
int nerr = 0;
cudaMemcpy(boxno,boxno2_dev,sizeof(int)*n,cudaMemcpyDeviceToHost);
cudaMemcpy(first,first_dev,sizeof(int)*(k3+1),cudaMemcpyDeviceToHost);
cudaMemcpy(xxtemp,xx2_dev,sizeof(vector4)*n,cudaMemcpyDeviceToHost);
//printf("Checking sorting\n");
for(i = 1; i<n; i++) {
if(boxno[i]<boxno[i-1]) if(nerr++ < 10)
printf("Sorting error: boxno[%d] = %d, boxno[%d]=%d\n",
i,boxno[i],i-1,boxno[i-1]);
}
for(i = 0; i<n; i++) tags[i] = 0;
for(i = 0; i<n; i++) tags[(int) xxtemp[i][3]]++;
for(i = 0; i<n; i++)
if(tags[i] != 1) if(nerr++ < 10) printf("tag error: tag[%d] = %d\n",i,tags[i]);
//printf("n=%d k3=%d first[0]=%d first[k3-1]=%d first[k3]=%d\n",
// n,k3,first[0],first[k3-1],first[k3]);
for(i = 0; i<k3; i++) {
int j;
for(j = first[i]; j<first[i+1]; j++)
if(boxno[j] != i) if(nerr++ < 10)
printf("first/box error: boxno[%d]=%d first[%d]=%d first[%d]=%d\n",
j,boxno[j],i,first[i],i+1,first[i+1]);
if(first[i+1] - first[i] > 15) {
printf("Very full box %d: %d\n",i,first[i+1]-first[i]);
for(j = first[i]; j<first[i+1]; j++) {
printf("particle %5d in box %4d :: %10.3f %10.3f %10.3f %10.2f\n",
j,i,xxtemp[j][0],xxtemp[j][1],xxtemp[j][2],xxtemp[j][3]);
}
exit(1);
}
}
free(tags);
free(xxtemp);
free(first);
free(boxno);
if(nerr > 0) exit(1);
}
//printf("Running adjust4x\n");
adjustx4<<<nblocks,nthreads/*,sizeof(vector4)*(nthreads+5)*5*/>>>
(n,k3,first_dev,xx2_dev,startstop_dev);
cudaThreadSynchronize();
for(i = 0; i<2*nblocks; i++) startstop[i] = 0;
cudaMemcpy(startstop,startstop_dev,sizeof(int)*2*nblocks,
cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
if(errorcheck("KERNEL ADJUSTX4",nblocks,startstop)) { exit(1); }
t1 = gettime();
//rawtime = t1-t0;
//cudaMemcpy(first_dev,first,sizeof(int) * (k3+1),cudaMemcpyHostToDevice);
*utot = 0.0; *wtot = 0.0; *ktot = 0.0;
for(i = 0; i<2*nblocks; i++) startstop[i] = 0;
cudaMemcpy(startstop_dev,startstop,sizeof(int)*2*nblocks,
cudaMemcpyHostToDevice);
cudaThreadSynchronize();
//printf("Running force calculation\n");
t0 = gettime();
forcecalc_box<<<nblocks,nthreads>>>(n,boxl,k,first_dev,boxno2_dev,
xx2_dev,vv2_dev,xx1_dev,vv1_dev,
dt,uk_dev,startstop_dev,rcut,npot);
cudaThreadSynchronize();
t1 = gettime();
rawtime = t1-t0;
//printf("Force caculation done.\n");
//printf("%120s Rawtime: %.3f ms\n","",rawtime*1e3);
cudaMemcpy(startstop,startstop_dev,sizeof(int)*2*nblocks,
cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
if(errorcheck("KERNEL FORCECALC_BOX",nblocks,startstop)) { exit(1); }
cudaMemcpy(uk,uk_dev,sizeof(real ) * 3*nblocks, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
if(coord_out) {
/*int nerr = 0;*/
int *tags = (int *) malloc(sizeof(int) * n);
cudaMemcpy(xx,xx1_dev,sizeof(vector4) * n, cudaMemcpyDeviceToHost);
cudaMemcpy(vv,vv1_dev,sizeof(vector4) * n, cudaMemcpyDeviceToHost);
/*
for(i = 0; i<n; i++) tags[i] = 0;
for(i = 0; i<n; i++) tags[(int) xx[i][3]]++;
for(i = 0; i<n; i++)
if(tags[i] != 1) if(nerr++ < 5) printf("force tag error (xx): tag[%d] = %d\n",i,tags[i]);
nerr = 0;
for(i = 0; i<n; i++) tags[i] = 0;
for(i = 0; i<n; i++) tags[(int) vv[i][3]]++;
for(i = 0; i<n; i++)
if(tags[i] != 1) if(nerr++ < 5) printf("force tag error (vv): tag[%d] = %d\n",i,tags[i]);
*/
free(tags);
}
for(i = 0; i<nblocks; i++) {
//if(uk[i] > *utot) *utot = uk[i];
*utot = *utot + uk[i+0*nblocks];
*wtot = *wtot + uk[i+1*nblocks];
*ktot = *ktot + uk[i+2*nblocks];
}
}
return 0;
}
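/* In-place byte swap of n items of sz bytes each, used to convert the
   Fortran-style configuration files between big- and little-endian. */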
void bswap(int n, int sz, void *v) {
char *p = (char *) v;
char t;
int i,k;
for(i = 0; i<n; i++)
for(k = 0; k<sz/2; k++) {
t = p[i*sz + k];
p[i*sz + k] = p[i*sz + sz-k-1];
p[i*sz + sz-k-1] = t;
}
}
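/* Write positions as one Fortran-style unformatted record: a 4-byte record
   length, 3*n doubles stored column-wise (all x, then all y, then all z),
   and the record length again; data are byte-swapped if byteorder is set. */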
void storecfg(char *fname,int n,vector *xx,int byteorder) {
double *xout = (double *) malloc(sizeof(double) * 3*n);
int i,j,len;
FILE *f;
f = fopen(fname,"w");
if(f == NULL) {
printf("Can not open file %s for writing.\n",fname);
free(xout);
return;
}
len = 3*n*sizeof(double);
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
xout[n*j+i] = xx[i][j];
if(byteorder) {
bswap(1,sizeof(int),&len);
bswap(3*n,sizeof(double),xout);
}
fwrite(&len,sizeof(int),1,f);
fwrite(xout,3*sizeof(double),n,f);
fwrite(&len,sizeof(int),1,f);
fclose(f);
free(xout);
}
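/* Read a configuration written by storecfg.  The byte order is detected by
   comparing the leading record length with the file size; *xx is allocated
   here and the number of particles is returned, or -1 on error. */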
int loadcfg(char *fname,vector **xx,int *byteorder) {
FILE *f;
int n,do_swap,len;
double *xin;
int i,j;
f = fopen(fname,"r");
if(f == NULL) {
printf("Can not open file %s for reading.\n",fname);
return -1;
}
fseek(f,0,SEEK_END);
len = ftell(f);
fseek(f,0,SEEK_SET);
fread(&n,sizeof(int),1,f);
if(len != (int) (n+2*sizeof(int))) {
bswap(1,sizeof(int),&n);
if(len != (int) (n+2*sizeof(int))) {
printf("Crap, unable to understand md.cfg\n");
fclose(f);
return -1;
}
do_swap = 1;
} else do_swap = 0;
n = n / (3*sizeof(double));
///printf("do_swap = %d n = %d\n",do_swap,n);
*xx = (vector *) malloc(sizeof(vector ) * n);
xin = (double *) malloc(sizeof(double) * 3*n);
fread(xin,sizeof(double)*3,n,f);
if(do_swap) bswap(3*n,sizeof(double),xin);
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
(*xx)[i][j] = xin[n*j+i];
free(xin);
fread(&len,sizeof(int),1,f);
fclose(f);
if(do_swap) bswap(1,sizeof(int),&len);
if(len != (int) (sizeof(double)*3*n)) {
printf("Crap, unable to understand file %s (stage two) %d %d\n",
fname,len,(int) (sizeof(double)*3*n));
free(*xx);
return -1;
}
*byteorder = do_swap;
return n;
}
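/* Main driver: read md.cfg/md.vel/md0.cfg and the run parameters from
   md.inp, set up the cell grid, then run niter time steps on the card.
   Every ncompare steps the same step is repeated on the CPU for accuracy
   comparison, every nmomentum steps the net momentum is removed and
   particles are wrapped back into the box, every nrescale steps the
   velocities are rescaled towards the target temperature tfixed, and every
   nrestart steps restart configurations and statistics (md.log, md.sts)
   are written. */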
int main(int argc, char *argv[]) {
int niter,nrescale,noutput,nrestart,ncompare,nmomentum,cfgsave;
int iter0,iter;
int i,j,k,k3,n,nin;
real boxl,dt;
vector *xx,*vv,*xx0;
vector4 *xx4,*vv4,*xx4save,*vv4save;
//int *first,*perm;
double utot,wtot,ktot,p,tinst,etotlast = 0.0;
double rho,rhoguess;
double tfixed;
double Uavg,Tavg,Pavg,Tscaleavg,msd = 0.0;
FILE *logfile;
char line[100];
int byteorder = 0,echange;
real rcut = 2.51;
int npot = 1000;
real *upot = (real *) alloca(sizeof(real)*npot);
real *fpot = (real *) alloca(sizeof(real)*npot);
int coord_in,coord_out;
if(argc >= 3 && strcmp(argv[1],"-device") == 0) {
devChoice = atoi(argv[2]);
printf("%% Command line option set tentative device number %d\n",devChoice);
}
/* Compute potential table */
for(i = 0; i<npot; i++) {
double v,g;
double r2 = i*rcut*rcut/(npot-1);
vru(sqrt(r2),&v,&g);
upot[i] = v;
fpot[i] = g;
}
/* Load initial configuration */
n = loadcfg("md.cfg",&xx,&byteorder);
n = loadcfg("md.vel",&vv,&byteorder);
{
FILE *fp = fopen("md0.cfg","r");
if(fp == NULL) {
xx0 = (vector *) malloc(sizeof(vector) * n);
memcpy(xx0,xx,sizeof(vector)*n);
storecfg("md0.cfg",n,xx0,byteorder);
} else {
fclose(fp);
n = loadcfg("md0.cfg",&xx0,&byteorder);
}
}
{
FILE *fp = fopen("md.inp","r");
if(fp == NULL) {
printf("Cannot open input file md.inp\n");
exit(1);
}
fgets(line,sizeof(line),fp); sscanf(line+29,"%d",&nin);
fgets(line,sizeof(line),fp); sscanf(line+29,"%lf",&rho);
fgets(line,sizeof(line),fp); sscanf(line+29,"%lf",&tfixed);
fgets(line,sizeof(line),fp); sscanf(line+29,"%d",&nrescale);
fgets(line,sizeof(line),fp); sscanf(line+29,"%f",&dt);
fgets(line,sizeof(line),fp); sscanf(line+29,"%d",&niter);
fgets(line,sizeof(line),fp); sscanf(line+29,"%d",&noutput);
fgets(line,sizeof(line),fp); sscanf(line+29,"%d",&nrestart);
fgets(line,sizeof(line),fp); // potential cut off
fgets(line,sizeof(line),fp); // cubic flag
fgets(line,sizeof(line),fp); // noncubic data
fgets(line,sizeof(line),fp); // noncubic data
fgets(line,sizeof(line),fp); // noncubic data
fgets(line,sizeof(line),fp); // mpi data
fgets(line,sizeof(line),fp); // mpi data
fgets(line,sizeof(line),fp); // pot file
fgets(line,sizeof(line),fp); sscanf(line+29,"%d",&cfgsave);
boxl = pow(n/rho,1.0/3.0);
}
{
FILE *fp = fopen("md.sts","r");
iter0 = 1; Uavg = 0.0; Tavg = 0.0; Pavg = 0.0; Tscaleavg = 0.0;
if(fp == NULL) {
fp = fopen("md.sts","w");
fprintf(fp,"%12d %20.10e %20.10e %20.10e %20.10e\n",
iter0,Uavg,Tavg,Pavg,Tscaleavg);
fclose(fp);
} else {
fscanf(fp,"%d%lf%lf%lf%lf",&iter0,&Uavg,&Tavg,&Pavg,&Tscaleavg);
Uavg = Uavg * ((iter0-1) % noutput);
Tavg = Tavg * ((iter0-1) % noutput);
Pavg = Pavg * ((iter0-1) % noutput);
}
}
logfile = fopen("md.log","a");
/* Compute number of boxes to divide system into */
k = (int) floor(2*boxl/rcut);
while(k>0 && k+boxl/(4*k*k*rcut) > 2*boxl/rcut) k=k-1;
if(k <= 0) {
printf("Error in k, k=%d boxl=%f rcut=%f\n",k,boxl,rcut);
exit(1);
}
k3 = k*k*k;
/* Compute an estimate of the particle density */
{
double xmax = -1e20;
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
if(xx[i][j] > xmax) xmax = xx[i][j];
rhoguess = n/(xmax*xmax*xmax);
}
if(fabs(rhoguess-rho) > 1e-3)
printf("WARNING, rho and rhoguess differ with more than 1e-3.\n");
if(n != nin)
printf("WARNING, N in cfgfile and md.inp differ.\n");
ncompare = 1000000000; /* How often to compare cpu/card computations */
nmomentum = 100; /* How often to remove net momentum (mainly needed due to single precision) */
printf("%% MD CONFIGURATION\n"
"%% n = %7d\n"
"%% k = %7d\n"
"%% k3 = %7d\n"
"%% rho = %11.4f\n"
"%% rhoguess = %11.4f\n"
"%% boxl = %15.8f\n"
"%% dt = %15.8f\n"
"%% niter = %9d\n"
"%% cardcmp = %9d\n"
"%% momentum = %9d\n",
n,k,k3,rho,rhoguess,boxl,dt,niter,ncompare,nmomentum);
/* Allocate memory for internal data structure */
xx4save = (vector4 *) malloc(sizeof(vector4) * n);
vv4save = (vector4 *) malloc(sizeof(vector4) * n);
xx4 = (vector4 *) malloc(sizeof(vector4) * n);
vv4 = (vector4 *) malloc(sizeof(vector4) * n);
for(i = 0; i<n; i++) {
for(j = 0; j<3; j++) {
xx4[i][j] = xx[i][j];
vv4[i][j] = vv[i][j];
}
xx4[i][3] = i;
vv4[i][3] = i;
}
echange = 1;
coord_out = 1;
for(iter = iter0; iter<niter+iter0; iter++) {
double t0,t1/*,boxtime*/;
//t0 = gettime();
/* Save configuration before timestep so that
a step can be performed on the cpu, and so
that it can be dumped to file in case of
error */
if(iter % ncompare == 0) {
memcpy(xx4save,xx4,n*sizeof(vector4));
memcpy(vv4save,vv4,n*sizeof(vector4));
}
if(coord_out) coord_in = 1; else coord_in = 0;
coord_out = 0;
if(iter % noutput == 0) coord_out = 1;
if(iter % ncompare == ncompare-1 || iter % ncompare == 0) coord_out = 1;
if(iter % nmomentum == 0) coord_out = 1;
if(iter % nrestart == 0 || iter==iter0+niter-1) coord_out = 1;
t0 = gettime();
cardtimestep_box(n,k,xx4,vv4,boxl,dt,
&utot,&wtot,&ktot,
npot,rcut,upot,fpot,
coord_in,coord_out);
t1 = gettime();
if(iter % noutput == 0 || iter % ncompare == 0) {
msd = 0.0;
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
msd = msd + (xx0[(int) xx4[i][3]][j] - xx4[i][j])*(xx0[(int) xx4[i][3]][j] - xx4[i][j]);
}
utot = utot/n;
wtot = wtot/(3*n);
ktot = ktot/n;
tinst = 2.0/3.0 * ktot;
p = rho*(tinst + wtot);
msd = msd/n;
//t1 = gettime();
/* If total energy changes by more than 1% in one iteration,
that indicates a serious error. This code dumps the state
that produced the error. */
if(0) if(echange == 0 &&
fabs(etotlast-utot-ktot)>0.01*fabs(etotlast) &&
fabs(etotlast-utot-ktot)>0.01) {
char s[80];
FILE *f;
printf("%% card: %20.10e %20.10e %20.10e %20.10e %10.3f\n",
utot,ktot,utot+ktot,p,(t1-t0)*1e3);
printf("%% Serious energy error. "
"Dumping configuration and exiting...\n");
sprintf(s,"totaldump.%d",iter);
f = fopen(s,"w");
/* Simulation parameters */
fwrite(&n,sizeof(n),1,f);
fwrite(&k,sizeof(k),1,f);
fwrite(&k3,sizeof(k3),1,f);
fwrite(&boxl,sizeof(boxl),1,f);
fwrite(&rcut,sizeof(rcut),1,f);
fwrite(&dt,sizeof(dt),1,f);
/* Input to time-step */
fwrite(xx4save,sizeof(vector4),n,f);
fwrite(vv4save,sizeof(vector4),n,f);
fwrite(xx4,sizeof(vector4),n,f);
fwrite(vv4,sizeof(vector4),n,f);
/* Output from time-step */
fwrite(xx,sizeof(vector),n,f);
fwrite(vv,sizeof(vector),n,f);
fclose(f);
break;
} else etotlast = utot + ktot;
echange = 0;
/* Output statistics */
Uavg = Uavg + utot;
Tavg = Tavg + tinst;
Pavg = Pavg + p;
Tscaleavg = Tscaleavg + tinst;
if(iter % noutput == 0) {
Uavg = Uavg / noutput;
Tavg = Tavg / noutput;
Pavg = Pavg / noutput;
printf("%12d %20.10e %20.10e %20.10e %20.10e %20.10e\n",
iter,Uavg+Tavg*1.5,Uavg,Tavg,Pavg,msd);
fprintf(logfile,
"%12d %20.10e %20.10e %20.10e %20.10e %20.10e\n",
iter,Uavg+Tavg*1.5,Uavg,Tavg,Pavg,msd);
Uavg = 0.0; Tavg = 0.0; Pavg = 0.0;
}
etotlast = utot + ktot;
if(iter % ncompare == 0) {
/* Run the same timestep on the cpu, and print statistics for both the
card and the cpu step, for accuracy comparisons. */
printf("%% card: %12d %20.10e %20.10e %20.10e %20.10e %20.10e %10.3f\n",
iter,utot+ktot,utot,tinst,p,msd,(t1-t0)*1e3);
fprintf(logfile,
"%% card: %12d %20.10e %20.10e %20.10e %20.10e %20.10e %10.3f\n",
iter,utot+ktot,utot,tinst,p,msd,(t1-t0)*1e3);
t0 = gettime();
{
int *first = (int *) malloc(sizeof(int) * (k3+1));
int *perm = (int *) malloc(sizeof(int) * n);
int *boxno = (int *) malloc(sizeof(int) * n);
vector4 *xx4temp = (vector4 *) malloc(sizeof(vector4) * n);
vector4 *vv4temp = (vector4 *) malloc(sizeof(vector4) * n);
int jsave;
//printf("%% -- CPU check. Running boxem...\n"); fflush(stdout);
boxem(n,boxl,k,xx4save,first,perm);
//printf("%% -- boxem complete\n"); fflush(stdout);
jsave = k3;
while(first[jsave] == 0) {
first[jsave] = n; jsave = jsave-1;
}
//printf("%% -- Copying to xx4temp\n"); fflush(stdout);
for(i = 0; i<n; i++) {
for(j = 0; j<3; j++) {
xx4temp[i][j] = xx4save[perm[i]][j];
vv4temp[i][j] = vv4save[perm[i]][j];
}
xx4temp[i][3] = xx4save[perm[i]][3];
vv4temp[i][3] = xx4save[perm[i]][3];
}
//printf("%% -- Assigning to boxno\n"); fflush(stdout);
for(i = 0; i<k3; i++)
for(j = first[i]; j<first[i+1]; j++)
boxno[j] = i;
//printf("%% -- Calling forcecalc_host...\n"); fflush(stdout);
forcecalc_host(n,boxl,k,first,boxno,xx4temp,
vv4temp,xx4save,vv4save,dt,
&utot,&wtot,&ktot,npot,rcut,upot,fpot);
//printf("%% -- forcecalc_host complete\n"); fflush(stdout);
free(vv4temp);
free(xx4temp);
free(boxno);
free(perm);
free(first);
}
//printf("%% -- Copmuting msd\n"); fflush(stdout);
msd = 0.0;
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
msd = msd + (xx0[(int) xx4save[i][3]][j] - xx4save[i][j])*
(xx0[(int) xx4save[i][3]][j] - xx4save[i][j]);
//printf("%% -- msd calculation complete\n"); fflush(stdout);
utot = utot/n;
wtot = wtot/(3*n);
ktot = ktot/n;
tinst = 2.0/3.0 * ktot;
p = rho*(tinst + wtot);
msd = msd/n;
t1 = gettime();
printf("%% cpu: %12d %20.10e %20.10e %20.10e %20.10e %20.10e %10.3f\n",
iter,utot+ktot,utot,tinst,p,msd,(t1-t0)*1e3);
fprintf(logfile,
"%% cpu: %12d %20.10e %20.10e %20.10e %20.10e %20.10e %10.3f\n",
iter,utot+ktot,utot,tinst,p,msd,(t1-t0)*1e3);
fflush(stdout); fflush(logfile);
}
//printf("Quitting here... %s:%d\n",__FILE__,__LINE__);
//exit(1);
if(iter % nmomentum == 0) {
double mom[3] = {0.0, 0.0, 0.0};
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
mom[j] = mom[j] + vv4[i][j];
/*printf("%% Momentum is (%20.10e , %20.10e , %20.10e)\n",
mom[0],mom[1],mom[2]);*/
for(j = 0; j<3; j++) mom[j] = mom[j] / n;
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
vv4[i][j] = vv4[i][j] - mom[j];
for(i = 0; i<n; i++)
for(j = 0; j<3; j++) {
double t = boxl*floor(xx4[i][j]/boxl);
xx4[i][j] = xx4[i][j] - t;
xx0[(int) xx4[i][3]][j] = xx0[(int) xx4[i][3]][j] - t;
xx[(int) xx4[i][3]][j] = xx4[i][j];
vv[(int) vv4[i][3]][j] = vv4[i][j];
}
/*
for(j = 0; j<3; j++) mom[j] = 0.0;
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
mom[j] = mom[j] + vv[i][j];
*/
/*printf("%% Corrected (%20.10e , %20.10e , %20.10e)\n",
mom[0],mom[1],mom[2]);*/
echange = 1;
}
if(nrescale > 0 && iter % nrescale == 0) {
double alpha;
Tscaleavg = Tscaleavg / nrescale;
/* alpha = (2*tfixed - Tscaleavg)/Tscaleavg; */
alpha = 1.0 + 1.8*(tfixed - Tscaleavg)/tinst;
if(alpha < 1e-6) alpha = 1e-6;
alpha = sqrt(alpha);
for(i = 0; i<n; i++)
for(j = 0; j<3; j++)
vv4[i][j] = vv4[i][j]*alpha;
Tscaleavg = 0.0;
echange = 1;
}
if(iter % nrestart == 0 || iter==iter0+niter-1) {
char fname[80];
FILE *fp;
for(i = 0; i<n; i++)
for(j = 0; j<3; j++) {
double t = boxl*floor(xx4[i][j]/boxl);
xx4[i][j] = xx4[i][j] - t;
xx0[(int) xx4[i][3]][j] = xx0[(int) xx4[i][3]][j] - t;
xx[(int) xx4[i][3]][j] = xx4[i][j];
vv[(int) vv4[i][3]][j] = vv4[i][j];
}
fclose(logfile);
if(cfgsave == 1){
sprintf(fname,"md%09d.cfg",iter);
storecfg(fname,n,xx,byteorder);
}
if(cfgsave == 2){
sprintf(fname,"md%09d.cfg",iter);
storecfg(fname,n,xx,byteorder);
sprintf(fname,"md0_%09d.cfg",iter);
storecfg(fname,n,xx0,byteorder);
sprintf(fname,"md%09d.vel",iter);
storecfg(fname,n,vv,byteorder);
}
storecfg("md.cfg",n,xx,byteorder);
storecfg("md.vel",n,vv,byteorder);
storecfg("md0.cfg",n,xx0,byteorder);
fp = fopen("md.sts","w");
fprintf(fp,"%12d %20.10e %20.10e %20.10e %20.10e\n",
iter+1,Uavg/iter,Tavg/iter,Pavg/iter,Tscaleavg);
fclose(fp);
logfile = fopen("md.log","a");
}
}
/* Release memory allocated on graphics card */
cardtimestep_box(-1,-1,NULL,NULL,
0.0,0.0,NULL,NULL,NULL,0,0.0,NULL,NULL,0,0);
free(xx);
free(vv);
free(xx0);
free(xx4);
free(vv4);
free(xx4save);
free(vv4save);
return 0;
}