hip_filename (stringlengths 5–84) | hip_content (stringlengths 79–9.69M) | cuda_filename (stringlengths 4–83) | cuda_content (stringlengths 19–9.69M) |
---|---|---|---|
1bb50e3efb233ec9977408f7d4dacff211fd89f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2011-2016 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "batch_norm_layer_tester_cuda.h"
#include "neural_network_cudnn_exception.h"
#include "util_cuda.h"
#include "../batch_norm_layer.h"
namespace nnforge
{
namespace cuda
{
__global__ void batch_norm_kernel(
float * __restrict output,
const float * __restrict input,
const float * __restrict gamma,
const float * __restrict beta,
const float * __restrict mean,
const float * __restrict inverse_sigma,
int elem_count_per_feature_map,
int feature_map_count,
int entry_count,
int elem_count_per_entry)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
int feature_map_id = blockDim.y * blockIdx.y + threadIdx.y;
int entry_id = (blockDim.z * blockIdx.z + threadIdx.z) * 2;
if ((elem_id < elem_count_per_feature_map) && (feature_map_id < feature_map_count) && (entry_id < entry_count))
{
bool second_item_valid = (entry_id + 1 < entry_count);
float mult = gamma[feature_map_id] * inverse_sigma[feature_map_id];
float add = beta[feature_map_id] - mult * mean[feature_map_id];
int offset1 = (entry_id * feature_map_count + feature_map_id) * elem_count_per_feature_map + elem_id;
int offset2 = offset1 + elem_count_per_entry;
float input_val1 = input[offset1];
float input_val2;
if (second_item_valid)
input_val2 = input[offset2];
float output_val1 = input_val1 * mult + add;
float output_val2 = input_val2 * mult + add;
output[offset1] = output_val1;
if (second_item_valid)
output[offset2] = output_val2;
}
}
void batch_norm_layer_tester_cuda::enqueue_forward_propagation(
hipStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
(entry_count + 1) >> 1);
hipLaunchKernelGGL(( batch_norm_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*output_buffer,
*input_buffers[0],
*data[0],
*data[1],
*data[2],
*data[3],
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count,
output_elem_count_per_entry);
}
void batch_norm_layer_tester_cuda::tester_configured()
{
std::shared_ptr<const batch_norm_layer> layer_derived = std::dynamic_pointer_cast<const batch_norm_layer>(layer_schema);
epsilon = layer_derived->epsilon;
if (epsilon < CUDNN_BN_MIN_EPSILON)
throw neural_network_exception((boost::format("Too small epsilon specified: %1%, cuDNN requires at least %2%") % epsilon % CUDNN_BN_MIN_EPSILON).str());
}
int batch_norm_layer_tester_cuda::get_input_index_layer_can_write() const
{
return 0;
}
}
}
| 1bb50e3efb233ec9977408f7d4dacff211fd89f3.cu | /*
* Copyright 2011-2016 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "batch_norm_layer_tester_cuda.h"
#include "neural_network_cudnn_exception.h"
#include "util_cuda.h"
#include "../batch_norm_layer.h"
namespace nnforge
{
namespace cuda
{
__global__ void batch_norm_kernel(
float * __restrict output,
const float * __restrict input,
const float * __restrict gamma,
const float * __restrict beta,
const float * __restrict mean,
const float * __restrict inverse_sigma,
int elem_count_per_feature_map,
int feature_map_count,
int entry_count,
int elem_count_per_entry)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
int feature_map_id = blockDim.y * blockIdx.y + threadIdx.y;
int entry_id = (blockDim.z * blockIdx.z + threadIdx.z) * 2;
if ((elem_id < elem_count_per_feature_map) && (feature_map_id < feature_map_count) && (entry_id < entry_count))
{
bool second_item_valid = (entry_id + 1 < entry_count);
float mult = gamma[feature_map_id] * inverse_sigma[feature_map_id];
float add = beta[feature_map_id] - mult * mean[feature_map_id];
int offset1 = (entry_id * feature_map_count + feature_map_id) * elem_count_per_feature_map + elem_id;
int offset2 = offset1 + elem_count_per_entry;
float input_val1 = input[offset1];
float input_val2;
if (second_item_valid)
input_val2 = input[offset2];
float output_val1 = input_val1 * mult + add;
float output_val2 = input_val2 * mult + add;
output[offset1] = output_val1;
if (second_item_valid)
output[offset2] = output_val2;
}
}
void batch_norm_layer_tester_cuda::enqueue_forward_propagation(
cudaStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
(entry_count + 1) >> 1);
batch_norm_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*output_buffer,
*input_buffers[0],
*data[0],
*data[1],
*data[2],
*data[3],
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count,
output_elem_count_per_entry);
}
void batch_norm_layer_tester_cuda::tester_configured()
{
std::shared_ptr<const batch_norm_layer> layer_derived = std::dynamic_pointer_cast<const batch_norm_layer>(layer_schema);
epsilon = layer_derived->epsilon;
if (epsilon < CUDNN_BN_MIN_EPSILON)
throw neural_network_exception((boost::format("Too small epsilon specified: %1%, cuDNN requires at least %2%") % epsilon % CUDNN_BN_MIN_EPSILON).str());
}
int batch_norm_layer_tester_cuda::get_input_index_layer_can_write() const
{
return 0;
}
}
}
|
b10d3430738edf3d02348900b4482c3ddb9732f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* developed by zhujin
*/
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/io/coded_stream.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/util_img.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/proto/caffe.pb.h"
namespace caffe {
template <typename Dtype>
__global__ void kernel_BiLinearResize(const int nthreads, const Dtype* src_data, const int src_height, const int src_width,
Dtype* dst_data, const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w)
{
CUDA_KERNEL_LOOP(i, nthreads) {
int dst_h = i /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
int dst_w = i %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
const int src_idx = src_offset_1 + src_w;
Dtype res = (w_h0 * w_w0 * src_data[src_idx]);
if (src_w + 1 < src_width)
res += (w_h0 * w_w1 * src_data[src_idx + 1]);
if (src_h + 1 < src_height)
res += (w_h1 * w_w0 * src_data[src_idx + src_width]);
if (src_w + 1 < src_width && src_h + 1 < src_height)
res += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
dst_data[dst_idx] = res;
}
}
template <typename Dtype>
void BiLinearResizeMat_gpu(const Dtype* src, const int src_height, const int src_width,
Dtype* dst, const int dst_height, const int dst_width)
{
const Dtype scale_w = src_width / (Dtype)dst_width;
const Dtype scale_h = src_height / (Dtype)dst_height;
int loop_n = dst_height * dst_width;
hipLaunchKernelGGL(( kernel_BiLinearResize<Dtype>) , dim3(CAFFE_GET_BLOCKS(loop_n)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
loop_n,src, src_height, src_width, dst, dst_height, dst_width, scale_h, scale_w);
//CUDA_POST_KERNEL_CHECK;
}
template void BiLinearResizeMat_gpu(const float* src, const int src_height, const int src_width,
float* dst, const int dst_height, const int dst_width);
template void BiLinearResizeMat_gpu(const double* src, const int src_height, const int src_width,
double* dst, const int dst_height, const int dst_width);
template <typename Dtype>
void ResizeBlob_gpu(const Blob<Dtype>* src, const int src_n, const int src_c,
Blob<Dtype>* dst, const int dst_n, const int dst_c) {
const int src_channels = src->channels();
const int src_height = src->height();
const int src_width = src->width();
const int src_offset = (src_n * src_channels + src_c) * src_height * src_width;
const int dst_channels = dst->channels();
const int dst_height = dst->height();
const int dst_width = dst->width();
const int dst_offset = (dst_n * dst_channels + dst_c) * dst_height * dst_width;
const Dtype* src_data = &(src->gpu_data()[src_offset]);
Dtype* dst_data = &(dst->mutable_gpu_data()[dst_offset]);
BiLinearResizeMat_gpu(src_data, src_height, src_width,
dst_data, dst_height, dst_width);
CUDA_POST_KERNEL_CHECK;
}
template void ResizeBlob_gpu(const Blob<float>* src, const int src_n, const int src_c,
Blob<float>* dst, const int dst_n, const int dst_c);
template void ResizeBlob_gpu(const Blob<double>* src, const int src_n, const int src_c,
Blob<double>* dst, const int dst_n, const int dst_c);
template <typename Dtype>
__global__ void kernel_GetBiLinearResizeMatRules(const int nthreads, const int src_height, const int src_width,
const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w,
Dtype* loc1, Dtype* weight1, Dtype* loc2, Dtype* weight2,
Dtype* loc3, Dtype* weight3, Dtype* loc4, Dtype* weight4)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int dst_h = index /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
int dst_w = index %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
// dst_data[dst_idx] = 0;
const int src_idx = src_offset_1 + src_w;
loc1[dst_idx] = src_idx;
weight1[dst_idx] = w_h0 * w_w0;
loc2[dst_idx] = 0;
weight2[dst_idx] = 0;
weight3[dst_idx] = 0;
loc3[dst_idx] = 0;
loc4[dst_idx] = 0;
weight4[dst_idx] = 0;
if (src_w + 1 < src_width)
{
loc2[dst_idx] = src_idx + 1;
weight2[dst_idx] = w_h0 * w_w1;
// dst_data[dst_idx] += (w_h0 * w_w1 * src_data[src_idx + 1]);
}
if (src_h + 1 < src_height)
{
// dst_data[dst_idx] += (w_h1 * w_w0 * src_data[src_idx + src_width]);
weight3[dst_idx] = w_h1 * w_w0;
loc3[dst_idx] = src_idx + src_width;
}
if (src_w + 1 < src_width && src_h + 1 < src_height)
{
loc4[dst_idx] = src_idx + src_width + 1;
weight4[dst_idx] = w_h1 * w_w1;
// dst_data[dst_idx] += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
}
}
}
template <typename Dtype>
__global__ void kernel_ResizeBlob(const int nthreads,const int num,const int channels, const Dtype* src, const int src_height, const int src_width,
Dtype* dst, const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int i = index %( dst_height * dst_width);
int c = (index/(dst_height * dst_width))%channels;
int n = (index/(dst_height * dst_width))/channels;
int src_offset = (n * channels + c) * src_height * src_width;
int dst_offset = (n * channels + c) * dst_height * dst_width;
const Dtype* src_data = src+src_offset;
Dtype* dst_data = dst+dst_offset;
int dst_h = i /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
int dst_w = i %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
const int src_idx = src_offset_1 + src_w;
Dtype res = (w_h0 * w_w0 * src_data[src_idx]);
if (src_w + 1 < src_width)
res += (w_h0 * w_w1 * src_data[src_idx + 1]);
if (src_h + 1 < src_height)
res += (w_h1 * w_w0 * src_data[src_idx + src_width]);
if (src_w + 1 < src_width && src_h + 1 < src_height)
res += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
dst_data[dst_idx] = res;
}
}
template <typename Dtype>
void ResizeBlob_gpu(const Blob<Dtype>* src,Blob<Dtype>* dst) {
CHECK(src->num() == dst->num())<<"src->num() == dst->num()";
CHECK(src->channels() == dst->channels())<< "src->channels() == dst->channels()";
const int src_num = src->num();
const int src_channels = src->channels();
const int src_height = src->height();
const int src_width = src->width();
const int dst_channels = dst->channels();
const int dst_height = dst->height();
const int dst_width = dst->width();
const Dtype scale_w = src_width / (Dtype)dst_width;
const Dtype scale_h = src_height / (Dtype)dst_height;
int loop_n = dst_height * dst_width*dst_channels*src_num;
const Dtype* src_data = src->gpu_data();
Dtype* dst_data = dst->mutable_gpu_data();
hipLaunchKernelGGL(( kernel_ResizeBlob<Dtype>) , dim3(CAFFE_GET_BLOCKS(loop_n)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, loop_n,src_num,src_channels,
src_data, src_height,src_width,
dst_data, dst_height, dst_width,
scale_h,scale_w);
CUDA_POST_KERNEL_CHECK;
}
template void ResizeBlob_gpu(const Blob<float>* src,
Blob<float>* dst);
template void ResizeBlob_gpu(const Blob<double>* src,
Blob<double>* dst);
template <typename Dtype>
void GetBiLinearResizeMatRules_gpu( const int src_height, const int src_width,
const int dst_height, const int dst_width,
Dtype* loc1, Dtype* weight1, Dtype* loc2, Dtype* weight2,
Dtype* loc3, Dtype* weight3, Dtype* loc4, Dtype* weight4)
{
const Dtype scale_w = src_width / (Dtype)dst_width;
const Dtype scale_h = src_height / (Dtype)dst_height;
int loop_n = dst_height * dst_width;
hipLaunchKernelGGL(( kernel_GetBiLinearResizeMatRules<Dtype>) , dim3(CAFFE_GET_BLOCKS(loop_n)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
loop_n, src_height, src_width,
dst_height, dst_width, scale_h, scale_w,
loc1, weight1, loc2, weight2,
loc3, weight3, loc4, weight4);
CUDA_POST_KERNEL_CHECK;
}
template void GetBiLinearResizeMatRules_gpu( const int src_height, const int src_width,
const int dst_height, const int dst_width,
float* loc1, float* weight1, float* loc2, float* weight2,
float* loc3, float* weight3, float* loc4, float* weight4);
template void GetBiLinearResizeMatRules_gpu( const int src_height, const int src_width,
const int dst_height, const int dst_width,
double* loc1, double* weight1, double* loc2, double* weight2,
double* loc3, double* weight3, double* loc4, double* weight4);
template <typename Dtype>
void ResizeBlob_gpu(const Blob<Dtype>* src,Blob<Dtype>* dst,
Blob<Dtype>* loc1, Blob<Dtype>* loc2, Blob<Dtype>* loc3, Blob<Dtype>* loc4){
CHECK(src->num() == dst->num())<<"src->num() == dst->num()";
CHECK(src->channels() == dst->channels())<< "src->channels() == dst->channels()";
GetBiLinearResizeMatRules_gpu( src->height(),src->width(),
dst->height(), dst->width(),
loc1->mutable_gpu_data(), loc1->mutable_gpu_diff(), loc2->mutable_gpu_data(), loc2->mutable_gpu_diff(),
loc3->mutable_gpu_data(), loc3->mutable_gpu_diff(), loc4->mutable_gpu_data(), loc4->mutable_gpu_diff());
ResizeBlob_gpu( src, dst) ;
}
template void ResizeBlob_gpu(const Blob<float>* src,Blob<float>* dst,
Blob<float>* loc1, Blob<float>* loc2, Blob<float>* loc3, Blob<float>* loc4);
template void ResizeBlob_gpu(const Blob<double>* src,Blob<double>* dst,
Blob<double>* loc1, Blob<double>* loc2, Blob<double>* loc3, Blob<double>* loc4);
template <typename Dtype>
void GenerateSubBlobs_gpu(const Blob<Dtype>& src,
Blob<Dtype>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w)
{
const int nums_ = src.num();
const int channels_ = src.channels();
const int height_ = src.height();
const int width_ = src.width();
const int height_col_ =(height_ + 2 * pad_h - kernel_h) / stride_h + 1;
const int width_col_ = (width_ + 2 * pad_w - kernel_w) / stride_w + 1;
/*
* actually after im2col_v2, data is stored as
* col_buffer_.Reshape(1*height_out_*width_out_, channels_ , kernel_h_ , kernel_w_);
* */
dst.Reshape(height_col_*width_col_*nums_,channels_, kernel_h, kernel_w);
caffe::caffe_gpu_set(dst.count(),Dtype(0),dst.mutable_gpu_data());
for(int n = 0; n < nums_; n++){
const Dtype* src_data = src.gpu_data() + src.offset(n);
Dtype* dst_data = dst.mutable_gpu_data() + dst.offset(n*height_col_*width_col_);
caffe::im2col_v2_gpu(src_data, channels_, height_,
width_, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dst_data);
}
}
template void GenerateSubBlobs_gpu(const Blob<float>& src,
Blob<float>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w);
template void GenerateSubBlobs_gpu(const Blob<double>& src,
Blob<double>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w);
template <typename Dtype>
__global__ void kernel_CropBlob(const int nthreads, const Dtype* src_data, Dtype* dst_data,
const int num, const int channels, const int in_h, const int in_w,
const int out_h, const int out_w, const int start_h, const int start_w)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index/channels/out_h/out_w;
int c = (index/(out_h*out_w))% channels;
int h = (index%(out_h*out_w))/out_w;
int w = (index%(out_h*out_w))%out_w;
Dtype* dst_data_ptr =dst_data+ ((n* channels+c)*out_h )*out_w ;
const Dtype* src_data_ptr = src_data + ((n* channels+c)*in_h )*in_w ;
dst_data_ptr[h*out_w+w] = src_data_ptr[(h+start_h)*in_w + w+start_w];
}
}
template <typename Dtype>
void CropBlobs_gpu( const Blob<Dtype>&src,
const int start_h, const int start_w,
const int end_h, const int end_w, Blob<Dtype>&dst)
{
const int in_h = src.height();
const int in_w = src.width();
const int num = src.num();
const int channels = src.channels();
const int out_h = end_h - start_h;
const int out_w = end_w - start_w;
CHECK(out_h > 0) <<" end_h should be larger than start_h";
CHECK(out_w > 0) <<" end_w should be larger than start_w";
CHECK(out_h <=in_h) <<" out_h should nor be larger than input_height";
CHECK(out_w <=in_w) <<" out_w should nor be larger than input_width";
dst.Reshape(num,channels,out_h,out_w);
if((out_h != in_h) || (out_w != in_w)){
const int loop_n = num*channels*out_h*out_w;
hipLaunchKernelGGL(( kernel_CropBlob <Dtype>) , dim3(CAFFE_GET_BLOCKS(loop_n)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, loop_n,
src.gpu_data(), dst.mutable_gpu_data(),
num, channels, in_h, in_w, out_h, out_w, start_h, start_w);
}
else
{
caffe::caffe_copy(src.count(),src.gpu_data(),dst.mutable_gpu_data());
}
}
template void CropBlobs_gpu( const Blob<float>&src,
const int start_h, const int start_w,
const int end_h, const int end_w, Blob<float>&dst);
template void CropBlobs_gpu( const Blob<double>&src,
const int start_h, const int start_w,
const int end_h, const int end_w, Blob<double>&dst);
template <typename Dtype>
__global__ void kernel_CropBlob(const int nthreads, const Dtype* src_data, Dtype* dst_data,
const int num, const int channels, const int in_h, const int in_w,
const int dst_num, const int dst_h, const int dst_w,
const int src_num_id, const int dst_num_id,const int out_h, const int out_w,
const int start_h, const int start_w, const int dst_start_h, const int dst_start_w){
CUDA_KERNEL_LOOP(index, nthreads) {
int c = (index/(out_h*out_w))% channels;
int h = (index%(out_h*out_w))/out_w;
int w = (index%(out_h*out_w))%out_w;
Dtype* dst_data_ptr =dst_data+ ((dst_num_id* channels+c)*dst_h )*dst_w ;
const Dtype* src_data_ptr = src_data + ((src_num_id* channels+c)*in_h )*in_w ;
int true_src_h = h+start_h;
int true_dst_h = h+dst_start_h;
int true_src_w = w+start_w;
int true_dst_w = w + dst_start_w;
if(true_src_h >= 0 && true_src_h < in_h && true_src_w >= 0 && true_src_w < in_w &&
true_dst_h >= 0 && true_dst_h < dst_h && true_dst_w>= 0 && true_dst_w< dst_w )
dst_data_ptr[true_dst_h *dst_w + true_dst_w] =
src_data_ptr[true_src_h * in_w + true_src_w];
}
}
template <typename Dtype>
void CropBlobs_gpu( const Blob<Dtype>&src, const int src_num_id, const int start_h,
const int start_w, const int end_h, const int end_w, Blob<Dtype>&dst,
const int dst_num_id,const int dst_start_h , const int dst_start_w ){
const int in_h = src.height();
const int in_w = src.width();
const int dst_h = dst.height();
const int dst_w = dst.width();
const int channels = src.channels();
const int out_h = end_h - start_h;
const int out_w = end_w - start_w;
CHECK(out_h > 0) <<" end_h should be larger than start_h";
CHECK(out_w > 0) <<" end_w should be larger than start_w";
// CHECK(out_h <=in_h) <<" out_h should nor be larger than input_height";
// CHECK(out_w <=in_w) <<" out_w should nor be larger than input_width";
CHECK_GT(src.num(), src_num_id);
CHECK_GT(dst.num(), dst_num_id);
CHECK_EQ(channels, dst.channels());
// CHECK_GE(dst.height(), end_h);
// CHECK_GE(dst.width(), end_w);
const int loop_n = channels*out_h*out_w;
hipLaunchKernelGGL(( kernel_CropBlob <Dtype>) , dim3(CAFFE_GET_BLOCKS(loop_n)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, loop_n,
src.gpu_data(), dst.mutable_gpu_data(),
src.num(), channels, in_h, in_w,
dst.num(),dst_h,dst_w, src_num_id,dst_num_id,
out_h, out_w, start_h, start_w, dst_start_h, dst_start_w);
}
template void CropBlobs_gpu( const Blob<float>&src, const int src_num_id, const int start_h,
const int start_w, const int end_h, const int end_w, Blob<float>&dst,
const int dst_num_id,const int dst_start_h , const int dst_start_w );
template void CropBlobs_gpu( const Blob<double>&src, const int src_num_id, const int start_h,
const int start_w, const int end_h, const int end_w, Blob<double>&dst,
const int dst_num_id,const int dst_start_h , const int dst_start_w );
template <typename Dtype>
void ConcateSubImagesInBlobs_gpu(const Blob<Dtype>& src,
Blob<Dtype>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int out_img_h, const int out_img_w)
{
const int in_nums = src.num();
const int height_col_ =(out_img_h + 2 * pad_h - kernel_h) / stride_h + 1;
const int width_col_ = (out_img_w + 2 * pad_w - kernel_w) / stride_w + 1;
// std::cout<<"in_nums:"<<in_nums<<" kernel_h:"<<kernel_h<<" kernel_w:"<<kernel_w
// <<" pad_h:"<<pad_h<<" pad_w:"<<pad_w<<" stride_h:"<<stride_h<<
// " stride_w:"<<stride_w<<" out_img_h:"<<out_img_h <<" out_img_w:"<<out_img_w
// << " height_col:"<<height_col_<<" width_col:"<<width_col_<<std::endl;
dst.Reshape(in_nums/height_col_/width_col_,src.channels(), out_img_h, out_img_w);
// std::cout<<"in_nums/height_col_/width_col_,src.channels(), out_img_h, out_img_w: "<<
// in_nums/height_col_/width_col_<< " "<<src.channels()<<" "<<out_img_h<<" "<<
// out_img_w<<std::endl;
const int channels_ = dst.channels();
const int height_ = dst.height();
const int width_ = dst.width();
const int out_num = dst.num();
for(int n = 0; n < out_num; n++){
const Dtype* src_data = src.gpu_data() + src.offset(n*height_col_*width_col_);
Dtype* dst_data = dst.mutable_gpu_data() + dst.offset(n);
caffe::col2im_v2_gpu(src_data, channels_, height_,
width_, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dst_data);
}
return;
}
template void ConcateSubImagesInBlobs_gpu(const Blob<float>& src,
Blob<float>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int out_img_h, const int out_img_w);
template void ConcateSubImagesInBlobs_gpu(const Blob<double>& src,
Blob<double>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int out_img_h, const int out_img_w);
// namespace caffe
}
| b10d3430738edf3d02348900b4482c3ddb9732f5.cu |
/**
* developed by zhujin
*/
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/io/coded_stream.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/util_img.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/proto/caffe.pb.h"
namespace caffe {
template <typename Dtype>
__global__ void kernel_BiLinearResize(const int nthreads, const Dtype* src_data, const int src_height, const int src_width,
Dtype* dst_data, const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w)
{
CUDA_KERNEL_LOOP(i, nthreads) {
int dst_h = i /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
int dst_w = i %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
const int src_idx = src_offset_1 + src_w;
Dtype res = (w_h0 * w_w0 * src_data[src_idx]);
if (src_w + 1 < src_width)
res += (w_h0 * w_w1 * src_data[src_idx + 1]);
if (src_h + 1 < src_height)
res += (w_h1 * w_w0 * src_data[src_idx + src_width]);
if (src_w + 1 < src_width && src_h + 1 < src_height)
res += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
dst_data[dst_idx] = res;
}
}
template <typename Dtype>
void BiLinearResizeMat_gpu(const Dtype* src, const int src_height, const int src_width,
Dtype* dst, const int dst_height, const int dst_width)
{
const Dtype scale_w = src_width / (Dtype)dst_width;
const Dtype scale_h = src_height / (Dtype)dst_height;
int loop_n = dst_height * dst_width;
kernel_BiLinearResize<Dtype> <<<CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >>>(
loop_n,src, src_height, src_width, dst, dst_height, dst_width, scale_h, scale_w);
//CUDA_POST_KERNEL_CHECK;
}
template void BiLinearResizeMat_gpu(const float* src, const int src_height, const int src_width,
float* dst, const int dst_height, const int dst_width);
template void BiLinearResizeMat_gpu(const double* src, const int src_height, const int src_width,
double* dst, const int dst_height, const int dst_width);
template <typename Dtype>
void ResizeBlob_gpu(const Blob<Dtype>* src, const int src_n, const int src_c,
Blob<Dtype>* dst, const int dst_n, const int dst_c) {
const int src_channels = src->channels();
const int src_height = src->height();
const int src_width = src->width();
const int src_offset = (src_n * src_channels + src_c) * src_height * src_width;
const int dst_channels = dst->channels();
const int dst_height = dst->height();
const int dst_width = dst->width();
const int dst_offset = (dst_n * dst_channels + dst_c) * dst_height * dst_width;
const Dtype* src_data = &(src->gpu_data()[src_offset]);
Dtype* dst_data = &(dst->mutable_gpu_data()[dst_offset]);
BiLinearResizeMat_gpu(src_data, src_height, src_width,
dst_data, dst_height, dst_width);
CUDA_POST_KERNEL_CHECK;
}
template void ResizeBlob_gpu(const Blob<float>* src, const int src_n, const int src_c,
Blob<float>* dst, const int dst_n, const int dst_c);
template void ResizeBlob_gpu(const Blob<double>* src, const int src_n, const int src_c,
Blob<double>* dst, const int dst_n, const int dst_c);
template <typename Dtype>
__global__ void kernel_GetBiLinearResizeMatRules(const int nthreads, const int src_height, const int src_width,
const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w,
Dtype* loc1, Dtype* weight1, Dtype* loc2, Dtype* weight2,
Dtype* loc3, Dtype* weight3, Dtype* loc4, Dtype* weight4)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int dst_h = index /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
int dst_w = index %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
// dst_data[dst_idx] = 0;
const int src_idx = src_offset_1 + src_w;
loc1[dst_idx] = src_idx;
weight1[dst_idx] = w_h0 * w_w0;
loc2[dst_idx] = 0;
weight2[dst_idx] = 0;
weight3[dst_idx] = 0;
loc3[dst_idx] = 0;
loc4[dst_idx] = 0;
weight4[dst_idx] = 0;
if (src_w + 1 < src_width)
{
loc2[dst_idx] = src_idx + 1;
weight2[dst_idx] = w_h0 * w_w1;
// dst_data[dst_idx] += (w_h0 * w_w1 * src_data[src_idx + 1]);
}
if (src_h + 1 < src_height)
{
// dst_data[dst_idx] += (w_h1 * w_w0 * src_data[src_idx + src_width]);
weight3[dst_idx] = w_h1 * w_w0;
loc3[dst_idx] = src_idx + src_width;
}
if (src_w + 1 < src_width && src_h + 1 < src_height)
{
loc4[dst_idx] = src_idx + src_width + 1;
weight4[dst_idx] = w_h1 * w_w1;
// dst_data[dst_idx] += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
}
}
}
template <typename Dtype>
__global__ void kernel_ResizeBlob(const int nthreads,const int num,const int channels, const Dtype* src, const int src_height, const int src_width,
Dtype* dst, const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int i = index %( dst_height * dst_width);
int c = (index/(dst_height * dst_width))%channels;
int n = (index/(dst_height * dst_width))/channels;
int src_offset = (n * channels + c) * src_height * src_width;
int dst_offset = (n * channels + c) * dst_height * dst_width;
const Dtype* src_data = src+src_offset;
Dtype* dst_data = dst+dst_offset;
int dst_h = i /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
int dst_w = i %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
const int src_idx = src_offset_1 + src_w;
Dtype res = (w_h0 * w_w0 * src_data[src_idx]);
if (src_w + 1 < src_width)
res += (w_h0 * w_w1 * src_data[src_idx + 1]);
if (src_h + 1 < src_height)
res += (w_h1 * w_w0 * src_data[src_idx + src_width]);
if (src_w + 1 < src_width && src_h + 1 < src_height)
res += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
dst_data[dst_idx] = res;
}
}
template <typename Dtype>
void ResizeBlob_gpu(const Blob<Dtype>* src,Blob<Dtype>* dst) {
CHECK(src->num() == dst->num())<<"src->num() == dst->num()";
CHECK(src->channels() == dst->channels())<< "src->channels() == dst->channels()";
const int src_num = src->num();
const int src_channels = src->channels();
const int src_height = src->height();
const int src_width = src->width();
const int dst_channels = dst->channels();
const int dst_height = dst->height();
const int dst_width = dst->width();
const Dtype scale_w = src_width / (Dtype)dst_width;
const Dtype scale_h = src_height / (Dtype)dst_height;
int loop_n = dst_height * dst_width*dst_channels*src_num;
const Dtype* src_data = src->gpu_data();
Dtype* dst_data = dst->mutable_gpu_data();
kernel_ResizeBlob<Dtype> <<<CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >>>(loop_n,src_num,src_channels,
src_data, src_height,src_width,
dst_data, dst_height, dst_width,
scale_h,scale_w);
CUDA_POST_KERNEL_CHECK;
}
template void ResizeBlob_gpu(const Blob<float>* src,
Blob<float>* dst);
template void ResizeBlob_gpu(const Blob<double>* src,
Blob<double>* dst);
template <typename Dtype>
void GetBiLinearResizeMatRules_gpu( const int src_height, const int src_width,
const int dst_height, const int dst_width,
Dtype* loc1, Dtype* weight1, Dtype* loc2, Dtype* weight2,
Dtype* loc3, Dtype* weight3, Dtype* loc4, Dtype* weight4)
{
const Dtype scale_w = src_width / (Dtype)dst_width;
const Dtype scale_h = src_height / (Dtype)dst_height;
int loop_n = dst_height * dst_width;
kernel_GetBiLinearResizeMatRules<Dtype> <<<CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >>>(
loop_n, src_height, src_width,
dst_height, dst_width, scale_h, scale_w,
loc1, weight1, loc2, weight2,
loc3, weight3, loc4, weight4);
CUDA_POST_KERNEL_CHECK;
}
template void GetBiLinearResizeMatRules_gpu( const int src_height, const int src_width,
const int dst_height, const int dst_width,
float* loc1, float* weight1, float* loc2, float* weight2,
float* loc3, float* weight3, float* loc4, float* weight4);
template void GetBiLinearResizeMatRules_gpu( const int src_height, const int src_width,
const int dst_height, const int dst_width,
double* loc1, double* weight1, double* loc2, double* weight2,
double* loc3, double* weight3, double* loc4, double* weight4);
template <typename Dtype>
void ResizeBlob_gpu(const Blob<Dtype>* src,Blob<Dtype>* dst,
Blob<Dtype>* loc1, Blob<Dtype>* loc2, Blob<Dtype>* loc3, Blob<Dtype>* loc4){
CHECK(src->num() == dst->num())<<"src->num() == dst->num()";
CHECK(src->channels() == dst->channels())<< "src->channels() == dst->channels()";
GetBiLinearResizeMatRules_gpu( src->height(),src->width(),
dst->height(), dst->width(),
loc1->mutable_gpu_data(), loc1->mutable_gpu_diff(), loc2->mutable_gpu_data(), loc2->mutable_gpu_diff(),
loc3->mutable_gpu_data(), loc3->mutable_gpu_diff(), loc4->mutable_gpu_data(), loc4->mutable_gpu_diff());
ResizeBlob_gpu( src, dst) ;
}
template void ResizeBlob_gpu(const Blob<float>* src,Blob<float>* dst,
Blob<float>* loc1, Blob<float>* loc2, Blob<float>* loc3, Blob<float>* loc4);
template void ResizeBlob_gpu(const Blob<double>* src,Blob<double>* dst,
Blob<double>* loc1, Blob<double>* loc2, Blob<double>* loc3, Blob<double>* loc4);
template <typename Dtype>
void GenerateSubBlobs_gpu(const Blob<Dtype>& src,
Blob<Dtype>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w)
{
const int nums_ = src.num();
const int channels_ = src.channels();
const int height_ = src.height();
const int width_ = src.width();
const int height_col_ =(height_ + 2 * pad_h - kernel_h) / stride_h + 1;
const int width_col_ = (width_ + 2 * pad_w - kernel_w) / stride_w + 1;
/*
* actually after im2col_v2, data is stored as
* col_buffer_.Reshape(1*height_out_*width_out_, channels_ , kernel_h_ , kernel_w_);
* */
dst.Reshape(height_col_*width_col_*nums_,channels_, kernel_h, kernel_w);
caffe::caffe_gpu_set(dst.count(),Dtype(0),dst.mutable_gpu_data());
for(int n = 0; n < nums_; n++){
const Dtype* src_data = src.gpu_data() + src.offset(n);
Dtype* dst_data = dst.mutable_gpu_data() + dst.offset(n*height_col_*width_col_);
caffe::im2col_v2_gpu(src_data, channels_, height_,
width_, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dst_data);
}
}
template void GenerateSubBlobs_gpu(const Blob<float>& src,
Blob<float>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w);
template void GenerateSubBlobs_gpu(const Blob<double>& src,
Blob<double>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w);
template <typename Dtype>
__global__ void kernel_CropBlob(const int nthreads, const Dtype* src_data, Dtype* dst_data,
const int num, const int channels, const int in_h, const int in_w,
const int out_h, const int out_w, const int start_h, const int start_w)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int n = index/channels/out_h/out_w;
int c = (index/(out_h*out_w))% channels;
int h = (index%(out_h*out_w))/out_w;
int w = (index%(out_h*out_w))%out_w;
Dtype* dst_data_ptr =dst_data+ ((n* channels+c)*out_h )*out_w ;
const Dtype* src_data_ptr = src_data + ((n* channels+c)*in_h )*in_w ;
dst_data_ptr[h*out_w+w] = src_data_ptr[(h+start_h)*in_w + w+start_w];
}
}
template <typename Dtype>
void CropBlobs_gpu( const Blob<Dtype>&src,
const int start_h, const int start_w,
const int end_h, const int end_w, Blob<Dtype>&dst)
{
const int in_h = src.height();
const int in_w = src.width();
const int num = src.num();
const int channels = src.channels();
const int out_h = end_h - start_h;
const int out_w = end_w - start_w;
CHECK(out_h > 0) <<" end_h should be larger than start_h";
CHECK(out_w > 0) <<" end_w should be larger than start_w";
CHECK(out_h <=in_h) <<" out_h should nor be larger than input_height";
CHECK(out_w <=in_w) <<" out_w should nor be larger than input_width";
dst.Reshape(num,channels,out_h,out_w);
if((out_h != in_h) || (out_w != in_w)){
const int loop_n = num*channels*out_h*out_w;
kernel_CropBlob <Dtype> <<< CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >>> (loop_n,
src.gpu_data(), dst.mutable_gpu_data(),
num, channels, in_h, in_w, out_h, out_w, start_h, start_w);
}
else
{
caffe::caffe_copy(src.count(),src.gpu_data(),dst.mutable_gpu_data());
}
}
template void CropBlobs_gpu( const Blob<float>&src,
const int start_h, const int start_w,
const int end_h, const int end_w, Blob<float>&dst);
template void CropBlobs_gpu( const Blob<double>&src,
const int start_h, const int start_w,
const int end_h, const int end_w, Blob<double>&dst);
template <typename Dtype>
__global__ void kernel_CropBlob(const int nthreads, const Dtype* src_data, Dtype* dst_data,
const int num, const int channels, const int in_h, const int in_w,
const int dst_num, const int dst_h, const int dst_w,
const int src_num_id, const int dst_num_id,const int out_h, const int out_w,
const int start_h, const int start_w, const int dst_start_h, const int dst_start_w){
CUDA_KERNEL_LOOP(index, nthreads) {
int c = (index/(out_h*out_w))% channels;
int h = (index%(out_h*out_w))/out_w;
int w = (index%(out_h*out_w))%out_w;
Dtype* dst_data_ptr =dst_data+ ((dst_num_id* channels+c)*dst_h )*dst_w ;
const Dtype* src_data_ptr = src_data + ((src_num_id* channels+c)*in_h )*in_w ;
int true_src_h = h+start_h;
int true_dst_h = h+dst_start_h;
int true_src_w = w+start_w;
int true_dst_w = w + dst_start_w;
if(true_src_h >= 0 && true_src_h < in_h && true_src_w >= 0 && true_src_w < in_w &&
true_dst_h >= 0 && true_dst_h < dst_h && true_dst_w>= 0 && true_dst_w< dst_w )
dst_data_ptr[true_dst_h *dst_w + true_dst_w] =
src_data_ptr[true_src_h * in_w + true_src_w];
}
}
template <typename Dtype>
void CropBlobs_gpu( const Blob<Dtype>&src, const int src_num_id, const int start_h,
const int start_w, const int end_h, const int end_w, Blob<Dtype>&dst,
const int dst_num_id,const int dst_start_h , const int dst_start_w ){
const int in_h = src.height();
const int in_w = src.width();
const int dst_h = dst.height();
const int dst_w = dst.width();
const int channels = src.channels();
const int out_h = end_h - start_h;
const int out_w = end_w - start_w;
CHECK(out_h > 0) <<" end_h should be larger than start_h";
CHECK(out_w > 0) <<" end_w should be larger than start_w";
// CHECK(out_h <=in_h) <<" out_h should nor be larger than input_height";
// CHECK(out_w <=in_w) <<" out_w should nor be larger than input_width";
CHECK_GT(src.num(), src_num_id);
CHECK_GT(dst.num(), dst_num_id);
CHECK_EQ(channels, dst.channels());
// CHECK_GE(dst.height(), end_h);
// CHECK_GE(dst.width(), end_w);
const int loop_n = channels*out_h*out_w;
kernel_CropBlob <Dtype> <<< CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >>> (loop_n,
src.gpu_data(), dst.mutable_gpu_data(),
src.num(), channels, in_h, in_w,
dst.num(),dst_h,dst_w, src_num_id,dst_num_id,
out_h, out_w, start_h, start_w, dst_start_h, dst_start_w);
}
template void CropBlobs_gpu( const Blob<float>&src, const int src_num_id, const int start_h,
const int start_w, const int end_h, const int end_w, Blob<float>&dst,
const int dst_num_id,const int dst_start_h , const int dst_start_w );
template void CropBlobs_gpu( const Blob<double>&src, const int src_num_id, const int start_h,
const int start_w, const int end_h, const int end_w, Blob<double>&dst,
const int dst_num_id,const int dst_start_h , const int dst_start_w );
template <typename Dtype>
void ConcateSubImagesInBlobs_gpu(const Blob<Dtype>& src,
Blob<Dtype>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int out_img_h, const int out_img_w)
{
const int in_nums = src.num();
const int height_col_ =(out_img_h + 2 * pad_h - kernel_h) / stride_h + 1;
const int width_col_ = (out_img_w + 2 * pad_w - kernel_w) / stride_w + 1;
// std::cout<<"in_nums:"<<in_nums<<" kernel_h:"<<kernel_h<<" kernel_w:"<<kernel_w
// <<" pad_h:"<<pad_h<<" pad_w:"<<pad_w<<" stride_h:"<<stride_h<<
// " stride_w:"<<stride_w<<" out_img_h:"<<out_img_h <<" out_img_w:"<<out_img_w
// << " height_col:"<<height_col_<<" width_col:"<<width_col_<<std::endl;
dst.Reshape(in_nums/height_col_/width_col_,src.channels(), out_img_h, out_img_w);
// std::cout<<"in_nums/height_col_/width_col_,src.channels(), out_img_h, out_img_w: "<<
// in_nums/height_col_/width_col_<< " "<<src.channels()<<" "<<out_img_h<<" "<<
// out_img_w<<std::endl;
const int channels_ = dst.channels();
const int height_ = dst.height();
const int width_ = dst.width();
const int out_num = dst.num();
for(int n = 0; n < out_num; n++){
const Dtype* src_data = src.gpu_data() + src.offset(n*height_col_*width_col_);
Dtype* dst_data = dst.mutable_gpu_data() + dst.offset(n);
caffe::col2im_v2_gpu(src_data, channels_, height_,
width_, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dst_data);
}
return;
}
template void ConcateSubImagesInBlobs_gpu(const Blob<float>& src,
Blob<float>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int out_img_h, const int out_img_w);
template void ConcateSubImagesInBlobs_gpu(const Blob<double>& src,
Blob<double>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int out_img_h, const int out_img_w);
// namespace caffe
}
|
eb41274d9837a4ce787145350b314d13373f80c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/UpSample.cuh>
#include <ATen/native/hip/KernelUtils.cuh>
namespace at {
namespace native {
namespace {
__device__ __forceinline__ size_t
idx(const size_t nc,
const size_t height,
const size_t width,
const size_t y,
const size_t x) {
return (nc * height + y) * width + x;
}
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bilinear2d_out_frame(
const int n,
const accscalar_t rheight,
const accscalar_t rwidth,
const bool align_corners,
const PackedTensorAccessor<scalar_t, 4> idata,
PackedTensorAccessor<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int height1 = idata.size(2);
const int width1 = idata.size(3);
const int height2 = odata.size(2);
const int width2 = odata.size(3);
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = idata[n][c][h1][w1];
odata[n][c][h2][w2] = val;
}
}
return;
}
//
const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>(
rheight, h2, align_corners, /*cubic=*/false);
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const accscalar_t h1lambda = h1r - h1;
const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda;
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const accscalar_t val = h0lambda *
(w0lambda * idata[n][c][h1][w1] +
w1lambda * idata[n][c][h1][w1 + w1p]) +
h1lambda *
(w0lambda * idata[n][c][h1 + h1p][w1] +
w1lambda * idata[n][c][h1 + h1p][w1 + w1p]);
odata[n][c][h2][w2] = static_cast<scalar_t>(val);
}
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bilinear2d_backward_out_frame(
const size_t nc,
const int height1,
const int width1,
const int height2,
const int width2,
const accscalar_t rheight,
const accscalar_t rwidth,
const bool align_corners,
scalar_t* __restrict__ idata,
const scalar_t* __restrict__ odata) {
const size_t o_numel = nc * width2 * height2;
const size_t i_numel = nc * width1 * height1;
for (size_t index = blockDim.x * blockIdx.x + threadIdx.x; index < o_numel;
index += blockDim.x * gridDim.x) {
size_t index_temp = index;
const int w2 = index_temp % width2; // 0:width2-1
index_temp /= width2;
const int h2 = index_temp % height2; // 0:height2-1
const size_t nc = index_temp / height2;
//
const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>(
rheight, h2, align_corners, /*cubic=*/false);
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const accscalar_t h1lambda = h1r - h1;
const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda;
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
const scalar_t d2val = odata[index];
fastAtomicAdd(
idata,
idx(nc, height1, width1, h1, w1),
i_numel,
static_cast<scalar_t>(h0lambda * w0lambda * d2val),
true);
fastAtomicAdd(
idata,
idx(nc, height1, width1, h1, w1 + w1p),
i_numel,
static_cast<scalar_t>(h0lambda * w1lambda * d2val),
true);
fastAtomicAdd(
idata,
idx(nc, height1, width1, h1 + h1p, w1),
i_numel,
static_cast<scalar_t>(h1lambda * w0lambda * d2val),
true);
fastAtomicAdd(
idata,
idx(nc, height1, width1, h1 + h1p, w1 + w1p),
i_numel,
static_cast<scalar_t>(h1lambda * w1lambda * d2val),
true);
}
}
static void upsample_bilinear2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_bilinear2d_out_cuda", {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input.size(0);
int channels = input.size(1);
int input_height = input.size(2);
int input_width = input.size(3);
upsample_2d_shape_check(
input,
Tensor(),
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
output.resize_({input.size(0), input.size(1), output_height, output_width});
AT_ASSERT(
input_height > 0 && input_width > 0 && output_height > 0 &&
output_width > 0);
const int num_kernels = output_height * output_width;
const int num_threads = ::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_bilinear2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 4>();
auto odata = output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners);
hipLaunchKernelGGL(( upsample_bilinear2d_out_frame<scalar_t, accscalar_t>)
, dim3(cuda::ATenCeilDiv(num_kernels, num_threads)),
dim3(num_threads),
0,
stream,
num_kernels, rheight, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(hipGetLastError());
}
static void upsample_bilinear2d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
"upsample_bilinear2d_backward_out_cuda",
{grad_output_arg, grad_input_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 4,
"It is expected input_size equals to 4, but got size ",
input_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_size[0];
int channels = input_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
upsample_2d_shape_check(
Tensor(),
grad_output_,
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_({nbatch, channels, input_height, input_width});
// A contiguous tensor is required for the kernel launch config
grad_input.contiguous();
// initialization to zero is required here. As we launch one thread per output
// element, and atomicAdd to input gradient. Given a sparse sampling case, our
// threads are not covering the whole input tensor.
grad_input.zero_();
const size_t num_kernels = nbatch * channels * output_height * output_width;
const int num_threads = ::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_bilinear2d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.data_ptr<scalar_t>();
auto odata = grad_output.data_ptr<scalar_t>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners);
hipLaunchKernelGGL(( upsample_bilinear2d_backward_out_frame<scalar_t, accscalar_t>)
, dim3(cuda::ATenCeilDiv(num_kernels, static_cast<size_t>(num_threads))),
dim3(num_threads),
0,
stream,
nbatch * channels,
input_height,
input_width,
output_height,
output_width,
rheight,
rwidth,
align_corners,
idata,
odata);
});
AT_CUDA_CHECK(hipGetLastError());
}
} // namespace
Tensor& upsample_bilinear2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners) {
upsample_bilinear2d_out_cuda_template(
output, input, output_size, align_corners);
return output;
}
Tensor upsample_bilinear2d_cuda(
const Tensor& input,
IntArrayRef output_size,
bool align_corners) {
Tensor output = at::empty_like(input);
upsample_bilinear2d_out_cuda_template(
output, input, output_size, align_corners);
return output;
}
Tensor& upsample_bilinear2d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners) {
upsample_bilinear2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners);
return grad_input;
}
Tensor upsample_bilinear2d_backward_cuda(
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners) {
Tensor grad_input = at::empty_like(grad_output);
upsample_bilinear2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners);
return grad_input;
}
} // namespace native
} // namespace at
| eb41274d9837a4ce787145350b314d13373f80c0.cu | // Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/UpSample.cuh>
#include <ATen/native/cuda/KernelUtils.cuh>
namespace at {
namespace native {
namespace {
__device__ __forceinline__ size_t
idx(const size_t nc,
const size_t height,
const size_t width,
const size_t y,
const size_t x) {
return (nc * height + y) * width + x;
}
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bilinear2d_out_frame(
const int n,
const accscalar_t rheight,
const accscalar_t rwidth,
const bool align_corners,
const PackedTensorAccessor<scalar_t, 4> idata,
PackedTensorAccessor<scalar_t, 4> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int height1 = idata.size(2);
const int width1 = idata.size(3);
const int height2 = odata.size(2);
const int width2 = odata.size(3);
if (index < n) {
const int w2 = index % width2; // 0:width2-1
const int h2 = index / width2; // 0:height2-1
// special case: just copy
if (height1 == height2 && width1 == width2) {
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = idata[n][c][h1][w1];
odata[n][c][h2][w2] = val;
}
}
return;
}
//
const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>(
rheight, h2, align_corners, /*cubic=*/false);
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const accscalar_t h1lambda = h1r - h1;
const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda;
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const accscalar_t val = h0lambda *
(w0lambda * idata[n][c][h1][w1] +
w1lambda * idata[n][c][h1][w1 + w1p]) +
h1lambda *
(w0lambda * idata[n][c][h1 + h1p][w1] +
w1lambda * idata[n][c][h1 + h1p][w1 + w1p]);
odata[n][c][h2][w2] = static_cast<scalar_t>(val);
}
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_bilinear2d_backward_out_frame(
const size_t nc,
const int height1,
const int width1,
const int height2,
const int width2,
const accscalar_t rheight,
const accscalar_t rwidth,
const bool align_corners,
scalar_t* __restrict__ idata,
const scalar_t* __restrict__ odata) {
const size_t o_numel = nc * width2 * height2;
const size_t i_numel = nc * width1 * height1;
for (size_t index = blockDim.x * blockIdx.x + threadIdx.x; index < o_numel;
index += blockDim.x * gridDim.x) {
size_t index_temp = index;
const int w2 = index_temp % width2; // 0:width2-1
index_temp /= width2;
const int h2 = index_temp % height2; // 0:height2-1
const size_t nc = index_temp / height2;
//
const accscalar_t h1r = area_pixel_compute_source_index<accscalar_t>(
rheight, h2, align_corners, /*cubic=*/false);
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const accscalar_t h1lambda = h1r - h1;
const accscalar_t h0lambda = static_cast<accscalar_t>(1) - h1lambda;
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
const scalar_t d2val = odata[index];
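    // scatter d2val to the four input pixels that contributed to this output element,
    // weighted by the same bilinear coefficients as the forward pass; fastAtomicAdd
    // resolves concurrent updates from neighbouring output threads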
fastAtomicAdd(
idata,
idx(nc, height1, width1, h1, w1),
i_numel,
static_cast<scalar_t>(h0lambda * w0lambda * d2val),
true);
fastAtomicAdd(
idata,
idx(nc, height1, width1, h1, w1 + w1p),
i_numel,
static_cast<scalar_t>(h0lambda * w1lambda * d2val),
true);
fastAtomicAdd(
idata,
idx(nc, height1, width1, h1 + h1p, w1),
i_numel,
static_cast<scalar_t>(h1lambda * w0lambda * d2val),
true);
fastAtomicAdd(
idata,
idx(nc, height1, width1, h1 + h1p, w1 + w1p),
i_numel,
static_cast<scalar_t>(h1lambda * w1lambda * d2val),
true);
}
}
static void upsample_bilinear2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_bilinear2d_out_cuda", {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input.size(0);
int channels = input.size(1);
int input_height = input.size(2);
int input_width = input.size(3);
upsample_2d_shape_check(
input,
Tensor(),
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
output.resize_({input.size(0), input.size(1), output_height, output_width});
AT_ASSERT(
input_height > 0 && input_width > 0 && output_height > 0 &&
output_width > 0);
const int num_kernels = output_height * output_width;
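  // one thread per output spatial location; the kernel loops over batch and channel internally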
const int num_threads = std::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_bilinear2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 4>();
auto odata = output.packed_accessor64<scalar_t, 4>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners);
upsample_bilinear2d_out_frame<scalar_t, accscalar_t>
<<<cuda::ATenCeilDiv(num_kernels, num_threads),
num_threads,
0,
stream>>>(
num_kernels, rheight, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(cudaGetLastError());
}
static void upsample_bilinear2d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
"upsample_bilinear2d_backward_out_cuda",
{grad_output_arg, grad_input_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 4,
"It is expected input_size equals to 4, but got size ",
input_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_size[0];
int channels = input_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
upsample_2d_shape_check(
Tensor(),
grad_output_,
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_({nbatch, channels, input_height, input_width});
// A contiguous tensor is required for the kernel launch config
grad_input.contiguous();
  // initialization to zero is required here: we launch one thread per output
  // element and atomicAdd into the input gradient, so for a sparse sampling case
  // the threads do not cover the whole input tensor.
grad_input.zero_();
const size_t num_kernels = nbatch * channels * output_height * output_width;
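  // one logical thread per output element (batch and channel included); the kernel
  // walks them with a grid-stride loop over o_numel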
const int num_threads = std::min(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_bilinear2d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.data_ptr<scalar_t>();
auto odata = grad_output.data_ptr<scalar_t>();
const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>(
input_height, output_height, align_corners);
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners);
upsample_bilinear2d_backward_out_frame<scalar_t, accscalar_t>
<<<cuda::ATenCeilDiv(num_kernels, static_cast<size_t>(num_threads)),
num_threads,
0,
stream>>>(
nbatch * channels,
input_height,
input_width,
output_height,
output_width,
rheight,
rwidth,
align_corners,
idata,
odata);
});
AT_CUDA_CHECK(cudaGetLastError());
}
} // namespace
Tensor& upsample_bilinear2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners) {
upsample_bilinear2d_out_cuda_template(
output, input, output_size, align_corners);
return output;
}
Tensor upsample_bilinear2d_cuda(
const Tensor& input,
IntArrayRef output_size,
bool align_corners) {
Tensor output = at::empty_like(input);
upsample_bilinear2d_out_cuda_template(
output, input, output_size, align_corners);
return output;
}
Tensor& upsample_bilinear2d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners) {
upsample_bilinear2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners);
return grad_input;
}
Tensor upsample_bilinear2d_backward_cuda(
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners) {
Tensor grad_input = at::empty_like(grad_output);
upsample_bilinear2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners);
return grad_input;
}
} // namespace native
} // namespace at
|
45527dfe63dedd1f76357eca8c80bc115e9419aa.hip | // !!! This is a file automatically generated by hipify!!!
// -*- C++ -*-
// -*- coding: utf-8 -*-
//
// michael a.g. aïvázis <[email protected]>
// parasim
// (c) 1998-2019 all rights reserved
//
// configuration
#include <portinfo>
// cuda
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
// pyre
#include <pyre/journal.h>
// pull the declarations
#include "kernels.h"
// the kernel that assembles the offset field
template <typename value_t = float>
__global__
static void
_offsetField(const int * coarse, // the coarse offsets
const int * fine, // the fine offsets
std::size_t pairs, // the total number of tiles
std::size_t margin, // the origin of the coarse shifts
std::size_t refineMargin, // origin of the refined shifts
std::size_t zoom, // the overall zoom factor of the refined shifts
float * field // results
);
// for each tile pair, combine its coarse and refined shifts into its offset field entry
void
ampcor::cuda::kernels::
offsetField(const int * coarse, // the coarse offsets
const int * fine, // the fine offsets
std::size_t pairs, // the total number of entries
std::size_t margin, // the origin of the coarse shifts
std::size_t refineMargin, // origin of the refined shifts
std::size_t zoom, // the overall zoom factor of the refined shifts
float * field // results
)
{
// make a channel
pyre::journal::debug_t channel("ampcor.cuda");
// launch blocks of T threads
auto T = 128;
// in as many blocks as it takes to handle all pairs
auto B = pairs / T + (pairs % T ? 1 : 0);
// show me
channel
<< pyre::journal::at(__HERE__)
<< "launching " << B << " blocks of " << T
<< " threads each to assemble the offset fields of " << pairs << " tiles"
<< pyre::journal::endl;
// launch
hipLaunchKernelGGL(( _offsetField) , dim3(B),dim3(T), 0, 0, coarse, fine, pairs, margin, refineMargin, zoom, field);
// wait for the kernels to finish
hipError_t status = hipDeviceSynchronize();
// check
if (status != hipSuccess) {
// get the description of the error
std::string description = hipGetErrorName(status);
// make a channel
pyre::journal::error_t channel("ampcor.cuda");
// complain
channel
<< pyre::journal::at(__HERE__)
<< "while assembling the offset field: "
<< description << " (" << status << ")"
<< pyre::journal::endl;
// and bail
throw std::runtime_error(description);
}
// all done
return;
}
// the kernel that assembles the offset field
template <typename value_t>
__global__
static void
_offsetField(const int * coarse, // the coarse offsets
const int * fine, // the fine offsets
std::size_t pairs, // the total number of tiles
std::size_t margin, // the origin of the coarse shifts
std::size_t refineMargin, // origin of the refined shifts
std::size_t zoom, // the overall zoom factor of the refined shifts
float * field // results
)
{
// build the workload descriptors
// global
// std::size_t B = gridDim.x; // number of blocks
std::size_t T = blockDim.x; // number of threads per block
// std::size_t W = B*T; // total number of workers
// local
std::size_t b = blockIdx.x; // my block id
std::size_t t = threadIdx.x; // my thread id within my block
std::size_t w = b*T + t; // my worker id
// if my worker id exceeds the number of cells that require update
if (w >= pairs) {
// nothing for me to do
return;
}
// a constant
const value_t one = 1.0;
// find the beginning of my coarse offset
auto myCoarse = coarse + 2*w;
// find the beginning of my fine offset
auto myFine = fine + 2*w;
// and the beginning of where i store my result
auto myField = field + 2*w;
// do the math
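    // total offset = (coarse shift re-centered about margin)
    //              + (refined shift divided by the zoom factor and re-centered about refineMargin)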
myField[0] = (one*myCoarse[0] - margin) + (one * myFine[0] / zoom - refineMargin);
myField[1] = (one*myCoarse[1] - margin) + (one * myFine[1] / zoom - refineMargin);
// all done
return;
}
// end of file
| 45527dfe63dedd1f76357eca8c80bc115e9419aa.cu | // -*- C++ -*-
// -*- coding: utf-8 -*-
//
// michael a.g. aïvázis <[email protected]>
// parasim
// (c) 1998-2019 all rights reserved
//
// configuration
#include <portinfo>
// cuda
#include <cuda_runtime.h>
#include <cooperative_groups.h>
// pyre
#include <pyre/journal.h>
// pull the declarations
#include "kernels.h"
// the kernel that assembles the offset field
template <typename value_t = float>
__global__
static void
_offsetField(const int * coarse, // the coarse offsets
const int * fine, // the fine offsets
std::size_t pairs, // the total number of tiles
std::size_t margin, // the origin of the coarse shifts
std::size_t refineMargin, // origin of the refined shifts
std::size_t zoom, // the overall zoom factor of the refined shifts
float * field // results
);
// for each tile pair, combine its coarse and refined shifts into its offset field entry
void
ampcor::cuda::kernels::
offsetField(const int * coarse, // the coarse offsets
const int * fine, // the fine offsets
std::size_t pairs, // the total number of entries
std::size_t margin, // the origin of the coarse shifts
std::size_t refineMargin, // origin of the refined shifts
std::size_t zoom, // the overall zoom factor of the refined shifts
float * field // results
)
{
// make a channel
pyre::journal::debug_t channel("ampcor.cuda");
// launch blocks of T threads
auto T = 128;
// in as many blocks as it takes to handle all pairs
auto B = pairs / T + (pairs % T ? 1 : 0);
// show me
channel
<< pyre::journal::at(__HERE__)
<< "launching " << B << " blocks of " << T
<< " threads each to assemble the offset fields of " << pairs << " tiles"
<< pyre::journal::endl;
// launch
_offsetField <<<B,T>>> (coarse, fine, pairs, margin, refineMargin, zoom, field);
// wait for the kernels to finish
cudaError_t status = cudaDeviceSynchronize();
// check
if (status != cudaSuccess) {
// get the description of the error
std::string description = cudaGetErrorName(status);
// make a channel
pyre::journal::error_t channel("ampcor.cuda");
// complain
channel
<< pyre::journal::at(__HERE__)
<< "while assembling the offset field: "
<< description << " (" << status << ")"
<< pyre::journal::endl;
// and bail
throw std::runtime_error(description);
}
// all done
return;
}
// the kernel that assembles the offset field
template <typename value_t>
__global__
static void
_offsetField(const int * coarse, // the coarse offsets
const int * fine, // the fine offsets
std::size_t pairs, // the total number of tiles
std::size_t margin, // the origin of the coarse shifts
std::size_t refineMargin, // origin of the refined shifts
std::size_t zoom, // the overall zoom factor of the refined shifts
float * field // results
)
{
// build the workload descriptors
// global
// std::size_t B = gridDim.x; // number of blocks
std::size_t T = blockDim.x; // number of threads per block
// std::size_t W = B*T; // total number of workers
// local
std::size_t b = blockIdx.x; // my block id
std::size_t t = threadIdx.x; // my thread id within my block
std::size_t w = b*T + t; // my worker id
// if my worker id exceeds the number of cells that require update
if (w >= pairs) {
// nothing for me to do
return;
}
// a constant
const value_t one = 1.0;
// find the beginning of my coarse offset
auto myCoarse = coarse + 2*w;
// find the beginning of my fine offset
auto myFine = fine + 2*w;
// and the beginning of where i store my result
auto myField = field + 2*w;
// do the math
myField[0] = (one*myCoarse[0] - margin) + (one * myFine[0] / zoom - refineMargin);
myField[1] = (one*myCoarse[1] - margin) + (one * myFine[1] / zoom - refineMargin);
// all done
return;
}
// end of file
|
be3c540d1fe712e8ce04d1a20c06c572bcc98c93.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "computeCost.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *Params = NULL;
hipMalloc(&Params, XSIZE*YSIZE);
const float *uproj = NULL;
hipMalloc(&uproj, XSIZE*YSIZE);
const float *mu = NULL;
hipMalloc(&mu, XSIZE*YSIZE);
const float *W = NULL;
hipMalloc(&W, XSIZE*YSIZE);
const bool *match = NULL;
hipMalloc(&match, XSIZE*YSIZE);
const int *iC = NULL;
hipMalloc(&iC, XSIZE*YSIZE);
const int *call = NULL;
hipMalloc(&call, XSIZE*YSIZE);
float *cmax = NULL;
hipMalloc(&cmax, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
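// the problem size has been padded up to a multiple of the block dimensions so the grid divides it evenly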
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
computeCost), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,uproj,mu,W,match,iC,call,cmax);
hipDeviceSynchronize();
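// 10 untimed warm-up launches below, then 1000 timed launches measured with steady_clock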
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
computeCost), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,uproj,mu,W,match,iC,call,cmax);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
computeCost), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,uproj,mu,W,match,iC,call,cmax);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | be3c540d1fe712e8ce04d1a20c06c572bcc98c93.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "computeCost.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *Params = NULL;
cudaMalloc(&Params, XSIZE*YSIZE);
const float *uproj = NULL;
cudaMalloc(&uproj, XSIZE*YSIZE);
const float *mu = NULL;
cudaMalloc(&mu, XSIZE*YSIZE);
const float *W = NULL;
cudaMalloc(&W, XSIZE*YSIZE);
const bool *match = NULL;
cudaMalloc(&match, XSIZE*YSIZE);
const int *iC = NULL;
cudaMalloc(&iC, XSIZE*YSIZE);
const int *call = NULL;
cudaMalloc(&call, XSIZE*YSIZE);
float *cmax = NULL;
cudaMalloc(&cmax, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
computeCost<<<gridBlock,threadBlock>>>(Params,uproj,mu,W,match,iC,call,cmax);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
computeCost<<<gridBlock,threadBlock>>>(Params,uproj,mu,W,match,iC,call,cmax);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
computeCost<<<gridBlock,threadBlock>>>(Params,uproj,mu,W,match,iC,call,cmax);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
28afff04a83c02ae73277927e55cab60bd034033.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// -------------------------------------------------------------
// CUDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision: 5633 $
// $Date: 2009-07-01 15:02:51 +1000 (Wed, 01 Jul 2009) $
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* scan_app.cu
*
* @brief CUDPP application-level scan routines
*/
/** \defgroup cudpp_app CUDPP Application-Level API
* The CUDPP Application-Level API contains functions
* that run on the host CPU and invoke GPU routines in
* the CUDPP \link cudpp_kernel Kernel-Level API\endlink.
* Application-Level API functions are used by
* CUDPP \link publicInterface Public Interface\endlink
* functions to implement CUDPP's core functionality.
* @{
*/
/** @name Scan Functions
* @{
*/
#include "cudpp.h"
#include "cudpp_util.h"
#include "cudpp_plan.h"
#include "kernel/scan_kernel.cu"
#include "kernel/vector_kernel.cu"
#include <cutil.h>
#include <cstdlib>
#include <cstdio>
#include <assert.h>
/** @brief Perform recursive scan on arbitrary size arrays
*
* This is the CPU-side workhorse function of the scan engine. This function
* invokes the CUDA kernels which perform the scan on individual blocks.
*
* Scans of large arrays must be split (possibly recursively) into a hierarchy of block scans,
 * where each block is scanned by a single CUDA thread block. At each recursive level of the scan,
* scanArrayRecursive first invokes a kernel to scan all blocks of that level, and if the level
* has more than one block, it calls itself recursively. On returning from each recursive level,
* the total sum of each block from the level below is added to all elements of the corresponding
* block in this level. See "Parallel Prefix Sum (Scan) in CUDA" for more information (see
* \ref references ).
*
* Template parameter \a T is the datatype; \a isBackward specifies backward or forward scan;
* \a isExclusive specifies exclusive or inclusive scan, and \a op specifies the binary associative
* operator to be used.
*
* @param[out] d_out The output array for the scan results
* @param[in] d_in The input array to be scanned
* @param[out] d_blockSums Array of arrays of per-block sums (one array per recursive level, allocated
* by allocScanStorage())
* @param[in] numElements The number of elements in the array to scan
* @param[in] numRows The number of rows in the array to scan
* @param[in] rowPitches Array of row pitches (one array per recursive level, allocated by
* allocScanStorage())
* @param[in] level The current recursive level of the scan
* @param[in] stream The stream to execute the kernel on
*/
template <class T, bool isBackward, bool isExclusive, CUDPPOperator op>
void scanArrayRecursive(T *d_out,
const T *d_in,
T **d_blockSums,
size_t numElements,
size_t numRows,
const size_t *rowPitches,
int level,
const hipStream_t stream)
{
unsigned int numBlocks =
max(1, (unsigned int)ceil((double)numElements / ((double)SCAN_ELTS_PER_THREAD * CTA_SIZE)));
unsigned int sharedEltsPerBlock = CTA_SIZE * 2;
unsigned int sharedMemSize = sizeof(T) * sharedEltsPerBlock;
// divide pitch by four since scan's load/store addresses are for vec4 elements
unsigned int rowPitch = 1;
unsigned int blockSumRowPitch = 1;
if (numRows > 1)
{
rowPitch = rowPitches[level] / 4;
blockSumRowPitch = (numBlocks > 1) ? rowPitches[level+1] / 4 : 0;
}
bool fullBlock = (numElements == numBlocks * SCAN_ELTS_PER_THREAD * CTA_SIZE);
// setup execution parameters
dim3 grid(numBlocks, numRows, 1);
dim3 threads(CTA_SIZE, 1, 1);
// make sure there are no CUDA errors before we start
CUT_CHECK_ERROR("scanArray before kernels");
unsigned int traitsCode = 0;
if (numBlocks > 1) traitsCode |= 1;
if (numRows > 1) traitsCode |= 2;
if (fullBlock) traitsCode |= 4;
switch (traitsCode)
{
case 0: // single block, single row, non-full block
hipLaunchKernelGGL(( scan4<T, ScanTraits<T, op, isBackward, isExclusive, false, false, false> >)
, dim3(grid), dim3(threads), sharedMemSize, stream ,
d_out, d_in, 0, numElements, rowPitch, blockSumRowPitch);
break;
case 1: // multiblock, single row, non-full block
hipLaunchKernelGGL(( scan4< T, ScanTraits<T, op, isBackward, isExclusive, false, true, false> >)
, dim3(grid), dim3(threads), sharedMemSize, stream ,
d_out, d_in, d_blockSums[level], numElements, rowPitch, blockSumRowPitch);
break;
case 2: // single block, multirow, non-full block
hipLaunchKernelGGL(( scan4<T, ScanTraits<T, op, isBackward, isExclusive, true, false, false> >)
, dim3(grid), dim3(threads), sharedMemSize, stream ,
d_out, d_in, 0, numElements, rowPitch, blockSumRowPitch);
break;
case 3: // multiblock, multirow, non-full block
hipLaunchKernelGGL(( scan4<T, ScanTraits<T, op, isBackward, isExclusive, true, true, false> >)
, dim3(grid), dim3(threads), sharedMemSize, stream ,
d_out, d_in, d_blockSums[level], numElements, rowPitch, blockSumRowPitch);
break;
case 4: // single block, single row, full block
hipLaunchKernelGGL(( scan4<T, ScanTraits<T, op, isBackward, isExclusive, false, false, true> >)
, dim3(grid), dim3(threads), sharedMemSize, stream ,
d_out, d_in, 0, numElements, rowPitch, blockSumRowPitch);
break;
case 5: // multiblock, single row, full block
hipLaunchKernelGGL(( scan4< T, ScanTraits<T, op, isBackward, isExclusive, false, true, true> >)
, dim3(grid), dim3(threads), sharedMemSize, stream ,
d_out, d_in, d_blockSums[level], numElements, rowPitch, blockSumRowPitch);
break;
case 6: // single block, multirow, full block
hipLaunchKernelGGL(( scan4<T, ScanTraits<T, op, isBackward, isExclusive, true, false, true> >)
, dim3(grid), dim3(threads), sharedMemSize, stream ,
d_out, d_in, 0, numElements, rowPitch, blockSumRowPitch);
break;
case 7: // multiblock, multirow, full block
hipLaunchKernelGGL(( scan4<T, ScanTraits<T, op, isBackward, isExclusive, true, true, true> >)
, dim3(grid), dim3(threads), sharedMemSize, stream ,
d_out, d_in, d_blockSums[level], numElements, rowPitch, blockSumRowPitch);
break;
}
CUT_CHECK_ERROR("prescan");
if (numBlocks > 1)
{
// After scanning all the sub-blocks, we are mostly done. But
// now we need to take all of the last values of the
// sub-blocks and scan those. This will give us a new value
        // that must be added to each block to get the final results.
scanArrayRecursive<T, isBackward, true, op>
((T*)d_blockSums[level], (const T*)d_blockSums[level],
(T**)d_blockSums, numBlocks, numRows, rowPitches, level + 1, stream); // recursive (CPU) call
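        // uniformly add each block's scanned total back into every element of that block in d_out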
hipLaunchKernelGGL(( vectorAddUniform4<T, op, SCAN_ELTS_PER_THREAD>)
, dim3(grid), dim3(threads),0, stream , d_out,
(T*)d_blockSums[level],
numElements,
rowPitch*4,
blockSumRowPitch*4,
0, 0);
CUT_CHECK_ERROR("vectorAddUniform");
}
}
// global
#ifdef __cplusplus
extern "C"
{
#endif
/** @brief Allocate intermediate arrays used by scan.
*
* Scans of large arrays must be split (possibly recursively) into a hierarchy
* of block scans, where each block is scanned by a single CUDA thread block.
* At each recursive level of the scan, we need an array in which to store the
* total sums of all blocks in that level. This function computes the amount
* of storage needed and allocates it.
*
* @param plan Pointer to CUDPPScanPlan object containing options and number
* of elements, which is used to compute storage requirements, and
* within which intermediate storage is allocated.
*/
void allocScanStorage(CUDPPScanPlan *plan)
{
//assert(config->_numEltsAllocated == 0); // shouldn't be called
plan->m_numEltsAllocated = plan->m_numElements;
size_t numElts = plan->m_numElements;
size_t level = 0;
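    // first pass: count how many levels of the block-sum hierarchy need intermediate storage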
do
{
size_t numBlocks =
max(1, (unsigned int)ceil((double)numElts / ((double)SCAN_ELTS_PER_THREAD * CTA_SIZE)));
if (numBlocks > 1)
{
level++;
}
numElts = numBlocks;
} while (numElts > 1);
size_t elementSize = 0;
switch(plan->m_config.datatype)
{
case CUDPP_INT:
plan->m_blockSums = (void**) malloc(level * sizeof(int*));
elementSize = sizeof(int);
break;
case CUDPP_UINT:
plan->m_blockSums = (void**) malloc(level * sizeof(unsigned int*));
elementSize = sizeof(unsigned int);
break;
case CUDPP_FLOAT:
plan->m_blockSums = (void**) malloc(level * sizeof(float*));
elementSize = sizeof(float);
break;
default:
break;
}
plan->m_numLevelsAllocated = level;
numElts = plan->m_numElements;
size_t numRows = plan->m_numRows;
plan->m_numRowsAllocated = numRows;
plan->m_rowPitches = 0;
if (numRows > 1)
{
plan->m_rowPitches = (size_t*) malloc((level + 1) * sizeof(size_t));
plan->m_rowPitches[0] = plan->m_rowPitch;
}
level = 0;
do
{
size_t numBlocks =
max(1, (unsigned int)ceil((double)numElts / ((double)SCAN_ELTS_PER_THREAD * CTA_SIZE)));
if (numBlocks > 1)
{
// Use hipMallocPitch for multi-row block sums to ensure alignment
if (numRows > 1)
{
size_t dpitch;
CUDA_SAFE_CALL( hipMallocPitch((void**) &(plan->m_blockSums[level]),
&dpitch,
numBlocks * elementSize,
numRows));
plan->m_rowPitches[level+1] = dpitch / elementSize;
level++;
}
else
{
CUDA_SAFE_CALL(hipMalloc((void**) &(plan->m_blockSums[level++]),
numBlocks * elementSize));
}
}
numElts = numBlocks;
} while (numElts > 1);
CUT_CHECK_ERROR("allocScanStorage");
}
/** @brief Deallocate intermediate block sums arrays in a CUDPPScanPlan object.
*
* These arrays must have been allocated by allocScanStorage(), which is called
* by the constructor of cudppScanPlan().
*
* @param plan Pointer to CUDPPScanPlan object initialized by allocScanStorage().
*/
void freeScanStorage(CUDPPScanPlan *plan)
{
for (unsigned int i = 0; i < plan->m_numLevelsAllocated; i++)
{
hipFree(plan->m_blockSums[i]);
}
CUT_CHECK_ERROR("freeScanStorage");
free((void**)plan->m_blockSums);
if (plan->m_numRows > 1)
free((void*)plan->m_rowPitches);
plan->m_blockSums = 0;
plan->m_numEltsAllocated = 0;
plan->m_numLevelsAllocated = 0;
}
/** @brief Dispatch function to perform a scan (prefix sum) on an
* array with the specified configuration.
*
* This is the dispatch routine which calls scanArrayRecursive() with
* appropriate template parameters and arguments to achieve the scan as
* specified in \a plan.
*
* @param[out] d_out The output array of scan results
* @param[in] d_in The input array
* @param[in] numElements The number of elements to scan
* @param[in] numRows The number of rows to scan in parallel
 * @param[in] stream The stream to execute the kernel on
* @param[in] plan Pointer to CUDPPScanPlan object containing scan options
* and intermediate storage
*/
void cudppScanDispatch(void *d_out,
const void *d_in,
size_t numElements,
size_t numRows,
const hipStream_t stream,
const CUDPPScanPlan *plan)
{
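    // the nested switches below select the compile-time instantiation of scanArrayRecursive
    // matching the plan's options (exclusive/backward), datatype, and operator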
if (CUDPP_OPTION_EXCLUSIVE & plan->m_config.options)
{
if (CUDPP_OPTION_BACKWARD & plan->m_config.options)
{
switch (plan->m_config.datatype)
{
case CUDPP_INT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<int, true, true, CUDPP_ADD>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<int, true, true, CUDPP_MULTIPLY>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<int, true, true, CUDPP_MAX>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<int, true, true, CUDPP_MIN>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
case CUDPP_UINT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<unsigned int, true, true, CUDPP_ADD>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<unsigned int, true, true, CUDPP_MULTIPLY>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<unsigned int, true, true, CUDPP_MAX>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<unsigned int, true, true, CUDPP_MIN>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
case CUDPP_FLOAT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<float, true, true, CUDPP_ADD>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<float, true, true, CUDPP_MULTIPLY>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<float, true, true, CUDPP_MAX>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<float, true, true, CUDPP_MIN>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
default:
break;
}
}
else
{
switch (plan->m_config.datatype)
{
case CUDPP_INT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<int, false, true, CUDPP_ADD>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<int, false, true, CUDPP_MULTIPLY>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<int, false, true, CUDPP_MAX>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<int, false, true, CUDPP_MIN>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
case CUDPP_UINT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<unsigned int, false, true, CUDPP_ADD>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<unsigned int, false, true, CUDPP_MULTIPLY>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<unsigned int, false, true, CUDPP_MAX>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<unsigned int, false, true, CUDPP_MIN>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
case CUDPP_FLOAT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<float, false, true, CUDPP_ADD>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<float, false, true, CUDPP_MULTIPLY>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<float, false, true, CUDPP_MAX>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<float, false, true, CUDPP_MIN>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
default:
break;
}
}
}
else
{
if (CUDPP_OPTION_BACKWARD & plan->m_config.options)
{
switch (plan->m_config.datatype)
{
case CUDPP_INT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<int, true, false, CUDPP_ADD>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<int, true, false, CUDPP_MULTIPLY>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<int, true, false, CUDPP_MAX>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<int, true, false, CUDPP_MIN>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
case CUDPP_UINT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<unsigned int, true, false, CUDPP_ADD>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<unsigned int, true, false, CUDPP_MULTIPLY>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<unsigned int, true, false, CUDPP_MAX>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<unsigned int, true, false, CUDPP_MIN>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
case CUDPP_FLOAT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<float, true, false, CUDPP_ADD>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<float, true, false, CUDPP_MULTIPLY>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<float, true, false, CUDPP_MAX>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<float, true, false, CUDPP_MIN>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
default:
break;
}
}
else
{
switch (plan->m_config.datatype)
{
case CUDPP_INT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<int, false, false, CUDPP_ADD>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<int, false, false, CUDPP_MULTIPLY>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<int, false, false, CUDPP_MAX>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<int, false, false, CUDPP_MIN>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
case CUDPP_UINT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<unsigned int, false, false, CUDPP_ADD>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<unsigned int, false, false, CUDPP_MULTIPLY>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<unsigned int, false, false, CUDPP_MAX>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<unsigned int, false, false, CUDPP_MIN>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
case CUDPP_FLOAT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<float, false, false, CUDPP_ADD>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<float, false, false, CUDPP_MULTIPLY>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<float, false, false, CUDPP_MAX>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<float, false, false, CUDPP_MIN>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
default:
break;
}
}
}
}
#ifdef __cplusplus
}
#endif
/** @} */ // end scan functions
/** @} */ // end cudpp_app
| 28afff04a83c02ae73277927e55cab60bd034033.cu | // -------------------------------------------------------------
// CUDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision: 5633 $
// $Date: 2009-07-01 15:02:51 +1000 (Wed, 01 Jul 2009) $
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* scan_app.cu
*
* @brief CUDPP application-level scan routines
*/
/** \defgroup cudpp_app CUDPP Application-Level API
* The CUDPP Application-Level API contains functions
* that run on the host CPU and invoke GPU routines in
* the CUDPP \link cudpp_kernel Kernel-Level API\endlink.
* Application-Level API functions are used by
* CUDPP \link publicInterface Public Interface\endlink
* functions to implement CUDPP's core functionality.
* @{
*/
/** @name Scan Functions
* @{
*/
#include "cudpp.h"
#include "cudpp_util.h"
#include "cudpp_plan.h"
#include "kernel/scan_kernel.cu"
#include "kernel/vector_kernel.cu"
#include <cutil.h>
#include <cstdlib>
#include <cstdio>
#include <assert.h>
/** @brief Perform recursive scan on arbitrary size arrays
*
* This is the CPU-side workhorse function of the scan engine. This function
* invokes the CUDA kernels which perform the scan on individual blocks.
*
* Scans of large arrays must be split (possibly recursively) into a hierarchy of block scans,
 * where each block is scanned by a single CUDA thread block. At each recursive level of the scan,
* scanArrayRecursive first invokes a kernel to scan all blocks of that level, and if the level
* has more than one block, it calls itself recursively. On returning from each recursive level,
* the total sum of each block from the level below is added to all elements of the corresponding
* block in this level. See "Parallel Prefix Sum (Scan) in CUDA" for more information (see
* \ref references ).
*
* Template parameter \a T is the datatype; \a isBackward specifies backward or forward scan;
* \a isExclusive specifies exclusive or inclusive scan, and \a op specifies the binary associative
* operator to be used.
*
* @param[out] d_out The output array for the scan results
* @param[in] d_in The input array to be scanned
* @param[out] d_blockSums Array of arrays of per-block sums (one array per recursive level, allocated
* by allocScanStorage())
* @param[in] numElements The number of elements in the array to scan
* @param[in] numRows The number of rows in the array to scan
* @param[in] rowPitches Array of row pitches (one array per recursive level, allocated by
* allocScanStorage())
* @param[in] level The current recursive level of the scan
* @param[in] stream The stream to execute the kernel on
*/
template <class T, bool isBackward, bool isExclusive, CUDPPOperator op>
void scanArrayRecursive(T *d_out,
const T *d_in,
T **d_blockSums,
size_t numElements,
size_t numRows,
const size_t *rowPitches,
int level,
const cudaStream_t stream)
{
unsigned int numBlocks =
max(1, (unsigned int)ceil((double)numElements / ((double)SCAN_ELTS_PER_THREAD * CTA_SIZE)));
unsigned int sharedEltsPerBlock = CTA_SIZE * 2;
unsigned int sharedMemSize = sizeof(T) * sharedEltsPerBlock;
// divide pitch by four since scan's load/store addresses are for vec4 elements
unsigned int rowPitch = 1;
unsigned int blockSumRowPitch = 1;
if (numRows > 1)
{
rowPitch = rowPitches[level] / 4;
blockSumRowPitch = (numBlocks > 1) ? rowPitches[level+1] / 4 : 0;
}
bool fullBlock = (numElements == numBlocks * SCAN_ELTS_PER_THREAD * CTA_SIZE);
// setup execution parameters
dim3 grid(numBlocks, numRows, 1);
dim3 threads(CTA_SIZE, 1, 1);
// make sure there are no CUDA errors before we start
CUT_CHECK_ERROR("scanArray before kernels");
unsigned int traitsCode = 0;
if (numBlocks > 1) traitsCode |= 1;
if (numRows > 1) traitsCode |= 2;
if (fullBlock) traitsCode |= 4;
switch (traitsCode)
{
case 0: // single block, single row, non-full block
scan4<T, ScanTraits<T, op, isBackward, isExclusive, false, false, false> >
<<< grid, threads, sharedMemSize, stream >>>
(d_out, d_in, 0, numElements, rowPitch, blockSumRowPitch);
break;
case 1: // multiblock, single row, non-full block
scan4< T, ScanTraits<T, op, isBackward, isExclusive, false, true, false> >
<<< grid, threads, sharedMemSize, stream >>>
(d_out, d_in, d_blockSums[level], numElements, rowPitch, blockSumRowPitch);
break;
case 2: // single block, multirow, non-full block
scan4<T, ScanTraits<T, op, isBackward, isExclusive, true, false, false> >
<<< grid, threads, sharedMemSize, stream >>>
(d_out, d_in, 0, numElements, rowPitch, blockSumRowPitch);
break;
case 3: // multiblock, multirow, non-full block
scan4<T, ScanTraits<T, op, isBackward, isExclusive, true, true, false> >
<<< grid, threads, sharedMemSize, stream >>>
(d_out, d_in, d_blockSums[level], numElements, rowPitch, blockSumRowPitch);
break;
case 4: // single block, single row, full block
scan4<T, ScanTraits<T, op, isBackward, isExclusive, false, false, true> >
<<< grid, threads, sharedMemSize, stream >>>
(d_out, d_in, 0, numElements, rowPitch, blockSumRowPitch);
break;
case 5: // multiblock, single row, full block
scan4< T, ScanTraits<T, op, isBackward, isExclusive, false, true, true> >
<<< grid, threads, sharedMemSize, stream >>>
(d_out, d_in, d_blockSums[level], numElements, rowPitch, blockSumRowPitch);
break;
case 6: // single block, multirow, full block
scan4<T, ScanTraits<T, op, isBackward, isExclusive, true, false, true> >
<<< grid, threads, sharedMemSize, stream >>>
(d_out, d_in, 0, numElements, rowPitch, blockSumRowPitch);
break;
case 7: // multiblock, multirow, full block
scan4<T, ScanTraits<T, op, isBackward, isExclusive, true, true, true> >
<<< grid, threads, sharedMemSize, stream >>>
(d_out, d_in, d_blockSums[level], numElements, rowPitch, blockSumRowPitch);
break;
}
CUT_CHECK_ERROR("prescan");
if (numBlocks > 1)
{
// After scanning all the sub-blocks, we are mostly done. But
// now we need to take all of the last values of the
// sub-blocks and scan those. This will give us a new value
// that must be sdded to each block to get the final results.
scanArrayRecursive<T, isBackward, true, op>
((T*)d_blockSums[level], (const T*)d_blockSums[level],
(T**)d_blockSums, numBlocks, numRows, rowPitches, level + 1, stream); // recursive (CPU) call
vectorAddUniform4<T, op, SCAN_ELTS_PER_THREAD>
<<< grid, threads,0, stream >>>(d_out,
(T*)d_blockSums[level],
numElements,
rowPitch*4,
blockSumRowPitch*4,
0, 0);
CUT_CHECK_ERROR("vectorAddUniform");
}
}
// global
#ifdef __cplusplus
extern "C"
{
#endif
/** @brief Allocate intermediate arrays used by scan.
*
* Scans of large arrays must be split (possibly recursively) into a hierarchy
* of block scans, where each block is scanned by a single CUDA thread block.
* At each recursive level of the scan, we need an array in which to store the
* total sums of all blocks in that level. This function computes the amount
* of storage needed and allocates it.
*
* @param plan Pointer to CUDPPScanPlan object containing options and number
* of elements, which is used to compute storage requirements, and
* within which intermediate storage is allocated.
*/
void allocScanStorage(CUDPPScanPlan *plan)
{
//assert(config->_numEltsAllocated == 0); // shouldn't be called
plan->m_numEltsAllocated = plan->m_numElements;
size_t numElts = plan->m_numElements;
size_t level = 0;
do
{
size_t numBlocks =
max(1, (unsigned int)ceil((double)numElts / ((double)SCAN_ELTS_PER_THREAD * CTA_SIZE)));
if (numBlocks > 1)
{
level++;
}
numElts = numBlocks;
} while (numElts > 1);
size_t elementSize = 0;
switch(plan->m_config.datatype)
{
case CUDPP_INT:
plan->m_blockSums = (void**) malloc(level * sizeof(int*));
elementSize = sizeof(int);
break;
case CUDPP_UINT:
plan->m_blockSums = (void**) malloc(level * sizeof(unsigned int*));
elementSize = sizeof(unsigned int);
break;
case CUDPP_FLOAT:
plan->m_blockSums = (void**) malloc(level * sizeof(float*));
elementSize = sizeof(float);
break;
default:
break;
}
plan->m_numLevelsAllocated = level;
numElts = plan->m_numElements;
size_t numRows = plan->m_numRows;
plan->m_numRowsAllocated = numRows;
plan->m_rowPitches = 0;
if (numRows > 1)
{
plan->m_rowPitches = (size_t*) malloc((level + 1) * sizeof(size_t));
plan->m_rowPitches[0] = plan->m_rowPitch;
}
level = 0;
do
{
size_t numBlocks =
max(1, (unsigned int)ceil((double)numElts / ((double)SCAN_ELTS_PER_THREAD * CTA_SIZE)));
if (numBlocks > 1)
{
// Use cudaMallocPitch for multi-row block sums to ensure alignment
if (numRows > 1)
{
size_t dpitch;
CUDA_SAFE_CALL( cudaMallocPitch((void**) &(plan->m_blockSums[level]),
&dpitch,
numBlocks * elementSize,
numRows));
plan->m_rowPitches[level+1] = dpitch / elementSize;
level++;
}
else
{
CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_blockSums[level++]),
numBlocks * elementSize));
}
}
numElts = numBlocks;
} while (numElts > 1);
CUT_CHECK_ERROR("allocScanStorage");
}
/** @brief Deallocate intermediate block sums arrays in a CUDPPScanPlan object.
*
* These arrays must have been allocated by allocScanStorage(), which is called
* by the constructor of cudppScanPlan().
*
* @param plan Pointer to CUDPPScanPlan object initialized by allocScanStorage().
*/
void freeScanStorage(CUDPPScanPlan *plan)
{
for (unsigned int i = 0; i < plan->m_numLevelsAllocated; i++)
{
cudaFree(plan->m_blockSums[i]);
}
CUT_CHECK_ERROR("freeScanStorage");
free((void**)plan->m_blockSums);
if (plan->m_numRows > 1)
free((void*)plan->m_rowPitches);
plan->m_blockSums = 0;
plan->m_numEltsAllocated = 0;
plan->m_numLevelsAllocated = 0;
}
/** @brief Dispatch function to perform a scan (prefix sum) on an
* array with the specified configuration.
*
* This is the dispatch routine which calls scanArrayRecursive() with
* appropriate template parameters and arguments to achieve the scan as
* specified in \a plan.
*
* @param[out] d_out The output array of scan results
* @param[in] d_in The input array
* @param[in] numElements The number of elements to scan
* @param[in] numRows The number of rows to scan in parallel
 * @param[in] stream The stream to execute the kernel on
* @param[in] plan Pointer to CUDPPScanPlan object containing scan options
* and intermediate storage
*/
void cudppScanDispatch(void *d_out,
const void *d_in,
size_t numElements,
size_t numRows,
const cudaStream_t stream,
const CUDPPScanPlan *plan)
{
if (CUDPP_OPTION_EXCLUSIVE & plan->m_config.options)
{
if (CUDPP_OPTION_BACKWARD & plan->m_config.options)
{
switch (plan->m_config.datatype)
{
case CUDPP_INT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<int, true, true, CUDPP_ADD>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<int, true, true, CUDPP_MULTIPLY>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<int, true, true, CUDPP_MAX>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<int, true, true, CUDPP_MIN>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
case CUDPP_UINT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<unsigned int, true, true, CUDPP_ADD>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<unsigned int, true, true, CUDPP_MULTIPLY>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<unsigned int, true, true, CUDPP_MAX>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<unsigned int, true, true, CUDPP_MIN>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
case CUDPP_FLOAT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<float, true, true, CUDPP_ADD>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<float, true, true, CUDPP_MULTIPLY>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<float, true, true, CUDPP_MAX>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<float, true, true, CUDPP_MIN>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
default:
break;
}
}
else
{
switch (plan->m_config.datatype)
{
case CUDPP_INT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<int, false, true, CUDPP_ADD>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<int, false, true, CUDPP_MULTIPLY>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<int, false, true, CUDPP_MAX>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<int, false, true, CUDPP_MIN>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
case CUDPP_UINT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<unsigned int, false, true, CUDPP_ADD>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<unsigned int, false, true, CUDPP_MULTIPLY>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<unsigned int, false, true, CUDPP_MAX>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<unsigned int, false, true, CUDPP_MIN>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
case CUDPP_FLOAT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<float, false, true, CUDPP_ADD>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<float, false, true, CUDPP_MULTIPLY>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<float, false, true, CUDPP_MAX>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<float, false, true, CUDPP_MIN>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
default:
break;
}
}
}
else
{
if (CUDPP_OPTION_BACKWARD & plan->m_config.options)
{
switch (plan->m_config.datatype)
{
case CUDPP_INT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<int, true, false, CUDPP_ADD>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<int, true, false, CUDPP_MULTIPLY>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<int, true, false, CUDPP_MAX>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<int, true, false, CUDPP_MIN>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
case CUDPP_UINT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<unsigned int, true, false, CUDPP_ADD>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<unsigned int, true, false, CUDPP_MULTIPLY>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<unsigned int, true, false, CUDPP_MAX>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<unsigned int, true, false, CUDPP_MIN>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
case CUDPP_FLOAT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<float, true, false, CUDPP_ADD>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<float, true, false, CUDPP_MULTIPLY>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<float, true, false, CUDPP_MAX>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<float, true, false, CUDPP_MIN>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
default:
break;
}
}
else
{
switch (plan->m_config.datatype)
{
case CUDPP_INT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<int, false, false, CUDPP_ADD>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<int, false, false, CUDPP_MULTIPLY>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<int, false, false, CUDPP_MAX>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<int, false, false, CUDPP_MIN>
((int*)d_out, (const int*)d_in,
(int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
case CUDPP_UINT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<unsigned int, false, false, CUDPP_ADD>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<unsigned int, false, false, CUDPP_MULTIPLY>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<unsigned int, false, false, CUDPP_MAX>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<unsigned int, false, false, CUDPP_MIN>
((unsigned int*)d_out, (const unsigned int*)d_in,
(unsigned int**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
case CUDPP_FLOAT:
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<float, false, false, CUDPP_ADD>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<float, false, false, CUDPP_MULTIPLY>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MAX:
scanArrayRecursive<float, false, false, CUDPP_MAX>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
case CUDPP_MIN:
scanArrayRecursive<float, false, false, CUDPP_MIN>
((float*)d_out, (const float*)d_in,
(float**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0, stream);
break;
default:
break;
}
break;
default:
break;
}
}
}
}
#ifdef __cplusplus
}
#endif
/** @} */ // end scan functions
/** @} */ // end cudpp_app
|
dda83ea547ab4fe215c9d02ac7819037d9aa906c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/distributions/uniform_distribution.h"
#include "oneflow/core/common/data_type.h"
namespace oneflow {
namespace {
template<typename T>
__device__ T GenUniform(hiprandState_t* state, const T low, const T high);
#define INITIATE_GENUNIFORM(T, typeproto) \
template<> \
__device__ T GenUniform<T>(hiprandState_t * state, const T low, const T high) { \
return hiprand_uniform(state) * (high - low) + low; \
}
OF_PP_FOR_EACH_TUPLE(INITIATE_GENUNIFORM, INT_DATA_TYPE_SEQ)
template<>
__device__ float GenUniform<float>(hiprandState_t* state, const float low, const float high) {
return hiprand_uniform(state) * (high - low) + low;
}
template<>
__device__ double GenUniform<double>(hiprandState_t* state, const double low, const double high) {
return hiprand_uniform_double(state) * (high - low) + low;
}
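// Note on the kernel below: each launched thread owns one persistent curand
// state; it copies that state into a register, draws at most one uniform value
// between low and high when its index falls inside elem_cnt, and writes the
// advanced state back so subsequent launches continue the same sequence.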
template<typename T>
__global__ void GenerateGpu(hiprandState_t* state, const int64_t elem_cnt, T* dptr, const T low,
const T high) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
hiprandState_t localState = state[id];
if (id < elem_cnt) { dptr[id] = GenUniform<T>(&localState, low, high); }
state[id] = localState;
}
} // namespace
template<typename T>
void UniformDistribution<DeviceType::kGPU, T>::operator()(
DeviceCtx* device_ctx, const int64_t elem_cnt, T* dptr,
const std::shared_ptr<one::Generator>& generator) const {
CHECK_GE(elem_cnt, 0);
auto gen = CHECK_JUST(generator->Get<one::CUDAGeneratorImpl>());
int32_t block_num = gen->max_block_num();
int32_t thread_num = gen->max_thread_num();
auto* curand_states = gen->curand_states();
hipLaunchKernelGGL(( GenerateGpu<T>), dim3(block_num), dim3(thread_num), 0, device_ctx->cuda_stream(), curand_states, elem_cnt,
dptr, low_, high_);
}
#define INITIATE_GPU_UNIFORM_DISTRIBUTION(T, typeproto) \
template void UniformDistribution<DeviceType::kGPU, T>::operator()( \
DeviceCtx* device_ctx, const int64_t elem_cnt, T* dptr, \
const std::shared_ptr<one::Generator>& generator) const;
OF_PP_FOR_EACH_TUPLE(INITIATE_GPU_UNIFORM_DISTRIBUTION, FLOATING_DATA_TYPE_SEQ)
OF_PP_FOR_EACH_TUPLE(INITIATE_GPU_UNIFORM_DISTRIBUTION, INT_DATA_TYPE_SEQ)
} // namespace oneflow
| dda83ea547ab4fe215c9d02ac7819037d9aa906c.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/distributions/uniform_distribution.h"
#include "oneflow/core/common/data_type.h"
namespace oneflow {
namespace {
template<typename T>
__device__ T GenUniform(curandState* state, const T low, const T high);
#define INITIATE_GENUNIFORM(T, typeproto) \
template<> \
__device__ T GenUniform<T>(curandState * state, const T low, const T high) { \
return curand_uniform(state) * (high - low) + low; \
}
OF_PP_FOR_EACH_TUPLE(INITIATE_GENUNIFORM, INT_DATA_TYPE_SEQ)
template<>
__device__ float GenUniform<float>(curandState* state, const float low, const float high) {
return curand_uniform(state) * (high - low) + low;
}
template<>
__device__ double GenUniform<double>(curandState* state, const double low, const double high) {
return curand_uniform_double(state) * (high - low) + low;
}
template<typename T>
__global__ void GenerateGpu(curandState* state, const int64_t elem_cnt, T* dptr, const T low,
const T high) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
curandState localState = state[id];
if (id < elem_cnt) { dptr[id] = GenUniform<T>(&localState, low, high); }
state[id] = localState;
}
} // namespace
template<typename T>
void UniformDistribution<DeviceType::kGPU, T>::operator()(
DeviceCtx* device_ctx, const int64_t elem_cnt, T* dptr,
const std::shared_ptr<one::Generator>& generator) const {
CHECK_GE(elem_cnt, 0);
auto gen = CHECK_JUST(generator->Get<one::CUDAGeneratorImpl>());
int32_t block_num = gen->max_block_num();
int32_t thread_num = gen->max_thread_num();
auto* curand_states = gen->curand_states();
GenerateGpu<T><<<block_num, thread_num, 0, device_ctx->cuda_stream()>>>(curand_states, elem_cnt,
dptr, low_, high_);
}
#define INITIATE_GPU_UNIFORM_DISTRIBUTION(T, typeproto) \
template void UniformDistribution<DeviceType::kGPU, T>::operator()( \
DeviceCtx* device_ctx, const int64_t elem_cnt, T* dptr, \
const std::shared_ptr<one::Generator>& generator) const;
OF_PP_FOR_EACH_TUPLE(INITIATE_GPU_UNIFORM_DISTRIBUTION, FLOATING_DATA_TYPE_SEQ)
OF_PP_FOR_EACH_TUPLE(INITIATE_GPU_UNIFORM_DISTRIBUTION, INT_DATA_TYPE_SEQ)
} // namespace oneflow
|
82894abc659efd934ee94499947f0775b6726bd9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hip/hip_fp16.h>
#include <stdio.h>
#include <algorithm>
#include <cmath>
#include <hipcub/hipcub.hpp>
#include "amir_cuda_util/cuda_util.h"
#include "torch_cum_maxmin.h"
namespace amirstan {
namespace plugin {
using namespace amirstan::cuda;
template <typename T, bool is_max>
struct BlockPrefixPairCumCallbackOp;
template <typename T>
struct BlockPrefixPairCumCallbackOp<T, true> {
// Running prefix
hipcub::KeyValuePair<int, T> running_total;
// Constructor
__device__ BlockPrefixPairCumCallbackOp(
hipcub::KeyValuePair<int, T> running_total)
: running_total(running_total) {}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide
// scan.
__device__ hipcub::KeyValuePair<int, T> operator()(
hipcub::KeyValuePair<int, T> block_aggregate) {
hipcub::KeyValuePair<int, T> old_prefix = running_total;
running_total = (block_aggregate.value > old_prefix.value) ? block_aggregate
: old_prefix;
return old_prefix;
}
};
template <typename T>
struct BlockPrefixPairCumCallbackOp<T, false> {
// Running prefix
hipcub::KeyValuePair<int, T> running_total;
// Constructor
__device__ BlockPrefixPairCumCallbackOp(
hipcub::KeyValuePair<int, T> running_total)
: running_total(running_total) {}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide
// scan.
__device__ hipcub::KeyValuePair<int, T> operator()(
hipcub::KeyValuePair<int, T> block_aggregate) {
hipcub::KeyValuePair<int, T> old_prefix = running_total;
running_total = (block_aggregate.value < old_prefix.value) ? block_aggregate
: old_prefix;
return old_prefix;
}
};
template <typename T>
__global__ void torch_cum_maxmin_warp_kernel(T *output, int *out_index,
const T *input, size_t stride,
int dim_size, size_t cum_size,
const int cum_type) {
// create block scan
  typedef hipcub::WarpScan<hipcub::KeyValuePair<int, T>> warpScan;
__shared__ union {
typename warpScan::TempStorage scan[CUDA_NUM_WARP];
} temp_storage;
for (int index =
(blockIdx.x * CUDA_NUM_WARP) + int(threadIdx.x / CUDA_WARP_SIZE);
index < cum_size; index += gridDim.x * CUDA_NUM_WARP) {
// compute cum start
const size_t pre_index = index / stride;
const size_t post_index = index % stride;
const size_t cum_start = pre_index * stride * dim_size + post_index;
hipcub::KeyValuePair<int, T> aggregate_value{0, input[cum_start]};
for (int warp_offset = 0; warp_offset < dim_size;
warp_offset += CUDA_WARP_SIZE) {
const size_t cum_position = warp_offset + threadIdx.x % CUDA_WARP_SIZE;
hipcub::KeyValuePair<int, T> thread_data = {
cum_position, cum_position < dim_size
? input[cum_start + cum_position * stride]
: 0};
if (cum_type == 0) {
thread_data = thread_data.value > aggregate_value.value
? thread_data
: aggregate_value;
warpScan(temp_storage.scan[int(threadIdx.x / CUDA_WARP_SIZE)])
.InclusiveScan(thread_data, thread_data, hipcub::ArgMax(),
aggregate_value);
} else {
thread_data = thread_data.value < aggregate_value.value
? thread_data
: aggregate_value;
warpScan(temp_storage.scan[int(threadIdx.x / CUDA_WARP_SIZE)])
.InclusiveScan(thread_data, thread_data, hipcub::ArgMin(),
aggregate_value);
}
// Store scanned items to output segment
if (cum_position < dim_size) {
output[cum_start + cum_position * stride] = thread_data.value;
out_index[cum_start + cum_position * stride] = thread_data.key;
}
}
}
}
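// create_size_stride copies the dimensions and derives row-major strides,
// e.g. dims {2, 3, 4} -> size {2, 3, 4}, stride {12, 4, 1}.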
static void create_size_stride(const int *dims, int nb_dims, TensorSize &size,
TensorStride &stride) {
memcpy(&size.size[0], dims, sizeof(int) * nb_dims);
stride.size[nb_dims - 1] = 1;
for (int i = nb_dims - 2; i >= 0; --i) {
stride.size[i] = stride.size[i + 1] * size.size[i + 1];
}
}
template <typename T>
void torch_cum_maxmin(T *output, int *index, const T *input, int *input_dims,
int nb_dims, int cum_dim, int cum_type,
hipStream_t stream) {
TensorSize ts_input_size;
TensorStride input_stride;
create_size_stride(input_dims, nb_dims, ts_input_size, input_stride);
size_t cum_size = 1;
for (int i = 0; i < nb_dims; ++i) {
if (i != cum_dim) {
cum_size *= ts_input_size.size[i];
}
}
size_t num_blocks = std::min<long>(
kMaxGridNum, (cum_size + CUDA_NUM_WARP - 1) / CUDA_NUM_WARP);
hipLaunchKernelGGL(( torch_cum_maxmin_warp_kernel<T>), dim3(num_blocks), dim3(CUDA_NUM_THREADS), 0, stream,
output, index, input, input_stride.size[cum_dim],
ts_input_size.size[cum_dim], cum_size, cum_type);
}
template void torch_cum_maxmin<float>(float *output, int *index,
const float *input, int *input_dims,
int nb_dims, int cum_dim, int cum_type,
hipStream_t stream);
} // namespace plugin
} // namespace amirstan
| 82894abc659efd934ee94499947f0775b6726bd9.cu | #include <cuda_fp16.h>
#include <stdio.h>
#include <algorithm>
#include <cmath>
#include <cub/cub.cuh>
#include "amir_cuda_util/cuda_util.h"
#include "torch_cum_maxmin.h"
namespace amirstan {
namespace plugin {
using namespace amirstan::cuda;
template <typename T, bool is_max>
struct BlockPrefixPairCumCallbackOp;
template <typename T>
struct BlockPrefixPairCumCallbackOp<T, true> {
// Running prefix
cub::KeyValuePair<int, T> running_total;
// Constructor
__device__ BlockPrefixPairCumCallbackOp(
cub::KeyValuePair<int, T> running_total)
: running_total(running_total) {}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide
// scan.
__device__ cub::KeyValuePair<int, T> operator()(
cub::KeyValuePair<int, T> block_aggregate) {
cub::KeyValuePair<int, T> old_prefix = running_total;
running_total = (block_aggregate.value > old_prefix.value) ? block_aggregate
: old_prefix;
return old_prefix;
}
};
template <typename T>
struct BlockPrefixPairCumCallbackOp<T, false> {
// Running prefix
cub::KeyValuePair<int, T> running_total;
// Constructor
__device__ BlockPrefixPairCumCallbackOp(
cub::KeyValuePair<int, T> running_total)
: running_total(running_total) {}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide
// scan.
__device__ cub::KeyValuePair<int, T> operator()(
cub::KeyValuePair<int, T> block_aggregate) {
cub::KeyValuePair<int, T> old_prefix = running_total;
running_total = (block_aggregate.value < old_prefix.value) ? block_aggregate
: old_prefix;
return old_prefix;
}
};
template <typename T>
__global__ void torch_cum_maxmin_warp_kernel(T *output, int *out_index,
const T *input, size_t stride,
int dim_size, size_t cum_size,
const int cum_type) {
// create block scan
typedef cub::WarpScan<cub::KeyValuePair<int, T>> warpScan;
__shared__ union {
typename warpScan::TempStorage scan[CUDA_NUM_WARP];
} temp_storage;
for (int index =
(blockIdx.x * CUDA_NUM_WARP) + int(threadIdx.x / CUDA_WARP_SIZE);
index < cum_size; index += gridDim.x * CUDA_NUM_WARP) {
// compute cum start
const size_t pre_index = index / stride;
const size_t post_index = index % stride;
const size_t cum_start = pre_index * stride * dim_size + post_index;
cub::KeyValuePair<int, T> aggregate_value{0, input[cum_start]};
for (int warp_offset = 0; warp_offset < dim_size;
warp_offset += CUDA_WARP_SIZE) {
const size_t cum_position = warp_offset + threadIdx.x % CUDA_WARP_SIZE;
cub::KeyValuePair<int, T> thread_data = {
cum_position, cum_position < dim_size
? input[cum_start + cum_position * stride]
: 0};
if (cum_type == 0) {
thread_data = thread_data.value > aggregate_value.value
? thread_data
: aggregate_value;
warpScan(temp_storage.scan[int(threadIdx.x / CUDA_WARP_SIZE)])
.InclusiveScan(thread_data, thread_data, cub::ArgMax(),
aggregate_value);
} else {
thread_data = thread_data.value < aggregate_value.value
? thread_data
: aggregate_value;
warpScan(temp_storage.scan[int(threadIdx.x / CUDA_WARP_SIZE)])
.InclusiveScan(thread_data, thread_data, cub::ArgMin(),
aggregate_value);
}
// Store scanned items to output segment
if (cum_position < dim_size) {
output[cum_start + cum_position * stride] = thread_data.value;
out_index[cum_start + cum_position * stride] = thread_data.key;
}
}
}
}
static void create_size_stride(const int *dims, int nb_dims, TensorSize &size,
TensorStride &stride) {
memcpy(&size.size[0], dims, sizeof(int) * nb_dims);
stride.size[nb_dims - 1] = 1;
for (int i = nb_dims - 2; i >= 0; --i) {
stride.size[i] = stride.size[i + 1] * size.size[i + 1];
}
}
template <typename T>
void torch_cum_maxmin(T *output, int *index, const T *input, int *input_dims,
int nb_dims, int cum_dim, int cum_type,
cudaStream_t stream) {
TensorSize ts_input_size;
TensorStride input_stride;
create_size_stride(input_dims, nb_dims, ts_input_size, input_stride);
size_t cum_size = 1;
for (int i = 0; i < nb_dims; ++i) {
if (i != cum_dim) {
cum_size *= ts_input_size.size[i];
}
}
size_t num_blocks = std::min<long>(
kMaxGridNum, (cum_size + CUDA_NUM_WARP - 1) / CUDA_NUM_WARP);
torch_cum_maxmin_warp_kernel<T><<<num_blocks, CUDA_NUM_THREADS, 0, stream>>>(
output, index, input, input_stride.size[cum_dim],
ts_input_size.size[cum_dim], cum_size, cum_type);
}
template void torch_cum_maxmin<float>(float *output, int *index,
const float *input, int *input_dims,
int nb_dims, int cum_dim, int cum_type,
cudaStream_t stream);
} // namespace plugin
} // namespace amirstan
|
7ec203f363665d48052af785fb82fc4cfd968ccc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <math.h>
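// Pairwise tree reduction over a 16-element vector: on pass i (passo = 2^i,
// indice = 2^(i+1)) every thread whose index is a multiple of `indice` adds the
// element `passo` positions ahead. After 4 passes d_vetA[0] holds the total
// sum (1 + 2 + ... + 16 = 136); the host copies the array back and prints the
// partial results after each pass.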
__global__ void somaMatrizGPU(int *d_vetA, int indice, int passo){
int id = blockDim.x * blockIdx.x + threadIdx.x;
if((id % indice) == 0)
d_vetA[id] += d_vetA[id+passo];
}
int main(){
int h_Size = 16;
int j, i = 0;
int h_vetA[16]={1,2,3,4,5,6,7,8,9,10, 11, 12, 13, 14, 15, 16};
int *d_vetA;
int passo, indice;
int block = h_Size;
hipDeviceReset();
hipMalloc((void**) &d_vetA, h_Size * sizeof(int));
hipMemcpy(d_vetA, h_vetA, h_Size * sizeof(int), hipMemcpyHostToDevice);
for(i = 0; i < 4; i++){
indice = pow(2, i+1);
passo = pow(2, i);
hipLaunchKernelGGL(( somaMatrizGPU), dim3(8), dim3(2), 0, 0, d_vetA, indice, passo);
hipMemcpy(h_vetA, d_vetA, h_Size * sizeof(int), hipMemcpyDeviceToHost);
for(j=0; j < h_Size; j++){
printf("%d, ", h_vetA[j]);
}
printf("\n");
}
hipDeviceSynchronize();
hipFree(d_vetA);
return 0;
}
| 7ec203f363665d48052af785fb82fc4cfd968ccc.cu | #include <stdio.h>
#include <cuda.h>
#include <math.h>
__global__ void somaMatrizGPU(int *d_vetA, int indice, int passo){
int id = blockDim.x * blockIdx.x + threadIdx.x;
if((id % indice) == 0)
d_vetA[id] += d_vetA[id+passo];
}
int main(){
int h_Size = 16;
int j, i = 0;
int h_vetA[16]={1,2,3,4,5,6,7,8,9,10, 11, 12, 13, 14, 15, 16};
int *d_vetA;
int passo, indice;
int block = h_Size;
cudaDeviceReset();
cudaMalloc((void**) &d_vetA, h_Size * sizeof(int));
cudaMemcpy(d_vetA, h_vetA, h_Size * sizeof(int), cudaMemcpyHostToDevice);
for(i = 0; i < 4; i++){
indice = pow(2, i+1);
passo = pow(2, i);
somaMatrizGPU<<<8, 2>>>(d_vetA, indice, passo);
cudaMemcpy(h_vetA, d_vetA, h_Size * sizeof(int), cudaMemcpyDeviceToHost);
for(j=0; j < h_Size; j++){
printf("%d, ", h_vetA[j]);
}
printf("\n");
}
cudaDeviceSynchronize();
cudaFree(d_vetA);
return 0;
}
|
1926ebdb503ddc36ebc56d00f21181c1aa3b544a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* sha1.cu CUDA Implementation of SHA1 Hashing
*
* Date: 12 June 2019
* Revision: 1
*
* Based on the public domain Reference Implementation in C, by
* Brad Conte, original code here:
*
* https://github.com/B-Con/crypto-algorithms
*
* This file is released into the Public Domain.
*/
/*************************** HEADER FILES ***************************/
#include <stdlib.h>
#include <memory.h>
#include "sha1.cuh"
/****************************** MACROS ******************************/
// SHA1 outputs a 20 byte digest
/**************************** DATA TYPES ****************************/
typedef struct {
BYTE data[64];
WORD datalen;
unsigned long long bitlen;
WORD state[5];
WORD k[4];
} CUDA_SHA1_CTX;
/****************************** MACROS ******************************/
#ifndef ROTLEFT
#define ROTLEFT(a,b) (((a) << (b)) | ((a) >> (32-(b))))
#endif
/*********************** FUNCTION DEFINITIONS ***********************/
__device__ __forceinline__ void cuda_sha1_transform(CUDA_SHA1_CTX *ctx, const BYTE data[])
{
WORD a, b, c, d, e, i, j, t, m[80];
for (i = 0, j = 0; i < 16; ++i, j += 4)
m[i] = (data[j] << 24) + (data[j + 1] << 16) + (data[j + 2] << 8) + (data[j + 3]);
for ( ; i < 80; ++i) {
m[i] = (m[i - 3] ^ m[i - 8] ^ m[i - 14] ^ m[i - 16]);
m[i] = (m[i] << 1) | (m[i] >> 31);
}
a = ctx->state[0];
b = ctx->state[1];
c = ctx->state[2];
d = ctx->state[3];
e = ctx->state[4];
for (i = 0; i < 20; ++i) {
t = ROTLEFT(a, 5) + ((b & c) ^ (~b & d)) + e + ctx->k[0] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
for ( ; i < 40; ++i) {
t = ROTLEFT(a, 5) + (b ^ c ^ d) + e + ctx->k[1] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
for ( ; i < 60; ++i) {
t = ROTLEFT(a, 5) + ((b & c) ^ (b & d) ^ (c & d)) + e + ctx->k[2] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
for ( ; i < 80; ++i) {
t = ROTLEFT(a, 5) + (b ^ c ^ d) + e + ctx->k[3] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
ctx->state[0] += a;
ctx->state[1] += b;
ctx->state[2] += c;
ctx->state[3] += d;
ctx->state[4] += e;
}
__device__ void cuda_sha1_init(CUDA_SHA1_CTX *ctx)
{
ctx->datalen = 0;
ctx->bitlen = 0;
ctx->state[0] = 0x67452301;
ctx->state[1] = 0xEFCDAB89;
ctx->state[2] = 0x98BADCFE;
ctx->state[3] = 0x10325476;
ctx->state[4] = 0xc3d2e1f0;
ctx->k[0] = 0x5a827999;
ctx->k[1] = 0x6ed9eba1;
ctx->k[2] = 0x8f1bbcdc;
ctx->k[3] = 0xca62c1d6;
}
__device__ void cuda_sha1_update(CUDA_SHA1_CTX *ctx, const BYTE data[], size_t len)
{
size_t i;
for (i = 0; i < len; ++i) {
ctx->data[ctx->datalen] = data[i];
ctx->datalen++;
if (ctx->datalen == 64) {
cuda_sha1_transform(ctx, ctx->data);
ctx->bitlen += 512;
ctx->datalen = 0;
}
}
}
__device__ void cuda_sha1_final(CUDA_SHA1_CTX *ctx, BYTE hash[])
{
WORD i;
i = ctx->datalen;
// Pad whatever data is left in the buffer.
if (ctx->datalen < 56) {
ctx->data[i++] = 0x80;
while (i < 56)
ctx->data[i++] = 0x00;
}
else {
ctx->data[i++] = 0x80;
while (i < 64)
ctx->data[i++] = 0x00;
cuda_sha1_transform(ctx, ctx->data);
memset(ctx->data, 0, 56);
}
// Append to the padding the total message's length in bits and transform.
ctx->bitlen += ctx->datalen * 8;
ctx->data[63] = ctx->bitlen;
ctx->data[62] = ctx->bitlen >> 8;
ctx->data[61] = ctx->bitlen >> 16;
ctx->data[60] = ctx->bitlen >> 24;
ctx->data[59] = ctx->bitlen >> 32;
ctx->data[58] = ctx->bitlen >> 40;
ctx->data[57] = ctx->bitlen >> 48;
ctx->data[56] = ctx->bitlen >> 56;
cuda_sha1_transform(ctx, ctx->data);
// Since this implementation uses little endian byte ordering and MD uses big endian,
// reverse all the bytes when copying the final state to the output hash.
for (i = 0; i < 4; ++i) {
hash[i] = (ctx->state[0] >> (24 - i * 8)) & 0x000000ff;
hash[i + 4] = (ctx->state[1] >> (24 - i * 8)) & 0x000000ff;
hash[i + 8] = (ctx->state[2] >> (24 - i * 8)) & 0x000000ff;
hash[i + 12] = (ctx->state[3] >> (24 - i * 8)) & 0x000000ff;
hash[i + 16] = (ctx->state[4] >> (24 - i * 8)) & 0x000000ff;
}
}
__global__ void kernel_sha1_hash(BYTE* indata, WORD inlen, BYTE* outdata, WORD n_batch)
{
WORD thread = blockIdx.x * blockDim.x + threadIdx.x;
if (thread >= n_batch)
{
return;
}
BYTE* in = indata + thread * inlen;
BYTE* out = outdata + thread * SHA1_BLOCK_SIZE;
CUDA_SHA1_CTX ctx;
cuda_sha1_init(&ctx);
cuda_sha1_update(&ctx, in, inlen);
cuda_sha1_final(&ctx, out);
}
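// Host-side batch entry point: `in` holds n_batch messages of inlen bytes laid
// out contiguously, `out` receives n_batch 20-byte (SHA1_BLOCK_SIZE) digests,
// and the kernel launch is repeated n_iter times over the same input.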
void mcm_cuda_sha1_hash_batch(BYTE* in, WORD inlen, BYTE* out, WORD n_batch, WORD n_iter)
{
BYTE *cuda_indata;
BYTE *cuda_outdata;
hipMalloc(&cuda_indata, inlen * n_batch);
hipMalloc(&cuda_outdata, SHA1_BLOCK_SIZE * n_batch);
hipMemcpy(cuda_indata, in, inlen * n_batch, hipMemcpyHostToDevice);
WORD thread = WG_SIZE;
WORD block = (n_batch / thread) + (n_batch % thread != 0);
for(int i = 0 ; i < n_iter ; ++i)
kernel_sha1_hash << < block, thread >> > (cuda_indata, inlen, cuda_outdata, n_batch);
hipMemcpy(out, cuda_outdata, SHA1_BLOCK_SIZE * n_batch, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
printf("Error cuda sha1 hash: %s \n", hipGetErrorString(error));
}
hipFree(cuda_indata);
hipFree(cuda_outdata);
}
| 1926ebdb503ddc36ebc56d00f21181c1aa3b544a.cu | /*
* sha1.cu CUDA Implementation of SHA1 Hashing
*
* Date: 12 June 2019
* Revision: 1
*
* Based on the public domain Reference Implementation in C, by
* Brad Conte, original code here:
*
* https://github.com/B-Con/crypto-algorithms
*
* This file is released into the Public Domain.
*/
/*************************** HEADER FILES ***************************/
#include <stdlib.h>
#include <memory.h>
#include "sha1.cuh"
/****************************** MACROS ******************************/
// SHA1 outputs a 20 byte digest
/**************************** DATA TYPES ****************************/
typedef struct {
BYTE data[64];
WORD datalen;
unsigned long long bitlen;
WORD state[5];
WORD k[4];
} CUDA_SHA1_CTX;
/****************************** MACROS ******************************/
#ifndef ROTLEFT
#define ROTLEFT(a,b) (((a) << (b)) | ((a) >> (32-(b))))
#endif
/*********************** FUNCTION DEFINITIONS ***********************/
__device__ __forceinline__ void cuda_sha1_transform(CUDA_SHA1_CTX *ctx, const BYTE data[])
{
WORD a, b, c, d, e, i, j, t, m[80];
for (i = 0, j = 0; i < 16; ++i, j += 4)
m[i] = (data[j] << 24) + (data[j + 1] << 16) + (data[j + 2] << 8) + (data[j + 3]);
for ( ; i < 80; ++i) {
m[i] = (m[i - 3] ^ m[i - 8] ^ m[i - 14] ^ m[i - 16]);
m[i] = (m[i] << 1) | (m[i] >> 31);
}
a = ctx->state[0];
b = ctx->state[1];
c = ctx->state[2];
d = ctx->state[3];
e = ctx->state[4];
for (i = 0; i < 20; ++i) {
t = ROTLEFT(a, 5) + ((b & c) ^ (~b & d)) + e + ctx->k[0] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
for ( ; i < 40; ++i) {
t = ROTLEFT(a, 5) + (b ^ c ^ d) + e + ctx->k[1] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
for ( ; i < 60; ++i) {
t = ROTLEFT(a, 5) + ((b & c) ^ (b & d) ^ (c & d)) + e + ctx->k[2] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
for ( ; i < 80; ++i) {
t = ROTLEFT(a, 5) + (b ^ c ^ d) + e + ctx->k[3] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
ctx->state[0] += a;
ctx->state[1] += b;
ctx->state[2] += c;
ctx->state[3] += d;
ctx->state[4] += e;
}
__device__ void cuda_sha1_init(CUDA_SHA1_CTX *ctx)
{
ctx->datalen = 0;
ctx->bitlen = 0;
ctx->state[0] = 0x67452301;
ctx->state[1] = 0xEFCDAB89;
ctx->state[2] = 0x98BADCFE;
ctx->state[3] = 0x10325476;
ctx->state[4] = 0xc3d2e1f0;
ctx->k[0] = 0x5a827999;
ctx->k[1] = 0x6ed9eba1;
ctx->k[2] = 0x8f1bbcdc;
ctx->k[3] = 0xca62c1d6;
}
__device__ void cuda_sha1_update(CUDA_SHA1_CTX *ctx, const BYTE data[], size_t len)
{
size_t i;
for (i = 0; i < len; ++i) {
ctx->data[ctx->datalen] = data[i];
ctx->datalen++;
if (ctx->datalen == 64) {
cuda_sha1_transform(ctx, ctx->data);
ctx->bitlen += 512;
ctx->datalen = 0;
}
}
}
__device__ void cuda_sha1_final(CUDA_SHA1_CTX *ctx, BYTE hash[])
{
WORD i;
i = ctx->datalen;
// Pad whatever data is left in the buffer.
if (ctx->datalen < 56) {
ctx->data[i++] = 0x80;
while (i < 56)
ctx->data[i++] = 0x00;
}
else {
ctx->data[i++] = 0x80;
while (i < 64)
ctx->data[i++] = 0x00;
cuda_sha1_transform(ctx, ctx->data);
memset(ctx->data, 0, 56);
}
// Append to the padding the total message's length in bits and transform.
ctx->bitlen += ctx->datalen * 8;
ctx->data[63] = ctx->bitlen;
ctx->data[62] = ctx->bitlen >> 8;
ctx->data[61] = ctx->bitlen >> 16;
ctx->data[60] = ctx->bitlen >> 24;
ctx->data[59] = ctx->bitlen >> 32;
ctx->data[58] = ctx->bitlen >> 40;
ctx->data[57] = ctx->bitlen >> 48;
ctx->data[56] = ctx->bitlen >> 56;
cuda_sha1_transform(ctx, ctx->data);
// Since this implementation uses little endian byte ordering and MD uses big endian,
// reverse all the bytes when copying the final state to the output hash.
for (i = 0; i < 4; ++i) {
hash[i] = (ctx->state[0] >> (24 - i * 8)) & 0x000000ff;
hash[i + 4] = (ctx->state[1] >> (24 - i * 8)) & 0x000000ff;
hash[i + 8] = (ctx->state[2] >> (24 - i * 8)) & 0x000000ff;
hash[i + 12] = (ctx->state[3] >> (24 - i * 8)) & 0x000000ff;
hash[i + 16] = (ctx->state[4] >> (24 - i * 8)) & 0x000000ff;
}
}
__global__ void kernel_sha1_hash(BYTE* indata, WORD inlen, BYTE* outdata, WORD n_batch)
{
WORD thread = blockIdx.x * blockDim.x + threadIdx.x;
if (thread >= n_batch)
{
return;
}
BYTE* in = indata + thread * inlen;
BYTE* out = outdata + thread * SHA1_BLOCK_SIZE;
CUDA_SHA1_CTX ctx;
cuda_sha1_init(&ctx);
cuda_sha1_update(&ctx, in, inlen);
cuda_sha1_final(&ctx, out);
}
void mcm_cuda_sha1_hash_batch(BYTE* in, WORD inlen, BYTE* out, WORD n_batch, WORD n_iter)
{
BYTE *cuda_indata;
BYTE *cuda_outdata;
cudaMalloc(&cuda_indata, inlen * n_batch);
cudaMalloc(&cuda_outdata, SHA1_BLOCK_SIZE * n_batch);
cudaMemcpy(cuda_indata, in, inlen * n_batch, cudaMemcpyHostToDevice);
WORD thread = WG_SIZE;
WORD block = (n_batch / thread) + (n_batch % thread != 0);
for(int i = 0 ; i < n_iter ; ++i)
kernel_sha1_hash << < block, thread >> > (cuda_indata, inlen, cuda_outdata, n_batch);
cudaMemcpy(out, cuda_outdata, SHA1_BLOCK_SIZE * n_batch, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
printf("Error cuda sha1 hash: %s \n", cudaGetErrorString(error));
}
cudaFree(cuda_indata);
cudaFree(cuda_outdata);
}
|
47bf5ff17a5ac6d410d03fa6c121e98bed07639d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
{
__global__ void tx1mx(const int lengthX, const double *t, const double *x, double *z)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthX)
{
z[i] += t[i]*x[i]*(1.0-x[i]);
}
}
} | 47bf5ff17a5ac6d410d03fa6c121e98bed07639d.cu | extern "C"
{
__global__ void tx1mx(const int lengthX, const double *t, const double *x, double *z)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthX)
{
z[i] += t[i]*x[i]*(1.0-x[i]);
}
}
} |
1b1e65dc25bb9d98bb7fd18a08da7358688cbea3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "layers/operator.h"
#include <string.h>
// --------------------------------------------------------------------------
// kernel code
// minus_{gpu, cpu}
// --------------------------------------------------------------------------
// in-place negative transform bottom -> bottom
// bottom[i] = -bottom[i]
#ifdef GPU
__global__
static
void minus_inplace_gpu(real bottom[], const int item_size)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < item_size) {
bottom[index] = -bottom[index];
}
}
#else
static
void minus_inplace_cpu(real bottom[], const int item_size)
{
for (int index = 0; index < item_size; ++index) {
bottom[index] = -bottom[index];
}
}
#endif
// --------------------------------------------------------------------------
// layer-wise operator code
// --------------------------------------------------------------------------
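// crelu_forward writes each input item twice into the channel-doubled output:
// the first half is a straight copy, the second half is copied and then negated
// in place by the minus kernel.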
static
void crelu_forward(const Tensor* const bottom,
Tensor* const top,
const LayerOption* const option)
{
for (int n = bottom->num_items - 1; n >= 0; --n) {
int item_size = 1;
for (int i = 0; i < bottom->ndim; ++i) {
item_size *= bottom->shape[n][i];
}
#ifdef GPU
{
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(item_size, threads_per_block);
hipMemcpyAsync(top->data + top->start[n] + item_size,
bottom->data + bottom->start[n],
item_size * sizeof(real),
hipMemcpyDeviceToDevice);
hipMemcpyAsync(top->data + top->start[n],
bottom->data + bottom->start[n],
item_size * sizeof(real),
hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( minus_inplace_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
top->data + top->start[n] + item_size,
item_size);
}
#else
{
memcpy(top->data + top->start[n] + item_size,
bottom->data + bottom->start[n],
item_size * sizeof(real));
memcpy(top->data + top->start[n],
bottom->data + bottom->start[n],
item_size * sizeof(real));
minus_inplace_cpu(top->data + top->start[n] + item_size,
item_size);
}
#endif
}
}
// --------------------------------------------------------------------------
// output shape calculator code
// --------------------------------------------------------------------------
static
void crelu_shape(const Tensor* const bottom,
Tensor* const top)
{
top->ndim = bottom->ndim;
top->num_items = bottom->num_items;
for (int n = 0; n < bottom->num_items; ++n) {
top->shape[n][0] = bottom->shape[n][0] * 2; // 2x channels
for (int i = 1; i < bottom->ndim; ++i) {
top->shape[n][i] = bottom->shape[n][i];
}
}
for (int n = 0; n < bottom->num_items; ++n) {
top->start[n] = bottom->start[n] * 2;
}
}
// --------------------------------------------------------------------------
// functions for layer instance
// --------------------------------------------------------------------------
void forward_crelu_layer(void* const net_, void* const layer_)
{
Layer* const layer = (Layer*)layer_;
crelu_forward(get_bottom(layer, 0), get_top(layer, 0),
&layer->option);
}
void shape_crelu_layer(void* const net_, void* const layer_)
{
Layer* const layer = (Layer*)layer_;
crelu_shape(get_bottom(layer, 0), get_top(layer, 0));
}
void init_crelu_layer(void* const net_, void* const layer_)
{
return;
}
void free_crelu_layer(void* const net_, void* const layer_)
{
return;
}
| 1b1e65dc25bb9d98bb7fd18a08da7358688cbea3.cu | #include "layers/operator.h"
#include <string.h>
// --------------------------------------------------------------------------
// kernel code
// minus_{gpu, cpu}
// --------------------------------------------------------------------------
// in-place negative transform bottom -> bottom
// bottom[i] = -bottom[i]
#ifdef GPU
__global__
static
void minus_inplace_gpu(real bottom[], const int item_size)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < item_size) {
bottom[index] = -bottom[index];
}
}
#else
static
void minus_inplace_cpu(real bottom[], const int item_size)
{
for (int index = 0; index < item_size; ++index) {
bottom[index] = -bottom[index];
}
}
#endif
// --------------------------------------------------------------------------
// layer-wise operator code
// --------------------------------------------------------------------------
static
void crelu_forward(const Tensor* const bottom,
Tensor* const top,
const LayerOption* const option)
{
for (int n = bottom->num_items - 1; n >= 0; --n) {
int item_size = 1;
for (int i = 0; i < bottom->ndim; ++i) {
item_size *= bottom->shape[n][i];
}
#ifdef GPU
{
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(item_size, threads_per_block);
cudaMemcpyAsync(top->data + top->start[n] + item_size,
bottom->data + bottom->start[n],
item_size * sizeof(real),
cudaMemcpyDeviceToDevice);
cudaMemcpyAsync(top->data + top->start[n],
bottom->data + bottom->start[n],
item_size * sizeof(real),
cudaMemcpyDeviceToDevice);
minus_inplace_gpu<<<num_blocks, threads_per_block>>>(
top->data + top->start[n] + item_size,
item_size);
}
#else
{
memcpy(top->data + top->start[n] + item_size,
bottom->data + bottom->start[n],
item_size * sizeof(real));
memcpy(top->data + top->start[n],
bottom->data + bottom->start[n],
item_size * sizeof(real));
minus_inplace_cpu(top->data + top->start[n] + item_size,
item_size);
}
#endif
}
}
// --------------------------------------------------------------------------
// output shape calculator code
// --------------------------------------------------------------------------
static
void crelu_shape(const Tensor* const bottom,
Tensor* const top)
{
top->ndim = bottom->ndim;
top->num_items = bottom->num_items;
for (int n = 0; n < bottom->num_items; ++n) {
top->shape[n][0] = bottom->shape[n][0] * 2; // 2x channels
for (int i = 1; i < bottom->ndim; ++i) {
top->shape[n][i] = bottom->shape[n][i];
}
}
for (int n = 0; n < bottom->num_items; ++n) {
top->start[n] = bottom->start[n] * 2;
}
}
// --------------------------------------------------------------------------
// functions for layer instance
// --------------------------------------------------------------------------
void forward_crelu_layer(void* const net_, void* const layer_)
{
Layer* const layer = (Layer*)layer_;
crelu_forward(get_bottom(layer, 0), get_top(layer, 0),
&layer->option);
}
void shape_crelu_layer(void* const net_, void* const layer_)
{
Layer* const layer = (Layer*)layer_;
crelu_shape(get_bottom(layer, 0), get_top(layer, 0));
}
void init_crelu_layer(void* const net_, void* const layer_)
{
return;
}
void free_crelu_layer(void* const net_, void* const layer_)
{
return;
}
|
f9c07f986c017142d80fab3b984429500d651f8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// TrtSequenceOffset kernels are modified from FasterTransformer
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "core/providers/cuda/cuda_common.h"
#include "contrib_ops/cuda/bert/bert_padding.h"
#include <hipcub/hipcub.hpp>
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace contrib {
namespace cuda {
constexpr int32_t kMAX_THREADS_PER_BLOCK = 256;
// -----------------------------------
// Get indices of non-padding tokens and padding tokens. Here we assume that padding is on the right side of sequence.
// sequence_token_count is number of non-padding tokens per sequence, and it has shape [batch_size].
// For example, we have 3 sequences with 1, 2, 4 non-padding tokens and positions like the following (* means padding):
// Sequence_0: 0, 1*, 2*, 3*
// Sequence_1: 4, 5, 6*, 7*
// Sequence_2: 8, 9, 10, 11
// token_offset: 0, 4, 5, 8, 9, 10, 11, 1*, 2*, 3*, 6*, 7*
// token_count_buffer has two numbers for non-padding tokens:
// total_token_count: 1 + 2 + 4 = 7
// max_token_count: 4
// cumulated_token_count: 0, 1, 1+2, 1+2+4
__global__ void getTokenOffset(int* token_count_buffer,
int* token_offset,
int* cumulated_token_count,
const int* sequence_token_count,
const int batch_size,
const int sequence_length) {
// Find offset of non-padding tokens, and max sequence length among all batches
// TODO(tianleiwu): Use cub::DevicePartition::Flagged like BuildGlobalIndex in longformer_global_impl.cu
// to build token_offset when sequence length is large.
int total_tokens = 0;
int max_tokens = 0;
int index = 0;
cumulated_token_count[0] = 0;
for (int i = 0; i < batch_size; i++) {
const int count = sequence_token_count[i];
if (count > max_tokens) {
max_tokens = count;
}
cumulated_token_count[i + 1] = cumulated_token_count[i] + count;
for (int j = 0; j < count; j++) {
token_offset[index] = i * sequence_length + j;
index++;
}
total_tokens += count;
}
// Offset of paddings
for (int i = 0; i < batch_size; i++) {
const int count = sequence_token_count[i];
for (int j = 0; j < sequence_length - count; j++) {
token_offset[index] = i * sequence_length + count + j;
index++;
}
}
token_count_buffer[0] = total_tokens;
token_count_buffer[1] = max_tokens;
}
void LaunchGetTokenOffset(int* token_count_buffer,
int* token_offset,
int* cumulated_token_count,
const int* sequence_token_count,
const int batch_size,
const int sequence_length,
hipStream_t stream) {
hipLaunchKernelGGL(( getTokenOffset), dim3(1), dim3(1), 0, stream,
token_count_buffer, token_offset, cumulated_token_count, sequence_token_count, batch_size, sequence_length);
}
// -----------------------------------
// Remove paddings
template <typename T>
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
removePadding(T* target, const T* source, const int* token_offset, const int width) {
const int tid = threadIdx.x;
const int token_index = blockIdx.x;
const int source_offset = token_offset[token_index];
const int target_offset = token_index;
for (int i = tid; i < width; i += blockDim.x) {
target[target_offset * width + i] = source[source_offset * width + i];
}
}
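// The launchers below reinterpret the element type so each thread moves the
// widest aligned chunk possible: for half data, hidden_size % 8 == 0 maps to
// int4 (16 bytes = 8 halves), % 4 to int64_t, % 2 to int32_t, otherwise
// int16_t (one half at a time); the float launcher follows the same pattern,
// with int4 covering 4 floats.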
template <>
void LaunchRemovePadding(
half* output, const half* input, const int* token_offset, const int token_count, const int hidden_size,
hipStream_t stream) {
// input: [batch_size, sequence_length, hidden_size]
// output: [token_count, hidden_size]
// Make sure memory is aligned to 128 bit
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
if (hidden_size % 8 == 0) {
const int width = hidden_size / 8;
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
hipLaunchKernelGGL(( removePadding<int4>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width);
} else if (hidden_size % 4 == 0) {
const int width = hidden_size / 4;
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
hipLaunchKernelGGL(( removePadding<int64_t>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width);
} else if (hidden_size % 2 == 0) {
const int width = hidden_size / 2;
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
hipLaunchKernelGGL(( removePadding<int32_t>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width);
} else {
const int width = hidden_size;
const int16_t* input2 = reinterpret_cast<const int16_t*>(input);
int16_t* output2 = reinterpret_cast<int16_t*>(output);
hipLaunchKernelGGL(( removePadding<int16_t>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width);
}
}
template <>
void LaunchRemovePadding(
float* output, const float* input, const int* token_offset, const int token_count, const int hidden_size,
hipStream_t stream) {
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
if (hidden_size % 4 == 0) {
const int width = hidden_size / 4;
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
hipLaunchKernelGGL(( removePadding<int4>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream, output2, input2, token_offset, width);
} else if (hidden_size % 2 == 0) {
const int width = hidden_size / 2;
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
hipLaunchKernelGGL(( removePadding<int64_t>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream, output2, input2, token_offset, width);
} else {
const int width = hidden_size;
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
hipLaunchKernelGGL(( removePadding<int32_t>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream, output2, input2, token_offset, width);
}
}
// -----------------------------------
// Recover padding.
template <typename T>
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
restorePadding(T* target, const T* source, const int* token_offset, const int width, const int token_count) {
const int tid = threadIdx.x;
const int token_index = blockIdx.x;
const int target_seq_id = token_offset[token_index];
const int source_seq_id = token_index;
constexpr T padding_zero = 0;
if (token_index < token_count) {
for (int i = tid; i < width; i += blockDim.x) {
target[target_seq_id * width + i] = source[source_seq_id * width + i];
}
} else {
// It is padding: fill with zeros
for (int i = tid; i < width; i += blockDim.x) {
target[target_seq_id * width + i] = padding_zero;
}
}
}
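// Specialization for int4: the vector type cannot be initialized from the
// literal 0, so the zero written into padded rows is spelled out
// component-wise instead of using the generic `constexpr T padding_zero = 0`.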
template <>
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
restorePadding(int4* target, const int4* source, const int* token_offset, const int width, const int token_count) {
const int tid = threadIdx.x;
const int token_index = blockIdx.x;
const int target_seq_id = token_offset[token_index];
const int source_seq_id = token_index;
int4 padding_zero{0, 0, 0, 0};
if (token_index < token_count) {
for (int i = tid; i < width; i += blockDim.x) {
target[target_seq_id * width + i] = source[source_seq_id * width + i];
}
} else {
// It is padding: fill with zeros
for (int i = tid; i < width; i += blockDim.x) {
target[target_seq_id * width + i] = padding_zero;
}
}
}
template <>
void LaunchRestorePadding(
float* output, const float* input, const int* token_offset, const int token_count, const int hidden_size,
const int batch_size, const int sequence_length,
hipStream_t stream) {
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
int grid_size = batch_size * sequence_length;
if (hidden_size % 4 == 0) {
const int width = hidden_size / 4;
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
hipLaunchKernelGGL(( restorePadding<int4>), dim3(grid_size), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width, token_count);
} else if (hidden_size % 2 == 0) {
const int width = hidden_size / 2;
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
hipLaunchKernelGGL(( restorePadding<int64_t>), dim3(grid_size), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width, token_count);
} else {
const int width = hidden_size;
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
hipLaunchKernelGGL(( restorePadding<int32_t>), dim3(grid_size), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width, token_count);
}
}
template <>
void LaunchRestorePadding(
half* output, const half* input, const int* token_offset, const int token_count, const int hidden_size,
const int batch_size, const int sequence_length,
hipStream_t stream) {
// input: [token_count, hidden_size]
// output: [batch_size, sequence_length, hidden_size]
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
int grid_size = batch_size * sequence_length;
if (hidden_size % 8 == 0) {
const int width = hidden_size / 8;
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
hipLaunchKernelGGL(( restorePadding<int4>), dim3(grid_size), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width, token_count);
} else if (hidden_size % 4 == 0) {
const int width = hidden_size / 4;
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
hipLaunchKernelGGL(( restorePadding<int64_t>), dim3(grid_size), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width, token_count);
} else if (hidden_size % 2 == 0) {
const int width = hidden_size / 2;
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
hipLaunchKernelGGL(( restorePadding<int32_t>), dim3(grid_size), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width, token_count);
} else {
const int width = hidden_size;
const int16_t* input2 = reinterpret_cast<const int16_t*>(input);
int16_t* output2 = reinterpret_cast<int16_t*>(output);
hipLaunchKernelGGL(( restorePadding<int16_t>), dim3(grid_size), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width, token_count);
}
}
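// getTrtSequenceOffset writes the cumulative sequence lengths expected by the
// TensorRT fused attention kernels, e.g. per-batch token counts {1, 2, 4}
// produce offsets {0, 1, 3, 7}.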
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
getTrtSequenceOffset(int* trt_mha_padding_offset,
const int* sequence_token_count,
const int batch_size) {
extern __shared__ int tmp_offset[];
if (threadIdx.x == 0) {
tmp_offset[0] = 0;
for (int i = 0; i < batch_size; i++) {
tmp_offset[i + 1] = tmp_offset[i] + sequence_token_count[i];
}
}
__syncthreads();
for (int i = threadIdx.x; i < batch_size + 1; i += blockDim.x) {
trt_mha_padding_offset[i] = tmp_offset[i];
}
}
// Get sequence offset for TensorRT fused attention when there is no padding (or padding is removed)
void LaunchTrtSequenceOffset(int* trt_mha_padding_offset,
const int* sequence_token_count,
const int batch_size,
hipStream_t stream) {
hipLaunchKernelGGL(( getTrtSequenceOffset), dim3(1), dim3(kMAX_THREADS_PER_BLOCK), sizeof(int) * (batch_size + 1), stream,
trt_mha_padding_offset, sequence_token_count, batch_size);
}
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
getTrtSequenceOffset(int* trt_mha_padding_offset,
const int* sequence_token_count,
const int batch_size,
const int sequence_length) {
extern __shared__ int tmp_offset[];
if (threadIdx.x == 0) {
tmp_offset[0] = 0;
// B for fused attention is 2 * batch_size
for (int i = 0; i < batch_size; i++) {
tmp_offset[i * 2 + 1] = tmp_offset[i * 2] + sequence_token_count[i];
tmp_offset[i * 2 + 2] = sequence_length * (i + 1);
}
}
__syncthreads();
for (int i = threadIdx.x; i < 2 * batch_size + 1; i += blockDim.x) {
trt_mha_padding_offset[i] = tmp_offset[i];
}
}
// When there is no attention mask, the sequence offset is like
// 0, sequence_length, 2 * sequence_length, 3 * sequence_length, .... ,batch_size * sequence_length
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
getTrtSequenceOffsetNoMask(int* trt_mha_padding_offset,
const int batch_size,
const int sequence_length) {
extern __shared__ int tmp_offset[];
if (threadIdx.x == 0) {
tmp_offset[0] = 0;
for (int i = 0; i < batch_size; i++) {
tmp_offset[i + 1] = sequence_length * (i + 1);
}
}
__syncthreads();
for (int i = threadIdx.x; i < batch_size + 1; i += blockDim.x) {
trt_mha_padding_offset[i] = tmp_offset[i];
}
}
// Get sequence offset for TensorRT fused attention when we keep the padding
void LaunchTrtSequenceOffset(int* trt_mha_padding_offset,
const int* sequence_token_count,
const int batch_size,
const int sequence_length,
hipStream_t stream) {
if (nullptr == sequence_token_count) {
hipLaunchKernelGGL(( getTrtSequenceOffsetNoMask), dim3(1), dim3(kMAX_THREADS_PER_BLOCK), sizeof(int) * (batch_size + 1), stream,
trt_mha_padding_offset, batch_size, sequence_length);
} else {
hipLaunchKernelGGL(( getTrtSequenceOffset), dim3(1), dim3(kMAX_THREADS_PER_BLOCK), sizeof(int) * (2 * batch_size + 1), stream,
trt_mha_padding_offset, sequence_token_count, batch_size, sequence_length);
}
}
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
getTrtSequenceOffset2d(int* trt_mha_padding_offset,
const int* attention_masks,
const int batch_size,
const int sequence_length) {
typedef hipcub::BlockReduce<int, kMAX_THREADS_PER_BLOCK> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
const int batch_id = blockIdx.x;
const int* batch_mask = attention_masks + (batch_id * sequence_length);
const bool leftmost_non_zero = (batch_mask[0] != 0);
int biggest_position = 0;
for (int i = threadIdx.x; i < sequence_length; i += blockDim.x) {
if (leftmost_non_zero == (batch_mask[i] != 0)) {
biggest_position = i;
} else {
break;
}
}
int last_leading_position = BlockReduce(temp_storage).Reduce(biggest_position, hipcub::Max(), blockDim.x);
if (threadIdx.x == 0) {
int batch_offset = batch_id * sequence_length;
trt_mha_padding_offset[2 * batch_id] = batch_offset;
trt_mha_padding_offset[2 * batch_id + 1] = batch_offset + last_leading_position + 1;
if (batch_id == gridDim.x - 1) {
trt_mha_padding_offset[2 * batch_id + 2] = batch_offset + sequence_length;
}
}
}
// only support simple left padding with mask 0s on leading left,
// or simple right padding with mask 1s on leading left.
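// For example, masks such as 0 0 1 1 1 (left padding) or 1 1 1 0 0 (right
// padding) are handled; an interleaved mask like 1 0 1 0 is not, because the
// kernel only measures the leading run that matches mask[0].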
void LaunchTrtSequenceOffset2d(int* trt_mha_padding_offset,
const int* attention_masks,
const int batch_size,
const int sequence_length,
hipStream_t stream) {
hipLaunchKernelGGL(( getTrtSequenceOffset2d), dim3(batch_size), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
trt_mha_padding_offset, attention_masks, batch_size, sequence_length);
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| f9c07f986c017142d80fab3b984429500d651f8f.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// TrtSequenceOffset kernels are modified from FasterTransformer
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "core/providers/cuda/cuda_common.h"
#include "contrib_ops/cuda/bert/bert_padding.h"
#include <cub/cub.cuh>
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace contrib {
namespace cuda {
constexpr int32_t kMAX_THREADS_PER_BLOCK = 256;
// -----------------------------------
// Get indices of non-padding tokens and padding tokens. Here we assume that padding is on the right side of sequence.
// sequence_token_count is number of non-padding tokens per sequence, and it has shape [batch_size].
// For example, we have 3 sequences with 1, 2, 4 non-padding tokens and positions like the following (* means padding):
// Sequence_0: 0, 1*, 2*, 3*
// Sequence_1: 4, 5, 6*, 7*
// Sequence_2: 8, 9, 10, 11
// token_offset: 0, 4, 5, 8, 9, 10, 11, 1*, 2*, 3*, 6*, 7*
// token_count_buffer has two numbers for non-padding tokens:
// total_token_count: 1 + 2 + 4 = 7
// max_token_count: 4
// cumulated_token_count: 0, 1, 1+2, 1+2+4
__global__ void getTokenOffset(int* token_count_buffer,
int* token_offset,
int* cumulated_token_count,
const int* sequence_token_count,
const int batch_size,
const int sequence_length) {
// Find offset of non-padding tokens, and max sequence length among all batches
// TODO(tianleiwu): Use cub::DevicePartition::Flagged like BuildGlobalIndex in longformer_global_impl.cu
// to build token_offset when sequence length is large.
int total_tokens = 0;
int max_tokens = 0;
int index = 0;
cumulated_token_count[0] = 0;
for (int i = 0; i < batch_size; i++) {
const int count = sequence_token_count[i];
if (count > max_tokens) {
max_tokens = count;
}
cumulated_token_count[i + 1] = cumulated_token_count[i] + count;
for (int j = 0; j < count; j++) {
token_offset[index] = i * sequence_length + j;
index++;
}
total_tokens += count;
}
// Offset of paddings
for (int i = 0; i < batch_size; i++) {
const int count = sequence_token_count[i];
for (int j = 0; j < sequence_length - count; j++) {
token_offset[index] = i * sequence_length + count + j;
index++;
}
}
token_count_buffer[0] = total_tokens;
token_count_buffer[1] = max_tokens;
}
void LaunchGetTokenOffset(int* token_count_buffer,
int* token_offset,
int* cumulated_token_count,
const int* sequence_token_count,
const int batch_size,
const int sequence_length,
cudaStream_t stream) {
getTokenOffset<<<1, 1, 0, stream>>>(
token_count_buffer, token_offset, cumulated_token_count, sequence_token_count, batch_size, sequence_length);
}
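// Minimal host-side sketch (illustrative only; buffer names are hypothetical).
// The sizes follow from getTokenOffset: 2 ints for the token counts,
// batch_size * sequence_length ints for token_offset, and batch_size + 1 ints
// for the cumulated counts.
//   int *d_token_count, *d_token_offset, *d_cum_token_count;
//   cudaMalloc(&d_token_count, 2 * sizeof(int));
//   cudaMalloc(&d_token_offset, batch_size * sequence_length * sizeof(int));
//   cudaMalloc(&d_cum_token_count, (batch_size + 1) * sizeof(int));
//   LaunchGetTokenOffset(d_token_count, d_token_offset, d_cum_token_count,
//                        d_sequence_token_count, batch_size, sequence_length,
//                        stream);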
// -----------------------------------
// Remove paddings
template <typename T>
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
removePadding(T* target, const T* source, const int* token_offset, const int width) {
const int tid = threadIdx.x;
const int token_index = blockIdx.x;
const int source_offset = token_offset[token_index];
const int target_offset = token_index;
for (int i = tid; i < width; i += blockDim.x) {
target[target_offset * width + i] = source[source_offset * width + i];
}
}
template <>
void LaunchRemovePadding(
half* output, const half* input, const int* token_offset, const int token_count, const int hidden_size,
cudaStream_t stream) {
// input: [batch_size, sequence_length, hidden_size]
// output: [token_count, hidden_size]
// Make sure memory is aligned to 128 bit
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
if (hidden_size % 8 == 0) {
const int width = hidden_size / 8;
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
removePadding<int4><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width);
} else if (hidden_size % 4 == 0) {
const int width = hidden_size / 4;
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
removePadding<int64_t><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width);
} else if (hidden_size % 2 == 0) {
const int width = hidden_size / 2;
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
removePadding<int32_t><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width);
} else {
const int width = hidden_size;
const int16_t* input2 = reinterpret_cast<const int16_t*>(input);
int16_t* output2 = reinterpret_cast<int16_t*>(output);
removePadding<int16_t><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width);
}
}
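// Illustrative sizing (hypothetical values): for half input with
// hidden_size = 768, the first branch is taken (768 % 8 == 0), each int4
// chunk carries 8 halves (16 bytes), the kernel sees width = 768 / 8 = 96,
// and one thread block is launched per non-padding token.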
template <>
void LaunchRemovePadding(
float* output, const float* input, const int* token_offset, const int token_count, const int hidden_size,
cudaStream_t stream) {
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
if (hidden_size % 4 == 0) {
const int width = hidden_size / 4;
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
removePadding<int4><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(output2, input2, token_offset, width);
} else if (hidden_size % 2 == 0) {
const int width = hidden_size / 2;
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
removePadding<int64_t><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(output2, input2, token_offset, width);
} else {
const int width = hidden_size;
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
removePadding<int32_t><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(output2, input2, token_offset, width);
}
}
// -----------------------------------
// Recover padding.
template <typename T>
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
restorePadding(T* target, const T* source, const int* token_offset, const int width, const int token_count) {
const int tid = threadIdx.x;
const int token_index = blockIdx.x;
const int target_seq_id = token_offset[token_index];
const int source_seq_id = token_index;
constexpr T padding_zero = 0;
if (token_index < token_count) {
for (int i = tid; i < width; i += blockDim.x) {
target[target_seq_id * width + i] = source[source_seq_id * width + i];
}
} else {
// It is padding: fill with zeros
for (int i = tid; i < width; i += blockDim.x) {
target[target_seq_id * width + i] = padding_zero;
}
}
}
template <>
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
restorePadding(int4* target, const int4* source, const int* token_offset, const int width, const int token_count) {
const int tid = threadIdx.x;
const int token_index = blockIdx.x;
const int target_seq_id = token_offset[token_index];
const int source_seq_id = token_index;
int4 padding_zero{0, 0, 0, 0};
if (token_index < token_count) {
for (int i = tid; i < width; i += blockDim.x) {
target[target_seq_id * width + i] = source[source_seq_id * width + i];
}
} else {
// It is padding: fill with zeros
for (int i = tid; i < width; i += blockDim.x) {
target[target_seq_id * width + i] = padding_zero;
}
}
}
template <>
void LaunchRestorePadding(
float* output, const float* input, const int* token_offset, const int token_count, const int hidden_size,
const int batch_size, const int sequence_length,
cudaStream_t stream) {
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
int grid_size = batch_size * sequence_length;
if (hidden_size % 4 == 0) {
const int width = hidden_size / 4;
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
restorePadding<int4><<<grid_size, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width, token_count);
} else if (hidden_size % 2 == 0) {
const int width = hidden_size / 2;
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
restorePadding<int64_t><<<grid_size, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width, token_count);
} else {
const int width = hidden_size;
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
restorePadding<int32_t><<<grid_size, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width, token_count);
}
}
template <>
void LaunchRestorePadding(
half* output, const half* input, const int* token_offset, const int token_count, const int hidden_size,
const int batch_size, const int sequence_length,
cudaStream_t stream) {
// input: [token_count, hidden_size]
// output: [batch_size, sequence_length, hidden_size]
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
int grid_size = batch_size * sequence_length;
if (hidden_size % 8 == 0) {
const int width = hidden_size / 8;
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
restorePadding<int4><<<grid_size, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width, token_count);
} else if (hidden_size % 4 == 0) {
const int width = hidden_size / 4;
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
restorePadding<int64_t><<<grid_size, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width, token_count);
} else if (hidden_size % 2 == 0) {
const int width = hidden_size / 2;
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
restorePadding<int32_t><<<grid_size, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width, token_count);
} else {
const int width = hidden_size;
const int16_t* input2 = reinterpret_cast<const int16_t*>(input);
int16_t* output2 = reinterpret_cast<int16_t*>(output);
restorePadding<int16_t><<<grid_size, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width, token_count);
}
}
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
getTrtSequenceOffset(int* trt_mha_padding_offset,
const int* sequence_token_count,
const int batch_size) {
extern __shared__ int tmp_offset[];
if (threadIdx.x == 0) {
tmp_offset[0] = 0;
for (int i = 0; i < batch_size; i++) {
tmp_offset[i + 1] = tmp_offset[i] + sequence_token_count[i];
}
}
__syncthreads();
for (int i = threadIdx.x; i < batch_size + 1; i += blockDim.x) {
trt_mha_padding_offset[i] = tmp_offset[i];
}
}
// Get sequence offset for TensorRT fused attention when there is no padding (or padding is removed)
void LaunchTrtSequenceOffset(int* trt_mha_padding_offset,
const int* sequence_token_count,
const int batch_size,
cudaStream_t stream) {
getTrtSequenceOffset<<<1, kMAX_THREADS_PER_BLOCK, sizeof(int) * (batch_size + 1), stream>>>(
trt_mha_padding_offset, sequence_token_count, batch_size);
}
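// Illustrative example (hypothetical values): for batch_size = 3 and
// sequence_token_count = {1, 2, 4} (padding already removed), the kernel
// writes the batch_size + 1 cumulative offsets
// trt_mha_padding_offset = {0, 1, 3, 7}.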
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
getTrtSequenceOffset(int* trt_mha_padding_offset,
const int* sequence_token_count,
const int batch_size,
const int sequence_length) {
extern __shared__ int tmp_offset[];
if (threadIdx.x == 0) {
tmp_offset[0] = 0;
// B for fused attention is 2 * batch_size
for (int i = 0; i < batch_size; i++) {
tmp_offset[i * 2 + 1] = tmp_offset[i * 2] + sequence_token_count[i];
tmp_offset[i * 2 + 2] = sequence_length * (i + 1);
}
}
__syncthreads();
for (int i = threadIdx.x; i < 2 * batch_size + 1; i += blockDim.x) {
trt_mha_padding_offset[i] = tmp_offset[i];
}
}
// When there is no attention mask, the sequence offset is like
// 0, sequence_length, 2 * sequence_length, 3 * sequence_length, ..., batch_size * sequence_length
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
getTrtSequenceOffsetNoMask(int* trt_mha_padding_offset,
const int batch_size,
const int sequence_length) {
extern __shared__ int tmp_offset[];
if (threadIdx.x == 0) {
tmp_offset[0] = 0;
for (int i = 0; i < batch_size; i++) {
tmp_offset[i + 1] = sequence_length * (i + 1);
}
}
__syncthreads();
for (int i = threadIdx.x; i < batch_size + 1; i += blockDim.x) {
trt_mha_padding_offset[i] = tmp_offset[i];
}
}
// Get sequence offset for TensorRT fused attention when we keep the padding
void LaunchTrtSequenceOffset(int* trt_mha_padding_offset,
const int* sequence_token_count,
const int batch_size,
const int sequence_length,
cudaStream_t stream) {
if (nullptr == sequence_token_count) {
getTrtSequenceOffsetNoMask<<<1, kMAX_THREADS_PER_BLOCK, sizeof(int) * (batch_size + 1), stream>>>(
trt_mha_padding_offset, batch_size, sequence_length);
} else {
getTrtSequenceOffset<<<1, kMAX_THREADS_PER_BLOCK, sizeof(int) * (2 * batch_size + 1), stream>>>(
trt_mha_padding_offset, sequence_token_count, batch_size, sequence_length);
}
}
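// Illustrative example (hypothetical values): for batch_size = 2,
// sequence_length = 4 and sequence_token_count = {1, 3}, the masked path
// writes the 2 * batch_size + 1 offsets trt_mha_padding_offset =
// {0, 1, 4, 7, 8}; with a null sequence_token_count the no-mask path
// writes {0, 4, 8}.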
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
getTrtSequenceOffset2d(int* trt_mha_padding_offset,
const int* attention_masks,
const int batch_size,
const int sequence_length) {
typedef cub::BlockReduce<int, kMAX_THREADS_PER_BLOCK> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
const int batch_id = blockIdx.x;
const int* batch_mask = attention_masks + (batch_id * sequence_length);
const bool leftmost_non_zero = (batch_mask[0] != 0);
int biggest_position = 0;
for (int i = threadIdx.x; i < sequence_length; i += blockDim.x) {
if (leftmost_non_zero == (batch_mask[i] != 0)) {
biggest_position = i;
} else {
break;
}
}
int last_leading_position = BlockReduce(temp_storage).Reduce(biggest_position, cub::Max(), blockDim.x);
if (threadIdx.x == 0) {
int batch_offset = batch_id * sequence_length;
trt_mha_padding_offset[2 * batch_id] = batch_offset;
trt_mha_padding_offset[2 * batch_id + 1] = batch_offset + last_leading_position + 1;
if (batch_id == gridDim.x - 1) {
trt_mha_padding_offset[2 * batch_id + 2] = batch_offset + sequence_length;
}
}
}
// Only simple left padding (mask 0s leading on the left) or simple right
// padding (mask 1s leading on the left) is supported.
void LaunchTrtSequenceOffset2d(int* trt_mha_padding_offset,
const int* attention_masks,
const int batch_size,
const int sequence_length,
cudaStream_t stream) {
getTrtSequenceOffset2d<<<batch_size, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
trt_mha_padding_offset, attention_masks, batch_size, sequence_length);
}
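// Illustrative example (hypothetical values): with batch_size = 1,
// sequence_length = 4 and attention_masks = {0, 0, 1, 1} (simple left
// padding), leftmost_non_zero is false, the leading run of zeros ends at
// position 1, and the kernel writes trt_mha_padding_offset = {0, 2, 4}.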
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
72c69d32f4e19ee037b455d3889d8977d031c68b.hip | // !!! This is a file automatically generated by hipify!!!
#include "./kern.cuh"
#include "megdnn/dtype.h"
#include "src/cuda/elemwise_helper.cuh"
#include "src/cuda/elemwise_helper_q4.cuh"
#include "src/cuda/kernel_common/diagnostic_prologue.cuh"
using namespace megdnn;
using namespace cuda;
using namespace elemwise_intl;
namespace {
template <typename ctype_dest, typename ctype_src, typename enable = void>
struct TypeCvtOp {
ctype_dest* dest;
__device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) {
dest[idx] = static_cast<ctype_dest>(src);
}
};
template <typename ctype_dest, typename ctype_src, typename enable = void>
struct TypeCvtOpToQuantized {
ctype_dest* dest;
CudaDTypeParam<ctype_dest> param;
__device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) {
dest[idx] = param.quantize(src);
}
};
template <typename ctype_dest, typename ctype_src, typename enable = void>
struct TypeCvtOpFromQuantized {
ctype_dest* dest;
CudaDTypeParam<ctype_src> param;
__device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) {
dest[idx] = static_cast<ctype_dest>(param.dequantize(src));
}
};
template <typename ctype_dest, typename ctype_src, typename enable = void>
struct TypeCvtOpBetweenQuantized {
ctype_dest* dest;
CudaDTypeParam<ctype_src> src_param;
CudaDTypeParam<ctype_dest> dst_param;
__device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) {
dest[idx] = dst_param.quantize(src_param.dequantize(src));
}
};
template <typename ctype_dest, typename ctype_src>
struct TypeCvtOp<
ctype_dest, ctype_src,
typename std::enable_if<
std::is_same<ctype_src, dt_int8>::value ||
std::is_same<ctype_src, dt_uint8>::value ||
std::is_same<ctype_src, dt_bool>::value>::type> {
ctype_dest* dest;
using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type;
using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type;
__device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) {
dest[idx] = static_cast<ctype_dest>(src);
}
__device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) {
ctype_dest x = static_cast<ctype_dest>(src.x);
ctype_dest y = static_cast<ctype_dest>(src.y);
ctype_dest z = static_cast<ctype_dest>(src.z);
ctype_dest w = static_cast<ctype_dest>(src.w);
*(dst_vect_type*)(&dest[idx]) =
VectTypeTrait<ctype_dest>::make_vector(x, y, z, w);
}
};
template <typename ctype_dest, typename ctype_src>
struct TypeCvtOpToQuantized<
ctype_dest, ctype_src,
typename std::enable_if<
std::is_same<ctype_src, dt_int8>::value ||
std::is_same<ctype_src, dt_uint8>::value ||
std::is_same<ctype_src, dt_qint1>::value ||
std::is_same<ctype_src, dt_bool>::value>::type> {
ctype_dest* dest;
CudaDTypeParam<ctype_dest> param;
using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type;
using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type;
__device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) {
dest[idx] = param.quantize(src);
}
__device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) {
ctype_dest x = param.quantize(src.x);
ctype_dest y = param.quantize(src.y);
ctype_dest z = param.quantize(src.z);
ctype_dest w = param.quantize(src.w);
*(dst_vect_type*)(&dest[idx]) =
VectTypeTrait<ctype_dest>::make_vector(x, y, z, w);
}
};
template <typename ctype_dest, typename ctype_src>
struct TypeCvtOpFromQuantized<
ctype_dest, ctype_src,
typename std::enable_if<
std::is_same<ctype_src, dt_qint8>::value ||
std::is_same<ctype_src, dt_qint1>::value ||
std::is_same<ctype_src, dt_quint8>::value>::type> {
ctype_dest* dest;
CudaDTypeParam<ctype_src> param;
using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type;
using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type;
__device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) {
dest[idx] = static_cast<ctype_dest>(param.dequantize(src));
}
__device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) {
ctype_dest x = static_cast<ctype_dest>(param.dequantize(ctype_src(src.x)));
ctype_dest y = static_cast<ctype_dest>(param.dequantize(ctype_src(src.y)));
ctype_dest z = static_cast<ctype_dest>(param.dequantize(ctype_src(src.z)));
ctype_dest w = static_cast<ctype_dest>(param.dequantize(ctype_src(src.w)));
*(dst_vect_type*)(&dest[idx]) =
VectTypeTrait<ctype_dest>::make_vector(x, y, z, w);
}
};
template <typename ctype_dest, typename ctype_src>
struct TypeCvtOpBetweenQuantized<
ctype_dest, ctype_src,
typename std::enable_if<
(std::is_same<ctype_src, dt_qint8>::value ||
std::is_same<ctype_src, dt_quint8>::value ||
std::is_same<ctype_src, dt_qint1>::value) &&
IsNotTypeQ4<ctype_dest>::value>::type> {
ctype_dest* dest;
CudaDTypeParam<ctype_src> src_param;
CudaDTypeParam<ctype_dest> dst_param;
using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type;
using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type;
__device__ __forceinline__ ctype_dest apply(ctype_src in) {
float inter = src_param.dequantize(in);
return dst_param.quantize(inter);
}
__device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) {
dest[idx] = dst_param.quantize(src_param.dequantize(src));
}
__device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) {
ctype_dest x = apply(ctype_src(src.x));
ctype_dest y = apply(ctype_src(src.y));
ctype_dest z = apply(ctype_src(src.z));
ctype_dest w = apply(ctype_src(src.w));
*(dst_vect_type*)(&dest[idx]) =
VectTypeTrait<ctype_dest>::make_vector(x, y, z, w);
}
};
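// Illustrative arithmetic (assuming the usual scale-only parametrization of
// dt_qint8, i.e. dequantize(x) = x * scale and quantize(f) = saturating
// round(f / scale)): requantizing the qint8 value 64 from scale 0.05f to
// scale 0.1f dequantizes to 3.2f and quantizes back to 32.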
template <typename ctype_dest, typename ctype_src>
struct TypeCvtOpFromNormalToQuantized4bit {
CudaDTypeParam<ctype_dest> dst_param;
using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type;
using dst_storage = typename VectTypeTrait<ctype_dest>::Storage;
dst_storage* dest;
__device__ __forceinline__ dst_storage apply(ctype_src in) {
return dst_param.quantize(in).as_storage();
}
__device__ __forceinline__ void operator()(
uint32_t idx, ctype_src src_x, ctype_src src_y) {
dst_storage x = apply(src_x);
dst_storage y = apply(src_y);
*(dst_vect_type*)(&dest[idx]) = VectTypeTrait<ctype_dest>::make_vector(x, y);
}
};
template <typename ctype_dest, typename ctype_src, typename enable = void>
struct TypeCvtOpFromQuantizedToQuantized4bit {
CudaDTypeParam<ctype_src> src_param;
CudaDTypeParam<ctype_dest> dst_param;
using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type;
using dst_storage = typename VectTypeTrait<ctype_dest>::Storage;
dst_storage* dest;
__device__ __forceinline__ dst_storage apply(ctype_src in) {
float inter = src_param.dequantize(in);
return dst_param.quantize(inter).as_storage();
}
__device__ __forceinline__ void operator()(
uint32_t idx, ctype_src src_x, ctype_src src_y) {
dst_storage x = apply(src_x);
dst_storage y = apply(src_y);
*(dst_vect_type*)(&dest[idx]) = VectTypeTrait<ctype_dest>::make_vector(x, y);
}
};
template <typename ctype_dest, typename ctype_src>
struct TypeCvtOpFromQuantizedToQuantized4bit<
ctype_dest, ctype_src,
typename std::enable_if<IsTypeQ4<ctype_src>::value>::type> {
static constexpr bool src_signedness = std::is_same<ctype_src, dt_qint4>::value;
CudaDTypeParam<ctype_src> src_param;
CudaDTypeParam<ctype_dest> dst_param;
using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type;
using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type;
using src_storage = typename VectTypeTrait<ctype_src>::Storage;
using dst_storage = typename VectTypeTrait<ctype_dest>::Storage;
dst_storage* dest;
__device__ __forceinline__ dst_storage apply(src_storage in) {
float inter = src_param.dequantize(in);
return dst_param.quantize(inter).as_storage();
}
__device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) {
dst_storage x = apply(src_storage(
integer_subbyte::unpack_integer_4bits<src_signedness>(src.x, 0)));
dst_storage y = apply(src_storage(
integer_subbyte::unpack_integer_4bits<src_signedness>(src.x, 4)));
*(dst_vect_type*)(&dest[idx]) = VectTypeTrait<ctype_dest>::make_vector(x, y);
}
};
} // anonymous namespace
#define main_func(OpType, body) \
{ \
typedef typename DTypeTrait<dtype_src>::ctype ctype_src; \
typedef typename DTypeTrait<dtype_dest>::ctype ctype_dest; \
typedef OpType<ctype_dest, ctype_src> Op; \
ElemwiseOpParamN<1> param; \
param[0] = src; \
param.init_from_given_tensor(); \
megdnn_assert(DTypeTrait<ctype_src>::enumv == src.layout.dtype.enumv().ev); \
megdnn_assert(DTypeTrait<ctype_dest>::enumv == dest.layout.dtype.enumv().ev); \
Op op; \
op.dest = dest.ptr<ctype_dest>(); \
body; \
return run_elemwise<Op, ctype_src, 1>(param, stream, op); \
}
namespace megdnn {
namespace cuda {
// Currently only typecvt_kern_{n2q,n2q4} respect this; change the other
// typecvt_kern_* variants if needed.
template <typename dtype_src, typename dtype_dest, typename sfinae = void>
struct enable_typecvt_kern {
static constexpr bool value = true;
};
#define MEGDNN_DISABLE_CUDA_TYPECVT_KERN(dtype_src, dtype_dest) \
template <> \
struct enable_typecvt_kern<dtype_src, dtype_dest, void> { \
static constexpr bool value = false; \
};
template <typename dtype_src, typename dtype_dest>
void typecvt_kern_q2q(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_src>& src_param,
const CudaDTypeParam<dtype_dest>& dst_param, hipStream_t stream) {
main_func(TypeCvtOpBetweenQuantized, op.dst_param = dst_param;
op.src_param = src_param;)
}
template <typename dtype_src, typename dtype_dest>
typename std::enable_if<enable_typecvt_kern<dtype_src, dtype_dest>::value>::type
typecvt_kern_n2q_impl(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_dest>& dst_param, hipStream_t stream) {
main_func(TypeCvtOpToQuantized, op.param = dst_param;);
}
template <typename dtype_src, typename dtype_dest>
typename std::enable_if<!enable_typecvt_kern<dtype_src, dtype_dest>::value>::type
typecvt_kern_n2q_impl(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_dest>& dst_param, hipStream_t stream) {
megdnn_throw("TypeCvt: CUDA kernel for this dtype pair is disabled");
}
template <typename dtype_src, typename dtype_dest>
void typecvt_kern_n2q(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_dest>& dst_param, hipStream_t stream) {
typecvt_kern_n2q_impl<dtype_src, dtype_dest>(dest, src, dst_param, stream);
}
template <typename dtype_src, typename dtype_dest>
void typecvt_kern_q2n(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_src>& src_param, hipStream_t stream) {
main_func(TypeCvtOpFromQuantized, op.param = src_param;);
}
template <typename dtype_src, typename dtype_dest>
void typecvt_kern_n2n(const TensorND& dest, const TensorND& src, hipStream_t stream) {
main_func(TypeCvtOp, );
}
#define INST_Q2Q(dtype_src, dtype_dest) \
template void typecvt_kern_q2q<dtype_src, dtype_dest>( \
const TensorND& dest, const TensorND& src, \
const CudaDTypeParam<dtype_src>& src_param, \
const CudaDTypeParam<dtype_dest>& dst_param, hipStream_t stream);
#define INST_Q2N(dtype_src, dtype_dest) \
template void typecvt_kern_q2n<dtype_src, dtype_dest>( \
const TensorND& dest, const TensorND& src, \
const CudaDTypeParam<dtype_src>& src_param, hipStream_t stream);
#define INST_N2Q(dtype_src, dtype_dest) \
template void typecvt_kern_n2q<dtype_src, dtype_dest>( \
const TensorND& dest, const TensorND& src, \
const CudaDTypeParam<dtype_dest>& dst_param, hipStream_t stream);
#define INST_N2N(dtype_src, dtype_dest) \
template void typecvt_kern_n2n<dtype_src, dtype_dest>( \
const TensorND& dest, const TensorND& src, hipStream_t stream);
// clang-format off
#define MEGDNN_FOREACH_COMPUTING_DTYPE_WITH_DTYPE_SRC(dtype_src, cb) \
cb(dtype_src, dt_int8) \
cb(dtype_src, dt_int32) \
cb(dtype_src, dt_int16) \
cb(dtype_src, dt_uint8) \
cb(dtype_src, dt_float32) \
cb(dtype_src, dt_float16) \
cb(dtype_src, dt_bfloat16) \
    cb(dtype_src, dt_bool)
#define MEGDNN_FOREACH_QUANTIZED_DTYPE_WITH_DTYPE_SRC(dtype_src, cb) \
cb(dtype_src, dt_quint8) \
cb(dtype_src, dt_qint32) \
cb(dtype_src, dt_qint8) \
    cb(dtype_src, dt_qint1)
MEGDNN_FOREACH_QUANTIZED_DTYPE_WITH_DTYPE_SRC(dt_uint16, MEGDNN_DISABLE_CUDA_TYPECVT_KERN)
#define INST_SRC_QUANTIZED(dtype_src) \
MEGDNN_FOREACH_COMPUTING_DTYPE_WITH_DTYPE_SRC(dtype_src, INST_Q2N) \
    MEGDNN_FOREACH_QUANTIZED_DTYPE_WITH_DTYPE_SRC(dtype_src, INST_Q2Q)
#define INST_SRC_NORMAL(dtype_src) \
MEGDNN_FOREACH_COMPUTING_DTYPE_WITH_DTYPE_SRC(dtype_src, INST_N2N) \
INST_N2N(dtype_src, dt_uint16) \
    MEGDNN_FOREACH_QUANTIZED_DTYPE_WITH_DTYPE_SRC(dtype_src, INST_N2Q)
#define MEGDNN_FOREACH_COMPUTING_CTYPE(cb) \
cb(dt_int8) \
cb(dt_int32) \
cb(dt_int16) \
cb(dt_uint8) \
cb(dt_float32) \
cb(dt_float16) \
cb(dt_bfloat16) \
    cb(dt_bool)
#define MEGDNN_FOREACH_QUANTIZED_CTYPE(cb) \
cb(dt_quint8) \
cb(dt_qint32) \
cb(dt_qint8) \
cb(dt_qint4) \
cb(dt_quint4) \
cb(dt_qint1)
MEGDNN_FOREACH_QUANTIZED_CTYPE(INST_SRC_QUANTIZED)
MEGDNN_FOREACH_COMPUTING_CTYPE(INST_SRC_NORMAL)
INST_SRC_NORMAL(dt_uint16)
// clang-format on
template void typecvt_kern_n2q<dtype::Int8, dtype::QuantizedS8>(
const TensorND& src, const TensorND& dst, const CudaDTypeParam<dt_qint8>& param,
hipStream_t stream);
#define main_func_to_q4(OpType, body) \
{ \
typedef typename DTypeTrait<dtype_src>::ctype ctype_src; \
typedef typename DTypeTrait<dtype_dest>::ctype ctype_dest; \
typedef OpType<ctype_dest, ctype_src> Op; \
ElemwiseOpParamN<1> param_src; \
ElemwiseOpParamN<1> param_dst; \
param_src[0] = src; \
param_dst[0] = dest; \
param_src.init_from_given_tensor(); \
param_dst.init_from_given_tensor(); \
megdnn_assert(DTypeTrait<ctype_src>::enumv == src.layout.dtype.enumv().ev); \
megdnn_assert(DTypeTrait<ctype_dest>::enumv == dest.layout.dtype.enumv().ev); \
using dst_storage = typename VectTypeTrait<ctype_dest>::Storage; \
Op op; \
op.dest = reinterpret_cast<dst_storage*>(dest.raw_ptr()); \
body; \
run_elemwise<Op, ctype_src, ctype_dest, 1>(param_src, param_dst, stream, op); \
return; \
}
template <typename dtype_src, typename dtype_dest>
void typecvt_kern_q2q4(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_src>& src_param,
const CudaDTypeParam<dtype_dest>& dst_param, hipStream_t stream) {
main_func_to_q4(TypeCvtOpFromQuantizedToQuantized4bit, op.dst_param = dst_param;
op.src_param = src_param;)
}
template <typename dtype_src, typename dtype_dest>
typename std::enable_if<enable_typecvt_kern<dtype_src, dtype_dest>::value>::type
typecvt_kern_n2q4_impl(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_dest>& dst_param, hipStream_t stream) {
main_func_to_q4(TypeCvtOpFromNormalToQuantized4bit, op.dst_param = dst_param;)
}
template <typename dtype_src, typename dtype_dest>
typename std::enable_if<!enable_typecvt_kern<dtype_src, dtype_dest>::value>::type
typecvt_kern_n2q4_impl(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_dest>& dst_param, hipStream_t stream) {
megdnn_throw("TypeCvt: CUDA kernel for this dtype pair is disabled");
}
template <typename dtype_src, typename dtype_dest>
void typecvt_kern_n2q4(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_dest>& dst_param, hipStream_t stream) {
typecvt_kern_n2q4_impl<dtype_src, dtype_dest>(dest, src, dst_param, stream);
}
#define INST_Q2Q4(dtype_src, dtype_dest) \
template void typecvt_kern_q2q4<dtype_src, dtype_dest>( \
const TensorND& dest, const TensorND& src, \
const CudaDTypeParam<dtype_src>& src_param, \
const CudaDTypeParam<dtype_dest>& dst_param, hipStream_t stream);
#define INST_N2Q4(dtype_src, dtype_dest) \
template void typecvt_kern_n2q4<dtype_src, dtype_dest>( \
const TensorND& dest, const TensorND& src, \
const CudaDTypeParam<dtype_dest>& dst_param, hipStream_t stream);
// clang-format off
#define MEGDNN_FOREACH_QUANTIZED_LOWBIT_WITH_DTYPE_SRC(dtype_src, cb) \
cb(dtype_src, dt_qint4) \
    cb(dtype_src, dt_quint4)
MEGDNN_FOREACH_QUANTIZED_LOWBIT_WITH_DTYPE_SRC(dt_uint16, MEGDNN_DISABLE_CUDA_TYPECVT_KERN)
#define INST_SRC_QUANTIZED_LOWBIT(dtype_src) \
    MEGDNN_FOREACH_QUANTIZED_LOWBIT_WITH_DTYPE_SRC(dtype_src, INST_Q2Q4)
#define INST_SRC_NORMAL_LOWBIT(dtype_src) \
    MEGDNN_FOREACH_QUANTIZED_LOWBIT_WITH_DTYPE_SRC(dtype_src, INST_N2Q4)
MEGDNN_FOREACH_QUANTIZED_CTYPE(INST_SRC_QUANTIZED_LOWBIT)
MEGDNN_FOREACH_COMPUTING_CTYPE(INST_SRC_NORMAL_LOWBIT)
INST_SRC_NORMAL_LOWBIT(dt_uint16)
} // namespace cuda
} // namespace megdnn
#include "src/cuda/kernel_common/diagnostic_epilogue.cuh"
// vim: syntax=cpp.doxygen
| 72c69d32f4e19ee037b455d3889d8977d031c68b.cu | #include "./kern.cuh"
#include "megdnn/dtype.h"
#include "src/cuda/elemwise_helper.cuh"
#include "src/cuda/elemwise_helper_q4.cuh"
#include "src/cuda/kernel_common/diagnostic_prologue.cuh"
using namespace megdnn;
using namespace cuda;
using namespace elemwise_intl;
namespace {
template <typename ctype_dest, typename ctype_src, typename enable = void>
struct TypeCvtOp {
ctype_dest* dest;
__device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) {
dest[idx] = static_cast<ctype_dest>(src);
}
};
template <typename ctype_dest, typename ctype_src, typename enable = void>
struct TypeCvtOpToQuantized {
ctype_dest* dest;
CudaDTypeParam<ctype_dest> param;
__device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) {
dest[idx] = param.quantize(src);
}
};
template <typename ctype_dest, typename ctype_src, typename enable = void>
struct TypeCvtOpFromQuantized {
ctype_dest* dest;
CudaDTypeParam<ctype_src> param;
__device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) {
dest[idx] = static_cast<ctype_dest>(param.dequantize(src));
}
};
template <typename ctype_dest, typename ctype_src, typename enable = void>
struct TypeCvtOpBetweenQuantized {
ctype_dest* dest;
CudaDTypeParam<ctype_src> src_param;
CudaDTypeParam<ctype_dest> dst_param;
__device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) {
dest[idx] = dst_param.quantize(src_param.dequantize(src));
}
};
template <typename ctype_dest, typename ctype_src>
struct TypeCvtOp<
ctype_dest, ctype_src,
typename std::enable_if<
std::is_same<ctype_src, dt_int8>::value ||
std::is_same<ctype_src, dt_uint8>::value ||
std::is_same<ctype_src, dt_bool>::value>::type> {
ctype_dest* dest;
using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type;
using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type;
__device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) {
dest[idx] = static_cast<ctype_dest>(src);
}
__device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) {
ctype_dest x = static_cast<ctype_dest>(src.x);
ctype_dest y = static_cast<ctype_dest>(src.y);
ctype_dest z = static_cast<ctype_dest>(src.z);
ctype_dest w = static_cast<ctype_dest>(src.w);
*(dst_vect_type*)(&dest[idx]) =
VectTypeTrait<ctype_dest>::make_vector(x, y, z, w);
}
};
template <typename ctype_dest, typename ctype_src>
struct TypeCvtOpToQuantized<
ctype_dest, ctype_src,
typename std::enable_if<
std::is_same<ctype_src, dt_int8>::value ||
std::is_same<ctype_src, dt_uint8>::value ||
std::is_same<ctype_src, dt_qint1>::value ||
std::is_same<ctype_src, dt_bool>::value>::type> {
ctype_dest* dest;
CudaDTypeParam<ctype_dest> param;
using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type;
using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type;
__device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) {
dest[idx] = param.quantize(src);
}
__device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) {
ctype_dest x = param.quantize(src.x);
ctype_dest y = param.quantize(src.y);
ctype_dest z = param.quantize(src.z);
ctype_dest w = param.quantize(src.w);
*(dst_vect_type*)(&dest[idx]) =
VectTypeTrait<ctype_dest>::make_vector(x, y, z, w);
}
};
template <typename ctype_dest, typename ctype_src>
struct TypeCvtOpFromQuantized<
ctype_dest, ctype_src,
typename std::enable_if<
std::is_same<ctype_src, dt_qint8>::value ||
std::is_same<ctype_src, dt_qint1>::value ||
std::is_same<ctype_src, dt_quint8>::value>::type> {
ctype_dest* dest;
CudaDTypeParam<ctype_src> param;
using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type;
using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type;
__device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) {
dest[idx] = static_cast<ctype_dest>(param.dequantize(src));
}
__device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) {
ctype_dest x = static_cast<ctype_dest>(param.dequantize(ctype_src(src.x)));
ctype_dest y = static_cast<ctype_dest>(param.dequantize(ctype_src(src.y)));
ctype_dest z = static_cast<ctype_dest>(param.dequantize(ctype_src(src.z)));
ctype_dest w = static_cast<ctype_dest>(param.dequantize(ctype_src(src.w)));
*(dst_vect_type*)(&dest[idx]) =
VectTypeTrait<ctype_dest>::make_vector(x, y, z, w);
}
};
template <typename ctype_dest, typename ctype_src>
struct TypeCvtOpBetweenQuantized<
ctype_dest, ctype_src,
typename std::enable_if<
(std::is_same<ctype_src, dt_qint8>::value ||
std::is_same<ctype_src, dt_quint8>::value ||
std::is_same<ctype_src, dt_qint1>::value) &&
IsNotTypeQ4<ctype_dest>::value>::type> {
ctype_dest* dest;
CudaDTypeParam<ctype_src> src_param;
CudaDTypeParam<ctype_dest> dst_param;
using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type;
using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type;
__device__ __forceinline__ ctype_dest apply(ctype_src in) {
float inter = src_param.dequantize(in);
return dst_param.quantize(inter);
}
__device__ __forceinline__ void operator()(uint32_t idx, ctype_src src) {
dest[idx] = dst_param.quantize(src_param.dequantize(src));
}
__device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) {
ctype_dest x = apply(ctype_src(src.x));
ctype_dest y = apply(ctype_src(src.y));
ctype_dest z = apply(ctype_src(src.z));
ctype_dest w = apply(ctype_src(src.w));
*(dst_vect_type*)(&dest[idx]) =
VectTypeTrait<ctype_dest>::make_vector(x, y, z, w);
}
};
template <typename ctype_dest, typename ctype_src>
struct TypeCvtOpFromNormalToQuantized4bit {
CudaDTypeParam<ctype_dest> dst_param;
using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type;
using dst_storage = typename VectTypeTrait<ctype_dest>::Storage;
dst_storage* dest;
__device__ __forceinline__ dst_storage apply(ctype_src in) {
return dst_param.quantize(in).as_storage();
}
__device__ __forceinline__ void operator()(
uint32_t idx, ctype_src src_x, ctype_src src_y) {
dst_storage x = apply(src_x);
dst_storage y = apply(src_y);
*(dst_vect_type*)(&dest[idx]) = VectTypeTrait<ctype_dest>::make_vector(x, y);
}
};
template <typename ctype_dest, typename ctype_src, typename enable = void>
struct TypeCvtOpFromQuantizedToQuantized4bit {
CudaDTypeParam<ctype_src> src_param;
CudaDTypeParam<ctype_dest> dst_param;
using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type;
using dst_storage = typename VectTypeTrait<ctype_dest>::Storage;
dst_storage* dest;
__device__ __forceinline__ dst_storage apply(ctype_src in) {
float inter = src_param.dequantize(in);
return dst_param.quantize(inter).as_storage();
}
__device__ __forceinline__ void operator()(
uint32_t idx, ctype_src src_x, ctype_src src_y) {
dst_storage x = apply(src_x);
dst_storage y = apply(src_y);
*(dst_vect_type*)(&dest[idx]) = VectTypeTrait<ctype_dest>::make_vector(x, y);
}
};
template <typename ctype_dest, typename ctype_src>
struct TypeCvtOpFromQuantizedToQuantized4bit<
ctype_dest, ctype_src,
typename std::enable_if<IsTypeQ4<ctype_src>::value>::type> {
static constexpr bool src_signedness = std::is_same<ctype_src, dt_qint4>::value;
CudaDTypeParam<ctype_src> src_param;
CudaDTypeParam<ctype_dest> dst_param;
using src_vect_type = typename VectTypeTrait<ctype_src>::vect_type;
using dst_vect_type = typename VectTypeTrait<ctype_dest>::vect_type;
using src_storage = typename VectTypeTrait<ctype_src>::Storage;
using dst_storage = typename VectTypeTrait<ctype_dest>::Storage;
dst_storage* dest;
__device__ __forceinline__ dst_storage apply(src_storage in) {
float inter = src_param.dequantize(in);
return dst_param.quantize(inter).as_storage();
}
__device__ __forceinline__ void operator()(uint32_t idx, src_vect_type src) {
dst_storage x = apply(src_storage(
integer_subbyte::unpack_integer_4bits<src_signedness>(src.x, 0)));
dst_storage y = apply(src_storage(
integer_subbyte::unpack_integer_4bits<src_signedness>(src.x, 4)));
*(dst_vect_type*)(&dest[idx]) = VectTypeTrait<ctype_dest>::make_vector(x, y);
}
};
} // anonymous namespace
#define main_func(OpType, body) \
{ \
typedef typename DTypeTrait<dtype_src>::ctype ctype_src; \
typedef typename DTypeTrait<dtype_dest>::ctype ctype_dest; \
typedef OpType<ctype_dest, ctype_src> Op; \
ElemwiseOpParamN<1> param; \
param[0] = src; \
param.init_from_given_tensor(); \
megdnn_assert(DTypeTrait<ctype_src>::enumv == src.layout.dtype.enumv().ev); \
megdnn_assert(DTypeTrait<ctype_dest>::enumv == dest.layout.dtype.enumv().ev); \
Op op; \
op.dest = dest.ptr<ctype_dest>(); \
body; \
return run_elemwise<Op, ctype_src, 1>(param, stream, op); \
}
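// For instance, INST_N2N(dt_float32, dt_float16) instantiates
// typecvt_kern_n2n<dt_float32, dt_float16>, where main_func(TypeCvtOp, )
// wraps src in an ElemwiseOpParamN<1>, asserts that both tensor dtypes match
// the template parameters, points op.dest at the destination tensor and
// dispatches run_elemwise<TypeCvtOp<dt_float16, dt_float32>, dt_float32, 1>.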
namespace megdnn {
namespace cuda {
// Currently only typecvt_kern_{n2q,n2q4} respect this; change the other
// typecvt_kern_* variants if needed.
template <typename dtype_src, typename dtype_dest, typename sfinae = void>
struct enable_typecvt_kern {
static constexpr bool value = true;
};
#define MEGDNN_DISABLE_CUDA_TYPECVT_KERN(dtype_src, dtype_dest) \
template <> \
struct enable_typecvt_kern<dtype_src, dtype_dest, void> { \
static constexpr bool value = false; \
};
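// For example, the expansion of
// MEGDNN_DISABLE_CUDA_TYPECVT_KERN(dt_uint16, dt_quint8) further below
// specializes enable_typecvt_kern<dt_uint16, dt_quint8>::value to false, so
// the uint16 -> quint8 conversion resolves to the megdnn_throw() fallback
// instead of launching a kernel.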
template <typename dtype_src, typename dtype_dest>
void typecvt_kern_q2q(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_src>& src_param,
const CudaDTypeParam<dtype_dest>& dst_param, cudaStream_t stream) {
main_func(TypeCvtOpBetweenQuantized, op.dst_param = dst_param;
op.src_param = src_param;)
}
template <typename dtype_src, typename dtype_dest>
typename std::enable_if<enable_typecvt_kern<dtype_src, dtype_dest>::value>::type
typecvt_kern_n2q_impl(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_dest>& dst_param, cudaStream_t stream) {
main_func(TypeCvtOpToQuantized, op.param = dst_param;);
}
template <typename dtype_src, typename dtype_dest>
typename std::enable_if<!enable_typecvt_kern<dtype_src, dtype_dest>::value>::type
typecvt_kern_n2q_impl(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_dest>& dst_param, cudaStream_t stream) {
megdnn_throw("TypeCvt: CUDA kernel for this dtype pair is disabled");
}
template <typename dtype_src, typename dtype_dest>
void typecvt_kern_n2q(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_dest>& dst_param, cudaStream_t stream) {
typecvt_kern_n2q_impl<dtype_src, dtype_dest>(dest, src, dst_param, stream);
}
template <typename dtype_src, typename dtype_dest>
void typecvt_kern_q2n(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_src>& src_param, cudaStream_t stream) {
main_func(TypeCvtOpFromQuantized, op.param = src_param;);
}
template <typename dtype_src, typename dtype_dest>
void typecvt_kern_n2n(const TensorND& dest, const TensorND& src, cudaStream_t stream) {
main_func(TypeCvtOp, );
}
#define INST_Q2Q(dtype_src, dtype_dest) \
template void typecvt_kern_q2q<dtype_src, dtype_dest>( \
const TensorND& dest, const TensorND& src, \
const CudaDTypeParam<dtype_src>& src_param, \
const CudaDTypeParam<dtype_dest>& dst_param, cudaStream_t stream);
#define INST_Q2N(dtype_src, dtype_dest) \
template void typecvt_kern_q2n<dtype_src, dtype_dest>( \
const TensorND& dest, const TensorND& src, \
const CudaDTypeParam<dtype_src>& src_param, cudaStream_t stream);
#define INST_N2Q(dtype_src, dtype_dest) \
template void typecvt_kern_n2q<dtype_src, dtype_dest>( \
const TensorND& dest, const TensorND& src, \
const CudaDTypeParam<dtype_dest>& dst_param, cudaStream_t stream);
#define INST_N2N(dtype_src, dtype_dest) \
template void typecvt_kern_n2n<dtype_src, dtype_dest>( \
const TensorND& dest, const TensorND& src, cudaStream_t stream);
// clang-format off
#define MEGDNN_FOREACH_COMPUTING_DTYPE_WITH_DTYPE_SRC(dtype_src, cb) \
cb(dtype_src, dt_int8) \
cb(dtype_src, dt_int32) \
cb(dtype_src, dt_int16) \
cb(dtype_src, dt_uint8) \
cb(dtype_src, dt_float32) \
cb(dtype_src, dt_float16) \
cb(dtype_src, dt_bfloat16) \
    cb(dtype_src, dt_bool)
#define MEGDNN_FOREACH_QUANTIZED_DTYPE_WITH_DTYPE_SRC(dtype_src, cb) \
cb(dtype_src, dt_quint8) \
cb(dtype_src, dt_qint32) \
cb(dtype_src, dt_qint8) \
    cb(dtype_src, dt_qint1)
MEGDNN_FOREACH_QUANTIZED_DTYPE_WITH_DTYPE_SRC(dt_uint16, MEGDNN_DISABLE_CUDA_TYPECVT_KERN)
#define INST_SRC_QUANTIZED(dtype_src) \
MEGDNN_FOREACH_COMPUTING_DTYPE_WITH_DTYPE_SRC(dtype_src, INST_Q2N) \
    MEGDNN_FOREACH_QUANTIZED_DTYPE_WITH_DTYPE_SRC(dtype_src, INST_Q2Q)
#define INST_SRC_NORMAL(dtype_src) \
MEGDNN_FOREACH_COMPUTING_DTYPE_WITH_DTYPE_SRC(dtype_src, INST_N2N) \
INST_N2N(dtype_src, dt_uint16) \
    MEGDNN_FOREACH_QUANTIZED_DTYPE_WITH_DTYPE_SRC(dtype_src, INST_N2Q)
#define MEGDNN_FOREACH_COMPUTING_CTYPE(cb) \
cb(dt_int8) \
cb(dt_int32) \
cb(dt_int16) \
cb(dt_uint8) \
cb(dt_float32) \
cb(dt_float16) \
cb(dt_bfloat16) \
    cb(dt_bool)
#define MEGDNN_FOREACH_QUANTIZED_CTYPE(cb) \
cb(dt_quint8) \
cb(dt_qint32) \
cb(dt_qint8) \
cb(dt_qint4) \
cb(dt_quint4) \
cb(dt_qint1)
MEGDNN_FOREACH_QUANTIZED_CTYPE(INST_SRC_QUANTIZED)
MEGDNN_FOREACH_COMPUTING_CTYPE(INST_SRC_NORMAL)
INST_SRC_NORMAL(dt_uint16)
// clang-format on
template void typecvt_kern_n2q<dtype::Int8, dtype::QuantizedS8>(
const TensorND& src, const TensorND& dst, const CudaDTypeParam<dt_qint8>& param,
cudaStream_t stream);
#define main_func_to_q4(OpType, body) \
{ \
typedef typename DTypeTrait<dtype_src>::ctype ctype_src; \
typedef typename DTypeTrait<dtype_dest>::ctype ctype_dest; \
typedef OpType<ctype_dest, ctype_src> Op; \
ElemwiseOpParamN<1> param_src; \
ElemwiseOpParamN<1> param_dst; \
param_src[0] = src; \
param_dst[0] = dest; \
param_src.init_from_given_tensor(); \
param_dst.init_from_given_tensor(); \
megdnn_assert(DTypeTrait<ctype_src>::enumv == src.layout.dtype.enumv().ev); \
megdnn_assert(DTypeTrait<ctype_dest>::enumv == dest.layout.dtype.enumv().ev); \
using dst_storage = typename VectTypeTrait<ctype_dest>::Storage; \
Op op; \
op.dest = reinterpret_cast<dst_storage*>(dest.raw_ptr()); \
body; \
run_elemwise<Op, ctype_src, ctype_dest, 1>(param_src, param_dst, stream, op); \
return; \
}
template <typename dtype_src, typename dtype_dest>
void typecvt_kern_q2q4(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_src>& src_param,
const CudaDTypeParam<dtype_dest>& dst_param, cudaStream_t stream) {
main_func_to_q4(TypeCvtOpFromQuantizedToQuantized4bit, op.dst_param = dst_param;
op.src_param = src_param;)
}
template <typename dtype_src, typename dtype_dest>
typename std::enable_if<enable_typecvt_kern<dtype_src, dtype_dest>::value>::type
typecvt_kern_n2q4_impl(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_dest>& dst_param, cudaStream_t stream) {
main_func_to_q4(TypeCvtOpFromNormalToQuantized4bit, op.dst_param = dst_param;)
}
template <typename dtype_src, typename dtype_dest>
typename std::enable_if<!enable_typecvt_kern<dtype_src, dtype_dest>::value>::type
typecvt_kern_n2q4_impl(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_dest>& dst_param, cudaStream_t stream) {
megdnn_throw("TypeCvt: CUDA kernel for this dtype pair is disabled");
}
template <typename dtype_src, typename dtype_dest>
void typecvt_kern_n2q4(
const TensorND& dest, const TensorND& src,
const CudaDTypeParam<dtype_dest>& dst_param, cudaStream_t stream) {
typecvt_kern_n2q4_impl<dtype_src, dtype_dest>(dest, src, dst_param, stream);
}
#define INST_Q2Q4(dtype_src, dtype_dest) \
template void typecvt_kern_q2q4<dtype_src, dtype_dest>( \
const TensorND& dest, const TensorND& src, \
const CudaDTypeParam<dtype_src>& src_param, \
const CudaDTypeParam<dtype_dest>& dst_param, cudaStream_t stream);
#define INST_N2Q4(dtype_src, dtype_dest) \
template void typecvt_kern_n2q4<dtype_src, dtype_dest>( \
const TensorND& dest, const TensorND& src, \
const CudaDTypeParam<dtype_dest>& dst_param, cudaStream_t stream);
// clang-format off
#define MEGDNN_FOREACH_QUANTIZED_LOWBIT_WITH_DTYPE_SRC(dtype_src, cb) \
cb(dtype_src, dt_qint4) \
    cb(dtype_src, dt_quint4)
MEGDNN_FOREACH_QUANTIZED_LOWBIT_WITH_DTYPE_SRC(dt_uint16, MEGDNN_DISABLE_CUDA_TYPECVT_KERN)
#define INST_SRC_QUANTIZED_LOWBIT(dtype_src) \
    MEGDNN_FOREACH_QUANTIZED_LOWBIT_WITH_DTYPE_SRC(dtype_src, INST_Q2Q4)
#define INST_SRC_NORMAL_LOWBIT(dtype_src) \
    MEGDNN_FOREACH_QUANTIZED_LOWBIT_WITH_DTYPE_SRC(dtype_src, INST_N2Q4)
MEGDNN_FOREACH_QUANTIZED_CTYPE(INST_SRC_QUANTIZED_LOWBIT)
MEGDNN_FOREACH_COMPUTING_CTYPE(INST_SRC_NORMAL_LOWBIT)
INST_SRC_NORMAL_LOWBIT(dt_uint16)
} // namespace cuda
} // namespace megdnn
#include "src/cuda/kernel_common/diagnostic_epilogue.cuh"
// vim: syntax=cpp.doxygen
|
b520ef159010d7afa7f66d21cf90a56a367a3f8c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of BlockReduce utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <hip/device_functions.h>
#include <typeinfo>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <cub/util_ptx.cuh>
#include <hipcub/hipcub.hpp>
#include <cub/util_debug.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_repeat = 0;
CachingDeviceAllocator g_allocator(true);
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
/// Generic reduction (full, 1)
template <typename BlockReduceT, typename T, typename ReductionOp>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T (&data)[1], ReductionOp &reduction_op)
{
return block_reduce.Reduce(data[0], reduction_op);
}
/// Generic reduction (full, ITEMS_PER_THREAD)
template <typename BlockReduceT, typename T, int ITEMS_PER_THREAD, typename ReductionOp>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T (&data)[ITEMS_PER_THREAD], ReductionOp &reduction_op)
{
return block_reduce.Reduce(data, reduction_op);
}
/// Generic reduction (partial, 1)
template <typename BlockReduceT, typename T, typename ReductionOp>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T &data, ReductionOp &reduction_op, int valid_threads)
{
return block_reduce.Reduce(data, reduction_op, valid_threads);
}
/// Sum reduction (full, 1)
template <typename BlockReduceT, typename T>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T (&data)[1], Sum &reduction_op)
{
return block_reduce.Sum(data[0]);
}
/// Sum reduction (full, ITEMS_PER_THREAD)
template <typename BlockReduceT, typename T, int ITEMS_PER_THREAD>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T (&data)[ITEMS_PER_THREAD], Sum &reduction_op)
{
return block_reduce.Sum(data);
}
/// Sum reduction (partial, 1)
template <typename BlockReduceT, typename T>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T &data, Sum &reduction_op, int valid_threads)
{
return block_reduce.Sum(data, valid_threads);
}
/**
* Test full-tile reduction kernel (where num_items is an even
* multiple of BLOCK_THREADS)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
__launch_bounds__ (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z)
__global__ void FullTileReduceKernel(
T *d_in,
T *d_out,
ReductionOp reduction_op,
int tiles,
clock_t *d_elapsed)
{
const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
// Cooperative threadblock reduction utility type (returns aggregate in thread 0)
typedef BlockReduce<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z> BlockReduceT;
// Allocate temp storage in shared memory
__shared__ typename BlockReduceT::TempStorage temp_storage;
int linear_tid = RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
// Per-thread tile data
T data[ITEMS_PER_THREAD];
// Load first tile of data
int block_offset = 0;
if (block_offset < TILE_SIZE * tiles)
{
LoadDirectBlocked(linear_tid, d_in + block_offset, data);
block_offset += TILE_SIZE;
// Start cycle timer
clock_t start = clock();
// Cooperative reduce first tile
BlockReduceT block_reduce(temp_storage) ;
T block_aggregate = DeviceTest(block_reduce, data, reduction_op);
// Stop cycle timer
#if CUB_PTX_ARCH == 100
// Bug: recording stop clock causes mis-write of running prefix value
clock_t stop = 0;
#else
clock_t stop = clock();
#endif // CUB_PTX_ARCH == 100
clock_t elapsed = (start > stop) ? start - stop : stop - start;
// Loop over input tiles
while (block_offset < TILE_SIZE * tiles)
{
// TestBarrier between threadblock reductions
__syncthreads();
// Load tile of data
LoadDirectBlocked(linear_tid, d_in + block_offset, data);
block_offset += TILE_SIZE;
// Start cycle timer
clock_t start = clock();
// Cooperatively reduce the tile's aggregate
BlockReduceT block_reduce(temp_storage) ;
T tile_aggregate = DeviceTest(block_reduce, data, reduction_op);
// Stop cycle timer
#if CUB_PTX_ARCH == 100
// Bug: recording stop clock causes mis-write of running prefix value
clock_t stop = 0;
#else
clock_t stop = clock();
#endif // CUB_PTX_ARCH == 100
elapsed += (start > stop) ? start - stop : stop - start;
// Reduce threadblock aggregate
block_aggregate = reduction_op(block_aggregate, tile_aggregate);
}
// Store data
if (linear_tid == 0)
{
d_out[0] = block_aggregate;
*d_elapsed = elapsed;
}
}
}
/**
* Test partial-tile reduction kernel (where num_items < BLOCK_THREADS)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
typename T,
typename ReductionOp>
__launch_bounds__ (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z)
__global__ void PartialTileReduceKernel(
T *d_in,
T *d_out,
int num_items,
ReductionOp reduction_op,
clock_t *d_elapsed)
{
// Cooperative threadblock reduction utility type (returns aggregate only in thread-0)
typedef BlockReduce<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z> BlockReduceT;
// Allocate temp storage in shared memory
__shared__ typename BlockReduceT::TempStorage temp_storage;
int linear_tid = RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
// Per-thread tile data
T partial;
// Load partial tile data
if (linear_tid < num_items)
{
partial = d_in[linear_tid];
}
// Start cycle timer
clock_t start = clock();
// Cooperatively reduce the tile's aggregate
BlockReduceT block_reduce(temp_storage) ;
T tile_aggregate = DeviceTest(block_reduce, partial, reduction_op, num_items);
// Stop cycle timer
#if CUB_PTX_ARCH == 100
// Bug: recording stop clock causes mis-write of running prefix value
clock_t stop = 0;
#else
clock_t stop = clock();
#endif // CUB_PTX_ARCH == 100
clock_t elapsed = (start > stop) ? start - stop : stop - start;
// Store data
if (linear_tid == 0)
{
d_out[0] = tile_aggregate;
*d_elapsed = elapsed;
}
}
//---------------------------------------------------------------------
// Host utility subroutines
//---------------------------------------------------------------------
/**
* Initialize problem (and solution)
*/
template <
typename T,
typename ReductionOp>
void Initialize(
GenMode gen_mode,
T *h_in,
T h_reference[1],
ReductionOp reduction_op,
int num_items)
{
for (int i = 0; i < num_items; ++i)
{
InitValue(gen_mode, h_in[i], i);
if (i == 0)
h_reference[0] = h_in[0];
else
h_reference[0] = reduction_op(h_reference[0], h_in[i]);
}
if (g_verbose)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n");
}
}
//---------------------------------------------------------------------
// Full tile test generation
//---------------------------------------------------------------------
/**
* Test full-tile reduction. (Specialized for sufficient resources)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op,
Int2Type<true> sufficient_resources)
{
const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
int num_items = TILE_SIZE * tiles;
// Allocate host arrays
T *h_in = new T[num_items];
T h_reference[1];
// Initialize problem
Initialize(gen_mode, h_in, h_reference, reduction_op, num_items);
// Initialize/clear device arrays
T *d_in = NULL;
T *d_out = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(unsigned long long)));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * 1));
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * num_items, hipMemcpyHostToDevice));
CubDebugExit(hipMemset(d_out, 0, sizeof(T) * 1));
// Test multi-tile (unguarded)
printf("TestFullTile %s, %s, gen-mode %d, num_items(%d), BLOCK_THREADS(%d) (%d,%d,%d), ITEMS_PER_THREAD(%d), tiles(%d), %s (%d bytes) elements:\n",
Equals<ReductionOp, Sum>::VALUE ? "Sum" : "Max",
(ALGORITHM == BLOCK_REDUCE_RAKING) ? "BLOCK_REDUCE_RAKING" : (ALGORITHM == BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY) ? "BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY" : "BLOCK_REDUCE_WARP_REDUCTIONS",
gen_mode,
num_items,
BLOCK_THREADS, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z,
ITEMS_PER_THREAD,
tiles,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
dim3 block_dims(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
hipLaunchKernelGGL(( FullTileReduceKernel<ALGORITHM, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, ITEMS_PER_THREAD>), dim3(1), dim3(block_dims), 0, 0,
d_in,
d_out,
reduction_op,
tiles,
d_elapsed);
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
// Copy out and display results
printf("\tReduction results: ");
int compare = CompareDeviceResults(h_reference, d_out, 1, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Cleanup
if (h_in) delete[] h_in;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Test full-tile reduction. (Specialized for insufficient resources)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op,
Int2Type<false> sufficient_resources)
{}
/**
* Test full-tile reduction.
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op)
{
// Check size of smem storage for the target arch to make sure it will fit
typedef BlockReduce<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z, TEST_ARCH> BlockReduceT;
enum
{
#if defined(SM100) || defined(SM110) || defined(SM130)
sufficient_smem = (sizeof(typename BlockReduceT::TempStorage) <= 16 * 1024),
sufficient_threads = ((BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 512),
#else
sufficient_smem = (sizeof(typename BlockReduceT::TempStorage) <= 48 * 1024),
sufficient_threads = ((BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 1024),
#endif
};
TestFullTile<ALGORITHM, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, ITEMS_PER_THREAD, T>(gen_mode, tiles, reduction_op, Int2Type<sufficient_smem && sufficient_threads>());
}
/**
* Run battery of tests for different threadblock dimensions
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op)
{
TestFullTile<ALGORITHM, BLOCK_THREADS, 1, 1, ITEMS_PER_THREAD, T>(gen_mode, tiles, reduction_op);
TestFullTile<ALGORITHM, BLOCK_THREADS, 2, 2, ITEMS_PER_THREAD, T>(gen_mode, tiles, reduction_op);
}
/**
* Run battery of tests for different thread items
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op)
{
TestFullTile<ALGORITHM, BLOCK_THREADS, 1, T>(gen_mode, tiles, reduction_op);
TestFullTile<ALGORITHM, BLOCK_THREADS, 4, T>(gen_mode, tiles, reduction_op);
}
/**
* Run battery of full-tile tests for different numbers of tiles
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
ReductionOp reduction_op)
{
for (int tiles = 1; tiles < 3; tiles++)
{
TestFullTile<ALGORITHM, BLOCK_THREADS, T>(gen_mode, tiles, reduction_op);
}
}
//---------------------------------------------------------------------
// Partial-tile test generation
//---------------------------------------------------------------------
/**
* Test partial-tile reduction. (Specialized for sufficient resources)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
typename T,
typename ReductionOp>
void TestPartialTile(
GenMode gen_mode,
int num_items,
ReductionOp reduction_op,
Int2Type<true> sufficient_resources)
{
const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
const int TILE_SIZE = BLOCK_THREADS;
// Allocate host arrays
T *h_in = new T[num_items];
T h_reference[1];
// Initialize problem
Initialize(gen_mode, h_in, h_reference, reduction_op, num_items);
// Initialize/clear device arrays
T *d_in = NULL;
T *d_out = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(unsigned long long)));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * TILE_SIZE));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * 1));
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * num_items, hipMemcpyHostToDevice));
CubDebugExit(hipMemset(d_out, 0, sizeof(T) * 1));
printf("TestPartialTile %s, gen-mode %d, num_items(%d), BLOCK_THREADS(%d) (%d,%d,%d), %s (%d bytes) elements:\n",
(ALGORITHM == BLOCK_REDUCE_RAKING) ? "BLOCK_REDUCE_RAKING" : (ALGORITHM == BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY) ? "BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY" : "BLOCK_REDUCE_WARP_REDUCTIONS",
gen_mode,
num_items,
BLOCK_THREADS, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
dim3 block_dims(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
hipLaunchKernelGGL(( PartialTileReduceKernel<ALGORITHM, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z>), dim3(1), dim3(block_dims), 0, 0,
d_in,
d_out,
num_items,
reduction_op,
d_elapsed);
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
// Copy out and display results
printf("\tReduction results: ");
int compare = CompareDeviceResults(h_reference, d_out, 1, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Cleanup
if (h_in) delete[] h_in;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Test partial-tile reduction (specialized for insufficient resources)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
typename T,
typename ReductionOp>
void TestPartialTile(
GenMode gen_mode,
int num_items,
ReductionOp reduction_op,
Int2Type<false> sufficient_resources)
{}
/**
* Run battery of partial-tile tests for different numbers of effective threads and thread dimensions
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
typename T,
typename ReductionOp>
void TestPartialTile(
GenMode gen_mode,
int num_items,
ReductionOp reduction_op)
{
// Check size of smem storage for the target arch to make sure it will fit
typedef BlockReduce<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z, TEST_ARCH> BlockReduceT;
enum
{
#if defined(SM100) || defined(SM110) || defined(SM130)
sufficient_smem = sizeof(typename BlockReduceT::TempStorage) <= 16 * 1024,
sufficient_threads = (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 512,
#else
sufficient_smem = sizeof(typename BlockReduceT::TempStorage) <= 48 * 1024,
sufficient_threads = (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 1024,
#endif
};
TestPartialTile<ALGORITHM, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, T>(gen_mode, num_items, reduction_op, Int2Type<sufficient_smem && sufficient_threads>());
}
/**
* Run battery of partial-tile tests for different numbers of effective threads and thread dimensions
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void TestPartialTile(
GenMode gen_mode,
ReductionOp reduction_op)
{
for (
int num_items = 1;
num_items < BLOCK_THREADS;
num_items += CUB_MAX(1, BLOCK_THREADS / 5))
{
TestPartialTile<ALGORITHM, BLOCK_THREADS, 1, 1, T>(gen_mode, num_items, reduction_op);
TestPartialTile<ALGORITHM, BLOCK_THREADS, 2, 2, T>(gen_mode, num_items, reduction_op);
}
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Run battery of full-tile tests for different gen modes
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void Test(
ReductionOp reduction_op)
{
TestFullTile<ALGORITHM, BLOCK_THREADS, T>(UNIFORM, reduction_op);
TestPartialTile<ALGORITHM, BLOCK_THREADS, T>(UNIFORM, reduction_op);
TestFullTile<ALGORITHM, BLOCK_THREADS, T>(INTEGER_SEED, reduction_op);
TestPartialTile<ALGORITHM, BLOCK_THREADS, T>(INTEGER_SEED, reduction_op);
if (Traits<T>::CATEGORY != FLOATING_POINT)
{
// Don't test randomly-generated floats b/c of stability
TestFullTile<ALGORITHM, BLOCK_THREADS, T>(RANDOM, reduction_op);
TestPartialTile<ALGORITHM, BLOCK_THREADS, T>(RANDOM, reduction_op);
}
}
/**
* Run battery of tests for different block-reduction algorithmic variants
*/
template <
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void Test(
ReductionOp reduction_op)
{
#ifdef TEST_RAKING
Test<BLOCK_REDUCE_RAKING, BLOCK_THREADS, T>(reduction_op);
Test<BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, BLOCK_THREADS, T>(reduction_op);
#endif
#ifdef TEST_WARP_REDUCTIONS
Test<BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_THREADS, T>(reduction_op);
#endif
}
/**
* Run battery of tests for different block sizes
*/
template <
typename T,
typename ReductionOp>
void Test(
ReductionOp reduction_op)
{
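    // The block sizes below deliberately include sub-warp and non-power-of-two values
    // (7, 63, 97, 238) to exercise partially-full warps and raking segments.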
Test<7, T>(reduction_op);
Test<32, T>(reduction_op);
Test<63, T>(reduction_op);
Test<97, T>(reduction_op);
Test<128, T>(reduction_op);
Test<238, T>(reduction_op);
}
/**
* Run battery of tests for different block sizes
*/
template <typename T>
void Test()
{
Test<T>(Sum());
Test<T>(Max());
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("repeat", g_repeat);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>]"
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Get ptx version
int ptx_version;
CubDebugExit(PtxVersion(ptx_version));
#ifdef QUICK_TEST
// Compile/run quick tests
printf("\n full tile ------------------------\n\n");
TestFullTile<BLOCK_REDUCE_RAKING, 128, 1, 1, 4, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, 128, 1, 1, 4, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_WARP_REDUCTIONS, 128, 1, 1, 4, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_RAKING, 128, 1, 1, 1, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, 128, 1, 1, 1, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_WARP_REDUCTIONS, 128, 1, 1, 1, int>(RANDOM, 1, Sum());
printf("\n partial tile ------------------------\n\n");
TestPartialTile<BLOCK_REDUCE_RAKING, 128, 1, 1, int>(RANDOM, 7, Sum());
TestPartialTile<BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, 128, 1, 1, int>(RANDOM, 7, Sum());
TestPartialTile<BLOCK_REDUCE_WARP_REDUCTIONS, 128, 1, 1, int>(RANDOM, 7, Sum());
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
// primitives
Test<char>();
Test<short>();
Test<int>();
Test<long long>();
if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted
Test<double>();
Test<float>();
// vector types
Test<char2>();
Test<short2>();
Test<int2>();
Test<longlong2>();
Test<char4>();
Test<short4>();
Test<int4>();
Test<longlong4>();
// Complex types
Test<TestFoo>();
Test<TestBar>();
}
#endif
return 0;
}
| b520ef159010d7afa7f66d21cf90a56a367a3f8c.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of BlockReduce utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <device_functions.h>
#include <typeinfo>
#include <cub/block/block_reduce.cuh>
#include <cub/block/block_load.cuh>
#include <cub/util_ptx.cuh>
#include <cub/util_allocator.cuh>
#include <cub/util_debug.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_repeat = 0;
CachingDeviceAllocator g_allocator(true);
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
/// Generic reduction (full, 1)
template <typename BlockReduceT, typename T, typename ReductionOp>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T (&data)[1], ReductionOp &reduction_op)
{
return block_reduce.Reduce(data[0], reduction_op);
}
/// Generic reduction (full, ITEMS_PER_THREAD)
template <typename BlockReduceT, typename T, int ITEMS_PER_THREAD, typename ReductionOp>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T (&data)[ITEMS_PER_THREAD], ReductionOp &reduction_op)
{
return block_reduce.Reduce(data, reduction_op);
}
/// Generic reduction (partial, 1)
template <typename BlockReduceT, typename T, typename ReductionOp>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T &data, ReductionOp &reduction_op, int valid_threads)
{
return block_reduce.Reduce(data, reduction_op, valid_threads);
}
/// Sum reduction (full, 1)
template <typename BlockReduceT, typename T>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T (&data)[1], Sum &reduction_op)
{
return block_reduce.Sum(data[0]);
}
/// Sum reduction (full, ITEMS_PER_THREAD)
template <typename BlockReduceT, typename T, int ITEMS_PER_THREAD>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T (&data)[ITEMS_PER_THREAD], Sum &reduction_op)
{
return block_reduce.Sum(data);
}
/// Sum reduction (partial, 1)
template <typename BlockReduceT, typename T>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T &data, Sum &reduction_op, int valid_threads)
{
return block_reduce.Sum(data, valid_threads);
}
/**
* Test full-tile reduction kernel (where num_items is an even
* multiple of BLOCK_THREADS)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
__launch_bounds__ (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z)
__global__ void FullTileReduceKernel(
T *d_in,
T *d_out,
ReductionOp reduction_op,
int tiles,
clock_t *d_elapsed)
{
const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
// Cooperative threadblock reduction utility type (returns aggregate in thread 0)
typedef BlockReduce<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z> BlockReduceT;
// Allocate temp storage in shared memory
__shared__ typename BlockReduceT::TempStorage temp_storage;
int linear_tid = RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
// Per-thread tile data
T data[ITEMS_PER_THREAD];
// Load first tile of data
int block_offset = 0;
if (block_offset < TILE_SIZE * tiles)
{
LoadDirectBlocked(linear_tid, d_in + block_offset, data);
block_offset += TILE_SIZE;
// Start cycle timer
clock_t start = clock();
// Cooperative reduce first tile
BlockReduceT block_reduce(temp_storage) ;
T block_aggregate = DeviceTest(block_reduce, data, reduction_op);
// Stop cycle timer
#if CUB_PTX_ARCH == 100
// Bug: recording stop clock causes mis-write of running prefix value
clock_t stop = 0;
#else
clock_t stop = clock();
#endif // CUB_PTX_ARCH == 100
clock_t elapsed = (start > stop) ? start - stop : stop - start;
// Loop over input tiles
while (block_offset < TILE_SIZE * tiles)
{
// TestBarrier between threadblock reductions
__syncthreads();
// Load tile of data
LoadDirectBlocked(linear_tid, d_in + block_offset, data);
block_offset += TILE_SIZE;
// Start cycle timer
clock_t start = clock();
// Cooperatively reduce the tile's aggregate
BlockReduceT block_reduce(temp_storage) ;
T tile_aggregate = DeviceTest(block_reduce, data, reduction_op);
// Stop cycle timer
#if CUB_PTX_ARCH == 100
// Bug: recording stop clock causes mis-write of running prefix value
clock_t stop = 0;
#else
clock_t stop = clock();
#endif // CUB_PTX_ARCH == 100
elapsed += (start > stop) ? start - stop : stop - start;
// Reduce threadblock aggregate
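            // BlockReduce returns the aggregate only to thread 0, so only thread 0's
            // running block_aggregate is meaningful here.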
block_aggregate = reduction_op(block_aggregate, tile_aggregate);
}
// Store data
if (linear_tid == 0)
{
d_out[0] = block_aggregate;
*d_elapsed = elapsed;
}
}
}
/**
* Test partial-tile reduction kernel (where num_items < BLOCK_THREADS)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
typename T,
typename ReductionOp>
__launch_bounds__ (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z)
__global__ void PartialTileReduceKernel(
T *d_in,
T *d_out,
int num_items,
ReductionOp reduction_op,
clock_t *d_elapsed)
{
// Cooperative threadblock reduction utility type (returns aggregate only in thread-0)
typedef BlockReduce<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z> BlockReduceT;
// Allocate temp storage in shared memory
__shared__ typename BlockReduceT::TempStorage temp_storage;
int linear_tid = RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
// Per-thread tile data
T partial;
// Load partial tile data
if (linear_tid < num_items)
{
partial = d_in[linear_tid];
}
// Start cycle timer
clock_t start = clock();
// Cooperatively reduce the tile's aggregate
BlockReduceT block_reduce(temp_storage) ;
T tile_aggregate = DeviceTest(block_reduce, partial, reduction_op, num_items);
// Stop cycle timer
#if CUB_PTX_ARCH == 100
// Bug: recording stop clock causes mis-write of running prefix value
clock_t stop = 0;
#else
clock_t stop = clock();
#endif // CUB_PTX_ARCH == 100
clock_t elapsed = (start > stop) ? start - stop : stop - start;
// Store data
if (linear_tid == 0)
{
d_out[0] = tile_aggregate;
*d_elapsed = elapsed;
}
}
//---------------------------------------------------------------------
// Host utility subroutines
//---------------------------------------------------------------------
/**
* Initialize problem (and solution)
*/
template <
typename T,
typename ReductionOp>
void Initialize(
GenMode gen_mode,
T *h_in,
T h_reference[1],
ReductionOp reduction_op,
int num_items)
{
for (int i = 0; i < num_items; ++i)
{
InitValue(gen_mode, h_in[i], i);
if (i == 0)
h_reference[0] = h_in[0];
else
h_reference[0] = reduction_op(h_reference[0], h_in[i]);
}
if (g_verbose)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n");
}
}
//---------------------------------------------------------------------
// Full tile test generation
//---------------------------------------------------------------------
/**
* Test full-tile reduction. (Specialized for sufficient resources)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op,
Int2Type<true> sufficient_resources)
{
const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
int num_items = TILE_SIZE * tiles;
// Allocate host arrays
T *h_in = new T[num_items];
T h_reference[1];
// Initialize problem
Initialize(gen_mode, h_in, h_reference, reduction_op, num_items);
// Initialize/clear device arrays
T *d_in = NULL;
T *d_out = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(unsigned long long)));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * 1));
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * num_items, cudaMemcpyHostToDevice));
CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * 1));
// Test multi-tile (unguarded)
printf("TestFullTile %s, %s, gen-mode %d, num_items(%d), BLOCK_THREADS(%d) (%d,%d,%d), ITEMS_PER_THREAD(%d), tiles(%d), %s (%d bytes) elements:\n",
Equals<ReductionOp, Sum>::VALUE ? "Sum" : "Max",
(ALGORITHM == BLOCK_REDUCE_RAKING) ? "BLOCK_REDUCE_RAKING" : (ALGORITHM == BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY) ? "BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY" : "BLOCK_REDUCE_WARP_REDUCTIONS",
gen_mode,
num_items,
BLOCK_THREADS, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z,
ITEMS_PER_THREAD,
tiles,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
dim3 block_dims(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
FullTileReduceKernel<ALGORITHM, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, ITEMS_PER_THREAD><<<1, block_dims>>>(
d_in,
d_out,
reduction_op,
tiles,
d_elapsed);
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Copy out and display results
printf("\tReduction results: ");
int compare = CompareDeviceResults(h_reference, d_out, 1, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Cleanup
if (h_in) delete[] h_in;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Test full-tile reduction. (Specialized for insufficient resources)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op,
Int2Type<false> sufficient_resources)
{}
/**
* Test full-tile reduction.
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op)
{
// Check size of smem storage for the target arch to make sure it will fit
typedef BlockReduce<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z, TEST_ARCH> BlockReduceT;
enum
{
#if defined(SM100) || defined(SM110) || defined(SM130)
sufficient_smem = (sizeof(typename BlockReduceT::TempStorage) <= 16 * 1024),
sufficient_threads = ((BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 512),
#else
sufficient_smem = (sizeof(typename BlockReduceT::TempStorage) <= 48 * 1024),
sufficient_threads = ((BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 1024),
#endif
};
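    // Tag-dispatch on Int2Type<...>: configurations exceeding the target arch's shared-memory
    // or thread limits resolve to the empty "insufficient resources" overload and are skipped.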
TestFullTile<ALGORITHM, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, ITEMS_PER_THREAD, T>(gen_mode, tiles, reduction_op, Int2Type<sufficient_smem && sufficient_threads>());
}
/**
* Run battery of tests for different threadblock dimensions
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op)
{
TestFullTile<ALGORITHM, BLOCK_THREADS, 1, 1, ITEMS_PER_THREAD, T>(gen_mode, tiles, reduction_op);
TestFullTile<ALGORITHM, BLOCK_THREADS, 2, 2, ITEMS_PER_THREAD, T>(gen_mode, tiles, reduction_op);
}
/**
* Run battery of tests for different thread items
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op)
{
TestFullTile<ALGORITHM, BLOCK_THREADS, 1, T>(gen_mode, tiles, reduction_op);
TestFullTile<ALGORITHM, BLOCK_THREADS, 4, T>(gen_mode, tiles, reduction_op);
}
/**
* Run battery of full-tile tests for different numbers of tiles
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
ReductionOp reduction_op)
{
for (int tiles = 1; tiles < 3; tiles++)
{
TestFullTile<ALGORITHM, BLOCK_THREADS, T>(gen_mode, tiles, reduction_op);
}
}
//---------------------------------------------------------------------
// Partial-tile test generation
//---------------------------------------------------------------------
/**
* Test partial-tile reduction. (Specialized for sufficient resources)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
typename T,
typename ReductionOp>
void TestPartialTile(
GenMode gen_mode,
int num_items,
ReductionOp reduction_op,
Int2Type<true> sufficient_resources)
{
const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
const int TILE_SIZE = BLOCK_THREADS;
// Allocate host arrays
T *h_in = new T[num_items];
T h_reference[1];
// Initialize problem
Initialize(gen_mode, h_in, h_reference, reduction_op, num_items);
// Initialize/clear device arrays
T *d_in = NULL;
T *d_out = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(unsigned long long)));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * TILE_SIZE));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * 1));
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * num_items, cudaMemcpyHostToDevice));
CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * 1));
printf("TestPartialTile %s, gen-mode %d, num_items(%d), BLOCK_THREADS(%d) (%d,%d,%d), %s (%d bytes) elements:\n",
(ALGORITHM == BLOCK_REDUCE_RAKING) ? "BLOCK_REDUCE_RAKING" : (ALGORITHM == BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY) ? "BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY" : "BLOCK_REDUCE_WARP_REDUCTIONS",
gen_mode,
num_items,
BLOCK_THREADS, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
dim3 block_dims(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
PartialTileReduceKernel<ALGORITHM, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z><<<1, block_dims>>>(
d_in,
d_out,
num_items,
reduction_op,
d_elapsed);
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Copy out and display results
printf("\tReduction results: ");
int compare = CompareDeviceResults(h_reference, d_out, 1, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Cleanup
if (h_in) delete[] h_in;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Test partial-tile reduction (specialized for insufficient resources)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
typename T,
typename ReductionOp>
void TestPartialTile(
GenMode gen_mode,
int num_items,
ReductionOp reduction_op,
Int2Type<false> sufficient_resources)
{}
/**
* Run battery of partial-tile tests for different numbers of effective threads and thread dimensions
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
typename T,
typename ReductionOp>
void TestPartialTile(
GenMode gen_mode,
int num_items,
ReductionOp reduction_op)
{
// Check size of smem storage for the target arch to make sure it will fit
typedef BlockReduce<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z, TEST_ARCH> BlockReduceT;
enum
{
#if defined(SM100) || defined(SM110) || defined(SM130)
sufficient_smem = sizeof(typename BlockReduceT::TempStorage) <= 16 * 1024,
sufficient_threads = (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 512,
#else
sufficient_smem = sizeof(typename BlockReduceT::TempStorage) <= 48 * 1024,
sufficient_threads = (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 1024,
#endif
};
TestPartialTile<ALGORITHM, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, T>(gen_mode, num_items, reduction_op, Int2Type<sufficient_smem && sufficient_threads>());
}
/**
* Run battery of partial-tile tests for different numbers of effective threads and thread dimensions
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void TestPartialTile(
GenMode gen_mode,
ReductionOp reduction_op)
{
for (
int num_items = 1;
num_items < BLOCK_THREADS;
num_items += CUB_MAX(1, BLOCK_THREADS / 5))
{
TestPartialTile<ALGORITHM, BLOCK_THREADS, 1, 1, T>(gen_mode, num_items, reduction_op);
TestPartialTile<ALGORITHM, BLOCK_THREADS, 2, 2, T>(gen_mode, num_items, reduction_op);
}
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Run battery of full-tile tests for different gen modes
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void Test(
ReductionOp reduction_op)
{
TestFullTile<ALGORITHM, BLOCK_THREADS, T>(UNIFORM, reduction_op);
TestPartialTile<ALGORITHM, BLOCK_THREADS, T>(UNIFORM, reduction_op);
TestFullTile<ALGORITHM, BLOCK_THREADS, T>(INTEGER_SEED, reduction_op);
TestPartialTile<ALGORITHM, BLOCK_THREADS, T>(INTEGER_SEED, reduction_op);
if (Traits<T>::CATEGORY != FLOATING_POINT)
{
// Don't test randomly-generated floats b/c of stability
TestFullTile<ALGORITHM, BLOCK_THREADS, T>(RANDOM, reduction_op);
TestPartialTile<ALGORITHM, BLOCK_THREADS, T>(RANDOM, reduction_op);
}
}
/**
* Run battery of tests for different block-reduction algorithmic variants
*/
template <
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void Test(
ReductionOp reduction_op)
{
#ifdef TEST_RAKING
Test<BLOCK_REDUCE_RAKING, BLOCK_THREADS, T>(reduction_op);
Test<BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, BLOCK_THREADS, T>(reduction_op);
#endif
#ifdef TEST_WARP_REDUCTIONS
Test<BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_THREADS, T>(reduction_op);
#endif
}
/**
* Run battery of tests for different block sizes
*/
template <
typename T,
typename ReductionOp>
void Test(
ReductionOp reduction_op)
{
Test<7, T>(reduction_op);
Test<32, T>(reduction_op);
Test<63, T>(reduction_op);
Test<97, T>(reduction_op);
Test<128, T>(reduction_op);
Test<238, T>(reduction_op);
}
/**
* Run battery of tests for different block sizes
*/
template <typename T>
void Test()
{
Test<T>(Sum());
Test<T>(Max());
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("repeat", g_repeat);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>]"
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Get ptx version
int ptx_version;
CubDebugExit(PtxVersion(ptx_version));
#ifdef QUICK_TEST
// Compile/run quick tests
printf("\n full tile ------------------------\n\n");
TestFullTile<BLOCK_REDUCE_RAKING, 128, 1, 1, 4, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, 128, 1, 1, 4, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_WARP_REDUCTIONS, 128, 1, 1, 4, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_RAKING, 128, 1, 1, 1, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, 128, 1, 1, 1, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_WARP_REDUCTIONS, 128, 1, 1, 1, int>(RANDOM, 1, Sum());
printf("\n partial tile ------------------------\n\n");
TestPartialTile<BLOCK_REDUCE_RAKING, 128, 1, 1, int>(RANDOM, 7, Sum());
TestPartialTile<BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, 128, 1, 1, int>(RANDOM, 7, Sum());
TestPartialTile<BLOCK_REDUCE_WARP_REDUCTIONS, 128, 1, 1, int>(RANDOM, 7, Sum());
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
// primitives
Test<char>();
Test<short>();
Test<int>();
Test<long long>();
if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted
Test<double>();
Test<float>();
// vector types
Test<char2>();
Test<short2>();
Test<int2>();
Test<longlong2>();
Test<char4>();
Test<short4>();
Test<int4>();
Test<longlong4>();
// Complex types
Test<TestFoo>();
Test<TestBar>();
}
#endif
return 0;
}
|
c1f87fa5fdd7def94396dbb215f6f5bcd87a4be4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2019 Xilinx Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
//#include <float.h>
#include "caffe/layers/conv_fixed_layer.hpp"
#include "caffe/util/quantize.hpp"
namespace caffe {
template <typename Dtype>
void ConvolutionFixedLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) {
auto fixed_blobs = fixed_forward_conv_layer_->blobs();
if (!enable_fix_) {
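    // Fixed-point emulation disabled: forward the float weights/bias unchanged through the
    // internal convolution layer.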
caffe_copy(this->blobs_[0]->count(), this->blobs_[0]->gpu_data(),
fixed_blobs[0]->mutable_gpu_data());
if (this->bias_term_) {
caffe_copy(this->blobs_[1]->count(), this->blobs_[1]->gpu_data(),
fixed_blobs[1]->mutable_gpu_data());
}
fixed_forward_conv_layer_->Forward(bottom, top);
return;
}
// Fix weights and bias
if (this->phase_ == TRAIN || this->iter() == 0) {
if (this->fixed_method_ == FixedParameter_FixedMethod_OVER_FLOW) {
this->weight_dec_pos_ = (int)::floor(caffe_gpu_fix_pos_overflow(
this->blobs_[0]->count(), this->blobs_[0]->gpu_data(),
this->bit_width_));
if (this->bias_term_) {
this->bias_dec_pos_ = (int)::floor(caffe_gpu_fix_pos_overflow(
this->blobs_[1]->count(), this->blobs_[1]->gpu_data(),
this->bit_width_));
}
} else if (this->fixed_method_ == FixedParameter_FixedMethod_DIFF_S) {
this->weight_dec_pos_ = (int)::floor(caffe_gpu_fix_pos_diffs(
this->blobs_[0]->count(), this->blobs_[0]->gpu_data(),
this->bit_width_));
if (this->bias_term_) {
this->bias_dec_pos_ = (int)::floor(caffe_gpu_fix_pos_diffs(
this->blobs_[1]->count(), this->blobs_[1]->gpu_data(),
this->bit_width_));
}
} else {
LOG(FATAL) << "Unknown fixed method: " << this->fixed_method_;
}
caffe_gpu_fix(this->blobs_[0]->count(), this->blobs_[0]->gpu_data(),
fixed_blobs[0]->mutable_gpu_data(), this->bit_width_,
this->weight_dec_pos_);
if (this->bias_term_) {
caffe_gpu_fix(this->blobs_[1]->count(), this->blobs_[1]->gpu_data(),
fixed_blobs[1]->mutable_gpu_data(), this->bit_width_,
this->bias_dec_pos_);
}
} else if (this->phase_ == TEST) {
}
fixed_forward_conv_layer_->Forward(bottom, top);
}
template <typename Dtype>
void ConvolutionFixedLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down,
const vector<Blob<Dtype> *> &bottom) {
ConvolutionLayer<Dtype>::Backward_gpu(top, propagate_down, bottom);
}
INSTANTIATE_LAYER_GPU_FUNCS(ConvolutionFixedLayer);
} // namespace caffe
| c1f87fa5fdd7def94396dbb215f6f5bcd87a4be4.cu | /*
* Copyright 2019 Xilinx Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
//#include <float.h>
#include "caffe/layers/conv_fixed_layer.hpp"
#include "caffe/util/quantize.hpp"
namespace caffe {
template <typename Dtype>
void ConvolutionFixedLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) {
auto fixed_blobs = fixed_forward_conv_layer_->blobs();
if (!enable_fix_) {
caffe_copy(this->blobs_[0]->count(), this->blobs_[0]->gpu_data(),
fixed_blobs[0]->mutable_gpu_data());
if (this->bias_term_) {
caffe_copy(this->blobs_[1]->count(), this->blobs_[1]->gpu_data(),
fixed_blobs[1]->mutable_gpu_data());
}
fixed_forward_conv_layer_->Forward(bottom, top);
return;
}
// Fix weights and bias
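  // The decimal (fixed-point) positions are re-estimated from the current weights during
  // training (and once at iteration 0), then weights and bias are quantized into the shadow
  // blobs consumed by fixed_forward_conv_layer_.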
if (this->phase_ == TRAIN || this->iter() == 0) {
if (this->fixed_method_ == FixedParameter_FixedMethod_OVER_FLOW) {
this->weight_dec_pos_ = (int)std::floor(caffe_gpu_fix_pos_overflow(
this->blobs_[0]->count(), this->blobs_[0]->gpu_data(),
this->bit_width_));
if (this->bias_term_) {
this->bias_dec_pos_ = (int)std::floor(caffe_gpu_fix_pos_overflow(
this->blobs_[1]->count(), this->blobs_[1]->gpu_data(),
this->bit_width_));
}
} else if (this->fixed_method_ == FixedParameter_FixedMethod_DIFF_S) {
this->weight_dec_pos_ = (int)std::floor(caffe_gpu_fix_pos_diffs(
this->blobs_[0]->count(), this->blobs_[0]->gpu_data(),
this->bit_width_));
if (this->bias_term_) {
this->bias_dec_pos_ = (int)std::floor(caffe_gpu_fix_pos_diffs(
this->blobs_[1]->count(), this->blobs_[1]->gpu_data(),
this->bit_width_));
}
} else {
LOG(FATAL) << "Unknown fixed method: " << this->fixed_method_;
}
caffe_gpu_fix(this->blobs_[0]->count(), this->blobs_[0]->gpu_data(),
fixed_blobs[0]->mutable_gpu_data(), this->bit_width_,
this->weight_dec_pos_);
if (this->bias_term_) {
caffe_gpu_fix(this->blobs_[1]->count(), this->blobs_[1]->gpu_data(),
fixed_blobs[1]->mutable_gpu_data(), this->bit_width_,
this->bias_dec_pos_);
}
} else if (this->phase_ == TEST) {
}
fixed_forward_conv_layer_->Forward(bottom, top);
}
template <typename Dtype>
void ConvolutionFixedLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down,
const vector<Blob<Dtype> *> &bottom) {
ConvolutionLayer<Dtype>::Backward_gpu(top, propagate_down, bottom);
}
INSTANTIATE_LAYER_GPU_FUNCS(ConvolutionFixedLayer);
} // namespace caffe
|
0fea4207e2184c0e66b4f0e333ad0d90966951f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void pool(unsigned char* image, unsigned char* new_image, unsigned height, unsigned width, int thread_count)
{
// process image
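    // 2x2 max-pooling of an RGBA8 image: width/height describe the input, i is the byte offset
    // of one output pixel (the output is half resolution), and each channel takes the max of
    // the four corresponding input pixels.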
int offset = (blockIdx.x * blockDim.x + threadIdx.x)*4;
for (int i = offset; i < (width*height); i+=(thread_count*4) )
{
int x = i % (width * 2) * 2;
int y = i / (width * 2);
int p1 = 8 * width * y + x;
int p2 = 8 * width * y + x + 4;
int p3 = 8 * width * y + x + 4 * width;
int p4 = 8 * width * y + x + 4 * width + 4;
unsigned r[] = { image[p1], image[p2], image[p3], image[p4] };
unsigned g[] = { image[p1+1], image[p2+1], image[p3+1], image[p4+1] };
unsigned b[] = { image[p1+2], image[p2+2], image[p3+2], image[p4+2] };
unsigned a[] = { image[p1+3], image[p2+3], image[p3+3], image[p4+3] };
int rMax = r[0];
int gMax = g[0];
int bMax = b[0];
int aMax = a[0];
for (int j = 1; j < 4; j++ )
{
if (r[j] > rMax) rMax = r[j];
if (g[j] > gMax) gMax = g[j];
if (b[j] > bMax) bMax = b[j];
if (a[j] > aMax) aMax = a[j];
}
new_image[i] = rMax;
new_image[i+1] = gMax;
new_image[i+2] = bMax;
new_image[i+3] = aMax;
}
} | 0fea4207e2184c0e66b4f0e333ad0d90966951f3.cu | #include "includes.h"
__global__ void pool(unsigned char* image, unsigned char* new_image, unsigned height, unsigned width, int thread_count)
{
// process image
int offset = (blockIdx.x * blockDim.x + threadIdx.x)*4;
for (int i = offset; i < (width*height); i+=(thread_count*4) )
{
int x = i % (width * 2) * 2;
int y = i / (width * 2);
int p1 = 8 * width * y + x;
int p2 = 8 * width * y + x + 4;
int p3 = 8 * width * y + x + 4 * width;
int p4 = 8 * width * y + x + 4 * width + 4;
unsigned r[] = { image[p1], image[p2], image[p3], image[p4] };
unsigned g[] = { image[p1+1], image[p2+1], image[p3+1], image[p4+1] };
unsigned b[] = { image[p1+2], image[p2+2], image[p3+2], image[p4+2] };
unsigned a[] = { image[p1+3], image[p2+3], image[p3+3], image[p4+3] };
int rMax = r[0];
int gMax = g[0];
int bMax = b[0];
int aMax = a[0];
for (int j = 1; j < 4; j++ )
{
if (r[j] > rMax) rMax = r[j];
if (g[j] > gMax) gMax = g[j];
if (b[j] > bMax) bMax = b[j];
if (a[j] > aMax) aMax = a[j];
}
new_image[i] = rMax;
new_image[i+1] = gMax;
new_image[i+2] = bMax;
new_image[i+3] = aMax;
}
} |
895a799985d579d02cadaaa32e9636344d235a60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "Device.h"
#include "Slice.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void slice(float* ptrTabDev,int n);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
 |* Constructor *|
\*-------------------------------------*/
Slice::Slice(const Grid& grid, int n) :
n(n),pi(0)
{
// Grid
{
this->dg = grid.dg;
this->db = grid.db;
}
this->nbThread = grid.threadCounts();
	this->sizeOctet = nbThread * sizeof(float); // size in bytes
// MM
{
std::cout <<"|"<< sizeOctet<<"|";
// MM (malloc Device)
{
Device::malloc(&ptrTabDev, sizeOctet);
}
ptrTab = new float[nbThread];
	Device::lastCudaError("AddVector MM (end allocation)"); // temp debug, optional
}
}
Slice::~Slice(void)
{
//MM (device free)
{
Device::free(ptrTabDev);
	Device::lastCudaError("AddVector MM (end deallocation)"); // temp debug, optional
}
}
/*--------------------------------------*\
 |* Method *|
\*-------------------------------------*/
float Slice::run()
{
Device::lastCudaError("slice(before)"); // temp debug
	hipLaunchKernelGGL(( slice), dim3(dg),dim3(db), 0, 0, ptrTabDev, n); // asynchronous
Device::lastCudaError("slice (after)"); // temp debug
//Device::synchronize(); // Temp,debug, only for printf in GPU
// MM (Device -> Host)
{
		Device::memcpyDToH(ptrTab, ptrTabDev, sizeOctet); // implicit synchronization barrier
}
for (int i = 0; i < nbThread; i++)
{
pi += ptrTab[i];
}
pi = pi / (float) n;
std::cout << pi;
return pi;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 895a799985d579d02cadaaa32e9636344d235a60.cu | #include <iostream>
#include "Device.h"
#include "Slice.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void slice(float* ptrTabDev,int n);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
 |* Constructor *|
\*-------------------------------------*/
Slice::Slice(const Grid& grid, int n) :
n(n),pi(0)
{
// Grid
{
this->dg = grid.dg;
this->db = grid.db;
}
this->nbThread = grid.threadCounts();
	this->sizeOctet = nbThread * sizeof(float); // size in bytes
// MM
{
std::cout <<"|"<< sizeOctet<<"|";
// MM (malloc Device)
{
Device::malloc(&ptrTabDev, sizeOctet);
}
ptrTab = new float[nbThread];
	Device::lastCudaError("AddVector MM (end allocation)"); // temp debug, optional
}
}
Slice::~Slice(void)
{
//MM (device free)
{
Device::free(ptrTabDev);
	Device::lastCudaError("AddVector MM (end deallocation)"); // temp debug, optional
}
}
/*--------------------------------------*\
 |* Method *|
\*-------------------------------------*/
float Slice::run()
{
Device::lastCudaError("slice(before)"); // temp debug
	slice<<<dg,db>>>(ptrTabDev, n); // asynchronous
Device::lastCudaError("slice (after)"); // temp debug
//Device::synchronize(); // Temp,debug, only for printf in GPU
// MM (Device -> Host)
{
		Device::memcpyDToH(ptrTab, ptrTabDev, sizeOctet); // implicit synchronization barrier
}
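	// Accumulate the per-thread partial sums on the host; dividing by n gives the estimate of pi.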
for (int i = 0; i < nbThread; i++)
{
pi += ptrTab[i];
}
pi = pi / (float) n;
std::cout << pi;
return pi;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
c6b73ed905eef6ddb22388f3eb38647c9f32ad7e.hip | // !!! This is a file automatically generated by hipify!!!
#include "./common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#define N 1000*10
__global__ void Kernel_Memcpy(int* dst, int* src){
int thid = threadIdx.x;
int blid = blockIdx.x;
int i;
if(thid < 1 && blid < 1){
printf("device src:\n");
for ( i = 0; i < N; i++) {
printf("%d ", src[i]);
}
printf("\n");
printf("device dst:\n");
for ( i = 0; i < N; i++) {
printf("%d ", dst[i]);
}
printf("\n");
printf("hipMemcpyAsync()\n");
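    // Note: this is a device-side memcpy (dynamic parallelism). hipify translates the call,
    // but device-side hipMemcpyAsync may not be supported on all HIP targets.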
hipMemcpyAsync(dst, src, sizeof(int) * N ,hipMemcpyDeviceToDevice);
// for ( i = 0; i < N; i++) {
// dst[i] = src[i];
// }
printf("device src:\n");
for ( i = 0; i < N; i++) {
printf("%d ", src[i]);
}
printf("\n");
printf("device dst:\n");
for ( i = 0; i < N; i++) {
printf("%d ", dst[i]);
}
printf("\n");
}
else{}
}//end-kernel
__global__ void Kernel_print_array(int *array){
int i;
printf("N: %d\n",N );
printf("%s\n","device array" );
for ( i = 0; i < N; i++) {
printf("%d ", array[i]);
}
printf("\n");
}//end-kernel
int main(void){
int *h_src, *h_dst;
int *d_src, *d_dst;
//----set up device START-----
int dev_num =0;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp,dev_num);
printf("Using Device %d:%s\n",dev_num,deviceProp.name);
hipSetDevice(dev_num);
//----set up device END-----
h_src = (int*)malloc(sizeof(int) * N);
h_dst = (int*)malloc(sizeof(int) * N);
memset(h_src, 0, sizeof(int) * N);
memset(h_dst, 0, sizeof(int) * N);
printf("host src:\n");
for (size_t i = 0; i < N; i++) {
h_src[i] = i+1;
printf("%d ", h_src[i]);
}
printf("\n");
printf("host dst:\n");
for (size_t i = 0; i < N; i++) {
printf("%d ", h_dst[i]);
}
printf("\n");
hipMalloc((int**)&d_src, sizeof(int)*N);
hipMalloc((int**)&d_dst, sizeof(int)*N);
hipMemcpy(d_src, h_src, sizeof(int)*N, hipMemcpyHostToDevice);
printf("Kernel_Memcpy<<<>>>()\n");
hipLaunchKernelGGL(( Kernel_Memcpy), dim3(1),dim3(1), 0, 0, d_dst, d_src);
hipMemcpy(h_dst, d_dst, sizeof(int)*N, hipMemcpyDeviceToHost);
printf("host src:\n");
for (size_t i = 0; i < N; i++) {
printf("%d ", h_src[i]);
}
printf("\n");
printf("host dst:\n");
for (size_t i = 0; i < N; i++) {
printf("%d ", h_dst[i]);
}
printf("\n");
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
| c6b73ed905eef6ddb22388f3eb38647c9f32ad7e.cu | #include "./common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
#define N 1000*10
__global__ void Kernel_Memcpy(int* dst, int* src){
int thid = threadIdx.x;
int blid = blockIdx.x;
int i;
if(thid < 1 && blid < 1){
printf("device src:\n");
for ( i = 0; i < N; i++) {
printf("%d ", src[i]);
}
printf("\n");
printf("device dst:\n");
for ( i = 0; i < N; i++) {
printf("%d ", dst[i]);
}
printf("\n");
printf("cudaMemcpyAsync()\n");
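    // Device-side cudaMemcpyAsync uses the CUDA device runtime (dynamic parallelism): compile
    // with -rdc=true and link cudadevrt; only device-to-device copies are permitted from device code.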
cudaMemcpyAsync(dst, src, sizeof(int) * N ,cudaMemcpyDeviceToDevice);
// for ( i = 0; i < N; i++) {
// dst[i] = src[i];
// }
printf("device src:\n");
for ( i = 0; i < N; i++) {
printf("%d ", src[i]);
}
printf("\n");
printf("device dst:\n");
for ( i = 0; i < N; i++) {
printf("%d ", dst[i]);
}
printf("\n");
}
else{}
}//end-kernel
__global__ void Kernel_print_array(int *array){
int i;
printf("N: %d\n",N );
printf("%s\n","device array" );
for ( i = 0; i < N; i++) {
printf("%d ", array[i]);
}
printf("\n");
}//end-kernel
int main(void){
int *h_src, *h_dst;
int *d_src, *d_dst;
//----set up device START-----
int dev_num =0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp,dev_num);
printf("Using Device %d:%s\n",dev_num,deviceProp.name);
cudaSetDevice(dev_num);
//----set up device END-----
h_src = (int*)malloc(sizeof(int) * N);
h_dst = (int*)malloc(sizeof(int) * N);
memset(h_src, 0, sizeof(int) * N);
memset(h_dst, 0, sizeof(int) * N);
printf("host src:\n");
for (size_t i = 0; i < N; i++) {
h_src[i] = i+1;
printf("%d ", h_src[i]);
}
printf("\n");
printf("host dst:\n");
for (size_t i = 0; i < N; i++) {
printf("%d ", h_dst[i]);
}
printf("\n");
cudaMalloc((int**)&d_src, sizeof(int)*N);
cudaMalloc((int**)&d_dst, sizeof(int)*N);
cudaMemcpy(d_src, h_src, sizeof(int)*N, cudaMemcpyHostToDevice);
printf("Kernel_Memcpy<<<>>>()\n");
Kernel_Memcpy<<<1,1>>>(d_dst, d_src);
cudaMemcpy(h_dst, d_dst, sizeof(int)*N, cudaMemcpyDeviceToHost);
printf("host src:\n");
for (size_t i = 0; i < N; i++) {
printf("%d ", h_src[i]);
}
printf("\n");
printf("host dst:\n");
for (size_t i = 0; i < N; i++) {
printf("%d ", h_dst[i]);
}
printf("\n");
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
b758d8fa22008343ad82d526895513f50982f7d1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <helper_timer.h>
#include <helper_image.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
__global__ void imageKernel(const float *a, float *b, int im_width, int im_height, int pitch) {
int ix = threadIdx.x + blockDim.x * blockIdx.x;
int iy = threadIdx.y + blockDim.y * blockIdx.y;
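    // Clamp-to-edge 5x5 box blur on a pitched single-channel float image; pitch is in bytes,
    // hence the division by sizeof(float) when computing row offsets.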
float sum = 0.0f;
int oxx, oyy;
    for (int xx = ix - 2; xx <= ix + 2; ++xx) {   // inclusive bounds give the 5x5 window implied by the /25 divisor
        for (int yy = iy - 2; yy <= iy + 2; ++yy) {
oxx = xx;
oyy = yy;
if (xx < 0) oxx = 0;
if (yy < 0) oyy = 0;
if (xx >= im_width) oxx = im_width - 1;
if (yy >= im_height) oyy = im_height - 1;
sum += a[oyy * (pitch / sizeof(float)) + oxx];
}
}
if (ix < im_width && iy < im_height) {
int idx = iy * (pitch / sizeof(float)) + ix;
b[idx] = sum / 25;
if (b[idx] > 1.0f) {
b[idx] = 1.0f;
}
if (b[idx] < 0.0f) {
b[idx] = 0.0f;
}
}
}
void blurPixel(const float* a, float* b, int ix, int iy, int im_width, int im_height) {
float sum = 0.0f;
int oxx, oyy;
    for (int xx = ix - 2; xx <= ix + 2; ++xx) {   // 5x5 window
        for (int yy = iy - 2; yy <= iy + 2; ++yy) {
oxx = xx;
oyy = yy;
if (xx < 0) oxx = 0;
if (yy < 0) oyy = 0;
if (xx >= im_width) oxx = im_width - 1;
if (yy >= im_height) oyy = im_height - 1;
sum += a[oyy * im_width + oxx];
}
}
if (ix < im_width && iy < im_height) {
int idx = iy * im_width + ix;
b[idx] = sum / 25;
if (b[idx] > 1.0f) {
b[idx] = 1.0f;
}
if (b[idx] < 0.0f) {
b[idx] = 0.0f;
}
}
}
void task4() {
int blockSizeX = 0, blockSizeY = 0;
int size = 0;
int blockX = 0, blockY = 0;
float *b = 0;
float *c = 0;
float *d = 0;
size_t pitch = 0;
size_t pitch1 = 0;
float *aOnDevice = 0;
float *bOnDevice = 0;
unsigned int im_width, im_height;
float* im_data = NULL;
sdkLoadPGM("lena.pgm", &im_data, &im_width, &im_height);
size = im_width * im_height;
printf("Type size of block X: ");
scanf("%d", &blockSizeX);
printf("\nType size of Y block: ");
scanf("%d", &blockSizeY);
if (im_width % blockSizeX == 0) {
blockX = im_width / blockSizeX;
} else {
blockX = im_width / blockSizeX + 1;
}
if (im_height % blockSizeY == 0) {
blockY = im_height / blockSizeY;
} else {
blockY = im_height / blockSizeY + 1;
}
b = (float*)malloc(sizeof(float) * size);
c = (float*)malloc(sizeof(float) * size);
d = (float*)malloc(sizeof(float) * size);
hipSetDevice(0);
hipMallocPitch((void**)&aOnDevice, &pitch, im_width * sizeof(float), im_height);
hipMallocPitch((void**)&bOnDevice, &pitch1, im_width * sizeof(float), im_height);
hipMemcpy2D(aOnDevice, pitch, im_data, im_width * sizeof(float), im_width * sizeof(float), im_height, hipMemcpyHostToDevice);
hipMemcpy2D(bOnDevice, pitch1, im_data, im_width * sizeof(float), im_width * sizeof(float), im_height, hipMemcpyHostToDevice);
dim3 gridDims = dim3(blockX, blockY, 1);
dim3 blockDims = dim3(blockSizeX, blockSizeY, 1);
StopWatchInterface* hTimer;
sdkCreateTimer(&hTimer);
sdkResetTimer(&hTimer);
hipDeviceSynchronize();
sdkStartTimer(&hTimer);
for(int i = 0; i < 100; ++i) {
hipLaunchKernelGGL(( imageKernel), dim3(gridDims),dim3(blockDims), 0, 0, aOnDevice, bOnDevice, im_width, im_height, pitch);
}
hipDeviceSynchronize();
sdkStopTimer(&hTimer);
float time1 = sdkGetTimerValue(&hTimer) / 100;
hipMemcpy2D(c, im_width * sizeof(float), bOnDevice, pitch1, im_width * sizeof(float), im_height, hipMemcpyDeviceToHost);
sdkSavePGM("lena_out_gpu.pgm", c, im_width, im_height);
hipFree(aOnDevice);
hipFree(bOnDevice);
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for(int x = 0; x < im_width; ++x) {
for(int y = 0; y < im_height; ++y) {
blurPixel(im_data, d, x, y, im_width, im_height);
}
}
sdkStopTimer(&hTimer);
float time2 = sdkGetTimerValue(&hTimer);
printf("CUDA: %f, CPU: %f \n", time1, time2);
sdkSavePGM("lena_out_cpu.pgm", d, im_width, im_height);
sdkResetTimer(&hTimer);
hipDeviceReset();
free(b);
free(c);
free(d);
} | b758d8fa22008343ad82d526895513f50982f7d1.cu | #include <stdio.h>
#include <helper_timer.h>
#include <helper_image.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
__global__ void imageKernel(const float *a, float *b, int im_width, int im_height, int pitch) {
int ix = threadIdx.x + blockDim.x * blockIdx.x;
int iy = threadIdx.y + blockDim.y * blockIdx.y;
float sum = 0.0f;
int oxx, oyy;
    for (int xx = ix - 2; xx <= ix + 2; ++xx) {   // inclusive bounds give the 5x5 window implied by the /25 divisor
        for (int yy = iy - 2; yy <= iy + 2; ++yy) {
oxx = xx;
oyy = yy;
if (xx < 0) oxx = 0;
if (yy < 0) oyy = 0;
if (xx >= im_width) oxx = im_width - 1;
if (yy >= im_height) oyy = im_height - 1;
sum += a[oyy * (pitch / sizeof(float)) + oxx];
}
}
if (ix < im_width && iy < im_height) {
int idx = iy * (pitch / sizeof(float)) + ix;
b[idx] = sum / 25;
if (b[idx] > 1.0f) {
b[idx] = 1.0f;
}
if (b[idx] < 0.0f) {
b[idx] = 0.0f;
}
}
}
void blurPixel(const float* a, float* b, int ix, int iy, int im_width, int im_height) {
float sum = 0.0f;
int oxx, oyy;
    for (int xx = ix - 2; xx <= ix + 2; ++xx) {   // 5x5 window
        for (int yy = iy - 2; yy <= iy + 2; ++yy) {
oxx = xx;
oyy = yy;
if (xx < 0) oxx = 0;
if (yy < 0) oyy = 0;
if (xx >= im_width) oxx = im_width - 1;
if (yy >= im_height) oyy = im_height - 1;
sum += a[oyy * im_width + oxx];
}
}
if (ix < im_width && iy < im_height) {
int idx = iy * im_width + ix;
b[idx] = sum / 25;
if (b[idx] > 1.0f) {
b[idx] = 1.0f;
}
if (b[idx] < 0.0f) {
b[idx] = 0.0f;
}
}
}
void task4() {
int blockSizeX = 0, blockSizeY = 0;
int size = 0;
int blockX = 0, blockY = 0;
float *b = 0;
float *c = 0;
float *d = 0;
size_t pitch = 0;
size_t pitch1 = 0;
float *aOnDevice = 0;
float *bOnDevice = 0;
unsigned int im_width, im_height;
float* im_data = NULL;
sdkLoadPGM("lena.pgm", &im_data, &im_width, &im_height);
size = im_width * im_height;
printf("Type size of block X: ");
scanf("%d", &blockSizeX);
printf("\nType size of Y block: ");
scanf("%d", &blockSizeY);
if (im_width % blockSizeX == 0) {
blockX = im_width / blockSizeX;
} else {
blockX = im_width / blockSizeX + 1;
}
if (im_height % blockSizeY == 0) {
blockY = im_height / blockSizeY;
} else {
blockY = im_height / blockSizeY + 1;
}
b = (float*)malloc(sizeof(float) * size);
c = (float*)malloc(sizeof(float) * size);
d = (float*)malloc(sizeof(float) * size);
cudaSetDevice(0);
cudaMallocPitch((void**)&aOnDevice, &pitch, im_width * sizeof(float), im_height);
cudaMallocPitch((void**)&bOnDevice, &pitch1, im_width * sizeof(float), im_height);
cudaMemcpy2D(aOnDevice, pitch, im_data, im_width * sizeof(float), im_width * sizeof(float), im_height, cudaMemcpyHostToDevice);
cudaMemcpy2D(bOnDevice, pitch1, im_data, im_width * sizeof(float), im_width * sizeof(float), im_height, cudaMemcpyHostToDevice);
dim3 gridDims = dim3(blockX, blockY, 1);
dim3 blockDims = dim3(blockSizeX, blockSizeY, 1);
StopWatchInterface* hTimer;
sdkCreateTimer(&hTimer);
sdkResetTimer(&hTimer);
cudaDeviceSynchronize();
sdkStartTimer(&hTimer);
for(int i = 0; i < 100; ++i) {
imageKernel<<<gridDims,blockDims>>>(aOnDevice, bOnDevice, im_width, im_height, pitch);
}
cudaDeviceSynchronize();
sdkStopTimer(&hTimer);
float time1 = sdkGetTimerValue(&hTimer) / 100;
cudaMemcpy2D(c, im_width * sizeof(float), bOnDevice, pitch1, im_width * sizeof(float), im_height, cudaMemcpyDeviceToHost);
sdkSavePGM("lena_out_gpu.pgm", c, im_width, im_height);
cudaFree(aOnDevice);
cudaFree(bOnDevice);
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for(int x = 0; x < im_width; ++x) {
for(int y = 0; y < im_height; ++y) {
blurPixel(im_data, d, x, y, im_width, im_height);
}
}
sdkStopTimer(&hTimer);
float time2 = sdkGetTimerValue(&hTimer);
printf("CUDA: %f, CPU: %f \n", time1, time2);
sdkSavePGM("lena_out_cpu.pgm", d, im_width, im_height);
sdkResetTimer(&hTimer);
cudaDeviceReset();
free(b);
free(c);
free(d);
} |
4bd488a8ed1606d35aebb6f3e291f195f3938e16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "timer.hpp"
#include <algorithm>
#include <iostream>
#include <stdio.h>
#include <vector>
#include <iostream>
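// Reduction kernel: each thread accumulates (x[i] + y[i]) * (x[i] - y[i]) over a
// grid-stride loop, the block folds its partial sums with a shared-memory tree
// reduction (cache[] assumes blockDim.x == 128), and thread 0 adds the block's
// result to *dot via atomicAdd.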
__global__ void dot_product(int* x, int* y, int* dot, int N) {
int index = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
__shared__ int cache[128];
int temp = 0;
while (index < N) {
temp += (x[index] + y[index]) * (x[index] - y[index]);
index += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
for (int i = blockDim.x/2; i > 0; i/= 2) {
__syncthreads();
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
}
if (threadIdx.x == 0)
atomicAdd(dot, cache[0]);
}
int main() {
Timer timer;
int N = 1000;
int *x = (int *)malloc(sizeof(int) * N);
int *y = (int *)malloc(sizeof(int) * N);
int *dot = (int *)malloc(sizeof(int));
for (int i = 0; i < N; i++) {
x[i] = 1;
y[i] = 2;
}
*dot = 0;
int *cuda_x;
int *cuda_y;
int *cuda_dot;
hipMalloc(&cuda_x, sizeof(int) * N);
hipMalloc(&cuda_y, sizeof(int) * N);
hipMalloc(&cuda_dot, sizeof(int));
hipMemcpy(cuda_x, x, sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(cuda_y, y, sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(cuda_dot, dot, sizeof(int), hipMemcpyHostToDevice);
std::vector<double> timings;
for(int reps=0; reps < 10; ++reps) {
timer.reset();
hipLaunchKernelGGL(( dot_product), dim3(N/256), dim3(128), 0, 0, cuda_x, cuda_y, cuda_dot, N);
hipMemcpy(dot, cuda_dot, sizeof(int), hipMemcpyDeviceToHost);
timings.push_back(timer.get());
std::cout << "Dot Product = " << *dot << std::endl;
*dot = 0;
hipMemcpy(cuda_dot, dot, sizeof(int), hipMemcpyHostToDevice);
}
std::sort(timings.begin(), timings.end());
double time_elapsed = timings[10/2];
std::cout << "Time elapsed: " << time_elapsed << std::endl << std::endl;
return EXIT_SUCCESS;
} | 4bd488a8ed1606d35aebb6f3e291f195f3938e16.cu | #include "timer.hpp"
#include <algorithm>
#include <iostream>
#include <stdio.h>
#include <vector>
#include <iostream>
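// Reduction kernel: each thread accumulates (x[i] + y[i]) * (x[i] - y[i]) over a
// grid-stride loop, the block folds its partial sums with a shared-memory tree
// reduction (cache[] assumes blockDim.x == 128), and thread 0 adds the block's
// result to *dot via atomicAdd.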
__global__ void dot_product(int* x, int* y, int* dot, int N) {
int index = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
__shared__ int cache[128];
int temp = 0;
while (index < N) {
temp += (x[index] + y[index]) * (x[index] - y[index]);
index += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
for (int i = blockDim.x/2; i > 0; i/= 2) {
__syncthreads();
if (threadIdx.x < i)
cache[threadIdx.x] += cache[threadIdx.x + i];
}
if (threadIdx.x == 0)
atomicAdd(dot, cache[0]);
}
int main() {
Timer timer;
int N = 1000;
int *x = (int *)malloc(sizeof(int) * N);
int *y = (int *)malloc(sizeof(int) * N);
int *dot = (int *)malloc(sizeof(int));
for (int i = 0; i < N; i++) {
x[i] = 1;
y[i] = 2;
}
*dot = 0;
int *cuda_x;
int *cuda_y;
int *cuda_dot;
cudaMalloc(&cuda_x, sizeof(int) * N);
cudaMalloc(&cuda_y, sizeof(int) * N);
cudaMalloc(&cuda_dot, sizeof(int));
cudaMemcpy(cuda_x, x, sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_y, y, sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_dot, dot, sizeof(int), cudaMemcpyHostToDevice);
std::vector<double> timings;
for(int reps=0; reps < 10; ++reps) {
timer.reset();
dot_product<<<N/256, 128>>>(cuda_x, cuda_y, cuda_dot, N);
cudaMemcpy(dot, cuda_dot, sizeof(int), cudaMemcpyDeviceToHost);
timings.push_back(timer.get());
std::cout << "Dot Product = " << *dot << std::endl;
*dot = 0;
cudaMemcpy(cuda_dot, dot, sizeof(int), cudaMemcpyHostToDevice);
}
std::sort(timings.begin(), timings.end());
double time_elapsed = timings[10/2];
std::cout << "Time elapsed: " << time_elapsed << std::endl << std::endl;
return EXIT_SUCCESS;
} |
4d1f9f0549bf5655464b3b56e535e119a454907c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void rotate_weights_kernel(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int kernel_size, int reverse)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int kernel_area = kernel_size * kernel_size;
const int i = index * kernel_area;
const int stage_step = (nweights / kernel_area) / 4; // 4 stages
const int stage_id = index / stage_step;
// nweights = (c / groups) * n * size * size;
// kernel_area = size*size
if (i < nweights)
{
// if(reverse)
if (stage_id == 0) {
// simple copy
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
const int src_i = x + y*kernel_size + i;
const int dst_i = x + y*kernel_size + i;
if (reverse) weight_deform_gpu[src_i] = src_weight_gpu[dst_i];
else weight_deform_gpu[dst_i] = src_weight_gpu[src_i];
}
}
}
else if (stage_id == 1)
{
// 90 degree clockwise rotation - 1
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
const int src_i = x + y*kernel_size + i;
const int dst_i = (kernel_size - 1 - y) + x*kernel_size + i;
if (reverse) weight_deform_gpu[src_i] = src_weight_gpu[dst_i];
else weight_deform_gpu[dst_i] = src_weight_gpu[src_i];
}
}
}
else if (stage_id == 2)
{
// 180 degree clockwise rotation - 2
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
const int src_i = x + y*kernel_size + i;
const int dst_i = (kernel_size - 1 - x) + (kernel_size - 1 - y)*kernel_size + i;
if (reverse) weight_deform_gpu[src_i] = src_weight_gpu[dst_i];
else weight_deform_gpu[dst_i] = src_weight_gpu[src_i];
}
}
}
else if (stage_id == 3)
{
// 270 degree clockwise rotation - 3
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
const int src_i = x + y*kernel_size + i;
const int dst_i = y + (kernel_size - 1 - x)*kernel_size + i;
if (reverse) weight_deform_gpu[src_i] = src_weight_gpu[dst_i];
else weight_deform_gpu[dst_i] = src_weight_gpu[src_i];
}
}
}
}
} | 4d1f9f0549bf5655464b3b56e535e119a454907c.cu | #include "includes.h"
__global__ void rotate_weights_kernel(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int kernel_size, int reverse)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int kernel_area = kernel_size * kernel_size;
const int i = index * kernel_area;
const int stage_step = (nweights / kernel_area) / 4; // 4 stages
const int stage_id = index / stage_step;
// nweights = (c / groups) * n * size * size;
// kernel_area = size*size
if (i < nweights)
{
// if(reverse)
if (stage_id == 0) {
// simple copy
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
const int src_i = x + y*kernel_size + i;
const int dst_i = x + y*kernel_size + i;
if (reverse) weight_deform_gpu[src_i] = src_weight_gpu[dst_i];
else weight_deform_gpu[dst_i] = src_weight_gpu[src_i];
}
}
}
else if (stage_id == 1)
{
// 90 degree clockwise rotation - 1
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
const int src_i = x + y*kernel_size + i;
const int dst_i = (kernel_size - 1 - y) + x*kernel_size + i;
if (reverse) weight_deform_gpu[src_i] = src_weight_gpu[dst_i];
else weight_deform_gpu[dst_i] = src_weight_gpu[src_i];
}
}
}
else if (stage_id == 2)
{
// 180 degree clockwise rotation - 2
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
const int src_i = x + y*kernel_size + i;
const int dst_i = (kernel_size - 1 - x) + (kernel_size - 1 - y)*kernel_size + i;
if (reverse) weight_deform_gpu[src_i] = src_weight_gpu[dst_i];
else weight_deform_gpu[dst_i] = src_weight_gpu[src_i];
}
}
}
else if (stage_id == 3)
{
// 270 degree clockwise rotation - 3
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
const int src_i = x + y*kernel_size + i;
const int dst_i = y + (kernel_size - 1 - x)*kernel_size + i;
if (reverse) weight_deform_gpu[src_i] = src_weight_gpu[dst_i];
else weight_deform_gpu[dst_i] = src_weight_gpu[src_i];
}
}
}
}
} |
5c8a8156c8cf0eec5588874e838efb818a4226dd.hip | // !!! This is a file automatically generated by hipify!!!
/*
* hugewiki.cu
*
* Created on: Feb 10, 2015
* Author: Wei Tan ([email protected])
* Alternating Least Squares for Matrix Factorization on CUDA 7.0+
* Code optimized for F = 100 and for cc 3.5/3.7 platforms; also tested on cc 5.2
*/
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <iostream>
#include <fstream>
#include <hipsparse.h>
#include <chrono>
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <string>
#include <sstream>
#include "./common.h"
#include "../src/als.h"
#include "../src/cg.h"
//variable definition
#define F 100
#define TILE_SIZE F/10
#define SCAN_BATCH 30
#define THETA_BATCH 3
#define X_BATCH 240
#define ITERS 10
#define M 50082603
#define N 39780
#define NNZ 3101144313
#define NNZ_TEST 344573330
//0.05 when using both "full" kernels
#define LAMBDA 0.048
//hardware specific
#define GPU_COUNT 4
#define DEVICEID 0 // the anchor device
//debug option to save model
//#define CUMF_SAVE_MODEL
//#define CUMF_TT_FP16
void saveDeviceFloatArrayToFile(std::string fileName, int size, float* d_array){
float* h_array;
cudacall(hipHostMalloc( (void** ) &h_array, size * sizeof(h_array[0])) );
cudacall(hipMemcpy(h_array, d_array, size * sizeof(h_array[0]),hipMemcpyDeviceToHost));
FILE * outfile = fopen(fileName.c_str(), "wb");
fwrite(h_array, sizeof(float), size, outfile);
fclose(outfile);
hipHostFree(h_array);
}
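/*
 * get_hermitian100_tt_fp16: each block handles one row (blockIdx.x + batch_offset) of
 * the rating matrix, accumulates the F x F matrix sum_j theta_j * theta_j^T over that
 * row's observed columns, adds (end - start) * lambda to the diagonal, and stores the
 * result as half2. Per iteration the first warp stages features 0-49 and the second
 * warp features 50-99 of up to SCAN_BATCH theta columns in shared memory; threads
 * 0-54 each accumulate one 10x10 register tile of the symmetric 100x100 output and
 * mirror the off-diagonal tiles.
 */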
__global__ void
__launch_bounds__(64, 6)
get_hermitian100_tt_fp16(const int batch_offset, half2* tt,
const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m,
const float* __restrict__ thetaT) {
extern __shared__ float2 thetaTemp[];
int row = blockIdx.x + batch_offset;
if (row < m) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
int tile_x = 0;
int tile_y = 0;
int tile = F/10;
for ( int i = 0; i < 10; i++){
int end = ((20-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * tile;
tile_y = (10 + threadIdx.x - end) * tile;
break;
}
}
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
float2 theta;
//copy texture --> smem, and sync
/*
if(threadIdx.x < SCAN_BATCH){
if(iter*SCAN_BATCH + threadIdx.x < end - start){
for (int k = 0; k < F; k += 2){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1);
thetaTemp[threadIdx.x * F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float));
}
*/
//two layers: warp divergence unless we split at 32
//require 32 >= SCAN_BATCH
if(threadIdx.x < 2*32 ){
//int index = threadIdx.x;
int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31;
if(index < SCAN_BATCH){
if(iter*SCAN_BATCH + index < end - start){
//for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){
//IMPORTANT: for loop has constant and identical start and end
if(threadIdx.x < 32){
for (int k = 0; k < 50; k += 2){
theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]);
theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]);
thetaTemp[index * F/2 + k/2] = theta;
}
}
else {
for (int k = 0; k < 50; k += 2){
theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]);
theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]);
thetaTemp[index * F/2 + k/2 + 25] = theta;
}
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[index*F/2], 0, F*sizeof(float));
}
}
/* //issue: not coalesced access to csrColIndex
if(threadIdx.x < F && threadIdx.x%2 == 0){
for(int k = 0; k< SCAN_BATCH; k++){
if(iter*SCAN_BATCH + k < end - start){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x +1);
thetaTemp[k * F/2 + threadIdx.x/2] = theta;
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[k*F/2 + threadIdx.x/2], 0, 2*sizeof(float));
}
}
*/
/*
int layers = blockDim.x/SCAN_BATCH; //100/30 = 3
//int height = blockDim.x/layers; //30
int y = threadIdx.x/SCAN_BATCH;//0,1,2,3; y==3 is not viable
//min(y, (layers-1)) * height
int y_start = y * 30;//0-29:0;30-59:30;60-89:60
int y_end = y_start + 30; //0-29:30;30-59:60;60-89:90
if(y >= layers - 1) y_end = F; //60-89:100
if(threadIdx.x - y_start < SCAN_BATCH){
if(iter*SCAN_BATCH + (threadIdx.x - y_start) < end - start){
for (int k = y_start; k < y_end; k += 2){
theta.x =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k);
theta.y =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k+1);
thetaTemp[(threadIdx.x - y_start)* F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[(threadIdx.x - y_start)*F/2 + y_start/2], 0, (y_end - y_start)*sizeof(float));
}
*/
__syncthreads();
//tile: 10*10
if(threadIdx.x < 55 ){
for(int k = 0; k < SCAN_BATCH; k++){
accumulate_in_registers();
}
}
}
//end of iteration in copying from smem and aggregating in register
__syncthreads();
#ifdef DEBUG
//if(threadIdx.x==0)
// printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9);
#endif
if(threadIdx.x < 55 ){
//weighted-lambda regularization
if(tile_x == tile_y){
float temp = (end - start) * lambda;
temp0 += temp;
temp11 += temp;
temp22 += temp;
temp33 += temp;
temp44 += temp;
temp55 += temp;
temp66 += temp;
temp77 += temp;
temp88 += temp;
temp99 += temp;
}
//copy output to gmem
int index = blockIdx.x*F*F/2;
//fill_lower_half_from_registers();
fill_lower_half_from_registers_fp16();
//symmetric
if(tile_x!=tile_y){
//fill_upper_half_from_registers();
fill_upper_half_from_registers_fp16();
}
}
}
}
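// get_hermitian100: same per-row computation as get_hermitian100_tt_fp16, but the
// output tt is kept in fp32 and the weighted-lambda regularization is added to the
// diagonal when the tiles are written back to global memory.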
__global__ void
__launch_bounds__(64, 6)
get_hermitian100(const int batch_offset, float* tt, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const float* __restrict__ thetaT) {
extern __shared__ float2 thetaTemp[];
int row = blockIdx.x + batch_offset;
if (row < m) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
int tile_x = 0;
int tile_y = 0;
int tile = F/10;
for ( int i = 0; i < 10; i++){
int end = ((20-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * tile;
tile_y = (10 + threadIdx.x - end) * tile;
break;
}
}
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
float2 theta;
//copy texture --> smem, and sync
/*
if(threadIdx.x < SCAN_BATCH){
if(iter*SCAN_BATCH + threadIdx.x < end - start){
for (int k = 0; k < F; k += 2){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1);
thetaTemp[threadIdx.x * F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float));
}
*/
//two layers: warp divergence unless we split at 32
//require 32 >= SCAN_BATCH
if(threadIdx.x < 2*32 ){
//int index = threadIdx.x;
int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31;
if(index < SCAN_BATCH){
if(iter*SCAN_BATCH + index < end - start){
//for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){
//IMPORTANT: for loop has constant and identical start and end
if(threadIdx.x < 32){
for (int k = 0; k < 50; k += 2){
theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]);
theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]);
thetaTemp[index * F/2 + k/2] = theta;
}
}
else {
for (int k = 0; k < 50; k += 2){
theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]);
theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]);
thetaTemp[index * F/2 + k/2 + 25] = theta;
}
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[index*F/2], 0, F*sizeof(float));
}
}
/* //issue: not coalesced access to csrColIndex
if(threadIdx.x < F && threadIdx.x%2 == 0){
for(int k = 0; k< SCAN_BATCH; k++){
if(iter*SCAN_BATCH + k < end - start){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x +1);
thetaTemp[k * F/2 + threadIdx.x/2] = theta;
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[k*F/2 + threadIdx.x/2], 0, 2*sizeof(float));
}
}
*/
/*
int layers = blockDim.x/SCAN_BATCH; //100/30 = 3
//int height = blockDim.x/layers; //30
int y = threadIdx.x/SCAN_BATCH;//0,1,2,3; y==3 is not viable
//min(y, (layers-1)) * height
int y_start = y * 30;//0-29:0;30-59:30;60-89:60
int y_end = y_start + 30; //0-29:30;30-59:60;60-89:90
if(y >= layers - 1) y_end = F; //60-89:100
if(threadIdx.x - y_start < SCAN_BATCH){
if(iter*SCAN_BATCH + (threadIdx.x - y_start) < end - start){
for (int k = y_start; k < y_end; k += 2){
theta.x =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k);
theta.y =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k+1);
thetaTemp[(threadIdx.x - y_start)* F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[(threadIdx.x - y_start)*F/2 + y_start/2], 0, (y_end - y_start)*sizeof(float));
}
*/
__syncthreads();
//tile: 10*10
if(threadIdx.x < 55 ){
for(int k = 0; k < SCAN_BATCH; k++){
accumulate_in_registers();
}
}
}
//end of iteration in copying from smem and aggregating in register
__syncthreads();
#ifdef DEBUG
//if(threadIdx.x==0)
// printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9);
#endif
if(threadIdx.x < 55 ){
//copy output to gmem
int index = blockIdx.x*F*F;
fill_lower_half_from_registers();
//symmetric
if(tile_x!=tile_y){
fill_upper_half_from_registers();
}
//regularization
if(tile_x == tile_y){
for(int k = 0; k < tile; k++)
tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda;
}
}
}
}
//split a big CSR matrix into many sub-matrices by rows; the row ids of the sub-matrices need to be shifted
//inval = inval - inval[0]
__global__ void zeroIndex(int * inVal, const unsigned int inVal_0, const int size) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
if(i < size){
inVal[i] = (unsigned)inVal[i] - inVal_0;
}
}
texture<float> xTTexRef;
texture<float> thetaTTexRef;
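// updateThetaByBlock2pRegDsmemTile: per-column (CSC) counterpart. Each block handles
// one column, staging up to SCAN_BATCH rows of XT in shared memory per iteration;
// each of the block's 100 threads accumulates one 10x10 tile of the full 100x100
// matrix sum_i x_i * x_i^T over the column's observed rows, and (end - start) * lambda
// is added to the diagonal.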
__global__ void
__launch_bounds__(100, 4)
updateThetaByBlock2pRegDsmemTile(float * xx, const int* cscRowIndex,
const int* cscColIndex, const float lambda, const float * XT) {
__shared__ float2 xTemp[SCAN_BATCH * F/2];
int col = blockIdx.x;
if (col < N) {
//this block needs to handle end - start XT columns
int start = cscColIndex[col];
int end = cscColIndex[col + 1];
int iterations = (end - start -1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
float2 x;
int tile = F/10;
int tile_x = (threadIdx.x/tile) * tile;//start x of this tile
int tile_y = (threadIdx.x%tile) * tile;//start y of this tile
for (int iter = 0; iter < iterations; iter ++){
//copy texture --> smem, and sync
if(threadIdx.x < SCAN_BATCH){
if(iter*SCAN_BATCH + threadIdx.x < end - start){
for (int k = 0; k < F; k += 2){
x.x =
XT[ F * cscRowIndex[start + iter*SCAN_BATCH + threadIdx.x] + k ];
x.y =
XT [ F * cscRowIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1 ];
xTemp[threadIdx.x * F/2 + k/2] = x;
}
}
//must be the last iteration; no need to check
//not enough x to copy, set zero
else
memset(&xTemp[threadIdx.x*F/2], 0, F*sizeof(float));
}
__syncthreads();
///////////////////////////////////////////////////////////////////////////////////////////////////////////
//tile: 10*10
for(int k = 0; k < SCAN_BATCH; k++){
temp0 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp1 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp2 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp3 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp4 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp5 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp6 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp7 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp8 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp9 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp10 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp11 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp12 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp13 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp14 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp15 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp16 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp17 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp18 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp19 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
temp20 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp21 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp22 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp23 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp24 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp25 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp26 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp27 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp28 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp29 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp30 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp31 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp32 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp33 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp34 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp35 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp36 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp37 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp38 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp39 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
temp40 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp41 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp42 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp43 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp44 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp45 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp46 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp47 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp48 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp49 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp50 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp51 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp52 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp53 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp54 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp55 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp56 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp57 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp58 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp59 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
temp60 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp61 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp62 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp63 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp64 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp65 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp66 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp67 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp68 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp69 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp70 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp71 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp72 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp73 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp74 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp75 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp76 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp77 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp78 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp79 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
temp80 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp81 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp82 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp83 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp84 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp85 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp86 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp87 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp88 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp89 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp90 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp91 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp92 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp93 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp94 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp95 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp96 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp97 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp98 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp99 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
__syncthreads();
}
int index = blockIdx.x*F*F;
///*
//copy output to gmem
xx[index + tile_x + tile_y*F] = temp0;
xx[index + tile_x + (tile_y + 1)*F] = temp1;
xx[index + tile_x + (tile_y + 2)*F] = temp2;
xx[index + tile_x + (tile_y + 3)*F] = temp3;
xx[index + tile_x + (tile_y + 4)*F] = temp4;
xx[index + tile_x + (tile_y + 5)*F] = temp5;
xx[index + tile_x + (tile_y + 6)*F] = temp6;
xx[index + tile_x + (tile_y + 7)*F] = temp7;
xx[index + tile_x + (tile_y + 8)*F] = temp8;
xx[index + tile_x + (tile_y + 9)*F] = temp9;
xx[index + tile_x + 1 + tile_y*F] = temp10;
xx[index + tile_x + 1 + (tile_y + 1)*F] = temp11;
xx[index + tile_x + 1 + (tile_y + 2)*F] = temp12;
xx[index + tile_x + 1 + (tile_y + 3)*F] = temp13;
xx[index + tile_x + 1 + (tile_y + 4)*F] = temp14;
xx[index + tile_x + 1 + (tile_y + 5)*F] = temp15;
xx[index + tile_x + 1 + (tile_y + 6)*F] = temp16;
xx[index + tile_x + 1 + (tile_y + 7)*F] = temp17;
xx[index + tile_x + 1 + (tile_y + 8)*F] = temp18;
xx[index + tile_x + 1 + (tile_y + 9)*F] = temp19;
xx[index + tile_x + 2 + tile_y*F] = temp20;
xx[index + tile_x + 2 + (tile_y + 1)*F] = temp21;
xx[index + tile_x + 2 + (tile_y + 2)*F] = temp22;
xx[index + tile_x + 2 + (tile_y + 3)*F] = temp23;
xx[index + tile_x + 2 + (tile_y + 4)*F] = temp24;
xx[index + tile_x + 2 + (tile_y + 5)*F] = temp25;
xx[index + tile_x + 2 + (tile_y + 6)*F] = temp26;
xx[index + tile_x + 2 + (tile_y + 7)*F] = temp27;
xx[index + tile_x + 2 + (tile_y + 8)*F] = temp28;
xx[index + tile_x + 2 + (tile_y + 9)*F] = temp29;
xx[index + tile_x + 3 + tile_y*F] = temp30;
xx[index + tile_x + 3 + (tile_y + 1)*F] = temp31;
xx[index + tile_x + 3 + (tile_y + 2)*F] = temp32;
xx[index + tile_x + 3 + (tile_y + 3)*F] = temp33;
xx[index + tile_x + 3 + (tile_y + 4)*F] = temp34;
xx[index + tile_x + 3 + (tile_y + 5)*F] = temp35;
xx[index + tile_x + 3 + (tile_y + 6)*F] = temp36;
xx[index + tile_x + 3 + (tile_y + 7)*F] = temp37;
xx[index + tile_x + 3 + (tile_y + 8)*F] = temp38;
xx[index + tile_x + 3 + (tile_y + 9)*F] = temp39;
xx[index + tile_x + 4 + tile_y*F] = temp40;
xx[index + tile_x + 4 + (tile_y + 1)*F] = temp41;
xx[index + tile_x + 4 + (tile_y + 2)*F] = temp42;
xx[index + tile_x + 4 + (tile_y + 3)*F] = temp43;
xx[index + tile_x + 4 + (tile_y + 4)*F] = temp44;
xx[index + tile_x + 4 + (tile_y + 5)*F] = temp45;
xx[index + tile_x + 4 + (tile_y + 6)*F] = temp46;
xx[index + tile_x + 4 + (tile_y + 7)*F] = temp47;
xx[index + tile_x + 4 + (tile_y + 8)*F] = temp48;
xx[index + tile_x + 4 + (tile_y + 9)*F] = temp49;
xx[index + tile_x + 5 + tile_y*F] = temp50;
xx[index + tile_x + 5 + (tile_y + 1)*F] = temp51;
xx[index + tile_x + 5 + (tile_y + 2)*F] = temp52;
xx[index + tile_x + 5 + (tile_y + 3)*F] = temp53;
xx[index + tile_x + 5 + (tile_y + 4)*F] = temp54;
xx[index + tile_x + 5 + (tile_y + 5)*F] = temp55;
xx[index + tile_x + 5 + (tile_y + 6)*F] = temp56;
xx[index + tile_x + 5 + (tile_y + 7)*F] = temp57;
xx[index + tile_x + 5 + (tile_y + 8)*F] = temp58;
xx[index + tile_x + 5 + (tile_y + 9)*F] = temp59;
xx[index + tile_x + 6 + tile_y*F] = temp60;
xx[index + tile_x + 6 + (tile_y + 1)*F] = temp61;
xx[index + tile_x + 6 + (tile_y + 2)*F] = temp62;
xx[index + tile_x + 6 + (tile_y + 3)*F] = temp63;
xx[index + tile_x + 6 + (tile_y + 4)*F] = temp64;
xx[index + tile_x + 6 + (tile_y + 5)*F] = temp65;
xx[index + tile_x + 6 + (tile_y + 6)*F] = temp66;
xx[index + tile_x + 6 + (tile_y + 7)*F] = temp67;
xx[index + tile_x + 6 + (tile_y + 8)*F] = temp68;
xx[index + tile_x + 6 + (tile_y + 9)*F] = temp69;
xx[index + tile_x + 7 + tile_y*F] = temp70;
xx[index + tile_x + 7 + (tile_y + 1)*F] = temp71;
xx[index + tile_x + 7 + (tile_y + 2)*F] = temp72;
xx[index + tile_x + 7 + (tile_y + 3)*F] = temp73;
xx[index + tile_x + 7 + (tile_y + 4)*F] = temp74;
xx[index + tile_x + 7 + (tile_y + 5)*F] = temp75;
xx[index + tile_x + 7 + (tile_y + 6)*F] = temp76;
xx[index + tile_x + 7 + (tile_y + 7)*F] = temp77;
xx[index + tile_x + 7 + (tile_y + 8)*F] = temp78;
xx[index + tile_x + 7 + (tile_y + 9)*F] = temp79;
xx[index + tile_x + 8 + tile_y*F] = temp80;
xx[index + tile_x + 8 + (tile_y + 1)*F] = temp81;
xx[index + tile_x + 8 + (tile_y + 2)*F] = temp82;
xx[index + tile_x + 8 + (tile_y + 3)*F] = temp83;
xx[index + tile_x + 8 + (tile_y + 4)*F] = temp84;
xx[index + tile_x + 8 + (tile_y + 5)*F] = temp85;
xx[index + tile_x + 8 + (tile_y + 6)*F] = temp86;
xx[index + tile_x + 8 + (tile_y + 7)*F] = temp87;
xx[index + tile_x + 8 + (tile_y + 8)*F] = temp88;
xx[index + tile_x + 8 + (tile_y + 9)*F] = temp89;
xx[index + tile_x + 9 + tile_y*F] = temp90;
xx[index + tile_x + 9 + (tile_y + 1)*F] = temp91;
xx[index + tile_x + 9 + (tile_y + 2)*F] = temp92;
xx[index + tile_x + 9 + (tile_y + 3)*F] = temp93;
xx[index + tile_x + 9 + (tile_y + 4)*F] = temp94;
xx[index + tile_x + 9 + (tile_y + 5)*F] = temp95;
xx[index + tile_x + 9 + (tile_y + 6)*F] = temp96;
xx[index + tile_x + 9 + (tile_y + 7)*F] = temp97;
xx[index + tile_x + 9 + (tile_y + 8)*F] = temp98;
xx[index + tile_x + 9 + (tile_y + 9)*F] = temp99;
//*/
//regularization
if(tile_x == tile_y){
for(int k = 0; k < tile; k++)
xx[index + (tile_x+k)*(1+F)] += (end - start) * lambda;
}
}
}
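// get_hermitian_x: per-row kernel with the same staging and 10x10 register tiling as
// get_hermitian100, but thetaT is read through the thetaTTexRef texture reference and
// all M rows are covered in a single launch (no batch offset).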
__global__ void
__launch_bounds__(64, 6)
get_hermitian_x(float* tt,
const int* csrRowIndex, const int* csrColIndex, const float lambda) {
__shared__ float2 thetaTemp[SCAN_BATCH * F/2];
int row = blockIdx.x;
if (row < M) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
//int tile_x = (threadIdx.x/tile) * tile;//start x of this tile
//int tile_y = (threadIdx.x%tile) * tile;//start y of this tile
int tile_x = 0;
int tile_y = 0;
int tile = F/10;
for ( int i = 0; i < 10; i++){
int end = ((20-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * tile;
tile_y = (10 + threadIdx.x - end) * tile;
break;
}
}
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
float2 theta;
//copy texture --> smem, and sync
/*
if(threadIdx.x < SCAN_BATCH){
if(iter*SCAN_BATCH + threadIdx.x < end - start){
for (int k = 0; k < F; k += 2){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1);
thetaTemp[threadIdx.x * F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float));
}
*/
//two layers: warp divergence unless we split at 32
//32 > SCAN_BATCH
if(threadIdx.x < 2*32 ){
//int index = threadIdx.x;
int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31;
if(index < SCAN_BATCH){
if(iter*SCAN_BATCH + index < end - start){
//for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){
//IMPORTANT: for loop has constant and identical start and end
if(threadIdx.x < 32){
for (int k = 0; k < 50; k += 2){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + index] + k);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1);
thetaTemp[index * F/2 + k/2] = theta;
}
}
else {
for (int k = 0; k < 50; k += 2){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51);
thetaTemp[index * F/2 + k/2 + 25] = theta;
}
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[index*F/2], 0, F*sizeof(float));
}
}
/* //issue: not coalesced access to csrColIndex
if(threadIdx.x < F && threadIdx.x%2 == 0){
for(int k = 0; k< SCAN_BATCH; k++){
if(iter*SCAN_BATCH + k < end - start){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x +1);
thetaTemp[k * F/2 + threadIdx.x/2] = theta;
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[k*F/2 + threadIdx.x/2], 0, 2*sizeof(float));
}
}
*/
/*
int layers = blockDim.x/SCAN_BATCH; //100/30 = 3
//int height = blockDim.x/layers; //30
int y = threadIdx.x/SCAN_BATCH;//0,1,2,3; y==3 is not viable
//min(y, (layers-1)) * height
int y_start = y * 30;//0-29:0;30-59:30;60-89:60
int y_end = y_start + 30; //0-29:30;30-59:60;60-89:90
if(y >= layers - 1) y_end = F; //60-89:100
if(threadIdx.x - y_start < SCAN_BATCH){
if(iter*SCAN_BATCH + (threadIdx.x - y_start) < end - start){
for (int k = y_start; k < y_end; k += 2){
theta.x =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k);
theta.y =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k+1);
thetaTemp[(threadIdx.x - y_start)* F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[(threadIdx.x - y_start)*F/2 + y_start/2], 0, (y_end - y_start)*sizeof(float));
}
*/
__syncthreads();
///////////////////////////////////////////////////////////////////////////////////////////////////////////
//tile: 10*10
if(threadIdx.x < 55 ){
for(int k = 0; k < SCAN_BATCH; k++){
temp0 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp1 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp2 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp3 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp4 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp5 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp6 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp7 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp8 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp9 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp10 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp11 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp12 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp13 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp14 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp15 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp16 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp17 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp18 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp19 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp20 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp21 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp22 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp23 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp24 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp25 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp26 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp27 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp28 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp29 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp30 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp31 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp32 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp33 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp34 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp35 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp36 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp37 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp38 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp39 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp40 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp41 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp42 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp43 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp44 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp45 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp46 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp47 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp48 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp49 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp50 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp51 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp52 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp53 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp54 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp55 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp56 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp57 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp58 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp59 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp60 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp61 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp62 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp63 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp64 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp65 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp66 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp67 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp68 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp69 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp70 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp71 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp72 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp73 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp74 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp75 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp76 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp77 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp78 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp79 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp80 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp81 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp82 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp83 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp84 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp85 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp86 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp87 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp88 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp89 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp90 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp91 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp92 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp93 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp94 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp95 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp96 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp97 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp98 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp99 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
}
}
}
//end of iteration in copying from smem and aggregating in register
///////////////////////////////////////////////////////////////////////////////////////////////////////////
__syncthreads();
///*
if(threadIdx.x < 55 ){
//copy output to gmem
int index = blockIdx.x*F*F;
tt[index + tile_x + tile_y*F] = temp0;
tt[index + tile_x + (tile_y + 1)*F] = temp1;
tt[index + tile_x + (tile_y + 2)*F] = temp2;
tt[index + tile_x + (tile_y + 3)*F] = temp3;
tt[index + tile_x + (tile_y + 4)*F] = temp4;
tt[index + tile_x + (tile_y + 5)*F] = temp5;
tt[index + tile_x + (tile_y + 6)*F] = temp6;
tt[index + tile_x + (tile_y + 7)*F] = temp7;
tt[index + tile_x + (tile_y + 8)*F] = temp8;
tt[index + tile_x + (tile_y + 9)*F] = temp9;
tt[index + tile_x + 1 + tile_y*F] = temp10;
tt[index + tile_x + 1 + (tile_y + 1)*F] = temp11;
tt[index + tile_x + 1 + (tile_y + 2)*F] = temp12;
tt[index + tile_x + 1 + (tile_y + 3)*F] = temp13;
tt[index + tile_x + 1 + (tile_y + 4)*F] = temp14;
tt[index + tile_x + 1 + (tile_y + 5)*F] = temp15;
tt[index + tile_x + 1 + (tile_y + 6)*F] = temp16;
tt[index + tile_x + 1 + (tile_y + 7)*F] = temp17;
tt[index + tile_x + 1 + (tile_y + 8)*F] = temp18;
tt[index + tile_x + 1 + (tile_y + 9)*F] = temp19;
tt[index + tile_x + 2 + tile_y*F] = temp20;
tt[index + tile_x + 2 + (tile_y + 1)*F] = temp21;
tt[index + tile_x + 2 + (tile_y + 2)*F] = temp22;
tt[index + tile_x + 2 + (tile_y + 3)*F] = temp23;
tt[index + tile_x + 2 + (tile_y + 4)*F] = temp24;
tt[index + tile_x + 2 + (tile_y + 5)*F] = temp25;
tt[index + tile_x + 2 + (tile_y + 6)*F] = temp26;
tt[index + tile_x + 2 + (tile_y + 7)*F] = temp27;
tt[index + tile_x + 2 + (tile_y + 8)*F] = temp28;
tt[index + tile_x + 2 + (tile_y + 9)*F] = temp29;
tt[index + tile_x + 3 + tile_y*F] = temp30;
tt[index + tile_x + 3 + (tile_y + 1)*F] = temp31;
tt[index + tile_x + 3 + (tile_y + 2)*F] = temp32;
tt[index + tile_x + 3 + (tile_y + 3)*F] = temp33;
tt[index + tile_x + 3 + (tile_y + 4)*F] = temp34;
tt[index + tile_x + 3 + (tile_y + 5)*F] = temp35;
tt[index + tile_x + 3 + (tile_y + 6)*F] = temp36;
tt[index + tile_x + 3 + (tile_y + 7)*F] = temp37;
tt[index + tile_x + 3 + (tile_y + 8)*F] = temp38;
tt[index + tile_x + 3 + (tile_y + 9)*F] = temp39;
tt[index + tile_x + 4 + tile_y*F] = temp40;
tt[index + tile_x + 4 + (tile_y + 1)*F] = temp41;
tt[index + tile_x + 4 + (tile_y + 2)*F] = temp42;
tt[index + tile_x + 4 + (tile_y + 3)*F] = temp43;
tt[index + tile_x + 4 + (tile_y + 4)*F] = temp44;
tt[index + tile_x + 4 + (tile_y + 5)*F] = temp45;
tt[index + tile_x + 4 + (tile_y + 6)*F] = temp46;
tt[index + tile_x + 4 + (tile_y + 7)*F] = temp47;
tt[index + tile_x + 4 + (tile_y + 8)*F] = temp48;
tt[index + tile_x + 4 + (tile_y + 9)*F] = temp49;
tt[index + tile_x + 5 + tile_y*F] = temp50;
tt[index + tile_x + 5 + (tile_y + 1)*F] = temp51;
tt[index + tile_x + 5 + (tile_y + 2)*F] = temp52;
tt[index + tile_x + 5 + (tile_y + 3)*F] = temp53;
tt[index + tile_x + 5 + (tile_y + 4)*F] = temp54;
tt[index + tile_x + 5 + (tile_y + 5)*F] = temp55;
tt[index + tile_x + 5 + (tile_y + 6)*F] = temp56;
tt[index + tile_x + 5 + (tile_y + 7)*F] = temp57;
tt[index + tile_x + 5 + (tile_y + 8)*F] = temp58;
tt[index + tile_x + 5 + (tile_y + 9)*F] = temp59;
tt[index + tile_x + 6 + tile_y*F] = temp60;
tt[index + tile_x + 6 + (tile_y + 1)*F] = temp61;
tt[index + tile_x + 6 + (tile_y + 2)*F] = temp62;
tt[index + tile_x + 6 + (tile_y + 3)*F] = temp63;
tt[index + tile_x + 6 + (tile_y + 4)*F] = temp64;
tt[index + tile_x + 6 + (tile_y + 5)*F] = temp65;
tt[index + tile_x + 6 + (tile_y + 6)*F] = temp66;
tt[index + tile_x + 6 + (tile_y + 7)*F] = temp67;
tt[index + tile_x + 6 + (tile_y + 8)*F] = temp68;
tt[index + tile_x + 6 + (tile_y + 9)*F] = temp69;
tt[index + tile_x + 7 + tile_y*F] = temp70;
tt[index + tile_x + 7 + (tile_y + 1)*F] = temp71;
tt[index + tile_x + 7 + (tile_y + 2)*F] = temp72;
tt[index + tile_x + 7 + (tile_y + 3)*F] = temp73;
tt[index + tile_x + 7 + (tile_y + 4)*F] = temp74;
tt[index + tile_x + 7 + (tile_y + 5)*F] = temp75;
tt[index + tile_x + 7 + (tile_y + 6)*F] = temp76;
tt[index + tile_x + 7 + (tile_y + 7)*F] = temp77;
tt[index + tile_x + 7 + (tile_y + 8)*F] = temp78;
tt[index + tile_x + 7 + (tile_y + 9)*F] = temp79;
tt[index + tile_x + 8 + tile_y*F] = temp80;
tt[index + tile_x + 8 + (tile_y + 1)*F] = temp81;
tt[index + tile_x + 8 + (tile_y + 2)*F] = temp82;
tt[index + tile_x + 8 + (tile_y + 3)*F] = temp83;
tt[index + tile_x + 8 + (tile_y + 4)*F] = temp84;
tt[index + tile_x + 8 + (tile_y + 5)*F] = temp85;
tt[index + tile_x + 8 + (tile_y + 6)*F] = temp86;
tt[index + tile_x + 8 + (tile_y + 7)*F] = temp87;
tt[index + tile_x + 8 + (tile_y + 8)*F] = temp88;
tt[index + tile_x + 8 + (tile_y + 9)*F] = temp89;
tt[index + tile_x + 9 + tile_y*F] = temp90;
tt[index + tile_x + 9 + (tile_y + 1)*F] = temp91;
tt[index + tile_x + 9 + (tile_y + 2)*F] = temp92;
tt[index + tile_x + 9 + (tile_y + 3)*F] = temp93;
tt[index + tile_x + 9 + (tile_y + 4)*F] = temp94;
tt[index + tile_x + 9 + (tile_y + 5)*F] = temp95;
tt[index + tile_x + 9 + (tile_y + 6)*F] = temp96;
tt[index + tile_x + 9 + (tile_y + 7)*F] = temp97;
tt[index + tile_x + 9 + (tile_y + 8)*F] = temp98;
tt[index + tile_x + 9 + (tile_y + 9)*F] = temp99;
//symmetric
if(tile_x!=tile_y){
tt[index + tile_y + 0+ (tile_x + 0)*F]= temp0;
tt[index + tile_y + 1+ (tile_x + 0)*F]= temp1;
tt[index + tile_y + 2+ (tile_x + 0)*F]= temp2;
tt[index + tile_y + 3+ (tile_x + 0)*F]= temp3;
tt[index + tile_y + 4+ (tile_x + 0)*F]= temp4;
tt[index + tile_y + 5+ (tile_x + 0)*F]= temp5;
tt[index + tile_y + 6+ (tile_x + 0)*F]= temp6;
tt[index + tile_y + 7+ (tile_x + 0)*F]= temp7;
tt[index + tile_y + 8+ (tile_x + 0)*F]= temp8;
tt[index + tile_y + 9+ (tile_x + 0)*F]= temp9;
tt[index + tile_y + 0+ (tile_x + 1)*F]= temp10;
tt[index + tile_y + 1+ (tile_x + 1)*F]= temp11;
tt[index + tile_y + 2+ (tile_x + 1)*F]= temp12;
tt[index + tile_y + 3+ (tile_x + 1)*F]= temp13;
tt[index + tile_y + 4+ (tile_x + 1)*F]= temp14;
tt[index + tile_y + 5+ (tile_x + 1)*F]= temp15;
tt[index + tile_y + 6+ (tile_x + 1)*F]= temp16;
tt[index + tile_y + 7+ (tile_x + 1)*F]= temp17;
tt[index + tile_y + 8+ (tile_x + 1)*F]= temp18;
tt[index + tile_y + 9+ (tile_x + 1)*F]= temp19;
tt[index + tile_y + 0+ (tile_x + 2)*F]= temp20;
tt[index + tile_y + 1+ (tile_x + 2)*F]= temp21;
tt[index + tile_y + 2+ (tile_x + 2)*F]= temp22;
tt[index + tile_y + 3+ (tile_x + 2)*F]= temp23;
tt[index + tile_y + 4+ (tile_x + 2)*F]= temp24;
tt[index + tile_y + 5+ (tile_x + 2)*F]= temp25;
tt[index + tile_y + 6+ (tile_x + 2)*F]= temp26;
tt[index + tile_y + 7+ (tile_x + 2)*F]= temp27;
tt[index + tile_y + 8+ (tile_x + 2)*F]= temp28;
tt[index + tile_y + 9+ (tile_x + 2)*F]= temp29;
tt[index + tile_y + 0+ (tile_x + 3)*F]= temp30;
tt[index + tile_y + 1+ (tile_x + 3)*F]= temp31;
tt[index + tile_y + 2+ (tile_x + 3)*F]= temp32;
tt[index + tile_y + 3+ (tile_x + 3)*F]= temp33;
tt[index + tile_y + 4+ (tile_x + 3)*F]= temp34;
tt[index + tile_y + 5+ (tile_x + 3)*F]= temp35;
tt[index + tile_y + 6+ (tile_x + 3)*F]= temp36;
tt[index + tile_y + 7+ (tile_x + 3)*F]= temp37;
tt[index + tile_y + 8+ (tile_x + 3)*F]= temp38;
tt[index + tile_y + 9+ (tile_x + 3)*F]= temp39;
tt[index + tile_y + 0+ (tile_x + 4)*F]= temp40;
tt[index + tile_y + 1+ (tile_x + 4)*F]= temp41;
tt[index + tile_y + 2+ (tile_x + 4)*F]= temp42;
tt[index + tile_y + 3+ (tile_x + 4)*F]= temp43;
tt[index + tile_y + 4+ (tile_x + 4)*F]= temp44;
tt[index + tile_y + 5+ (tile_x + 4)*F]= temp45;
tt[index + tile_y + 6+ (tile_x + 4)*F]= temp46;
tt[index + tile_y + 7+ (tile_x + 4)*F]= temp47;
tt[index + tile_y + 8+ (tile_x + 4)*F]= temp48;
tt[index + tile_y + 9+ (tile_x + 4)*F]= temp49;
tt[index + tile_y + 0+ (tile_x + 5)*F]= temp50;
tt[index + tile_y + 1+ (tile_x + 5)*F]= temp51;
tt[index + tile_y + 2+ (tile_x + 5)*F]= temp52;
tt[index + tile_y + 3+ (tile_x + 5)*F]= temp53;
tt[index + tile_y + 4+ (tile_x + 5)*F]= temp54;
tt[index + tile_y + 5+ (tile_x + 5)*F]= temp55;
tt[index + tile_y + 6+ (tile_x + 5)*F]= temp56;
tt[index + tile_y + 7+ (tile_x + 5)*F]= temp57;
tt[index + tile_y + 8+ (tile_x + 5)*F]= temp58;
tt[index + tile_y + 9+ (tile_x + 5)*F]= temp59;
tt[index + tile_y + 0+ (tile_x + 6)*F]= temp60;
tt[index + tile_y + 1+ (tile_x + 6)*F]= temp61;
tt[index + tile_y + 2+ (tile_x + 6)*F]= temp62;
tt[index + tile_y + 3+ (tile_x + 6)*F]= temp63;
tt[index + tile_y + 4+ (tile_x + 6)*F]= temp64;
tt[index + tile_y + 5+ (tile_x + 6)*F]= temp65;
tt[index + tile_y + 6+ (tile_x + 6)*F]= temp66;
tt[index + tile_y + 7+ (tile_x + 6)*F]= temp67;
tt[index + tile_y + 8+ (tile_x + 6)*F]= temp68;
tt[index + tile_y + 9+ (tile_x + 6)*F]= temp69;
tt[index + tile_y + 0+ (tile_x + 7)*F]= temp70;
tt[index + tile_y + 1+ (tile_x + 7)*F]= temp71;
tt[index + tile_y + 2+ (tile_x + 7)*F]= temp72;
tt[index + tile_y + 3+ (tile_x + 7)*F]= temp73;
tt[index + tile_y + 4+ (tile_x + 7)*F]= temp74;
tt[index + tile_y + 5+ (tile_x + 7)*F]= temp75;
tt[index + tile_y + 6+ (tile_x + 7)*F]= temp76;
tt[index + tile_y + 7+ (tile_x + 7)*F]= temp77;
tt[index + tile_y + 8+ (tile_x + 7)*F]= temp78;
tt[index + tile_y + 9+ (tile_x + 7)*F]= temp79;
tt[index + tile_y + 0+ (tile_x + 8)*F]= temp80;
tt[index + tile_y + 1+ (tile_x + 8)*F]= temp81;
tt[index + tile_y + 2+ (tile_x + 8)*F]= temp82;
tt[index + tile_y + 3+ (tile_x + 8)*F]= temp83;
tt[index + tile_y + 4+ (tile_x + 8)*F]= temp84;
tt[index + tile_y + 5+ (tile_x + 8)*F]= temp85;
tt[index + tile_y + 6+ (tile_x + 8)*F]= temp86;
tt[index + tile_y + 7+ (tile_x + 8)*F]= temp87;
tt[index + tile_y + 8+ (tile_x + 8)*F]= temp88;
tt[index + tile_y + 9+ (tile_x + 8)*F]= temp89;
tt[index + tile_y + 0+ (tile_x + 9)*F]= temp90;
tt[index + tile_y + 1+ (tile_x + 9)*F]= temp91;
tt[index + tile_y + 2+ (tile_x + 9)*F]= temp92;
tt[index + tile_y + 3+ (tile_x + 9)*F]= temp93;
tt[index + tile_y + 4+ (tile_x + 9)*F]= temp94;
tt[index + tile_y + 5+ (tile_x + 9)*F]= temp95;
tt[index + tile_y + 6+ (tile_x + 9)*F]= temp96;
tt[index + tile_y + 7+ (tile_x + 9)*F]= temp97;
tt[index + tile_y + 8+ (tile_x + 9)*F]= temp98;
tt[index + tile_y + 9+ (tile_x + 9)*F]= temp99;
}
//regularization
if(tile_x == tile_y){
for(int k = 0; k < tile; k++)
tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda;
}
}
//*/
}
}
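//get_hermitian_theta: for each item column col, accumulate the F x F Gram matrix
//xx_col = sum over users u who rated col of x_u * x_u^T, reading the user factors from XT
//through the CSC row indices, then add the weighted-lambda term (end - start) * lambda to
//the diagonal. One block handles one column; 55 threads cover the upper triangle of the
//10x10 grid of register tiles and the result is mirrored to keep xx symmetric.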
__global__ void
__launch_bounds__(64, 6)
get_hermitian_theta(float* xx,
const int* cscRowIndex, const int* cscColIndex, const float lambda, const float * XT) {
__shared__ float2 xTemp[SCAN_BATCH * F/2];
int col = blockIdx.x;
if (col < N) {
//this block aggregates the end - start rows of X belonging to the users who rated column col
int start = cscColIndex[col];
int end = cscColIndex[col + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
//int tile_x = (threadIdx.x/tile) * tile;//start x of this tile
//int tile_y = (threadIdx.x%tile) * tile;//start y of this tile
int tile_x = 0;
int tile_y = 0;
int tile = F/10;
for ( int i = 0; i < 10; i++){
int end = ((20-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * tile;
tile_y = (10 + threadIdx.x - end) * tile;
break;
}
}
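//the loop above maps threadIdx.x in [0, 55) onto a tile pair with tile_y >= tile_x, i.e.
//the upper triangle of the 10x10 tile grid: threads 0..9 get tile_x = 0 and tile_y = 0..9,
//threads 10..18 get tile_x = 1*tile and tile_y = 1..9, ..., thread 54 gets tile_x = tile_y = 9*tile.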
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
float2 x;
//copy texture --> smem, and sync
/*
if(threadIdx.x < SCAN_BATCH){
if(iter*SCAN_BATCH + threadIdx.x < end - start){
for (int k = 0; k < F; k += 2){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1);
thetaTemp[threadIdx.x * F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float));
}
*/
//two layers of 32 threads: splitting the copy at the warp boundary (32) keeps each warp on
//an identical loop and avoids intra-warp divergence
//note 32 > SCAN_BATCH, so one warp is enough to cover all SCAN_BATCH rows
if(threadIdx.x < 2*32 ){
//int index = threadIdx.x;
int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31;
if(index < SCAN_BATCH){
if(iter*SCAN_BATCH + index < end - start){
//for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){
//IMPORTANT: for loop has constant and identical start and end
if(threadIdx.x < 32){
for (int k = 0; k < 50; k += 2){
x.x = XT[ F * cscRowIndex[start + iter*SCAN_BATCH + index] + k ];
x.y = XT[ F * cscRowIndex[start + iter*SCAN_BATCH + index] + k+1];
xTemp[index * F/2 + k/2] = x;
}
}
else {
for (int k = 0; k < 50; k += 2){
x.x = XT[ F * cscRowIndex[start + iter*SCAN_BATCH + index] + k + 50];
x.y = XT[ F * cscRowIndex[start + iter*SCAN_BATCH + index] + k + 51];
xTemp[index * F/2 + k/2 + 25] = x;
}
}
}
//must be the last iteration; no need to check
//not enough X rows to copy, set zero
else
memset(&xTemp[index*F/2], 0, F*sizeof(float));
}
}
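//the copy above assumes F == 100: warp 0 (threads 0..31) loads features [0, 50) and warp 1
//(threads 32..63) loads features [50, 100) for the same SCAN_BATCH rows, so both warps run
//the same constant-bound loop.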
/* //issue: not coalesced access to csrColIndex
if(threadIdx.x < F && threadIdx.x%2 == 0){
for(int k = 0; k< SCAN_BATCH; k++){
if(iter*SCAN_BATCH + k < end - start){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x +1);
thetaTemp[k * F/2 + threadIdx.x/2] = theta;
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[k*F/2 + threadIdx.x/2], 0, 2*sizeof(float));
}
}
*/
/*
int layers = blockDim.x/SCAN_BATCH; //100/30 = 3
//int height = blockDim.x/layers; //30
int y = threadIdx.x/SCAN_BATCH;//0,1,2,3; y==3 is not viable
//min(y, (layers-1)) * height
int y_start = y * 30;//0-29:0;30-59:30;60-89:60
int y_end = y_start + 30; //0-29:30;30-59:60;60-89:90
if(y >= layers - 1) y_end = F; //60-89:100
if(threadIdx.x - y_start < SCAN_BATCH){
if(iter*SCAN_BATCH + (threadIdx.x - y_start) < end - start){
for (int k = y_start; k < y_end; k += 2){
theta.x =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k);
theta.y =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k+1);
thetaTemp[(threadIdx.x - y_start)* F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[(threadIdx.x - y_start)*F/2 + y_start/2], 0, (y_end - y_start)*sizeof(float));
}
*/
__syncthreads();
///////////////////////////////////////////////////////////////////////////////////////////////////////////
//tile: 10*10
if(threadIdx.x < 55 ){
for(int k = 0; k < SCAN_BATCH; k++){
temp0 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp1 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp2 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp3 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp4 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp5 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp6 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp7 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp8 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp9 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp10 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp11 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp12 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp13 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp14 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp15 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp16 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp17 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp18 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp19 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
temp20 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp21 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp22 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp23 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp24 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp25 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp26 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp27 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp28 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp29 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp30 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp31 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp32 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp33 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp34 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp35 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp36 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp37 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp38 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp39 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
temp40 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp41 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp42 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp43 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp44 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp45 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp46 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp47 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp48 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp49 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp50 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp51 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp52 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp53 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp54 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp55 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp56 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp57 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp58 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp59 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
temp60 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp61 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp62 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp63 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp64 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp65 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp66 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp67 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp68 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp69 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp70 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp71 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp72 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp73 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp74 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp75 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp76 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp77 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp78 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp79 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
temp80 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp81 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp82 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp83 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp84 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp85 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp86 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp87 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp88 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp89 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp90 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp91 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp92 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp93 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp94 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp95 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp96 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp97 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp98 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp99 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
}
}
}
//end of iteration in copying from smem and aggregating in register
///////////////////////////////////////////////////////////////////////////////////////////////////////////
__syncthreads();
///*
if(threadIdx.x < 55 ){
//copy output to gmem
int index = blockIdx.x*F*F;
xx[index + tile_x + tile_y*F] = temp0;
xx[index + tile_x + (tile_y + 1)*F] = temp1;
xx[index + tile_x + (tile_y + 2)*F] = temp2;
xx[index + tile_x + (tile_y + 3)*F] = temp3;
xx[index + tile_x + (tile_y + 4)*F] = temp4;
xx[index + tile_x + (tile_y + 5)*F] = temp5;
xx[index + tile_x + (tile_y + 6)*F] = temp6;
xx[index + tile_x + (tile_y + 7)*F] = temp7;
xx[index + tile_x + (tile_y + 8)*F] = temp8;
xx[index + tile_x + (tile_y + 9)*F] = temp9;
xx[index + tile_x + 1 + tile_y*F] = temp10;
xx[index + tile_x + 1 + (tile_y + 1)*F] = temp11;
xx[index + tile_x + 1 + (tile_y + 2)*F] = temp12;
xx[index + tile_x + 1 + (tile_y + 3)*F] = temp13;
xx[index + tile_x + 1 + (tile_y + 4)*F] = temp14;
xx[index + tile_x + 1 + (tile_y + 5)*F] = temp15;
xx[index + tile_x + 1 + (tile_y + 6)*F] = temp16;
xx[index + tile_x + 1 + (tile_y + 7)*F] = temp17;
xx[index + tile_x + 1 + (tile_y + 8)*F] = temp18;
xx[index + tile_x + 1 + (tile_y + 9)*F] = temp19;
xx[index + tile_x + 2 + tile_y*F] = temp20;
xx[index + tile_x + 2 + (tile_y + 1)*F] = temp21;
xx[index + tile_x + 2 + (tile_y + 2)*F] = temp22;
xx[index + tile_x + 2 + (tile_y + 3)*F] = temp23;
xx[index + tile_x + 2 + (tile_y + 4)*F] = temp24;
xx[index + tile_x + 2 + (tile_y + 5)*F] = temp25;
xx[index + tile_x + 2 + (tile_y + 6)*F] = temp26;
xx[index + tile_x + 2 + (tile_y + 7)*F] = temp27;
xx[index + tile_x + 2 + (tile_y + 8)*F] = temp28;
xx[index + tile_x + 2 + (tile_y + 9)*F] = temp29;
xx[index + tile_x + 3 + tile_y*F] = temp30;
xx[index + tile_x + 3 + (tile_y + 1)*F] = temp31;
xx[index + tile_x + 3 + (tile_y + 2)*F] = temp32;
xx[index + tile_x + 3 + (tile_y + 3)*F] = temp33;
xx[index + tile_x + 3 + (tile_y + 4)*F] = temp34;
xx[index + tile_x + 3 + (tile_y + 5)*F] = temp35;
xx[index + tile_x + 3 + (tile_y + 6)*F] = temp36;
xx[index + tile_x + 3 + (tile_y + 7)*F] = temp37;
xx[index + tile_x + 3 + (tile_y + 8)*F] = temp38;
xx[index + tile_x + 3 + (tile_y + 9)*F] = temp39;
xx[index + tile_x + 4 + tile_y*F] = temp40;
xx[index + tile_x + 4 + (tile_y + 1)*F] = temp41;
xx[index + tile_x + 4 + (tile_y + 2)*F] = temp42;
xx[index + tile_x + 4 + (tile_y + 3)*F] = temp43;
xx[index + tile_x + 4 + (tile_y + 4)*F] = temp44;
xx[index + tile_x + 4 + (tile_y + 5)*F] = temp45;
xx[index + tile_x + 4 + (tile_y + 6)*F] = temp46;
xx[index + tile_x + 4 + (tile_y + 7)*F] = temp47;
xx[index + tile_x + 4 + (tile_y + 8)*F] = temp48;
xx[index + tile_x + 4 + (tile_y + 9)*F] = temp49;
xx[index + tile_x + 5 + tile_y*F] = temp50;
xx[index + tile_x + 5 + (tile_y + 1)*F] = temp51;
xx[index + tile_x + 5 + (tile_y + 2)*F] = temp52;
xx[index + tile_x + 5 + (tile_y + 3)*F] = temp53;
xx[index + tile_x + 5 + (tile_y + 4)*F] = temp54;
xx[index + tile_x + 5 + (tile_y + 5)*F] = temp55;
xx[index + tile_x + 5 + (tile_y + 6)*F] = temp56;
xx[index + tile_x + 5 + (tile_y + 7)*F] = temp57;
xx[index + tile_x + 5 + (tile_y + 8)*F] = temp58;
xx[index + tile_x + 5 + (tile_y + 9)*F] = temp59;
xx[index + tile_x + 6 + tile_y*F] = temp60;
xx[index + tile_x + 6 + (tile_y + 1)*F] = temp61;
xx[index + tile_x + 6 + (tile_y + 2)*F] = temp62;
xx[index + tile_x + 6 + (tile_y + 3)*F] = temp63;
xx[index + tile_x + 6 + (tile_y + 4)*F] = temp64;
xx[index + tile_x + 6 + (tile_y + 5)*F] = temp65;
xx[index + tile_x + 6 + (tile_y + 6)*F] = temp66;
xx[index + tile_x + 6 + (tile_y + 7)*F] = temp67;
xx[index + tile_x + 6 + (tile_y + 8)*F] = temp68;
xx[index + tile_x + 6 + (tile_y + 9)*F] = temp69;
xx[index + tile_x + 7 + tile_y*F] = temp70;
xx[index + tile_x + 7 + (tile_y + 1)*F] = temp71;
xx[index + tile_x + 7 + (tile_y + 2)*F] = temp72;
xx[index + tile_x + 7 + (tile_y + 3)*F] = temp73;
xx[index + tile_x + 7 + (tile_y + 4)*F] = temp74;
xx[index + tile_x + 7 + (tile_y + 5)*F] = temp75;
xx[index + tile_x + 7 + (tile_y + 6)*F] = temp76;
xx[index + tile_x + 7 + (tile_y + 7)*F] = temp77;
xx[index + tile_x + 7 + (tile_y + 8)*F] = temp78;
xx[index + tile_x + 7 + (tile_y + 9)*F] = temp79;
xx[index + tile_x + 8 + tile_y*F] = temp80;
xx[index + tile_x + 8 + (tile_y + 1)*F] = temp81;
xx[index + tile_x + 8 + (tile_y + 2)*F] = temp82;
xx[index + tile_x + 8 + (tile_y + 3)*F] = temp83;
xx[index + tile_x + 8 + (tile_y + 4)*F] = temp84;
xx[index + tile_x + 8 + (tile_y + 5)*F] = temp85;
xx[index + tile_x + 8 + (tile_y + 6)*F] = temp86;
xx[index + tile_x + 8 + (tile_y + 7)*F] = temp87;
xx[index + tile_x + 8 + (tile_y + 8)*F] = temp88;
xx[index + tile_x + 8 + (tile_y + 9)*F] = temp89;
xx[index + tile_x + 9 + tile_y*F] = temp90;
xx[index + tile_x + 9 + (tile_y + 1)*F] = temp91;
xx[index + tile_x + 9 + (tile_y + 2)*F] = temp92;
xx[index + tile_x + 9 + (tile_y + 3)*F] = temp93;
xx[index + tile_x + 9 + (tile_y + 4)*F] = temp94;
xx[index + tile_x + 9 + (tile_y + 5)*F] = temp95;
xx[index + tile_x + 9 + (tile_y + 6)*F] = temp96;
xx[index + tile_x + 9 + (tile_y + 7)*F] = temp97;
xx[index + tile_x + 9 + (tile_y + 8)*F] = temp98;
xx[index + tile_x + 9 + (tile_y + 9)*F] = temp99;
//symmetric
if(tile_x!=tile_y){
xx[index + tile_y + 0+ (tile_x + 0)*F]= temp0;
xx[index + tile_y + 1+ (tile_x + 0)*F]= temp1;
xx[index + tile_y + 2+ (tile_x + 0)*F]= temp2;
xx[index + tile_y + 3+ (tile_x + 0)*F]= temp3;
xx[index + tile_y + 4+ (tile_x + 0)*F]= temp4;
xx[index + tile_y + 5+ (tile_x + 0)*F]= temp5;
xx[index + tile_y + 6+ (tile_x + 0)*F]= temp6;
xx[index + tile_y + 7+ (tile_x + 0)*F]= temp7;
xx[index + tile_y + 8+ (tile_x + 0)*F]= temp8;
xx[index + tile_y + 9+ (tile_x + 0)*F]= temp9;
xx[index + tile_y + 0+ (tile_x + 1)*F]= temp10;
xx[index + tile_y + 1+ (tile_x + 1)*F]= temp11;
xx[index + tile_y + 2+ (tile_x + 1)*F]= temp12;
xx[index + tile_y + 3+ (tile_x + 1)*F]= temp13;
xx[index + tile_y + 4+ (tile_x + 1)*F]= temp14;
xx[index + tile_y + 5+ (tile_x + 1)*F]= temp15;
xx[index + tile_y + 6+ (tile_x + 1)*F]= temp16;
xx[index + tile_y + 7+ (tile_x + 1)*F]= temp17;
xx[index + tile_y + 8+ (tile_x + 1)*F]= temp18;
xx[index + tile_y + 9+ (tile_x + 1)*F]= temp19;
xx[index + tile_y + 0+ (tile_x + 2)*F]= temp20;
xx[index + tile_y + 1+ (tile_x + 2)*F]= temp21;
xx[index + tile_y + 2+ (tile_x + 2)*F]= temp22;
xx[index + tile_y + 3+ (tile_x + 2)*F]= temp23;
xx[index + tile_y + 4+ (tile_x + 2)*F]= temp24;
xx[index + tile_y + 5+ (tile_x + 2)*F]= temp25;
xx[index + tile_y + 6+ (tile_x + 2)*F]= temp26;
xx[index + tile_y + 7+ (tile_x + 2)*F]= temp27;
xx[index + tile_y + 8+ (tile_x + 2)*F]= temp28;
xx[index + tile_y + 9+ (tile_x + 2)*F]= temp29;
xx[index + tile_y + 0+ (tile_x + 3)*F]= temp30;
xx[index + tile_y + 1+ (tile_x + 3)*F]= temp31;
xx[index + tile_y + 2+ (tile_x + 3)*F]= temp32;
xx[index + tile_y + 3+ (tile_x + 3)*F]= temp33;
xx[index + tile_y + 4+ (tile_x + 3)*F]= temp34;
xx[index + tile_y + 5+ (tile_x + 3)*F]= temp35;
xx[index + tile_y + 6+ (tile_x + 3)*F]= temp36;
xx[index + tile_y + 7+ (tile_x + 3)*F]= temp37;
xx[index + tile_y + 8+ (tile_x + 3)*F]= temp38;
xx[index + tile_y + 9+ (tile_x + 3)*F]= temp39;
xx[index + tile_y + 0+ (tile_x + 4)*F]= temp40;
xx[index + tile_y + 1+ (tile_x + 4)*F]= temp41;
xx[index + tile_y + 2+ (tile_x + 4)*F]= temp42;
xx[index + tile_y + 3+ (tile_x + 4)*F]= temp43;
xx[index + tile_y + 4+ (tile_x + 4)*F]= temp44;
xx[index + tile_y + 5+ (tile_x + 4)*F]= temp45;
xx[index + tile_y + 6+ (tile_x + 4)*F]= temp46;
xx[index + tile_y + 7+ (tile_x + 4)*F]= temp47;
xx[index + tile_y + 8+ (tile_x + 4)*F]= temp48;
xx[index + tile_y + 9+ (tile_x + 4)*F]= temp49;
xx[index + tile_y + 0+ (tile_x + 5)*F]= temp50;
xx[index + tile_y + 1+ (tile_x + 5)*F]= temp51;
xx[index + tile_y + 2+ (tile_x + 5)*F]= temp52;
xx[index + tile_y + 3+ (tile_x + 5)*F]= temp53;
xx[index + tile_y + 4+ (tile_x + 5)*F]= temp54;
xx[index + tile_y + 5+ (tile_x + 5)*F]= temp55;
xx[index + tile_y + 6+ (tile_x + 5)*F]= temp56;
xx[index + tile_y + 7+ (tile_x + 5)*F]= temp57;
xx[index + tile_y + 8+ (tile_x + 5)*F]= temp58;
xx[index + tile_y + 9+ (tile_x + 5)*F]= temp59;
xx[index + tile_y + 0+ (tile_x + 6)*F]= temp60;
xx[index + tile_y + 1+ (tile_x + 6)*F]= temp61;
xx[index + tile_y + 2+ (tile_x + 6)*F]= temp62;
xx[index + tile_y + 3+ (tile_x + 6)*F]= temp63;
xx[index + tile_y + 4+ (tile_x + 6)*F]= temp64;
xx[index + tile_y + 5+ (tile_x + 6)*F]= temp65;
xx[index + tile_y + 6+ (tile_x + 6)*F]= temp66;
xx[index + tile_y + 7+ (tile_x + 6)*F]= temp67;
xx[index + tile_y + 8+ (tile_x + 6)*F]= temp68;
xx[index + tile_y + 9+ (tile_x + 6)*F]= temp69;
xx[index + tile_y + 0+ (tile_x + 7)*F]= temp70;
xx[index + tile_y + 1+ (tile_x + 7)*F]= temp71;
xx[index + tile_y + 2+ (tile_x + 7)*F]= temp72;
xx[index + tile_y + 3+ (tile_x + 7)*F]= temp73;
xx[index + tile_y + 4+ (tile_x + 7)*F]= temp74;
xx[index + tile_y + 5+ (tile_x + 7)*F]= temp75;
xx[index + tile_y + 6+ (tile_x + 7)*F]= temp76;
xx[index + tile_y + 7+ (tile_x + 7)*F]= temp77;
xx[index + tile_y + 8+ (tile_x + 7)*F]= temp78;
xx[index + tile_y + 9+ (tile_x + 7)*F]= temp79;
xx[index + tile_y + 0+ (tile_x + 8)*F]= temp80;
xx[index + tile_y + 1+ (tile_x + 8)*F]= temp81;
xx[index + tile_y + 2+ (tile_x + 8)*F]= temp82;
xx[index + tile_y + 3+ (tile_x + 8)*F]= temp83;
xx[index + tile_y + 4+ (tile_x + 8)*F]= temp84;
xx[index + tile_y + 5+ (tile_x + 8)*F]= temp85;
xx[index + tile_y + 6+ (tile_x + 8)*F]= temp86;
xx[index + tile_y + 7+ (tile_x + 8)*F]= temp87;
xx[index + tile_y + 8+ (tile_x + 8)*F]= temp88;
xx[index + tile_y + 9+ (tile_x + 8)*F]= temp89;
xx[index + tile_y + 0+ (tile_x + 9)*F]= temp90;
xx[index + tile_y + 1+ (tile_x + 9)*F]= temp91;
xx[index + tile_y + 2+ (tile_x + 9)*F]= temp92;
xx[index + tile_y + 3+ (tile_x + 9)*F]= temp93;
xx[index + tile_y + 4+ (tile_x + 9)*F]= temp94;
xx[index + tile_y + 5+ (tile_x + 9)*F]= temp95;
xx[index + tile_y + 6+ (tile_x + 9)*F]= temp96;
xx[index + tile_y + 7+ (tile_x + 9)*F]= temp97;
xx[index + tile_y + 8+ (tile_x + 9)*F]= temp98;
xx[index + tile_y + 9+ (tile_x + 9)*F]= temp99;
}
//regularization
if(tile_x == tile_y){
for(int k = 0; k < tile; k++)
xx[index + (tile_x+k)*(1+F)] += (end - start) * lambda;
}
}
//*/
}
}
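/*
A minimal, illustrative launch for get_hermitian_theta (not taken from the original driver
code; pointer names are placeholders for device buffers set up by the caller): one block per
item column and 64 threads per block, matching __launch_bounds__(64, 6) and the
threadIdx.x < 2*32 / < 55 guards above.

	hipLaunchKernelGGL((get_hermitian_theta), dim3(N), dim3(64), 0, 0,
		xx, cscRowIndex, cscColIndex, lambda, XT_d);
	cudaCheckError();
*/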
__global__ void
__launch_bounds__(100, 4)
updateXByBlock2pRegDsmemTile(float* tt,
const int* csrRowIndex, const int* csrColIndex, const float lambda) {
__shared__ float2 thetaTemp[SCAN_BATCH * F/2];
int row = blockIdx.x;
if (row < M) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
float2 theta;
int tile = F/10;
int tile_x = (threadIdx.x/tile) * tile;//start x of this tile
int tile_y = (threadIdx.x%tile) * tile;//start y of this tile
for (int iter = 0; iter < iterations; iter ++){
//copy texture --> smem, and sync
if(threadIdx.x < SCAN_BATCH){
if(iter*SCAN_BATCH + threadIdx.x < end - start){
for (int k = 0; k < F; k += 2){
theta.x =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k);
theta.y =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1);
thetaTemp[threadIdx.x * F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float));
}
__syncthreads();
///////////////////////////////////////////////////////////////////////////////////////////////////////////
//tile: 10*10
for(int k = 0; k < SCAN_BATCH; k++){
temp0 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp1 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp2 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp3 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp4 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp5 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp6 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp7 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp8 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp9 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp10 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp11 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp12 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp13 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp14 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp15 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp16 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp17 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp18 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp19 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp20 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp21 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp22 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp23 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp24 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp25 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp26 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp27 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp28 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp29 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp30 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp31 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp32 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp33 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp34 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp35 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp36 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp37 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp38 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp39 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp40 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp41 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp42 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp43 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp44 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp45 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp46 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp47 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp48 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp49 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp50 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp51 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp52 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp53 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp54 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp55 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp56 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp57 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp58 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp59 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp60 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp61 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp62 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp63 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp64 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp65 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp66 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp67 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp68 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp69 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp70 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp71 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp72 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp73 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp74 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp75 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp76 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp77 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp78 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp79 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp80 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp81 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp82 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp83 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp84 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp85 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp86 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp87 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp88 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp89 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp90 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp91 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp92 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp93 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp94 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp95 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp96 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp97 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp98 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp99 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
__syncthreads();
}
int index = blockIdx.x*F*F;
///*
//copy output to gmem
tt[index + tile_x + tile_y*F] = temp0;
tt[index + tile_x + (tile_y + 1)*F] = temp1;
tt[index + tile_x + (tile_y + 2)*F] = temp2;
tt[index + tile_x + (tile_y + 3)*F] = temp3;
tt[index + tile_x + (tile_y + 4)*F] = temp4;
tt[index + tile_x + (tile_y + 5)*F] = temp5;
tt[index + tile_x + (tile_y + 6)*F] = temp6;
tt[index + tile_x + (tile_y + 7)*F] = temp7;
tt[index + tile_x + (tile_y + 8)*F] = temp8;
tt[index + tile_x + (tile_y + 9)*F] = temp9;
tt[index + tile_x + 1 + tile_y*F] = temp10;
tt[index + tile_x + 1 + (tile_y + 1)*F] = temp11;
tt[index + tile_x + 1 + (tile_y + 2)*F] = temp12;
tt[index + tile_x + 1 + (tile_y + 3)*F] = temp13;
tt[index + tile_x + 1 + (tile_y + 4)*F] = temp14;
tt[index + tile_x + 1 + (tile_y + 5)*F] = temp15;
tt[index + tile_x + 1 + (tile_y + 6)*F] = temp16;
tt[index + tile_x + 1 + (tile_y + 7)*F] = temp17;
tt[index + tile_x + 1 + (tile_y + 8)*F] = temp18;
tt[index + tile_x + 1 + (tile_y + 9)*F] = temp19;
tt[index + tile_x + 2 + tile_y*F] = temp20;
tt[index + tile_x + 2 + (tile_y + 1)*F] = temp21;
tt[index + tile_x + 2 + (tile_y + 2)*F] = temp22;
tt[index + tile_x + 2 + (tile_y + 3)*F] = temp23;
tt[index + tile_x + 2 + (tile_y + 4)*F] = temp24;
tt[index + tile_x + 2 + (tile_y + 5)*F] = temp25;
tt[index + tile_x + 2 + (tile_y + 6)*F] = temp26;
tt[index + tile_x + 2 + (tile_y + 7)*F] = temp27;
tt[index + tile_x + 2 + (tile_y + 8)*F] = temp28;
tt[index + tile_x + 2 + (tile_y + 9)*F] = temp29;
tt[index + tile_x + 3 + tile_y*F] = temp30;
tt[index + tile_x + 3 + (tile_y + 1)*F] = temp31;
tt[index + tile_x + 3 + (tile_y + 2)*F] = temp32;
tt[index + tile_x + 3 + (tile_y + 3)*F] = temp33;
tt[index + tile_x + 3 + (tile_y + 4)*F] = temp34;
tt[index + tile_x + 3 + (tile_y + 5)*F] = temp35;
tt[index + tile_x + 3 + (tile_y + 6)*F] = temp36;
tt[index + tile_x + 3 + (tile_y + 7)*F] = temp37;
tt[index + tile_x + 3 + (tile_y + 8)*F] = temp38;
tt[index + tile_x + 3 + (tile_y + 9)*F] = temp39;
tt[index + tile_x + 4 + tile_y*F] = temp40;
tt[index + tile_x + 4 + (tile_y + 1)*F] = temp41;
tt[index + tile_x + 4 + (tile_y + 2)*F] = temp42;
tt[index + tile_x + 4 + (tile_y + 3)*F] = temp43;
tt[index + tile_x + 4 + (tile_y + 4)*F] = temp44;
tt[index + tile_x + 4 + (tile_y + 5)*F] = temp45;
tt[index + tile_x + 4 + (tile_y + 6)*F] = temp46;
tt[index + tile_x + 4 + (tile_y + 7)*F] = temp47;
tt[index + tile_x + 4 + (tile_y + 8)*F] = temp48;
tt[index + tile_x + 4 + (tile_y + 9)*F] = temp49;
tt[index + tile_x + 5 + tile_y*F] = temp50;
tt[index + tile_x + 5 + (tile_y + 1)*F] = temp51;
tt[index + tile_x + 5 + (tile_y + 2)*F] = temp52;
tt[index + tile_x + 5 + (tile_y + 3)*F] = temp53;
tt[index + tile_x + 5 + (tile_y + 4)*F] = temp54;
tt[index + tile_x + 5 + (tile_y + 5)*F] = temp55;
tt[index + tile_x + 5 + (tile_y + 6)*F] = temp56;
tt[index + tile_x + 5 + (tile_y + 7)*F] = temp57;
tt[index + tile_x + 5 + (tile_y + 8)*F] = temp58;
tt[index + tile_x + 5 + (tile_y + 9)*F] = temp59;
tt[index + tile_x + 6 + tile_y*F] = temp60;
tt[index + tile_x + 6 + (tile_y + 1)*F] = temp61;
tt[index + tile_x + 6 + (tile_y + 2)*F] = temp62;
tt[index + tile_x + 6 + (tile_y + 3)*F] = temp63;
tt[index + tile_x + 6 + (tile_y + 4)*F] = temp64;
tt[index + tile_x + 6 + (tile_y + 5)*F] = temp65;
tt[index + tile_x + 6 + (tile_y + 6)*F] = temp66;
tt[index + tile_x + 6 + (tile_y + 7)*F] = temp67;
tt[index + tile_x + 6 + (tile_y + 8)*F] = temp68;
tt[index + tile_x + 6 + (tile_y + 9)*F] = temp69;
tt[index + tile_x + 7 + tile_y*F] = temp70;
tt[index + tile_x + 7 + (tile_y + 1)*F] = temp71;
tt[index + tile_x + 7 + (tile_y + 2)*F] = temp72;
tt[index + tile_x + 7 + (tile_y + 3)*F] = temp73;
tt[index + tile_x + 7 + (tile_y + 4)*F] = temp74;
tt[index + tile_x + 7 + (tile_y + 5)*F] = temp75;
tt[index + tile_x + 7 + (tile_y + 6)*F] = temp76;
tt[index + tile_x + 7 + (tile_y + 7)*F] = temp77;
tt[index + tile_x + 7 + (tile_y + 8)*F] = temp78;
tt[index + tile_x + 7 + (tile_y + 9)*F] = temp79;
tt[index + tile_x + 8 + tile_y*F] = temp80;
tt[index + tile_x + 8 + (tile_y + 1)*F] = temp81;
tt[index + tile_x + 8 + (tile_y + 2)*F] = temp82;
tt[index + tile_x + 8 + (tile_y + 3)*F] = temp83;
tt[index + tile_x + 8 + (tile_y + 4)*F] = temp84;
tt[index + tile_x + 8 + (tile_y + 5)*F] = temp85;
tt[index + tile_x + 8 + (tile_y + 6)*F] = temp86;
tt[index + tile_x + 8 + (tile_y + 7)*F] = temp87;
tt[index + tile_x + 8 + (tile_y + 8)*F] = temp88;
tt[index + tile_x + 8 + (tile_y + 9)*F] = temp89;
tt[index + tile_x + 9 + tile_y*F] = temp90;
tt[index + tile_x + 9 + (tile_y + 1)*F] = temp91;
tt[index + tile_x + 9 + (tile_y + 2)*F] = temp92;
tt[index + tile_x + 9 + (tile_y + 3)*F] = temp93;
tt[index + tile_x + 9 + (tile_y + 4)*F] = temp94;
tt[index + tile_x + 9 + (tile_y + 5)*F] = temp95;
tt[index + tile_x + 9 + (tile_y + 6)*F] = temp96;
tt[index + tile_x + 9 + (tile_y + 7)*F] = temp97;
tt[index + tile_x + 9 + (tile_y + 8)*F] = temp98;
tt[index + tile_x + 9 + (tile_y + 9)*F] = temp99;
//*/
//regularization
if(tile_x == tile_y){
for(int k = 0; k < tile; k++)
tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda;
}
}
}
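/*
updateXByBlock2pRegDsmemTile builds, for each user row, tt_u = Theta_u^T * Theta_u plus the
weighted-lambda regularization n_u * lambda * I, where n_u = end - start is the number of
ratings of user u (that is what the diagonal update (end - start) * lambda implements).
A minimal, illustrative launch (pointer names are placeholders; the driver code may batch
rows differently): one block per row and 100 threads, matching __launch_bounds__(100, 4)
and the tile_x/tile_y mapping above.

	hipLaunchKernelGGL((updateXByBlock2pRegDsmemTile), dim3(M), dim3(100), 0, 0,
		tt, csrRowIndex, csrColIndex, lambda);
	cudaCheckError();
*/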
void loadCSRSparseMatrix(const char* dataFile, const char* rowFile, const char* colFile,
float* data, unsigned int* row, int* col) {
printf("\n loading CSR...\n");
FILE *dFile = fopen(dataFile,"rb");
FILE *rFile = fopen(rowFile,"rb");
FILE *cFile = fopen(colFile,"rb");
if (!rFile||!dFile||!cFile)
{
printf("Unable to open file!");
return;
}
fread(&row[0], 4*(M+1) ,1, rFile);
fread(&col[0], 4*NNZ ,1, cFile);
fread(&data[0], 4*NNZ ,1, dFile);
fclose(rFile);
fclose(dFile);
fclose(cFile);
}
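//note: loadCSRSparseMatrix (and the CSC loaders below) read raw binary dumps and assume
//4-byte indices/values (hence the 4*(M+1) and 4*NNZ byte counts); fread return values are
//not checked, so a short or failed read goes unnoticed.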
void loadCSCSparseMatrix(const char* dataFile, const char* rowFile, const char* colFile, float * data, int* row, int* col) {
printf("\n loading CSC...\n");
FILE *dFile = fopen(dataFile,"rb");
FILE *rFile = fopen(rowFile,"rb");
FILE *cFile = fopen(colFile,"rb");
if (!rFile||!dFile||!cFile)
{
printf("Unable to open file!");
return;
}
fread(&data[0], 4*NNZ ,1, dFile);
fread(&row[0], 4*NNZ ,1, rFile);
fread(&col[0], 4*(N+1) ,1, cFile);
fclose(rFile);
fclose(dFile);
fclose(cFile);
}
void loadCSCSparseMatrixInBatch(const std::string dataFile, const std::string rowFile, const std::string colFile, float * data, int* row, int* col, long csc_nnz, int n) {
printf("\n loading CSC from %s, %s, %s \n", dataFile.c_str(), rowFile.c_str(), colFile.c_str());
FILE *dFile = fopen(dataFile.c_str(),"rb");
FILE *rFile = fopen(rowFile.c_str(),"rb");
FILE *cFile = fopen(colFile.c_str(),"rb");
if (!rFile||!dFile||!cFile)
{
printf("Unable to open file!");
return;
}
fread(&data[0], 4*csc_nnz ,1, dFile);
fread(&row[0], 4*csc_nnz ,1, rFile);
fread(&col[0], 4*(n+1) ,1, cFile);
fclose(rFile);
fclose(dFile);
fclose(cFile);
}
void loadCooSparseMatrixRowPtr(const char* rowFile, int* row) {
printf("\n loading COO...\n");
FILE *rfile = fopen(rowFile,"rb");
fread(&row[0], 4*NNZ ,1, rfile);
fclose(rfile);
//FILE *file = fopen("./hugewiki_R_train_coo.row.bin", "wb");
//fwrite(row, 4*NNZ, 1, file);
//fclose(file);
}
void loadCooSparseMatrix(const char* dataFile, const char* rowFile, const char* colFile,
float* data, int* row, int* col, int nnz) {
std::ifstream dfile(dataFile);
std::ifstream rfile(rowFile);
std::ifstream cfile(colFile);
float d;
int d_i = 0;
while (dfile >> d) {
//printf("%f ",d);
data[d_i++] = d;
}
int r;
int r_i = 0;
while (rfile >> r) {
//printf("%d ",r);
row[r_i++] = r;
}
int c;
int c_i = 0;
while (cfile >> c) {
//printf("%d ",c);
col[c_i++] = c;
}
}
inline void updateX(
// const int batch_id,
const int batch_size, const long batch_offset, float * ythetaT, float * tt, float * XT_h,
hipblasHandle_t handle,
// const int m, const int n, const int f, const int nnz,
float** devPtrTTHost, float **devPtrYthetaTHost,
float **devPtrTT, float **devPtrYthetaT, int *P, int *INFO){
//auto t0 = std::chrono::high_resolution_clock::now();
//left-hand side pointers
for (int k = 0; k < batch_size; k++) {
devPtrTTHost[k] = &tt[k * F * F];
}
cudacall(hipMemcpy(devPtrTT, devPtrTTHost,
batch_size * sizeof(*devPtrTT),hipMemcpyHostToDevice));
int * info2 = (int *) malloc(sizeof(int));
//right-hand side pointer
for (int k = 0; k < batch_size; k++) {
devPtrYthetaTHost[k] = &ythetaT[k * F];
}
cudacall(hipMemcpy(devPtrYthetaT, devPtrYthetaTHost, batch_size * sizeof(*devPtrYthetaT),
hipMemcpyHostToDevice));
//getrf then getrs
//printf("\t\t\tbatch %d, prepare in secs: %f\n", batch_id, seconds() - t0);
//t0 = seconds();
hipblasSgetrfBatched(handle, F, devPtrTT, F, P, INFO, batch_size);
//hipDeviceSynchronize();
//cudaCheckError();
//printf("\t\t\tbatch %d, LU factorization of tt in secs: %f\n", batch_id, seconds() - t0);
//t0 = seconds();
hipblasSgetrsBatched(handle, HIPBLAS_OP_N, F, 1,
(const float ** ) devPtrTT, F, P, devPtrYthetaT, F, info2, batch_size);
//hipDeviceSynchronize();
//cudaCheckError();
//printf("\t\t\tbatch %d, solve after LU in secs: %f\n", batch_id, seconds() - t0);
//t0 = seconds();
cudacall( hipMemcpy(&XT_h[batch_offset * F], ythetaT,
batch_size * F * sizeof(float), hipMemcpyDeviceToHost) );
//printf("\t\t\tbatch %d, copy to host XT_h secs: %f\n", batch_id, seconds() - t0);
free(info2);
}
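/*
updateX solves, for every user u in the batch, the normal equations tt_u * x_u = ythetaT_u
with one batched LU factorization (hipblasSgetrfBatched) followed by a batched solve
(hipblasSgetrsBatched); the solution overwrites ythetaT and is copied back into XT_h at
batch_offset. A call sketch, assuming the buffers allocated in the per-GPU update-X loop
in main below (argument names are placeholders):

	updateX(batch_size, batch_offset, ythetaT, tt, XT_h, handle[gpu_id],
		devPtrTTHost[gpu_id], devPtrYthetaTHost[gpu_id],
		devPtrTT, devPtrYthetaT, P, INFO);
*/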
int updateTheta(const int batch_size, const int batch_offset, float * xx,
float * yTXT, float * thetaT,
hipblasHandle_t handle, const int n, const int f){
float ** devPtrXXHost = (float**) malloc(batch_size * sizeof(devPtrXXHost[0]));
float **devPtrXX = 0;
for (int k = 0; k < batch_size; k++) {
devPtrXXHost[k] = &xx[k * F * F];
}
cudacall(hipMalloc((void** ) &devPtrXX, batch_size * sizeof(*devPtrXX)));
cudacall(hipMemcpy(devPtrXX, devPtrXXHost, batch_size * sizeof(*devPtrXX), hipMemcpyHostToDevice));
int *P, *INFO;
cudacall(hipMalloc(&P, f * batch_size * sizeof(int)));
cudacall(hipMalloc(&INFO, batch_size * sizeof(int)));
hipblasSgetrfBatched(handle, F, devPtrXX, F, P, INFO, batch_size);
hipDeviceSynchronize();
cudaCheckError();
//gettimeofday(&tv1, NULL);
//elapsed = (tv1.tv_sec - tv0.tv_sec)
// + (tv1.tv_usec - tv0.tv_usec) / 1000000.0;
//printf("\t %f seconds. \n", elapsed);
//printf("******* solve xx * thetaT = yTXT with CUDA 7.\n");
float **devPtrYTXTHost = 0;
float **devPtrYTXT = 0;
devPtrYTXTHost = (float**) malloc(batch_size * sizeof(devPtrYTXTHost[0]));
for (int k = 0; k < batch_size; k++) {
devPtrYTXTHost[k] = &yTXT[k * F];
}
cudacall(hipMalloc((void** ) &devPtrYTXT, batch_size * sizeof(*devPtrYTXT)));
cudacall(hipMemcpy(devPtrYTXT, devPtrYTXTHost, batch_size * sizeof(*devPtrYTXT),hipMemcpyHostToDevice));
int * info2 = (int *) malloc(sizeof(int));
hipblasSgetrsBatched(handle, HIPBLAS_OP_N, F, 1,
(const float ** ) devPtrXX, F, P, devPtrYTXT, F, info2, batch_size);
hipDeviceSynchronize();
cudaCheckError();
cudacall( hipMemcpy( &thetaT[batch_offset * F], yTXT,
batch_size * F * sizeof(float), hipMemcpyDeviceToDevice) );
//gettimeofday(&tv2, NULL);
//elapsed = (tv2.tv_sec - tv1.tv_sec)
// + (tv2.tv_usec - tv1.tv_usec) / 1000000.0;
//printf("\t %f seconds. \n", elapsed);
/*
//testing purpose
float* yTXHost = (float *) malloc(f * n * sizeof(yTXHost[0]));
cudacall(hipMemcpy(yTXHost, yTXT, n * f * sizeof(float), hipMemcpyDeviceToHost));
printf("\n*********yTXT***\n");
for (int i = 0; i < n * f; i++) {
printf("%f\t", yTXHost[i]);
}
printf("\n");
*/
/*
float* thetaTHost = (float *) malloc(f * n * sizeof(thetaTHost[0]));
cudacall( hipMemcpy(thetaTHost, thetaT, n * f * sizeof(float),hipMemcpyDeviceToHost));
printf("\n*********ThetaT***\n");
for (int i = 0; i < n * f; i++) {
printf("%f\t", thetaTHost[i]);
}
printf("\n");
*/
free(devPtrXXHost);
hipFree(devPtrXX);
hipFree(P);
hipFree(INFO);
free(info2);
free(devPtrYTXTHost);
hipFree(devPtrYTXT);
return 0;
}
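//updateTheta mirrors updateX for the item factors: it LU-factorizes each xx_v and solves
//xx_v * theta_v = yTXT_v with the same batched getrf/getrs pair, but keeps the result on
//the device (the final copy into thetaT is DeviceToDevice) instead of staging it through
//the host.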
__global__ void RMSE(const float * csrVal, const int* cooRowIndex,
const int* csrColIndex, const float * thetaT, const float * XT, float * error, const int nnz,
const int error_size) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < nnz) {
int row = cooRowIndex[i];
int col = csrColIndex[i];
float e = csrVal[i];
//if(i%1000000==0) printf("row: %d, col: %d, csrVal[%d]: %f.\t", row, col, i, e);
for (int k = 0; k < F; k++) {
e -= tex1Dfetch(thetaTTexRef, F * col + k) * tex1Dfetch(xTTexRef, F * row + k);
}
atomicAdd(&error[i%error_size], e*e);
//error[i] = e*e;
//if(i%1000000==0) printf("error[%d]: %f.\n", i, e);
}
}
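//RMSE accumulates one squared error per nonzero into error[i % error_size] with atomicAdd;
//a host-side (or separate) reduction over error[] followed by sqrt(total / nnz) yields the
//final RMSE, and that reduction is not part of this kernel. A 1-D launch covering all nnz
//entries, e.g. <<<(nnz - 1)/256 + 1, 256>>>, is sufficient; the block size is only a suggestion.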
__global__ void RMSE_CSC(const float * cscVal, const int* cscRowIndex,
const int* cscColIndex, const float * thetaT, const float * XT, float * error,
const int error_size, int* nan) {
int col = blockIdx.x;
int start = cscColIndex[col];
int end = cscColIndex[col + 1];
if (col < N && threadIdx.x < end - start) {
for (int i = 0; threadIdx.x + i*blockDim.x < end - start; i++) {
int index = start + i*blockDim.x + threadIdx.x;
float e0 = cscVal[index];
float e = e0;
//if(isnan(e)) printf("ERROR: NAN***\n");
int row = cscRowIndex[index];
//if(isfinite(((double)row))) printf("ERROR: NAN@@@\n");
for (int k = 0; k < F; k++) {
e -= tex1Dfetch(thetaTTexRef, F * col + k) * XT[ F * row + k];
//TODO: fix this properly; the prediction can be NaN when a user/item does not show up in training
//if(isnan(e1)) printf("e1: NAN!!!%d, %d, %d\n", index, col, row);
//if(isnan(e2)) printf("e2: NAN!!!%d, %d, %d\n", index, col, row);
}
if(isnan(e)) {
e = 0;
atomicAdd(&nan[0],1);
}
//if(isnan(e)) printf("ERROR: NAN!!!%d, %d, %d\n", index, col, row);
atomicAdd(&error[row%error_size], e*e);
}
}
}
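//RMSE_CSC does the same accumulation in CSC order, one block per item column (grid size N);
//NaN predictions (e.g. for users/items absent from the training set) are zeroed out and
//counted in nan[0] instead of poisoning the error sum.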
int main() {
printf("enable p2p among %d GPUs if available.\n", GPU_COUNT);
enableP2P(GPU_COUNT);
//initialize cublas, cusparse
hipblasHandle_t handle[GPU_COUNT];
hipsparseHandle_t cushandle[GPU_COUNT];
for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){
cudacall(hipSetDevice(gpu_id));
cublascall(hipblasCreate(&handle[gpu_id]));
cusparsecall(hipsparseCreate(&cushandle[gpu_id]));
}
hipSetDevice(DEVICEID);
long m = M;
long n = N;
long f = F;
long nnz = NNZ;
float lambda = LAMBDA;
unsigned int* csrRowIndexHostPtr;
cudacall(hipHostMalloc( (void** ) &csrRowIndexHostPtr, (m + 1) * sizeof(int)) );
int* csrColIndexHostPtr;
cudacall(hipHostMalloc( (void** ) &csrColIndexHostPtr, nnz * sizeof(int)) );
float* csrValHostPtr;
cudacall(hipHostMalloc( (void** ) &csrValHostPtr, nnz * sizeof(float)) );
long csc_nnz[GPU_COUNT] = {777607310, 773335400, 777305655, 772895948};
long csc_m[GPU_COUNT] = {12520650, 12520650, 12520650, 12520653};
long csc_nnz_test[GPU_COUNT] = {86418516, 85913272, 86357875, 85883667};
float* cscValHostPtr[GPU_COUNT];
int* cscRowIndexHostPtr[GPU_COUNT];
int* cscColIndexHostPtr[GPU_COUNT];
for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){
cudacall(hipHostMalloc( (void** ) &cscValHostPtr[gpu_id], csc_nnz[gpu_id] * sizeof(float)) );
cudacall(hipHostMalloc( (void** ) &cscRowIndexHostPtr[gpu_id], csc_nnz[gpu_id] * sizeof(int)) );
cudacall(hipHostMalloc( (void** ) &cscColIndexHostPtr[gpu_id], (n+1) * sizeof(int)) );
}
float* testCscValHostPtr[GPU_COUNT];
int* testCscRowIndexHostPtr[GPU_COUNT];
int* testCscColIndexHostPtr[GPU_COUNT];
for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){
cudacall(hipHostMalloc( (void** ) &testCscValHostPtr[gpu_id], csc_nnz_test[gpu_id] * sizeof(float)) );
cudacall(hipHostMalloc( (void** ) &testCscRowIndexHostPtr[gpu_id], csc_nnz_test[gpu_id] * sizeof(int)) );
cudacall(hipHostMalloc( (void** ) &testCscColIndexHostPtr[gpu_id], (n+1) * sizeof(int)) );
}
//calculate X from thetaT first, need to initialize thetaT
float* thetaTHost;
cudacall(hipHostMalloc( (void** ) &thetaTHost, n * f * sizeof(float)) );
//indexing into XT_h needs a long -- offsets exceed what int32 (2^31) or uint32 (2^32) can hold
float * XT_h;
//cudacall (hipHostMalloc((void **)&XT_h, f * m * sizeof(XT_h[0]), hipHostMallocMapped) );
cudacall (hipHostMalloc((void **)&XT_h, f * m * sizeof(XT_h[0])) );
//initialize thetaT on host
srand (time(0));
for (int k = 0; k < n * f; k++)
thetaTHost[k] = 0.5*((float) rand() / (RAND_MAX));
//thetaTHost[k] = 0.1*((float) rand() / (float)RAND_MAX);
//thetaTHost[k] = 0;
//CG needs an initial value of XT
memset(XT_h,0,m*f*sizeof(float));
//for (long k = 0; k < m * f; k++)
// XT_h[k] = 0.5*((float) rand() / (RAND_MAX));
//device pointers
int * csrRowIndex[GPU_COUNT];
int * csrColIndex[GPU_COUNT];
float * csrVal[GPU_COUNT];
float * thetaT[GPU_COUNT];
float * XT_d[GPU_COUNT];
float * cscVal[GPU_COUNT];
int * cscRowIndex[GPU_COUNT];
int * cscColIndex[GPU_COUNT];
printf("*******starting loading training and testing sets to host.\n");
loadCSRSparseMatrix("../data/hugewiki/hugewiki_R_train_csr.data", "../data/hugewiki/hugewiki_R_train_csr.indptr", "../data/hugewiki/hugewiki_R_train_csr.indices",
csrValHostPtr, csrRowIndexHostPtr, csrColIndexHostPtr);
omp_set_num_threads(GPU_COUNT);
#pragma omp parallel
{
int gpu_id = omp_get_thread_num();
std::string str1("../data/hugewiki/hugewiki_R_train_csc.data.bin");
std::string str2("../data/hugewiki/hugewiki_R_train_csc.indices.bin");
std::string str3("../data/hugewiki/hugewiki_R_train_csc.indptr.bin");
//printf("%s",(str+to_string(gpu_id)).c_str());
loadCSCSparseMatrixInBatch((str1 + to_string(gpu_id)).c_str(),
(str2 + to_string(gpu_id)).c_str(),
(str3 + to_string(gpu_id)).c_str(),
cscValHostPtr[gpu_id], cscRowIndexHostPtr[gpu_id], cscColIndexHostPtr[gpu_id], csc_nnz[gpu_id], n);
}
#pragma omp parallel
{
int gpu_id = omp_get_thread_num();
std::string str1("../data/hugewiki/hugewiki_R_test_csc.data.bin");
std::string str2("../data/hugewiki/hugewiki_R_test_csc.indices.bin");
std::string str3("../data/hugewiki/hugewiki_R_test_csc.indptr.bin");
//printf("%s",(str+to_string(gpu_id)).c_str());
loadCSCSparseMatrixInBatch((str1 + to_string(gpu_id)).c_str(),
(str2 + to_string(gpu_id)).c_str(),
(str3 + to_string(gpu_id)).c_str(),
testCscValHostPtr[gpu_id], testCscRowIndexHostPtr[gpu_id],
testCscColIndexHostPtr[gpu_id], csc_nnz_test[gpu_id], n);
}
printf("\n loaded csr to host; print data, row and col array\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%f ", csrValHostPtr[i]);
}
printf("\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%d ", csrRowIndexHostPtr[i]);
}
printf("\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%d ", csrColIndexHostPtr[i]);
}
printf("\n");
printf("\n loaded csc to host; print data, row and col array\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%f ", cscValHostPtr[0][i]);
}
printf("\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%d ", cscRowIndexHostPtr[0][i]);
}
printf("\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%d ", cscColIndexHostPtr[0][i]);
}
printf("\n");
printf("\n loaded csc test to host; print data, row and col array\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%f ", testCscValHostPtr[0][i]);
}
printf("\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%d ", testCscRowIndexHostPtr[0][i]);
}
printf("\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%d ", testCscColIndexHostPtr[0][i]);
}
printf("\n");
cudacall(hipDeviceSetCacheConfig(hipFuncCachePreferShared));
//64-bit smem access
//http://acceleware.com/blog/maximizing-shared-memory-bandwidth-nvidia-kepler-gpus
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
hipSharedMemConfig pConfig;
hipDeviceGetSharedMemConfig (&pConfig);
//printf("%d\n", pConfig);
cudacall(hipSetDevice(DEVICEID));
hipsparseMatDescr_t descr;
cusparsecall( hipsparseCreateMatDescr(&descr));
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
const float alpha = 1.0f;
const float beta = 0.0f;
for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){
cudacall(hipSetDevice(gpu_id));
cudacall(hipMalloc((void** ) &thetaT[gpu_id], f * n * sizeof(float)));
printf("*******copy memory to GPU %d...\n", gpu_id);
cudacall(hipMemcpy(thetaT[gpu_id], thetaTHost, (size_t ) (n * f * sizeof(float)), hipMemcpyHostToDevice));
}
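//thetaT is replicated in full on every GPU so each device can form Y * Theta for its own
//batches of users without cross-device reads; the user factors are instead gathered on the
//host in XT_h.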
//host pointers for cublas batch operations
float ** devPtrTTHost[GPU_COUNT];
float **devPtrYthetaTHost[GPU_COUNT];
for(int iter = 0; iter < ITERS ; iter ++){
printf("---------------------------update X iteration %d ----------------------------------\n", iter);
auto t0 = std::chrono::high_resolution_clock::now();
//parallel in all GPUs, or only 1
int parallelism_level = GPU_COUNT;
omp_set_num_threads(parallelism_level);
//gpu memory to be used across batches
//last batch size, the largest among batches
int batch_size_max = m - (X_BATCH - 1)*(m/X_BATCH);
int counter = 0;
#pragma omp parallel shared (counter)
{
//this is the code on one gpu
int gpu_id = omp_get_thread_num();
cudacall(hipSetDevice(gpu_id));
//for batch solvers
cudacall(hipHostMalloc( (void** ) &devPtrTTHost[gpu_id], batch_size_max * sizeof(*devPtrTTHost) ) );
cudacall(hipHostMalloc( (void** ) &devPtrYthetaTHost[gpu_id], batch_size_max * sizeof(*devPtrYthetaTHost) ) );
float * thetaT_local = thetaT[gpu_id];
cudacall (hipBindTexture(NULL, thetaTTexRef, thetaT_local, n * f * sizeof(float)));
float * tt = 0;
//last batch size, the largest among batches
int batch_size = m - (X_BATCH - 1)*(m/X_BATCH);
//TODO: to get batch_nnz_max from csrRowIndexHostPtr
int batch_nnz_max = 16000000;
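//a possible way to resolve the TODO above (sketch, not enabled), assuming csrRowIndexHostPtr holds the
//full m+1 row pointers of the training matrix:
// long batch_nnz_max_computed = 0;
// for (int b = 0; b < X_BATCH; b++) {
// long s = (long)b * (m/X_BATCH);
// long e = (b == X_BATCH - 1) ? (long)m : s + m/X_BATCH;
// long cur = csrRowIndexHostPtr[e] - csrRowIndexHostPtr[s];
// if (cur > batch_nnz_max_computed) batch_nnz_max_computed = cur;
// }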
long batch_offset;
cudacall(hipMalloc((void** ) &csrRowIndex[gpu_id],(batch_size + 1) * sizeof(csrRowIndex[0][0])));
cudacall(hipMalloc((void** ) &csrColIndex[gpu_id], batch_nnz_max * sizeof(csrColIndex[0][0])));
cudacall(hipMalloc((void** ) &csrVal[gpu_id], batch_nnz_max * sizeof(csrVal[0][0])));
float * ytheta = 0;
float * ythetaT = 0;
cudacall(hipMalloc((void** ) &ytheta, f * batch_size * sizeof(ytheta[0])));
cudacall(hipMalloc((void** ) &ythetaT, f * batch_size * sizeof(ythetaT[0])));
#ifdef CUMF_TT_FP16
cudacall(hipMalloc((void** ) &tt, f/2 * f * batch_size * sizeof(float)));
#else
cudacall(hipMalloc((void** ) &tt, f * f * batch_size * sizeof(float)));
#endif
//for batch solvers
float **devPtrTT = 0;
float **devPtrYthetaT = 0;
int *P, *INFO;
cudacall(hipMalloc((void** ) &devPtrTT, batch_size * sizeof(*devPtrTT)));
cudacall(hipMalloc(&P, f * batch_size * sizeof(int)) );
cudacall(hipMalloc(&INFO, batch_size * sizeof(int) ));
cudacall(hipMalloc((void** ) &devPtrYthetaT, batch_size * sizeof(*devPtrYthetaT)));
int batch_id = 0;
//gpu 0 handles batches 0, 4, 8 ...
//for(int batch_id = gpu_id; batch_id < X_BATCH; batch_id += parallelism_level)
while(counter < X_BATCH)
{
#pragma omp critical
{
batch_id = counter;
counter = counter + 1;
}
auto t2 = std::chrono::high_resolution_clock::now();
if(batch_id != X_BATCH - 1)
batch_size = m/X_BATCH;
batch_offset = batch_id * (m/X_BATCH);
int batch_nnz =
csrRowIndexHostPtr[batch_offset + batch_size] - csrRowIndexHostPtr[batch_offset];
printf("\tbatch %d of %d; size: %d, offset: %ld, batch_nnz %d, on gpu %d\n",
batch_id, X_BATCH, batch_size, batch_offset, batch_nnz, gpu_id);
//copy CSR rating matrices in
cudacall(hipMemcpy(csrRowIndex[gpu_id], &csrRowIndexHostPtr[batch_offset],
(batch_size + 1) * sizeof(csrRowIndex[0][0]), hipMemcpyHostToDevice));
//in place update: csrRowIndex --> csrRowIndex - csrRowIndex[0]
hipLaunchKernelGGL(( zeroIndex), dim3((batch_size + 1 - 1)/1024 + 1), dim3(1024), 0, 0,
csrRowIndex[gpu_id], csrRowIndexHostPtr[batch_offset], batch_size + 1);
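//after this shift the batch's row pointers start at 0, e.g. {1200, 1203, 1210, ...} --> {0, 3, 10, ...},
//so the batch can be treated as a standalone CSR matrix by cusparse and by the tt kernel below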
cudacall(hipMemcpy(csrColIndex[gpu_id], &csrColIndexHostPtr[csrRowIndexHostPtr[batch_offset]],
batch_nnz * sizeof(csrColIndex[0][0]), hipMemcpyHostToDevice));
cudacall(hipMemcpy(csrVal[gpu_id], &csrValHostPtr[csrRowIndexHostPtr[batch_offset]],
batch_nnz * sizeof(csrVal[0][0]),hipMemcpyHostToDevice));
//process right hand: Y*theta
hipsparseScsrmm2(cushandle[gpu_id], HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE, batch_size, f, n, batch_nnz, &alpha, descr, csrVal[gpu_id],
csrRowIndex[gpu_id], csrColIndex[gpu_id], thetaT[gpu_id], f, &beta, ytheta, batch_size);
//transpose ytheta: ytheta: m*f; need ythetaT = (ytheta).T = f*m
hipblasSgeam(handle[gpu_id], HIPBLAS_OP_T, HIPBLAS_OP_N, f, batch_size, &alpha,
(const float * ) ytheta, batch_size, &beta, ythetaT, f, ythetaT, f);
hipDeviceSynchronize();
cudaCheckError();
//generate left-hand: tt: batch_size*(F*F)
auto tX = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = tX - t2;
printf("\t\t batch %d before tt kernel gpu: %d, seconds: %f \n",
batch_id, gpu_id, elapsed.count());
auto t1 = std::chrono::high_resolution_clock::now();
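//build the left-hand sides: for every row i of the batch, tt[i] = sum_j theta_j * theta_j^T + n_i * lambda * I,
//summed over the items j rated in row i (n_i = #ratings of that row); one thread block per row, 64 threads,
//with SCAN_BATCH columns of thetaT staged in shared memory per scan iteration;
//the CUMF_TT_FP16 variant stores tt as half2 to halve its memory footprint and traffic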
#ifdef CUMF_TT_FP16
hipLaunchKernelGGL(( get_hermitian100_tt_fp16), dim3(batch_size), dim3(64), SCAN_BATCH * f/2*sizeof(float2), 0,
0, (half2*) tt, csrRowIndex[gpu_id], csrColIndex[gpu_id], lambda, batch_size, thetaT[gpu_id]);
#else
//get_hermitian_x<<<batch_size, 64>>>
// (tt, csrRowIndex[gpu_id], csrColIndex[gpu_id], lambda);
//updateXByBlock2pRegDsmemTile<<<batch_size, F>>>
// (tt, csrRowIndex[gpu_id], csrColIndex[gpu_id], lambda);
hipLaunchKernelGGL(( get_hermitian100), dim3(batch_size), dim3(64), SCAN_BATCH * f/2*sizeof(float2), 0,
0, tt, csrRowIndex[gpu_id], csrColIndex[gpu_id], lambda, batch_size, thetaT[gpu_id]);
#endif
hipDeviceSynchronize();
cudaCheckError();
tX = std::chrono::high_resolution_clock::now();
elapsed = tX - t1;
printf("\t\t batch %d tt kernel gpu: %d, seconds: %f \n",
batch_id, gpu_id, elapsed.count());
t1 = std::chrono::high_resolution_clock::now();
/*
#ifdef CUMF_SAVE_MODEL
if(iter==0&&batch_id==0)
saveDeviceFloatArrayToFile(std::string("../log/0904/hugewiki.tt.hermitkernel"), f * f * batch_size, tt);
#endif
updateX(batch_id, batch_size, batch_offset, ythetaT, tt, XT_h,
handle[gpu_id], m, n, f, nnz, devPtrTTHost[gpu_id], devPtrYthetaTHost[gpu_id],
devPtrTT, devPtrYthetaT, P, INFO);
#ifdef CUMF_SAVE_MODEL
if(iter==0&&batch_id==0)
saveDeviceFloatArrayToFile(std::string("../log/0904/hugewiki.lu.hermitkernel.xt"), f * batch_size, ythetaT);
#endif
*/
///*
float * XT = 0;
cudacall(hipMalloc((void** ) &XT, f * batch_size * sizeof(XT[0])));
cudacall( hipMemcpy(XT, &XT_h[batch_offset * F],
batch_size * F * sizeof(float), hipMemcpyHostToDevice) );
#ifdef CUMF_TT_FP16
printf("CG solver with fp16.\n");
updateXWithCGHost_tt_fp16(tt, XT, ythetaT, batch_size, f, 6);
#else
printf("CG solver with fp32.\n");
updateXWithCGHost(tt, XT, ythetaT, batch_size, 100, 100);
#endif
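//the CG solvers above solve tt * x = ythetaT for each row of the batch, warm-started from the current XT;
//the trailing numeric arguments are presumably the factor count and the CG iteration budget (see cg.h)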
cudacall( hipMemcpy(&XT_h[batch_offset * F], XT,
batch_size * F * sizeof(float), hipMemcpyDeviceToHost) );
#ifdef CUMF_SAVE_MODEL
if(batch_id==0)
saveDeviceFloatArrayToFile(std::string("../log/0903/hugewiki.cg.xt.")+ to_string(iter), f * batch_size, XT);
#endif
cudacall(hipFree(XT));
//*/
tX = std::chrono::high_resolution_clock::now();
elapsed = tX - t1;
printf("\t\t batch %d updateX by solving tt , gpu: %d, seconds: %f \n",
batch_id, gpu_id, elapsed.count());
tX = std::chrono::high_resolution_clock::now();
elapsed = tX - t2;
printf("\tbatch %d on gpu %d, runs %f \n", batch_id, gpu_id, elapsed.count());
}//end of update x batch
auto tX = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = tX - t0;
printf("update X run %f seconds at gpu %d.\n", elapsed.count(), gpu_id);
cudacall(hipFree(ytheta));
cudacall(hipFree(tt));
cudacall(hipFree(csrVal[gpu_id]));
cudacall(hipFree(csrRowIndex[gpu_id]));
cudacall(hipFree(csrColIndex[gpu_id]));
cudacall(hipFree(ythetaT));
hipFree(P);
hipFree(INFO);
hipFree(devPtrTT);
hipFree(devPtrYthetaT);
cudacall(hipHostFree(devPtrTTHost[gpu_id]));
cudacall(hipHostFree(devPtrYthetaTHost[gpu_id]));
}//end of omp parallel loop
auto tX = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = tX - t0;
printf("update X run %f seconds, gridSize: %ld \n", elapsed.count(), m);
auto start = std::chrono::high_resolution_clock::now();
printf("---------------------------------- update theta iteration %d----------------------------------\n",
iter);
//in batches, when N is huge
for(int batch_id = 0; batch_id< THETA_BATCH; batch_id ++){
int batch_size = 0;
if(batch_id != THETA_BATCH - 1)
batch_size = n/THETA_BATCH;
else
batch_size = n - batch_id*(n/THETA_BATCH);
int batch_offset = batch_id * (n/THETA_BATCH);
printf("batch %d / %d, size: %d\n", batch_id + 1, THETA_BATCH, batch_size);
float * yTX[GPU_COUNT];
float * yTXT[GPU_COUNT];
const float alpha = 1.0f;
const float beta = 0.0f;
float * xx[GPU_COUNT];
omp_set_num_threads(GPU_COUNT);
t0 = std::chrono::high_resolution_clock::now();
#pragma omp parallel
{
int gpu_id = omp_get_thread_num();
long offset = 0;
for(int k = 0; k < gpu_id; k ++)
offset += csc_m[k];
cudacall(hipSetDevice(gpu_id));
printf("\tGather xx on GPU %d.\n",gpu_id);
auto t1 = std::chrono::high_resolution_clock::now();
//distribute XT[] to XT_d[i]
cudacall(hipMalloc((void** ) &XT_d[gpu_id], f * csc_m[gpu_id] * sizeof(float)));
//printf("offset: %lld, copy XT_h[%lld] to XT_d[%d]:\n", offset, offset*f, gpu_id);
cudacall(hipMemcpy(XT_d[gpu_id], &XT_h[offset*f],
f * csc_m[gpu_id] * sizeof(float), hipMemcpyHostToDevice));
//copy csc to GPU
int batch_nnz = cscColIndexHostPtr[gpu_id][batch_offset + batch_size] - cscColIndexHostPtr[gpu_id][batch_offset];
cudacall(hipMalloc((void** ) &cscRowIndex[gpu_id],batch_nnz * sizeof(int)));
cudacall(hipMalloc((void** ) &cscColIndex[gpu_id], (batch_size + 1) * sizeof(int)));
cudacall(hipMalloc((void** ) &cscVal[gpu_id], batch_nnz * sizeof(float)));
hipMemcpyAsync(cscRowIndex[gpu_id], &cscRowIndexHostPtr[gpu_id][cscColIndexHostPtr[gpu_id][batch_offset]],
batch_nnz * sizeof(cscRowIndex[0][0]), hipMemcpyHostToDevice);
hipMemcpy(cscColIndex[gpu_id], &cscColIndexHostPtr[gpu_id][batch_offset],
(batch_size + 1) * sizeof(cscColIndex[0][0]), hipMemcpyHostToDevice);
hipMemcpy(cscVal[gpu_id], &cscValHostPtr[gpu_id][cscColIndexHostPtr[gpu_id][batch_offset]],
batch_nnz * sizeof(cscVal[0][0]), hipMemcpyHostToDevice);
cudacall(hipMalloc((void** ) &yTXT[gpu_id], f * batch_size * sizeof(float)));
cudacall(hipMalloc((void** ) &yTX[gpu_id], f * batch_size * sizeof(float)));
cudacall(hipMalloc((void** ) &xx[gpu_id], f * f * batch_size * sizeof(float)));
auto tX = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = tX - t1;
printf("\t\tbatch %d memory alloc and cpy gpu %d seconds: %f.\n",
batch_id, gpu_id, elapsed.count());
//in place update: cscColIndex --> cscColIndex - cscColIndex[0]
hipLaunchKernelGGL(( zeroIndex), dim3((batch_size + 1 - 1)/256 + 1), dim3(256), 0, 0,
cscColIndex[gpu_id], cscColIndexHostPtr[gpu_id][batch_offset], batch_size + 1);
//process right-hand side: (Y'*X)'
hipDeviceSynchronize();
cudaCheckError();
t1 = std::chrono::high_resolution_clock::now();
hipsparseScsrmm2(cushandle[gpu_id], HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE, batch_size, f, csc_m[gpu_id],
batch_nnz, &alpha, descr, cscVal[gpu_id], cscColIndex[gpu_id],
cscRowIndex[gpu_id], XT_d[gpu_id], f, &beta, yTX[gpu_id], batch_size);
hipblasSgeam(handle[gpu_id], HIPBLAS_OP_T, HIPBLAS_OP_N, f, batch_size, &alpha,
(const float * ) yTX[gpu_id], batch_size, &beta, yTXT[gpu_id], f, yTXT[gpu_id], f);
hipDeviceSynchronize();
cudaCheckError();
tX = std::chrono::high_resolution_clock::now();
elapsed = tX - t1;
printf("\t\tbatch %d right-hand side gpu %d seconds: %f.\n", batch_id, gpu_id, elapsed.count());
//process left-hand side: generate hessian matrix xx
t1 = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( get_hermitian_theta), dim3(batch_size), dim3(64), 0, 0,
xx[gpu_id], cscRowIndex[gpu_id], cscColIndex[gpu_id], lambda, XT_d[gpu_id]);
//get_hermitian100<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>>
// (0, xx[gpu_id], cscColIndex[gpu_id], cscRowIndex[gpu_id], lambda, batch_size, XT_d[gpu_id]);
//updateThetaByBlock2pRegDsmemTile<<<batch_size, F>>>
// (xx[gpu_id], cscRowIndex[gpu_id], cscColIndex[gpu_id], lambda, XT_d[gpu_id]);
hipDeviceSynchronize();
cudaCheckError();
tX = std::chrono::high_resolution_clock::now();
elapsed = tX - t1;
printf("\t\tbatch %d xx kernel gpu %d seconds: %f.\n", batch_id, gpu_id, elapsed.count());
t1 = std::chrono::high_resolution_clock::now();
cudacall(hipFree(yTX[gpu_id]));
cudacall(hipFree(cscRowIndex[gpu_id]));
cudacall(hipFree(cscColIndex[gpu_id]));
cudacall(hipFree(cscVal[gpu_id]));
tX = std::chrono::high_resolution_clock::now();
elapsed = tX - t1;
printf("\t\tbatch %d hipFree gpu %d seconds: %f.\n", batch_id, gpu_id, elapsed.count());
}
tX = std::chrono::high_resolution_clock::now();
elapsed = tX - t0;
printf("\tbatch %d gather xx in %d GPUs run %f seconds.\n",
batch_id, GPU_COUNT, elapsed.count());
t0 = std::chrono::high_resolution_clock::now();
printf("\t\tadd xx before updateTheta on a given GPU.\n");
//xx[0] += xx[1] + xx[2] + xx[3]
cudacall(hipSetDevice(0));
float * xx_hotel;
cudacall(hipMalloc((void** ) &xx_hotel, f * f * batch_size * sizeof(float)));
cudaCheckError();
for(int gpu_id = 1; gpu_id < GPU_COUNT; gpu_id ++){
//printf("copy from gpu:%d.\n", gpu_id);
cudacall(hipMemcpy(xx_hotel, xx[gpu_id], f * f * batch_size * sizeof(float), hipMemcpyDefault));
hipDeviceSynchronize();
cudaCheckError();
//printf("add.\n");
hipblasSaxpy(handle[0], f * f * batch_size, &alpha, xx_hotel, 1, xx[0], 1);
hipDeviceSynchronize();
cudaCheckError();
}
cudacall(hipFree(xx_hotel));
printf("\t\tadd yTXT before updateTheta on a given GPU.\n");
//yTXT[0] += yTXT[1] + yTXT[2] + yTXT[3]
float * yTXT_hotel;
cudacall(hipMalloc((void** ) &yTXT_hotel, f * batch_size * sizeof(float)));
for(int gpu_id = 1; gpu_id < GPU_COUNT; gpu_id ++){
cudacall(hipMemcpy(yTXT_hotel, yTXT[gpu_id], f * batch_size * sizeof(float), hipMemcpyDefault));
hipblasSaxpy(handle[0], f * batch_size, &alpha, yTXT_hotel, 1, yTXT[0], 1);
hipDeviceSynchronize();
cudaCheckError();
}
cudacall(hipFree(yTXT_hotel));
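//at this point xx[0] and yTXT[0] hold the sums over all GPUs: hipMemcpyDefault lets the runtime resolve the
//peer/UVA copy from each gpu_id to the anchor GPU 0, and hipblasSaxpy accumulates the partial Gramians and
//right-hand sides so that updateTheta below only needs to solve on a single GPU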
//printf("*******invoke updateTheta with batch_size: %d, batch_offset: %d.\n", batch_size, batch_offset);
updateTheta(batch_size, batch_offset, xx[0], yTXT[0], thetaT[0], handle[0], n, f);
tX = std::chrono::high_resolution_clock::now();
elapsed = tX - t0;
printf("\tbatch: %d gather and updateTheta in one GPU run %f seconds.\n",
batch_id, elapsed.count());
for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){
cudacall(hipFree(xx[gpu_id]));
cudacall(hipFree(yTXT[gpu_id]));
cudacall(hipFree(XT_d[gpu_id]));
}
}//end of update theta batches
//propagate thetaT[0] to non-anchor devices
for(int gpu_id = 1; gpu_id < GPU_COUNT; gpu_id ++)
cudacall( hipMemcpy(thetaT[gpu_id], thetaT[0], n * F * sizeof(float), hipMemcpyDeviceToDevice) );
auto end = std::chrono::high_resolution_clock::now();
elapsed = end - start;
printf("update theta run %f seconds, gridSize: %ld.\n", elapsed.count(), n);
//////////////////////////////////////////////////////////////////////////////////////////////////
printf("Calculate RMSE in batches.\n");
//has to be calculated in batches since cooRowIndex + csrColIndex + csrVal are too big to keep on one GPU at once
cudacall(hipSetDevice(0));
float * errors_train = 0;
float * errors_test = 0;
int error_size = 4096;
int* nan_train = 0;
int* nan_test = 0;
cudacall(hipMalloc((void** ) &errors_train, error_size * sizeof(errors_train[0])));
cudacall(hipMemset(errors_train, 0, error_size*sizeof(float)) );
cudacall(hipMalloc((void** ) &errors_test, error_size * sizeof(errors_test[0])));
cudacall(hipMemset(errors_test, 0, error_size*sizeof(float)) );
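//RMSE is computed per GPU partition of the CSC data: each partition's CSC slice is copied to GPU 0,
//RMSE_CSC accumulates squared prediction errors into the fixed-size errors_train/errors_test arrays
//(error_size partial sums) and counts NaN predictions; the partial sums are then reduced with
//hipblasSasum and turned into RMSE via sqrt(sum/nnz)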
for(int batch_id = 0; batch_id < GPU_COUNT; batch_id ++){
printf("iteration: %d\n", batch_id);
int row_offset = 0;
for(int k = 0; k < batch_id; k ++){
row_offset += csc_m[k];
}
float * XT_small;
int * cscRowIndex_small;
int * cscColIndex_small;
float * cscVal_small;
cudacall(hipMalloc((void** ) &XT_small, f * csc_m[batch_id] * sizeof(float)));
cudacall(hipMemcpy(XT_small, &XT_h[(long) row_offset*f], f * csc_m[batch_id] * sizeof(float), hipMemcpyHostToDevice));
printf("cal train rmse in batch: %d/%d, nnz:%ld, n(col): %ld, \n",
batch_id, GPU_COUNT, csc_nnz[batch_id], n);
cudacall(hipMalloc((void** ) &cscRowIndex_small,csc_nnz[batch_id] * sizeof(int)));
cudacall(hipMalloc((void** ) &cscColIndex_small, (n + 1) * sizeof(int)));
cudacall(hipMalloc((void** ) &cscVal_small, csc_nnz[batch_id] * sizeof(float)));
cudacall(hipMemcpy(cscRowIndex_small, cscRowIndexHostPtr[batch_id],
csc_nnz[batch_id] * sizeof(int), hipMemcpyHostToDevice));
cudacall(hipMemcpy(cscColIndex_small, cscColIndexHostPtr[batch_id],
(n + 1) * sizeof(int), hipMemcpyHostToDevice));
cudacall(hipMemcpy(cscVal_small, cscValHostPtr[batch_id],
csc_nnz[batch_id] * sizeof(float), hipMemcpyHostToDevice));
cudacall(hipMalloc((void** ) &nan_train, sizeof(int)));
cudacall( hipMemset(nan_train, 0, sizeof(int)) );
cudacall(hipMalloc((void** ) &nan_test, sizeof(int)));
cudacall( hipMemset(nan_test, 0, sizeof(int)) );
hipLaunchKernelGGL(( RMSE_CSC), dim3(n), dim3(512), 0, 0, cscVal_small, cscRowIndex_small,
cscColIndex_small, thetaT[0], XT_small, errors_train, error_size, nan_train);
hipDeviceSynchronize();
cudaCheckError();
cudacall(hipFree(cscRowIndex_small));
cudacall(hipFree(cscColIndex_small));
cudacall(hipFree(cscVal_small));
printf("cal test rmse in batch: %d/%d, nnz_test:%ld, n(col): %ld, \n",
batch_id, GPU_COUNT, csc_nnz_test[batch_id], n);
cudacall(hipMalloc((void** ) &cscRowIndex_small,csc_nnz_test[batch_id] * sizeof(int)));
cudacall(hipMalloc((void** ) &cscColIndex_small, (n + 1) * sizeof(int)));
cudacall(hipMalloc((void** ) &cscVal_small, csc_nnz_test[batch_id] * sizeof(float)));
cudacall(hipMemcpy(cscRowIndex_small, testCscRowIndexHostPtr[batch_id],
csc_nnz_test[batch_id] * sizeof(int), hipMemcpyHostToDevice));
cudacall(hipMemcpy(cscColIndex_small, testCscColIndexHostPtr[batch_id],
(n + 1) * sizeof(int), hipMemcpyHostToDevice));
cudacall(hipMemcpy(cscVal_small, testCscValHostPtr[batch_id],
csc_nnz_test[batch_id] * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( RMSE_CSC), dim3(n), dim3(512), 0, 0, cscVal_small, cscRowIndex_small,
cscColIndex_small, thetaT[0], XT_small, errors_test, error_size, nan_test);
hipDeviceSynchronize();
cudaCheckError();
int* nan_train_host = (int*) malloc (sizeof(int));
int* nan_test_host = (int*) malloc (sizeof(int));
hipMemcpy(nan_train_host, nan_train, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(nan_test_host, nan_test, sizeof(int), hipMemcpyDeviceToHost);
printf("train #nan: %d\n", *nan_train_host);
printf("test #nan: %d\n", *nan_test_host);
cudacall(hipFree(nan_train));
cudacall(hipFree(nan_test));
cudacall(hipFree(cscRowIndex_small));
cudacall(hipFree(cscColIndex_small));
cudacall(hipFree(cscVal_small));
cudacall(hipFree(XT_small));
}
printf("summarize RMSE: \n");
float* rmse_train = (float*) malloc (sizeof(float));
cublascall( hipblasSasum(handle[0], error_size, errors_train, 1, rmse_train) );
hipDeviceSynchronize();
cudaCheckError();
float* rmse_test = (float*) malloc (sizeof(float));
cublascall( hipblasSasum(handle[0], error_size, errors_test, 1, rmse_test) );
hipDeviceSynchronize();
cudaCheckError();
printf("@@@@@@@@@@@@@@@@@@@ Train RMSE in iter %d: %f\n", iter, sqrt((*rmse_train)/nnz));
printf("@@@@@@@@@@@@@@@@@@@ Test RMSE in iter %d: %f\n", iter, sqrt((*rmse_test)/(NNZ_TEST - 12750)));
cudacall(hipFree(errors_train));
cudacall(hipFree(errors_test));
//*/
}
/*
//save model to a file
cudacall(hipMemcpy(thetaTHost, thetaT[0], n * f * sizeof(float), hipMemcpyDeviceToHost) );
FILE * xfile = fopen("XT.data", "wb");
FILE * thetafile = fopen("thetaT.data", "wb");
fwrite(XT_h, sizeof(float), m*f, xfile);
fwrite(thetaTHost, sizeof(float), n*f, thetafile);
fclose(xfile);
fclose(thetafile);
*/
cudacall(hipHostFree(XT_h));
cudacall(hipHostFree(csrRowIndexHostPtr));
cudacall(hipHostFree(csrColIndexHostPtr));
cudacall(hipHostFree(csrValHostPtr));
hipHostFree(thetaTHost);
for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){
cudacall(hipHostFree(cscValHostPtr[gpu_id]));
cudacall(hipHostFree(cscRowIndexHostPtr[gpu_id]));
cudacall(hipHostFree(cscColIndexHostPtr[gpu_id]));
cudacall(hipSetDevice(gpu_id));
//cudacall(hipDeviceReset());
}
printf("ALS Done.\n");
return 0;
}
| 5c8a8156c8cf0eec5588874e838efb818a4226dd.cu | /*
* hugewiki.cu
*
* Created on: Feb 10, 2015
* Author: Wei Tan ([email protected])
* Alternating Least Squares (ALS) for matrix factorization on CUDA 7.0+
* Code optimized for F = 100 and for cc 3.5/3.7 platforms; also tested on cc 5.2
*/
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <iostream>
#include <fstream>
#include <cusparse.h>
#include <chrono>
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <string>
#include <sstream>
#include "./common.h"
#include "../src/als.h"
#include "../src/cg.h"
//variable definition
#define F 100
#define TILE_SIZE F/10
#define SCAN_BATCH 30
#define THETA_BATCH 3
#define X_BATCH 240
#define ITERS 10
#define M 50082603
#define N 39780
#define NNZ 3101144313
#define NNZ_TEST 344573330
//0.05 when using both "full" kernels
#define LAMBDA 0.048
//hardware specific
#define GPU_COUNT 4
#define DEVICEID 0 // the anchor device
//debug option to save model
//#define CUMF_SAVE_MODEL
//#define CUMF_TT_FP16
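//implementation notes: F = 100 latent factors; the Hessian kernels below accumulate each 100x100 matrix
//in 10x10 register tiles, staging SCAN_BATCH columns of thetaT (or rows of X) in shared memory per scan
//iteration; X_BATCH and THETA_BATCH split the updates of X and theta into batches so the working set fits
//in GPU memory, and GPU_COUNT host threads drive one GPU each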
void saveDeviceFloatArrayToFile(std::string fileName, int size, float* d_array){
float* h_array;
cudacall(cudaMallocHost( (void** ) &h_array, size * sizeof(h_array[0])) );
cudacall(cudaMemcpy(h_array, d_array, size * sizeof(h_array[0]),cudaMemcpyDeviceToHost));
FILE * outfile = fopen(fileName.c_str(), "wb");
fwrite(h_array, sizeof(float), size, outfile);
fclose(outfile);
cudaFreeHost(h_array);
}
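//get_hermitian100_tt_fp16: one thread block per row (row = blockIdx.x + batch_offset); it builds the
//regularized Gramian sum_j theta_j * theta_j^T + n_row * lambda * I over the columns rated in that row and
//writes it to tt in half precision (half2) to cut memory footprint and traffic; the first 55 of the 64
//threads each own one 10x10 tile of the lower triangle and mirror it into the upper triangle at the end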
__global__ void
__launch_bounds__(64, 6)
get_hermitian100_tt_fp16(const int batch_offset, half2* tt,
const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m,
const float* __restrict__ thetaT) {
extern __shared__ float2 thetaTemp[];
int row = blockIdx.x + batch_offset;
if (row < m) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
int tile_x = 0;
int tile_y = 0;
int tile = F/10;
for ( int i = 0; i < 10; i++){
int end = ((20-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * tile;
tile_y = (10 + threadIdx.x - end) * tile;
break;
}
}
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
float2 theta;
//copy texture --> smem, and sync
/*
if(threadIdx.x < SCAN_BATCH){
if(iter*SCAN_BATCH + threadIdx.x < end - start){
for (int k = 0; k < F; k += 2){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1);
thetaTemp[threadIdx.x * F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float));
}
*/
//two layers: warp divergence unless we split at 32
//require 32 >= SCAN_BATCH
if(threadIdx.x < 2*32 ){
//int index = threadIdx.x;
int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31;
if(index < SCAN_BATCH){
if(iter*SCAN_BATCH + index < end - start){
//for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){
//IMPORTANT: for loop has constant and identical start and end
if(threadIdx.x < 32){
for (int k = 0; k < 50; k += 2){
theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]);
theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]);
thetaTemp[index * F/2 + k/2] = theta;
}
}
else {
for (int k = 0; k < 50; k += 2){
theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]);
theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]);
thetaTemp[index * F/2 + k/2 + 25] = theta;
}
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[index*F/2], 0, F*sizeof(float));
}
}
/* //issue: not coalesced access to csrColIndex
if(threadIdx.x < F && threadIdx.x%2 == 0){
for(int k = 0; k< SCAN_BATCH; k++){
if(iter*SCAN_BATCH + k < end - start){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x +1);
thetaTemp[k * F/2 + threadIdx.x/2] = theta;
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[k*F/2 + threadIdx.x/2], 0, 2*sizeof(float));
}
}
*/
/*
int layers = blockDim.x/SCAN_BATCH; //100/30 = 3
//int height = blockDim.x/layers; //30
int y = threadIdx.x/SCAN_BATCH;//0,1,2,3; y==3 is not viable
//min(y, (layers-1)) * height
int y_start = y * 30;//0-29:0;30-59:30;60-89:60
int y_end = y_start + 30; //0-29:30;30-59:60;60-89:90
if(y >= layers - 1) y_end = F; //60-89:100
if(threadIdx.x - y_start < SCAN_BATCH){
if(iter*SCAN_BATCH + (threadIdx.x - y_start) < end - start){
for (int k = y_start; k < y_end; k += 2){
theta.x =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k);
theta.y =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k+1);
thetaTemp[(threadIdx.x - y_start)* F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[(threadIdx.x - y_start)*F/2 + y_start/2], 0, (y_end - y_start)*sizeof(float));
}
*/
__syncthreads();
//tile: 10*10
if(threadIdx.x < 55 ){
for(int k = 0; k < SCAN_BATCH; k++){
accumulate_in_registers();
}
}
}
//end of iteration in copying from smem and aggregating in register
__syncthreads();
#ifdef DEBUG
//if(threadIdx.x==0)
// printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9);
#endif
if(threadIdx.x < 55 ){
//weighted-lambda regularization
if(tile_x == tile_y){
float temp = (end - start) * lambda;
temp0 += temp;
temp11 += temp;
temp22 += temp;
temp33 += temp;
temp44 += temp;
temp55 += temp;
temp66 += temp;
temp77 += temp;
temp88 += temp;
temp99 += temp;
}
//copy output to gmem
int index = blockIdx.x*F*F/2;
//fill_lower_half_from_registers();
fill_lower_half_from_registers_fp16();
//symmetric
if(tile_x!=tile_y){
//fill_upper_half_from_registers();
fill_upper_half_from_registers_fp16();
}
}
}
}
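//get_hermitian100: single-precision variant of the kernel above with the same tiling and shared-memory
//staging; tt is written as float and the weighted-lambda term is added directly to the diagonal after the
//register tiles have been stored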
__global__ void
__launch_bounds__(64, 6)
get_hermitian100(const int batch_offset, float* tt, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const float* __restrict__ thetaT) {
extern __shared__ float2 thetaTemp[];
int row = blockIdx.x + batch_offset;
if (row < m) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
int tile_x = 0;
int tile_y = 0;
int tile = F/10;
for ( int i = 0; i < 10; i++){
int end = ((20-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * tile;
tile_y = (10 + threadIdx.x - end) * tile;
break;
}
}
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
float2 theta;
//copy texture --> smem, and sync
/*
if(threadIdx.x < SCAN_BATCH){
if(iter*SCAN_BATCH + threadIdx.x < end - start){
for (int k = 0; k < F; k += 2){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1);
thetaTemp[threadIdx.x * F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float));
}
*/
//two layers: warp divergence unless we split at 32
//require 32 >= SCAN_BATCH
if(threadIdx.x < 2*32 ){
//int index = threadIdx.x;
int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31;
if(index < SCAN_BATCH){
if(iter*SCAN_BATCH + index < end - start){
//for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){
//IMPORTANT: for loop has constant and identical start and end
if(threadIdx.x < 32){
for (int k = 0; k < 50; k += 2){
theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]);
theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]);
thetaTemp[index * F/2 + k/2] = theta;
}
}
else {
for (int k = 0; k < 50; k += 2){
theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]);
theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]);
thetaTemp[index * F/2 + k/2 + 25] = theta;
}
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[index*F/2], 0, F*sizeof(float));
}
}
/* //issue: not coalesced access to csrColIndex
if(threadIdx.x < F && threadIdx.x%2 == 0){
for(int k = 0; k< SCAN_BATCH; k++){
if(iter*SCAN_BATCH + k < end - start){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x +1);
thetaTemp[k * F/2 + threadIdx.x/2] = theta;
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[k*F/2 + threadIdx.x/2], 0, 2*sizeof(float));
}
}
*/
/*
int layers = blockDim.x/SCAN_BATCH; //100/30 = 3
//int height = blockDim.x/layers; //30
int y = threadIdx.x/SCAN_BATCH;//0,1,2,3; y==3 is not viable
//min(y, (layers-1)) * height
int y_start = y * 30;//0-29:0;30-59:30;60-89:60
int y_end = y_start + 30; //0-29:30;30-59:60;60-89:90
if(y >= layers - 1) y_end = F; //60-89:100
if(threadIdx.x - y_start < SCAN_BATCH){
if(iter*SCAN_BATCH + (threadIdx.x - y_start) < end - start){
for (int k = y_start; k < y_end; k += 2){
theta.x =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k);
theta.y =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k+1);
thetaTemp[(threadIdx.x - y_start)* F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[(threadIdx.x - y_start)*F/2 + y_start/2], 0, (y_end - y_start)*sizeof(float));
}
*/
__syncthreads();
//tile: 10*10
if(threadIdx.x < 55 ){
for(int k = 0; k < SCAN_BATCH; k++){
accumulate_in_registers();
}
}
}
//end of iteration in copying from smem and aggregating in register
__syncthreads();
#ifdef DEBUG
//if(threadIdx.x==0)
// printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9);
#endif
if(threadIdx.x < 55 ){
//copy output to gmem
int index = blockIdx.x*F*F;
fill_lower_half_from_registers();
//symmetric
if(tile_x!=tile_y){
fill_upper_half_from_registers();
}
//regularization
if(tile_x == tile_y){
for(int k = 0; k < tile; k++)
tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda;
}
}
}
}
//split a big CSR matrix into several by rows; the row pointers of each sub-matrix need to be shifted
//inVal[i] = inVal[i] - inVal_0, so that the sub-matrix starts at index 0
__global__ void zeroIndex(int * inVal, const unsigned int inVal_0, const int size) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
if(i < size){
inVal[i] = (unsigned)inVal[i] - inVal_0;
}
}
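//texture references for XT and thetaT; thetaTTexRef is bound to thetaT by the host code so that the
//tex1Dfetch-based kernel variants (e.g. get_hermitian_x below) can read theta through the texture cache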
texture<float> xTTexRef;
texture<float> thetaTTexRef;
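//updateThetaByBlock2pRegDsmemTile: per-column (item) counterpart of the Hessian kernels; for column col,
//xx[col] = sum_u x_u * x_u^T + n_col * lambda * I over the users u who rated it, with rows of XT read from
//global memory into shared memory via the CSC indices; 100 threads per block, each thread owning one
//10x10 tile (both triangles are computed, no symmetry shortcut)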
__global__ void
__launch_bounds__(100, 4)
updateThetaByBlock2pRegDsmemTile(float * xx, const int* cscRowIndex,
const int* cscColIndex, const float lambda, const float * XT) {
__shared__ float2 xTemp[SCAN_BATCH * F/2];
int col = blockIdx.x;
if (col < N) {
//this block needs to handle end - start XT columns
int start = cscColIndex[col];
int end = cscColIndex[col + 1];
int iterations = (end - start -1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
float2 x;
int tile = F/10;
int tile_x = (threadIdx.x/tile) * tile;//start x of this tile
int tile_y = (threadIdx.x%tile) * tile;//start y of this tile
for (int iter = 0; iter < iterations; iter ++){
//copy XT (gmem) --> smem, and sync
if(threadIdx.x < SCAN_BATCH){
if(iter*SCAN_BATCH + threadIdx.x < end - start){
for (int k = 0; k < F; k += 2){
x.x =
XT[ F * cscRowIndex[start + iter*SCAN_BATCH + threadIdx.x] + k ];
x.y =
XT [ F * cscRowIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1 ];
xTemp[threadIdx.x * F/2 + k/2] = x;
}
}
//must be the last iteration; no need to check
//not enough x to copy, set zero
else
memset(&xTemp[threadIdx.x*F/2], 0, F*sizeof(float));
}
__syncthreads();
///////////////////////////////////////////////////////////////////////////////////////////////////////////
//tile: 10*10
for(int k = 0; k < SCAN_BATCH; k++){
temp0 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp1 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp2 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp3 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp4 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp5 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp6 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp7 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp8 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp9 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp10 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp11 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp12 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp13 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp14 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp15 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp16 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp17 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp18 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp19 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
temp20 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp21 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp22 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp23 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp24 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp25 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp26 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp27 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp28 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp29 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp30 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp31 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp32 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp33 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp34 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp35 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp36 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp37 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp38 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp39 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
temp40 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp41 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp42 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp43 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp44 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp45 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp46 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp47 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp48 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp49 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp50 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp51 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp52 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp53 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp54 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp55 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp56 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp57 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp58 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp59 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
temp60 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp61 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp62 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp63 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp64 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp65 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp66 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp67 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp68 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp69 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp70 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp71 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp72 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp73 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp74 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp75 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp76 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp77 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp78 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp79 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
temp80 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp81 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp82 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp83 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp84 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp85 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp86 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp87 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp88 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp89 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp90 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp91 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp92 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp93 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp94 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp95 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp96 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp97 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp98 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp99 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
__syncthreads();
}
int index = blockIdx.x*F*F;
///*
//copy output to gmem
xx[index + tile_x + tile_y*F] = temp0;
xx[index + tile_x + (tile_y + 1)*F] = temp1;
xx[index + tile_x + (tile_y + 2)*F] = temp2;
xx[index + tile_x + (tile_y + 3)*F] = temp3;
xx[index + tile_x + (tile_y + 4)*F] = temp4;
xx[index + tile_x + (tile_y + 5)*F] = temp5;
xx[index + tile_x + (tile_y + 6)*F] = temp6;
xx[index + tile_x + (tile_y + 7)*F] = temp7;
xx[index + tile_x + (tile_y + 8)*F] = temp8;
xx[index + tile_x + (tile_y + 9)*F] = temp9;
xx[index + tile_x + 1 + tile_y*F] = temp10;
xx[index + tile_x + 1 + (tile_y + 1)*F] = temp11;
xx[index + tile_x + 1 + (tile_y + 2)*F] = temp12;
xx[index + tile_x + 1 + (tile_y + 3)*F] = temp13;
xx[index + tile_x + 1 + (tile_y + 4)*F] = temp14;
xx[index + tile_x + 1 + (tile_y + 5)*F] = temp15;
xx[index + tile_x + 1 + (tile_y + 6)*F] = temp16;
xx[index + tile_x + 1 + (tile_y + 7)*F] = temp17;
xx[index + tile_x + 1 + (tile_y + 8)*F] = temp18;
xx[index + tile_x + 1 + (tile_y + 9)*F] = temp19;
xx[index + tile_x + 2 + tile_y*F] = temp20;
xx[index + tile_x + 2 + (tile_y + 1)*F] = temp21;
xx[index + tile_x + 2 + (tile_y + 2)*F] = temp22;
xx[index + tile_x + 2 + (tile_y + 3)*F] = temp23;
xx[index + tile_x + 2 + (tile_y + 4)*F] = temp24;
xx[index + tile_x + 2 + (tile_y + 5)*F] = temp25;
xx[index + tile_x + 2 + (tile_y + 6)*F] = temp26;
xx[index + tile_x + 2 + (tile_y + 7)*F] = temp27;
xx[index + tile_x + 2 + (tile_y + 8)*F] = temp28;
xx[index + tile_x + 2 + (tile_y + 9)*F] = temp29;
xx[index + tile_x + 3 + tile_y*F] = temp30;
xx[index + tile_x + 3 + (tile_y + 1)*F] = temp31;
xx[index + tile_x + 3 + (tile_y + 2)*F] = temp32;
xx[index + tile_x + 3 + (tile_y + 3)*F] = temp33;
xx[index + tile_x + 3 + (tile_y + 4)*F] = temp34;
xx[index + tile_x + 3 + (tile_y + 5)*F] = temp35;
xx[index + tile_x + 3 + (tile_y + 6)*F] = temp36;
xx[index + tile_x + 3 + (tile_y + 7)*F] = temp37;
xx[index + tile_x + 3 + (tile_y + 8)*F] = temp38;
xx[index + tile_x + 3 + (tile_y + 9)*F] = temp39;
xx[index + tile_x + 4 + tile_y*F] = temp40;
xx[index + tile_x + 4 + (tile_y + 1)*F] = temp41;
xx[index + tile_x + 4 + (tile_y + 2)*F] = temp42;
xx[index + tile_x + 4 + (tile_y + 3)*F] = temp43;
xx[index + tile_x + 4 + (tile_y + 4)*F] = temp44;
xx[index + tile_x + 4 + (tile_y + 5)*F] = temp45;
xx[index + tile_x + 4 + (tile_y + 6)*F] = temp46;
xx[index + tile_x + 4 + (tile_y + 7)*F] = temp47;
xx[index + tile_x + 4 + (tile_y + 8)*F] = temp48;
xx[index + tile_x + 4 + (tile_y + 9)*F] = temp49;
xx[index + tile_x + 5 + tile_y*F] = temp50;
xx[index + tile_x + 5 + (tile_y + 1)*F] = temp51;
xx[index + tile_x + 5 + (tile_y + 2)*F] = temp52;
xx[index + tile_x + 5 + (tile_y + 3)*F] = temp53;
xx[index + tile_x + 5 + (tile_y + 4)*F] = temp54;
xx[index + tile_x + 5 + (tile_y + 5)*F] = temp55;
xx[index + tile_x + 5 + (tile_y + 6)*F] = temp56;
xx[index + tile_x + 5 + (tile_y + 7)*F] = temp57;
xx[index + tile_x + 5 + (tile_y + 8)*F] = temp58;
xx[index + tile_x + 5 + (tile_y + 9)*F] = temp59;
xx[index + tile_x + 6 + tile_y*F] = temp60;
xx[index + tile_x + 6 + (tile_y + 1)*F] = temp61;
xx[index + tile_x + 6 + (tile_y + 2)*F] = temp62;
xx[index + tile_x + 6 + (tile_y + 3)*F] = temp63;
xx[index + tile_x + 6 + (tile_y + 4)*F] = temp64;
xx[index + tile_x + 6 + (tile_y + 5)*F] = temp65;
xx[index + tile_x + 6 + (tile_y + 6)*F] = temp66;
xx[index + tile_x + 6 + (tile_y + 7)*F] = temp67;
xx[index + tile_x + 6 + (tile_y + 8)*F] = temp68;
xx[index + tile_x + 6 + (tile_y + 9)*F] = temp69;
xx[index + tile_x + 7 + tile_y*F] = temp70;
xx[index + tile_x + 7 + (tile_y + 1)*F] = temp71;
xx[index + tile_x + 7 + (tile_y + 2)*F] = temp72;
xx[index + tile_x + 7 + (tile_y + 3)*F] = temp73;
xx[index + tile_x + 7 + (tile_y + 4)*F] = temp74;
xx[index + tile_x + 7 + (tile_y + 5)*F] = temp75;
xx[index + tile_x + 7 + (tile_y + 6)*F] = temp76;
xx[index + tile_x + 7 + (tile_y + 7)*F] = temp77;
xx[index + tile_x + 7 + (tile_y + 8)*F] = temp78;
xx[index + tile_x + 7 + (tile_y + 9)*F] = temp79;
xx[index + tile_x + 8 + tile_y*F] = temp80;
xx[index + tile_x + 8 + (tile_y + 1)*F] = temp81;
xx[index + tile_x + 8 + (tile_y + 2)*F] = temp82;
xx[index + tile_x + 8 + (tile_y + 3)*F] = temp83;
xx[index + tile_x + 8 + (tile_y + 4)*F] = temp84;
xx[index + tile_x + 8 + (tile_y + 5)*F] = temp85;
xx[index + tile_x + 8 + (tile_y + 6)*F] = temp86;
xx[index + tile_x + 8 + (tile_y + 7)*F] = temp87;
xx[index + tile_x + 8 + (tile_y + 8)*F] = temp88;
xx[index + tile_x + 8 + (tile_y + 9)*F] = temp89;
xx[index + tile_x + 9 + tile_y*F] = temp90;
xx[index + tile_x + 9 + (tile_y + 1)*F] = temp91;
xx[index + tile_x + 9 + (tile_y + 2)*F] = temp92;
xx[index + tile_x + 9 + (tile_y + 3)*F] = temp93;
xx[index + tile_x + 9 + (tile_y + 4)*F] = temp94;
xx[index + tile_x + 9 + (tile_y + 5)*F] = temp95;
xx[index + tile_x + 9 + (tile_y + 6)*F] = temp96;
xx[index + tile_x + 9 + (tile_y + 7)*F] = temp97;
xx[index + tile_x + 9 + (tile_y + 8)*F] = temp98;
xx[index + tile_x + 9 + (tile_y + 9)*F] = temp99;
//*/
//regularization
if(tile_x == tile_y){
for(int k = 0; k < tile; k++)
xx[index + (tile_x+k)*(1+F)] += (end - start) * lambda;
}
}
}
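//get_hermitian_x: variant of get_hermitian100 that reads thetaT through the thetaTTexRef texture
//(tex1Dfetch) and iterates over all M rows without a batch offset; same 10x10 lower-triangle tiling
//with 64 threads per block, kept alongside the __ldg-based version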
__global__ void
__launch_bounds__(64, 6)
get_hermitian_x(float* tt,
const int* csrRowIndex, const int* csrColIndex, const float lambda) {
__shared__ float2 thetaTemp[SCAN_BATCH * F/2];
int row = blockIdx.x;
if (row < M) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
//int tile_x = (threadIdx.x/tile) * tile;//start x of this tile
//int tile_y = (threadIdx.x%tile) * tile;//start y of this tile
int tile_x = 0;
int tile_y = 0;
int tile = F/10;
for ( int i = 0; i < 10; i++){
int end = ((20-i)*(i+1))/2;
if(threadIdx.x < end){
tile_x = i * tile;
tile_y = (10 + threadIdx.x - end) * tile;
break;
}
}
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
float2 theta;
//copy texture --> smem, and sync
/*
if(threadIdx.x < SCAN_BATCH){
if(iter*SCAN_BATCH + threadIdx.x < end - start){
for (int k = 0; k < F; k += 2){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1);
thetaTemp[threadIdx.x * F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float));
}
*/
//two layers: warp divergence unless we split at 32
//require 32 >= SCAN_BATCH
if(threadIdx.x < 2*32 ){
//int index = threadIdx.x;
int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31;
if(index < SCAN_BATCH){
if(iter*SCAN_BATCH + index < end - start){
//for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){
//IMPORTANT: for loop has constant and identical start and end
if(threadIdx.x < 32){
for (int k = 0; k < 50; k += 2){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + index] + k);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1);
thetaTemp[index * F/2 + k/2] = theta;
}
}
else {
for (int k = 0; k < 50; k += 2){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51);
thetaTemp[index * F/2 + k/2 + 25] = theta;
}
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[index*F/2], 0, F*sizeof(float));
}
}
/* //issue: not coalesced access to csrColIndex
if(threadIdx.x < F && threadIdx.x%2 == 0){
for(int k = 0; k< SCAN_BATCH; k++){
if(iter*SCAN_BATCH + k < end - start){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x +1);
thetaTemp[k * F/2 + threadIdx.x/2] = theta;
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[k*F/2 + threadIdx.x/2], 0, 2*sizeof(float));
}
}
*/
/*
int layers = blockDim.x/SCAN_BATCH; //100/30 = 3
//int height = blockDim.x/layers; //30
int y = threadIdx.x/SCAN_BATCH;//0,1,2,3; y==3 is not viable
//min(y, (layers-1)) * height
int y_start = y * 30;//0-29:0;30-59:30;60-89:60
int y_end = y_start + 30; //0-29:30;30-59:60;60-89:90
if(y >= layers - 1) y_end = F; //60-89:100
if(threadIdx.x - y_start < SCAN_BATCH){
if(iter*SCAN_BATCH + (threadIdx.x - y_start) < end - start){
for (int k = y_start; k < y_end; k += 2){
theta.x =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k);
theta.y =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k+1);
thetaTemp[(threadIdx.x - y_start)* F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[(threadIdx.x - y_start)*F/2 + y_start/2], 0, (y_end - y_start)*sizeof(float));
}
*/
__syncthreads();
///////////////////////////////////////////////////////////////////////////////////////////////////////////
//tile: 10*10
if(threadIdx.x < 55 ){
for(int k = 0; k < SCAN_BATCH; k++){
temp0 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp1 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp2 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp3 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp4 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp5 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp6 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp7 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp8 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp9 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp10 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp11 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp12 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp13 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp14 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp15 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp16 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp17 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp18 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp19 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp20 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp21 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp22 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp23 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp24 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp25 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp26 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp27 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp28 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp29 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp30 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp31 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp32 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp33 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp34 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp35 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp36 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp37 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp38 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp39 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp40 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp41 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp42 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp43 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp44 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp45 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp46 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp47 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp48 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp49 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp50 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp51 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp52 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp53 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp54 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp55 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp56 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp57 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp58 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp59 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp60 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp61 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp62 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp63 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp64 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp65 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp66 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp67 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp68 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp69 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp70 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp71 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp72 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp73 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp74 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp75 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp76 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp77 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp78 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp79 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp80 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp81 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp82 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp83 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp84 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp85 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp86 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp87 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp88 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp89 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp90 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp91 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp92 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp93 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp94 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp95 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp96 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp97 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp98 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp99 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
}
}
}
//end of the windowed iteration: copy gmem --> smem, then aggregate smem --> registers
///////////////////////////////////////////////////////////////////////////////////////////////////////////
__syncthreads();
///*
if(threadIdx.x < 55 ){
//copy output to gmem
int index = blockIdx.x*F*F;
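			//each block stores one dense F x F result at offset blockIdx.x * F * F;
			//this thread writes its 10 x 10 tile here and mirrors it across the diagonal
			//below, since the accumulated product is symmetric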
tt[index + tile_x + tile_y*F] = temp0;
tt[index + tile_x + (tile_y + 1)*F] = temp1;
tt[index + tile_x + (tile_y + 2)*F] = temp2;
tt[index + tile_x + (tile_y + 3)*F] = temp3;
tt[index + tile_x + (tile_y + 4)*F] = temp4;
tt[index + tile_x + (tile_y + 5)*F] = temp5;
tt[index + tile_x + (tile_y + 6)*F] = temp6;
tt[index + tile_x + (tile_y + 7)*F] = temp7;
tt[index + tile_x + (tile_y + 8)*F] = temp8;
tt[index + tile_x + (tile_y + 9)*F] = temp9;
tt[index + tile_x + 1 + tile_y*F] = temp10;
tt[index + tile_x + 1 + (tile_y + 1)*F] = temp11;
tt[index + tile_x + 1 + (tile_y + 2)*F] = temp12;
tt[index + tile_x + 1 + (tile_y + 3)*F] = temp13;
tt[index + tile_x + 1 + (tile_y + 4)*F] = temp14;
tt[index + tile_x + 1 + (tile_y + 5)*F] = temp15;
tt[index + tile_x + 1 + (tile_y + 6)*F] = temp16;
tt[index + tile_x + 1 + (tile_y + 7)*F] = temp17;
tt[index + tile_x + 1 + (tile_y + 8)*F] = temp18;
tt[index + tile_x + 1 + (tile_y + 9)*F] = temp19;
tt[index + tile_x + 2 + tile_y*F] = temp20;
tt[index + tile_x + 2 + (tile_y + 1)*F] = temp21;
tt[index + tile_x + 2 + (tile_y + 2)*F] = temp22;
tt[index + tile_x + 2 + (tile_y + 3)*F] = temp23;
tt[index + tile_x + 2 + (tile_y + 4)*F] = temp24;
tt[index + tile_x + 2 + (tile_y + 5)*F] = temp25;
tt[index + tile_x + 2 + (tile_y + 6)*F] = temp26;
tt[index + tile_x + 2 + (tile_y + 7)*F] = temp27;
tt[index + tile_x + 2 + (tile_y + 8)*F] = temp28;
tt[index + tile_x + 2 + (tile_y + 9)*F] = temp29;
tt[index + tile_x + 3 + tile_y*F] = temp30;
tt[index + tile_x + 3 + (tile_y + 1)*F] = temp31;
tt[index + tile_x + 3 + (tile_y + 2)*F] = temp32;
tt[index + tile_x + 3 + (tile_y + 3)*F] = temp33;
tt[index + tile_x + 3 + (tile_y + 4)*F] = temp34;
tt[index + tile_x + 3 + (tile_y + 5)*F] = temp35;
tt[index + tile_x + 3 + (tile_y + 6)*F] = temp36;
tt[index + tile_x + 3 + (tile_y + 7)*F] = temp37;
tt[index + tile_x + 3 + (tile_y + 8)*F] = temp38;
tt[index + tile_x + 3 + (tile_y + 9)*F] = temp39;
tt[index + tile_x + 4 + tile_y*F] = temp40;
tt[index + tile_x + 4 + (tile_y + 1)*F] = temp41;
tt[index + tile_x + 4 + (tile_y + 2)*F] = temp42;
tt[index + tile_x + 4 + (tile_y + 3)*F] = temp43;
tt[index + tile_x + 4 + (tile_y + 4)*F] = temp44;
tt[index + tile_x + 4 + (tile_y + 5)*F] = temp45;
tt[index + tile_x + 4 + (tile_y + 6)*F] = temp46;
tt[index + tile_x + 4 + (tile_y + 7)*F] = temp47;
tt[index + tile_x + 4 + (tile_y + 8)*F] = temp48;
tt[index + tile_x + 4 + (tile_y + 9)*F] = temp49;
tt[index + tile_x + 5 + tile_y*F] = temp50;
tt[index + tile_x + 5 + (tile_y + 1)*F] = temp51;
tt[index + tile_x + 5 + (tile_y + 2)*F] = temp52;
tt[index + tile_x + 5 + (tile_y + 3)*F] = temp53;
tt[index + tile_x + 5 + (tile_y + 4)*F] = temp54;
tt[index + tile_x + 5 + (tile_y + 5)*F] = temp55;
tt[index + tile_x + 5 + (tile_y + 6)*F] = temp56;
tt[index + tile_x + 5 + (tile_y + 7)*F] = temp57;
tt[index + tile_x + 5 + (tile_y + 8)*F] = temp58;
tt[index + tile_x + 5 + (tile_y + 9)*F] = temp59;
tt[index + tile_x + 6 + tile_y*F] = temp60;
tt[index + tile_x + 6 + (tile_y + 1)*F] = temp61;
tt[index + tile_x + 6 + (tile_y + 2)*F] = temp62;
tt[index + tile_x + 6 + (tile_y + 3)*F] = temp63;
tt[index + tile_x + 6 + (tile_y + 4)*F] = temp64;
tt[index + tile_x + 6 + (tile_y + 5)*F] = temp65;
tt[index + tile_x + 6 + (tile_y + 6)*F] = temp66;
tt[index + tile_x + 6 + (tile_y + 7)*F] = temp67;
tt[index + tile_x + 6 + (tile_y + 8)*F] = temp68;
tt[index + tile_x + 6 + (tile_y + 9)*F] = temp69;
tt[index + tile_x + 7 + tile_y*F] = temp70;
tt[index + tile_x + 7 + (tile_y + 1)*F] = temp71;
tt[index + tile_x + 7 + (tile_y + 2)*F] = temp72;
tt[index + tile_x + 7 + (tile_y + 3)*F] = temp73;
tt[index + tile_x + 7 + (tile_y + 4)*F] = temp74;
tt[index + tile_x + 7 + (tile_y + 5)*F] = temp75;
tt[index + tile_x + 7 + (tile_y + 6)*F] = temp76;
tt[index + tile_x + 7 + (tile_y + 7)*F] = temp77;
tt[index + tile_x + 7 + (tile_y + 8)*F] = temp78;
tt[index + tile_x + 7 + (tile_y + 9)*F] = temp79;
tt[index + tile_x + 8 + tile_y*F] = temp80;
tt[index + tile_x + 8 + (tile_y + 1)*F] = temp81;
tt[index + tile_x + 8 + (tile_y + 2)*F] = temp82;
tt[index + tile_x + 8 + (tile_y + 3)*F] = temp83;
tt[index + tile_x + 8 + (tile_y + 4)*F] = temp84;
tt[index + tile_x + 8 + (tile_y + 5)*F] = temp85;
tt[index + tile_x + 8 + (tile_y + 6)*F] = temp86;
tt[index + tile_x + 8 + (tile_y + 7)*F] = temp87;
tt[index + tile_x + 8 + (tile_y + 8)*F] = temp88;
tt[index + tile_x + 8 + (tile_y + 9)*F] = temp89;
tt[index + tile_x + 9 + tile_y*F] = temp90;
tt[index + tile_x + 9 + (tile_y + 1)*F] = temp91;
tt[index + tile_x + 9 + (tile_y + 2)*F] = temp92;
tt[index + tile_x + 9 + (tile_y + 3)*F] = temp93;
tt[index + tile_x + 9 + (tile_y + 4)*F] = temp94;
tt[index + tile_x + 9 + (tile_y + 5)*F] = temp95;
tt[index + tile_x + 9 + (tile_y + 6)*F] = temp96;
tt[index + tile_x + 9 + (tile_y + 7)*F] = temp97;
tt[index + tile_x + 9 + (tile_y + 8)*F] = temp98;
tt[index + tile_x + 9 + (tile_y + 9)*F] = temp99;
//symmetric
if(tile_x!=tile_y){
tt[index + tile_y + 0+ (tile_x + 0)*F]= temp0;
tt[index + tile_y + 1+ (tile_x + 0)*F]= temp1;
tt[index + tile_y + 2+ (tile_x + 0)*F]= temp2;
tt[index + tile_y + 3+ (tile_x + 0)*F]= temp3;
tt[index + tile_y + 4+ (tile_x + 0)*F]= temp4;
tt[index + tile_y + 5+ (tile_x + 0)*F]= temp5;
tt[index + tile_y + 6+ (tile_x + 0)*F]= temp6;
tt[index + tile_y + 7+ (tile_x + 0)*F]= temp7;
tt[index + tile_y + 8+ (tile_x + 0)*F]= temp8;
tt[index + tile_y + 9+ (tile_x + 0)*F]= temp9;
tt[index + tile_y + 0+ (tile_x + 1)*F]= temp10;
tt[index + tile_y + 1+ (tile_x + 1)*F]= temp11;
tt[index + tile_y + 2+ (tile_x + 1)*F]= temp12;
tt[index + tile_y + 3+ (tile_x + 1)*F]= temp13;
tt[index + tile_y + 4+ (tile_x + 1)*F]= temp14;
tt[index + tile_y + 5+ (tile_x + 1)*F]= temp15;
tt[index + tile_y + 6+ (tile_x + 1)*F]= temp16;
tt[index + tile_y + 7+ (tile_x + 1)*F]= temp17;
tt[index + tile_y + 8+ (tile_x + 1)*F]= temp18;
tt[index + tile_y + 9+ (tile_x + 1)*F]= temp19;
tt[index + tile_y + 0+ (tile_x + 2)*F]= temp20;
tt[index + tile_y + 1+ (tile_x + 2)*F]= temp21;
tt[index + tile_y + 2+ (tile_x + 2)*F]= temp22;
tt[index + tile_y + 3+ (tile_x + 2)*F]= temp23;
tt[index + tile_y + 4+ (tile_x + 2)*F]= temp24;
tt[index + tile_y + 5+ (tile_x + 2)*F]= temp25;
tt[index + tile_y + 6+ (tile_x + 2)*F]= temp26;
tt[index + tile_y + 7+ (tile_x + 2)*F]= temp27;
tt[index + tile_y + 8+ (tile_x + 2)*F]= temp28;
tt[index + tile_y + 9+ (tile_x + 2)*F]= temp29;
tt[index + tile_y + 0+ (tile_x + 3)*F]= temp30;
tt[index + tile_y + 1+ (tile_x + 3)*F]= temp31;
tt[index + tile_y + 2+ (tile_x + 3)*F]= temp32;
tt[index + tile_y + 3+ (tile_x + 3)*F]= temp33;
tt[index + tile_y + 4+ (tile_x + 3)*F]= temp34;
tt[index + tile_y + 5+ (tile_x + 3)*F]= temp35;
tt[index + tile_y + 6+ (tile_x + 3)*F]= temp36;
tt[index + tile_y + 7+ (tile_x + 3)*F]= temp37;
tt[index + tile_y + 8+ (tile_x + 3)*F]= temp38;
tt[index + tile_y + 9+ (tile_x + 3)*F]= temp39;
tt[index + tile_y + 0+ (tile_x + 4)*F]= temp40;
tt[index + tile_y + 1+ (tile_x + 4)*F]= temp41;
tt[index + tile_y + 2+ (tile_x + 4)*F]= temp42;
tt[index + tile_y + 3+ (tile_x + 4)*F]= temp43;
tt[index + tile_y + 4+ (tile_x + 4)*F]= temp44;
tt[index + tile_y + 5+ (tile_x + 4)*F]= temp45;
tt[index + tile_y + 6+ (tile_x + 4)*F]= temp46;
tt[index + tile_y + 7+ (tile_x + 4)*F]= temp47;
tt[index + tile_y + 8+ (tile_x + 4)*F]= temp48;
tt[index + tile_y + 9+ (tile_x + 4)*F]= temp49;
tt[index + tile_y + 0+ (tile_x + 5)*F]= temp50;
tt[index + tile_y + 1+ (tile_x + 5)*F]= temp51;
tt[index + tile_y + 2+ (tile_x + 5)*F]= temp52;
tt[index + tile_y + 3+ (tile_x + 5)*F]= temp53;
tt[index + tile_y + 4+ (tile_x + 5)*F]= temp54;
tt[index + tile_y + 5+ (tile_x + 5)*F]= temp55;
tt[index + tile_y + 6+ (tile_x + 5)*F]= temp56;
tt[index + tile_y + 7+ (tile_x + 5)*F]= temp57;
tt[index + tile_y + 8+ (tile_x + 5)*F]= temp58;
tt[index + tile_y + 9+ (tile_x + 5)*F]= temp59;
tt[index + tile_y + 0+ (tile_x + 6)*F]= temp60;
tt[index + tile_y + 1+ (tile_x + 6)*F]= temp61;
tt[index + tile_y + 2+ (tile_x + 6)*F]= temp62;
tt[index + tile_y + 3+ (tile_x + 6)*F]= temp63;
tt[index + tile_y + 4+ (tile_x + 6)*F]= temp64;
tt[index + tile_y + 5+ (tile_x + 6)*F]= temp65;
tt[index + tile_y + 6+ (tile_x + 6)*F]= temp66;
tt[index + tile_y + 7+ (tile_x + 6)*F]= temp67;
tt[index + tile_y + 8+ (tile_x + 6)*F]= temp68;
tt[index + tile_y + 9+ (tile_x + 6)*F]= temp69;
tt[index + tile_y + 0+ (tile_x + 7)*F]= temp70;
tt[index + tile_y + 1+ (tile_x + 7)*F]= temp71;
tt[index + tile_y + 2+ (tile_x + 7)*F]= temp72;
tt[index + tile_y + 3+ (tile_x + 7)*F]= temp73;
tt[index + tile_y + 4+ (tile_x + 7)*F]= temp74;
tt[index + tile_y + 5+ (tile_x + 7)*F]= temp75;
tt[index + tile_y + 6+ (tile_x + 7)*F]= temp76;
tt[index + tile_y + 7+ (tile_x + 7)*F]= temp77;
tt[index + tile_y + 8+ (tile_x + 7)*F]= temp78;
tt[index + tile_y + 9+ (tile_x + 7)*F]= temp79;
tt[index + tile_y + 0+ (tile_x + 8)*F]= temp80;
tt[index + tile_y + 1+ (tile_x + 8)*F]= temp81;
tt[index + tile_y + 2+ (tile_x + 8)*F]= temp82;
tt[index + tile_y + 3+ (tile_x + 8)*F]= temp83;
tt[index + tile_y + 4+ (tile_x + 8)*F]= temp84;
tt[index + tile_y + 5+ (tile_x + 8)*F]= temp85;
tt[index + tile_y + 6+ (tile_x + 8)*F]= temp86;
tt[index + tile_y + 7+ (tile_x + 8)*F]= temp87;
tt[index + tile_y + 8+ (tile_x + 8)*F]= temp88;
tt[index + tile_y + 9+ (tile_x + 8)*F]= temp89;
tt[index + tile_y + 0+ (tile_x + 9)*F]= temp90;
tt[index + tile_y + 1+ (tile_x + 9)*F]= temp91;
tt[index + tile_y + 2+ (tile_x + 9)*F]= temp92;
tt[index + tile_y + 3+ (tile_x + 9)*F]= temp93;
tt[index + tile_y + 4+ (tile_x + 9)*F]= temp94;
tt[index + tile_y + 5+ (tile_x + 9)*F]= temp95;
tt[index + tile_y + 6+ (tile_x + 9)*F]= temp96;
tt[index + tile_y + 7+ (tile_x + 9)*F]= temp97;
tt[index + tile_y + 8+ (tile_x + 9)*F]= temp98;
tt[index + tile_y + 9+ (tile_x + 9)*F]= temp99;
}
//regularization
if(tile_x == tile_y){
for(int k = 0; k < tile; k++)
tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda;
}
}
//*/
}
}
__global__ void
__launch_bounds__(64, 6)
get_hermitian_theta(float* xx,
const int* cscRowIndex, const int* cscColIndex, const float lambda, const float * XT) {
__shared__ float2 xTemp[SCAN_BATCH * F/2];
int col = blockIdx.x;
if (col < N) {
		//this block needs to handle end - start rows of X (the users who rated this item)
int start = cscColIndex[col];
int end = cscColIndex[col + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
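		//iterations = ceil((end - start) / SCAN_BATCH): the number of SCAN_BATCH-sized
		//windows of this column's ratings that the block processes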
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
//int tile_x = (threadIdx.x/tile) * tile;//start x of this tile
//int tile_y = (threadIdx.x%tile) * tile;//start y of this tile
int tile_x = 0;
int tile_y = 0;
int tile = F/10;
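		//map the 55 (= 10*11/2) active threads onto the tiles on or above the diagonal of
		//the 10 x 10 tile grid: thread t gets tile (tile_x, tile_y) with tile_y >= tile_x,
		//found by scanning the cumulative per-row tile counts ((20-i)*(i+1))/2 below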
		for ( int i = 0; i < 10; i++){
			int tile_end = ((20-i)*(i+1))/2;
			if(threadIdx.x < tile_end){
				tile_x = i * tile;
				tile_y = (10 + threadIdx.x - tile_end) * tile;
				break;
			}
		}
//iteration: copy gmem-->smem; aggregate smem-->register
for (int iter = 0; iter < iterations; iter ++){
float2 x;
//copy texture --> smem, and sync
/*
if(threadIdx.x < SCAN_BATCH){
if(iter*SCAN_BATCH + threadIdx.x < end - start){
for (int k = 0; k < F; k += 2){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1);
thetaTemp[threadIdx.x * F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float));
}
*/
			//split the load across two warps (threads 0..63): warp 0 copies features 0..49,
			//warp 1 copies features 50..99 of the same SCAN_BATCH rows, avoiding intra-warp divergence
			//(assumes SCAN_BATCH < 32, so one warp has enough lanes per window)
if(threadIdx.x < 2*32 ){
//int index = threadIdx.x;
int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31;
if(index < SCAN_BATCH){
if(iter*SCAN_BATCH + index < end - start){
//for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){
//IMPORTANT: for loop has constant and identical start and end
if(threadIdx.x < 32){
for (int k = 0; k < 50; k += 2){
x.x = XT[ F * cscRowIndex[start + iter*SCAN_BATCH + index] + k ];
x.y = XT[ F * cscRowIndex[start + iter*SCAN_BATCH + index] + k+1];
xTemp[index * F/2 + k/2] = x;
}
}
else {
for (int k = 0; k < 50; k += 2){
x.x = XT[ F * cscRowIndex[start + iter*SCAN_BATCH + index] + k + 50];
x.y = XT[ F * cscRowIndex[start + iter*SCAN_BATCH + index] + k + 51];
xTemp[index * F/2 + k/2 + 25] = x;
}
}
}
				//must be the last iteration; no need to check further
				//not enough rows of X left to copy, so zero-fill this slot
else
memset(&xTemp[index*F/2], 0, F*sizeof(float));
}
}
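			//after this load phase xTemp holds up to SCAN_BATCH rows of X for this column's
			//users, each packed as F/2 float2 values (zero-padded in the last window)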
/* //issue: not coalesced access to csrColIndex
if(threadIdx.x < F && threadIdx.x%2 == 0){
for(int k = 0; k< SCAN_BATCH; k++){
if(iter*SCAN_BATCH + k < end - start){
theta.x = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x);
theta.y = tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + k] + threadIdx.x +1);
thetaTemp[k * F/2 + threadIdx.x/2] = theta;
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[k*F/2 + threadIdx.x/2], 0, 2*sizeof(float));
}
}
*/
/*
int layers = blockDim.x/SCAN_BATCH; //100/30 = 3
//int height = blockDim.x/layers; //30
int y = threadIdx.x/SCAN_BATCH;//0,1,2,3; y==3 is not viable
//min(y, (layers-1)) * height
int y_start = y * 30;//0-29:0;30-59:30;60-89:60
int y_end = y_start + 30; //0-29:30;30-59:60;60-89:90
if(y >= layers - 1) y_end = F; //60-89:100
if(threadIdx.x - y_start < SCAN_BATCH){
if(iter*SCAN_BATCH + (threadIdx.x - y_start) < end - start){
for (int k = y_start; k < y_end; k += 2){
theta.x =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k);
theta.y =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + (threadIdx.x - y_start)] + k+1);
thetaTemp[(threadIdx.x - y_start)* F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[(threadIdx.x - y_start)*F/2 + y_start/2], 0, (y_end - y_start)*sizeof(float));
}
*/
__syncthreads();
///////////////////////////////////////////////////////////////////////////////////////////////////////////
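			//each of the 55 active threads accumulates its 10 x 10 tile of the symmetric
			//product (sum of x_u * x_u^T over the cached users) into registers temp0..temp99,
			//reading shared memory as float2 pairs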
//tile: 10*10
if(threadIdx.x < 55 ){
for(int k = 0; k < SCAN_BATCH; k++){
temp0 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp1 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp2 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp3 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp4 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp5 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp6 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp7 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp8 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp9 += xTemp[tile_x/2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp10 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp11 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp12 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp13 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp14 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp15 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp16 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp17 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp18 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp19 += xTemp[tile_x/2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
temp20 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp21 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp22 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp23 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp24 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp25 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp26 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp27 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp28 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp29 += xTemp[tile_x/2 +1 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp30 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp31 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp32 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp33 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp34 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp35 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp36 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp37 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp38 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp39 += xTemp[tile_x/2 +1 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
temp40 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp41 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp42 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp43 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp44 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp45 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp46 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp47 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp48 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp49 += xTemp[tile_x/2 +2 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp50 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp51 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp52 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp53 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp54 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp55 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp56 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp57 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp58 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp59 += xTemp[tile_x/2 +2 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
temp60 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp61 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp62 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp63 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp64 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp65 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp66 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp67 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp68 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp69 += xTemp[tile_x/2 +3 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp70 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp71 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp72 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp73 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp74 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp75 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp76 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp77 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp78 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp79 += xTemp[tile_x/2 +3 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
temp80 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 + k*F/2].x;
temp81 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 + k*F/2].y;
temp82 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].x;
temp83 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +1 + k*F/2].y;
temp84 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].x;
temp85 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +2 + k*F/2].y;
temp86 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].x;
temp87 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +3 + k*F/2].y;
temp88 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].x;
temp89 += xTemp[tile_x/2 +4 + k*F/2].x * xTemp[tile_y/2 +4 + k*F/2].y;
temp90 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 + k*F/2].x;
temp91 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 + k*F/2].y;
temp92 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].x;
temp93 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +1 + k*F/2].y;
temp94 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].x;
temp95 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +2 + k*F/2].y;
temp96 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].x;
temp97 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +3 + k*F/2].y;
temp98 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].x;
temp99 += xTemp[tile_x/2 +4 + k*F/2].y * xTemp[tile_y/2 +4 + k*F/2].y;
}
}
}
		//end of the windowed iteration: copy gmem --> smem, then aggregate smem --> registers
///////////////////////////////////////////////////////////////////////////////////////////////////////////
__syncthreads();
///*
if(threadIdx.x < 55 ){
//copy output to gmem
int index = blockIdx.x*F*F;
xx[index + tile_x + tile_y*F] = temp0;
xx[index + tile_x + (tile_y + 1)*F] = temp1;
xx[index + tile_x + (tile_y + 2)*F] = temp2;
xx[index + tile_x + (tile_y + 3)*F] = temp3;
xx[index + tile_x + (tile_y + 4)*F] = temp4;
xx[index + tile_x + (tile_y + 5)*F] = temp5;
xx[index + tile_x + (tile_y + 6)*F] = temp6;
xx[index + tile_x + (tile_y + 7)*F] = temp7;
xx[index + tile_x + (tile_y + 8)*F] = temp8;
xx[index + tile_x + (tile_y + 9)*F] = temp9;
xx[index + tile_x + 1 + tile_y*F] = temp10;
xx[index + tile_x + 1 + (tile_y + 1)*F] = temp11;
xx[index + tile_x + 1 + (tile_y + 2)*F] = temp12;
xx[index + tile_x + 1 + (tile_y + 3)*F] = temp13;
xx[index + tile_x + 1 + (tile_y + 4)*F] = temp14;
xx[index + tile_x + 1 + (tile_y + 5)*F] = temp15;
xx[index + tile_x + 1 + (tile_y + 6)*F] = temp16;
xx[index + tile_x + 1 + (tile_y + 7)*F] = temp17;
xx[index + tile_x + 1 + (tile_y + 8)*F] = temp18;
xx[index + tile_x + 1 + (tile_y + 9)*F] = temp19;
xx[index + tile_x + 2 + tile_y*F] = temp20;
xx[index + tile_x + 2 + (tile_y + 1)*F] = temp21;
xx[index + tile_x + 2 + (tile_y + 2)*F] = temp22;
xx[index + tile_x + 2 + (tile_y + 3)*F] = temp23;
xx[index + tile_x + 2 + (tile_y + 4)*F] = temp24;
xx[index + tile_x + 2 + (tile_y + 5)*F] = temp25;
xx[index + tile_x + 2 + (tile_y + 6)*F] = temp26;
xx[index + tile_x + 2 + (tile_y + 7)*F] = temp27;
xx[index + tile_x + 2 + (tile_y + 8)*F] = temp28;
xx[index + tile_x + 2 + (tile_y + 9)*F] = temp29;
xx[index + tile_x + 3 + tile_y*F] = temp30;
xx[index + tile_x + 3 + (tile_y + 1)*F] = temp31;
xx[index + tile_x + 3 + (tile_y + 2)*F] = temp32;
xx[index + tile_x + 3 + (tile_y + 3)*F] = temp33;
xx[index + tile_x + 3 + (tile_y + 4)*F] = temp34;
xx[index + tile_x + 3 + (tile_y + 5)*F] = temp35;
xx[index + tile_x + 3 + (tile_y + 6)*F] = temp36;
xx[index + tile_x + 3 + (tile_y + 7)*F] = temp37;
xx[index + tile_x + 3 + (tile_y + 8)*F] = temp38;
xx[index + tile_x + 3 + (tile_y + 9)*F] = temp39;
xx[index + tile_x + 4 + tile_y*F] = temp40;
xx[index + tile_x + 4 + (tile_y + 1)*F] = temp41;
xx[index + tile_x + 4 + (tile_y + 2)*F] = temp42;
xx[index + tile_x + 4 + (tile_y + 3)*F] = temp43;
xx[index + tile_x + 4 + (tile_y + 4)*F] = temp44;
xx[index + tile_x + 4 + (tile_y + 5)*F] = temp45;
xx[index + tile_x + 4 + (tile_y + 6)*F] = temp46;
xx[index + tile_x + 4 + (tile_y + 7)*F] = temp47;
xx[index + tile_x + 4 + (tile_y + 8)*F] = temp48;
xx[index + tile_x + 4 + (tile_y + 9)*F] = temp49;
xx[index + tile_x + 5 + tile_y*F] = temp50;
xx[index + tile_x + 5 + (tile_y + 1)*F] = temp51;
xx[index + tile_x + 5 + (tile_y + 2)*F] = temp52;
xx[index + tile_x + 5 + (tile_y + 3)*F] = temp53;
xx[index + tile_x + 5 + (tile_y + 4)*F] = temp54;
xx[index + tile_x + 5 + (tile_y + 5)*F] = temp55;
xx[index + tile_x + 5 + (tile_y + 6)*F] = temp56;
xx[index + tile_x + 5 + (tile_y + 7)*F] = temp57;
xx[index + tile_x + 5 + (tile_y + 8)*F] = temp58;
xx[index + tile_x + 5 + (tile_y + 9)*F] = temp59;
xx[index + tile_x + 6 + tile_y*F] = temp60;
xx[index + tile_x + 6 + (tile_y + 1)*F] = temp61;
xx[index + tile_x + 6 + (tile_y + 2)*F] = temp62;
xx[index + tile_x + 6 + (tile_y + 3)*F] = temp63;
xx[index + tile_x + 6 + (tile_y + 4)*F] = temp64;
xx[index + tile_x + 6 + (tile_y + 5)*F] = temp65;
xx[index + tile_x + 6 + (tile_y + 6)*F] = temp66;
xx[index + tile_x + 6 + (tile_y + 7)*F] = temp67;
xx[index + tile_x + 6 + (tile_y + 8)*F] = temp68;
xx[index + tile_x + 6 + (tile_y + 9)*F] = temp69;
xx[index + tile_x + 7 + tile_y*F] = temp70;
xx[index + tile_x + 7 + (tile_y + 1)*F] = temp71;
xx[index + tile_x + 7 + (tile_y + 2)*F] = temp72;
xx[index + tile_x + 7 + (tile_y + 3)*F] = temp73;
xx[index + tile_x + 7 + (tile_y + 4)*F] = temp74;
xx[index + tile_x + 7 + (tile_y + 5)*F] = temp75;
xx[index + tile_x + 7 + (tile_y + 6)*F] = temp76;
xx[index + tile_x + 7 + (tile_y + 7)*F] = temp77;
xx[index + tile_x + 7 + (tile_y + 8)*F] = temp78;
xx[index + tile_x + 7 + (tile_y + 9)*F] = temp79;
xx[index + tile_x + 8 + tile_y*F] = temp80;
xx[index + tile_x + 8 + (tile_y + 1)*F] = temp81;
xx[index + tile_x + 8 + (tile_y + 2)*F] = temp82;
xx[index + tile_x + 8 + (tile_y + 3)*F] = temp83;
xx[index + tile_x + 8 + (tile_y + 4)*F] = temp84;
xx[index + tile_x + 8 + (tile_y + 5)*F] = temp85;
xx[index + tile_x + 8 + (tile_y + 6)*F] = temp86;
xx[index + tile_x + 8 + (tile_y + 7)*F] = temp87;
xx[index + tile_x + 8 + (tile_y + 8)*F] = temp88;
xx[index + tile_x + 8 + (tile_y + 9)*F] = temp89;
xx[index + tile_x + 9 + tile_y*F] = temp90;
xx[index + tile_x + 9 + (tile_y + 1)*F] = temp91;
xx[index + tile_x + 9 + (tile_y + 2)*F] = temp92;
xx[index + tile_x + 9 + (tile_y + 3)*F] = temp93;
xx[index + tile_x + 9 + (tile_y + 4)*F] = temp94;
xx[index + tile_x + 9 + (tile_y + 5)*F] = temp95;
xx[index + tile_x + 9 + (tile_y + 6)*F] = temp96;
xx[index + tile_x + 9 + (tile_y + 7)*F] = temp97;
xx[index + tile_x + 9 + (tile_y + 8)*F] = temp98;
xx[index + tile_x + 9 + (tile_y + 9)*F] = temp99;
//symmetric
if(tile_x!=tile_y){
xx[index + tile_y + 0+ (tile_x + 0)*F]= temp0;
xx[index + tile_y + 1+ (tile_x + 0)*F]= temp1;
xx[index + tile_y + 2+ (tile_x + 0)*F]= temp2;
xx[index + tile_y + 3+ (tile_x + 0)*F]= temp3;
xx[index + tile_y + 4+ (tile_x + 0)*F]= temp4;
xx[index + tile_y + 5+ (tile_x + 0)*F]= temp5;
xx[index + tile_y + 6+ (tile_x + 0)*F]= temp6;
xx[index + tile_y + 7+ (tile_x + 0)*F]= temp7;
xx[index + tile_y + 8+ (tile_x + 0)*F]= temp8;
xx[index + tile_y + 9+ (tile_x + 0)*F]= temp9;
xx[index + tile_y + 0+ (tile_x + 1)*F]= temp10;
xx[index + tile_y + 1+ (tile_x + 1)*F]= temp11;
xx[index + tile_y + 2+ (tile_x + 1)*F]= temp12;
xx[index + tile_y + 3+ (tile_x + 1)*F]= temp13;
xx[index + tile_y + 4+ (tile_x + 1)*F]= temp14;
xx[index + tile_y + 5+ (tile_x + 1)*F]= temp15;
xx[index + tile_y + 6+ (tile_x + 1)*F]= temp16;
xx[index + tile_y + 7+ (tile_x + 1)*F]= temp17;
xx[index + tile_y + 8+ (tile_x + 1)*F]= temp18;
xx[index + tile_y + 9+ (tile_x + 1)*F]= temp19;
xx[index + tile_y + 0+ (tile_x + 2)*F]= temp20;
xx[index + tile_y + 1+ (tile_x + 2)*F]= temp21;
xx[index + tile_y + 2+ (tile_x + 2)*F]= temp22;
xx[index + tile_y + 3+ (tile_x + 2)*F]= temp23;
xx[index + tile_y + 4+ (tile_x + 2)*F]= temp24;
xx[index + tile_y + 5+ (tile_x + 2)*F]= temp25;
xx[index + tile_y + 6+ (tile_x + 2)*F]= temp26;
xx[index + tile_y + 7+ (tile_x + 2)*F]= temp27;
xx[index + tile_y + 8+ (tile_x + 2)*F]= temp28;
xx[index + tile_y + 9+ (tile_x + 2)*F]= temp29;
xx[index + tile_y + 0+ (tile_x + 3)*F]= temp30;
xx[index + tile_y + 1+ (tile_x + 3)*F]= temp31;
xx[index + tile_y + 2+ (tile_x + 3)*F]= temp32;
xx[index + tile_y + 3+ (tile_x + 3)*F]= temp33;
xx[index + tile_y + 4+ (tile_x + 3)*F]= temp34;
xx[index + tile_y + 5+ (tile_x + 3)*F]= temp35;
xx[index + tile_y + 6+ (tile_x + 3)*F]= temp36;
xx[index + tile_y + 7+ (tile_x + 3)*F]= temp37;
xx[index + tile_y + 8+ (tile_x + 3)*F]= temp38;
xx[index + tile_y + 9+ (tile_x + 3)*F]= temp39;
xx[index + tile_y + 0+ (tile_x + 4)*F]= temp40;
xx[index + tile_y + 1+ (tile_x + 4)*F]= temp41;
xx[index + tile_y + 2+ (tile_x + 4)*F]= temp42;
xx[index + tile_y + 3+ (tile_x + 4)*F]= temp43;
xx[index + tile_y + 4+ (tile_x + 4)*F]= temp44;
xx[index + tile_y + 5+ (tile_x + 4)*F]= temp45;
xx[index + tile_y + 6+ (tile_x + 4)*F]= temp46;
xx[index + tile_y + 7+ (tile_x + 4)*F]= temp47;
xx[index + tile_y + 8+ (tile_x + 4)*F]= temp48;
xx[index + tile_y + 9+ (tile_x + 4)*F]= temp49;
xx[index + tile_y + 0+ (tile_x + 5)*F]= temp50;
xx[index + tile_y + 1+ (tile_x + 5)*F]= temp51;
xx[index + tile_y + 2+ (tile_x + 5)*F]= temp52;
xx[index + tile_y + 3+ (tile_x + 5)*F]= temp53;
xx[index + tile_y + 4+ (tile_x + 5)*F]= temp54;
xx[index + tile_y + 5+ (tile_x + 5)*F]= temp55;
xx[index + tile_y + 6+ (tile_x + 5)*F]= temp56;
xx[index + tile_y + 7+ (tile_x + 5)*F]= temp57;
xx[index + tile_y + 8+ (tile_x + 5)*F]= temp58;
xx[index + tile_y + 9+ (tile_x + 5)*F]= temp59;
xx[index + tile_y + 0+ (tile_x + 6)*F]= temp60;
xx[index + tile_y + 1+ (tile_x + 6)*F]= temp61;
xx[index + tile_y + 2+ (tile_x + 6)*F]= temp62;
xx[index + tile_y + 3+ (tile_x + 6)*F]= temp63;
xx[index + tile_y + 4+ (tile_x + 6)*F]= temp64;
xx[index + tile_y + 5+ (tile_x + 6)*F]= temp65;
xx[index + tile_y + 6+ (tile_x + 6)*F]= temp66;
xx[index + tile_y + 7+ (tile_x + 6)*F]= temp67;
xx[index + tile_y + 8+ (tile_x + 6)*F]= temp68;
xx[index + tile_y + 9+ (tile_x + 6)*F]= temp69;
xx[index + tile_y + 0+ (tile_x + 7)*F]= temp70;
xx[index + tile_y + 1+ (tile_x + 7)*F]= temp71;
xx[index + tile_y + 2+ (tile_x + 7)*F]= temp72;
xx[index + tile_y + 3+ (tile_x + 7)*F]= temp73;
xx[index + tile_y + 4+ (tile_x + 7)*F]= temp74;
xx[index + tile_y + 5+ (tile_x + 7)*F]= temp75;
xx[index + tile_y + 6+ (tile_x + 7)*F]= temp76;
xx[index + tile_y + 7+ (tile_x + 7)*F]= temp77;
xx[index + tile_y + 8+ (tile_x + 7)*F]= temp78;
xx[index + tile_y + 9+ (tile_x + 7)*F]= temp79;
xx[index + tile_y + 0+ (tile_x + 8)*F]= temp80;
xx[index + tile_y + 1+ (tile_x + 8)*F]= temp81;
xx[index + tile_y + 2+ (tile_x + 8)*F]= temp82;
xx[index + tile_y + 3+ (tile_x + 8)*F]= temp83;
xx[index + tile_y + 4+ (tile_x + 8)*F]= temp84;
xx[index + tile_y + 5+ (tile_x + 8)*F]= temp85;
xx[index + tile_y + 6+ (tile_x + 8)*F]= temp86;
xx[index + tile_y + 7+ (tile_x + 8)*F]= temp87;
xx[index + tile_y + 8+ (tile_x + 8)*F]= temp88;
xx[index + tile_y + 9+ (tile_x + 8)*F]= temp89;
xx[index + tile_y + 0+ (tile_x + 9)*F]= temp90;
xx[index + tile_y + 1+ (tile_x + 9)*F]= temp91;
xx[index + tile_y + 2+ (tile_x + 9)*F]= temp92;
xx[index + tile_y + 3+ (tile_x + 9)*F]= temp93;
xx[index + tile_y + 4+ (tile_x + 9)*F]= temp94;
xx[index + tile_y + 5+ (tile_x + 9)*F]= temp95;
xx[index + tile_y + 6+ (tile_x + 9)*F]= temp96;
xx[index + tile_y + 7+ (tile_x + 9)*F]= temp97;
xx[index + tile_y + 8+ (tile_x + 9)*F]= temp98;
xx[index + tile_y + 9+ (tile_x + 9)*F]= temp99;
}
			//regularization: diagonal tiles add (end - start) * lambda to each of their
			//diagonal entries (the (1+F) stride walks the diagonal of the F x F matrix)
if(tile_x == tile_y){
for(int k = 0; k < tile; k++)
xx[index + (tile_x+k)*(1+F)] += (end - start) * lambda;
}
}
//*/
}
}
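//A host-side launch sketch for the kernel above (an illustrative assumption, not code from
//this file): one block per item column and 64 threads per block, matching
//__launch_bounds__(64, 6) and the threadIdx.x guards used above, e.g.
//	get_hermitian_theta<<<N, 64>>>(xx, cscRowIndex, cscColIndex, lambda, XT);
//with xx sized N * F * F floats and XT holding F consecutive features per user.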
__global__ void
__launch_bounds__(100, 4)
updateXByBlock2pRegDsmemTile(float* tt,
const int* csrRowIndex, const int* csrColIndex, const float lambda) {
__shared__ float2 thetaTemp[SCAN_BATCH * F/2];
int row = blockIdx.x;
if (row < M) {
//this block needs to handle end - start thetaT columns
int start = csrRowIndex[row];
int end = csrRowIndex[row + 1];
//slide through [start, end] by window size SCAN_BATCH
int iterations = (end - start - 1)/SCAN_BATCH + 1;
float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0;
float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0;
float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0;
float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0;
float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0;
float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0;
float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0;
float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0;
float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0;
float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0;
float2 theta;
int tile = F/10;
int tile_x = (threadIdx.x/tile) * tile;//start x of this tile
int tile_y = (threadIdx.x%tile) * tile;//start y of this tile
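		//unlike the 55-thread triangular scheme used elsewhere, this kernel runs with 100
		//threads (__launch_bounds__(100, 4)); each thread owns one 10 x 10 tile of the full
		//F x F product, so both triangles are computed directly and no mirroring is needed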
for (int iter = 0; iter < iterations; iter ++){
//copy texture --> smem, and sync
if(threadIdx.x < SCAN_BATCH){
if(iter*SCAN_BATCH + threadIdx.x < end - start){
for (int k = 0; k < F; k += 2){
theta.x =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k);
theta.y =
tex1Dfetch(thetaTTexRef, F * csrColIndex[start + iter*SCAN_BATCH + threadIdx.x] + k+1);
thetaTemp[threadIdx.x * F/2 + k/2] = theta;
}
}
//must be the last iteration; no need to check
//not enough theta to copy, set zero
else
memset(&thetaTemp[threadIdx.x*F/2], 0, F*sizeof(float));
}
__syncthreads();
///////////////////////////////////////////////////////////////////////////////////////////////////////////
//tile: 10*10
for(int k = 0; k < SCAN_BATCH; k++){
temp0 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp1 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp2 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp3 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp4 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp5 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp6 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp7 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp8 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp9 += thetaTemp[tile_x/2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp10 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp11 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp12 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp13 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp14 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp15 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp16 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp17 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp18 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp19 += thetaTemp[tile_x/2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp20 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp21 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp22 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp23 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp24 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp25 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp26 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp27 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp28 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp29 += thetaTemp[tile_x/2 +1 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp30 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp31 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp32 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp33 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp34 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp35 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp36 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp37 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp38 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp39 += thetaTemp[tile_x/2 +1 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp40 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp41 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp42 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp43 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp44 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp45 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp46 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp47 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp48 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp49 += thetaTemp[tile_x/2 +2 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp50 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp51 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp52 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp53 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp54 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp55 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp56 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp57 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp58 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp59 += thetaTemp[tile_x/2 +2 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp60 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp61 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp62 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp63 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp64 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp65 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp66 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp67 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp68 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp69 += thetaTemp[tile_x/2 +3 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp70 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp71 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp72 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp73 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp74 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp75 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp76 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp77 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp78 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp79 += thetaTemp[tile_x/2 +3 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp80 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].x;
temp81 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 + k*F/2].y;
temp82 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp83 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp84 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp85 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp86 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp87 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp88 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp89 += thetaTemp[tile_x/2 +4 + k*F/2].x * thetaTemp[tile_y/2 +4 + k*F/2].y;
temp90 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].x;
temp91 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 + k*F/2].y;
temp92 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].x;
temp93 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +1 + k*F/2].y;
temp94 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].x;
temp95 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +2 + k*F/2].y;
temp96 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].x;
temp97 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +3 + k*F/2].y;
temp98 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].x;
temp99 += thetaTemp[tile_x/2 +4 + k*F/2].y * thetaTemp[tile_y/2 +4 + k*F/2].y;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
__syncthreads();
}
int index = blockIdx.x*F*F;
///*
//copy output to gmem
tt[index + tile_x + tile_y*F] = temp0;
tt[index + tile_x + (tile_y + 1)*F] = temp1;
tt[index + tile_x + (tile_y + 2)*F] = temp2;
tt[index + tile_x + (tile_y + 3)*F] = temp3;
tt[index + tile_x + (tile_y + 4)*F] = temp4;
tt[index + tile_x + (tile_y + 5)*F] = temp5;
tt[index + tile_x + (tile_y + 6)*F] = temp6;
tt[index + tile_x + (tile_y + 7)*F] = temp7;
tt[index + tile_x + (tile_y + 8)*F] = temp8;
tt[index + tile_x + (tile_y + 9)*F] = temp9;
tt[index + tile_x + 1 + tile_y*F] = temp10;
tt[index + tile_x + 1 + (tile_y + 1)*F] = temp11;
tt[index + tile_x + 1 + (tile_y + 2)*F] = temp12;
tt[index + tile_x + 1 + (tile_y + 3)*F] = temp13;
tt[index + tile_x + 1 + (tile_y + 4)*F] = temp14;
tt[index + tile_x + 1 + (tile_y + 5)*F] = temp15;
tt[index + tile_x + 1 + (tile_y + 6)*F] = temp16;
tt[index + tile_x + 1 + (tile_y + 7)*F] = temp17;
tt[index + tile_x + 1 + (tile_y + 8)*F] = temp18;
tt[index + tile_x + 1 + (tile_y + 9)*F] = temp19;
tt[index + tile_x + 2 + tile_y*F] = temp20;
tt[index + tile_x + 2 + (tile_y + 1)*F] = temp21;
tt[index + tile_x + 2 + (tile_y + 2)*F] = temp22;
tt[index + tile_x + 2 + (tile_y + 3)*F] = temp23;
tt[index + tile_x + 2 + (tile_y + 4)*F] = temp24;
tt[index + tile_x + 2 + (tile_y + 5)*F] = temp25;
tt[index + tile_x + 2 + (tile_y + 6)*F] = temp26;
tt[index + tile_x + 2 + (tile_y + 7)*F] = temp27;
tt[index + tile_x + 2 + (tile_y + 8)*F] = temp28;
tt[index + tile_x + 2 + (tile_y + 9)*F] = temp29;
tt[index + tile_x + 3 + tile_y*F] = temp30;
tt[index + tile_x + 3 + (tile_y + 1)*F] = temp31;
tt[index + tile_x + 3 + (tile_y + 2)*F] = temp32;
tt[index + tile_x + 3 + (tile_y + 3)*F] = temp33;
tt[index + tile_x + 3 + (tile_y + 4)*F] = temp34;
tt[index + tile_x + 3 + (tile_y + 5)*F] = temp35;
tt[index + tile_x + 3 + (tile_y + 6)*F] = temp36;
tt[index + tile_x + 3 + (tile_y + 7)*F] = temp37;
tt[index + tile_x + 3 + (tile_y + 8)*F] = temp38;
tt[index + tile_x + 3 + (tile_y + 9)*F] = temp39;
tt[index + tile_x + 4 + tile_y*F] = temp40;
tt[index + tile_x + 4 + (tile_y + 1)*F] = temp41;
tt[index + tile_x + 4 + (tile_y + 2)*F] = temp42;
tt[index + tile_x + 4 + (tile_y + 3)*F] = temp43;
tt[index + tile_x + 4 + (tile_y + 4)*F] = temp44;
tt[index + tile_x + 4 + (tile_y + 5)*F] = temp45;
tt[index + tile_x + 4 + (tile_y + 6)*F] = temp46;
tt[index + tile_x + 4 + (tile_y + 7)*F] = temp47;
tt[index + tile_x + 4 + (tile_y + 8)*F] = temp48;
tt[index + tile_x + 4 + (tile_y + 9)*F] = temp49;
tt[index + tile_x + 5 + tile_y*F] = temp50;
tt[index + tile_x + 5 + (tile_y + 1)*F] = temp51;
tt[index + tile_x + 5 + (tile_y + 2)*F] = temp52;
tt[index + tile_x + 5 + (tile_y + 3)*F] = temp53;
tt[index + tile_x + 5 + (tile_y + 4)*F] = temp54;
tt[index + tile_x + 5 + (tile_y + 5)*F] = temp55;
tt[index + tile_x + 5 + (tile_y + 6)*F] = temp56;
tt[index + tile_x + 5 + (tile_y + 7)*F] = temp57;
tt[index + tile_x + 5 + (tile_y + 8)*F] = temp58;
tt[index + tile_x + 5 + (tile_y + 9)*F] = temp59;
tt[index + tile_x + 6 + tile_y*F] = temp60;
tt[index + tile_x + 6 + (tile_y + 1)*F] = temp61;
tt[index + tile_x + 6 + (tile_y + 2)*F] = temp62;
tt[index + tile_x + 6 + (tile_y + 3)*F] = temp63;
tt[index + tile_x + 6 + (tile_y + 4)*F] = temp64;
tt[index + tile_x + 6 + (tile_y + 5)*F] = temp65;
tt[index + tile_x + 6 + (tile_y + 6)*F] = temp66;
tt[index + tile_x + 6 + (tile_y + 7)*F] = temp67;
tt[index + tile_x + 6 + (tile_y + 8)*F] = temp68;
tt[index + tile_x + 6 + (tile_y + 9)*F] = temp69;
tt[index + tile_x + 7 + tile_y*F] = temp70;
tt[index + tile_x + 7 + (tile_y + 1)*F] = temp71;
tt[index + tile_x + 7 + (tile_y + 2)*F] = temp72;
tt[index + tile_x + 7 + (tile_y + 3)*F] = temp73;
tt[index + tile_x + 7 + (tile_y + 4)*F] = temp74;
tt[index + tile_x + 7 + (tile_y + 5)*F] = temp75;
tt[index + tile_x + 7 + (tile_y + 6)*F] = temp76;
tt[index + tile_x + 7 + (tile_y + 7)*F] = temp77;
tt[index + tile_x + 7 + (tile_y + 8)*F] = temp78;
tt[index + tile_x + 7 + (tile_y + 9)*F] = temp79;
tt[index + tile_x + 8 + tile_y*F] = temp80;
tt[index + tile_x + 8 + (tile_y + 1)*F] = temp81;
tt[index + tile_x + 8 + (tile_y + 2)*F] = temp82;
tt[index + tile_x + 8 + (tile_y + 3)*F] = temp83;
tt[index + tile_x + 8 + (tile_y + 4)*F] = temp84;
tt[index + tile_x + 8 + (tile_y + 5)*F] = temp85;
tt[index + tile_x + 8 + (tile_y + 6)*F] = temp86;
tt[index + tile_x + 8 + (tile_y + 7)*F] = temp87;
tt[index + tile_x + 8 + (tile_y + 8)*F] = temp88;
tt[index + tile_x + 8 + (tile_y + 9)*F] = temp89;
tt[index + tile_x + 9 + tile_y*F] = temp90;
tt[index + tile_x + 9 + (tile_y + 1)*F] = temp91;
tt[index + tile_x + 9 + (tile_y + 2)*F] = temp92;
tt[index + tile_x + 9 + (tile_y + 3)*F] = temp93;
tt[index + tile_x + 9 + (tile_y + 4)*F] = temp94;
tt[index + tile_x + 9 + (tile_y + 5)*F] = temp95;
tt[index + tile_x + 9 + (tile_y + 6)*F] = temp96;
tt[index + tile_x + 9 + (tile_y + 7)*F] = temp97;
tt[index + tile_x + 9 + (tile_y + 8)*F] = temp98;
tt[index + tile_x + 9 + (tile_y + 9)*F] = temp99;
//*/
//regularization
if(tile_x == tile_y){
for(int k = 0; k < tile; k++)
tt[index + (tile_x+k)*(1+F)] += (end - start) * lambda;
}
}
}
void loadCSRSparseMatrix(const char* dataFile, const char* rowFile, const char* colFile,
float* data, unsigned int* row, int* col) {
printf("\n loading CSR...\n");
FILE *dFile = fopen(dataFile,"rb");
FILE *rFile = fopen(rowFile,"rb");
FILE *cFile = fopen(colFile,"rb");
if (!rFile||!dFile||!cFile)
{
printf("Unable to open file!");
return;
}
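	//the .bin files are assumed to hold raw 4-byte values in host byte order:
	//M+1 row pointers, NNZ column indices, and NNZ float ratings (hence the hard-coded 4s)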
fread(&row[0], 4*(M+1) ,1, rFile);
fread(&col[0], 4*NNZ ,1, cFile);
fread(&data[0], 4*NNZ ,1, dFile);
fclose(rFile);
fclose(dFile);
fclose(cFile);
}
void loadCSCSparseMatrix(const char* dataFile, const char* rowFile, const char* colFile, float * data, int* row, int* col) {
printf("\n loading CSC...\n");
FILE *dFile = fopen(dataFile,"rb");
FILE *rFile = fopen(rowFile,"rb");
FILE *cFile = fopen(colFile,"rb");
	if (!rFile||!dFile||!cFile)
{
printf("Unable to open file!");
return;
}
fread(&data[0], 4*NNZ ,1, dFile);
fread(&row[0], 4*NNZ ,1, rFile);
fread(&col[0], 4*(N+1) ,1, cFile);
fclose(rFile);
fclose(dFile);
fclose(cFile);
}
void loadCSCSparseMatrixInBatch(const std::string dataFile, const std::string rowFile, const std::string colFile, float * data, int* row, int* col, long csc_nnz, int n) {
printf("\n loading CSC from %s, %s, %s \n", dataFile.c_str(), rowFile.c_str(), colFile.c_str());
FILE *dFile = fopen(dataFile.c_str(),"rb");
FILE *rFile = fopen(rowFile.c_str(),"rb");
FILE *cFile = fopen(colFile.c_str(),"rb");
	if (!rFile||!dFile||!cFile)
{
printf("Unable to open file!");
return;
}
fread(&data[0], 4*csc_nnz ,1, dFile);
fread(&row[0], 4*csc_nnz ,1, rFile);
fread(&col[0], 4*(n+1) ,1, cFile);
fclose(rFile);
fclose(dFile);
fclose(cFile);
}
void loadCooSparseMatrixRowPtr(const char* rowFile, int* row) {
printf("\n loading COO...\n");
FILE *rfile = fopen(rowFile,"rb");
fread(&row[0], 4*NNZ ,1, rfile);
fclose(rfile);
//FILE *file = fopen("./hugewiki_R_train_coo.row.bin", "wb");
//fwrite(row, 4*NNZ, 1, file);
//fclose(file);
}
void loadCooSparseMatrix(const char* dataFile, const char* rowFile, const char* colFile,
float* data, int* row, int* col, int nnz) {
std::ifstream dfile(dataFile);
std::ifstream rfile(rowFile);
std::ifstream cfile(colFile);
float d;
int d_i = 0;
while (dfile >> d) {
//printf("%f ",d);
data[d_i++] = d;
}
int r;
int r_i = 0;
while (rfile >> r) {
//printf("%d ",r);
row[r_i++] = r;
}
int c;
int c_i = 0;
while (cfile >> c) {
//printf("%d ",c);
col[c_i++] = c;
}
}
inline void updateX(
// const int batch_id,
const int batch_size, const long batch_offset, float * ythetaT, float * tt, float * XT_h,
cublasHandle_t handle,
// const int m, const int n, const int f, const int nnz,
float** devPtrTTHost, float **devPtrYthetaTHost,
float **devPtrTT, float **devPtrYthetaT, int *P, int *INFO){
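	//updateX solves one F x F linear system per user in this batch: tt holds the
	//regularized Hermitian left-hand sides, ythetaT the right-hand sides, and cuBLAS
	//batched LU (getrfBatched + getrsBatched) overwrites ythetaT with the solutions,
	//which are then copied back into XT_h on the host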
//auto t0 = std::chrono::high_resolution_clock::now();
//left-hand side pointers
for (int k = 0; k < batch_size; k++) {
devPtrTTHost[k] = &tt[k * F * F];
}
cudacall(cudaMemcpy(devPtrTT, devPtrTTHost,
batch_size * sizeof(*devPtrTT),cudaMemcpyHostToDevice));
int * info2 = (int *) malloc(sizeof(int));
//right-hand side pointer
for (int k = 0; k < batch_size; k++) {
devPtrYthetaTHost[k] = &ythetaT[k * F];
}
cudacall(cudaMemcpy(devPtrYthetaT, devPtrYthetaTHost, batch_size * sizeof(*devPtrYthetaT),
cudaMemcpyHostToDevice));
	//getrf (batched LU factorization of each F x F tt matrix), then getrs (batched solve of tt * x = ythetaT)
//printf("\t\t\tbatch %d, prepare in secs: %f\n", batch_id, seconds() - t0);
//t0 = seconds();
cublasSgetrfBatched(handle, F, devPtrTT, F, P, INFO, batch_size);
//cudaDeviceSynchronize();
//cudaCheckError();
//printf("\t\t\tbatch %d, LU factorization of tt in secs: %f\n", batch_id, seconds() - t0);
//t0 = seconds();
cublasSgetrsBatched(handle, CUBLAS_OP_N, F, 1,
(const float ** ) devPtrTT, F, P, devPtrYthetaT, F, info2, batch_size);
//cudaDeviceSynchronize();
//cudaCheckError();
//printf("\t\t\tbatch %d, solve after LU in secs: %f\n", batch_id, seconds() - t0);
//t0 = seconds();
	cudacall( cudaMemcpy(&XT_h[batch_offset * F], ythetaT,
			batch_size * F * sizeof(float), cudaMemcpyDeviceToHost) );
	//printf("\t\t\tbatch %d, copy to host XT_h secs: %f\n", batch_id, seconds() - t0);
	//release the host-side info buffer allocated above; it was previously leaked on every call
	free(info2);
}
int updateTheta(const int batch_size, const int batch_offset, float * xx,
float * yTXT, float * thetaT,
cublasHandle_t handle, const int n, const int f){
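	//updateTheta mirrors updateX for the item factors: batched LU factorization of the xx
	//matrices followed by batched solves against yTXT, with the solutions copied
	//device-to-device into thetaT for this batch of items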
float ** devPtrXXHost = (float**) malloc(batch_size * sizeof(devPtrXXHost[0]));
float **devPtrXX = 0;
for (int k = 0; k < batch_size; k++) {
devPtrXXHost[k] = &xx[k * F * F];
}
cudacall(cudaMalloc((void** ) &devPtrXX, batch_size * sizeof(*devPtrXX)));
cudacall(cudaMemcpy(devPtrXX, devPtrXXHost, batch_size * sizeof(*devPtrXX), cudaMemcpyHostToDevice));
int *P, *INFO;
cudacall(cudaMalloc(&P, f * batch_size * sizeof(int)));
cudacall(cudaMalloc(&INFO, batch_size * sizeof(int)));
cublasSgetrfBatched(handle, F, devPtrXX, F, P, INFO, batch_size);
cudaDeviceSynchronize();
cudaCheckError();
//gettimeofday(&tv1, NULL);
//elapsed = (tv1.tv_sec - tv0.tv_sec)
// + (tv1.tv_usec - tv0.tv_usec) / 1000000.0;
//printf("\t %f seconds. \n", elapsed);
//printf("******* solve xx * thetaT = yTXT with CUDA 7.\n");
float **devPtrYTXTHost = 0;
float **devPtrYTXT = 0;
devPtrYTXTHost = (float**) malloc(batch_size * sizeof(devPtrYTXTHost[0]));
for (int k = 0; k < batch_size; k++) {
devPtrYTXTHost[k] = &yTXT[k * F];
}
cudacall(cudaMalloc((void** ) &devPtrYTXT, batch_size * sizeof(*devPtrYTXT)));
cudacall(cudaMemcpy(devPtrYTXT, devPtrYTXTHost, batch_size * sizeof(*devPtrYTXT),cudaMemcpyHostToDevice));
int * info2 = (int *) malloc(sizeof(int));
cublasSgetrsBatched(handle, CUBLAS_OP_N, F, 1,
(const float ** ) devPtrXX, F, P, devPtrYTXT, F, info2, batch_size);
cudaDeviceSynchronize();
cudaCheckError();
cudacall( cudaMemcpy( &thetaT[batch_offset * F], yTXT,
batch_size * F * sizeof(float), cudaMemcpyDeviceToDevice) );
//gettimeofday(&tv2, NULL);
//elapsed = (tv2.tv_sec - tv1.tv_sec)
// + (tv2.tv_usec - tv1.tv_usec) / 1000000.0;
//printf("\t %f seconds. \n", elapsed);
/*
//testing purpose
float* yTXHost = (float *) malloc(f * n * sizeof(yTXHost[0]));
cudacall(cudaMemcpy(yTXHost, yTXT, n * f * sizeof(float), cudaMemcpyDeviceToHost));
printf("\n*********yTXT***\n");
for (int i = 0; i < n * f; i++) {
printf("%f\t", yTXHost[i]);
}
printf("\n");
*/
/*
float* thetaTHost = (float *) malloc(f * n * sizeof(thetaTHost[0]));
cudacall( cudaMemcpy(thetaTHost, thetaT, n * f * sizeof(float),cudaMemcpyDeviceToHost));
printf("\n*********ThetaT***\n");
for (int i = 0; i < n * f; i++) {
printf("%f\t", thetaTHost[i]);
}
printf("\n");
*/
free(devPtrXXHost);
cudaFree(devPtrXX);
cudaFree(P);
cudaFree(INFO);
free(info2);
free(devPtrYTXTHost);
cudaFree(devPtrYTXT);
return 0;
}
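// RMSE: one thread per training nonzero. Each thread rebuilds the prediction
// as the F-dim dot product of the user and item factors (read through the
// thetaT/XT texture references) and accumulates the squared error with
// atomicAdd into error_size buckets to spread atomic contention; the buckets
// are reduced later with cublasSasum.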
__global__ void RMSE(const float * csrVal, const int* cooRowIndex,
const int* csrColIndex, const float * thetaT, const float * XT, float * error, const int nnz,
const int error_size) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < nnz) {
int row = cooRowIndex[i];
int col = csrColIndex[i];
float e = csrVal[i];
//if(i%1000000==0) printf("row: %d, col: %d, csrVal[%d]: %f.\t", row, col, i, e);
for (int k = 0; k < F; k++) {
e -= tex1Dfetch(thetaTTexRef, F * col + k) * tex1Dfetch(xTTexRef, F * row + k);
}
atomicAdd(&error[i%error_size], e*e);
//error[i] = e*e;
//if(i%1000000==0) printf("error[%d]: %f.\n", i, e);
}
}
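// RMSE_CSC: CSC variant used for both train and test error. One block per
// column; threads stride over that column's nonzeros, reading thetaT through
// the texture reference and XT from global memory. NaN errors (a user/item
// that does not show up in training) are zeroed and counted in nan[0].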
__global__ void RMSE_CSC(const float * cscVal, const int* cscRowIndex,
const int* cscColIndex, const float * thetaT, const float * XT, float * error,
const int error_size, int* nan) {
int col = blockIdx.x;
int start = cscColIndex[col];
int end = cscColIndex[col + 1];
if (col < N && threadIdx.x < end - start) {
for (int i = 0; threadIdx.x + i*blockDim.x < end - start; i++) {
int index = start + i*blockDim.x + threadIdx.x;
float e0 = cscVal[index];
float e = e0;
//if(isnan(e)) printf("ERROR: NAN***\n");
int row = cscRowIndex[index];
//if(isfinite(((double)row))) printf("ERROR: NAN@@@\n");
for (int k = 0; k < F; k++) {
e -= tex1Dfetch(thetaTTexRef, F * col + k) * XT[ F * row + k];
//TODO: fix this, a user/item does not show up in training
//if(isnan(e1)) printf("e1: NAN!!!%d, %d, %d\n", index, col, row);
//if(isnan(e2)) printf("e2: NAN!!!%d, %d, %d\n", index, col, row);
}
if(isnan(e)) {
e = 0;
atomicAdd(&nan[0],1);
}
//if(isnan(e)) printf("ERROR: NAN!!!%d, %d, %d\n", index, col, row);
atomicAdd(&error[row%error_size], e*e);
}
}
}
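// main: multi-GPU ALS on the hugewiki dataset. Per iteration it
// (1) updates X in X_BATCH row batches distributed over the GPUs with OpenMP,
// solving each batch with the CG solver (the batched-LU path via updateX is
// kept but commented out), (2) updates theta in THETA_BATCH column batches,
// gathering the per-GPU partial Hessians xx and right-hand sides yTXT onto
// GPU 0 with cublasSaxpy before the batched solve, and (3) evaluates
// train/test RMSE per CSC partition.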
int main() {
printf("enable p2p among %d GPUs if available.\n", GPU_COUNT);
enableP2P(GPU_COUNT);
//initialize cublas, cusparse
cublasHandle_t handle[GPU_COUNT];
cusparseHandle_t cushandle[GPU_COUNT];
for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){
cudacall(cudaSetDevice(gpu_id));
cublascall(cublasCreate(&handle[gpu_id]));
cusparsecall(cusparseCreate(&cushandle[gpu_id]));
}
cudaSetDevice(DEVICEID);
long m = M;
long n = N;
long f = F;
long nnz = NNZ;
float lambda = LAMBDA;
unsigned int* csrRowIndexHostPtr;
cudacall(cudaMallocHost( (void** ) &csrRowIndexHostPtr, (m + 1) * sizeof(int)) );
int* csrColIndexHostPtr;
cudacall(cudaMallocHost( (void** ) &csrColIndexHostPtr, nnz * sizeof(int)) );
float* csrValHostPtr;
cudacall(cudaMallocHost( (void** ) &csrValHostPtr, nnz * sizeof(float)) );
long csc_nnz[GPU_COUNT] = {777607310, 773335400, 777305655, 772895948};
long csc_m[GPU_COUNT] = {12520650, 12520650, 12520650, 12520653};
long csc_nnz_test[GPU_COUNT] = {86418516, 85913272, 86357875, 85883667};
float* cscValHostPtr[GPU_COUNT];
int* cscRowIndexHostPtr[GPU_COUNT];
int* cscColIndexHostPtr[GPU_COUNT];
for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){
cudacall(cudaMallocHost( (void** ) &cscValHostPtr[gpu_id], csc_nnz[gpu_id] * sizeof(float)) );
cudacall(cudaMallocHost( (void** ) &cscRowIndexHostPtr[gpu_id], csc_nnz[gpu_id] * sizeof(int)) );
cudacall(cudaMallocHost( (void** ) &cscColIndexHostPtr[gpu_id], (n+1) * sizeof(int)) );
}
float* testCscValHostPtr[GPU_COUNT];
int* testCscRowIndexHostPtr[GPU_COUNT];
int* testCscColIndexHostPtr[GPU_COUNT];
for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){
cudacall(cudaMallocHost( (void** ) &testCscValHostPtr[gpu_id], csc_nnz_test[gpu_id] * sizeof(float)) );
cudacall(cudaMallocHost( (void** ) &testCscRowIndexHostPtr[gpu_id], csc_nnz_test[gpu_id] * sizeof(int)) );
cudacall(cudaMallocHost( (void** ) &testCscColIndexHostPtr[gpu_id], (n+1) * sizeof(int)) );
}
//calculate X from thetaT first, need to initialize thetaT
float* thetaTHost;
cudacall(cudaMallocHost( (void** ) &thetaTHost, n * f * sizeof(float)) );
//index of XT_h needs a long -- beyond what int32 can handle (2^31 or 2^32)
float * XT_h;
//cudacall (cudaHostAlloc((void **)&XT_h, f * m * sizeof(XT_h[0]), cudaHostAllocMapped) );
cudacall (cudaMallocHost((void **)&XT_h, f * m * sizeof(XT_h[0])) );
//initialize thetaT on host
srand (time(0));
for (int k = 0; k < n * f; k++)
thetaTHost[k] = 0.5*((float) rand() / (RAND_MAX));
//thetaTHost[k] = 0.1*((float) rand() / (float)RAND_MAX);
//thetaTHost[k] = 0;
//CG needs an initial value of XT
memset(XT_h,0,m*f*sizeof(float));
//for (long k = 0; k < m * f; k++)
// XT_h[k] = 0.5*((float) rand() / (RAND_MAX));
//device pointers
int * csrRowIndex[GPU_COUNT];
int * csrColIndex[GPU_COUNT];
float * csrVal[GPU_COUNT];
float * thetaT[GPU_COUNT];
float * XT_d[GPU_COUNT];
float * cscVal[GPU_COUNT];
int * cscRowIndex[GPU_COUNT];
int * cscColIndex[GPU_COUNT];
printf("*******loading training and testing sets to host.\n");
loadCSRSparseMatrix("../data/hugewiki/hugewiki_R_train_csr.data", "../data/hugewiki/hugewiki_R_train_csr.indptr", "../data/hugewiki/hugewiki_R_train_csr.indices",
csrValHostPtr, csrRowIndexHostPtr, csrColIndexHostPtr);
omp_set_num_threads(GPU_COUNT);
#pragma omp parallel
{
int gpu_id = omp_get_thread_num();
std::string str1("../data/hugewiki/hugewiki_R_train_csc.data.bin");
std::string str2("../data/hugewiki/hugewiki_R_train_csc.indices.bin");
std::string str3("../data/hugewiki/hugewiki_R_train_csc.indptr.bin");
//printf("%s",(str+to_string(gpu_id)).c_str());
loadCSCSparseMatrixInBatch((str1 + to_string(gpu_id)).c_str(),
(str2 + to_string(gpu_id)).c_str(),
(str3 + to_string(gpu_id)).c_str(),
cscValHostPtr[gpu_id], cscRowIndexHostPtr[gpu_id], cscColIndexHostPtr[gpu_id], csc_nnz[gpu_id], n);
}
#pragma omp parallel
{
int gpu_id = omp_get_thread_num();
std::string str1("../data/hugewiki/hugewiki_R_test_csc.data.bin");
std::string str2("../data/hugewiki/hugewiki_R_test_csc.indices.bin");
std::string str3("../data/hugewiki/hugewiki_R_test_csc.indptr.bin");
//printf("%s",(str+to_string(gpu_id)).c_str());
loadCSCSparseMatrixInBatch((str1 + to_string(gpu_id)).c_str(),
(str2 + to_string(gpu_id)).c_str(),
(str3 + to_string(gpu_id)).c_str(),
testCscValHostPtr[gpu_id], testCscRowIndexHostPtr[gpu_id],
testCscColIndexHostPtr[gpu_id], csc_nnz_test[gpu_id], n);
}
printf("\n loaded csr to host; print data, row and col array\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%f ", csrValHostPtr[i]);
}
printf("\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%d ", csrRowIndexHostPtr[i]);
}
printf("\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%d ", csrColIndexHostPtr[i]);
}
printf("\n");
printf("\n loaded csc to host; print data, row and col array\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%f ", cscValHostPtr[0][i]);
}
printf("\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%d ", cscRowIndexHostPtr[0][i]);
}
printf("\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%d ", cscColIndexHostPtr[0][i]);
}
printf("\n");
printf("\n loaded csc test to host; print data, row and col array\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%f ", testCscValHostPtr[0][i]);
}
printf("\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%d ", testCscRowIndexHostPtr[0][i]);
}
printf("\n");
for (int i = 0; i < nnz && i < 10; i++) {
printf("%d ", testCscColIndexHostPtr[0][i]);
}
printf("\n");
cudacall(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared));
//64-bit smem access
//http://acceleware.com/blog/maximizing-shared-memory-bandwidth-nvidia-kepler-gpus
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
cudaSharedMemConfig pConfig;
cudaDeviceGetSharedMemConfig (&pConfig);
//printf("%d\n", pConfig);
cudacall(cudaSetDevice(DEVICEID));
cusparseMatDescr_t descr;
cusparsecall( cusparseCreateMatDescr(&descr));
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
const float alpha = 1.0f;
const float beta = 0.0f;
for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){
cudacall(cudaSetDevice(gpu_id));
cudacall(cudaMalloc((void** ) &thetaT[gpu_id], f * n * sizeof(float)));
printf("*******copy memory to GPU %d...\n", gpu_id);
cudacall(cudaMemcpy(thetaT[gpu_id], thetaTHost, (size_t ) (n * f * sizeof(float)), cudaMemcpyHostToDevice));
}
//host pointers for cublas batch operations
float ** devPtrTTHost[GPU_COUNT];
float **devPtrYthetaTHost[GPU_COUNT];
for(int iter = 0; iter < ITERS ; iter ++){
printf("---------------------------update X iteration %d ----------------------------------\n", iter);
auto t0 = std::chrono::high_resolution_clock::now();
//parallel in all GPUs, or only 1
int parallelism_level = GPU_COUNT;
omp_set_num_threads(parallelism_level);
//gpu memory to be used across batches
//last batch size, the largest among batches
int batch_size_max = m - (X_BATCH - 1)*(m/X_BATCH);
int counter = 0;
#pragma omp parallel shared (counter)
{
//this is the code on one gpu
int gpu_id = omp_get_thread_num();
cudacall(cudaSetDevice(gpu_id));
//for batch solvers
cudacall(cudaMallocHost( (void** ) &devPtrTTHost[gpu_id], batch_size_max * sizeof(*devPtrTTHost) ) );
cudacall(cudaMallocHost( (void** ) &devPtrYthetaTHost[gpu_id], batch_size_max * sizeof(*devPtrYthetaTHost) ) );
float * thetaT_local = thetaT[gpu_id];
cudacall (cudaBindTexture(NULL, thetaTTexRef, thetaT_local, n * f * sizeof(float)));
float * tt = 0;
//last batch size, the largest among batches
int batch_size = m - (X_BATCH - 1)*(m/X_BATCH);
//TODO: to get batch_nnz_max from csrRowIndexHostPtr
int batch_nnz_max = 16000000;
long batch_offset;
cudacall(cudaMalloc((void** ) &csrRowIndex[gpu_id],(batch_size + 1) * sizeof(csrRowIndex[0][0])));
cudacall(cudaMalloc((void** ) &csrColIndex[gpu_id], batch_nnz_max * sizeof(csrColIndex[0][0])));
cudacall(cudaMalloc((void** ) &csrVal[gpu_id], batch_nnz_max * sizeof(csrVal[0][0])));
float * ytheta = 0;
float * ythetaT = 0;
cudacall(cudaMalloc((void** ) &ytheta, f * batch_size * sizeof(ytheta[0])));
cudacall(cudaMalloc((void** ) &ythetaT, f * batch_size * sizeof(ythetaT[0])));
#ifdef CUMF_TT_FP16
cudacall(cudaMalloc((void** ) &tt, f/2 * f * batch_size * sizeof(float)));
#else
cudacall(cudaMalloc((void** ) &tt, f * f * batch_size * sizeof(float)));
#endif
//for batch solvers
float **devPtrTT = 0;
float **devPtrYthetaT = 0;
int *P, *INFO;
cudacall(cudaMalloc((void** ) &devPtrTT, batch_size * sizeof(*devPtrTT)));
cudacall(cudaMalloc(&P, f * batch_size * sizeof(int)) );
cudacall(cudaMalloc(&INFO, batch_size * sizeof(int) ));
cudacall(cudaMalloc((void** ) &devPtrYthetaT, batch_size * sizeof(*devPtrYthetaT)));
int batch_id = 0;
//gpu 0 handles batches 0, 4, 8 ...
//for(int batch_id = gpu_id; batch_id < X_BATCH; batch_id += parallelism_level)
while(counter < X_BATCH)
{
#pragma omp critical
{
batch_id = counter;
counter = counter + 1;
}
auto t2 = std::chrono::high_resolution_clock::now();
if(batch_id != X_BATCH - 1)
batch_size = m/X_BATCH;
batch_offset = batch_id * (m/X_BATCH);
int batch_nnz =
csrRowIndexHostPtr[batch_offset + batch_size] - csrRowIndexHostPtr[batch_offset];
printf("\tbatch %d of %d; size: %d, offset: %ld, batch_nnz %d, on gpu %d\n",
batch_id, X_BATCH, batch_size, batch_offset, batch_nnz, gpu_id);
//copy CSR rating matrices in
cudacall(cudaMemcpy(csrRowIndex[gpu_id], &csrRowIndexHostPtr[batch_offset],
(batch_size + 1) * sizeof(csrRowIndex[0][0]), cudaMemcpyHostToDevice));
//in place update: csrRowIndex --> csrRowIndex - csrRowIndex[0]
zeroIndex<<<(batch_size + 1 - 1)/1024 + 1, 1024>>>
(csrRowIndex[gpu_id], csrRowIndexHostPtr[batch_offset], batch_size + 1);
cudacall(cudaMemcpy(csrColIndex[gpu_id], &csrColIndexHostPtr[csrRowIndexHostPtr[batch_offset]],
batch_nnz * sizeof(csrColIndex[0][0]), cudaMemcpyHostToDevice));
cudacall(cudaMemcpy(csrVal[gpu_id], &csrValHostPtr[csrRowIndexHostPtr[batch_offset]],
batch_nnz * sizeof(csrVal[0][0]),cudaMemcpyHostToDevice));
//process right hand: Y*theta
cusparseScsrmm2(cushandle[gpu_id], CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE, batch_size, f, n, batch_nnz, &alpha, descr, csrVal[gpu_id],
csrRowIndex[gpu_id], csrColIndex[gpu_id], thetaT[gpu_id], f, &beta, ytheta, batch_size);
//transpose ytheta: ytheta: m*f; need ythetaT = (ytheta).T = f*m
cublasSgeam(handle[gpu_id], CUBLAS_OP_T, CUBLAS_OP_N, f, batch_size, &alpha,
(const float * ) ytheta, batch_size, &beta, ythetaT, f, ythetaT, f);
cudaDeviceSynchronize();
cudaCheckError();
//generate left-hand: tt: batch_size*(F*F)
auto tX = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = tX - t2;
printf("\t\t batch %d before tt kernel gpu: %d, seconds: %f \n",
batch_id, gpu_id, elapsed.count());
auto t1 = std::chrono::high_resolution_clock::now();
#ifdef CUMF_TT_FP16
get_hermitian100_tt_fp16<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>>
(0, (half2*) tt, csrRowIndex[gpu_id], csrColIndex[gpu_id], lambda, batch_size, thetaT[gpu_id]);
#else
//get_hermitian_x<<<batch_size, 64>>>
// (tt, csrRowIndex[gpu_id], csrColIndex[gpu_id], lambda);
//updateXByBlock2pRegDsmemTile<<<batch_size, F>>>
// (tt, csrRowIndex[gpu_id], csrColIndex[gpu_id], lambda);
get_hermitian100<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>>
(0, tt, csrRowIndex[gpu_id], csrColIndex[gpu_id], lambda, batch_size, thetaT[gpu_id]);
#endif
cudaDeviceSynchronize();
cudaCheckError();
tX = std::chrono::high_resolution_clock::now();
elapsed = tX - t1;
printf("\t\t batch %d tt kernel gpu: %d, seconds: %f \n",
batch_id, gpu_id, elapsed.count());
t1 = std::chrono::high_resolution_clock::now();
/*
#ifdef CUMF_SAVE_MODEL
if(iter==0&&batch_id==0)
saveDeviceFloatArrayToFile(std::string("../log/0904/hugewiki.tt.hermitkernel"), f * f * batch_size, tt);
#endif
updateX(batch_id, batch_size, batch_offset, ythetaT, tt, XT_h,
handle[gpu_id], m, n, f, nnz, devPtrTTHost[gpu_id], devPtrYthetaTHost[gpu_id],
devPtrTT, devPtrYthetaT, P, INFO);
#ifdef CUMF_SAVE_MODEL
if(iter==0&&batch_id==0)
saveDeviceFloatArrayToFile(std::string("../log/0904/hugewiki.lu.hermitkernel.xt"), f * batch_size, ythetaT);
#endif
*/
///*
float * XT = 0;
cudacall(cudaMalloc((void** ) &XT, f * batch_size * sizeof(XT[0])));
cudacall( cudaMemcpy(XT, &XT_h[batch_offset * F],
batch_size * F * sizeof(float), cudaMemcpyHostToDevice) );
#ifdef CUMF_TT_FP16
printf("CG solver with fp16.\n");
updateXWithCGHost_tt_fp16(tt, XT, ythetaT, batch_size, f, 6);
#else
printf("CG solver with fp32.\n");
updateXWithCGHost(tt, XT, ythetaT, batch_size, 100, 100);
#endif
cudacall( cudaMemcpy(&XT_h[batch_offset * F], XT,
batch_size * F * sizeof(float), cudaMemcpyDeviceToHost) );
#ifdef CUMF_SAVE_MODEL
if(batch_id==0)
saveDeviceFloatArrayToFile(std::string("../log/0903/hugewiki.cg.xt.")+ to_string(iter), f * batch_size, XT);
#endif
cudacall(cudaFree(XT));
//*/
tX = std::chrono::high_resolution_clock::now();
elapsed = tX - t1;
printf("\t\t batch %d updateX by solving tt , gpu: %d, seconds: %f \n",
batch_id, gpu_id, elapsed.count());
tX = std::chrono::high_resolution_clock::now();
elapsed = tX - t2;
printf("\tbatch %d on gpu %d, runs %f \n", batch_id, gpu_id, elapsed.count());
}//end of update x batch
auto tX = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = tX - t0;
printf("update X run %f seconds at gpu %d.\n", elapsed.count(), gpu_id);
cudacall(cudaFree(ytheta));
cudacall(cudaFree(tt));
cudacall(cudaFree(csrVal[gpu_id]));
cudacall(cudaFree(csrRowIndex[gpu_id]));
cudacall(cudaFree(csrColIndex[gpu_id]));
cudacall(cudaFree(ythetaT));
cudaFree(P);
cudaFree(INFO);
cudaFree(devPtrTT);
cudaFree(devPtrYthetaT);
cudacall(cudaFreeHost(devPtrTTHost[gpu_id]));
cudacall(cudaFreeHost(devPtrYthetaTHost[gpu_id]));
}//end of omp parallel loop
auto tX = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = tX - t0;
printf("update X run %f seconds, gridSize: %ld \n", elapsed.count(), m);
auto start = std::chrono::high_resolution_clock::now();
printf("---------------------------------- update theta iteration %d----------------------------------\n",
iter);
//in batches, when N is huge
for(int batch_id = 0; batch_id< THETA_BATCH; batch_id ++){
int batch_size = 0;
if(batch_id != THETA_BATCH - 1)
batch_size = n/THETA_BATCH;
else
batch_size = n - batch_id*(n/THETA_BATCH);
int batch_offset = batch_id * (n/THETA_BATCH);
printf("batch %d / %d, size: %d\n", batch_id + 1, THETA_BATCH, batch_size);
float * yTX[GPU_COUNT];
float * yTXT[GPU_COUNT];
const float alpha = 1.0f;
const float beta = 0.0f;
float * xx[GPU_COUNT];
omp_set_num_threads(GPU_COUNT);
t0 = std::chrono::high_resolution_clock::now();
#pragma omp parallel
{
int gpu_id = omp_get_thread_num();
long offset = 0;
for(int k = 0; k < gpu_id; k ++)
offset += csc_m[k];
cudacall(cudaSetDevice(gpu_id));
printf("\tGather xx on GPU %d.\n",gpu_id);
auto t1 = std::chrono::high_resolution_clock::now();
//distribute XT[] to XT_d[i]
cudacall(cudaMalloc((void** ) &XT_d[gpu_id], f * csc_m[gpu_id] * sizeof(float)));
//printf("offset: %lld, copy XT_h[%lld] to XT_d[%d]:\n", offset, offset*f, gpu_id);
cudacall(cudaMemcpy(XT_d[gpu_id], &XT_h[offset*f],
f * csc_m[gpu_id] * sizeof(float), cudaMemcpyHostToDevice));
//copy csc to GPU
int batch_nnz = cscColIndexHostPtr[gpu_id][batch_offset + batch_size] - cscColIndexHostPtr[gpu_id][batch_offset];
cudacall(cudaMalloc((void** ) &cscRowIndex[gpu_id],batch_nnz * sizeof(int)));
cudacall(cudaMalloc((void** ) &cscColIndex[gpu_id], (batch_size + 1) * sizeof(int)));
cudacall(cudaMalloc((void** ) &cscVal[gpu_id], batch_nnz * sizeof(float)));
cudaMemcpyAsync(cscRowIndex[gpu_id], &cscRowIndexHostPtr[gpu_id][cscColIndexHostPtr[gpu_id][batch_offset]],
batch_nnz * sizeof(cscRowIndex[0][0]), cudaMemcpyHostToDevice);
cudaMemcpy(cscColIndex[gpu_id], &cscColIndexHostPtr[gpu_id][batch_offset],
(batch_size + 1) * sizeof(cscColIndex[0][0]), cudaMemcpyHostToDevice);
cudaMemcpy(cscVal[gpu_id], &cscValHostPtr[gpu_id][cscColIndexHostPtr[gpu_id][batch_offset]],
batch_nnz * sizeof(cscVal[0][0]), cudaMemcpyHostToDevice);
cudacall(cudaMalloc((void** ) &yTXT[gpu_id], f * batch_size * sizeof(float)));
cudacall(cudaMalloc((void** ) &yTX[gpu_id], f * batch_size * sizeof(float)));
cudacall(cudaMalloc((void** ) &xx[gpu_id], f * f * batch_size * sizeof(float)));
auto tX = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = tX - t1;
printf("\t\tbatch %d memory alloc and cpy gpu %d seconds: %f.\n",
batch_id, gpu_id, elapsed.count());
//in place update: cscColIndex --> cscColIndex - cscColIndex[0]
zeroIndex<<<(batch_size + 1 - 1)/256 + 1, 256>>>
(cscColIndex[gpu_id], cscColIndexHostPtr[gpu_id][batch_offset], batch_size + 1);
//process right-hand side: (Y'*X)'
cudaDeviceSynchronize();
cudaCheckError();
t1 = std::chrono::high_resolution_clock::now();
cusparseScsrmm2(cushandle[gpu_id], CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE, batch_size, f, csc_m[gpu_id],
batch_nnz, &alpha, descr, cscVal[gpu_id], cscColIndex[gpu_id],
cscRowIndex[gpu_id], XT_d[gpu_id], f, &beta, yTX[gpu_id], batch_size);
cublasSgeam(handle[gpu_id], CUBLAS_OP_T, CUBLAS_OP_N, f, batch_size, &alpha,
(const float * ) yTX[gpu_id], batch_size, &beta, yTXT[gpu_id], f, yTXT[gpu_id], f);
cudaDeviceSynchronize();
cudaCheckError();
tX = std::chrono::high_resolution_clock::now();
elapsed = tX - t1;
printf("\t\tbatch %d right-hand side gpu %d seconds: %f.\n", batch_id, gpu_id, elapsed.count());
//process left-hand side: generate hessian matrix xx
t1 = std::chrono::high_resolution_clock::now();
get_hermitian_theta<<<batch_size, 64>>>
(xx[gpu_id], cscRowIndex[gpu_id], cscColIndex[gpu_id], lambda, XT_d[gpu_id]);
//get_hermitian100<<<batch_size, 64, SCAN_BATCH * f/2*sizeof(float2)>>>
// (0, xx[gpu_id], cscColIndex[gpu_id], cscRowIndex[gpu_id], lambda, batch_size, XT_d[gpu_id]);
//updateThetaByBlock2pRegDsmemTile<<<batch_size, F>>>
// (xx[gpu_id], cscRowIndex[gpu_id], cscColIndex[gpu_id], lambda, XT_d[gpu_id]);
cudaDeviceSynchronize();
cudaCheckError();
tX = std::chrono::high_resolution_clock::now();
elapsed = tX - t1;
printf("\t\tbatch %d xx kernel gpu %d seconds: %f.\n", batch_id, gpu_id, elapsed.count());
t1 = std::chrono::high_resolution_clock::now();
cudacall(cudaFree(yTX[gpu_id]));
cudacall(cudaFree(cscRowIndex[gpu_id]));
cudacall(cudaFree(cscColIndex[gpu_id]));
cudacall(cudaFree(cscVal[gpu_id]));
tX = std::chrono::high_resolution_clock::now();
elapsed = tX - t1;
printf("\t\tbatch %d cudaFree gpu %d seconds: %f.\n", batch_id, gpu_id, elapsed.count());
}
tX = std::chrono::high_resolution_clock::now();
elapsed = tX - t0;
printf("\tbatch %d gather xx in %d GPUs run %f seconds.\n",
batch_id, GPU_COUNT, elapsed.count());
t0 = std::chrono::high_resolution_clock::now();
printf("\t\tadd xx before updateTheta on a given GPU.\n");
//xx[0] += xx[1] + xx[2] + xx[3]
cudacall(cudaSetDevice(0));
float * xx_hotel;
cudacall(cudaMalloc((void** ) &xx_hotel, f * f * batch_size * sizeof(float)));
cudaCheckError();
for(int gpu_id = 1; gpu_id < GPU_COUNT; gpu_id ++){
//printf("copy from gpu:%d.\n", gpu_id);
cudacall(cudaMemcpy(xx_hotel, xx[gpu_id], f * f * batch_size * sizeof(float), cudaMemcpyDefault));
cudaDeviceSynchronize();
cudaCheckError();
//printf("add.\n");
cublasSaxpy(handle[0], f * f * batch_size, &alpha, xx_hotel, 1, xx[0], 1);
cudaDeviceSynchronize();
cudaCheckError();
}
cudacall(cudaFree(xx_hotel));
printf("\t\tadd yTXT before updateTheta on a given GPU.\n");
//yTXT[0] += yTXT[1] + yTXT[2] + yTXT[3]
float * yTXT_hotel;
cudacall(cudaMalloc((void** ) &yTXT_hotel, f * batch_size * sizeof(float)));
for(int gpu_id = 1; gpu_id < GPU_COUNT; gpu_id ++){
cudacall(cudaMemcpy(yTXT_hotel, yTXT[gpu_id], f * batch_size * sizeof(float), cudaMemcpyDefault));
cublasSaxpy(handle[0], f * batch_size, &alpha, yTXT_hotel, 1, yTXT[0], 1);
cudaDeviceSynchronize();
cudaCheckError();
}
cudacall(cudaFree(yTXT_hotel));
//printf("*******invoke updateTheta with batch_size: %d, batch_offset: %d.\n", batch_size, batch_offset);
updateTheta(batch_size, batch_offset, xx[0], yTXT[0], thetaT[0], handle[0], n, f);
tX = std::chrono::high_resolution_clock::now();
elapsed = tX - t0;
printf("\tbatch: %d gather and updateTheta in one GPU run %f seconds.\n",
batch_id, elapsed.count());
for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){
cudacall(cudaFree(xx[gpu_id]));
cudacall(cudaFree(yTXT[gpu_id]));
cudacall(cudaFree(XT_d[gpu_id]));
}
}//end of update theta batches
//propagate thetaT[0] to non-anchor devices
for(int gpu_id = 1; gpu_id < GPU_COUNT; gpu_id ++)
cudacall( cudaMemcpy(thetaT[gpu_id], thetaT[0], n * F * sizeof(float), cudaMemcpyDeviceToDevice) );
auto end = std::chrono::high_resolution_clock::now();
elapsed = end - start;
printf("update theta run %f seconds, gridSize: %ld.\n", elapsed.count(), n);
//////////////////////////////////////////////////////////////////////////////////////////////////
printf("Calculate RMSE in batches.\n");
//has to calculate in batches since cooRowIndex + csrColIndex + csrVal is so big
cudacall(cudaSetDevice(0));
float * errors_train = 0;
float * errors_test = 0;
int error_size = 4096;
int* nan_train = 0;
int* nan_test = 0;
cudacall(cudaMalloc((void** ) &errors_train, error_size * sizeof(errors_train[0])));
cudacall(cudaMemset(errors_train, 0, error_size*sizeof(float)) );
cudacall(cudaMalloc((void** ) &errors_test, error_size * sizeof(errors_test[0])));
cudacall(cudaMemset(errors_test, 0, error_size*sizeof(float)) );
for(int batch_id = 0; batch_id < GPU_COUNT; batch_id ++){
printf("batch: %d\n", batch_id);
int row_offset = 0;
for(int k = 0; k < batch_id; k ++){
row_offset += csc_m[k];
}
float * XT_small;
int * cscRowIndex_small;
int * cscColIndex_small;
float * cscVal_small;
cudacall(cudaMalloc((void** ) &XT_small, f * csc_m[batch_id] * sizeof(float)));
cudacall(cudaMemcpy(XT_small, &XT_h[(long) row_offset*f], f * csc_m[batch_id] * sizeof(float), cudaMemcpyHostToDevice));
printf("cal train rmse in batch: %d/%d, nnz:%ld, n(col): %ld, \n",
batch_id, GPU_COUNT, csc_nnz[batch_id], n);
cudacall(cudaMalloc((void** ) &cscRowIndex_small,csc_nnz[batch_id] * sizeof(int)));
cudacall(cudaMalloc((void** ) &cscColIndex_small, (n + 1) * sizeof(int)));
cudacall(cudaMalloc((void** ) &cscVal_small, csc_nnz[batch_id] * sizeof(float)));
cudacall(cudaMemcpy(cscRowIndex_small, cscRowIndexHostPtr[batch_id],
csc_nnz[batch_id] * sizeof(int), cudaMemcpyHostToDevice));
cudacall(cudaMemcpy(cscColIndex_small, cscColIndexHostPtr[batch_id],
(n + 1) * sizeof(int), cudaMemcpyHostToDevice));
cudacall(cudaMemcpy(cscVal_small, cscValHostPtr[batch_id],
csc_nnz[batch_id] * sizeof(float), cudaMemcpyHostToDevice));
cudacall(cudaMalloc((void** ) &nan_train, sizeof(int)));
cudacall( cudaMemset(nan_train, 0, sizeof(int)) );
cudacall(cudaMalloc((void** ) &nan_test, sizeof(int)));
cudacall( cudaMemset(nan_test, 0, sizeof(int)) );
RMSE_CSC<<<n, 512>>>(cscVal_small, cscRowIndex_small,
cscColIndex_small, thetaT[0], XT_small, errors_train, error_size, nan_train);
cudaDeviceSynchronize();
cudaCheckError();
cudacall(cudaFree(cscRowIndex_small));
cudacall(cudaFree(cscColIndex_small));
cudacall(cudaFree(cscVal_small));
printf("cal test rmse in batch: %d/%d, nnz_test:%ld, n(col): %ld, \n",
batch_id, GPU_COUNT, csc_nnz_test[batch_id], n);
cudacall(cudaMalloc((void** ) &cscRowIndex_small,csc_nnz_test[batch_id] * sizeof(int)));
cudacall(cudaMalloc((void** ) &cscColIndex_small, (n + 1) * sizeof(int)));
cudacall(cudaMalloc((void** ) &cscVal_small, csc_nnz_test[batch_id] * sizeof(float)));
cudacall(cudaMemcpy(cscRowIndex_small, testCscRowIndexHostPtr[batch_id],
csc_nnz_test[batch_id] * sizeof(int), cudaMemcpyHostToDevice));
cudacall(cudaMemcpy(cscColIndex_small, testCscColIndexHostPtr[batch_id],
(n + 1) * sizeof(int), cudaMemcpyHostToDevice));
cudacall(cudaMemcpy(cscVal_small, testCscValHostPtr[batch_id],
csc_nnz_test[batch_id] * sizeof(float), cudaMemcpyHostToDevice));
RMSE_CSC<<<n, 512>>>(cscVal_small, cscRowIndex_small,
cscColIndex_small, thetaT[0], XT_small, errors_test, error_size, nan_test);
cudaDeviceSynchronize();
cudaCheckError();
int* nan_train_host = (int*) malloc (sizeof(int));
int* nan_test_host = (int*) malloc (sizeof(int));
cudaMemcpy(nan_train_host, nan_train, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(nan_test_host, nan_test, sizeof(int), cudaMemcpyDeviceToHost);
printf("train #nan: %d\n", *nan_train_host);
printf("test #nan: %d\n", *nan_test_host);
cudacall(cudaFree(nan_train));
cudacall(cudaFree(nan_test));
cudacall(cudaFree(cscRowIndex_small));
cudacall(cudaFree(cscColIndex_small));
cudacall(cudaFree(cscVal_small));
cudacall(cudaFree(XT_small));
}
printf("summarize RMSE: \n");
float* rmse_train = (float*) malloc (sizeof(float));
cublascall( cublasSasum(handle[0], error_size, errors_train, 1, rmse_train) );
cudaDeviceSynchronize();
cudaCheckError();
float* rmse_test = (float*) malloc (sizeof(float));
cublascall( cublasSasum(handle[0], error_size, errors_test, 1, rmse_test) );
cudaDeviceSynchronize();
cudaCheckError();
printf("@@@@@@@@@@@@@@@@@@@ Train RMSE in iter %d: %f\n", iter, sqrt((*rmse_train)/nnz));
printf("@@@@@@@@@@@@@@@@@@@ Test RMSE in iter %d: %f\n", iter, sqrt((*rmse_test)/(NNZ_TEST - 12750)));
cudacall(cudaFree(errors_train));
cudacall(cudaFree(errors_test));
//*/
}
/*
//save model to a file
cudacall(cudaMemcpy(thetaTHost, thetaT[0], n * f * sizeof(float), cudaMemcpyDeviceToHost) );
FILE * xfile = fopen("XT.data", "wb");
FILE * thetafile = fopen("thetaT.data", "wb");
fwrite(XT_h, sizeof(float), m*f, xfile);
fwrite(thetaTHost, sizeof(float), n*f, thetafile);
fclose(xfile);
fclose(thetafile);
*/
cudacall(cudaFreeHost(XT_h));
cudacall(cudaFreeHost(csrRowIndexHostPtr));
cudacall(cudaFreeHost(csrColIndexHostPtr));
cudacall(cudaFreeHost(csrValHostPtr));
cudaFreeHost(thetaTHost);
for(int gpu_id = 0; gpu_id < GPU_COUNT; gpu_id ++){
cudacall(cudaFreeHost(cscValHostPtr[gpu_id]));
cudacall(cudaFreeHost(cscRowIndexHostPtr[gpu_id]));
cudacall(cudaFreeHost(cscColIndexHostPtr[gpu_id]));
cudacall(cudaSetDevice(gpu_id));
//cudacall(cudaDeviceReset());
}
printf("ALS Done.\n");
return 0;
}
|
a0e2914e3666cbfbea3c6d0fa428d5339d5b0d7a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
#ifndef _SCAN_NAIVE_KERNEL_H_
#define _SCAN_NAIVE_KERNEL_H_
///////////////////////////////////////////////////////////////////////////////
//! Naive compute implementation of scan, one thread per element
//! Not work efficient: log(n) steps, but n * (log(n) - 1) adds.
//! Not shared storage efficient either -- this requires ping-ponging
//! arrays in shared memory due to hazards so 2 * n storage space.
//!
//! Pro: Simple
//! Con: Not work efficient
//!
//! @param g_odata output data in global memory
//! @param g_idata input data in global memory
//! @param n input number of elements to scan from input data
///////////////////////////////////////////////////////////////////////////////
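// Worked example (exclusive scan, n = 4, input [3, 1, 7, 0]):
// init (shift right, fill 0): temp = [0, 3, 1, 7]
// offset 1: threads with thid >= 1 add the value one slot to the left -> [0, 3, 4, 8]
// offset 2: threads with thid >= 2 add the value two slots to the left -> [0, 3, 4, 11]
// offset 4 >= n, the loop ends; g_odata = [0, 3, 4, 11].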
extern __shared__ float temp[];
__global__ void scan_naive(float *g_odata, float *g_idata, int n)
{
// Dynamically allocated shared memory for scan kernels
int thid = threadIdx.x;
int pout = 0;
int pin = 1;
// Cache the computational window in shared memory
temp[pout*n + thid] = (thid > 0) ? g_idata[thid-1] : 0;
for (int offset = 1; offset < n; offset *= 2)
{
pout = 1 - pout;
pin = 1 - pout;
__syncthreads();
temp[pout*n+thid] = temp[pin*n+thid];
if (thid >= offset)
temp[pout*n+thid] += temp[pin*n+thid - offset];
}
__syncthreads();
g_odata[thid] = temp[pout*n+thid];
}
#endif // #ifndef _SCAN_NAIVE_KERNEL_H_
| a0e2914e3666cbfbea3c6d0fa428d5339d5b0d7a.cu | /*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
#ifndef _SCAN_NAIVE_KERNEL_H_
#define _SCAN_NAIVE_KERNEL_H_
///////////////////////////////////////////////////////////////////////////////
//! Naive compute implementation of scan, one thread per element
//! Not work efficient: log(n) steps, but n * (log(n) - 1) adds.
//! Not shared storage efficient either -- this requires ping-ponging
//! arrays in shared memory due to hazards so 2 * n storage space.
//!
//! Pro: Simple
//! Con: Not work efficient
//!
//! @param g_odata output data in global memory
//! @param g_idata input data in global memory
//! @param n input number of elements to scan from input data
///////////////////////////////////////////////////////////////////////////////
extern __shared__ float temp[];
__global__ void scan_naive(float *g_odata, float *g_idata, int n)
{
// Dynamically allocated shared memory for scan kernels
int thid = threadIdx.x;
int pout = 0;
int pin = 1;
// Cache the computational window in shared memory
temp[pout*n + thid] = (thid > 0) ? g_idata[thid-1] : 0;
for (int offset = 1; offset < n; offset *= 2)
{
pout = 1 - pout;
pin = 1 - pout;
__syncthreads();
temp[pout*n+thid] = temp[pin*n+thid];
if (thid >= offset)
temp[pout*n+thid] += temp[pin*n+thid - offset];
}
__syncthreads();
g_odata[thid] = temp[pout*n+thid];
}
#endif // #ifndef _SCAN_NAIVE_KERNEL_H_
|
9365c5436526d886a46692a9089e557f05187a6c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
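// uplo_round: applies round() to one triangle of an sd x sd block (lower or
// upper depending on the sign of `bottom`), copying from a into b; when
// unit == 132 the comparison is strict, so the diagonal is left untouched.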
__global__ void uplo_round (const int sd, const int unit, const int bottom, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < sd);
const bool check = valid &&
((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
if (check) {
b[offset_b + gid_0 + gid_1 * ld_b] = CAST(round)(a[offset_a + gid_0 + gid_1 * ld_a]);
}
} | 9365c5436526d886a46692a9089e557f05187a6c.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void uplo_round (const int sd, const int unit, const int bottom, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < sd);
const bool check = valid &&
((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
if (check) {
b[offset_b + gid_0 + gid_1 * ld_b] = CAST(round)(a[offset_a + gid_0 + gid_1 * ld_a]);
}
} |
c841f46fa28d193b16ed338f6a0addeba588d510.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define BLK_CHN 64
#define WLEN 1
#define WLEN_N 2
__global__ void NormalizeL(const float *input, float *output) {
const int H = 256;
const int W = 256;
const int h = blockIdx.x * blockDim.x + threadIdx.x;
const int w = (blockIdx.y * blockDim.y + threadIdx.y)*WLEN_N;
if (h >= H || w >= W) return;
const int hW_w = h * W + w;
for(int j=0;j<WLEN_N;j++) {
output[hW_w+j] = input[hW_w+j]/100-0.5;
}
}
__global__ void Conv2d(const float *input, const float *weight, const float *bias, float *output,
const int stride, const int pad, const int dilation, const int has_bias_int,
const int C, const int H,
const int K, const int R,
const int OH,
const int with_relu_int) {
const int W = H;
const int S = R;
const int OW = OH;
const int k = blockIdx.x * blockDim.x + threadIdx.x;
const int oh = blockIdx.y * blockDim.y + threadIdx.y;
const int ow = (blockIdx.z * blockDim.z+ threadIdx.z)*WLEN;
if (k >= K || oh >= OH || ow >= OW) return;
const int RS = R * S;
const int kCRS = k * C * RS;
const float b_val = (has_bias_int==1) ? bias[k] : 0;
//float o[WLEN] = {b_val, b_val};
float o[WLEN] = {b_val};
for (int c = 0; c < C; ++c) {
const int kCRS_cRS = kCRS + c * RS;
const int cHW = c * H * W;
for (int r = 0; r < R; ++r) {
const int kCRS_cRS_rS = kCRS_cRS + r * S;
for (int s = 0; s < S; ++s) {
const int h = oh * stride - pad + r * dilation;
int w[WLEN];
w[0] = ow * stride - pad + s * dilation;
for(int j=1;j<WLEN;j++) {
w[j] = w[j-1] + stride;
}
const float wt = weight[kCRS_cRS_rS + s];
const int cHW_hW = cHW + h * W;
for (int j=0;j<WLEN;j++) {
if (h < 0 || h >= H || w[j] < 0 || w[j] >= W) {}
else {
o[j] += input[cHW_hW + w[j]] * wt;
}
}
}
}
}
const int kOHOW_ohOW_ow = k * OH * OW + oh * OW + ow;
for (int j=0;j<WLEN;j++) {
output[kOHOW_ohOW_ow+j] = (with_relu_int==1) ? fmaxf(o[j],0) : o[j];
}
}
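// Conv2d64: 3x3 convolution specialized for channel counts that are a
// multiple of BLK_CHN (64). For each group of 64 input channels the threads
// of a block cooperatively stage this output channel's weights into shared
// memory, then every thread accumulates its own output pixel(s) from those
// channels before moving to the next group. ReLU is fused into the store.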
__global__ void Conv2d64(const float *input, const float *weight, const float *bias, float *output,
const int stride, const int pad, const int dilation,
const int C, const int H,
const int K,
const int OH) {
const int W = H;
const int R = 3;
const int S = 3;
const int OW = OH;
const int k = blockIdx.x * blockDim.x + threadIdx.x;
const int oh = blockIdx.y * blockDim.y + threadIdx.y;
const int ow = (blockIdx.z * blockDim.z+ threadIdx.z)*WLEN;
if (k >= K || oh >= OH || ow >= OW) return;
const int blen_1 = blockDim.y;
const int blen_2 = blockDim.z;
const int index_flattened = threadIdx.y*blen_2 + threadIdx.z;
const int DIV = C/BLK_CHN;
const int ch_div = BLK_CHN/blen_1/blen_2;
const int RS = R * S;
const int kCRS = k * C * RS;
//float o[WLEN] = {bias[k], bias[k]};
float o[WLEN] = {bias[k]};
__shared__ float weight_local[BLK_CHN*3*3];
for (int i=0;i<DIV;i++) {
__syncthreads();
for (int c = ch_div*index_flattened; c < ch_div*(index_flattened+1); ++c) {
const int kCRS_cRS = kCRS + (c + i*BLK_CHN) * RS;
const int _cRS = c * RS;
for (int r = 0; r < R; ++r) {
const int rS = r * S;
const int _cRS_rS = _cRS + rS;
for (int s = 0; s < S; ++s) {
weight_local[_cRS_rS + s] = weight[kCRS_cRS + rS + s];
}
}
}
__syncthreads();
for (int c = i*BLK_CHN; c < (i+1)*BLK_CHN; ++c) {
const int cHW = c * H * W;
const int _cRS = (c-i*BLK_CHN) * RS;
for (int r = 0; r < R; ++r) {
const int _cRS_rS = _cRS + r * S;
for (int s = 0; s < S; ++s) {
const int h = oh * stride - pad + r * dilation;
const int cHW_hW = cHW + h * W;
const float wt = weight_local[_cRS_rS + s];
int w[WLEN];
w[0] = ow * stride - pad + s * dilation;
for(int j=1;j<WLEN;j++) {
w[j] = w[j-1] + stride;
}
for (int j=0;j<WLEN;j++) {
if (h >= 0 && h < H && w[j] >= 0 && w[j] < W) {
o[j] += input[cHW_hW + w[j]] * wt;
}
}
}
}
}
}
const int ind = k * OH * OW + oh * OW + ow;
for (int j=0;j<WLEN;j++) {
output[ind+j] = fmaxf(o[j],0);
}
}
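// BatchNorm2d: inference-time batch norm using the stored running statistics,
// one thread per element: (x - running_mean) / sqrt(running_var + eps) * weight + bias.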
__global__ void BatchNorm2d(const float *input, const float *weight, const float *bias, const float *running_mean, const float *running_var, float *output,
const int C, const int H) {
const float eps = 1e-5;
const int W = H;
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int h = blockIdx.y * blockDim.y + threadIdx.y;
const int w = blockIdx.z * blockDim.z+ threadIdx.z;
if (c >= C || h >= H || w >= W) return;
const int cHW_hW_w = c * H * W + h * W + w;
output[cHW_hW_w] = (input[cHW_hW_w] - running_mean[c]) / sqrt(running_var[c] + eps) * weight[c] + bias[c];
}
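// ConvTranspose2dReLU: 4x4 transposed convolution (stride 2, pad 1) from
// 512x32x32 to 256x64x64 with a fused ReLU. Each output pixel walks the 4x4
// kernel window and only accumulates input positions where (oh + pad - r) and
// (ow + pad - s) are divisible by the stride; weights for 64 input channels
// at a time are staged in shared memory, as in Conv2d64.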
__global__ void ConvTranspose2dReLU(const float *input, const float *weight, const float *bias, float *output) {
const int stride = 2;
const int pad = 1;
const int C = 512;
const int H = 32;
const int W = 32;
const int K = 256;
const int R = 4;
const int S = 4;
const int OH = 64;
const int OW = 64;
const int k = blockIdx.x * blockDim.x + threadIdx.x;
const int oh = blockIdx.y * blockDim.y + threadIdx.y;
const int ow = blockIdx.z * blockDim.z+ threadIdx.z;
if (k >= K || oh >= OH || ow >= OW) return;
const int blen_2 = blockDim.z;
const int index_flattened = threadIdx.z;
const int DIV = C/BLK_CHN;
const int ch_div = BLK_CHN/blen_2;
const int RS = R * S;
const int HW = H * W;
const int KRS = K * RS;
const int kRS = k * RS;
float o = bias[k];
__shared__ float weight_local[BLK_CHN*4*4];
for (int i=0;i<DIV;i++) {
__syncthreads();
for (int c = ch_div*index_flattened; c < ch_div*(index_flattened+1); ++c) {
const int kRS_cKRS = kRS + (c + i*BLK_CHN) * KRS;
const int _cRS = c * RS;
for (int r = 0; r < R; ++r) {
const int rS = r * S;
const int _cRS_rS = _cRS + rS;
for (int s = 0; s < S; ++s) {
weight_local[_cRS_rS + s] = weight[kRS_cKRS + rS + s];
}
}
}
__syncthreads();
for (int c = i*BLK_CHN; c < (i+1)*BLK_CHN; ++c) {
const int _cRS = (c-i*BLK_CHN) * RS;
const int cHW = c * HW;
for (int r = 0; r < R; ++r) {
const int _cRS_rS = _cRS + r * S;
for (int s = 0; s < S; ++s) {
const int oh_pad__r = oh + pad - r;
const int ow_pad__s = ow + pad - s;
const int h = oh_pad__r / stride;
const int w = ow_pad__s / stride;
if ((oh_pad__r % stride != 0) || (ow_pad__s % stride != 0) ||
(h < 0 || h >= H || w < 0 || w >= W)) continue;
o += input[cHW + h * W + w] * weight_local[_cRS_rS + s];
}
}
}
}
output[k * OH * OW + oh * OW + ow] = fmaxf(o,0);
}
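// Softmax: per-pixel softmax over the 313 channels. Each thread owns one
// (h, w) position, caches the exponentials of all channels in a local array,
// and normalizes them by their sum.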
__global__ void Softmax(const float *input, float *output) {
const int C = 313;
const int H = 64;
const int W = 64;
const int h = blockIdx.x * blockDim.x + threadIdx.x;
const int w = blockIdx.y * blockDim.y + threadIdx.y;
if (h >= H || w >= W) return;
const int HW = H * W;
const int hW_w = h * W + w;
float exp_input_reg[313];
float sum = 0;
for(int c=0;c<C;++c) {
exp_input_reg[c] = exp(input[c * HW + hW_w]);
sum += exp_input_reg[c];
}
for(int c=0;c<C;++c) {
output[c * HW + hW_w] = exp_input_reg[c] / sum;
}
}
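// UpsampleUnnormalize: bilinear 4x upsample of the 2-channel 64x64 map to
// 256x256. Source coordinates use the half-pixel offset
// (oh / scale + 0.5 / scale - 0.5), neighbours are clamped to the image
// border, and the interpolated value is multiplied by 110 (the "Unnormalize"
// step).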
__global__ void UpsampleUnnormalize(const float *input, float *output) {
const float scale_factor = 4;
const int C = 2;
const int H = 64;
const int W = 64;
const int OH = 256;
const int OW = 256;
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int oh = blockIdx.y * blockDim.y + threadIdx.y;
const int ow = blockIdx.z * blockDim.z+ threadIdx.z;
if (c >= C || oh >= OH || ow >= OW) return;
const float miv = 0.5 / scale_factor - 0.5;
const float h = oh / scale_factor + miv;
const float w = ow / scale_factor + miv;
int h0 = floor(h), w0 = floor(w);
int h1 = h0 + 1, w1 = w0 + 1;
const float h_offset = h - h0, w_offset = w - w0;
const float om_ho = 1 - h_offset;
const float om_wo = 1 - w_offset;
const float w00 = om_ho * om_wo;
const float w01 = om_ho * w_offset;
const float w10 = h_offset * om_wo;
const float w11 = h_offset * w_offset;
const int hm1 = H - 1;
const int wm1 = W - 1;
h0 = h0 < 0 ? 0 : (h0 > hm1 ? hm1 : h0);
h1 = h1 < 0 ? 0 : (h1 > hm1 ? hm1 : h1);
w0 = w0 < 0 ? 0 : (w0 > wm1 ? wm1 : w0);
w1 = w1 < 0 ? 0 : (w1 > wm1 ? wm1 : w1);
const int cHW = c * H * W;
const int h0W = h0 * W;
const int h1W = h1 * W;
const int cHW_h0W = cHW + h0W;
const int cHW_h1W = cHW + h1W;
float mid = w00 * input[cHW_h0W + w0]
+ w01 * input[cHW_h0W + w1]
+ w10 * input[cHW_h1W + w0]
+ w11 * input[cHW_h1W + w1];
output[c * OH * OW + oh * OW + ow] = mid*110;
}
| c841f46fa28d193b16ed338f6a0addeba588d510.cu | #define BLK_CHN 64
#define WLEN 1
#define WLEN_N 2
__global__ void NormalizeL(const float *input, float *output) {
const int H = 256;
const int W = 256;
const int h = blockIdx.x * blockDim.x + threadIdx.x;
const int w = (blockIdx.y * blockDim.y + threadIdx.y)*WLEN_N;
if (h >= H || w >= W) return;
const int hW_w = h * W + w;
for(int j=0;j<WLEN_N;j++) {
output[hW_w+j] = input[hW_w+j]/100-0.5;
}
}
__global__ void Conv2d(const float *input, const float *weight, const float *bias, float *output,
const int stride, const int pad, const int dilation, const int has_bias_int,
const int C, const int H,
const int K, const int R,
const int OH,
const int with_relu_int) {
const int W = H;
const int S = R;
const int OW = OH;
const int k = blockIdx.x * blockDim.x + threadIdx.x;
const int oh = blockIdx.y * blockDim.y + threadIdx.y;
const int ow = (blockIdx.z * blockDim.z+ threadIdx.z)*WLEN;
if (k >= K || oh >= OH || ow >= OW) return;
const int RS = R * S;
const int kCRS = k * C * RS;
const float b_val = (has_bias_int==1) ? bias[k] : 0;
//float o[WLEN] = {b_val, b_val};
float o[WLEN] = {b_val};
for (int c = 0; c < C; ++c) {
const int kCRS_cRS = kCRS + c * RS;
const int cHW = c * H * W;
for (int r = 0; r < R; ++r) {
const int kCRS_cRS_rS = kCRS_cRS + r * S;
for (int s = 0; s < S; ++s) {
const int h = oh * stride - pad + r * dilation;
int w[WLEN];
w[0] = ow * stride - pad + s * dilation;
for(int j=1;j<WLEN;j++) {
w[j] = w[j-1] + stride;
}
const float wt = weight[kCRS_cRS_rS + s];
const int cHW_hW = cHW + h * W;
for (int j=0;j<WLEN;j++) {
if (h < 0 || h >= H || w[j] < 0 || w[j] >= W) {}
else {
o[j] += input[cHW_hW + w[j]] * wt;
}
}
}
}
}
const int kOHOW_ohOW_ow = k * OH * OW + oh * OW + ow;
for (int j=0;j<WLEN;j++) {
output[kOHOW_ohOW_ow+j] = (with_relu_int==1) ? fmaxf(o[j],0) : o[j];
}
}
__global__ void Conv2d64(const float *input, const float *weight, const float *bias, float *output,
const int stride, const int pad, const int dilation,
const int C, const int H,
const int K,
const int OH) {
const int W = H;
const int R = 3;
const int S = 3;
const int OW = OH;
const int k = blockIdx.x * blockDim.x + threadIdx.x;
const int oh = blockIdx.y * blockDim.y + threadIdx.y;
const int ow = (blockIdx.z * blockDim.z+ threadIdx.z)*WLEN;
if (k >= K || oh >= OH || ow >= OW) return;
const int blen_1 = blockDim.y;
const int blen_2 = blockDim.z;
const int index_flattened = threadIdx.y*blen_2 + threadIdx.z;
const int DIV = C/BLK_CHN;
const int ch_div = BLK_CHN/blen_1/blen_2;
const int RS = R * S;
const int kCRS = k * C * RS;
//float o[WLEN] = {bias[k], bias[k]};
float o[WLEN] = {bias[k]};
__shared__ float weight_local[BLK_CHN*3*3];
for (int i=0;i<DIV;i++) {
__syncthreads();
for (int c = ch_div*index_flattened; c < ch_div*(index_flattened+1); ++c) {
const int kCRS_cRS = kCRS + (c + i*BLK_CHN) * RS;
const int _cRS = c * RS;
for (int r = 0; r < R; ++r) {
const int rS = r * S;
const int _cRS_rS = _cRS + rS;
for (int s = 0; s < S; ++s) {
weight_local[_cRS_rS + s] = weight[kCRS_cRS + rS + s];
}
}
}
__syncthreads();
for (int c = i*BLK_CHN; c < (i+1)*BLK_CHN; ++c) {
const int cHW = c * H * W;
const int _cRS = (c-i*BLK_CHN) * RS;
for (int r = 0; r < R; ++r) {
const int _cRS_rS = _cRS + r * S;
for (int s = 0; s < S; ++s) {
const int h = oh * stride - pad + r * dilation;
const int cHW_hW = cHW + h * W;
const float wt = weight_local[_cRS_rS + s];
int w[WLEN];
w[0] = ow * stride - pad + s * dilation;
for(int j=1;j<WLEN;j++) {
w[j] = w[j-1] + stride;
}
for (int j=0;j<WLEN;j++) {
if (h >= 0 && h < H && w[j] >= 0 && w[j] < W) {
o[j] += input[cHW_hW + w[j]] * wt;
}
}
}
}
}
}
const int ind = k * OH * OW + oh * OW + ow;
for (int j=0;j<WLEN;j++) {
output[ind+j] = fmaxf(o[j],0);
}
}
__global__ void BatchNorm2d(const float *input, const float *weight, const float *bias, const float *running_mean, const float *running_var, float *output,
const int C, const int H) {
const float eps = 1e-5;
const int W = H;
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int h = blockIdx.y * blockDim.y + threadIdx.y;
const int w = blockIdx.z * blockDim.z+ threadIdx.z;
if (c >= C || h >= H || w >= W) return;
const int cHW_hW_w = c * H * W + h * W + w;
output[cHW_hW_w] = (input[cHW_hW_w] - running_mean[c]) / sqrt(running_var[c] + eps) * weight[c] + bias[c];
}
__global__ void ConvTranspose2dReLU(const float *input, const float *weight, const float *bias, float *output) {
const int stride = 2;
const int pad = 1;
const int C = 512;
const int H = 32;
const int W = 32;
const int K = 256;
const int R = 4;
const int S = 4;
const int OH = 64;
const int OW = 64;
const int k = blockIdx.x * blockDim.x + threadIdx.x;
const int oh = blockIdx.y * blockDim.y + threadIdx.y;
const int ow = blockIdx.z * blockDim.z+ threadIdx.z;
if (k >= K || oh >= OH || ow >= OW) return;
const int blen_2 = blockDim.z;
const int index_flattened = threadIdx.z;
const int DIV = C/BLK_CHN;
const int ch_div = BLK_CHN/blen_2;
const int RS = R * S;
const int HW = H * W;
const int KRS = K * RS;
const int kRS = k * RS;
float o = bias[k];
__shared__ float weight_local[BLK_CHN*4*4];
for (int i=0;i<DIV;i++) {
__syncthreads();
for (int c = ch_div*index_flattened; c < ch_div*(index_flattened+1); ++c) {
const int kRS_cKRS = kRS + (c + i*BLK_CHN) * KRS;
const int _cRS = c * RS;
for (int r = 0; r < R; ++r) {
const int rS = r * S;
const int _cRS_rS = _cRS + rS;
for (int s = 0; s < S; ++s) {
weight_local[_cRS_rS + s] = weight[kRS_cKRS + rS + s];
}
}
}
__syncthreads();
for (int c = i*BLK_CHN; c < (i+1)*BLK_CHN; ++c) {
const int _cRS = (c-i*BLK_CHN) * RS;
const int cHW = c * HW;
for (int r = 0; r < R; ++r) {
const int _cRS_rS = _cRS + r * S;
for (int s = 0; s < S; ++s) {
const int oh_pad__r = oh + pad - r;
const int ow_pad__s = ow + pad - s;
const int h = oh_pad__r / stride;
const int w = ow_pad__s / stride;
if ((oh_pad__r % stride != 0) || (ow_pad__s % stride != 0) ||
(h < 0 || h >= H || w < 0 || w >= W)) continue;
o += input[cHW + h * W + w] * weight_local[_cRS_rS + s];
}
}
}
}
output[k * OH * OW + oh * OW + ow] = fmaxf(o,0);
}
__global__ void Softmax(const float *input, float *output) {
const int C = 313;
const int H = 64;
const int W = 64;
const int h = blockIdx.x * blockDim.x + threadIdx.x;
const int w = blockIdx.y * blockDim.y + threadIdx.y;
if (h >= H || w >= W) return;
const int HW = H * W;
const int hW_w = h * W + w;
float exp_input_reg[313];
float sum = 0;
for(int c=0;c<C;++c) {
exp_input_reg[c] = exp(input[c * HW + hW_w]);
sum += exp_input_reg[c];
}
for(int c=0;c<C;++c) {
output[c * HW + hW_w] = exp_input_reg[c] / sum;
}
}
__global__ void UpsampleUnnormalize(const float *input, float *output) {
const float scale_factor = 4;
const int C = 2;
const int H = 64;
const int W = 64;
const int OH = 256;
const int OW = 256;
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int oh = blockIdx.y * blockDim.y + threadIdx.y;
const int ow = blockIdx.z * blockDim.z+ threadIdx.z;
if (c >= C || oh >= OH || ow >= OW) return;
const float miv = 0.5 / scale_factor - 0.5;
const float h = oh / scale_factor + miv;
const float w = ow / scale_factor + miv;
int h0 = floor(h), w0 = floor(w);
int h1 = h0 + 1, w1 = w0 + 1;
const float h_offset = h - h0, w_offset = w - w0;
const float om_ho = 1 - h_offset;
const float om_wo = 1 - w_offset;
const float w00 = om_ho * om_wo;
const float w01 = om_ho * w_offset;
const float w10 = h_offset * om_wo;
const float w11 = h_offset * w_offset;
const int hm1 = H - 1;
const int wm1 = W - 1;
h0 = h0 < 0 ? 0 : (h0 > hm1 ? hm1 : h0);
h1 = h1 < 0 ? 0 : (h1 > hm1 ? hm1 : h1);
w0 = w0 < 0 ? 0 : (w0 > wm1 ? wm1 : w0);
w1 = w1 < 0 ? 0 : (w1 > wm1 ? wm1 : w1);
const int cHW = c * H * W;
const int h0W = h0 * W;
const int h1W = h1 * W;
const int cHW_h0W = cHW + h0W;
const int cHW_h1W = cHW + h1W;
float mid = w00 * input[cHW_h0W + w0]
+ w01 * input[cHW_h0W + w1]
+ w10 * input[cHW_h1W + w0]
+ w11 * input[cHW_h1W + w1];
output[c * OH * OW + oh * OW + ow] = mid*110;
}
|
5745a91d050dd19a3cd126ec37421374825cce09.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/cvm_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
template <typename T>
__global__ void CvmComputeKernel(const bool use_cvm, const int64_t item_width,
const T* X, T* Y, int64_t numel) {
CUDA_KERNEL_LOOP(i, numel) {
if (use_cvm) {
if (i % item_width == 0) {
Y[i] = log(X[i] + 1);
} else if (i % item_width == 1) {
Y[i] = log(X[i] + 1) - log(X[i - 1] + 1);
} else {
Y[i] = X[i];
}
} else {
Y[i] = X[i / (item_width - 2) * item_width + i % (item_width - 2) + 2];
}
}
}
template <typename T>
__global__ void CvmGradComputeKernel(const bool use_cvm,
const int64_t item_width, const T* CVM,
const T* DY, T* DX, bool has_lod,
const size_t* lod, size_t lod_size,
int64_t numel) {
CUDA_KERNEL_LOOP(i, numel) {
int offset = i % item_width;
if (offset <= 1) {
int cvm_id = i / item_width;
if (has_lod) {
int low = 1;
int high = lod_size - 1;
while (low < high) {
int mid = (low + high) / 2;
if (cvm_id < lod[mid])
high = mid;
else
low = mid + 1;
}
cvm_id = low - 1;
}
DX[i] = CVM[2 * cvm_id + offset];
} else {
if (use_cvm) {
DX[i] = DY[i];
} else {
DX[i] = DY[i / item_width * (item_width - 2) + i % item_width - 2];
}
}
}
}
template <typename T>
class CVMCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const auto* x = context.Input<LoDTensor>("X");
const T* x_data = x->data<T>();
auto batch_size = x->dims()[0];
auto numel = x->numel();
auto item_size = numel / batch_size;
auto use_cvm = context.Attr<bool>("use_cvm");
auto* y = context.Output<LoDTensor>("Y");
T* y_data = y->mutable_data<T>(context.GetPlace());
// for Input X do not have Lod Information.
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
if (x->NumLevels() == 0) {
hipLaunchKernelGGL(( CvmComputeKernel), (numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
use_cvm, item_size, x_data, y_data, y->numel());
} else {
auto lod = x->lod()[0];
PADDLE_ENFORCE_EQ(
batch_size, lod[lod.size() - 1],
platform::errors::PreconditionNotMet(
"Input(X)'s dim[0] must be equal to last element of lod"));
hipLaunchKernelGGL(( CvmComputeKernel), (numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
use_cvm, item_size, x_data, y_data, y->numel());
}
}
};
template <typename T>
class CVMGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* dx = context.Output<LoDTensor>(framework::GradVarName("X"));
T* dx_data = dx->mutable_data<T>(context.GetPlace());
const Tensor* cvm = context.Input<Tensor>("CVM");
const T* cvm_data = cvm->data<T>();
const auto* dOut =
context.Input<framework::LoDTensor>(framework::GradVarName("Y"));
const T* dout_data = dOut->data<T>();
auto use_cvm = context.Attr<bool>("use_cvm");
auto offset = 2;
auto batch_size = dx->dims()[0];
auto dx_numel = dx->numel();
auto item_size = dx_numel / batch_size;
// for Input X do not have Lod Information.
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
if (dx->NumLevels() == 0) {
hipLaunchKernelGGL(( CvmGradComputeKernel), (dx_numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
use_cvm, item_size, cvm_data, dout_data, dx_data, false, NULL, 0,
dx_numel);
} else {
auto lod = dx->lod()[0];
PADDLE_ENFORCE_EQ(
batch_size, lod[lod.size() - 1],
platform::errors::PreconditionNotMet(
"Output(X@GRAD)'s dim[0] must be equal to last element of lod"));
hipLaunchKernelGGL(( CvmGradComputeKernel), (dx_numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
use_cvm, item_size, cvm_data, dout_data, dx_data, true,
lod.CUDAData(context.GetPlace()), lod.size(), dx_numel);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(cvm, ops::CVMCUDAKernel<float>,
ops::CVMCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(cvm_grad, ops::CVMGradCUDAKernel<float>,
ops::CVMGradCUDAKernel<double>);
| 5745a91d050dd19a3cd126ec37421374825cce09.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/cvm_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
template <typename T>
__global__ void CvmComputeKernel(const bool use_cvm, const int64_t item_width,
const T* X, T* Y, int64_t numel) {
CUDA_KERNEL_LOOP(i, numel) {
if (use_cvm) {
if (i % item_width == 0) {
Y[i] = log(X[i] + 1);
} else if (i % item_width == 1) {
Y[i] = log(X[i] + 1) - log(X[i - 1] + 1);
} else {
Y[i] = X[i];
}
} else {
Y[i] = X[i / (item_width - 2) * item_width + i % (item_width - 2) + 2];
}
}
}
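// A small worked example of the indexing above (numbers chosen for clarity):
// with item_width = 5 and use_cvm = false, the output rows are 3 wide, so
// output element i = 4 lies in row 1, column 1 and reads
// X[4 / 3 * 5 + 4 % 3 + 2] = X[8], i.e. the first two (CVM) columns of each
// input row are skipped. With use_cvm = true the first column of a row
// becomes log(x0 + 1) and the second log(x1 + 1) - log(x0 + 1), where x0 and
// x1 are the first two entries of that row.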
template <typename T>
__global__ void CvmGradComputeKernel(const bool use_cvm,
const int64_t item_width, const T* CVM,
const T* DY, T* DX, bool has_lod,
const size_t* lod, size_t lod_size,
int64_t numel) {
CUDA_KERNEL_LOOP(i, numel) {
int offset = i % item_width;
if (offset <= 1) {
int cvm_id = i / item_width;
if (has_lod) {
int low = 1;
int high = lod_size - 1;
while (low < high) {
int mid = (low + high) / 2;
if (cvm_id < lod[mid])
high = mid;
else
low = mid + 1;
}
cvm_id = low - 1;
}
DX[i] = CVM[2 * cvm_id + offset];
} else {
if (use_cvm) {
DX[i] = DY[i];
} else {
DX[i] = DY[i / item_width * (item_width - 2) + i % item_width - 2];
}
}
}
}
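// A small worked example of the LoD lookup above (lod values assumed):
// with lod = {0, 3, 5}, rows 0..2 belong to sequence 0 and rows 3..4 to
// sequence 1. For an element of row 4 the binary search ends with low = 2,
// so cvm_id = 1 and the kernel reads CVM[2 * 1 + offset].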
template <typename T>
class CVMCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const auto* x = context.Input<LoDTensor>("X");
const T* x_data = x->data<T>();
auto batch_size = x->dims()[0];
auto numel = x->numel();
auto item_size = numel / batch_size;
auto use_cvm = context.Attr<bool>("use_cvm");
auto* y = context.Output<LoDTensor>("Y");
T* y_data = y->mutable_data<T>(context.GetPlace());
    // Handle the case where Input(X) does not have LoD information.
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
if (x->NumLevels() == 0) {
CvmComputeKernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
use_cvm, item_size, x_data, y_data, y->numel());
} else {
auto lod = x->lod()[0];
PADDLE_ENFORCE_EQ(
batch_size, lod[lod.size() - 1],
platform::errors::PreconditionNotMet(
"Input(X)'s dim[0] must be equal to last element of lod"));
CvmComputeKernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
use_cvm, item_size, x_data, y_data, y->numel());
}
}
};
template <typename T>
class CVMGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* dx = context.Output<LoDTensor>(framework::GradVarName("X"));
T* dx_data = dx->mutable_data<T>(context.GetPlace());
const Tensor* cvm = context.Input<Tensor>("CVM");
const T* cvm_data = cvm->data<T>();
const auto* dOut =
context.Input<framework::LoDTensor>(framework::GradVarName("Y"));
const T* dout_data = dOut->data<T>();
auto use_cvm = context.Attr<bool>("use_cvm");
auto offset = 2;
auto batch_size = dx->dims()[0];
auto dx_numel = dx->numel();
auto item_size = dx_numel / batch_size;
    // Handle the case where Input(X) does not have LoD information.
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
if (dx->NumLevels() == 0) {
CvmGradComputeKernel<<<(dx_numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
use_cvm, item_size, cvm_data, dout_data, dx_data, false, NULL, 0,
dx_numel);
} else {
auto lod = dx->lod()[0];
PADDLE_ENFORCE_EQ(
batch_size, lod[lod.size() - 1],
platform::errors::PreconditionNotMet(
"Output(X@GRAD)'s dim[0] must be equal to last element of lod"));
CvmGradComputeKernel<<<(dx_numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
use_cvm, item_size, cvm_data, dout_data, dx_data, true,
lod.CUDAData(context.GetPlace()), lod.size(), dx_numel);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(cvm, ops::CVMCUDAKernel<float>,
ops::CVMCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(cvm_grad, ops::CVMGradCUDAKernel<float>,
ops::CVMGradCUDAKernel<double>);
|
58b57e0469037fe7ea8f92678b468d4234212158.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<iostream>
using namespace std;
int main() {
int dCount;
hipGetDeviceCount(&dCount);
for(int i=0; i<dCount+3; i++)
{
hipDeviceProp_t prop;
hipError_t err = hipGetDeviceProperties(&prop, i);
if(err != hipSuccess)
cout<<"yes"<<endl;
printf("CUDA Device#%d\n", i);
printf("Device name:%s\n", prop.name);
printf("multiProcessorCount:%d\n", prop.multiProcessorCount);
printf("maxThreadsPerBlock:%d\n", prop.maxThreadsPerBlock);
printf("warpSize:%d\n", prop.warpSize);
printf("maxThreadsDim[3]:%d, %d, %d\n",
prop.maxThreadsDim[0],
prop.maxThreadsDim[1],
prop.maxThreadsDim[2]);
printf("maxGridSize[3]:%d, %d, %d\n",
prop.maxGridSize[0],
prop.maxGridSize[1],
prop.maxGridSize[2]);
}
return 0;
}
| 58b57e0469037fe7ea8f92678b468d4234212158.cu | #include<stdio.h>
#include<iostream>
using namespace std;
int main() {
int dCount;
cudaGetDeviceCount(&dCount);
for(int i=0; i<dCount+3; i++)
{
cudaDeviceProp prop;
cudaError_t err = cudaGetDeviceProperties(&prop, i);
if(err != cudaSuccess)
cout<<"yes"<<endl;
printf("CUDA Device#%d\n", i);
printf("Device name:%s\n", prop.name);
printf("multiProcessorCount:%d\n", prop.multiProcessorCount);
printf("maxThreadsPerBlock:%d\n", prop.maxThreadsPerBlock);
printf("warpSize:%d\n", prop.warpSize);
printf("maxThreadsDim[3]:%d, %d, %d\n",
prop.maxThreadsDim[0],
prop.maxThreadsDim[1],
prop.maxThreadsDim[2]);
printf("maxGridSize[3]:%d, %d, %d\n",
prop.maxGridSize[0],
prop.maxGridSize[1],
prop.maxGridSize[2]);
}
return 0;
}
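// A more defensive variant of the loop body above (sketch only): when the
// index is out of range, cudaGetDeviceProperties() fails and `prop` stays
// uninitialized, so the property prints would show garbage. One could skip
// them instead:
//
//   cudaDeviceProp prop;
//   cudaError_t err = cudaGetDeviceProperties(&prop, i);
//   if (err != cudaSuccess) {
//     printf("CUDA Device#%d: %s\n", i, cudaGetErrorString(err));
//     continue;
//   }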
|
023c84d7c47a1d0e353c4d918b386cf9f6adc860.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This file is part of SVO - Semi-direct Visual Odometry.
//
// Copyright (C) 2014 Christian Forster <forster at ifi dot uzh dot ch>
// (Robotics and Perception Group, University of Zurich, Switzerland).
//
// This file is subject to the terms and conditions defined in the file
// 'LICENSE', which is part of this source code package.
#include <svo/img_align/sparse_img_align_device_utils.cuh>
#include <imp/cu_core/cu_texture.cuh>
#include <imp/cu_core/cu_utils.hpp>
#include <svo/common/logging.h>
#include <svo/img_align/sparse_img_align_base.h>
namespace svo {
GpuCacheHandler::GpuCacheHandler():
patch_area_(0), feature_capacity_(0), reduction_cache_capacity_(0)
{ }
GpuCacheHandler::GpuCacheHandler(const size_t patch_area):
patch_area_(patch_area), feature_capacity_(0), reduction_cache_capacity_(0)
{ }
void GpuCacheHandler::setPatchArea(const size_t patch_area)
{
patch_area_ = patch_area;
}
void GpuCacheHandler::reserveFeatureCapacity(const size_t capacity)
{
CHECK_GT(patch_area_,0);
  // Check if enough or too much memory is allocated.
if( (capacity > feature_capacity_) || (feature_capacity_ - capacity > kMaxStorageSurplus))
{
SVO_WARN_STREAM("Reallocate GPU memory. Changing capacity from " << feature_capacity_ << " to "
<< capacity << " features.");
uv_cache_.reset(new UvCache(capacity));
xyz_ref_cache_.reset(new XyzRefCache(capacity));
jacobian_proj_cache_.reset(new JacobianProjCache(capacity*kJacProjStride));
jacobian_cache_.reset(new JacobianCache(capacity*kJacStride*patch_area_));
residual_cache_.reset(new ResidualCache(capacity*patch_area_));
visibility_mask_.reset(new VisibilityMask(capacity));
ref_patch_cache_.reset(new RefPatchCache(capacity*patch_area_));
disparity_cache_.reset(new DistparitiyCache(capacity));
feature_capacity_ = capacity;
}
}
void GpuCacheHandler::reserveReductionCacheCapacity(const size_t capacity)
{
  // Check if enough or too much memory is allocated.
if( (capacity > reduction_cache_capacity_) || (reduction_cache_capacity_ - capacity > kMaxStorageSurplus))
{
SVO_WARN_STREAM("Reallocate memory for reduction step from " << reduction_cache_capacity_ << " to "
<< capacity << " blocks.");
hessian_reduction_cache_.reset(new HessianReductionCache(capacity*kHessianTriagStride));
gradient_reduction_cache_.reset(new GradientReductionCache(capacity*kJacStride));
chi2_reduction_cache_.reset(new GradientReductionCache(capacity));
nr_visible_cache_.reset(new NrVisibleCache(capacity));
hessian_reduction_cache_host_.reset(new HessianReductionCacheHost(capacity*kHessianTriagStride));
gradient_reduction_cache_host_.reset(new GradientReductionCacheHost(capacity*kJacStride));
chi2_reduction_cache_host_.reset(new GradientReductionCacheHost(capacity));
nr_visible_cache_host_.reset(new NrVisibleCacheHost(capacity));
reduction_cache_capacity_ = capacity;
}
else
{
// Set region of interest to the correct value to make
// copying from device to host possible.
if(nr_visible_cache_host_->roi().length() != capacity)
{
SVO_DEBUG_STREAM("Change region of interest of linear memory (before "
<< nr_visible_cache_host_->roi().length() << ", after "
<< capacity << " elements)");
hessian_reduction_cache_->setRoi(imp::Roi1u(0,capacity*kHessianTriagStride));
gradient_reduction_cache_->setRoi(imp::Roi1u(0,capacity*kJacStride));
chi2_reduction_cache_->setRoi(imp::Roi1u(0,capacity));
nr_visible_cache_->setRoi(imp::Roi1u(0,capacity));
hessian_reduction_cache_host_->setRoi(imp::Roi1u(0,capacity*kHessianTriagStride));
gradient_reduction_cache_host_->setRoi(imp::Roi1u(0,capacity*kJacStride));
chi2_reduction_cache_host_->setRoi(imp::Roi1u(0,capacity));
nr_visible_cache_host_->setRoi(imp::Roi1u(0,capacity));
}
}
}
inline void GpuCacheHandler::copyReductionCacheDeviceToHost()
{
hessian_reduction_cache_->copyTo(*hessian_reduction_cache_host_);
gradient_reduction_cache_->copyTo(*gradient_reduction_cache_host_);
chi2_reduction_cache_->copyTo(*chi2_reduction_cache_host_);
nr_visible_cache_->copyTo(*nr_visible_cache_host_);
}
namespace sparse_img_align_device_utils
{
__host__ __device__ __forceinline__
void setGx(imp::cu::Matrix<FloatTypeGpu,3,6>& __restrict__ g_x,
const Float3TypeGpu& __restrict__ p_in_imu)
{
g_x(0,0) = 1.0;
g_x(0,1) = 0.0;
g_x(0,2) = 0.0;
g_x(0,3) = 0.0;
g_x(0,4) = p_in_imu.z;
g_x(0,5) = -p_in_imu.y;
g_x(1,0) = 0.0;
g_x(1,1) = 1.0;
g_x(1,2) = 0.0;
g_x(1,3) = -p_in_imu.z;
g_x(1,4) = 0.0;
g_x(1,5) = p_in_imu.x;
g_x(2,0) = 0.0;
g_x(2,1) = 0.0;
g_x(2,2) = 1.0;
g_x(2,3) = p_in_imu.y;
g_x(2,4) = -p_in_imu.x;
g_x(2,5) = 0.0;
}
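// The matrix filled above corresponds to the derivative of the transformed
// point with respect to a twist [v, w] (translation first, rotation second):
// g_x = [ I_3 | -[p]_x ], where [p]_x is the skew-symmetric matrix of
// p_in_imu. For example, row 0 reads (1, 0, 0, 0, p.z, -p.y), which matches
// the first row of [ I_3 | -[p]_x ].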
// TODO: This function should be a member function of the CPU camera
__host__ __device__ __forceinline__
void setPinholeJacobian(imp::cu::Matrix<FloatTypeGpu,2,3>& __restrict__ jac_cam,
const Float3TypeGpu& __restrict__ p_in_cam,
const FloatTypeGpu& __restrict__ focal_length)
{
FloatTypeGpu ratio_p_x_z_cam = p_in_cam.x/p_in_cam.z;
FloatTypeGpu ratio_p_y_z_cam = p_in_cam.y/p_in_cam.z;
FloatTypeGpu ratio_fl_p_z_cam = focal_length/p_in_cam.z;
jac_cam(0,0) = ratio_fl_p_z_cam;
jac_cam(0,1) = 0.0;
jac_cam(0,2) = -ratio_fl_p_z_cam*ratio_p_x_z_cam;
jac_cam(1,0) = 0.0;
jac_cam(1,1) = ratio_fl_p_z_cam;
jac_cam(1,2) = -ratio_fl_p_z_cam*ratio_p_y_z_cam;
}
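// For a pinhole projection u = f * x / z, v = f * y / z the entries above
// follow from
//   du/dx = f/z,  du/dy = 0,    du/dz = -(f/z) * (x/z),
//   dv/dx = 0,    dv/dy = f/z,  dv/dz = -(f/z) * (y/z),
// with ratio_fl_p_z_cam = f/z, ratio_p_x_z_cam = x/z and ratio_p_y_z_cam = y/z.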
__global__ void k_baseCachesGeneric(const imp::cu::Matrix<FloatTypeGpu,3,4> T_imu_cam,
const imp::cu::Matrix<FloatTypeGpu,3,3> R_imu_cam,
const FloatTypeGpu focal_length,
const Float3TypeGpu* __restrict__ p_in_cam,
FloatTypeGpu* __restrict__ jac_proj_cache,
const unsigned int nr_features)
{
const unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i < nr_features)
{
const Float3TypeGpu p_in_imu = transform(T_imu_cam,p_in_cam[i]);
imp::cu::Matrix<FloatTypeGpu,3,6> g_x;
setGx(g_x,p_in_imu);
imp::cu::Matrix<FloatTypeGpu,2,3> jac_cam;
setPinholeJacobian(jac_cam,p_in_cam[i],focal_length);
imp::cu::Matrix<FloatTypeGpu,2,6> jac_proj = ((jac_cam*R_imu_cam)*g_x);
    // write to buffer
int offset = 2*6*i;
#pragma unroll
for(int row = 0; row < 2;++row)
{
#pragma unroll
for(int col = 0; col < 6; ++col)
{
// times (-1) because of our definition of the photometric error
jac_proj_cache[offset + col] = -1.0f*jac_proj(row,col);
}
offset +=6;
}
}
}
__global__ void k_baseCachesPinhole(const imp::cu::Matrix<FloatTypeGpu,3,4> T_imu_cam,
const imp::cu::Matrix<FloatTypeGpu,3,3> R_cam_imu,
const FloatTypeGpu focal_length,
const Float3TypeGpu* __restrict__ p_in_cam,
FloatTypeGpu* __restrict__ jac_proj_cache,
const unsigned int nr_features)
{
const unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i < nr_features)
{
Float3TypeGpu p_in_imu = transform(T_imu_cam,p_in_cam[i]);
FloatTypeGpu ratio_p_x_z_cam = p_in_cam[i].x/p_in_cam[i].z;
FloatTypeGpu ratio_p_y_z_cam = p_in_cam[i].y/p_in_cam[i].z;
// times (-1) because of our definition of the photometric error
FloatTypeGpu ratio_fl_p_z_cam = (-1.0)*focal_length/p_in_cam[i].z;
FloatTypeGpu r00 = ratio_fl_p_z_cam*(R_cam_imu(0,0) - R_cam_imu(2,0)*ratio_p_x_z_cam);
FloatTypeGpu r01 = ratio_fl_p_z_cam*(R_cam_imu(0,1) - R_cam_imu(2,1)*ratio_p_x_z_cam);
FloatTypeGpu r02 = ratio_fl_p_z_cam*(R_cam_imu(0,2) - R_cam_imu(2,2)*ratio_p_x_z_cam);
FloatTypeGpu r10 = ratio_fl_p_z_cam*(R_cam_imu(1,0) - R_cam_imu(2,0)*ratio_p_y_z_cam);
FloatTypeGpu r11 = ratio_fl_p_z_cam*(R_cam_imu(1,1) - R_cam_imu(2,1)*ratio_p_y_z_cam);
FloatTypeGpu r12 = ratio_fl_p_z_cam*(R_cam_imu(1,2) - R_cam_imu(2,2)*ratio_p_y_z_cam);
const int offset = 2*6*i;
jac_proj_cache[offset] = r00;
jac_proj_cache[offset + 1] = r01;
jac_proj_cache[offset + 2] = r02;
jac_proj_cache[offset + 3] = -p_in_imu.z*r01 + p_in_imu.y*r02;
jac_proj_cache[offset + 4] = p_in_imu.z*r00 - p_in_imu.x*r02;
jac_proj_cache[offset + 5] = -p_in_imu.y*r00 + p_in_imu.x*r01;
jac_proj_cache[offset + 6] = r10;
jac_proj_cache[offset + 7] = r11;
jac_proj_cache[offset + 8] = r12;
jac_proj_cache[offset + 9] = -p_in_imu.z*r11 + p_in_imu.y*r12;
jac_proj_cache[offset + 10] = p_in_imu.z*r10 - p_in_imu.x*r12;
jac_proj_cache[offset + 11] = -p_in_imu.y*r10 + p_in_imu.x*r11;
}
}
void precomputeBaseCaches(std::vector<Float2TypeGpu>& uv_cache,
std::vector<Float3TypeGpu>& xyz_ref_cache,
const std::vector<size_t>& first_ftr_index,
const std::vector<size_t>& nbr_of_ftrs,
const std::vector<imp::cu::Matrix<FloatTypeGpu,3,4>::Ptr>& cu_T_imu_cam_bundle,
const std::vector<imp::cu::Matrix<FloatTypeGpu,3,4>::Ptr>& cu_T_cam_imu_bundle,
const std::vector<imp::cu::PinholeCamera::Ptr>& cu_camera_bundle,
const size_t& nbr_fts_to_track,
GpuCacheHandler& gpu_cache)
{
// Prepare the GPU buffers.
gpu_cache.reserveFeatureCapacity(nbr_fts_to_track);
// Transfer data from CPU to GPU.
LinearMemoryFloat2 uv_linear(reinterpret_cast<Float2PixelGpu*>(uv_cache.data()),uv_cache.size(),true);
LinearMemoryFloat3 xyz_linear(reinterpret_cast<Float3PixelGpu*>(xyz_ref_cache.data()),xyz_ref_cache.size(),true);
gpu_cache.uv().setRoi(uv_linear.roi());
gpu_cache.uv().copyFrom(uv_linear);
gpu_cache.xyzRef().setRoi(xyz_linear.roi());
gpu_cache.xyzRef().copyFrom(xyz_linear);
// Fill base caches.
for(int i = 0; i< static_cast<int>(cu_camera_bundle.size()); ++i)
{
imp::cu::Fragmentation<32,1> frag(nbr_of_ftrs.at(i));
hipLaunchKernelGGL(( k_baseCachesPinhole)
,
dim3(frag.dimGrid),dim3(frag.dimBlock)
, 0, 0, *cu_T_imu_cam_bundle.at(i),
cu_T_cam_imu_bundle.at(i)->block<3,3>(0,0),
cu_camera_bundle.at(i)->fx(),
&gpu_cache.xyzRef().cuData()[first_ftr_index.at(i)],
&gpu_cache.jacProj().cuData()[first_ftr_index.at(i)*GpuCacheHandler::kJacProjStride],
nbr_of_ftrs.at(i));
}
hipDeviceSynchronize();
}
__global__ void k_jacobianAndRefPatches(imp::cu::Texture2D ref_tex,
const Float2TypeGpu* __restrict__ uv,
const FloatTypeGpu* __restrict__ jac_proj_cache,
const int patch_size,
const int level,
const unsigned int nrFeatures,
FloatTypeGpu* __restrict__ jacobian_cache,
FloatTypeGpu* __restrict__ ref_patch_cache)
{
const unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i < nrFeatures)
{
const FloatTypeGpu scale = 1.0f/(1<<level);
const FloatTypeGpu patch_area = patch_size*patch_size;
const FloatTypeGpu upper_left_coord_x = uv[i].x*scale - (patch_size - 1)/2.0f;
const FloatTypeGpu upper_left_coord_y = uv[i].y*scale - (patch_size - 1)/2.0f;
size_t ref_patch_index_offset = patch_area*i;
size_t jacobian_index_offset = patch_area*GpuCacheHandler::kJacStride*i;
size_t jac_proj_cache_index_offset = GpuCacheHandler::kJacProjStride*i;
#pragma unroll 4
for(int row = 0; row < patch_size; ++row)
{
#pragma unroll 4
for(int col = 0; col < patch_size; ++col, ++ref_patch_index_offset, jacobian_index_offset += 8)
{
FloatTypeGpu center_texel;
imp::cu::tex2DFetch(center_texel, ref_tex,upper_left_coord_x + col, upper_left_coord_y + row);
ref_patch_cache[ref_patch_index_offset] = 255.0f*center_texel;
FloatTypeGpu dx_left,dx_right,dy_up,dy_down;
imp::cu::tex2DFetch(dx_left, ref_tex,upper_left_coord_x + col - 1, upper_left_coord_y + row);
imp::cu::tex2DFetch(dx_right, ref_tex,upper_left_coord_x + col + 1, upper_left_coord_y + row);
imp::cu::tex2DFetch(dy_up, ref_tex,upper_left_coord_x + col, upper_left_coord_y + row - 1);
imp::cu::tex2DFetch(dy_down, ref_tex,upper_left_coord_x + col, upper_left_coord_y + row + 1);
const FloatTypeGpu dx = 0.5f*(dx_right - dx_left)*255.0f;
const FloatTypeGpu dy = 0.5f*(dy_down - dy_up)*255.0f;
#pragma unroll
for(int i = 0; i < 6; ++i)
{
jacobian_cache[jacobian_index_offset + i] = (dx*(jac_proj_cache[jac_proj_cache_index_offset + i])
+ dy*(jac_proj_cache[jac_proj_cache_index_offset + 6 + i]))*scale;
}
//jacobian_cache[jacobian_index_offset + 6] = -255*center_texel;
//jacobian_cache[jacobian_index_offset + 7] = -1;
jacobian_cache[jacobian_index_offset + 6] = 0.0;
jacobian_cache[jacobian_index_offset + 7] = 0.0;
}
}
}
}
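// Note on the kernel above: the texture is sampled as normalized floats, so
// intensities and gradients are rescaled by 255 to match the 8-bit reference
// patch. The per-pixel Jacobian is then the chain rule
//   J = [dI/du, dI/dv] * J_proj * scale,
// where dI/du and dI/dv are central differences and `scale` accounts for the
// pyramid level (coordinates at level L are divided by 2^L).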
// TODO: imp::ImagePyramid version (currently not working)
//void precomputeJacobiansAndRefPatches(
// std::vector<imp::ImagePyramid8uC1::Ptr>& ref_pyramid,
// const int level,
// const int patch_size,
// const bool estimate_alpha,
// const bool estimate_beta,
// const std::vector<size_t>& first_ftr_index,
// const std::vector<size_t>& nbr_of_ftrs,
// GpuCacheHandler& gpu_cache)
//{
// int patch_area = patch_size*patch_size;
// for(int ii = 0; ii < static_cast<int>(pyramid.size());++ii)
// {
// std::shared_ptr<imp::cu::Texture2D> ref_tex =
// std::dynamic_pointer_cast<imp::cu::ImageGpu8uC1>(ref_pyramid.at(ii)->at(level))
// ->genTexture(false,hipFilterModeLinear,hipAddressModeBorder,hipReadModeNormalizedFloat);
// dim3 threads(32);
// dim3 blocks((nbr_of_ftrs.at(ii) + threads.x-1)/threads.x);
// std::cout << " features " << ii << " = " << nbr_of_ftrs.at(ii) << std::endl;
// k_jacobianAndRefPatches<<<blocks,threads>>>(*ref_tex.get(),
// reinterpret_cast<float2*>(&gpu_cache.uv().data()[first_ftr_index.at(ii)]),
// reinterpret_cast<float*>(&gpu_cache.jacProj().data()[first_ftr_index.at(ii)*12]),
// patch_size , level , nbr_of_ftrs.at(ii),
// reinterpret_cast<float*>(&gpu_cache.jacobian().data()[first_ftr_index.at(ii)*8*patch_area]),
// reinterpret_cast<float*>(&gpu_cache.refPatch().data()[first_ftr_index.at(ii)*patch_area]));
// }
// hipDeviceSynchronize();
//}
void precomputeJacobiansAndRefPatches(
const std::vector<std::vector<imp::cu::ImageGpu8uC1::Ptr> >& ref_pyramid,
const int level,
const int patch_size,
const bool estimate_alpha,
const bool estimate_beta,
const std::vector<size_t>& first_ftr_index,
const std::vector<size_t>& nbr_of_ftrs,
GpuCacheHandler& gpu_cache)
{
int patch_area = patch_size*patch_size;
for(int i = 0; i < static_cast<int>(ref_pyramid.size());++i)
{
std::shared_ptr<imp::cu::Texture2D> ref_tex =
std::dynamic_pointer_cast<imp::cu::ImageGpu8uC1>(ref_pyramid.at(i).at(level))
->genTexture(false,hipFilterModeLinear,hipAddressModeBorder,hipReadModeNormalizedFloat);
imp::cu::Fragmentation<32,1> frag(nbr_of_ftrs.at(i));
hipLaunchKernelGGL(( k_jacobianAndRefPatches)
,
dim3(frag.dimGrid), dim3(frag.dimBlock)
, 0, 0, *ref_tex,&gpu_cache.uv().cuData()[first_ftr_index.at(i)],
&gpu_cache.jacProj().cuData()[first_ftr_index.at(i)*GpuCacheHandler::kJacProjStride],
patch_size , level , nbr_of_ftrs.at(i),
&gpu_cache.jacobian().cuData()[first_ftr_index.at(i)*GpuCacheHandler::kJacStride*patch_area],
&gpu_cache.refPatch().cuData()[first_ftr_index.at(i)*patch_area]);
}
hipDeviceSynchronize();
}
__global__ void k_residuals(const imp::cu::Texture2D cur_tex,
const int width,
const int height,
const imp::cu::Matrix<FloatTypeGpu,3,4> T_cur_ref,
const imp::cu::PinholeCamera cam,
const FloatTypeGpu* __restrict__ ref_patch_cache,
const Float3TypeGpu* __restrict__ xyz_ref,
FloatTypeGpu alpha_illumination,
FloatTypeGpu beta_illumionation,
const int patch_size,
const int level,
const unsigned int nrFeatures,
BoolTypeGpu* __restrict__ visibility_cache,
FloatTypeGpu* __restrict__ residual_cache)
{
const unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i < nrFeatures)
{
FloatTypeGpu scale = 1.0f/(1<<level);
const int patch_area = patch_size*patch_size;
Float2TypeGpu uv_cur = static_cast<Float2TypeGpu>(
cam.world2cam(static_cast<Float3TypeGpu>(transform(T_cur_ref,xyz_ref[i]))));
const FloatTypeGpu upper_left_coord_x = uv_cur.x*scale - (patch_size - 1)/2.0f;
const FloatTypeGpu upper_left_coord_y = uv_cur.y*scale - (patch_size - 1)/2.0f;
// Check if projection is within the image.
if(upper_left_coord_x < 0.0f || upper_left_coord_y < 0.0f
|| upper_left_coord_x + patch_size >= width - 1 || upper_left_coord_y + patch_size >= height - 1)
{
visibility_cache[i] = 0;
}
else
{
visibility_cache[i] = 1;
int pixel = 0;
#pragma unroll 4
for(int row = 0; row < patch_size; ++row)
{
#pragma unroll 4
for(int col = 0; col < patch_size; ++col,++pixel)
{
FloatTypeGpu cur_Texel;
imp::cu::tex2DFetch(cur_Texel, cur_tex,upper_left_coord_x + col, upper_left_coord_y + row);
residual_cache[i*patch_area + pixel] = static_cast<FloatTypeGpu>(
255.0*cur_Texel*(1.0 + alpha_illumination) + beta_illumionation)
- ref_patch_cache[i*patch_area + pixel];
}
}
}
}
}
void computeResidualsOfFrame(
const std::vector<std::vector<imp::cu::ImageGpu8uC1::Ptr>>& cur_pyramid,
const std::vector<imp::cu::Matrix<FloatTypeGpu,3,4>>& cu_T_cur_ref_bundle,
const std::vector<imp::cu::PinholeCamera::Ptr>& cu_camera_bundle,
const std::vector<size_t>& first_ftr_index,
const std::vector<size_t>& nbr_of_ftrs,
const int level,
const int patch_size,
const bool estimate_alpha,
const bool estimate_beta,
GpuCacheHandler& gpu_cache)
{
const size_t patch_area = patch_size*patch_size;
for(int i = 0; i < static_cast<int>(cur_pyramid.size());++i)
{
std::shared_ptr<imp::cu::Texture2D> cur_tex = std::dynamic_pointer_cast<imp::cu::ImageGpu8uC1>(
cur_pyramid.at(i).at(level))
->genTexture(false,hipFilterModeLinear,hipAddressModeBorder,hipReadModeNormalizedFloat);
imp::cu::Fragmentation<32,1> frag(nbr_of_ftrs.at(i));
hipLaunchKernelGGL(( k_residuals)
,
dim3(frag.dimGrid),dim3(frag.dimBlock)
, 0, 0, *cur_tex, cur_pyramid.at(i).at(level)->width(),
cur_pyramid.at(i).at(level)->height(),
cu_T_cur_ref_bundle.at(i),
*cu_camera_bundle.at(i),
&gpu_cache.refPatch().cuData()[first_ftr_index.at(i)*patch_area],
&gpu_cache.xyzRef().cuData()[first_ftr_index.at(i)],
estimate_alpha, estimate_beta, patch_size, level, nbr_of_ftrs.at(i),
&gpu_cache.visibility().cuData()[first_ftr_index.at(i)],
&gpu_cache.residual().cuData()[first_ftr_index.at(i)*patch_area]);
}
hipDeviceSynchronize();
}
inline unsigned int nextPow2(unsigned int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
bool isPow2(unsigned int x)
{
return ((x&(x-1))==0);
}
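// Examples: nextPow2(37) == 64, nextPow2(64) == 64, isPow2(64) == true and
// isPow2(37) == false. The bit-smearing in nextPow2 propagates the highest
// set bit of (x - 1) into all lower positions before the final increment.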
template <size_t _n_elements>
__host__ __device__ __forceinline__
void setToZero(FloatTypeGpu* mem)
{
#pragma unroll
for(int ind = 0; ind < _n_elements; ++ind)
{
mem[ind] = 0.0;
}
}
template <size_t _matrix_size>
__host__ __device__ __forceinline__
void setVVTUpperTriag(FloatTypeGpu* __restrict__ upper_triag_row_maj,
const FloatTypeGpu* __restrict__ vect,
const FloatTypeGpu& __restrict__ weight = 1.0)
{
int index = 0;
#pragma unroll
for(int row = 0; row < _matrix_size; ++row)
{
#pragma unroll
for(int col = row; col < _matrix_size; ++col,++index)
{
upper_triag_row_maj[index] = weight*vect[row]*vect[col];
}
}
}
template <size_t _matrix_size>
__host__ __device__ __forceinline__
void addVVTUpperTriag(FloatTypeGpu* __restrict__ upper_triag_row_maj,
const FloatTypeGpu* __restrict__ vect,
const FloatTypeGpu& __restrict__ weight = 1.0)
{
int index = 0;
#pragma unroll
for(int row = 0; row < _matrix_size; ++row)
{
#pragma unroll
for(int col = row; col < _matrix_size; ++col,++index)
{
upper_triag_row_maj[index] += weight*vect[row]*vect[col];
}
}
}
template <size_t _vector_size>
__host__ __device__ __forceinline__
void addVector(FloatTypeGpu* __restrict__ sum_vect,
const FloatTypeGpu* __restrict__ addend_vect)
{
#pragma unroll
for(int ind = 0; ind < _vector_size; ++ind)
{
sum_vect[ind] += addend_vect[ind];
}
}
template <size_t _vector_size>
__host__ __device__ __forceinline__
void addWeightedVector(FloatTypeGpu* __restrict__ sum_vect,
const FloatTypeGpu* __restrict__ addend_vect,
const FloatTypeGpu& __restrict__ weight = 1.0)
{
#pragma unroll
for(int ind = 0; ind < _vector_size; ++ind)
{
sum_vect[ind] += weight*addend_vect[ind];
}
}
template <size_t _vector_size>
__host__ __device__ __forceinline__
void subWeightedVector(FloatTypeGpu* __restrict__ sum_vect,
const FloatTypeGpu* __restrict__ addend_vect,
const FloatTypeGpu& __restrict__ weight = 1.0)
{
#pragma unroll
for(int ind = 0; ind < _vector_size; ++ind)
{
sum_vect[ind] -= weight*addend_vect[ind];
}
}
template <size_t _vector_size>
__host__ __device__ __forceinline__
void setWeightedVector(FloatTypeGpu* __restrict__ dest_vect,
const FloatTypeGpu* __restrict__ src_vect,
const FloatTypeGpu& __restrict__ weight = 1.0)
{
#pragma unroll
for(int ind = 0; ind < _vector_size; ++ind)
{
dest_vect[ind] = weight*src_vect[ind];
}
}
template <size_t _vector_size>
__host__ __device__ __forceinline__
void copyVector(FloatTypeGpu* __restrict__ dest_vect,
const FloatTypeGpu* __restrict__ src_vect)
{
#pragma unroll
for(int ind = 0; ind < _vector_size; ++ind)
{
dest_vect[ind] = src_vect[ind];
}
}
// _block_size must be power of 2
template <unsigned int _block_size, bool n_is_pow2>
__global__ void k_reduceHessianGradient(const FloatTypeGpu* __restrict__ jacobian_cache,
const FloatTypeGpu* __restrict__ residual_cache,
const BoolTypeGpu* __restrict__ visibility_cache,
FloatTypeGpu* __restrict__ gradient_cache,
FloatTypeGpu* __restrict__ hessian_cache,
UIntTypeGpu* __restrict__ nr_meas,
FloatTypeGpu* __restrict__ chi2,
const unsigned int n_elements,
const unsigned int patch_area)
{
constexpr unsigned int kHessianTriagN = SparseImgAlignBase::kHessianTriagN;
constexpr unsigned int kJacobianSize = SparseImgAlignBase::kJacobianSize;
__shared__ FloatTypeGpu s_hessian_data[_block_size*kHessianTriagN];
__shared__ FloatTypeGpu s_gradient_data[_block_size*kJacobianSize];
__shared__ FloatTypeGpu s_chi2[_block_size];
__shared__ UIntTypeGpu s_chi2_nr_meas[_block_size];
FloatTypeGpu jacobian[kJacobianSize];
FloatTypeGpu gradient[kJacobianSize];
FloatTypeGpu hessian[kHessianTriagN];
FloatTypeGpu chi2_temp;
UIntTypeGpu chi2_nr_meas = 0;
const unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*_block_size*2 + threadIdx.x;
const unsigned int gridSize = _block_size*2*gridDim.x;
const unsigned int hessian_index = tid*kHessianTriagN;
const unsigned int gradient_index = tid*kJacobianSize;
// We reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread.
  // We read from global memory and write to shared memory.
// Get first element.
if((!n_is_pow2)&&(i >= n_elements))
{
setToZero<kJacobianSize>(gradient);
setToZero<kHessianTriagN>(hessian);
chi2_temp = 0.0;
}
else
{
const unsigned int visib_index = i/patch_area;
BoolTypeGpu visible = visibility_cache[visib_index];
if(visible == 1)
{
FloatTypeGpu residual = residual_cache[i];
// TODO: add weighting function
FloatTypeGpu weight = 1.0;// weight_function(residual/weight_scale);
copyVector<kJacobianSize>(jacobian,&jacobian_cache[i*kJacobianSize]);
setVVTUpperTriag<kJacobianSize>(hessian,jacobian,weight);
setWeightedVector<kJacobianSize>(gradient,jacobian, -weight*residual);
chi2_temp = residual*residual*weight;
++chi2_nr_meas;
}
else
{
setToZero<kJacobianSize>(gradient);
setToZero<kHessianTriagN>(hessian);
chi2_temp = 0.0;
}
// Get second element.
// Ensure we don't read out of bounds -- this is optimized away for powerOf2 problem size.
if (n_is_pow2 || i + _block_size < n_elements)
{
i += _block_size;
const unsigned int visib_index = i/patch_area;
BoolTypeGpu visible = visibility_cache[visib_index];
if(visible == 1)
{
FloatTypeGpu residual = residual_cache[i];
//TODO: add weighting function
FloatTypeGpu weight = 1.0;//weight_function(residual/weight_scale);
copyVector<kJacobianSize>(jacobian,&jacobian_cache[i*kJacobianSize]);
addVVTUpperTriag<kJacobianSize>(hessian,jacobian,weight);
subWeightedVector<kJacobianSize>(gradient,jacobian, weight*residual);
chi2_temp += residual*residual*weight;
++chi2_nr_meas;
}
}
i += (gridSize - _block_size);
}
// Add further elements if available.
while (i < n_elements)
{
const unsigned int visib_index = i/patch_area;
BoolTypeGpu visible = visibility_cache[visib_index];
if(visible == 1)
{
FloatTypeGpu residual = residual_cache[i];
//TODO: add weighting function
FloatTypeGpu weight = 1.0;// weight_function(residual/weight_scale);
copyVector<kJacobianSize>(jacobian,&jacobian_cache[i*kJacobianSize]);
addVVTUpperTriag<kJacobianSize>(hessian,jacobian,weight);
subWeightedVector<kJacobianSize>(gradient,jacobian, weight*residual);
chi2_temp += residual*residual*weight;
++chi2_nr_meas;
}
// Add second element.
// Ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays.
if (n_is_pow2 || i + _block_size < n_elements)
{
i += _block_size;
const unsigned int visib_index = i/patch_area;
BoolTypeGpu visible = visibility_cache[visib_index];
if(visible == 1)
{
FloatTypeGpu residual = residual_cache[i];
//TODO: add weighting function
FloatTypeGpu weight = 1.0;// visible*weight_function(residual/weight_scale);
copyVector<kJacobianSize>(jacobian,&jacobian_cache[i*kJacobianSize]);
addVVTUpperTriag<kJacobianSize>(hessian,jacobian,weight);
subWeightedVector<kJacobianSize>(gradient,jacobian, weight*residual);
chi2_temp += residual*residual*weight;
++chi2_nr_meas;
}
}
i += (gridSize - _block_size);
}
// Each thread puts its local sum into shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
__syncthreads();
// Do reduction in shared mem.
if ((_block_size >= 512) && (tid < 256))
{
// Add to local variable.
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 256)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 256)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 256];
chi2_nr_meas += s_chi2_nr_meas[tid + 256];
// Store result to shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
}
__syncthreads();
if ((_block_size >= 256) &&(tid < 128))
{
// add to local variable
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 128)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 128)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 128];
chi2_nr_meas += s_chi2_nr_meas[tid + 128];
// Store result to shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
}
__syncthreads();
if ((_block_size >= 128) && (tid < 64))
{
// add to local variable
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 64)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 64)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 64];
chi2_nr_meas += s_chi2_nr_meas[tid + 64];
// Store result to shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
}
__syncthreads();
  // TODO: __shfl_down can be used for reduction when only a single warp (32 threads) is left
  //       (see the illustrative warp-shuffle sketch after this kernel).
// #if (__CUDA_ARCH__ >= 300 )
// Do reduction with __shfl_down ...
// #else
// Fully unroll reduction within a single warp. Theoretically __syncthreads() is not necessary anymore
// as all threads are in the same warp. But with __syncthreads() the performance seems to be slightly increased.
// The reason for this is not yet clear.
if ((_block_size >= 64) && (tid < 32))
{
// Add to local variable.
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 32)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 32)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 32];
chi2_nr_meas += s_chi2_nr_meas[tid + 32];
// Store result to shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
}
__syncthreads();
if ((_block_size >= 32) && (tid < 16))
{
// Add to local variable.
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 16)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 16)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 16];
chi2_nr_meas += s_chi2_nr_meas[tid + 16];
// Store result to shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
}
__syncthreads();
if ((_block_size >= 16) && (tid < 8))
{
// Add to local variable.
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 8)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 8)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 8];
chi2_nr_meas += s_chi2_nr_meas[tid + 8];
// Store result to shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
}
__syncthreads();
if ((_block_size >= 8) && (tid < 4))
{
// Add to local variable.
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 4)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 4)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 4];
chi2_nr_meas += s_chi2_nr_meas[tid + 4];
// Store result to shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
}
__syncthreads();
if ((_block_size >= 4) && (tid < 2))
{
// Add to local variable.
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 2)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 2)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 2];
chi2_nr_meas += s_chi2_nr_meas[tid + 2];
// Store result to shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
}
__syncthreads();
if ((_block_size >= 2) && ( tid < 1))
{
// Add to local variable.
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 1)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 1)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 1];
chi2_nr_meas += s_chi2_nr_meas[tid + 1];
}
__syncthreads();
// Write result for this block to global memory.
if (tid == 0)
{
copyVector<kJacobianSize>(&gradient_cache[blockIdx.x*kJacobianSize],gradient);
copyVector<kHessianTriagN>(&hessian_cache[blockIdx.x*kHessianTriagN],hessian);
chi2[blockIdx.x] = chi2_temp;
nr_meas[blockIdx.x] = chi2_nr_meas;
}
}
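// Sketch of the warp-shuffle reduction mentioned in the TODO inside the
// kernel above. It is illustrative only (not used elsewhere in this file),
// assumes a 32-thread warp and reduces a single float; the gradient/Hessian
// accumulators would need one shuffle per element. The legacy __shfl_down
// intrinsic is used here because HIP also provides it; on CUDA >= 9 one would
// typically call __shfl_down_sync(0xffffffff, val, offset) instead.
__device__ __forceinline__ FloatTypeGpu warpReduceSumSketch(FloatTypeGpu val)
{
#pragma unroll
  for(int offset = 16; offset > 0; offset >>= 1)
  {
    val += __shfl_down(val, offset);
  }
  return val; // lane 0 ends up holding the sum over the warp
}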
void reduceHessianGradient(const size_t size,
const size_t patch_area,
const int threads,
const int blocks,
const FloatTypeGpu* __restrict__ jacobian_input_device,
const BoolTypeGpu* __restrict__ visibility_input_device,
const FloatTypeGpu* __restrict__ residual_input_device,
FloatTypeGpu* __restrict__ gradient_output,
FloatTypeGpu* __restrict__ hessian_output,
UIntTypeGpu* __restrict__ nrMeas,
FloatTypeGpu* __restrict__ chi2)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
if (isPow2(size))
{
switch (threads)
{
case 512:
SVO_ERROR_STREAM(" 512 threads exceed the 48kB of available shared memory per block!");
// k_jacobianReduceHessianGradient<512, true><<< dimGrid, dimBlock >>>(jacobian_input_device,
// residual_input_device,
// visibility_input_device,
// gradient_output,
// hessian_output,
// size, patch_area);
break;
case 256:
hipLaunchKernelGGL(( k_reduceHessianGradient<256, true>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 128:
hipLaunchKernelGGL(( k_reduceHessianGradient<128, true>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 64:
hipLaunchKernelGGL(( k_reduceHessianGradient<64, true>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 32:
hipLaunchKernelGGL(( k_reduceHessianGradient<32, true>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 16:
hipLaunchKernelGGL(( k_reduceHessianGradient<16, true>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 8:
hipLaunchKernelGGL(( k_reduceHessianGradient<8, true>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 4:
hipLaunchKernelGGL(( k_reduceHessianGradient<4, true>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 2:
hipLaunchKernelGGL(( k_reduceHessianGradient<2, true>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 1:
hipLaunchKernelGGL(( k_reduceHessianGradient<1, true>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
default:
SVO_ERROR_STREAM("The block size must be a power of 2 for the reduction step! Block size is " << threads << ".");
break;
}
}
else
{
switch (threads)
{
case 512:
SVO_ERROR_STREAM(" 512 threads exceed the 48kB of available shared memory per block!");
// hipLaunchKernelGGL(( k_reduceHessianGradient<256, false>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
// residual_input_device,
// visibility_input_device,
// gradient_output,
// hessian_output,
// nrMeas,
// chi2,
// size,patch_area);
break;
case 256:
hipLaunchKernelGGL(( k_reduceHessianGradient<256, false>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 128:
hipLaunchKernelGGL(( k_reduceHessianGradient<128, false>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 64:
hipLaunchKernelGGL(( k_reduceHessianGradient<64, false>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 32:
hipLaunchKernelGGL(( k_reduceHessianGradient<32, false>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 16:
hipLaunchKernelGGL(( k_reduceHessianGradient<16, false>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 8:
hipLaunchKernelGGL(( k_reduceHessianGradient<8, false>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 4:
hipLaunchKernelGGL(( k_reduceHessianGradient<4, false>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 2:
hipLaunchKernelGGL(( k_reduceHessianGradient<2, false>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 1:
hipLaunchKernelGGL(( k_reduceHessianGradient<1, false>), dim3(dimGrid), dim3(dimBlock) , 0, 0, jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
default:
SVO_ERROR_STREAM("The block size must be a power of 2 for the reduction step! Block size is " << threads << ".");
break;
}
}
}
void getNumBlocksAndThreads(const size_t nr_elements,
const int max_grid_size_device,
const int max_block_size_device,
const int max_blocks ,
const int max_threads,
const int elements_per_thread,
int &blocks, int &threads)
{
threads = (nr_elements < static_cast<size_t>(max_threads)*2) ? nextPow2((nr_elements + 1)/ 2) : max_threads;
blocks = (nr_elements + (threads * elements_per_thread - 1)) / (threads * elements_per_thread);
if ((float)threads*blocks > (float)max_grid_size_device * max_block_size_device)
{
throw std::runtime_error("Desired number of threads is too large.");
}
if(blocks > max_blocks)
{
blocks = max_blocks;
}
//TODO: comment this block if max_block is for sure < max_grid_size_device
if (blocks > max_grid_size_device)
{
    std::cout << "Desired number of blocks is bigger than the maximum grid size of the target device."
<< std::endl;
blocks /= 2;
threads *= 2;
}
}
void computeNumBlocksAndThreadsReduction(const size_t nr_features,
const size_t patch_area,
const GPUProperties& gpu_props,
int &num_blocks, int &num_threads)
{
const int max_threads = 256;
const int max_blocks = 64;
const size_t nr_elements = nr_features*patch_area;
// To reduce data of size N, log(N) elements should be reduced per thread for best performance.
// (c.f. cuda reduction example)
const int nr_elements_per_thread = ::max(
static_cast<int>(::floor(log2 (static_cast<double>(nr_elements)))),2);
getNumBlocksAndThreads(nr_elements, gpu_props.maxGridSizeX(), gpu_props.maxThreadsPerBlock(),
max_blocks, max_threads, nr_elements_per_thread, num_blocks, num_threads);
}
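// Worked example (numbers assumed): for nr_features = 120 and patch_area = 16
// we get nr_elements = 1920, so nr_elements_per_thread = floor(log2(1920)) = 10.
// getNumBlocksAndThreads() then picks threads = 256 (since 1920 >= 2 * 256)
// and blocks = (1920 + 256 * 10 - 1) / (256 * 10) = 1, well below max_blocks.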
void reduceHessianGradientCPU(const int num_blocks,
const LinearMemoryFloat& __restrict__ gradient_input_host,
const LinearMemoryFloat& __restrict__ hessian_input_host,
const LinearMemoryUInt& __restrict__ nMeas_input_host,
const LinearMemoryFloat& __restrict__ chi2_input_host,
FloatTypeGpu gradient_out[],
FloatTypeGpu hessian_out[],
FloatTypeGpu& chi2)
{
memset(hessian_out,0,SparseImgAlignBase::kHessianTriagN*sizeof(FloatTypeGpu));
memset(gradient_out,0,SparseImgAlignBase::kJacobianSize*sizeof(FloatTypeGpu));
chi2 = 0;
unsigned int n_meas = 0;
#pragma unroll 5
for(unsigned int block = 0; block< static_cast<unsigned int>(num_blocks); ++block)
{
#pragma unroll
for(unsigned int i = 0; i < SparseImgAlignBase::kHessianTriagN; ++i)
{
hessian_out[i] += hessian_input_host[block*SparseImgAlignBase::kHessianTriagN + i];
}
#pragma unroll
for(unsigned int i = 0; i < SparseImgAlignBase::kJacobianSize; ++i)
{
gradient_out[i] += gradient_input_host[block*SparseImgAlignBase::kJacobianSize + i];
}
n_meas += nMeas_input_host[block];
chi2 += chi2_input_host[block];
}
chi2 = chi2/n_meas;
}
FloatTypeGpu computeHessianAndGradient(SparseImgAlignBase::HessianMatrix* H,
SparseImgAlignBase::GradientVector* g,
const size_t nr_elements,
const size_t patch_area,
GpuCacheHandler& gpu_cache,
const int num_blocks, const int num_threads)
{
FloatTypeGpu hessian_triag[SparseImgAlignBase::kHessianTriagN];
FloatTypeGpu gradient[SparseImgAlignBase::kJacobianSize];
reduceHessianGradient(nr_elements, patch_area, num_threads, num_blocks,
gpu_cache.jacobian().cuData(),
gpu_cache.visibility().cuData(),
gpu_cache.residual().cuData(),
gpu_cache.gradientDevice().cuData(),
gpu_cache.hessianDevice().cuData(),
gpu_cache.nrVisibleDevice().cuData(),
gpu_cache.chi2Device().cuData());
hipDeviceSynchronize();
// Sum the results of each block on CPU.
FloatTypeGpu chi2;
gpu_cache.copyReductionCacheDeviceToHost();
reduceHessianGradientCPU(num_blocks,
gpu_cache.gradientHost(),
gpu_cache.hessianHost(),
gpu_cache.nrVisibleHost(),
gpu_cache.chi2Host(),
gradient,
hessian_triag,chi2);
// Copy result to H and g.
#pragma unroll
for(unsigned int row = 0, index = 0; row < SparseImgAlignBase::kJacobianSize; ++row)
{
#pragma unroll
for(unsigned int col = row; col < SparseImgAlignBase::kJacobianSize; ++col,++index)
{
(*H)(row,col) = (*H)(col,row) = hessian_triag[index];
}
}
#pragma unroll
for(unsigned int index = 0; index < SparseImgAlignBase::kJacobianSize; ++index)
{
(*g)(index,0) = gradient[index];
}
return chi2;
}
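// Layout note for the packed Hessian used above: hessian_triag stores the
// upper triangle row-major. Assuming kJacobianSize == 8 (which matches the
// 8-element jacobian stride used in the caches), kHessianTriagN == 36 and the
// entries map as (0,0)->0, ..., (0,7)->7, (1,1)->8, ..., (7,7)->35; the loop
// above mirrors each entry into both (row,col) and (col,row) of H.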
__global__ void k_disparities(const imp::cu::Matrix<FloatTypeGpu,3,4> T_cur_ref,
const imp::cu::PinholeCamera cam,
const Float3TypeGpu* __restrict__ xyz_ref,
const BoolTypeGpu* __restrict__ visibility_cache,
const Float2TypeGpu* __restrict__ uv,
const unsigned int nrFeatures,
const FloatTypeGpu not_visible_value,
FloatTypeGpu* __restrict__ disparity)
{
const unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i < nrFeatures)
{
// Check if projection is within the image.
if(visibility_cache[i] == 1)
{
Float2TypeGpu uv_cur = static_cast<Float2TypeGpu>(
cam.world2cam(static_cast<Float3TypeGpu>(transform(T_cur_ref,xyz_ref[i]))));
Float2TypeGpu disparity_vec = make_float2(uv[i].x - uv_cur.x, uv[i].y - uv_cur.y);
disparity[i] = sqrt(disparity_vec.x*disparity_vec.x + disparity_vec.y*disparity_vec.y);
}
else
{
disparity[i] = not_visible_value;
}
}
}
template<class T>
size_t copyArrayNonNegative(T* __restrict__ dest, T* __restrict__ src,size_t number_elements)
{
size_t number_elements_copied = 0;
for(size_t i = 0; i < number_elements; ++i)
{
if(src[i] > 0)
{
dest[number_elements_copied++] = src[i];
}
}
return number_elements_copied;
}
FloatTypeGpu computeDisparity(
const std::vector<std::vector<imp::cu::ImageGpu8uC1::Ptr>>& cur_pyramid,
const std::vector<imp::cu::Matrix<FloatTypeGpu,3,4>>& cu_T_cur_ref_bundle,
const std::vector<imp::cu::PinholeCamera::Ptr>& cu_camera_bundle,
const std::vector<size_t>& first_ftr_index,
const std::vector<size_t>& nbr_of_ftrs,
const size_t total_number_of_features,
GpuCacheHandler& gpu_cache)
{
for(int i = 0; i < static_cast<int>(cur_pyramid.size());++i)
{
imp::cu::Fragmentation<32,1> frag(nbr_of_ftrs.at(i));
hipLaunchKernelGGL(( k_disparities)
,
dim3(frag.dimGrid),dim3(frag.dimBlock)
, 0, 0, cu_T_cur_ref_bundle.at(i),
*cu_camera_bundle.at(i),
&gpu_cache.xyzRef().cuData()[first_ftr_index.at(i)],
&gpu_cache.visibility().cuData()[first_ftr_index.at(i)],
&gpu_cache.uv().cuData()[first_ftr_index.at(i)],
nbr_of_ftrs.at(i),
FLT_MAX,
&gpu_cache.disparity().cuData()[first_ftr_index.at(i)]);
}
hipDeviceSynchronize();
// Transfer disparities from GPU to CPU
FloatTypeGpu* disparity_pointer;
disparity_pointer = (FloatTypeGpu*) malloc(total_number_of_features*sizeof(FloatTypeGpu));
hipMemcpy(disparity_pointer,gpu_cache.disparity().cuData(),total_number_of_features*sizeof(FloatTypeGpu),hipMemcpyDeviceToHost);
  /// If the "not_visible_value" is set to FLT_MAX, the median value is always overestimating
  /// the true median value. We could also remove the values that are not reprojected by setting
  /// "not_visible_value" in k_disparities to -1 and then run the following operations
// {
// FloatTypeGpu* disparity_pointer_no_neg;
// disparity_pointer_no_neg = (FloatTypeGpu*) malloc(total_number_of_features*sizeof(FloatTypeGpu));
// size_t successfull_reprojections = copyArrayNonNegative<FloatTypeGpu>(disparity_pointer_no_neg,disparity_pointer,total_number_of_features);
// FloatTypeGpu* disparity_end = &disparity_pointer_no_neg[successfull_reprojections - 1];
// FloatTypeGpu* middle_ptr = &disparity_pointer_no_neg[successfull_reprojections/2];
// std::nth_element(disparity_pointer_no_neg, middle_ptr, disparity_end);
// std::cout << "Median with removal = " << *middle_ptr << std::endl;
// std::cout << "Total " << total_number_of_features << std::endl;
// std::cout << "successfull " << successfull_reprojections << std::endl;
// free(disparity_pointer_no_neg);
// }
FloatTypeGpu* disparity_end = &disparity_pointer[total_number_of_features - 1];
FloatTypeGpu* middle_ptr = &disparity_pointer[total_number_of_features/2];
std::nth_element(disparity_pointer, middle_ptr, disparity_end);
free(disparity_pointer);
return *middle_ptr;
}
} // namespace sparse_img_align_device_utils
} // namespace svo
| 023c84d7c47a1d0e353c4d918b386cf9f6adc860.cu | // This file is part of SVO - Semi-direct Visual Odometry.
//
// Copyright (C) 2014 Christian Forster <forster at ifi dot uzh dot ch>
// (Robotics and Perception Group, University of Zurich, Switzerland).
//
// This file is subject to the terms and conditions defined in the file
// 'LICENSE', which is part of this source code package.
#include <svo/img_align/sparse_img_align_device_utils.cuh>
#include <imp/cu_core/cu_texture.cuh>
#include <imp/cu_core/cu_utils.hpp>
#include <svo/common/logging.h>
#include <svo/img_align/sparse_img_align_base.h>
namespace svo {
GpuCacheHandler::GpuCacheHandler():
patch_area_(0), feature_capacity_(0), reduction_cache_capacity_(0)
{ }
GpuCacheHandler::GpuCacheHandler(const size_t patch_area):
patch_area_(patch_area), feature_capacity_(0), reduction_cache_capacity_(0)
{ }
void GpuCacheHandler::setPatchArea(const size_t patch_area)
{
patch_area_ = patch_area;
}
void GpuCacheHandler::reserveFeatureCapacity(const size_t capacity)
{
CHECK_GT(patch_area_,0);
  // Check if enough or too much memory is allocated.
if( (capacity > feature_capacity_) || (feature_capacity_ - capacity > kMaxStorageSurplus))
{
SVO_WARN_STREAM("Reallocate GPU memory. Changing capacity from " << feature_capacity_ << " to "
<< capacity << " features.");
uv_cache_.reset(new UvCache(capacity));
xyz_ref_cache_.reset(new XyzRefCache(capacity));
jacobian_proj_cache_.reset(new JacobianProjCache(capacity*kJacProjStride));
jacobian_cache_.reset(new JacobianCache(capacity*kJacStride*patch_area_));
residual_cache_.reset(new ResidualCache(capacity*patch_area_));
visibility_mask_.reset(new VisibilityMask(capacity));
ref_patch_cache_.reset(new RefPatchCache(capacity*patch_area_));
disparity_cache_.reset(new DistparitiyCache(capacity));
feature_capacity_ = capacity;
}
}
void GpuCacheHandler::reserveReductionCacheCapacity(const size_t capacity)
{
  // Check if enough or too much memory is allocated.
if( (capacity > reduction_cache_capacity_) || (reduction_cache_capacity_ - capacity > kMaxStorageSurplus))
{
SVO_WARN_STREAM("Reallocate memory for reduction step from " << reduction_cache_capacity_ << " to "
<< capacity << " blocks.");
hessian_reduction_cache_.reset(new HessianReductionCache(capacity*kHessianTriagStride));
gradient_reduction_cache_.reset(new GradientReductionCache(capacity*kJacStride));
chi2_reduction_cache_.reset(new GradientReductionCache(capacity));
nr_visible_cache_.reset(new NrVisibleCache(capacity));
hessian_reduction_cache_host_.reset(new HessianReductionCacheHost(capacity*kHessianTriagStride));
gradient_reduction_cache_host_.reset(new GradientReductionCacheHost(capacity*kJacStride));
chi2_reduction_cache_host_.reset(new GradientReductionCacheHost(capacity));
nr_visible_cache_host_.reset(new NrVisibleCacheHost(capacity));
reduction_cache_capacity_ = capacity;
}
else
{
// Set region of interest to the correct value to make
// copying from device to host possible.
if(nr_visible_cache_host_->roi().length() != capacity)
{
SVO_DEBUG_STREAM("Change region of interest of linear memory (before "
<< nr_visible_cache_host_->roi().length() << ", after "
<< capacity << " elements)");
hessian_reduction_cache_->setRoi(imp::Roi1u(0,capacity*kHessianTriagStride));
gradient_reduction_cache_->setRoi(imp::Roi1u(0,capacity*kJacStride));
chi2_reduction_cache_->setRoi(imp::Roi1u(0,capacity));
nr_visible_cache_->setRoi(imp::Roi1u(0,capacity));
hessian_reduction_cache_host_->setRoi(imp::Roi1u(0,capacity*kHessianTriagStride));
gradient_reduction_cache_host_->setRoi(imp::Roi1u(0,capacity*kJacStride));
chi2_reduction_cache_host_->setRoi(imp::Roi1u(0,capacity));
nr_visible_cache_host_->setRoi(imp::Roi1u(0,capacity));
}
}
}
inline void GpuCacheHandler::copyReductionCacheDeviceToHost()
{
hessian_reduction_cache_->copyTo(*hessian_reduction_cache_host_);
gradient_reduction_cache_->copyTo(*gradient_reduction_cache_host_);
chi2_reduction_cache_->copyTo(*chi2_reduction_cache_host_);
nr_visible_cache_->copyTo(*nr_visible_cache_host_);
}
namespace sparse_img_align_device_utils
{
__host__ __device__ __forceinline__
void setGx(imp::cu::Matrix<FloatTypeGpu,3,6>& __restrict__ g_x,
const Float3TypeGpu& __restrict__ p_in_imu)
{
g_x(0,0) = 1.0;
g_x(0,1) = 0.0;
g_x(0,2) = 0.0;
g_x(0,3) = 0.0;
g_x(0,4) = p_in_imu.z;
g_x(0,5) = -p_in_imu.y;
g_x(1,0) = 0.0;
g_x(1,1) = 1.0;
g_x(1,2) = 0.0;
g_x(1,3) = -p_in_imu.z;
g_x(1,4) = 0.0;
g_x(1,5) = p_in_imu.x;
g_x(2,0) = 0.0;
g_x(2,1) = 0.0;
g_x(2,2) = 1.0;
g_x(2,3) = p_in_imu.y;
g_x(2,4) = -p_in_imu.x;
g_x(2,5) = 0.0;
}
// TODO: This function should be a member function of the CPU camera
__host__ __device__ __forceinline__
void setPinholeJacobian(imp::cu::Matrix<FloatTypeGpu,2,3>& __restrict__ jac_cam,
const Float3TypeGpu& __restrict__ p_in_cam,
const FloatTypeGpu& __restrict__ focal_length)
{
FloatTypeGpu ratio_p_x_z_cam = p_in_cam.x/p_in_cam.z;
FloatTypeGpu ratio_p_y_z_cam = p_in_cam.y/p_in_cam.z;
FloatTypeGpu ratio_fl_p_z_cam = focal_length/p_in_cam.z;
jac_cam(0,0) = ratio_fl_p_z_cam;
jac_cam(0,1) = 0.0;
jac_cam(0,2) = -ratio_fl_p_z_cam*ratio_p_x_z_cam;
jac_cam(1,0) = 0.0;
jac_cam(1,1) = ratio_fl_p_z_cam;
jac_cam(1,2) = -ratio_fl_p_z_cam*ratio_p_y_z_cam;
}
__global__ void k_baseCachesGeneric(const imp::cu::Matrix<FloatTypeGpu,3,4> T_imu_cam,
const imp::cu::Matrix<FloatTypeGpu,3,3> R_imu_cam,
const FloatTypeGpu focal_length,
const Float3TypeGpu* __restrict__ p_in_cam,
FloatTypeGpu* __restrict__ jac_proj_cache,
const unsigned int nr_features)
{
const unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i < nr_features)
{
const Float3TypeGpu p_in_imu = transform(T_imu_cam,p_in_cam[i]);
imp::cu::Matrix<FloatTypeGpu,3,6> g_x;
setGx(g_x,p_in_imu);
imp::cu::Matrix<FloatTypeGpu,2,3> jac_cam;
setPinholeJacobian(jac_cam,p_in_cam[i],focal_length);
imp::cu::Matrix<FloatTypeGpu,2,6> jac_proj = ((jac_cam*R_imu_cam)*g_x);
// wite to buffer
int offset = 2*6*i;
#pragma unroll
for(int row = 0; row < 2;++row)
{
#pragma unroll
for(int col = 0; col < 6; ++col)
{
// times (-1) because of our definition of the photometric error
jac_proj_cache[offset + col] = -1.0f*jac_proj(row,col);
}
offset +=6;
}
}
}
__global__ void k_baseCachesPinhole(const imp::cu::Matrix<FloatTypeGpu,3,4> T_imu_cam,
const imp::cu::Matrix<FloatTypeGpu,3,3> R_cam_imu,
const FloatTypeGpu focal_length,
const Float3TypeGpu* __restrict__ p_in_cam,
FloatTypeGpu* __restrict__ jac_proj_cache,
const unsigned int nr_features)
{
const unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i < nr_features)
{
Float3TypeGpu p_in_imu = transform(T_imu_cam,p_in_cam[i]);
FloatTypeGpu ratio_p_x_z_cam = p_in_cam[i].x/p_in_cam[i].z;
FloatTypeGpu ratio_p_y_z_cam = p_in_cam[i].y/p_in_cam[i].z;
// times (-1) because of our definition of the photometric error
FloatTypeGpu ratio_fl_p_z_cam = (-1.0)*focal_length/p_in_cam[i].z;
FloatTypeGpu r00 = ratio_fl_p_z_cam*(R_cam_imu(0,0) - R_cam_imu(2,0)*ratio_p_x_z_cam);
FloatTypeGpu r01 = ratio_fl_p_z_cam*(R_cam_imu(0,1) - R_cam_imu(2,1)*ratio_p_x_z_cam);
FloatTypeGpu r02 = ratio_fl_p_z_cam*(R_cam_imu(0,2) - R_cam_imu(2,2)*ratio_p_x_z_cam);
FloatTypeGpu r10 = ratio_fl_p_z_cam*(R_cam_imu(1,0) - R_cam_imu(2,0)*ratio_p_y_z_cam);
FloatTypeGpu r11 = ratio_fl_p_z_cam*(R_cam_imu(1,1) - R_cam_imu(2,1)*ratio_p_y_z_cam);
FloatTypeGpu r12 = ratio_fl_p_z_cam*(R_cam_imu(1,2) - R_cam_imu(2,2)*ratio_p_y_z_cam);
const int offset = 2*6*i;
jac_proj_cache[offset] = r00;
jac_proj_cache[offset + 1] = r01;
jac_proj_cache[offset + 2] = r02;
jac_proj_cache[offset + 3] = -p_in_imu.z*r01 + p_in_imu.y*r02;
jac_proj_cache[offset + 4] = p_in_imu.z*r00 - p_in_imu.x*r02;
jac_proj_cache[offset + 5] = -p_in_imu.y*r00 + p_in_imu.x*r01;
jac_proj_cache[offset + 6] = r10;
jac_proj_cache[offset + 7] = r11;
jac_proj_cache[offset + 8] = r12;
jac_proj_cache[offset + 9] = -p_in_imu.z*r11 + p_in_imu.y*r12;
jac_proj_cache[offset + 10] = p_in_imu.z*r10 - p_in_imu.x*r12;
jac_proj_cache[offset + 11] = -p_in_imu.y*r10 + p_in_imu.x*r11;
}
}
void precomputeBaseCaches(std::vector<Float2TypeGpu>& uv_cache,
std::vector<Float3TypeGpu>& xyz_ref_cache,
const std::vector<size_t>& first_ftr_index,
const std::vector<size_t>& nbr_of_ftrs,
const std::vector<imp::cu::Matrix<FloatTypeGpu,3,4>::Ptr>& cu_T_imu_cam_bundle,
const std::vector<imp::cu::Matrix<FloatTypeGpu,3,4>::Ptr>& cu_T_cam_imu_bundle,
const std::vector<imp::cu::PinholeCamera::Ptr>& cu_camera_bundle,
const size_t& nbr_fts_to_track,
GpuCacheHandler& gpu_cache)
{
// Prepare the GPU buffers.
gpu_cache.reserveFeatureCapacity(nbr_fts_to_track);
// Transfer data from CPU to GPU.
LinearMemoryFloat2 uv_linear(reinterpret_cast<Float2PixelGpu*>(uv_cache.data()),uv_cache.size(),true);
LinearMemoryFloat3 xyz_linear(reinterpret_cast<Float3PixelGpu*>(xyz_ref_cache.data()),xyz_ref_cache.size(),true);
gpu_cache.uv().setRoi(uv_linear.roi());
gpu_cache.uv().copyFrom(uv_linear);
gpu_cache.xyzRef().setRoi(xyz_linear.roi());
gpu_cache.xyzRef().copyFrom(xyz_linear);
// Fill base caches.
for(int i = 0; i< static_cast<int>(cu_camera_bundle.size()); ++i)
{
imp::cu::Fragmentation<32,1> frag(nbr_of_ftrs.at(i));
k_baseCachesPinhole
<<<
frag.dimGrid,frag.dimBlock
>>>(*cu_T_imu_cam_bundle.at(i),
cu_T_cam_imu_bundle.at(i)->block<3,3>(0,0),
cu_camera_bundle.at(i)->fx(),
&gpu_cache.xyzRef().cuData()[first_ftr_index.at(i)],
&gpu_cache.jacProj().cuData()[first_ftr_index.at(i)*GpuCacheHandler::kJacProjStride],
nbr_of_ftrs.at(i));
}
cudaDeviceSynchronize();
}
__global__ void k_jacobianAndRefPatches(imp::cu::Texture2D ref_tex,
const Float2TypeGpu* __restrict__ uv,
const FloatTypeGpu* __restrict__ jac_proj_cache,
const int patch_size,
const int level,
const unsigned int nrFeatures,
FloatTypeGpu* __restrict__ jacobian_cache,
FloatTypeGpu* __restrict__ ref_patch_cache)
{
const unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i < nrFeatures)
{
const FloatTypeGpu scale = 1.0f/(1<<level);
const int patch_area = patch_size*patch_size;
const FloatTypeGpu upper_left_coord_x = uv[i].x*scale - (patch_size - 1)/2.0f;
const FloatTypeGpu upper_left_coord_y = uv[i].y*scale - (patch_size - 1)/2.0f;
size_t ref_patch_index_offset = patch_area*i;
size_t jacobian_index_offset = patch_area*GpuCacheHandler::kJacStride*i;
size_t jac_proj_cache_index_offset = GpuCacheHandler::kJacProjStride*i;
#pragma unroll 4
for(int row = 0; row < patch_size; ++row)
{
#pragma unroll 4
for(int col = 0; col < patch_size; ++col, ++ref_patch_index_offset, jacobian_index_offset += GpuCacheHandler::kJacStride)
{
FloatTypeGpu center_texel;
imp::cu::tex2DFetch(center_texel, ref_tex,upper_left_coord_x + col, upper_left_coord_y + row);
ref_patch_cache[ref_patch_index_offset] = 255.0f*center_texel;
FloatTypeGpu dx_left,dx_right,dy_up,dy_down;
imp::cu::tex2DFetch(dx_left, ref_tex,upper_left_coord_x + col - 1, upper_left_coord_y + row);
imp::cu::tex2DFetch(dx_right, ref_tex,upper_left_coord_x + col + 1, upper_left_coord_y + row);
imp::cu::tex2DFetch(dy_up, ref_tex,upper_left_coord_x + col, upper_left_coord_y + row - 1);
imp::cu::tex2DFetch(dy_down, ref_tex,upper_left_coord_x + col, upper_left_coord_y + row + 1);
const FloatTypeGpu dx = 0.5f*(dx_right - dx_left)*255.0f;
const FloatTypeGpu dy = 0.5f*(dy_down - dy_up)*255.0f;
#pragma unroll
for(int i = 0; i < 6; ++i)
{
jacobian_cache[jacobian_index_offset + i] = (dx*(jac_proj_cache[jac_proj_cache_index_offset + i])
+ dy*(jac_proj_cache[jac_proj_cache_index_offset + 6 + i]))*scale;
}
//jacobian_cache[jacobian_index_offset + 6] = -255*center_texel;
//jacobian_cache[jacobian_index_offset + 7] = -1;
jacobian_cache[jacobian_index_offset + 6] = 0.0;
jacobian_cache[jacobian_index_offset + 7] = 0.0;
}
}
}
}
// TODO: imp::ImagePyramid version (currently not working)
//void precomputeJacobiansAndRefPatches(
// std::vector<imp::ImagePyramid8uC1::Ptr>& ref_pyramid,
// const int level,
// const int patch_size,
// const bool estimate_alpha,
// const bool estimate_beta,
// const std::vector<size_t>& first_ftr_index,
// const std::vector<size_t>& nbr_of_ftrs,
// GpuCacheHandler& gpu_cache)
//{
// int patch_area = patch_size*patch_size;
// for(int ii = 0; ii < static_cast<int>(pyramid.size());++ii)
// {
// std::shared_ptr<imp::cu::Texture2D> ref_tex =
// std::dynamic_pointer_cast<imp::cu::ImageGpu8uC1>(ref_pyramid.at(ii)->at(level))
// ->genTexture(false,cudaFilterModeLinear,cudaAddressModeBorder,cudaReadModeNormalizedFloat);
// dim3 threads(32);
// dim3 blocks((nbr_of_ftrs.at(ii) + threads.x-1)/threads.x);
// std::cout << " features " << ii << " = " << nbr_of_ftrs.at(ii) << std::endl;
// k_jacobianAndRefPatches<<<blocks,threads>>>(*ref_tex.get(),
// reinterpret_cast<float2*>(&gpu_cache.uv().data()[first_ftr_index.at(ii)]),
// reinterpret_cast<float*>(&gpu_cache.jacProj().data()[first_ftr_index.at(ii)*12]),
// patch_size , level , nbr_of_ftrs.at(ii),
// reinterpret_cast<float*>(&gpu_cache.jacobian().data()[first_ftr_index.at(ii)*8*patch_area]),
// reinterpret_cast<float*>(&gpu_cache.refPatch().data()[first_ftr_index.at(ii)*patch_area]));
// }
// cudaDeviceSynchronize();
//}
void precomputeJacobiansAndRefPatches(
const std::vector<std::vector<imp::cu::ImageGpu8uC1::Ptr> >& ref_pyramid,
const int level,
const int patch_size,
const bool estimate_alpha,
const bool estimate_beta,
const std::vector<size_t>& first_ftr_index,
const std::vector<size_t>& nbr_of_ftrs,
GpuCacheHandler& gpu_cache)
{
int patch_area = patch_size*patch_size;
for(int i = 0; i < static_cast<int>(ref_pyramid.size());++i)
{
std::shared_ptr<imp::cu::Texture2D> ref_tex =
std::dynamic_pointer_cast<imp::cu::ImageGpu8uC1>(ref_pyramid.at(i).at(level))
->genTexture(false,cudaFilterModeLinear,cudaAddressModeBorder,cudaReadModeNormalizedFloat);
imp::cu::Fragmentation<32,1> frag(nbr_of_ftrs.at(i));
k_jacobianAndRefPatches
<<<
frag.dimGrid, frag.dimBlock
>>>(*ref_tex,&gpu_cache.uv().cuData()[first_ftr_index.at(i)],
&gpu_cache.jacProj().cuData()[first_ftr_index.at(i)*GpuCacheHandler::kJacProjStride],
patch_size , level , nbr_of_ftrs.at(i),
&gpu_cache.jacobian().cuData()[first_ftr_index.at(i)*GpuCacheHandler::kJacStride*patch_area],
&gpu_cache.refPatch().cuData()[first_ftr_index.at(i)*patch_area]);
}
cudaDeviceSynchronize();
}
__global__ void k_residuals(const imp::cu::Texture2D cur_tex,
const int width,
const int height,
const imp::cu::Matrix<FloatTypeGpu,3,4> T_cur_ref,
const imp::cu::PinholeCamera cam,
const FloatTypeGpu* __restrict__ ref_patch_cache,
const Float3TypeGpu* __restrict__ xyz_ref,
FloatTypeGpu alpha_illumination,
FloatTypeGpu beta_illumionation,
const int patch_size,
const int level,
const unsigned int nrFeatures,
BoolTypeGpu* __restrict__ visibility_cache,
FloatTypeGpu* __restrict__ residual_cache)
{
const unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i < nrFeatures)
{
FloatTypeGpu scale = 1.0f/(1<<level);
const int patch_area = patch_size*patch_size;
Float2TypeGpu uv_cur = static_cast<Float2TypeGpu>(
cam.world2cam(static_cast<Float3TypeGpu>(transform(T_cur_ref,xyz_ref[i]))));
const FloatTypeGpu upper_left_coord_x = uv_cur.x*scale - (patch_size - 1)/2.0f;
const FloatTypeGpu upper_left_coord_y = uv_cur.y*scale - (patch_size - 1)/2.0f;
// Check if projection is within the image.
if(upper_left_coord_x < 0.0f || upper_left_coord_y < 0.0f
|| upper_left_coord_x + patch_size >= width - 1 || upper_left_coord_y + patch_size >= height - 1)
{
visibility_cache[i] = 0;
}
else
{
visibility_cache[i] = 1;
int pixel = 0;
#pragma unroll 4
for(int row = 0; row < patch_size; ++row)
{
#pragma unroll 4
for(int col = 0; col < patch_size; ++col,++pixel)
{
FloatTypeGpu cur_Texel;
imp::cu::tex2DFetch(cur_Texel, cur_tex,upper_left_coord_x + col, upper_left_coord_y + row);
residual_cache[i*patch_area + pixel] = static_cast<FloatTypeGpu>(
255.0*cur_Texel*(1.0 + alpha_illumination) + beta_illumionation)
- ref_patch_cache[i*patch_area + pixel];
}
}
}
}
}
void computeResidualsOfFrame(
const std::vector<std::vector<imp::cu::ImageGpu8uC1::Ptr>>& cur_pyramid,
const std::vector<imp::cu::Matrix<FloatTypeGpu,3,4>>& cu_T_cur_ref_bundle,
const std::vector<imp::cu::PinholeCamera::Ptr>& cu_camera_bundle,
const std::vector<size_t>& first_ftr_index,
const std::vector<size_t>& nbr_of_ftrs,
const int level,
const int patch_size,
const bool estimate_alpha,
const bool estimate_beta,
GpuCacheHandler& gpu_cache)
{
const size_t patch_area = patch_size*patch_size;
for(int i = 0; i < static_cast<int>(cur_pyramid.size());++i)
{
std::shared_ptr<imp::cu::Texture2D> cur_tex = std::dynamic_pointer_cast<imp::cu::ImageGpu8uC1>(
cur_pyramid.at(i).at(level))
->genTexture(false,cudaFilterModeLinear,cudaAddressModeBorder,cudaReadModeNormalizedFloat);
imp::cu::Fragmentation<32,1> frag(nbr_of_ftrs.at(i));
k_residuals
<<<
frag.dimGrid,frag.dimBlock
>>>(*cur_tex, cur_pyramid.at(i).at(level)->width(),
cur_pyramid.at(i).at(level)->height(),
cu_T_cur_ref_bundle.at(i),
*cu_camera_bundle.at(i),
&gpu_cache.refPatch().cuData()[first_ftr_index.at(i)*patch_area],
&gpu_cache.xyzRef().cuData()[first_ftr_index.at(i)],
estimate_alpha, estimate_beta, patch_size, level, nbr_of_ftrs.at(i),
&gpu_cache.visibility().cuData()[first_ftr_index.at(i)],
&gpu_cache.residual().cuData()[first_ftr_index.at(i)*patch_area]);
}
cudaDeviceSynchronize();
}
inline unsigned int nextPow2(unsigned int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
bool isPow2(unsigned int x)
{
return ((x&(x-1))==0);
}
template <size_t _n_elements>
__host__ __device__ __forceinline__
void setToZero(FloatTypeGpu* mem)
{
#pragma unroll
for(int ind = 0; ind < _n_elements; ++ind)
{
mem[ind] = 0.0;
}
}
template <size_t _matrix_size>
__host__ __device__ __forceinline__
void setVVTUpperTriag(FloatTypeGpu* __restrict__ upper_triag_row_maj,
const FloatTypeGpu* __restrict__ vect,
const FloatTypeGpu& __restrict__ weight = 1.0)
{
int index = 0;
#pragma unroll
for(int row = 0; row < _matrix_size; ++row)
{
#pragma unroll
for(int col = row; col < _matrix_size; ++col,++index)
{
upper_triag_row_maj[index] = weight*vect[row]*vect[col];
}
}
}
template <size_t _matrix_size>
__host__ __device__ __forceinline__
void addVVTUpperTriag(FloatTypeGpu* __restrict__ upper_triag_row_maj,
const FloatTypeGpu* __restrict__ vect,
const FloatTypeGpu& __restrict__ weight = 1.0)
{
int index = 0;
#pragma unroll
for(int row = 0; row < _matrix_size; ++row)
{
#pragma unroll
for(int col = row; col < _matrix_size; ++col,++index)
{
upper_triag_row_maj[index] += weight*vect[row]*vect[col];
}
}
}
template <size_t _vector_size>
__host__ __device__ __forceinline__
void addVector(FloatTypeGpu* __restrict__ sum_vect,
const FloatTypeGpu* __restrict__ addend_vect)
{
#pragma unroll
for(int ind = 0; ind < _vector_size; ++ind)
{
sum_vect[ind] += addend_vect[ind];
}
}
template <size_t _vector_size>
__host__ __device__ __forceinline__
void addWeightedVector(FloatTypeGpu* __restrict__ sum_vect,
const FloatTypeGpu* __restrict__ addend_vect,
const FloatTypeGpu& __restrict__ weight = 1.0)
{
#pragma unroll
for(int ind = 0; ind < _vector_size; ++ind)
{
sum_vect[ind] += weight*addend_vect[ind];
}
}
template <size_t _vector_size>
__host__ __device__ __forceinline__
void subWeightedVector(FloatTypeGpu* __restrict__ sum_vect,
const FloatTypeGpu* __restrict__ addend_vect,
const FloatTypeGpu& __restrict__ weight = 1.0)
{
#pragma unroll
for(int ind = 0; ind < _vector_size; ++ind)
{
sum_vect[ind] -= weight*addend_vect[ind];
}
}
template <size_t _vector_size>
__host__ __device__ __forceinline__
void setWeightedVector(FloatTypeGpu* __restrict__ dest_vect,
const FloatTypeGpu* __restrict__ src_vect,
const FloatTypeGpu& __restrict__ weight = 1.0)
{
#pragma unroll
for(int ind = 0; ind < _vector_size; ++ind)
{
dest_vect[ind] = weight*src_vect[ind];
}
}
template <size_t _vector_size>
__host__ __device__ __forceinline__
void copyVector(FloatTypeGpu* __restrict__ dest_vect,
const FloatTypeGpu* __restrict__ src_vect)
{
#pragma unroll
for(int ind = 0; ind < _vector_size; ++ind)
{
dest_vect[ind] = src_vect[ind];
}
}
// _block_size must be power of 2
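// Block-wise reduction of the per-pixel Jacobians into the upper-triangular Gauss-Newton Hessian
// (sum of w*J*J^T), the gradient (-sum of w*J*r), the weighted chi2 and the number of visible measurements.
// Each block writes one partial result per output; the partial results are summed on the CPU afterwards.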
template <unsigned int _block_size, bool n_is_pow2>
__global__ void k_reduceHessianGradient(const FloatTypeGpu* __restrict__ jacobian_cache,
const FloatTypeGpu* __restrict__ residual_cache,
const BoolTypeGpu* __restrict__ visibility_cache,
FloatTypeGpu* __restrict__ gradient_cache,
FloatTypeGpu* __restrict__ hessian_cache,
UIntTypeGpu* __restrict__ nr_meas,
FloatTypeGpu* __restrict__ chi2,
const unsigned int n_elements,
const unsigned int patch_area)
{
constexpr unsigned int kHessianTriagN = SparseImgAlignBase::kHessianTriagN;
constexpr unsigned int kJacobianSize = SparseImgAlignBase::kJacobianSize;
__shared__ FloatTypeGpu s_hessian_data[_block_size*kHessianTriagN];
__shared__ FloatTypeGpu s_gradient_data[_block_size*kJacobianSize];
__shared__ FloatTypeGpu s_chi2[_block_size];
__shared__ UIntTypeGpu s_chi2_nr_meas[_block_size];
FloatTypeGpu jacobian[kJacobianSize];
FloatTypeGpu gradient[kJacobianSize];
FloatTypeGpu hessian[kHessianTriagN];
FloatTypeGpu chi2_temp;
UIntTypeGpu chi2_nr_meas = 0;
const unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*_block_size*2 + threadIdx.x;
const unsigned int gridSize = _block_size*2*gridDim.x;
const unsigned int hessian_index = tid*kHessianTriagN;
const unsigned int gradient_index = tid*kJacobianSize;
// We reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread.
// We read from global memory and write to shared memory.
// Get first element.
if((!n_is_pow2)&&(i >= n_elements))
{
setToZero<kJacobianSize>(gradient);
setToZero<kHessianTriagN>(hessian);
chi2_temp = 0.0;
}
else
{
const unsigned int visib_index = i/patch_area;
BoolTypeGpu visible = visibility_cache[visib_index];
if(visible == 1)
{
FloatTypeGpu residual = residual_cache[i];
// TODO: add weighting function
FloatTypeGpu weight = 1.0;// weight_function(residual/weight_scale);
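// A possible robust weighting (sketch only, not part of the current implementation): a Huber-style
// weight with tuning threshold k would be
//   weight = (fabsf(residual) <= k) ? 1.0f : k / fabsf(residual);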
copyVector<kJacobianSize>(jacobian,&jacobian_cache[i*kJacobianSize]);
setVVTUpperTriag<kJacobianSize>(hessian,jacobian,weight);
setWeightedVector<kJacobianSize>(gradient,jacobian, -weight*residual);
chi2_temp = residual*residual*weight;
++chi2_nr_meas;
}
else
{
setToZero<kJacobianSize>(gradient);
setToZero<kHessianTriagN>(hessian);
chi2_temp = 0.0;
}
// Get second element.
// Ensure we don't read out of bounds -- this is optimized away for powerOf2 problem size.
if (n_is_pow2 || i + _block_size < n_elements)
{
i += _block_size;
const unsigned int visib_index = i/patch_area;
BoolTypeGpu visible = visibility_cache[visib_index];
if(visible == 1)
{
FloatTypeGpu residual = residual_cache[i];
//TODO: add weighting function
FloatTypeGpu weight = 1.0;//weight_function(residual/weight_scale);
copyVector<kJacobianSize>(jacobian,&jacobian_cache[i*kJacobianSize]);
addVVTUpperTriag<kJacobianSize>(hessian,jacobian,weight);
subWeightedVector<kJacobianSize>(gradient,jacobian, weight*residual);
chi2_temp += residual*residual*weight;
++chi2_nr_meas;
}
}
i += (gridSize - _block_size);
}
// Add further elements if available.
while (i < n_elements)
{
const unsigned int visib_index = i/patch_area;
BoolTypeGpu visible = visibility_cache[visib_index];
if(visible == 1)
{
FloatTypeGpu residual = residual_cache[i];
//TODO: add weighting function
FloatTypeGpu weight = 1.0;// weight_function(residual/weight_scale);
copyVector<kJacobianSize>(jacobian,&jacobian_cache[i*kJacobianSize]);
addVVTUpperTriag<kJacobianSize>(hessian,jacobian,weight);
subWeightedVector<kJacobianSize>(gradient,jacobian, weight*residual);
chi2_temp += residual*residual*weight;
++chi2_nr_meas;
}
// Add second element.
// Ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays.
if (n_is_pow2 || i + _block_size < n_elements)
{
i += _block_size;
const unsigned int visib_index = i/patch_area;
BoolTypeGpu visible = visibility_cache[visib_index];
if(visible == 1)
{
FloatTypeGpu residual = residual_cache[i];
//TODO: add weighting function
FloatTypeGpu weight = 1.0;// visible*weight_function(residual/weight_scale);
copyVector<kJacobianSize>(jacobian,&jacobian_cache[i*kJacobianSize]);
addVVTUpperTriag<kJacobianSize>(hessian,jacobian,weight);
subWeightedVector<kJacobianSize>(gradient,jacobian, weight*residual);
chi2_temp += residual*residual*weight;
++chi2_nr_meas;
}
}
i += (gridSize - _block_size);
}
// Each thread puts its local sum into shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
__syncthreads();
// Do reduction in shared mem.
if ((_block_size >= 512) && (tid < 256))
{
// Add to local variable.
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 256)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 256)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 256];
chi2_nr_meas += s_chi2_nr_meas[tid + 256];
// Store result to shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
}
__syncthreads();
if ((_block_size >= 256) &&(tid < 128))
{
// add to local variable
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 128)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 128)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 128];
chi2_nr_meas += s_chi2_nr_meas[tid + 128];
// Store result to shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
}
__syncthreads();
if ((_block_size >= 128) && (tid < 64))
{
// add to local variable
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 64)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 64)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 64];
chi2_nr_meas += s_chi2_nr_meas[tid + 64];
// Store result to shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
}
__syncthreads();
// TODO: __shfl_down can be used for reduction when only a single warp (32 threads) is left.
// #if (__CUDA_ARCH__ >= 300 )
// Do reduction with __shfl_down ...
// #else
// Fully unroll reduction within a single warp. Theoretically __syncthreads() is not necessary anymore
// as all threads are in the same warp. In practice, however, keeping the __syncthreads() calls seems
// to improve performance slightly; the reason for this is not yet clear.
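// Sketch of the warp-shuffle alternative (assumes compute capability >= 3.0 and CUDA 9+, not used here):
// once only a single warp remains active, each scalar accumulator could be reduced without shared memory,
//   for (int offset = 16; offset > 0; offset >>= 1)
//     chi2_temp += __shfl_down_sync(0xffffffffu, chi2_temp, offset);
// and analogously for every entry of gradient[] and hessian[].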
if ((_block_size >= 64) && (tid < 32))
{
// Add to local variable.
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 32)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 32)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 32];
chi2_nr_meas += s_chi2_nr_meas[tid + 32];
// Store result to shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
}
__syncthreads();
if ((_block_size >= 32) && (tid < 16))
{
// Add to local variable.
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 16)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 16)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 16];
chi2_nr_meas += s_chi2_nr_meas[tid + 16];
// Store result to shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
}
__syncthreads();
if ((_block_size >= 16) && (tid < 8))
{
// Add to local variable.
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 8)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 8)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 8];
chi2_nr_meas += s_chi2_nr_meas[tid + 8];
// Store result to shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
}
__syncthreads();
if ((_block_size >= 8) && (tid < 4))
{
// Add to local variable.
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 4)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 4)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 4];
chi2_nr_meas += s_chi2_nr_meas[tid + 4];
// Store result to shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
}
__syncthreads();
if ((_block_size >= 4) && (tid < 2))
{
// Add to local variable.
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 2)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 2)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 2];
chi2_nr_meas += s_chi2_nr_meas[tid + 2];
// Store result to shared memory.
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient);
copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian);
s_chi2[tid] = chi2_temp;
s_chi2_nr_meas[tid] = chi2_nr_meas;
}
__syncthreads();
if ((_block_size >= 2) && ( tid < 1))
{
// Add to local variable.
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 1)*kJacobianSize]);
addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 1)*kHessianTriagN]);
chi2_temp += s_chi2[tid + 1];
chi2_nr_meas += s_chi2_nr_meas[tid + 1];
}
__syncthreads();
// Write result for this block to global memory.
if (tid == 0)
{
copyVector<kJacobianSize>(&gradient_cache[blockIdx.x*kJacobianSize],gradient);
copyVector<kHessianTriagN>(&hessian_cache[blockIdx.x*kHessianTriagN],hessian);
chi2[blockIdx.x] = chi2_temp;
nr_meas[blockIdx.x] = chi2_nr_meas;
}
}
void reduceHessianGradient(const size_t size,
const size_t patch_area,
const int threads,
const int blocks,
const FloatTypeGpu* __restrict__ jacobian_input_device,
const BoolTypeGpu* __restrict__ visibility_input_device,
const FloatTypeGpu* __restrict__ residual_input_device,
FloatTypeGpu* __restrict__ gradient_output,
FloatTypeGpu* __restrict__ hessian_output,
UIntTypeGpu* __restrict__ nrMeas,
FloatTypeGpu* __restrict__ chi2)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
if (isPow2(size))
{
switch (threads)
{
case 512:
SVO_ERROR_STREAM(" 512 threads exceed the 48kB of available shared memory per block!");
// k_jacobianReduceHessianGradient<512, true><<< dimGrid, dimBlock >>>(jacobian_input_device,
// residual_input_device,
// visibility_input_device,
// gradient_output,
// hessian_output,
// size, patch_area);
break;
case 256:
k_reduceHessianGradient<256, true><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 128:
k_reduceHessianGradient<128, true><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 64:
k_reduceHessianGradient<64, true><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 32:
k_reduceHessianGradient<32, true><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 16:
k_reduceHessianGradient<16, true><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 8:
k_reduceHessianGradient<8, true><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 4:
k_reduceHessianGradient<4, true><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 2:
k_reduceHessianGradient<2, true><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 1:
k_reduceHessianGradient<1, true><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
default:
SVO_ERROR_STREAM("The block size must be a power of 2 for the reduction step! Block size is " << threads << ".");
break;
}
}
else
{
switch (threads)
{
case 512:
SVO_ERROR_STREAM(" 512 threads exceed the 48kB of available shared memory per block!");
// k_reduceHessianGradient<256, false><<< dimGrid, dimBlock >>>(jacobian_input_device,
// residual_input_device,
// visibility_input_device,
// gradient_output,
// hessian_output,
// nrMeas,
// chi2,
// size,patch_area);
break;
case 256:
k_reduceHessianGradient<256, false><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 128:
k_reduceHessianGradient<128, false><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 64:
k_reduceHessianGradient<64, false><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 32:
k_reduceHessianGradient<32, false><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 16:
k_reduceHessianGradient<16, false><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 8:
k_reduceHessianGradient<8, false><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 4:
k_reduceHessianGradient<4, false><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 2:
k_reduceHessianGradient<2, false><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
case 1:
k_reduceHessianGradient<1, false><<< dimGrid, dimBlock >>>(jacobian_input_device,
residual_input_device,
visibility_input_device,
gradient_output,
hessian_output,
nrMeas,
chi2,
size,
patch_area);
break;
default:
SVO_ERROR_STREAM("The block size must be a power of 2 for the reduction step! Block size is " << threads << ".");
break;
}
}
}
void getNumBlocksAndThreads(const size_t nr_elements,
const int max_grid_size_device,
const int max_block_size_device,
const int max_blocks ,
const int max_threads,
const int elements_per_thread,
int &blocks, int &threads)
{
threads = (nr_elements < static_cast<size_t>(max_threads)*2) ? nextPow2((nr_elements + 1)/ 2) : max_threads;
blocks = (nr_elements + (threads * elements_per_thread - 1)) / (threads * elements_per_thread);
if ((float)threads*blocks > (float)max_grid_size_device * max_block_size_device)
{
throw std::runtime_error("Desired number of threads is too large.");
}
if(blocks > max_blocks)
{
blocks = max_blocks;
}
// TODO: this block can be removed if max_blocks is guaranteed to be smaller than max_grid_size_device.
if (blocks > max_grid_size_device)
{
std::cout << "Desired number of blocks is bigger then the maximum grid size of the target device."
<< std::endl;
blocks /= 2;
threads *= 2;
}
}
void computeNumBlocksAndThreadsReduction(const size_t nr_features,
const size_t patch_area,
const GPUProperties& gpu_props,
int &num_blocks, int &num_threads)
{
const int max_threads = 256;
const int max_blocks = 64;
const size_t nr_elements = nr_features*patch_area;
// To reduce data of size N, log(N) elements should be reduced per thread for best performance.
// (c.f. cuda reduction example)
const int nr_elements_per_thread = std::max(
static_cast<int>(std::floor(log2 (static_cast<double>(nr_elements)))),2);
getNumBlocksAndThreads(nr_elements, gpu_props.maxGridSizeX(), gpu_props.maxThreadsPerBlock(),
max_blocks, max_threads, nr_elements_per_thread, num_blocks, num_threads);
}
void reduceHessianGradientCPU(const int num_blocks,
const LinearMemoryFloat& __restrict__ gradient_input_host,
const LinearMemoryFloat& __restrict__ hessian_input_host,
const LinearMemoryUInt& __restrict__ nMeas_input_host,
const LinearMemoryFloat& __restrict__ chi2_input_host,
FloatTypeGpu gradient_out[],
FloatTypeGpu hessian_out[],
FloatTypeGpu& chi2)
{
memset(hessian_out,0,SparseImgAlignBase::kHessianTriagN*sizeof(FloatTypeGpu));
memset(gradient_out,0,SparseImgAlignBase::kJacobianSize*sizeof(FloatTypeGpu));
chi2 = 0;
unsigned int n_meas = 0;
#pragma unroll 5
for(unsigned int block = 0; block< static_cast<unsigned int>(num_blocks); ++block)
{
#pragma unroll
for(unsigned int i = 0; i < SparseImgAlignBase::kHessianTriagN; ++i)
{
hessian_out[i] += hessian_input_host[block*SparseImgAlignBase::kHessianTriagN + i];
}
#pragma unroll
for(unsigned int i = 0; i < SparseImgAlignBase::kJacobianSize; ++i)
{
gradient_out[i] += gradient_input_host[block*SparseImgAlignBase::kJacobianSize + i];
}
n_meas += nMeas_input_host[block];
chi2 += chi2_input_host[block];
}
chi2 = chi2/n_meas;
}
FloatTypeGpu computeHessianAndGradient(SparseImgAlignBase::HessianMatrix* H,
SparseImgAlignBase::GradientVector* g,
const size_t nr_elements,
const size_t patch_area,
GpuCacheHandler& gpu_cache,
const int num_blocks, const int num_threads)
{
FloatTypeGpu hessian_triag[SparseImgAlignBase::kHessianTriagN];
FloatTypeGpu gradient[SparseImgAlignBase::kJacobianSize];
reduceHessianGradient(nr_elements, patch_area, num_threads, num_blocks,
gpu_cache.jacobian().cuData(),
gpu_cache.visibility().cuData(),
gpu_cache.residual().cuData(),
gpu_cache.gradientDevice().cuData(),
gpu_cache.hessianDevice().cuData(),
gpu_cache.nrVisibleDevice().cuData(),
gpu_cache.chi2Device().cuData());
cudaDeviceSynchronize();
// Sum the results of each block on CPU.
FloatTypeGpu chi2;
gpu_cache.copyReductionCacheDeviceToHost();
reduceHessianGradientCPU(num_blocks,
gpu_cache.gradientHost(),
gpu_cache.hessianHost(),
gpu_cache.nrVisibleHost(),
gpu_cache.chi2Host(),
gradient,
hessian_triag,chi2);
// Copy result to H and g.
#pragma unroll
for(unsigned int row = 0, index = 0; row < SparseImgAlignBase::kJacobianSize; ++row)
{
#pragma unroll
for(unsigned int col = row; col < SparseImgAlignBase::kJacobianSize; ++col,++index)
{
(*H)(row,col) = (*H)(col,row) = hessian_triag[index];
}
}
#pragma unroll
for(unsigned int index = 0; index < SparseImgAlignBase::kJacobianSize; ++index)
{
(*g)(index,0) = gradient[index];
}
return chi2;
}
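// Computes, for every visible feature, the norm of the 2D displacement between the stored reference
// pixel position and its reprojection into the current frame; invisible features get not_visible_value.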
__global__ void k_disparities(const imp::cu::Matrix<FloatTypeGpu,3,4> T_cur_ref,
const imp::cu::PinholeCamera cam,
const Float3TypeGpu* __restrict__ xyz_ref,
const BoolTypeGpu* __restrict__ visibility_cache,
const Float2TypeGpu* __restrict__ uv,
const unsigned int nrFeatures,
const FloatTypeGpu not_visible_value,
FloatTypeGpu* __restrict__ disparity)
{
const unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i < nrFeatures)
{
// Check if projection is within the image.
if(visibility_cache[i] == 1)
{
Float2TypeGpu uv_cur = static_cast<Float2TypeGpu>(
cam.world2cam(static_cast<Float3TypeGpu>(transform(T_cur_ref,xyz_ref[i]))));
Float2TypeGpu disparity_vec = make_float2(uv[i].x - uv_cur.x, uv[i].y - uv_cur.y);
disparity[i] = sqrt(disparity_vec.x*disparity_vec.x + disparity_vec.y*disparity_vec.y);
}
else
{
disparity[i] = not_visible_value;
}
}
}
template<class T>
size_t copyArrayNonNegative(T* __restrict__ dest, T* __restrict__ src,size_t number_elements)
{
size_t number_elements_copied = 0;
for(size_t i = 0; i < number_elements; ++i)
{
if(src[i] > 0)
{
dest[number_elements_copied++] = src[i];
}
}
return number_elements_copied;
}
FloatTypeGpu computeDisparity(
const std::vector<std::vector<imp::cu::ImageGpu8uC1::Ptr>>& cur_pyramid,
const std::vector<imp::cu::Matrix<FloatTypeGpu,3,4>>& cu_T_cur_ref_bundle,
const std::vector<imp::cu::PinholeCamera::Ptr>& cu_camera_bundle,
const std::vector<size_t>& first_ftr_index,
const std::vector<size_t>& nbr_of_ftrs,
const size_t total_number_of_features,
GpuCacheHandler& gpu_cache)
{
for(int i = 0; i < static_cast<int>(cur_pyramid.size());++i)
{
imp::cu::Fragmentation<32,1> frag(nbr_of_ftrs.at(i));
k_disparities
<<<
frag.dimGrid,frag.dimBlock
>>>(cu_T_cur_ref_bundle.at(i),
*cu_camera_bundle.at(i),
&gpu_cache.xyzRef().cuData()[first_ftr_index.at(i)],
&gpu_cache.visibility().cuData()[first_ftr_index.at(i)],
&gpu_cache.uv().cuData()[first_ftr_index.at(i)],
nbr_of_ftrs.at(i),
FLT_MAX,
&gpu_cache.disparity().cuData()[first_ftr_index.at(i)]);
}
cudaDeviceSynchronize();
// Transfer disparities from GPU to CPU
FloatTypeGpu* disparity_pointer;
disparity_pointer = (FloatTypeGpu*) malloc(total_number_of_features*sizeof(FloatTypeGpu));
cudaMemcpy(disparity_pointer,gpu_cache.disparity().cuData(),total_number_of_features*sizeof(FloatTypeGpu),cudaMemcpyDeviceToHost);
/// If the "not_visible_value" is set to FLT_MAX, the median value is allways overestimating
/// the true median value. We could also remove the values that are not reprojected by setting
/// "not_visible_value" in k_disparities to -1 and than run the following operations
// {
// FloatTypeGpu* disparity_pointer_no_neg;
// disparity_pointer_no_neg = (FloatTypeGpu*) malloc(total_number_of_features*sizeof(FloatTypeGpu));
// size_t successfull_reprojections = copyArrayNonNegative<FloatTypeGpu>(disparity_pointer_no_neg,disparity_pointer,total_number_of_features);
// FloatTypeGpu* disparity_end = &disparity_pointer_no_neg[successfull_reprojections - 1];
// FloatTypeGpu* middle_ptr = &disparity_pointer_no_neg[successfull_reprojections/2];
// std::nth_element(disparity_pointer_no_neg, middle_ptr, disparity_end);
// std::cout << "Median with removal = " << *middle_ptr << std::endl;
// std::cout << "Total " << total_number_of_features << std::endl;
// std::cout << "successfull " << successfull_reprojections << std::endl;
// free(disparity_pointer_no_neg);
// }
FloatTypeGpu* disparity_end = &disparity_pointer[total_number_of_features];
FloatTypeGpu* middle_ptr = &disparity_pointer[total_number_of_features/2];
std::nth_element(disparity_pointer, middle_ptr, disparity_end);
// Read the median before freeing the buffer to avoid returning a dangling value.
const FloatTypeGpu median_disparity = *middle_ptr;
free(disparity_pointer);
return median_disparity;
}
} // namespace sparse_img_align_device_utils
} // namespace svo
|
4d29e8436eef3a7eeabe3af365aaf6cd51fd1053.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/native/Pool.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <THH/THHNumerics.cuh>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
__device__ inline int min(int a, int b) {
return a <= b ? a : b;
}
__device__ inline int max(int a, int b) {
return a >= b ? a : b;
}
template <typename scalar_t, typename accscalar_t, bool COUNT_INCLUDE_PAD, bool USE_DIVISOR>
__global__ void avg_pool2d_out_cuda_frame(const int nthreads,
const scalar_t* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
scalar_t* const top_data, const int divisor_override) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
accscalar_t aveval = accscalar_t(0);
const scalar_t* const bottom_slice = bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
int divide_factor;
if (USE_DIVISOR) {
divide_factor = divisor_override;
} else {
if(COUNT_INCLUDE_PAD) {
divide_factor = pool_size;
} else {
divide_factor = (hend - hstart) * (wend - wstart);
}
}
top_data[index] = ScalarConvert<accscalar_t, scalar_t>::to(aveval / divide_factor);
}
}
template <typename scalar_t, typename accscalar_t, bool COUNT_INCLUDE_PAD, bool USE_DIVISOR>
__global__ void avg_pool2d_backward_out_cuda_frame(const int nthreads, const scalar_t* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
scalar_t* const bottom_diff, const int divisor_override) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
accscalar_t gradient = accscalar_t(0);
const scalar_t* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
int divide_factor;
if (USE_DIVISOR) {
divide_factor = divisor_override;
} else {
if(COUNT_INCLUDE_PAD) {
divide_factor = pool_size;
} else {
divide_factor = (hend - hstart) * (wend - wstart);
}
}
gradient += top_diff_slice[ph * pooled_width + pw] / divide_factor;
}
}
bottom_diff[index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient);
}
}
void avg_pool2d_out_cuda_template(
Tensor& output,
const Tensor& input_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
TensorArg output_arg{ output, "output", 1 };
TensorArg input_arg{ input_, "input_", 2 };
checkAllSameGPU("avg_pool2d_out_cuda", {output_arg, input_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2,
"avg_pool2d: kernel_size must either be a single int, or a tuple of two ints");
const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 2,
"avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints");
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 2,
"avg_pool2d: padding must either be a single int, or a tuple of two ints");
const int padH = safe_downcast<int, int64_t>(padding[0]);
const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0,
"divisor must be not zero");
const int64_t nbatch = input_.ndimension() == 4 ? input_.size(-4) : 1;
const int64_t nInputPlane = input_.size(-3);
const int64_t inputHeight = input_.size(-2);
const int64_t inputWidth = input_.size(-1);
const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode);
const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode);
pool2d_shape_check(
input_,
kH, kW, dH, dW, padH, padW, 1, 1,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth);
Tensor input = input_.contiguous();
output.resize_({nbatch, nInputPlane, outputHeight, outputWidth});
const int32_t count = safe_downcast<int32_t, int64_t>(output.numel());
const uint32_t num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
const uint32_t num_blocks = cuda::ATenCeilDiv<uint32_t>(count, num_threads);
if (divisor_override.has_value()) {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"avg_pool2d_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "avg_pool2d_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *output_data = output.data_ptr<scalar_t>();
scalar_t *input_data = input.data_ptr<scalar_t>();
hipLaunchKernelGGL(( avg_pool2d_out_cuda_frame<scalar_t, accscalar_t, false, true>)
, dim3(num_blocks), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
input_data,
nbatch,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW,
dH, dW,
padH, padW,
output_data,
divisor_override.value());
});
}
);
} else {
if (count_include_pad) {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"avg_pool2d_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "avg_pool2d_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *output_data = output.data_ptr<scalar_t>();
scalar_t *input_data = input.data_ptr<scalar_t>();
hipLaunchKernelGGL(( avg_pool2d_out_cuda_frame<scalar_t, accscalar_t, true, false>)
, dim3(num_blocks), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
input_data,
nbatch,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW,
dH, dW,
padH, padW,
output_data, 0);
});
}
);
}
else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"avg_pool2d_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "avg_pool2d_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *output_data = output.data_ptr<scalar_t>();
scalar_t *input_data = input.data_ptr<scalar_t>();
hipLaunchKernelGGL(( avg_pool2d_out_cuda_frame<scalar_t, accscalar_t, false, false>)
, dim3(num_blocks), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
input_data,
nbatch,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW,
dH, dW,
padH, padW,
output_data, 0);
});
}
);
}
}
THCudaCheck(hipGetLastError());
if (input.ndimension() == 3) {
output.resize_({nInputPlane, outputHeight, outputWidth});
}
}
Tensor& avg_pool2d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 };
TensorArg input_arg{ input_, "input_", 3 };
checkAllSameGPU("avg_pool2d_backward_out_cuda",
{gradInput_arg, gradOutput_arg, input_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2,
"avg_pool2d: kernel_size must either be a single int, or a tuple of two ints");
const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 2,
"avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints");
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 2,
"avg_pool2d: padding must either be a single int, or a tuple of two ints");
const int padH = safe_downcast<int, int64_t>(padding[0]);
const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0,
"divisor must be not zero");
const Tensor input = input_.contiguous();
const Tensor gradOutput = gradOutput_.contiguous();
const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1;
const int64_t nInputPlane = input.size(-3);
const int64_t inputHeight = input.size(-2);
const int64_t inputWidth = input.size(-1);
const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode);
const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode);
avg_pool2d_backward_shape_check(
input_,
gradOutput_,
nbatch,
kH, kW, dH, dW, padH, padW,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth);
gradInput.resize_as_(input);
const int32_t count = safe_downcast<int32_t, int64_t>(input.numel());
const uint32_t num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
const uint32_t num_blocks = cuda::ATenCeilDiv<uint32_t>(count, num_threads);
if (divisor_override.has_value()) {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"avg_pool2d_backward_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "avg_pool2d_backward_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
hipLaunchKernelGGL(( avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t, false, true>)
, dim3(num_blocks), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
gradOutput_data,
nbatch,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW,
dH, dW,
padH, padW,
gradInput_data,
divisor_override.value());
});
}
);
} else {
if (count_include_pad) {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"avg_pool2d_backward_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "avg_pool2d_backward_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
hipLaunchKernelGGL(( avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t, true, false>)
, dim3(num_blocks), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
gradOutput_data,
nbatch,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW,
dH, dW,
padH, padW,
gradInput_data, 0);
});
}
);
}
else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"avg_pool2d_backward_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "avg_pool2d_backward_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
hipLaunchKernelGGL(( avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t, false, false>)
, dim3(num_blocks), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
gradOutput_data,
nbatch,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW,
dH, dW,
padH, padW,
gradInput_data, 0);
});
}
);
}
}
THCudaCheck(hipGetLastError());
return gradInput;
}
} // namespace
Tensor& avg_pool2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
avg_pool2d_out_cuda_template(
output,
input,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override);
return output;
}
Tensor avg_pool2d_cuda(
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
Tensor output = at::empty({0}, input.options());
avg_pool2d_out_cuda_template(
output,
input,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override);
return output;
}
Tensor& avg_pool2d_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
avg_pool2d_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override);
return gradInput;
}
Tensor avg_pool2d_backward_cuda(
const Tensor& gradOutput_,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
avg_pool2d_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override);
return gradInput;
}
} // at::native
} // at
| 4d29e8436eef3a7eeabe3af365aaf6cd51fd1053.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/native/Pool.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <THC/THCNumerics.cuh>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
__device__ inline int min(int a, int b) {
return a <= b ? a : b;
}
__device__ inline int max(int a, int b) {
return a >= b ? a : b;
}
template <typename scalar_t, typename accscalar_t, bool COUNT_INCLUDE_PAD, bool USE_DIVISOR>
__global__ void avg_pool2d_out_cuda_frame(const int nthreads,
const scalar_t* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
scalar_t* const top_data, const int divisor_override) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
accscalar_t aveval = accscalar_t(0);
const scalar_t* const bottom_slice = bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
int divide_factor;
if (USE_DIVISOR) {
divide_factor = divisor_override;
} else {
if(COUNT_INCLUDE_PAD) {
divide_factor = pool_size;
} else {
divide_factor = (hend - hstart) * (wend - wstart);
}
}
top_data[index] = ScalarConvert<accscalar_t, scalar_t>::to(aveval / divide_factor);
}
}
template <typename scalar_t, typename accscalar_t, bool COUNT_INCLUDE_PAD, bool USE_DIVISOR>
__global__ void avg_pool2d_backward_out_cuda_frame(const int nthreads, const scalar_t* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
scalar_t* const bottom_diff, const int divisor_override) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
accscalar_t gradient = accscalar_t(0);
const scalar_t* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
int divide_factor;
if (USE_DIVISOR) {
divide_factor = divisor_override;
} else {
if(COUNT_INCLUDE_PAD) {
divide_factor = pool_size;
} else {
divide_factor = (hend - hstart) * (wend - wstart);
}
}
gradient += top_diff_slice[ph * pooled_width + pw] / divide_factor;
}
}
bottom_diff[index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient);
}
}
void avg_pool2d_out_cuda_template(
Tensor& output,
const Tensor& input_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
TensorArg output_arg{ output, "output", 1 };
TensorArg input_arg{ input_, "input_", 2 };
checkAllSameGPU("avg_pool2d_out_cuda", {output_arg, input_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2,
"avg_pool2d: kernel_size must either be a single int, or a tuple of two ints");
const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 2,
"avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints");
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 2,
"avg_pool2d: padding must either be a single int, or a tuple of two ints");
const int padH = safe_downcast<int, int64_t>(padding[0]);
const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0,
"divisor must be not zero");
const int64_t nbatch = input_.ndimension() == 4 ? input_.size(-4) : 1;
const int64_t nInputPlane = input_.size(-3);
const int64_t inputHeight = input_.size(-2);
const int64_t inputWidth = input_.size(-1);
const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode);
const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode);
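  // Rough sketch of the shape rule applied here (dilation is 1 in this call):
  //   outputSize = floor_or_ceil((inputSize + 2 * pad - kernelSize) / stride) + 1,
  // with ceil_mode selecting ceil plus a clamp so the last window starts inside the padded input;
  // pooling_output_shape owns the exact arithmetic.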
pool2d_shape_check(
input_,
kH, kW, dH, dW, padH, padW, 1, 1,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth);
Tensor input = input_.contiguous();
output.resize_({nbatch, nInputPlane, outputHeight, outputWidth});
const int32_t count = safe_downcast<int32_t, int64_t>(output.numel());
const uint32_t num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
const uint32_t num_blocks = cuda::ATenCeilDiv<uint32_t>(count, num_threads);
if (divisor_override.has_value()) {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"avg_pool2d_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "avg_pool2d_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *output_data = output.data_ptr<scalar_t>();
scalar_t *input_data = input.data_ptr<scalar_t>();
avg_pool2d_out_cuda_frame<scalar_t, accscalar_t, false, true>
<<<num_blocks, num_threads, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
input_data,
nbatch,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW,
dH, dW,
padH, padW,
output_data,
divisor_override.value());
});
}
);
} else {
if (count_include_pad) {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"avg_pool2d_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "avg_pool2d_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *output_data = output.data_ptr<scalar_t>();
scalar_t *input_data = input.data_ptr<scalar_t>();
avg_pool2d_out_cuda_frame<scalar_t, accscalar_t, true, false>
<<<num_blocks, num_threads, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
input_data,
nbatch,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW,
dH, dW,
padH, padW,
output_data, 0);
});
}
);
}
else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"avg_pool2d_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "avg_pool2d_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *output_data = output.data_ptr<scalar_t>();
scalar_t *input_data = input.data_ptr<scalar_t>();
avg_pool2d_out_cuda_frame<scalar_t, accscalar_t, false, false>
<<<num_blocks, num_threads, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
input_data,
nbatch,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW,
dH, dW,
padH, padW,
output_data, 0);
});
}
);
}
}
THCudaCheck(cudaGetLastError());
if (input.ndimension() == 3) {
output.resize_({nInputPlane, outputHeight, outputWidth});
}
}
Tensor& avg_pool2d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 };
TensorArg input_arg{ input_, "input_", 3 };
checkAllSameGPU("avg_pool2d_backward_out_cuda",
{gradInput_arg, gradOutput_arg, input_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2,
"avg_pool2d: kernel_size must either be a single int, or a tuple of two ints");
const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 2,
"avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints");
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 2,
"avg_pool2d: padding must either be a single int, or a tuple of two ints");
const int padH = safe_downcast<int, int64_t>(padding[0]);
const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0,
"divisor must be not zero");
const Tensor input = input_.contiguous();
const Tensor gradOutput = gradOutput_.contiguous();
const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1;
const int64_t nInputPlane = input.size(-3);
const int64_t inputHeight = input.size(-2);
const int64_t inputWidth = input.size(-1);
const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode);
const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode);
avg_pool2d_backward_shape_check(
input_,
gradOutput_,
nbatch,
kH, kW, dH, dW, padH, padW,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth);
gradInput.resize_as_(input);
const int32_t count = safe_downcast<int32_t, int64_t>(input.numel());
const uint32_t num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
const uint32_t num_blocks = cuda::ATenCeilDiv<uint32_t>(count, num_threads);
if (divisor_override.has_value()) {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"avg_pool2d_backward_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "avg_pool2d_backward_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t, false, true>
<<<num_blocks, num_threads, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
gradOutput_data,
nbatch,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW,
dH, dW,
padH, padW,
gradInput_data,
divisor_override.value());
});
}
);
} else {
if (count_include_pad) {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"avg_pool2d_backward_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "avg_pool2d_backward_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t, true, false>
<<<num_blocks, num_threads, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
gradOutput_data,
nbatch,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW,
dH, dW,
padH, padW,
gradInput_data, 0);
});
}
);
}
else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"avg_pool2d_backward_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "avg_pool2d_backward_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t, false, false>
<<<num_blocks, num_threads, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
gradOutput_data,
nbatch,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW,
dH, dW,
padH, padW,
gradInput_data, 0);
});
}
);
}
}
THCudaCheck(cudaGetLastError());
return gradInput;
}
} // namespace
Tensor& avg_pool2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
avg_pool2d_out_cuda_template(
output,
input,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override);
return output;
}
Tensor avg_pool2d_cuda(
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
Tensor output = at::empty({0}, input.options());
avg_pool2d_out_cuda_template(
output,
input,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override);
return output;
}
Tensor& avg_pool2d_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
avg_pool2d_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override);
return gradInput;
}
Tensor avg_pool2d_backward_cuda(
const Tensor& gradOutput_,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
avg_pool2d_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override);
return gradInput;
}
} // at::native
} // at
|
62c4c6a79370198ce0fb00ae36c6bc99739786c8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 15
#define F 8
#define ITERATIONS (unsigned)( 2000 )
#define ITERATIONS2 1
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int N){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
//int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int);
//unsigned j=0, k=0;
int m_sum=N;
// m_sum = A[tid*F];
for (unsigned j=0; j<ITERATIONS2; j++){
for(unsigned k=0; k<ITERATIONS; ++k){
m_sum=A[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))];
}
m_sum+=j;
}
C[tid]=m_sum;
__syncthreads();
}
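// Access pattern of the loop above: thread tid reads A[tid*F + k*max_tid*F] for k = 0..ITERATIONS-1,
// so successive reads by one thread are max_tid*F ints apart and, across all threads, the sweeps cover
// essentially the whole N-element array (~246 MB for these constants), far beyond any cache. That keeps
// the kernel memory-bound, which fits its apparent role as a power/stress microbenchmark.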
// Host code
int main(){
printf("Power Microbenchmarks\n");
//int N = LINE_SIZE*SETS*ASSOC;
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
//checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
//checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
hipFree(d_A);
//if (d_B)
// hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Initializes the array from rand(); note rand() / RAND_MAX is integer division, so nearly every entry is 0.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
| 62c4c6a79370198ce0fb00ae36c6bc99739786c8.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 15
#define F 8
#define ITERATIONS (unsigned)( 2000 )
#define ITERATIONS2 1
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int N){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
//int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int);
//unsigned j=0, k=0;
int m_sum=N;
// m_sum = A[tid*F];
for (unsigned j=0; j<ITERATIONS2; j++){
for(unsigned k=0; k<ITERATIONS; ++k){
m_sum=A[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))];
}
m_sum+=j;
}
C[tid]=m_sum;
__syncthreads();
}
// Host code
int main(){
printf("Power Microbenchmarks\n");
//int N = LINE_SIZE*SETS*ASSOC;
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
//checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
//checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
cudaFree(d_A);
//if (d_B)
// cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Initializes the array from rand(); note rand() / RAND_MAX is integer division, so nearly every entry is 0.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
|
2fc40b2efe04b5771c1f99f1af6fb259a3e58808.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Assignment 1: ParallelSine
// CSCI 415: Networking and Parallel Computation
// Spring 2017
// Name(s):Chengyao Tang,Victoria Kyereme
//
// Sine implementation derived from slides here: http://15418.courses.cs.cmu.edu/spring2016/lecture/basicarch
// standard imports
#include <stdio.h>
#include <math.h>
#include <iomanip>
#include <iostream>
#include <string>
#include <sys/time.h>
// problem size (vector length) N
static const int N = 12345678;
// Number of terms to use when approximating sine
static const int TERMS = 6;
// kernel function (CPU - Do not modify)
void sine_serial(float *input, float *output)
{
int i;
for (i=0; i<N; i++) {
float value = input[i];
float numer = input[i] * input[i] * input[i];
int denom = 6; // 3!
int sign = -1;
for (int j=1; j<=TERMS;j++)
{
value += sign * numer / denom;
numer *= input[i] * input[i];
denom *= (2*j+2) * (2*j+3);
sign *= -1;
}
output[i] = value;
}
}
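// The loop above evaluates the truncated Taylor series
//   sin(x) ~= x - x^3/3! + x^5/5! - x^7/7! + ... ,
// adding TERMS further terms after the leading x (so up to x^(2*TERMS+1) / (2*TERMS+1)!); numer, denom
// and sign carry x^(2j+1), (2j+1)! and the alternating sign from one iteration to the next.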
// kernel function (CUDA device)
// TODO: Implement your graphics kernel here. See assignment instructions for method information
__global__ void sine_parallel(float*d_input,float*d_output ){
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx >= N) return; // guard: the launch below rounds the grid up, so trailing threads must not write past N
float d_value = d_input[idx];
float d_numer = d_input[idx]*d_input[idx]*d_input[idx];
int d_denom = 6;
int d_sign = -1;
for (int d_j=1;d_j<=TERMS; d_j++){
d_value += d_sign *d_numer/d_denom;
d_numer *= d_input[idx]* d_input[idx];
d_denom *= (2*d_j+2)* (2*d_j+3);
d_sign *= -1;
}
d_output[idx] = d_value;
}
// BEGIN: timing and error checking routines (do not modify)
// Returns the current time in microseconds
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, std::string name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
std::cout << std::setprecision(5);
std::cout << name << ": " << ((float) (end_time - start_time)) / (1000 * 1000) << " sec\n";
return end_time - start_time;
}
void checkErrors(const char label[])
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
// END: timing and error checking routines (do not modify)
int main (int argc, char **argv)
{
//BEGIN: CPU implementation (do not modify)
float *h_cpu_result = (float*)malloc(N*sizeof(float));
float *h_input = (float*)malloc(N*sizeof(float));
//Initialize data on CPU
int i;
for (i=0; i<N; i++)
{
h_input[i] = 0.1f * i;
}
//Execute and time the CPU version
long long CPU_start_time = start_timer();
sine_serial(h_input, h_cpu_result);
long long CPU_time = stop_timer(CPU_start_time, "\nCPU Run Time");
//END: CPU implementation (do not modify)
//TODO: Prepare and run your kernel, make sure to copy your results back into h_gpu_result and display your timing results
float *h_gpu_result = (float*)malloc(N*sizeof(float));
//declare GPU memory pointers
float *d_input;
float *d_output;
long long Memory_Allocation_start_time = start_timer();
long long GPU_start_time = start_timer();
//allocate GPU memory
hipMalloc((void **) &d_input,N*sizeof(float));
hipMalloc((void **) &d_output,N*sizeof(float));
long long Memory_Allocation_end_time = stop_timer(Memory_Allocation_start_time,"\nGPU Memory allocation time:");
//transfer the array to the GPU
long long Memory_copy_to_device_start_time = start_timer();
hipMemcpy(d_input, h_input, N*sizeof(float),hipMemcpyHostToDevice);
long long Memory_copy_to_device_end_time = stop_timer(Memory_copy_to_device_start_time,"\nGPU Memory Copy to Device time:");
//launch the kernel
  int threards = (N + 1023) / 1024; // round up so every element gets a thread (the kernel guards against overshoot)
long long Kernal_run_start_time = start_timer();
hipLaunchKernelGGL(( sine_parallel), dim3(threards),dim3(1024), 0, 0, d_input,d_output);
long long Kernal_run_end_time = stop_timer(Kernal_run_start_time,"\nGPU Kernal run Time:");
//copy back the result array to the CPU
long long Memory_copy_to_Host_start_time = start_timer();
hipMemcpy(h_gpu_result,d_output,N*sizeof(float),hipMemcpyDeviceToHost);
long long Memory_copy_to_Host_end_time = stop_timer(Memory_copy_to_Host_start_time,"\nGPU Memory Copy to Host time:");
long long GPU_end_time = stop_timer(GPU_start_time,"\nTotal GPU Run time:");
// Checking to make sure the CPU and GPU results match - Do not modify
int errorCount = 0;
for (i=0; i<N; i++)
{
if (abs(h_cpu_result[i]-h_gpu_result[i]) > 1e-6)
errorCount = errorCount + 1;
}
if (errorCount > 0)
printf("Result comparison failed.\n");
else
printf("Result comparison passed.\n");
// Cleaning up memory
free(h_input);
free(h_cpu_result);
free(h_gpu_result);
//Cleaning up memory for gpu pointers
hipFree(d_input);
hipFree(d_output);
return 0;
}
| 2fc40b2efe04b5771c1f99f1af6fb259a3e58808.cu | //
// Assignment 1: ParallelSine
// CSCI 415: Networking and Parallel Computation
// Spring 2017
// Name(s):Chengyao Tang,Victoria Kyereme
//
// Sine implementation derived from slides here: http://15418.courses.cs.cmu.edu/spring2016/lecture/basicarch
// standard imports
#include <stdio.h>
#include <math.h>
#include <iomanip>
#include <iostream>
#include <string>
#include <sys/time.h>
// problem size (vector length) N
static const int N = 12345678;
// Number of terms to use when approximating sine
static const int TERMS = 6;
// kernel function (CPU - Do not modify)
void sine_serial(float *input, float *output)
{
int i;
for (i=0; i<N; i++) {
float value = input[i];
float numer = input[i] * input[i] * input[i];
int denom = 6; // 3!
int sign = -1;
for (int j=1; j<=TERMS;j++)
{
value += sign * numer / denom;
numer *= input[i] * input[i];
denom *= (2*j+2) * (2*j+3);
sign *= -1;
}
output[i] = value;
}
}
// kernel function (CUDA device)
// TODO: Implement your graphics kernel here. See assignment instructions for method information
__global__ void sine_parallel(float*d_input,float*d_output ){
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx >= N) return; // guard: the launch below rounds the grid up, so trailing threads must not write past N
float d_value = d_input[idx];
float d_numer = d_input[idx]*d_input[idx]*d_input[idx];
int d_denom = 6;
int d_sign = -1;
for (int d_j=1;d_j<=TERMS; d_j++){
d_value += d_sign *d_numer/d_denom;
d_numer *= d_input[idx]* d_input[idx];
d_denom *= (2*d_j+2)* (2*d_j+3);
d_sign *= -1;
}
d_output[idx] = d_value;
}
// BEGIN: timing and error checking routines (do not modify)
// Returns the current time in microseconds
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, std::string name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
std::cout << std::setprecision(5);
std::cout << name << ": " << ((float) (end_time - start_time)) / (1000 * 1000) << " sec\n";
return end_time - start_time;
}
void checkErrors(const char label[])
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
// END: timing and error checking routines (do not modify)
int main (int argc, char **argv)
{
//BEGIN: CPU implementation (do not modify)
float *h_cpu_result = (float*)malloc(N*sizeof(float));
float *h_input = (float*)malloc(N*sizeof(float));
//Initialize data on CPU
int i;
for (i=0; i<N; i++)
{
h_input[i] = 0.1f * i;
}
//Execute and time the CPU version
long long CPU_start_time = start_timer();
sine_serial(h_input, h_cpu_result);
long long CPU_time = stop_timer(CPU_start_time, "\nCPU Run Time");
//END: CPU implementation (do not modify)
//TODO: Prepare and run your kernel, make sure to copy your results back into h_gpu_result and display your timing results
float *h_gpu_result = (float*)malloc(N*sizeof(float));
//declare GPU memory pointers
float *d_input;
float *d_output;
long long Memory_Allocation_start_time = start_timer();
long long GPU_start_time = start_timer();
//allocate GPU memory
cudaMalloc((void **) &d_input,N*sizeof(float));
cudaMalloc((void **) &d_output,N*sizeof(float));
long long Memory_Allocation_end_time = stop_timer(Memory_Allocation_start_time,"\nGPU Memory allocation time:");
//transfer the array to the GPU
long long Memory_copy_to_device_start_time = start_timer();
cudaMemcpy(d_input, h_input, N*sizeof(float),cudaMemcpyHostToDevice);
long long Memory_copy_to_device_end_time = stop_timer(Memory_copy_to_device_start_time,"\nGPU Memory Copy to Device time:");
//launch the kernel
  int threards = (N + 1023) / 1024; // round up so every element gets a thread (the kernel guards against overshoot)
long long Kernal_run_start_time = start_timer();
sine_parallel<<<threards,1024>>>(d_input,d_output);
long long Kernal_run_end_time = stop_timer(Kernal_run_start_time,"\nGPU Kernal run Time:");
//copy back the result array to the CPU
long long Memory_copy_to_Host_start_time = start_timer();
cudaMemcpy(h_gpu_result,d_output,N*sizeof(float),cudaMemcpyDeviceToHost);
long long Memory_copy_to_Host_end_time = stop_timer(Memory_copy_to_Host_start_time,"\nGPU Memory Copy to Host time:");
long long GPU_end_time = stop_timer(GPU_start_time,"\nTotal GPU Run time:");
// Checking to make sure the CPU and GPU results match - Do not modify
int errorCount = 0;
for (i=0; i<N; i++)
{
if (abs(h_cpu_result[i]-h_gpu_result[i]) > 1e-6)
errorCount = errorCount + 1;
}
if (errorCount > 0)
printf("Result comparison failed.\n");
else
printf("Result comparison passed.\n");
// Cleaning up memory
free(h_input);
free(h_cpu_result);
free(h_gpu_result);
//Cleaning up memory for gpu pointers
cudaFree(d_input);
cudaFree(d_output);
return 0;
}
|
4b536c9f65db165ef8ba1452db9567bfcca6996e.hip | // !!! This is a file automatically generated by hipify!!!
// Rishabh Agarwal - 18je0676
#include <bits/stdc++.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
using namespace std;
#define check(statement) do {\
hipError_t error = statement;\
if (error != hipSuccess) {\
cout << "Failed to run stmt " << __LINE__ << "\n";\
cout << "Got CUDA error ... " << hipGetErrorString(error) << "\n";\
return -1;\
}\
} while(0)
// kernel function
__global__ void convolutionKernel(float *a,float *b,float *c,int maskWidth,int width) {
    int i=blockIdx.x*blockDim.x+threadIdx.x;
    if (i >= width) return; // guard: the grid is rounded up to a multiple of maskWidth, so extra threads must not write c[i]
float cvalue=0.0;
int start_point=i-(maskWidth/2);
for(int j = 0;j < maskWidth; j++) {
if((start_point + j) >= 0 && (start_point+j) < width) {
cvalue += a[start_point + j] * b[j];
}
}
c[i]=cvalue;
}
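// Expected behaviour for the hard-coded data in main below (width = 5, maskWidth = 3, all entries 1.0):
// the two border outputs see only two in-range taps and the interior ones see three, so the program
// should print "2 3 3 3 2".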
// main function
int main() {
float * input;
float * mask;
float * output;
float * dinput;
float * dmask;
float * doutput;
int maskWidth=3;
int width=5;
// allocating memomry to input, mask and output
input = (float *)malloc(sizeof(float) * width);
mask = (float *)malloc(sizeof(float) * maskWidth);
output = (float *)malloc(sizeof(float) * width);
// assigning values to input, mask and output
for(int i=0;i<width;i++) {
input[i]=1.0;
}
for(int i=0;i < maskWidth;i++) {
mask[i]=1.0;
}
cout << "\nInput: \n";
for(int i=0; i<width; i++) {
cout << input[i] << " ";
}
cout << "\n";
cout << "\nMask: \n";
for(int i=0; i < maskWidth; i++) {
cout << mask[i] << " ";
}
cout << "\n";
// allocating device memory
check(hipMalloc((void **)&dinput, sizeof(float) * width));
check(hipMalloc((void **)&dmask, sizeof(float) * maskWidth));
check(hipMalloc((void **)&doutput, sizeof(float) * width));
// copying memory from host to device
check(hipMemcpy(dinput, input, sizeof(float) * width, hipMemcpyHostToDevice));
check(hipMemcpy(dmask, mask, sizeof(float) * maskWidth, hipMemcpyHostToDevice));
// kernel dimensions
dim3 dimGrid(((width-1)/maskWidth) + 1, 1,1);
dim3 dimBlock(maskWidth,1, 1);
// calling kernel
hipLaunchKernelGGL(( convolutionKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, dinput, dmask, doutput, maskWidth, width);
hipDeviceSynchronize();
// copying memory back from device to host
check(hipMemcpy(output, doutput, sizeof(float) * width, hipMemcpyDeviceToHost));
cout << "\nOutput: \n";
for(int i=0; i < width; i++) {
cout << output[i] << " ";
}
hipFree(dinput);
hipFree(dmask);
hipFree(doutput);
free(input);
free(output);
free(mask);
return 0;
}
| 4b536c9f65db165ef8ba1452db9567bfcca6996e.cu | // Rishabh Agarwal - 18je0676
#include <bits/stdc++.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
using namespace std;
#define check(statement) do {\
cudaError_t error = statement;\
if (error != cudaSuccess) {\
cout << "Failed to run stmt " << __LINE__ << "\n";\
cout << "Got CUDA error ... " << cudaGetErrorString(error) << "\n";\
return -1;\
}\
} while(0)
// kernel function
__global__ void convolutionKernel(float *a,float *b,float *c,int maskWidth,int width) {
    int i=blockIdx.x*blockDim.x+threadIdx.x;
    if (i >= width) return; // guard: the grid is rounded up to a multiple of maskWidth, so extra threads must not write c[i]
float cvalue=0.0;
int start_point=i-(maskWidth/2);
for(int j = 0;j < maskWidth; j++) {
if((start_point + j) >= 0 && (start_point+j) < width) {
cvalue += a[start_point + j] * b[j];
}
}
c[i]=cvalue;
}
// main function
int main() {
float * input;
float * mask;
float * output;
float * dinput;
float * dmask;
float * doutput;
int maskWidth=3;
int width=5;
// allocating memomry to input, mask and output
input = (float *)malloc(sizeof(float) * width);
mask = (float *)malloc(sizeof(float) * maskWidth);
output = (float *)malloc(sizeof(float) * width);
// assigning values to input, mask and output
for(int i=0;i<width;i++) {
input[i]=1.0;
}
for(int i=0;i < maskWidth;i++) {
mask[i]=1.0;
}
cout << "\nInput: \n";
for(int i=0; i<width; i++) {
cout << input[i] << " ";
}
cout << "\n";
cout << "\nMask: \n";
for(int i=0; i < maskWidth; i++) {
cout << mask[i] << " ";
}
cout << "\n";
// allocating device memory
check(cudaMalloc((void **)&dinput, sizeof(float) * width));
check(cudaMalloc((void **)&dmask, sizeof(float) * maskWidth));
check(cudaMalloc((void **)&doutput, sizeof(float) * width));
// copying memory from host to device
check(cudaMemcpy(dinput, input, sizeof(float) * width, cudaMemcpyHostToDevice));
check(cudaMemcpy(dmask, mask, sizeof(float) * maskWidth, cudaMemcpyHostToDevice));
// kernel dimensions
dim3 dimGrid(((width-1)/maskWidth) + 1, 1,1);
dim3 dimBlock(maskWidth,1, 1);
// calling kernel
convolutionKernel<<<dimGrid,dimBlock>>>(dinput, dmask, doutput, maskWidth, width);
cudaDeviceSynchronize();
// copying memory back from device to host
check(cudaMemcpy(output, doutput, sizeof(float) * width, cudaMemcpyDeviceToHost));
cout << "\nOutput: \n";
for(int i=0; i < width; i++) {
cout << output[i] << " ";
}
cudaFree(dinput);
cudaFree(dmask);
cudaFree(doutput);
free(input);
free(output);
free(mask);
return 0;
}
|
a9073b4f3358d4ff034616dd38f4385b4e586c13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void __word2vecNegFilt(int nrows, int ncols, int nwords, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {} | a9073b4f3358d4ff034616dd38f4385b4e586c13.cu | #include "includes.h"
__global__ void __word2vecNegFilt(int nrows, int ncols, int nwords, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {} |
b7603829bf9123e68e97346d0610e1eb3fd51eaf.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void divergence_test_ker()
{
if(threadIdx.x % 2 == 0)
printf("threadIdx.x %d : This is an even thread.\n", threadIdx.x);
else
printf("threadIdx.x %d : This is an odd thread.\n", threadIdx.x);
}
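// With a single block of 32 threads (exactly one warp), the even and odd threads above take different
// branches; the warp cannot issue both paths at once, so the two printf branches execute serially
// (interleaved), which is the branch divergence this test is meant to expose.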
__host__ int main()
{
hipSetDevice(0);
hipLaunchKernelGGL(( divergence_test_ker) , dim3(1), dim3(32), 0, 0, );
hipDeviceSynchronize();
hipDeviceReset();
}
| b7603829bf9123e68e97346d0610e1eb3fd51eaf.cu | #include <cuda_runtime.h>
#include <stdio.h>
__global__ void divergence_test_ker()
{
if(threadIdx.x % 2 == 0)
printf("threadIdx.x %d : This is an even thread.\n", threadIdx.x);
else
printf("threadIdx.x %d : This is an odd thread.\n", threadIdx.x);
}
__host__ int main()
{
cudaSetDevice(0);
divergence_test_ker <<<1, 32>>>();
cudaDeviceSynchronize();
cudaDeviceReset();
}
|
cd0279aa3c9f5c6659eae303fb0ec37e7d79f05e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "integrateOptimised.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *n = NULL;
hipMalloc(&n, XSIZE*YSIZE);
float *g_sum = NULL;
hipMalloc(&g_sum, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
integrateOptimised), dim3(gridBlock),dim3(threadBlock), 0, 0, n,g_sum);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
integrateOptimised), dim3(gridBlock),dim3(threadBlock), 0, 0, n,g_sum);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
integrateOptimised), dim3(gridBlock),dim3(threadBlock), 0, 0, n,g_sum);
}
auto end = steady_clock::now();
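// Note: kernel launches are asynchronous and no hipDeviceSynchronize() separates the timed loop from this
// timestamp, so the measured interval may reflect enqueue overhead more than completed kernel work;
// synchronizing the device before taking `end` would time the kernels themselves.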
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | cd0279aa3c9f5c6659eae303fb0ec37e7d79f05e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "integrateOptimised.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *n = NULL;
cudaMalloc(&n, XSIZE*YSIZE);
float *g_sum = NULL;
cudaMalloc(&g_sum, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
integrateOptimised<<<gridBlock,threadBlock>>>(n,g_sum);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
integrateOptimised<<<gridBlock,threadBlock>>>(n,g_sum);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
integrateOptimised<<<gridBlock,threadBlock>>>(n,g_sum);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e89fab1e7b204034443e2006ef5dbe703d29b68c.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include "kernel.hip"
__global__ void test_init(float *u, float *u_new, int N)
{
unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int j = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = j * N + i;
if(i<N && j<N) {
u[idx] = 0.0;
u_new[idx] = 1.0;
}
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
    float error;
    // set up problem size
    int N = 6;
int nxy = N * N;
int nBytes = nxy * sizeof(float);
printf("Problem size: nx %d ny %d\n", N, N);
// malloc device global memory
float *d_u, *d_u_new, *d_error;
CHECK(hipMalloc((void **)&d_u , nBytes));
CHECK(hipMalloc((void **)&d_u_new , nBytes));
CHECK(hipMalloc((void **)&d_error , sizeof(float)));
//set-up blocks and threads
int dimx = 3;
int dimy = 3;
dim3 block(dimx, dimy);
    dim3 grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y);
    //initialize u
    hipLaunchKernelGGL(( test_init), dim3(grid),dim3(block), 0, 0, d_u, d_u_new, N);
CHECK(hipDeviceSynchronize());
// compute error
hipLaunchKernelGGL(( computeError), dim3(grid), dim3(block), 0, 0, d_error, d_u, d_u_new, N);
CHECK(hipDeviceSynchronize());
    CHECK(hipMemcpy(&error, d_error, sizeof(float), hipMemcpyDeviceToHost));
printf("Error after initialization: %e, expected value: 1.0\n",error);
// update
hipLaunchKernelGGL(( updateSolution), dim3(grid),dim3(block), 0, 0, d_u, d_u_new, N);
CHECK(hipDeviceSynchronize());
//compute error again
hipLaunchKernelGGL(( computeError), dim3(grid), dim3(block), 0, 0, d_error, d_u, d_u_new, N);
CHECK(hipDeviceSynchronize());
    CHECK(hipMemcpy(&error, d_error, sizeof(float), hipMemcpyDeviceToHost));
printf("Error after update: %e, expected value: 0.0\n",error);
// check kernel error
CHECK(hipGetLastError());
// free device global memory
CHECK(hipFree(d_u));
CHECK(hipFree(d_u_new));
CHECK(hipFree(d_error));
// reset device
CHECK(hipDeviceReset());
return (0);
}
| e89fab1e7b204034443e2006ef5dbe703d29b68c.cu | #include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
#include <math.h>
#include "kernel.cu"
__global__ void test_init(float *u, float *u_new, int N)
{
unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int j = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = j * N + i;
if(i<N && j<N) {
u[idx] = 0.0;
u_new[idx] = 1.0;
}
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
    float error;
    // set up problem size
    int N = 6;
int nxy = N * N;
int nBytes = nxy * sizeof(float);
printf("Problem size: nx %d ny %d\n", N, N);
// malloc device global memory
float *d_u, *d_u_new, *d_error;
CHECK(cudaMalloc((void **)&d_u , nBytes));
CHECK(cudaMalloc((void **)&d_u_new , nBytes));
CHECK(cudaMalloc((void **)&d_error , sizeof(float)));
//set-up blocks and threads
int dimx = 3;
int dimy = 3;
dim3 block(dimx, dimy);
    dim3 grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y);
    //initialize u
    test_init<<<grid,block>>>(d_u, d_u_new, N);
CHECK(cudaDeviceSynchronize());
// compute error
computeError<<<grid, block>>>(d_error, d_u, d_u_new, N);
CHECK(cudaDeviceSynchronize());
    CHECK(cudaMemcpy(&error, d_error, sizeof(float), cudaMemcpyDeviceToHost));
printf("Error after initialization: %e, expected value: 1.0\n",error);
// update
updateSolution<<<grid,block>>>(d_u, d_u_new, N);
CHECK(cudaDeviceSynchronize());
//compute error again
computeError<<<grid, block>>>(d_error, d_u, d_u_new, N);
CHECK(cudaDeviceSynchronize());
    CHECK(cudaMemcpy(&error, d_error, sizeof(float), cudaMemcpyDeviceToHost));
printf("Error after update: %e, expected value: 0.0\n",error);
// check kernel error
CHECK(cudaGetLastError());
// free device global memory
CHECK(cudaFree(d_u));
CHECK(cudaFree(d_u_new));
CHECK(cudaFree(d_error));
// reset device
CHECK(cudaDeviceReset());
return (0);
}
|
d0ea3d1d33548d095e871156058f95b802cc6dd3.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <random>
#include <sys/time.h>
#include <rocblas.h>
#include <cudaProfiler.h>
#include <omp.h>
#define NUM_CORE (6)
#define EPS (1e-3)
static const char *_cudaGetErrorEnum(hipblasStatus_t error)
{
switch (error)
{
case HIPBLAS_STATUS_SUCCESS:
return "HIPBLAS_STATUS_SUCCESS";
case HIPBLAS_STATUS_NOT_INITIALIZED:
return "HIPBLAS_STATUS_NOT_INITIALIZED";
case HIPBLAS_STATUS_ALLOC_FAILED:
return "HIPBLAS_STATUS_ALLOC_FAILED";
case HIPBLAS_STATUS_INVALID_VALUE:
return "HIPBLAS_STATUS_INVALID_VALUE";
case HIPBLAS_STATUS_ARCH_MISMATCH:
return "HIPBLAS_STATUS_ARCH_MISMATCH";
case HIPBLAS_STATUS_MAPPING_ERROR:
return "HIPBLAS_STATUS_MAPPING_ERROR";
case HIPBLAS_STATUS_EXECUTION_FAILED:
return "HIPBLAS_STATUS_EXECUTION_FAILED";
case HIPBLAS_STATUS_INTERNAL_ERROR:
return "HIPBLAS_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#define cuBLASErrChk(ans) { cuBLASAssert((ans), __FILE__, __LINE__); }
inline void cuBLASAssert(hipblasStatus_t code, const char *file, int line, bool abort=true)
{
if (code != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr,"CUDA assert: %s %s %d\n", _cudaGetErrorEnum(code), file, line);
if (abort) exit(code);
}
}
#define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"CUDA assert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
struct config {
int AH;
int AW;
int BH;
int BW;
int CH;
int CW;
float alpha;
float beta;
bool do_test;
};
/***************************************
* Device code "matmul"
**************************************/
/***************************************
* Host code "matmul"
**************************************/
float* host_mat_mul(const float* A, const float* B, const float* C, const struct config conf) {
printf("[Kernel] Run kernal\n");
/*** Initialize device memory ***/
size_t size_A = sizeof(float)*conf.AH*conf.AW;
size_t size_B = sizeof(float)*conf.BH*conf.BW;
size_t size_C = sizeof(float)*conf.CH*conf.CW;
float *d_A, *d_B, *d_C;
float *result = (float *) malloc (conf.CH*conf.CW*sizeof(float));
cudaErrChk (hipMalloc ((void**)(&d_A), size_A));
cudaErrChk (hipMalloc ((void**)(&d_B), size_B));
cudaErrChk (hipMalloc ((void**)(&d_C), size_C));
cudaErrChk (hipMemcpy (d_A, A, size_A, hipMemcpyHostToDevice));
cudaErrChk (hipMemcpy (d_B, B, size_B, hipMemcpyHostToDevice));
cudaErrChk (hipMemcpy (d_C, C, size_C, hipMemcpyHostToDevice));
cudaErrChk (hipDeviceSynchronize ())
/*** Setup execution config ***/
int cublas_version;
hipblasHandle_t handle;
cuBLASErrChk (hipblasCreate (&handle));
cuBLASErrChk (cublasGetVersion (handle, &cublas_version));
cuBLASErrChk (cublasSetMathMode (handle, CUBLAS_DEFAULT_MATH));
printf(" Tensor core not used [cublas ver. %d.%d]\n" , cublas_version/1000, cublas_version%1000);
printf(" Acceptable error rate %.7f [%f*GT < Pred < %f*GT]\n" , EPS, (1-EPS), (1+EPS));
int m=conf.CH, n=conf.CW, k=conf.AW;
const float *alpha=&(conf.alpha), *beta=&(conf.beta);
/*** Run CUDA kernel ***/
hipEvent_t start, stop;
cudaErrChk(hipEventCreate(&start));
cudaErrChk(hipEventCreate(&stop));
cudaErrChk(hipEventRecord(start, NULL));
// Main body
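    // Operand order note: cuBLAS/hipBLAS treat matrices as column-major, so passing (d_B, d_A) with
    // dimensions (n, m, k) and leading dimensions n, k, n asks for C^T = B^T * A^T in column-major
    // terms, which is exactly the row-major C = alpha * A @ B + beta * C that host_test verifies.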
cuBLASErrChk (hipblasSgemm (handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, alpha, d_B, n, d_A, k, beta, d_C, n));
// End of main body
cudaErrChk(hipEventRecord(stop, NULL));
cudaErrChk(hipEventSynchronize(stop));
float msec_total = 0.0f;
float gflo = conf.CH*conf.CW*(2.0*conf.AW+2)*1e-9;
cudaErrChk(hipEventElapsedTime(&msec_total, start, stop));
printf(" Elaped time: %.4f msec\n", msec_total);
printf(" gFlops : %.4f gflops\n", gflo/(msec_total*1e-3));
cudaErrChk (hipMemcpy(result, d_C, size_C, hipMemcpyDeviceToHost));
cudaErrChk (hipDeviceSynchronize ())
cudaErrChk (hipFree (d_A));
cudaErrChk (hipFree (d_B));
cudaErrChk (hipFree (d_C));
cuBLASErrChk (hipblasDestroy (handle));
return result;
}
/****************************************
* Helper functions for host
****************************************/
const struct config host_get_cmd_args(int argc, char** argv) {
int m=10240, n=10240, k=10240;
float alpha=1.0f, beta=1.0f;
bool do_test = false;
if (argc >= 2)
do_test = (bool)atoi(argv[1]);
if (argc >= 7) {
m = atoi(argv[2]);
n = atoi(argv[3]);
k = atoi(argv[4]);
alpha = atof(argv[5]);
beta = atof(argv[6]);
}
struct config conf = {
m,
k,
k,
n,
m,
n,
alpha,
beta,
do_test
};
printf("\n================================================\n");
printf("CUDA implementaion of SGEMM\n");
printf(" args: ./matmul [test] [m, n, k, alpha, beta]\n");
printf(" C[a, c] = alpha * A[a, b] @ B[b, c] + beta * C[a, c]\n");
printf(" C[%d, %d] = %f * A[%d, %d] @ B[%d, %d] + %f * C[%d, %d]\n", m,n,alpha,m,k,k,n,beta,m,n);
printf("================================================\n\n");
return conf;
}
void host_initialize(float *mem, const int H, const int W) {
for (int i=0; i<H; i++) {
for (int j=0; j<W; j++) {
mem[i*W+j] = (float)(rand()%100)/100;
}
}
}
void host_test(const float *A, const float *B, const float *C, const float * result, const struct config conf) {
if (conf.do_test == false) {
printf("[TEST] Test skipped..\n");
return;
}
printf("[TEST] Test start..\n");
float alpha=conf.alpha, beta=conf.beta;
int len_k = conf.AW;
bool flag_exit[NUM_CORE] = {0};
for (int i=0; i<conf.CH; i++) {
if (i%100 == 0) {
printf("\r Test....[%5d/%5d]", i, conf.CH);
fflush(stdout);
}
#pragma omp parallel for num_threads(NUM_CORE)
for (int j=0; j<conf.CW; j++) {
int tid = omp_get_thread_num();
float sum = 0;
for (int k=0; k<len_k; k++) {
sum += A[i*conf.AW+k]*B[k*conf.BW+j];
}
sum = alpha*sum+beta*C[i*conf.CW+j];
// if ( result[i*conf.CW+j] != sum ) {
if ( (float)((1.f-EPS)*sum) >= (float)result[i*conf.CW+j] || (float)result[i*conf.CW+j] >= (float)((1.f+EPS)*sum) ) {
printf(" \n[ERROR] [%d threads] result should be : %f < C[%d][%d] = %.f < %f (GT: %f)\n", tid, (1.f-EPS)*sum, i, j, result[i*conf.CW+j], (1.f+EPS)*sum, sum );
printf(" Test failed...!\n");
flag_exit[tid] = true;
}
}
for (int tid=0; tid<NUM_CORE; tid++)
if (flag_exit[tid] == true)
return;
}
printf("\n Test passed!!\n");
return;
}
/***************************************
* Main function
**************************************/
int main(int argc, char** argv) {
/*** Program configuration ***/
const struct config conf = host_get_cmd_args(argc, argv);
srand(0);
/*** Initialize Data ***/
float *A = (float *) malloc (conf.AH*conf.AW*sizeof(float));
float *B = (float *) malloc (conf.BH*conf.BW*sizeof(float));
float *C = (float *) calloc (conf.CH*conf.CW,sizeof(float));
host_initialize(A, conf.AH, conf.AW);
host_initialize(B, conf.BH, conf.BW);
host_initialize(C, conf.CH, conf.CW);
size_t total_size = (size_t)(conf.AH*conf.AW*sizeof(float) + conf.BH*conf.BW*sizeof(float) + 2.0*conf.CH*conf.CW*sizeof(float));
printf("[Mem] Total size of matrices : %.3fGB\n", total_size*1e-9);
/*** Run matmul ***/
float* result = host_mat_mul (A, B, C, conf);
/*** Test result ***/
host_test(A, B, C, result, conf);
/*** Finalize ***/
free (A);
free (B);
free (C);
free (result);
hipProfilerStop ();
return 0;
}
| d0ea3d1d33548d095e871156058f95b802cc6dd3.cu |
#include <cstdio>
#include <cstdlib>
#include <random>
#include <sys/time.h>
#include <cublas_v2.h>
#include <cudaProfiler.h>
#include <omp.h>
#define NUM_CORE (6)
#define EPS (1e-3)
static const char *_cudaGetErrorEnum(cublasStatus_t error)
{
switch (error)
{
case CUBLAS_STATUS_SUCCESS:
return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
}
return "<unknown>";
}
#define cuBLASErrChk(ans) { cuBLASAssert((ans), __FILE__, __LINE__); }
inline void cuBLASAssert(cublasStatus_t code, const char *file, int line, bool abort=true)
{
if (code != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr,"CUDA assert: %s %s %d\n", _cudaGetErrorEnum(code), file, line);
if (abort) exit(code);
}
}
#define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"CUDA assert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
struct config {
int AH;
int AW;
int BH;
int BW;
int CH;
int CW;
float alpha;
float beta;
bool do_test;
};
/***************************************
* Device code "matmul"
**************************************/
/***************************************
* Host code "matmul"
**************************************/
float* host_mat_mul(const float* A, const float* B, const float* C, const struct config conf) {
printf("[Kernel] Run kernal\n");
/*** Initialize device memory ***/
size_t size_A = sizeof(float)*conf.AH*conf.AW;
size_t size_B = sizeof(float)*conf.BH*conf.BW;
size_t size_C = sizeof(float)*conf.CH*conf.CW;
float *d_A, *d_B, *d_C;
float *result = (float *) malloc (conf.CH*conf.CW*sizeof(float));
cudaErrChk (cudaMalloc ((void**)(&d_A), size_A));
cudaErrChk (cudaMalloc ((void**)(&d_B), size_B));
cudaErrChk (cudaMalloc ((void**)(&d_C), size_C));
cudaErrChk (cudaMemcpy (d_A, A, size_A, cudaMemcpyHostToDevice));
cudaErrChk (cudaMemcpy (d_B, B, size_B, cudaMemcpyHostToDevice));
cudaErrChk (cudaMemcpy (d_C, C, size_C, cudaMemcpyHostToDevice));
cudaErrChk (cudaDeviceSynchronize ())
/*** Setup execution config ***/
int cublas_version;
cublasHandle_t handle;
cuBLASErrChk (cublasCreate (&handle));
cuBLASErrChk (cublasGetVersion (handle, &cublas_version));
cuBLASErrChk (cublasSetMathMode (handle, CUBLAS_DEFAULT_MATH));
printf(" Tensor core not used [cublas ver. %d.%d]\n" , cublas_version/1000, cublas_version%1000);
printf(" Acceptable error rate %.7f [%f*GT < Pred < %f*GT]\n" , EPS, (1-EPS), (1+EPS));
int m=conf.CH, n=conf.CW, k=conf.AW;
const float *alpha=&(conf.alpha), *beta=&(conf.beta);
/*** Run CUDA kernel ***/
cudaEvent_t start, stop;
cudaErrChk(cudaEventCreate(&start));
cudaErrChk(cudaEventCreate(&stop));
cudaErrChk(cudaEventRecord(start, NULL));
// Main body
cuBLASErrChk (cublasSgemm (handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, alpha, d_B, n, d_A, k, beta, d_C, n));
// End of main body
cudaErrChk(cudaEventRecord(stop, NULL));
cudaErrChk(cudaEventSynchronize(stop));
float msec_total = 0.0f;
float gflo = conf.CH*conf.CW*(2.0*conf.AW+2)*1e-9;
cudaErrChk(cudaEventElapsedTime(&msec_total, start, stop));
printf(" Elaped time: %.4f msec\n", msec_total);
printf(" gFlops : %.4f gflops\n", gflo/(msec_total*1e-3));
cudaErrChk (cudaMemcpy(result, d_C, size_C, cudaMemcpyDeviceToHost));
cudaErrChk (cudaDeviceSynchronize ())
cudaErrChk (cudaFree (d_A));
cudaErrChk (cudaFree (d_B));
cudaErrChk (cudaFree (d_C));
cuBLASErrChk (cublasDestroy (handle));
return result;
}
/****************************************
* Helper functions for host
****************************************/
const struct config host_get_cmd_args(int argc, char** argv) {
int m=10240, n=10240, k=10240;
float alpha=1.0f, beta=1.0f;
bool do_test = false;
if (argc >= 2)
do_test = (bool)atoi(argv[1]);
if (argc >= 7) {
m = atoi(argv[2]);
n = atoi(argv[3]);
k = atoi(argv[4]);
alpha = atof(argv[5]);
beta = atof(argv[6]);
}
struct config conf = {
m,
k,
k,
n,
m,
n,
alpha,
beta,
do_test
};
printf("\n================================================\n");
printf("CUDA implementaion of SGEMM\n");
printf(" args: ./matmul [test] [m, n, k, alpha, beta]\n");
printf(" C[a, c] = alpha * A[a, b] @ B[b, c] + beta * C[a, c]\n");
printf(" C[%d, %d] = %f * A[%d, %d] @ B[%d, %d] + %f * C[%d, %d]\n", m,n,alpha,m,k,k,n,beta,m,n);
printf("================================================\n\n");
return conf;
}
void host_initialize(float *mem, const int H, const int W) {
for (int i=0; i<H; i++) {
for (int j=0; j<W; j++) {
mem[i*W+j] = (float)(rand()%100)/100;
}
}
}
void host_test(const float *A, const float *B, const float *C, const float * result, const struct config conf) {
if (conf.do_test == false) {
printf("[TEST] Test skipped..\n");
return;
}
printf("[TEST] Test start..\n");
float alpha=conf.alpha, beta=conf.beta;
int len_k = conf.AW;
bool flag_exit[NUM_CORE] = {0};
for (int i=0; i<conf.CH; i++) {
if (i%100 == 0) {
printf("\r Test....[%5d/%5d]", i, conf.CH);
fflush(stdout);
}
#pragma omp parallel for num_threads(NUM_CORE)
for (int j=0; j<conf.CW; j++) {
int tid = omp_get_thread_num();
float sum = 0;
for (int k=0; k<len_k; k++) {
sum += A[i*conf.AW+k]*B[k*conf.BW+j];
}
sum = alpha*sum+beta*C[i*conf.CW+j];
// if ( result[i*conf.CW+j] != sum ) {
if ( (float)((1.f-EPS)*sum) >= (float)result[i*conf.CW+j] || (float)result[i*conf.CW+j] >= (float)((1.f+EPS)*sum) ) {
printf(" \n[ERROR] [%d threads] result should be : %f < C[%d][%d] = %.f < %f (GT: %f)\n", tid, (1.f-EPS)*sum, i, j, result[i*conf.CW+j], (1.f+EPS)*sum, sum );
printf(" Test failed...!\n");
flag_exit[tid] = true;
}
}
for (int tid=0; tid<NUM_CORE; tid++)
if (flag_exit[tid] == true)
return;
}
printf("\n Test passed!!\n");
return;
}
/***************************************
* Main function
**************************************/
int main(int argc, char** argv) {
/*** Program configuration ***/
const struct config conf = host_get_cmd_args(argc, argv);
srand(0);
/*** Initialize Data ***/
float *A = (float *) malloc (conf.AH*conf.AW*sizeof(float));
float *B = (float *) malloc (conf.BH*conf.BW*sizeof(float));
float *C = (float *) calloc (conf.CH*conf.CW,sizeof(float));
host_initialize(A, conf.AH, conf.AW);
host_initialize(B, conf.BH, conf.BW);
host_initialize(C, conf.CH, conf.CW);
size_t total_size = (size_t)(conf.AH*conf.AW*sizeof(float) + conf.BH*conf.BW*sizeof(float) + 2.0*conf.CH*conf.CW*sizeof(float));
printf("[Mem] Total size of matrices : %.3fGB\n", total_size*1e-9);
/*** Run matmul ***/
float* result = host_mat_mul (A, B, C, conf);
/*** Test result ***/
host_test(A, B, C, result, conf);
/*** Finalize ***/
free (A);
free (B);
free (C);
free (result);
cuProfilerStop ();
return 0;
}
|
f785e5d5f7c0e254f4a4b1f7cd283756c91d1008.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* Copyright (c) 2017 Microsoft
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file multi_proposal.cu
* \brief MultiProposal Operator
* \author Shaoqing Ren, Xizhou Zhu, Jian Guo
*/
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <ctime>
#include <iostream>
#include "../operator_common.h"
#include "../mshadow_op.h"
#include "./multi_proposal-inl.h"
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define FRCNN_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
namespace multi_proposal {
// scores are (b, 2 * anchor, h, w)
// workspace_proposals are (b, h * w * anchor, 5)
// w defines "x" and h defines "y"
// count should be the total number of anchors, b * h * w * anchors
template<typename DType>
__global__ void ProposalGridKernel(const int count,
const int num_anchors,
const int height,
const int width,
const int feature_stride,
const DType* scores,
DType* workspace_proposals) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % width;
int h = (index / num_anchors / width) % height;
int b = index / num_anchors / width / height;
workspace_proposals[index * 5 + 0] = workspace_proposals[a * 5 + 0] + w * feature_stride;
workspace_proposals[index * 5 + 1] = workspace_proposals[a * 5 + 1] + h * feature_stride;
workspace_proposals[index * 5 + 2] = workspace_proposals[a * 5 + 2] + w * feature_stride;
workspace_proposals[index * 5 + 3] = workspace_proposals[a * 5 + 3] + h * feature_stride;
workspace_proposals[index * 5 + 4] =
scores[((b * (2 * num_anchors) + a + num_anchors) * height + h) * width + w];
}
}
// boxes are (b, h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (b, h * w * anchor, 5)
// count should be total anchors numbers, b * h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename DType>
__global__ void BBoxPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int feature_stride,
const DType* im_infos,
const DType* boxes,
const DType* deltas,
DType* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = (index / num_anchors / feat_width) % feat_height;
int b = index / num_anchors / feat_width / feat_height;
float im_height = float(im_infos[b * 3]);
float im_width = float(im_infos[b * 3 + 1]);
int real_height = static_cast<int>(im_height / feature_stride);
int real_width = static_cast<int>(im_width / feature_stride);
float width = float(boxes[index * 5 + 2]) - float(boxes[index * 5 + 0]) + 1.0f;
float height = float(boxes[index * 5 + 3]) - float(boxes[index * 5 + 1]) + 1.0f;
float ctr_x = float(boxes[index * 5 + 0]) + 0.5f * (width - 1.0f);
float ctr_y = float(boxes[index * 5 + 1]) + 0.5f * (height - 1.0f);
int ba = (b * num_anchors + a);
float dx = float(deltas[((ba * 4) * feat_height + h) * feat_width + w]);
float dy = float(deltas[((ba * 4 + 1) * feat_height + h) * feat_width + w]);
float dw = float(deltas[((ba * 4 + 2) * feat_height + h) * feat_width + w]);
float dh = float(deltas[((ba * 4 + 3) * feat_height + h) * feat_width + w]);
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
float pred_w = exp(dw) * width;
float pred_h = exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f);
float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f);
float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f);
float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f);
pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f);
pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f);
pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f);
pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// boxes are (b, h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (b, h * w * anchor, 5)
// count should be total anchors numbers, b * h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename DType>
__global__ void IoUPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int feature_stride,
const DType* im_infos,
const DType* boxes,
const DType* deltas,
DType* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = (index / num_anchors / feat_width) % feat_height;
int b = index / num_anchors / feat_width / feat_height;
float im_height = float(im_infos[b * 3]);
float im_width = float(im_infos[b * 3 + 1]);
int real_height = static_cast<int>(im_height / feature_stride);
int real_width = static_cast<int>(im_width / feature_stride);
float x1 = float(boxes[index * 5 + 0]);
float y1 = float(boxes[index * 5 + 1]);
float x2 = float(boxes[index * 5 + 2]);
float y2 = float(boxes[index * 5 + 3]);
int ba = (b * num_anchors + a);
float dx1 = float(deltas[((ba * 4) * feat_height + h) * feat_width + w]);
float dy1 = float(deltas[((ba * 4 + 1) * feat_height + h) * feat_width + w]);
float dx2 = float(deltas[((ba * 4 + 2) * feat_height + h) * feat_width + w]);
float dy2 = float(deltas[((ba * 4 + 3) * feat_height + h) * feat_width + w]);
float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f);
float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f);
float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f);
float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// filter box with stride less than rpn_min_size
// filter: set score to zero
// dets (b, n, 5)
template<typename DType>
__global__ void FilterBoxKernel(const int count,
const int count_anchors,
const float original_min_size,
const DType* im_infos,
DType* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int b = index / count_anchors;
float iw = float(dets[index * 5 + 2]) - float(dets[index * 5 + 0]) + 1.0f;
float ih = float(dets[index * 5 + 3]) - float(dets[index * 5 + 1]) + 1.0f;
float min_size = original_min_size * float(im_infos[b * 3 + 2]);
if (iw < min_size || ih < min_size) {
dets[index * 5 + 0] -= min_size / 2;
dets[index * 5 + 1] -= min_size / 2;
dets[index * 5 + 2] += min_size / 2;
dets[index * 5 + 3] += min_size / 2;
dets[index * 5 + 4] = -1.0f;
}
}
}
// copy score and init order
// dets (n, 5); score (n, ); order (n, )
// count should be n (total anchors or proposals)
template<typename DType>
__global__ void CopyScoreKernel(const int count,
const DType* dets,
DType* score,
int* order) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
score[index] = dets[index * 5 + 4];
order[index] = index;
}
}
// reorder proposals according to order and keep the top_n proposals
// prev_dets (n, 5); order (n, ); dets (n, 5)
// count should be output anchor numbers (top_n)
template<typename DType>
__global__ void ReorderProposalsKernel(const int count,
const DType* prev_dets,
const int* order,
DType* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
const int order_i = order[index];
for (int j = 0; j < 5; j ++) {
dets[index * 5 + j] = prev_dets[order_i * 5 + j];
}
}
}
template<typename DType>
__device__ inline DType devIoU(DType const * const a, DType const * const b) {
float left = max(float(a[0]), float(b[0])), right = min(float(a[2]), float(b[2]));
float top = max(float(a[1]), float(b[1])), bottom = min(float(a[3]), float(b[3]));
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (float(a[2]) - float(a[0]) + 1) * (float(a[3]) - float(a[1]) + 1);
float Sb = (float(b[2]) - float(b[0]) + 1) * (float(b[3]) - float(b[1]) + 1);
return interS / (Sa + Sb - interS);
}
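// Each (row, col) thread block compares one chunk of up to 64 "row" boxes against one
// chunk of up to 64 "column" boxes; bit i of dev_mask[row_box * col_blocks + col_start]
// is set when column box i overlaps the row box above nms_overlap_thresh.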
template<typename DType>
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const DType *dev_boxes, uint64_t *dev_mask) {
const int threadsPerBlock = sizeof(uint64_t) * 8;
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ DType block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const DType *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
uint64_t t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
template<typename DType>
void _nms(const mshadow::Tensor<gpu, 2, DType>& boxes,
const float nms_overlap_thresh,
const int rpn_post_nms_top_n,
int *keep,
int *num_out) {
const int threadsPerBlock = sizeof(uint64_t) * 8; // 64
const int boxes_num = boxes.size(0);
const int boxes_dim = boxes.size(1);
DType* boxes_dev = boxes.dptr_;
uint64_t* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
FRCNN_CUDA_CHECK(hipMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(uint64_t)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
std::vector<uint64_t> mask_host(boxes_num * col_blocks);
FRCNN_CUDA_CHECK(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(uint64_t) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<uint64_t> remv(col_blocks);
memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);
int num_to_keep = 0;
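  // Greedy host-side pass: boxes arrive sorted by score, so a box is kept unless an
  // earlier kept box already flagged it in remv; each kept box ORs its overlap mask
  // into remv to suppress later overlapping boxes.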
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep[num_to_keep++] = i;
if (num_to_keep >= rpn_post_nms_top_n) break;
uint64_t *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
FRCNN_CUDA_CHECK(hipFree(mask_dev));
}
// copy proposals to output
// dets (top_n, 5); keep (top_n, ); out (top_n, )
// count should be top_n (total anchors or proposals)
template<typename DType>
__global__ void PrepareOutput(const int count,
const DType* dets,
const int* keep,
const int out_size,
const int image_index,
DType* out,
DType* score) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
out[index * 5] = image_index;
if (index < out_size) {
int keep_i = keep[index];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
} else {
int keep_i = keep[index % out_size];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
}
}
}
} // namespace multi_proposal
} // namespace cuda
} // namespace mshadow
namespace mxnet {
namespace op {
template<typename xpu, typename DType>
class MultiProposalGPUOp : public Operator{
public:
explicit MultiProposalGPUOp(MultiProposalParam param) {
this->param_ = param;
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mshadow::cuda;
using namespace mshadow::cuda::multi_proposal;
CHECK_EQ(in_data.size(), 3);
CHECK_EQ(out_data.size(), 2);
CHECK_GT(req.size(), 1);
CHECK_EQ(req[proposal::kOut], kWriteTo);
/*CHECK_EQ(in_data[proposal::kClsProb].shape_[0], 1)
<< "Sorry, multiple images each device is not implemented.";*/
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4, DType> scores = in_data[proposal::kClsProb].get<xpu, 4, DType>(s);
Tensor<xpu, 4, DType> bbox_deltas = in_data[proposal::kBBoxPred].get<xpu, 4, DType>(s);
Tensor<xpu, 2, DType> im_info = in_data[proposal::kImInfo].get<xpu, 2, DType>(s);
Tensor<xpu, 2, DType> out = out_data[proposal::kOut].get<xpu, 2, DType>(s);
Tensor<xpu, 2, DType> out_score = out_data[proposal::kScore].get<xpu, 2, DType>(s);
int num_images = scores.size(0);
int num_anchors = scores.size(1) / 2;
int height = scores.size(2);
int width = scores.size(3);
int count_anchors = num_anchors * height * width; // count of total anchors
int count = num_images * count_anchors;
// set to -1 for max
int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n
: count_anchors;
rpn_pre_nms_top_n = ::min(rpn_pre_nms_top_n, count_anchors);
int rpn_post_nms_top_n = ::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n);
// Generate first anchors based on base anchor
std::vector<DType> base_anchor(4);
base_anchor[0] = 0.0;
base_anchor[1] = 0.0;
base_anchor[2] = param_.feature_stride - 1.0;
base_anchor[3] = param_.feature_stride - 1.0;
CHECK_EQ(num_anchors, param_.ratios.ndim() * param_.scales.ndim());
std::vector<DType> anchors;
utils::GenerateAnchors(base_anchor,
param_.ratios,
param_.scales,
&anchors);
// Copy generated anchors to GPU
DType* workspace_proposals_ptr = NULL;
FRCNN_CUDA_CHECK(hipMalloc(&workspace_proposals_ptr,
sizeof(DType) * num_images * count_anchors * 5));
Tensor<xpu, 3, DType> workspace_proposals(workspace_proposals_ptr,
Shape3(num_images, count_anchors, 5));
FRCNN_CUDA_CHECK(hipMemcpy(workspace_proposals.dptr_, &anchors[0],
sizeof(DType) * anchors.size(), hipMemcpyHostToDevice));
// Copy proposals to a mesh grid
dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid");
hipLaunchKernelGGL(( ProposalGridKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, height, width, param_.feature_stride,
scores.dptr_, workspace_proposals.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// Transform anchors and bbox_deltas into bboxes
CheckLaunchParam(dimGrid, dimBlock, "BBoxPred");
if (param_.iou_loss) {
hipLaunchKernelGGL(( IoUPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, height, width, param_.feature_stride, im_info.dptr_,
workspace_proposals.dptr_, bbox_deltas.dptr_, workspace_proposals.dptr_);
} else {
hipLaunchKernelGGL(( BBoxPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, height, width, param_.feature_stride, im_info.dptr_,
workspace_proposals.dptr_, bbox_deltas.dptr_, workspace_proposals.dptr_);
}
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// filter boxes with less than rpn_min_size
CheckLaunchParam(dimGrid, dimBlock, "FilterBox");
hipLaunchKernelGGL(( FilterBoxKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, count_anchors, param_.rpn_min_size, im_info.dptr_, workspace_proposals.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
dimGrid = dim3((count_anchors + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
dimBlock = dim3(kMaxThreadsPerBlock);
// Copy score to a continuous memory
DType* score_ptr = NULL;
FRCNN_CUDA_CHECK(hipMalloc(&score_ptr, sizeof(DType) * count_anchors));
Tensor<xpu, 1, DType> score(score_ptr, Shape1(count_anchors));
int* order_ptr = NULL;
FRCNN_CUDA_CHECK(hipMalloc(&order_ptr, sizeof(int) * count_anchors));
Tensor<xpu, 1, int> order(order_ptr, Shape1(count_anchors));
DType* workspace_ordered_proposals_ptr = NULL;
FRCNN_CUDA_CHECK(hipMalloc(&workspace_ordered_proposals_ptr,
sizeof(DType) * rpn_pre_nms_top_n * 5));
Tensor<xpu, 2, DType> workspace_ordered_proposals(workspace_ordered_proposals_ptr,
Shape2(rpn_pre_nms_top_n, 5));
int* keep;
FRCNN_CUDA_CHECK(hipMalloc(&keep, sizeof(int) * rpn_pre_nms_top_n));
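    // Per-image pipeline: copy scores, sort proposals by score, reorder the top
    // rpn_pre_nms_top_n, run NMS, then write rpn_post_nms_top_n rows to the output,
    // padding by reusing kept boxes when fewer survive.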
for (int b = 0; b < num_images; b++) {
CheckLaunchParam(dimGrid, dimBlock, "CopyScore");
CopyScoreKernel << <dimGrid, dimBlock >> >(
count_anchors, workspace_proposals.dptr_ + b * count_anchors * 5,
score.dptr_, order.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// argsort score, save order
thrust::stable_sort_by_key(thrust::device,
score.dptr_,
score.dptr_ + score.size(0),
order.dptr_,
thrust::greater<DType>());
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// Reorder proposals according to order
dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals");
ReorderProposalsKernel << <dimGrid, dimBlock >> >(
rpn_pre_nms_top_n, workspace_proposals.dptr_ + b * count_anchors * 5,
order.dptr_, workspace_ordered_proposals.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// perform nms
std::vector<int> _keep(workspace_ordered_proposals.size(0));
int out_size = 0;
_nms(workspace_ordered_proposals,
param_.threshold,
rpn_post_nms_top_n,
&_keep[0],
&out_size);
// copy nms result to gpu
FRCNN_CUDA_CHECK(hipMemcpy(keep, &_keep[0], sizeof(int) * _keep.size(),
hipMemcpyHostToDevice));
// copy results after nms
dimGrid.x = (param_.rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
PrepareOutput << <dimGrid, dimBlock >> >(
param_.rpn_post_nms_top_n, workspace_ordered_proposals.dptr_, keep, out_size, b,
out.dptr_ + b * param_.rpn_post_nms_top_n * 5,
out_score.dptr_ + b * param_.rpn_post_nms_top_n);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
}
// free temporary memory
FRCNN_CUDA_CHECK(hipFree(keep));
FRCNN_CUDA_CHECK(hipFree(workspace_ordered_proposals_ptr));
FRCNN_CUDA_CHECK(hipFree(workspace_proposals_ptr));
FRCNN_CUDA_CHECK(hipFree(score_ptr));
FRCNN_CUDA_CHECK(hipFree(order_ptr));
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 3);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4, DType> gscores = in_grad[proposal::kClsProb].get<xpu, 4, DType>(s);
Tensor<xpu, 4, DType> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, DType>(s);
Tensor<xpu, 2, DType> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, DType>(s);
// can not assume the grad would be zero
Assign(gscores, req[proposal::kClsProb], 0);
Assign(gbbox, req[proposal::kBBoxPred], 0);
Assign(ginfo, req[proposal::kImInfo], 0);
}
private:
MultiProposalParam param_;
}; // class MultiProposalGPUOp
template<>
Operator* CreateOp<gpu>(MultiProposalParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new MultiProposalGPUOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
| f785e5d5f7c0e254f4a4b1f7cd283756c91d1008.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* Copyright (c) 2017 Microsoft
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file multi_proposal.cu
* \brief MultiProposal Operator
* \author Shaoqing Ren, Xizhou Zhu, Jian Guo
*/
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <ctime>
#include <iostream>
#include "../operator_common.h"
#include "../mshadow_op.h"
#include "./multi_proposal-inl.h"
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define FRCNN_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
namespace multi_proposal {
// scores are (b, 2 * anchor, h, w)
// workspace_proposals are (b, h * w * anchor, 5)
// w defines "x" and h defines "y"
// count should be the total number of anchors, b * h * w * anchors
template<typename DType>
__global__ void ProposalGridKernel(const int count,
const int num_anchors,
const int height,
const int width,
const int feature_stride,
const DType* scores,
DType* workspace_proposals) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % width;
int h = (index / num_anchors / width) % height;
int b = index / num_anchors / width / height;
workspace_proposals[index * 5 + 0] = workspace_proposals[a * 5 + 0] + w * feature_stride;
workspace_proposals[index * 5 + 1] = workspace_proposals[a * 5 + 1] + h * feature_stride;
workspace_proposals[index * 5 + 2] = workspace_proposals[a * 5 + 2] + w * feature_stride;
workspace_proposals[index * 5 + 3] = workspace_proposals[a * 5 + 3] + h * feature_stride;
workspace_proposals[index * 5 + 4] =
scores[((b * (2 * num_anchors) + a + num_anchors) * height + h) * width + w];
}
}
// boxes are (b, h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (b, h * w * anchor, 5)
// count should be total anchors numbers, b * h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename DType>
__global__ void BBoxPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int feature_stride,
const DType* im_infos,
const DType* boxes,
const DType* deltas,
DType* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = (index / num_anchors / feat_width) % feat_height;
int b = index / num_anchors / feat_width / feat_height;
float im_height = float(im_infos[b * 3]);
float im_width = float(im_infos[b * 3 + 1]);
int real_height = static_cast<int>(im_height / feature_stride);
int real_width = static_cast<int>(im_width / feature_stride);
float width = float(boxes[index * 5 + 2]) - float(boxes[index * 5 + 0]) + 1.0f;
float height = float(boxes[index * 5 + 3]) - float(boxes[index * 5 + 1]) + 1.0f;
float ctr_x = float(boxes[index * 5 + 0]) + 0.5f * (width - 1.0f);
float ctr_y = float(boxes[index * 5 + 1]) + 0.5f * (height - 1.0f);
int ba = (b * num_anchors + a);
float dx = float(deltas[((ba * 4) * feat_height + h) * feat_width + w]);
float dy = float(deltas[((ba * 4 + 1) * feat_height + h) * feat_width + w]);
float dw = float(deltas[((ba * 4 + 2) * feat_height + h) * feat_width + w]);
float dh = float(deltas[((ba * 4 + 3) * feat_height + h) * feat_width + w]);
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
float pred_w = exp(dw) * width;
float pred_h = exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f);
float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f);
float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f);
float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f);
pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f);
pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f);
pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f);
pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// boxes are (b, h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (b, h * w * anchor, 5)
// count should be total anchors numbers, b * h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename DType>
__global__ void IoUPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int feature_stride,
const DType* im_infos,
const DType* boxes,
const DType* deltas,
DType* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = (index / num_anchors / feat_width) % feat_height;
int b = index / num_anchors / feat_width / feat_height;
float im_height = float(im_infos[b * 3]);
float im_width = float(im_infos[b * 3 + 1]);
int real_height = static_cast<int>(im_height / feature_stride);
int real_width = static_cast<int>(im_width / feature_stride);
float x1 = float(boxes[index * 5 + 0]);
float y1 = float(boxes[index * 5 + 1]);
float x2 = float(boxes[index * 5 + 2]);
float y2 = float(boxes[index * 5 + 3]);
int ba = (b * num_anchors + a);
float dx1 = float(deltas[((ba * 4) * feat_height + h) * feat_width + w]);
float dy1 = float(deltas[((ba * 4 + 1) * feat_height + h) * feat_width + w]);
float dx2 = float(deltas[((ba * 4 + 2) * feat_height + h) * feat_width + w]);
float dy2 = float(deltas[((ba * 4 + 3) * feat_height + h) * feat_width + w]);
float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f);
float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f);
float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f);
float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// filter box with stride less than rpn_min_size
// filter: set score to zero
// dets (b, n, 5)
template<typename DType>
__global__ void FilterBoxKernel(const int count,
const int count_anchors,
const float original_min_size,
const DType* im_infos,
DType* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int b = index / count_anchors;
float iw = float(dets[index * 5 + 2]) - float(dets[index * 5 + 0]) + 1.0f;
float ih = float(dets[index * 5 + 3]) - float(dets[index * 5 + 1]) + 1.0f;
float min_size = original_min_size * float(im_infos[b * 3 + 2]);
if (iw < min_size || ih < min_size) {
dets[index * 5 + 0] -= min_size / 2;
dets[index * 5 + 1] -= min_size / 2;
dets[index * 5 + 2] += min_size / 2;
dets[index * 5 + 3] += min_size / 2;
dets[index * 5 + 4] = -1.0f;
}
}
}
// copy score and init order
// dets (n, 5); score (n, ); order (n, )
// count should be n (total anchors or proposals)
template<typename DType>
__global__ void CopyScoreKernel(const int count,
const DType* dets,
DType* score,
int* order) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
score[index] = dets[index * 5 + 4];
order[index] = index;
}
}
// reorder proposals according to order and keep the top_n proposals
// prev_dets (n, 5); order (n, ); dets (n, 5)
// count should be output anchor numbers (top_n)
template<typename DType>
__global__ void ReorderProposalsKernel(const int count,
const DType* prev_dets,
const int* order,
DType* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
const int order_i = order[index];
for (int j = 0; j < 5; j ++) {
dets[index * 5 + j] = prev_dets[order_i * 5 + j];
}
}
}
template<typename DType>
__device__ inline DType devIoU(DType const * const a, DType const * const b) {
float left = max(float(a[0]), float(b[0])), right = min(float(a[2]), float(b[2]));
float top = max(float(a[1]), float(b[1])), bottom = min(float(a[3]), float(b[3]));
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (float(a[2]) - float(a[0]) + 1) * (float(a[3]) - float(a[1]) + 1);
float Sb = (float(b[2]) - float(b[0]) + 1) * (float(b[3]) - float(b[1]) + 1);
return interS / (Sa + Sb - interS);
}
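// Each (row, col) thread block compares one chunk of up to 64 "row" boxes against one
// chunk of up to 64 "column" boxes; bit i of dev_mask[row_box * col_blocks + col_start]
// is set when column box i overlaps the row box above nms_overlap_thresh.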
template<typename DType>
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const DType *dev_boxes, uint64_t *dev_mask) {
const int threadsPerBlock = sizeof(uint64_t) * 8;
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ DType block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const DType *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
uint64_t t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
template<typename DType>
void _nms(const mshadow::Tensor<gpu, 2, DType>& boxes,
const float nms_overlap_thresh,
const int rpn_post_nms_top_n,
int *keep,
int *num_out) {
const int threadsPerBlock = sizeof(uint64_t) * 8; // 64
const int boxes_num = boxes.size(0);
const int boxes_dim = boxes.size(1);
DType* boxes_dev = boxes.dptr_;
uint64_t* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
FRCNN_CUDA_CHECK(cudaMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(uint64_t)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
std::vector<uint64_t> mask_host(boxes_num * col_blocks);
FRCNN_CUDA_CHECK(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(uint64_t) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<uint64_t> remv(col_blocks);
memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);
int num_to_keep = 0;
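  // Greedy host-side pass: boxes arrive sorted by score, so a box is kept unless an
  // earlier kept box already flagged it in remv; each kept box ORs its overlap mask
  // into remv to suppress later overlapping boxes.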
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep[num_to_keep++] = i;
if (num_to_keep >= rpn_post_nms_top_n) break;
uint64_t *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
FRCNN_CUDA_CHECK(cudaFree(mask_dev));
}
// copy proposals to output
// dets (top_n, 5); keep (top_n, ); out (top_n, )
// count should be top_n (total anchors or proposals)
template<typename DType>
__global__ void PrepareOutput(const int count,
const DType* dets,
const int* keep,
const int out_size,
const int image_index,
DType* out,
DType* score) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
out[index * 5] = image_index;
if (index < out_size) {
int keep_i = keep[index];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
} else {
int keep_i = keep[index % out_size];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
}
}
}
} // namespace multi_proposal
} // namespace cuda
} // namespace mshadow
namespace mxnet {
namespace op {
template<typename xpu, typename DType>
class MultiProposalGPUOp : public Operator{
public:
explicit MultiProposalGPUOp(MultiProposalParam param) {
this->param_ = param;
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mshadow::cuda;
using namespace mshadow::cuda::multi_proposal;
CHECK_EQ(in_data.size(), 3);
CHECK_EQ(out_data.size(), 2);
CHECK_GT(req.size(), 1);
CHECK_EQ(req[proposal::kOut], kWriteTo);
/*CHECK_EQ(in_data[proposal::kClsProb].shape_[0], 1)
<< "Sorry, multiple images each device is not implemented.";*/
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4, DType> scores = in_data[proposal::kClsProb].get<xpu, 4, DType>(s);
Tensor<xpu, 4, DType> bbox_deltas = in_data[proposal::kBBoxPred].get<xpu, 4, DType>(s);
Tensor<xpu, 2, DType> im_info = in_data[proposal::kImInfo].get<xpu, 2, DType>(s);
Tensor<xpu, 2, DType> out = out_data[proposal::kOut].get<xpu, 2, DType>(s);
Tensor<xpu, 2, DType> out_score = out_data[proposal::kScore].get<xpu, 2, DType>(s);
int num_images = scores.size(0);
int num_anchors = scores.size(1) / 2;
int height = scores.size(2);
int width = scores.size(3);
int count_anchors = num_anchors * height * width; // count of total anchors
int count = num_images * count_anchors;
// set to -1 for max
int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n
: count_anchors;
rpn_pre_nms_top_n = std::min(rpn_pre_nms_top_n, count_anchors);
int rpn_post_nms_top_n = std::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n);
// Generate first anchors based on base anchor
std::vector<DType> base_anchor(4);
base_anchor[0] = 0.0;
base_anchor[1] = 0.0;
base_anchor[2] = param_.feature_stride - 1.0;
base_anchor[3] = param_.feature_stride - 1.0;
CHECK_EQ(num_anchors, param_.ratios.ndim() * param_.scales.ndim());
std::vector<DType> anchors;
utils::GenerateAnchors(base_anchor,
param_.ratios,
param_.scales,
&anchors);
// Copy generated anchors to GPU
DType* workspace_proposals_ptr = NULL;
FRCNN_CUDA_CHECK(cudaMalloc(&workspace_proposals_ptr,
sizeof(DType) * num_images * count_anchors * 5));
Tensor<xpu, 3, DType> workspace_proposals(workspace_proposals_ptr,
Shape3(num_images, count_anchors, 5));
FRCNN_CUDA_CHECK(cudaMemcpy(workspace_proposals.dptr_, &anchors[0],
sizeof(DType) * anchors.size(), cudaMemcpyHostToDevice));
// Copy proposals to a mesh grid
dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid");
ProposalGridKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, height, width, param_.feature_stride,
scores.dptr_, workspace_proposals.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// Transform anchors and bbox_deltas into bboxes
CheckLaunchParam(dimGrid, dimBlock, "BBoxPred");
if (param_.iou_loss) {
IoUPredKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, height, width, param_.feature_stride, im_info.dptr_,
workspace_proposals.dptr_, bbox_deltas.dptr_, workspace_proposals.dptr_);
} else {
BBoxPredKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, height, width, param_.feature_stride, im_info.dptr_,
workspace_proposals.dptr_, bbox_deltas.dptr_, workspace_proposals.dptr_);
}
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// filter boxes with less than rpn_min_size
CheckLaunchParam(dimGrid, dimBlock, "FilterBox");
FilterBoxKernel<<<dimGrid, dimBlock>>>(
count, count_anchors, param_.rpn_min_size, im_info.dptr_, workspace_proposals.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
dimGrid = dim3((count_anchors + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
dimBlock = dim3(kMaxThreadsPerBlock);
// Copy score to a continuous memory
DType* score_ptr = NULL;
FRCNN_CUDA_CHECK(cudaMalloc(&score_ptr, sizeof(DType) * count_anchors));
Tensor<xpu, 1, DType> score(score_ptr, Shape1(count_anchors));
int* order_ptr = NULL;
FRCNN_CUDA_CHECK(cudaMalloc(&order_ptr, sizeof(int) * count_anchors));
Tensor<xpu, 1, int> order(order_ptr, Shape1(count_anchors));
DType* workspace_ordered_proposals_ptr = NULL;
FRCNN_CUDA_CHECK(cudaMalloc(&workspace_ordered_proposals_ptr,
sizeof(DType) * rpn_pre_nms_top_n * 5));
Tensor<xpu, 2, DType> workspace_ordered_proposals(workspace_ordered_proposals_ptr,
Shape2(rpn_pre_nms_top_n, 5));
int* keep;
FRCNN_CUDA_CHECK(cudaMalloc(&keep, sizeof(int) * rpn_pre_nms_top_n));
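    // Per-image pipeline: copy scores, sort proposals by score, reorder the top
    // rpn_pre_nms_top_n, run NMS, then write rpn_post_nms_top_n rows to the output,
    // padding by reusing kept boxes when fewer survive.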
for (int b = 0; b < num_images; b++) {
CheckLaunchParam(dimGrid, dimBlock, "CopyScore");
CopyScoreKernel << <dimGrid, dimBlock >> >(
count_anchors, workspace_proposals.dptr_ + b * count_anchors * 5,
score.dptr_, order.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// argsort score, save order
thrust::stable_sort_by_key(thrust::device,
score.dptr_,
score.dptr_ + score.size(0),
order.dptr_,
thrust::greater<DType>());
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// Reorder proposals according to order
dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals");
ReorderProposalsKernel << <dimGrid, dimBlock >> >(
rpn_pre_nms_top_n, workspace_proposals.dptr_ + b * count_anchors * 5,
order.dptr_, workspace_ordered_proposals.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// perform nms
std::vector<int> _keep(workspace_ordered_proposals.size(0));
int out_size = 0;
_nms(workspace_ordered_proposals,
param_.threshold,
rpn_post_nms_top_n,
&_keep[0],
&out_size);
// copy nms result to gpu
FRCNN_CUDA_CHECK(cudaMemcpy(keep, &_keep[0], sizeof(int) * _keep.size(),
cudaMemcpyHostToDevice));
// copy results after nms
dimGrid.x = (param_.rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
PrepareOutput << <dimGrid, dimBlock >> >(
param_.rpn_post_nms_top_n, workspace_ordered_proposals.dptr_, keep, out_size, b,
out.dptr_ + b * param_.rpn_post_nms_top_n * 5,
out_score.dptr_ + b * param_.rpn_post_nms_top_n);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
}
// free temporary memory
FRCNN_CUDA_CHECK(cudaFree(keep));
FRCNN_CUDA_CHECK(cudaFree(workspace_ordered_proposals_ptr));
FRCNN_CUDA_CHECK(cudaFree(workspace_proposals_ptr));
FRCNN_CUDA_CHECK(cudaFree(score_ptr));
FRCNN_CUDA_CHECK(cudaFree(order_ptr));
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 3);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4, DType> gscores = in_grad[proposal::kClsProb].get<xpu, 4, DType>(s);
Tensor<xpu, 4, DType> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, DType>(s);
Tensor<xpu, 2, DType> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, DType>(s);
// can not assume the grad would be zero
Assign(gscores, req[proposal::kClsProb], 0);
Assign(gbbox, req[proposal::kBBoxPred], 0);
Assign(ginfo, req[proposal::kImInfo], 0);
}
private:
MultiProposalParam param_;
}; // class MultiProposalGPUOp
template<>
Operator* CreateOp<gpu>(MultiProposalParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new MultiProposalGPUOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
|
9453c1c248f92fa05e539b210194cfce782e1dd7.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 9453c1c248f92fa05e539b210194cfce782e1dd7.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
ea8e5b01433a9ca6b13f1de67b785bec9c8f8360.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "stdio.h"
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <hip/hip_runtime.h>
// #include "/private/herten/NVIDIA_GPU_Computing_SDK/C/src/simplePrintf/cuPrintf.cuh"
__global__ void buggyAdd (int * numbers) {
numbers[0]++;
}
__global__ void betterAdd(int * numbers) {
atomicAdd(&numbers[0], 1);
}
// typedef thrust::tuple<thrust::constant_iterator<thrust::device_vector<int>* >, thrust::counting_iterator<int> > myTuple;
struct coarsePlus {
int n;
coarsePlus(int _n) : n(_n) {}
__host__ __device__ double operator() (const thrust::tuple<thrust::device_vector<int>*, int> &thatTuple) const {
double sum = 0;
int factor = *thrust::get<1>(thatTuple);
for (int i = n*factor; i < (2*n*factor - 1); i++) {
sum += (**(thrust::get<0>(thatTuple)))[i];
}
return sum;
}
};
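// Each thread owns one charge and accumulates a distance-attenuated contribution into
// every grid cell; atomicAdd is needed because all threads write the same grid entries.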
__global__ void potential_scatter (float* charge, float* grid) {
float currentCharge = charge[threadIdx.x];
int gridDimension = blockDim.x;
for (int i = 0; i < 100; i++) { // 100 should actually be gridDimension
float denominator = threadIdx.x - (i + 1); // thats the distance
//FIX BY ROLF (HAS TO BE TESTED:)
// float denominator = 1.0 / (1.0 + fabs(threadIdx.x - i));
float _denominator = denominator;
if (denominator < 0) _denominator = -denominator; // there is no abs() on cuda devices or something
currentCharge /= _denominator;
// cuPrintf("Position %i, value1 = %f", i, grid[i]);
// atomicAdd(&grid[i],2);
// cuPrintf(", value2 = %f", grid[i]);
atomicAdd(&grid[i], 2*currentCharge);
}
}
int main (int argc, char** argv) {
int* host_number = new int;
host_number[0] = 10;
std::cout << "source number = " << host_number[0] << std::endl;
int* dev_number;
hipMalloc((void**) &dev_number, sizeof(int));
std::vector<int> blocks, threads;
blocks.push_back(1); blocks.push_back(1); blocks.push_back(2); blocks.push_back(10); blocks.push_back(100); blocks.push_back(65535);
threads.push_back(1); threads.push_back(10); threads.push_back(450); threads.push_back(100); threads.push_back(1000); threads.push_back(1024);
std::cout << "## Part 1: Simple (buggy) add: " << std::endl;
for (unsigned int i = 0; i < blocks.size(); i++) {
hipMemcpy(dev_number, host_number, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( buggyAdd), dim3(blocks[i]),dim3(threads[i]), 0, 0, dev_number);
hipMemcpy(host_number, dev_number, sizeof(int), hipMemcpyDeviceToHost);
std::cout << " <<<" << blocks[i] << "," << threads[i] << ">>> = " << host_number[0] << std::endl;
host_number[0] = 10;
}
std::cout << "## Part 2: Using atomicAdd()" << std::endl;
for (unsigned int i = 0; i < blocks.size(); i++) {
hipMemcpy(dev_number, host_number, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( betterAdd), dim3(blocks[i]),dim3(threads[i]), 0, 0, dev_number);
hipMemcpy(host_number, dev_number, sizeof(int), hipMemcpyDeviceToHost);
std::cout << " <<<" << blocks[i] << "," << threads[i] << ">>> = " << host_number[0] << std::endl;
host_number[0] = 10;
}
std::cout << "## Part 3: coarsePlus" << std::endl;
int sizeOfVector = 100;
thrust::host_vector<int> h_vec(sizeOfVector);
srand(23);
for (unsigned int i = 0; i < sizeOfVector; i++) {
h_vec[i] = rand() % 100;
}
thrust::device_vector<int> d_vec = h_vec;
thrust::constant_iterator<thrust::device_vector<int>* > constIt = thrust::make_constant_iterator(&d_vec);
int result = thrust::transform_reduce(
thrust::make_zip_iterator(
thrust::make_tuple(
// thrust::make_constant_iterator(d_vec*),
constIt, // no constant iterator as a first element! because: start of a zip iterator is first element; end is, when first element again is reached (or something) - but with constant iterator it's always the same - so start = end - so invoked just once
// solution: switch them around: first part counting it, second part const it
thrust::make_counting_iterator(0)
)
),
thrust::make_zip_iterator(
thrust::make_tuple(
// thrust::make_constant_iterator(d_vec*),
constIt,
thrust::make_counting_iterator(0)+sizeOfVector
)
),
coarsePlus(8),
0,
thrust::plus<int>());
std::cout << "## Part 4: Coulomb potential" << std::endl;
// Generating potential
int nOfCharges = 100;
float* charge = new float[nOfCharges];
srand(23);
for (unsigned int i = 0; i < nOfCharges; i++) {
charge[i] = rand() % 100;
std::cout << "Charge at position " << i << " is equal to " << charge[i] << std::endl;
}
float* grid = new float[nOfCharges];
for (unsigned int i = 0; i < nOfCharges; i++) {
grid[i] = 5; // for debugging, real value is 0
}
float* dev_charge = 0;
float* dev_grid = 0;
hipMalloc((void**) &dev_charge, nOfCharges * sizeof(float));
hipMalloc((void**) &dev_grid, nOfCharges * sizeof(float));
hipMemcpy(dev_charge, charge, nOfCharges * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_grid, grid, nOfCharges * sizeof(float), hipMemcpyHostToDevice);
// cudaPrintfInit();
hipLaunchKernelGGL(( potential_scatter), dim3(1),dim3(nOfCharges), 0, 0, dev_charge, dev_grid);
// cudaPrintfDisplay(std::stdout, true);
// cudaPrintfEnd;
hipMemcpy(grid, dev_grid, nOfCharges * sizeof(float), hipMemcpyDeviceToHost);
for (unsigned int i = 0; i < nOfCharges; i++) {
std::cout << "Potential at position " << i << " is equal to " << grid[i] << std::endl;
}
// Thrust
// thrust::host_vector<float> tCharge(nOfCharges);
// thrust::host_vector<float> tGrid(nOfCharges);
// for (unsigned int i = 0; i < nOfCharges; i++) {
// tCharge[i] = charge[i];
// tGrid[i] = 0;
// }
//
// thrust::device_vector<float> dev_tCharge = tCharge;
// thrust::device_vector<float> dev_tGrid = tGrid;
//
// float* rawTCharge = thrust::raw_pointer_cast( &tCharge[0] );
// float* rawTGrid = thrust::raw_pointer_cast( &tGrid[0] );
//
// potential_scatter<<<1, nOfCharges>>>(rawTCharge ,rawTGrid);
}
| ea8e5b01433a9ca6b13f1de67b785bec9c8f8360.cu | #include <iostream>
#include "stdio.h"
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <cuda.h>
// #include "/private/herten/NVIDIA_GPU_Computing_SDK/C/src/simplePrintf/cuPrintf.cuh"
__global__ void buggyAdd (int * numbers) {
numbers[0]++;
}
__global__ void betterAdd(int * numbers) {
atomicAdd(&numbers[0], 1);
}
// typedef thrust::tuple<thrust::constant_iterator<thrust::device_vector<int>* >, thrust::counting_iterator<int> > myTuple;
struct coarsePlus {
int n;
coarsePlus(int _n) : n(_n) {}
__host__ __device__ double operator() (const thrust::tuple<thrust::device_vector<int>*, int> &thatTuple) const {
double sum = 0;
int factor = *thrust::get<1>(thatTuple);
for (int i = n*factor; i < (2*n*factor - 1); i++) {
sum += (**(thrust::get<0>(thatTuple)))[i];
}
return sum;
}
};
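// Each thread owns one charge and accumulates a distance-attenuated contribution into
// every grid cell; atomicAdd is needed because all threads write the same grid entries.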
__global__ void potential_scatter (float* charge, float* grid) {
float currentCharge = charge[threadIdx.x];
int gridDimension = blockDim.x;
for (int i = 0; i < 100; i++) { // 100 should actually be gridDimension
    float denominator = threadIdx.x - (i + 1); // that's the distance
//FIX BY ROLF (HAS TO BE TESTED:)
// float denominator = 1.0 / (1.0 + fabs(threadIdx.x - i));
float _denominator = denominator;
if (denominator < 0) _denominator = -denominator; // there is no abs() on cuda devices or something
currentCharge /= _denominator;
// cuPrintf("Position %i, value1 = %f", i, grid[i]);
// atomicAdd(&grid[i],2);
// cuPrintf(", value2 = %f", grid[i]);
atomicAdd(&grid[i], 2*currentCharge);
}
}
int main (int argc, char** argv) {
int* host_number = new int;
host_number[0] = 10;
std::cout << "source number = " << host_number[0] << std::endl;
int* dev_number;
cudaMalloc((void**) &dev_number, sizeof(int));
std::vector<int> blocks, threads;
blocks.push_back(1); blocks.push_back(1); blocks.push_back(2); blocks.push_back(10); blocks.push_back(100); blocks.push_back(65535);
threads.push_back(1); threads.push_back(10); threads.push_back(450); threads.push_back(100); threads.push_back(1000); threads.push_back(1024);
std::cout << "## Part 1: Simple (buggy) add: " << std::endl;
for (unsigned int i = 0; i < blocks.size(); i++) {
cudaMemcpy(dev_number, host_number, sizeof(int), cudaMemcpyHostToDevice);
buggyAdd<<<blocks[i],threads[i]>>> (dev_number);
cudaMemcpy(host_number, dev_number, sizeof(int), cudaMemcpyDeviceToHost);
std::cout << " <<<" << blocks[i] << "," << threads[i] << ">>> = " << host_number[0] << std::endl;
host_number[0] = 10;
}
std::cout << "## Part 2: Using atomicAdd()" << std::endl;
for (unsigned int i = 0; i < blocks.size(); i++) {
cudaMemcpy(dev_number, host_number, sizeof(int), cudaMemcpyHostToDevice);
betterAdd<<<blocks[i],threads[i]>>> (dev_number);
cudaMemcpy(host_number, dev_number, sizeof(int), cudaMemcpyDeviceToHost);
std::cout << " <<<" << blocks[i] << "," << threads[i] << ">>> = " << host_number[0] << std::endl;
host_number[0] = 10;
}
std::cout << "## Part 3: coarsePlus" << std::endl;
int sizeOfVector = 100;
thrust::host_vector<int> h_vec(sizeOfVector);
srand(23);
for (unsigned int i = 0; i < sizeOfVector; i++) {
h_vec[i] = rand() % 100;
}
thrust::device_vector<int> d_vec = h_vec;
thrust::constant_iterator<thrust::device_vector<int>* > constIt = thrust::make_constant_iterator(&d_vec);
int result = thrust::transform_reduce(
thrust::make_zip_iterator(
thrust::make_tuple(
// thrust::make_constant_iterator(d_vec*),
constIt, // no constant iterator as a first element! because: start of a zip iterator is first element; end is, when first element again is reached (or something) - but with constant iterator it's always the same - so start = end - so invoked just once
// solution: switch them around: first part counting it, second part const it
thrust::make_counting_iterator(0)
)
),
thrust::make_zip_iterator(
thrust::make_tuple(
// thrust::make_constant_iterator(d_vec*),
constIt,
thrust::make_counting_iterator(0)+sizeOfVector
)
),
coarsePlus(8),
0,
thrust::plus<int>());
std::cout << "## Part 4: Coulomb potential" << std::endl;
// Generating potential
int nOfCharges = 100;
float* charge = new float[nOfCharges];
srand(23);
for (unsigned int i = 0; i < nOfCharges; i++) {
charge[i] = rand() % 100;
std::cout << "Charge at position " << i << " is equal to " << charge[i] << std::endl;
}
float* grid = new float[nOfCharges];
for (unsigned int i = 0; i < nOfCharges; i++) {
grid[i] = 5; // for debugging, real value is 0
}
float* dev_charge = 0;
float* dev_grid = 0;
cudaMalloc((void**) &dev_charge, nOfCharges * sizeof(float));
cudaMalloc((void**) &dev_grid, nOfCharges * sizeof(float));
cudaMemcpy(dev_charge, charge, nOfCharges * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_grid, grid, nOfCharges * sizeof(float), cudaMemcpyHostToDevice);
// cudaPrintfInit();
potential_scatter<<<1,nOfCharges>>>(dev_charge, dev_grid);
// cudaPrintfDisplay(std::stdout, true);
// cudaPrintfEnd;
cudaMemcpy(grid, dev_grid, nOfCharges * sizeof(float), cudaMemcpyDeviceToHost);
for (unsigned int i = 0; i < nOfCharges; i++) {
std::cout << "Potential at position " << i << " is equal to " << grid[i] << std::endl;
}
// Thrust
// thrust::host_vector<float> tCharge(nOfCharges);
// thrust::host_vector<float> tGrid(nOfCharges);
// for (unsigned int i = 0; i < nOfCharges; i++) {
// tCharge[i] = charge[i];
// tGrid[i] = 0;
// }
//
// thrust::device_vector<float> dev_tCharge = tCharge;
// thrust::device_vector<float> dev_tGrid = tGrid;
//
// float* rawTCharge = thrust::raw_pointer_cast( &tCharge[0] );
// float* rawTGrid = thrust::raw_pointer_cast( &tGrid[0] );
//
// potential_scatter<<<1, nOfCharges>>>(rawTCharge ,rawTGrid);
}
|
13ba5a67dc84019dd977587d6e82742bc9ee61fa.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <random>
#include <vector>
#include "gtest/gtest.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/fused/attn_feed_forward.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace framework = paddle::framework;
namespace platform = paddle::platform;
USE_OP(matmul);
USE_OP_ITSELF(elementwise_add);
PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_DECLARE_KERNEL(add_grad, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(add, KPS, ALL_LAYOUT);
#endif
// get paddle matmul op results as baseline
template <typename T>
void GetLinearOp(const std::vector<T> &x,
const std::vector<T> &y,
const framework::DDim &x_dim,
const framework::DDim &y_dim,
const phi::GPUContext &ctx,
bool transpose_a,
bool transpose_b,
float alpha,
std::vector<T> *out) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_out = scope.Var("Out");
auto tensor_out = var_out->GetMutable<framework::LoDTensor>();
tensor_x->Resize(x_dim);
tensor_y->Resize(y_dim);
tensor_out->Resize({x_dim[0], x_dim[1], y_dim[0]});
auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
auto z_ptr = tensor_out->mutable_data<T>(ctx.GetPlace());
auto size_x = static_cast<size_t>(phi::product(x_dim));
auto size_y = static_cast<size_t>(phi::product(y_dim));
auto size_z = x_dim[0] * x_dim[1] * y_dim[0];
hipMemcpy(x_ptr, x.data(), size_x * sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(y_ptr, y.data(), size_y * sizeof(T), hipMemcpyHostToDevice);
framework::AttributeMap attrs;
attrs.insert({"transpose_X", transpose_a});
attrs.insert({"transpose_Y", transpose_b});
attrs.insert({"alpha", alpha});
auto op = framework::OpRegistry::CreateOp(
"matmul", {{"X", {"X"}}, {"Y", {"Y"}}}, {{"Out", {"Out"}}}, attrs);
op->Run(scope, ctx.GetPlace());
hipMemcpy(out->data(), z_ptr, size_z * sizeof(T), hipMemcpyDeviceToHost);
ctx.Wait();
}
// get paddle elementwise_add op results as baseline
template <typename T>
void GetElementwiseAddOp(const std::vector<T> &x,
const std::vector<T> &y,
const int bsz_seq,
const int output_size,
const phi::GPUContext &ctx,
std::vector<T> *out) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_out = scope.Var("Out");
auto tensor_out = var_out->GetMutable<framework::LoDTensor>();
tensor_x->Resize({bsz_seq, output_size});
tensor_y->Resize({output_size});
tensor_out->Resize({bsz_seq, output_size});
auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
auto z_ptr = tensor_out->mutable_data<T>(ctx.GetPlace());
auto size_x = bsz_seq * output_size;
auto size_y = output_size;
auto size_z = bsz_seq * output_size;
hipMemcpy(x_ptr, x.data(), size_x * sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(y_ptr, y.data(), size_y * sizeof(T), hipMemcpyHostToDevice);
framework::AttributeMap attrs;
auto op = framework::OpRegistry::CreateOp("elementwise_add",
{{"X", {"X"}}, {"Y", {"Y"}}},
{{"Out", {"Out"}}},
attrs);
op->Run(scope, ctx.GetPlace());
hipMemcpy(out->data(), z_ptr, size_z * sizeof(T), hipMemcpyDeviceToHost);
ctx.Wait();
}
// get paddle matmul_grad op results as baseline
template <typename T>
void GetLinearOpGrad(const std::vector<T> &x_vec,
const std::vector<T> &y_vec,
const std::vector<T> &dout_vec,
const framework::DDim &x_dim,
const framework::DDim &y_dim,
const framework::DDim &out_dim,
const phi::GPUContext &ctx,
bool transpose_a,
bool transpose_b,
float alpha,
std::vector<T> *dinput_vec,
std::vector<T> *dweight_vec) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_dout = scope.Var("DOut");
auto tensor_dout = var_dout->GetMutable<framework::LoDTensor>();
tensor_x->Resize(x_dim);
tensor_y->Resize(y_dim);
tensor_dout->Resize(out_dim);
auto var_dx = scope.Var("DX");
auto tensor_dx = var_dx->GetMutable<framework::LoDTensor>();
auto var_dy = scope.Var("DY");
auto tensor_dy = var_dy->GetMutable<framework::LoDTensor>();
tensor_dx->Resize(x_dim);
tensor_dy->Resize(y_dim);
auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
auto dout_ptr = tensor_dout->mutable_data<T>(ctx.GetPlace());
auto dinput_ptr = tensor_dx->mutable_data<T>(ctx.GetPlace());
auto dweight_ptr = tensor_dy->mutable_data<T>(ctx.GetPlace());
auto size_x = static_cast<size_t>(phi::product(x_dim));
auto size_y = static_cast<size_t>(phi::product(y_dim));
auto size_z = x_dim[0] * x_dim[1] * y_dim[0];
hipMemcpy(x_ptr, x_vec.data(), size_x * sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(y_ptr, y_vec.data(), size_y * sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(
dout_ptr, dout_vec.data(), size_z * sizeof(T), hipMemcpyHostToDevice);
bool use_mkldnn = false;
std::vector<int> fused_reshape_X = {};
std::vector<int> fused_reshape_Y = {};
std::vector<int> fused_reshape_Out = {};
std::vector<int> fused_transpose_X = {};
std::vector<int> fused_transpose_Y = {};
std::vector<int> fused_transpose_Out = {};
bool use_quantizer = false, force_fp32_output = false;
std::string mkldnn_data_type = "float32";
float Scale_x = 1.0, Scale_y = 1.0, Scale_out = 1.0;
framework::AttributeMap attrs;
attrs.insert({"transpose_X", transpose_a});
attrs.insert({"transpose_Y", transpose_b});
attrs.insert({"alpha", alpha});
attrs.insert({"use_mkldnn", use_mkldnn});
attrs.insert({"fused_reshape_X", fused_reshape_X});
attrs.insert({"fused_reshape_Y", fused_reshape_Y});
attrs.insert({"fused_reshape_Out", fused_reshape_Out});
attrs.insert({"fused_transpose_X", fused_transpose_X});
attrs.insert({"fused_transpose_Y", fused_transpose_Y});
attrs.insert({"fused_transpose_Out", fused_transpose_Out});
attrs.insert({"use_quantizer", use_quantizer});
attrs.insert({"mkldnn_data_type", mkldnn_data_type});
attrs.insert({"Scale_x", Scale_x});
attrs.insert({"Scale_y", Scale_y});
attrs.insert({"Scale_out", Scale_out});
attrs.insert({"force_fp32_output", force_fp32_output});
auto op = framework::OpRegistry::CreateOp(
"matmul_grad",
{{"Out@GRAD", {"DOut"}}, {"X", {"X"}}, {"Y", {"Y"}}},
{{"X@GRAD", {"DX"}}, {"Y@GRAD", {"DY"}}},
attrs);
op->Run(scope, ctx.GetPlace());
hipMemcpy(dinput_vec->data(),
dinput_ptr,
size_x * sizeof(T),
hipMemcpyDeviceToHost);
hipMemcpy(dweight_vec->data(),
dweight_ptr,
size_y * sizeof(T),
hipMemcpyDeviceToHost);
ctx.Wait();
}
// get paddle elementwise_add_grad op results as baseline
template <typename T>
void GetElementwiseAddOpGrad(const std::vector<T> &dout_vec,
const int bsz_seq,
const int output_size,
const phi::GPUContext &ctx,
std::vector<T> *dy_vec) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_dout = scope.Var("DOut");
auto tensor_dout = var_dout->GetMutable<framework::LoDTensor>();
tensor_x->Resize({bsz_seq, output_size});
tensor_y->Resize({output_size});
tensor_dout->Resize({bsz_seq, output_size});
auto var_dx = scope.Var("DX");
auto tensor_dx = var_dx->GetMutable<framework::LoDTensor>();
auto var_dy = scope.Var("DY");
auto tensor_dy = var_dy->GetMutable<framework::LoDTensor>();
tensor_dx->Resize({bsz_seq, output_size});
tensor_dy->Resize({output_size});
auto dout_ptr = tensor_dout->mutable_data<T>(ctx.GetPlace());
auto tensor_dy_ptr = tensor_dy->mutable_data<T>(ctx.GetPlace());
auto size_z = static_cast<size_t>(bsz_seq * output_size);
hipMemcpy(
dout_ptr, dout_vec.data(), size_z * sizeof(T), hipMemcpyHostToDevice);
int axis = -1;
bool use_mkldnn = false, use_quantizer = false;
std::string mkldnn_data_type = "float32";
std::string x_data_format = "", y_data_format = "";
float Scale_x = 1.0, Scale_y = 1.0, Scale_out = 1.0;
framework::AttributeMap attrs;
attrs.insert({"axis", axis});
attrs.insert({"use_mkldnn", use_mkldnn});
attrs.insert({"x_data_format", x_data_format});
attrs.insert({"y_data_format", y_data_format});
attrs.insert({"use_quantizer", use_quantizer});
attrs.insert({"mkldnn_data_type", mkldnn_data_type});
attrs.insert({"Scale_x", Scale_x});
attrs.insert({"Scale_y", Scale_y});
attrs.insert({"Scale_out", Scale_out});
auto op = framework::OpRegistry::CreateOp(
"elementwise_add_grad",
{{"Out@GRAD", {"DOut"}}, {"X", {"X"}}, {"Y", {"Y"}}},
{{"X@GRAD", {"DX"}}, {"Y@GRAD", {"DY"}}},
attrs);
op->Run(scope, ctx.GetPlace());
auto size_y = static_cast<size_t>(output_size);
hipMemcpy(dy_vec->data(),
tensor_dy_ptr,
size_y * sizeof(T),
hipMemcpyDeviceToHost);
ctx.Wait();
}
template <typename T>
class TestFeedForward {
public:
TestFeedForward() {
batch_size_ = 16;
seq_len_ = 128;
num_head_ = 16;
dim_head_ = 64;
dim_embed_ = 1024;
has_bias_ = false;
}
TestFeedForward(int batch_size,
int seq_len,
int num_head,
int dim_head,
int dim_embed,
bool has_bias) {
batch_size_ = batch_size;
seq_len_ = seq_len;
num_head_ = num_head;
dim_head_ = dim_head;
dim_embed_ = dim_embed;
has_bias_ = has_bias;
}
~TestFeedForward() { delete ctx_; }
void SetUp() {
bsz_seq_ = batch_size_ * seq_len_;
output_size_ = 3 * num_head_ * dim_head_;
input_size_ = dim_embed_;
ctx_ = new phi::GPUContext(place_);
ctx_->SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
.GetAllocator(place_, ctx_->stream())
.get());
ctx_->SetHostAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetAllocator(paddle::platform::CPUPlace())
.get());
ctx_->SetZeroAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetZeroAllocator(place_)
.get());
ctx_->SetPinnedAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetAllocator(paddle::platform::CUDAPinnedPlace())
.get());
ctx_->PartialInitWithAllocator();
size_src_ = bsz_seq_ * dim_embed_; // src: [bs, seq_len, em_dim]
size_weight_ = dim_embed_ * output_size_; // weight: [output_size, em_dim]
size_output_ =
bsz_seq_ * output_size_; // output: [bs, seq_len, output_size]
size_bias_ = output_size_;
base_out_vec_.resize(size_output_);
base_bias_out_vec_.resize(size_output_);
base_dinput_vec_.resize(size_src_);
base_dweight_vec_.resize(size_weight_);
base_dbias_vec_.resize(size_bias_);
src_vec_.resize(size_src_);
weight_vec_.resize(size_weight_);
bias_vec_.resize(size_bias_);
doutput_vec_.resize(size_output_);
std::default_random_engine random(time(NULL));
std::uniform_real_distribution<float> dis(0.0, 1.0);
for (int i = 0; i < size_src_; i++) {
src_vec_[i] = static_cast<T>(dis(random));
}
for (int i = 0; i < size_weight_; i++) {
weight_vec_[i] = static_cast<T>(dis(random));
}
for (int i = 0; i < size_bias_; i++) {
bias_vec_[i] = static_cast<T>(dis(random));
}
for (int i = 0; i < size_output_; i++) {
doutput_vec_[i] = static_cast<T>(dis(random));
}
framework::TensorFromVector<T>(src_vec_, *ctx_, &src_);
src_.Resize({batch_size_, seq_len_, dim_embed_});
framework::TensorFromVector<T>(weight_vec_, *ctx_, &weight_);
weight_.Resize({output_size_, dim_embed_});
out_.Resize({batch_size_, seq_len_, output_size_});
out_.mutable_data<T>(place_);
if (has_bias_) {
framework::TensorFromVector<T>(bias_vec_, *ctx_, &bias_);
bias_.Resize({output_size_});
bias_out_.Resize({batch_size_, seq_len_, output_size_});
bias_out_.mutable_data<T>(place_);
}
framework::TensorFromVector<T>(doutput_vec_, *ctx_, &doutput_);
doutput_.Resize({batch_size_, seq_len_, output_size_});
dinput_.Resize({batch_size_, seq_len_, dim_embed_});
dinput_.mutable_data<T>(place_);
dweight_.Resize({output_size_, dim_embed_});
dweight_.mutable_data<T>(place_);
if (has_bias_) {
dbias_.Resize({output_size_});
dbias_.mutable_data<T>(place_);
}
}
void BaselineForward() {
bool transpose_a = false, transpose_b = true;
float alpha = 1;
GetLinearOp(src_vec_,
weight_vec_,
src_.dims(),
weight_.dims(),
*ctx_,
transpose_a,
transpose_b,
alpha,
&base_out_vec_);
if (has_bias_) {
GetElementwiseAddOp(base_out_vec_,
bias_vec_,
bsz_seq_,
output_size_,
*ctx_,
&base_bias_out_vec_);
}
ctx_->Wait();
}
// get forward results of feedforward.
void FusedForward() {
T *p_weight = weight_.data<T>();
T *p_src = src_.data<T>();
T *p_output = out_.data<T>();
T *p_bias = nullptr;
T *p_bias_output = nullptr;
if (has_bias_) {
p_bias = bias_.data<T>();
p_bias_output = bias_out_.data<T>();
}
auto qkv_compute = paddle::operators::FeedForward<T>(
*ctx_, bsz_seq_, output_size_, input_size_, has_bias_);
qkv_compute.ComputeForward(
p_weight, p_src, p_bias, p_output, p_bias_output);
ctx_->Wait();
}
void BaselineBackward() {
bool transpose_a = false, transpose_b = true;
float alpha = 1;
GetLinearOpGrad(src_vec_,
weight_vec_,
doutput_vec_,
src_.dims(),
weight_.dims(),
out_.dims(),
*ctx_,
transpose_a,
transpose_b,
alpha,
&base_dinput_vec_,
&base_dweight_vec_);
if (has_bias_) {
GetElementwiseAddOpGrad(
doutput_vec_, bsz_seq_, output_size_, *ctx_, &base_dbias_vec_);
}
ctx_->Wait();
}
// get backward results of feedforward.
void FusedBackward() {
T *p_weight = weight_.data<T>();
T *p_src = src_.data<T>();
T *p_doutput = doutput_.data<T>();
T *p_dinput = dinput_.data<T>();
T *p_dweight = dweight_.data<T>();
T *bias_ptr = nullptr;
if (has_bias_) {
bias_ptr = dbias_.data<T>();
}
auto qkv_compute = paddle::operators::FeedForward<T>(
*ctx_, bsz_seq_, output_size_, input_size_, has_bias_);
qkv_compute.ComputeBackward(
p_src, p_weight, p_doutput, p_dinput, p_dweight, bias_ptr);
ctx_->Wait();
}
void Run() {
SetUp();
BaselineForward();
FusedForward();
BaselineBackward();
FusedBackward();
}
// check forward correctness between baseline and results of feedforward.
void CheckOut(const T diff, bool is_relative_atol = false) {
std::vector<T> out(size_output_);
std::vector<T> bias_out(size_output_);
paddle::framework::TensorToVector(out_, *ctx_, &out);
if (has_bias_) {
paddle::framework::TensorToVector(bias_out_, *ctx_, &bias_out);
}
ctx_->Wait();
for (int i = 0; i < size_output_; i++) {
if (is_relative_atol) {
EXPECT_LT(std::abs((out[i] - base_out_vec_[i]) / base_out_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(out[i] - base_out_vec_[i]), diff);
}
if (has_bias_) {
if (is_relative_atol) {
EXPECT_LT(std::abs((bias_out[i] - base_bias_out_vec_[i]) /
base_bias_out_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(bias_out[i] - base_bias_out_vec_[i]), diff);
}
}
}
}
// check backward correctness between baseline and results of feedforward.
void CheckGrad(const T diff, bool is_relative_atol = false) {
std::vector<T> h_dinput(size_src_);
paddle::framework::TensorToVector(dinput_, *ctx_, &h_dinput);
for (int i = 0; i < size_src_; i++) {
if (is_relative_atol) {
EXPECT_LT(
std::abs((h_dinput[i] - base_dinput_vec_[i]) / base_dinput_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(h_dinput[i] - base_dinput_vec_[i]), diff);
}
}
std::vector<T> h_dweight(size_weight_);
paddle::framework::TensorToVector(dweight_, *ctx_, &h_dweight);
for (int i = 0; i < size_weight_; i++) {
if (is_relative_atol) {
EXPECT_LT(std::abs((h_dweight[i] - base_dweight_vec_[i]) /
base_dweight_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(h_dweight[i] - base_dweight_vec_[i]), diff);
}
}
if (has_bias_) {
std::vector<T> h_dbias(size_bias_);
paddle::framework::TensorToVector(dbias_, *ctx_, &h_dbias);
for (int i = 0; i < size_bias_; i++) {
if (is_relative_atol) {
EXPECT_LT(
std::abs((h_dbias[i] - base_dbias_vec_[i]) / base_dbias_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(h_dbias[i] - base_dbias_vec_[i]), diff);
}
}
}
}
private:
int batch_size_, seq_len_, num_head_, dim_head_, dim_embed_;
int bsz_seq_, output_size_, input_size_;
bool has_bias_;
int size_src_, size_weight_, size_bias_, size_output_;
framework::Tensor src_, weight_, bias_, out_, bias_out_;
framework::Tensor dinput_, dweight_, dbias_, doutput_;
std::vector<T> src_vec_, weight_vec_, bias_vec_, out_vec_, bias_out_vec_;
std::vector<T> dinput_vec_, dweight_vec_, dbias_vec_, doutput_vec_;
// results of baseline.
std::vector<T> base_out_vec_, base_bias_out_vec_;
std::vector<T> base_dinput_vec_, base_dweight_vec_, base_dbias_vec_;
platform::CUDAPlace place_;
phi::GPUContext *ctx_;
};
// test for fp32, fp16, fp32+bias and fp16+bias
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp32) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = false;
TestFeedForward<float> test(
batch_size, seq_len, num_head, dim_head, dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<float>(1e-5));
test.CheckGrad(static_cast<float>(1e-5));
}
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp16) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = false;
TestFeedForward<paddle::platform::float16> test(
batch_size, seq_len, num_head, dim_head, dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<paddle::platform::float16>(1e-5));
test.CheckGrad(static_cast<paddle::platform::float16>(1e-5));
}
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp32Bias) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = true;
TestFeedForward<float> test(
batch_size, seq_len, num_head, dim_head, dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<float>(1e-5));
test.CheckGrad(static_cast<float>(1e-3));
}
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp16Bias) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = true;
TestFeedForward<paddle::platform::float16> test(
batch_size, seq_len, num_head, dim_head, dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<paddle::platform::float16>(1e-2));
test.CheckGrad(static_cast<paddle::platform::float16>(1e-2), true);
}
| 13ba5a67dc84019dd977587d6e82742bc9ee61fa.cu | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <random>
#include <vector>
#include "gtest/gtest.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/fused/attn_feed_forward.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace framework = paddle::framework;
namespace platform = paddle::platform;
USE_OP(matmul);
USE_OP_ITSELF(elementwise_add);
PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_DECLARE_KERNEL(add_grad, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(add, KPS, ALL_LAYOUT);
#endif
// get paddle matmul op results as baseline
template <typename T>
void GetLinearOp(const std::vector<T> &x,
const std::vector<T> &y,
const framework::DDim &x_dim,
const framework::DDim &y_dim,
const phi::GPUContext &ctx,
bool transpose_a,
bool transpose_b,
float alpha,
std::vector<T> *out) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_out = scope.Var("Out");
auto tensor_out = var_out->GetMutable<framework::LoDTensor>();
tensor_x->Resize(x_dim);
tensor_y->Resize(y_dim);
tensor_out->Resize({x_dim[0], x_dim[1], y_dim[0]});
auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
auto z_ptr = tensor_out->mutable_data<T>(ctx.GetPlace());
auto size_x = static_cast<size_t>(phi::product(x_dim));
auto size_y = static_cast<size_t>(phi::product(y_dim));
auto size_z = x_dim[0] * x_dim[1] * y_dim[0];
cudaMemcpy(x_ptr, x.data(), size_x * sizeof(T), cudaMemcpyHostToDevice);
cudaMemcpy(y_ptr, y.data(), size_y * sizeof(T), cudaMemcpyHostToDevice);
framework::AttributeMap attrs;
attrs.insert({"transpose_X", transpose_a});
attrs.insert({"transpose_Y", transpose_b});
attrs.insert({"alpha", alpha});
auto op = framework::OpRegistry::CreateOp(
"matmul", {{"X", {"X"}}, {"Y", {"Y"}}}, {{"Out", {"Out"}}}, attrs);
op->Run(scope, ctx.GetPlace());
cudaMemcpy(out->data(), z_ptr, size_z * sizeof(T), cudaMemcpyDeviceToHost);
ctx.Wait();
}
// get paddle elementwise_add op results as baseline
template <typename T>
void GetElementwiseAddOp(const std::vector<T> &x,
const std::vector<T> &y,
const int bsz_seq,
const int output_size,
const phi::GPUContext &ctx,
std::vector<T> *out) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_out = scope.Var("Out");
auto tensor_out = var_out->GetMutable<framework::LoDTensor>();
tensor_x->Resize({bsz_seq, output_size});
tensor_y->Resize({output_size});
tensor_out->Resize({bsz_seq, output_size});
auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
auto z_ptr = tensor_out->mutable_data<T>(ctx.GetPlace());
auto size_x = bsz_seq * output_size;
auto size_y = output_size;
auto size_z = bsz_seq * output_size;
cudaMemcpy(x_ptr, x.data(), size_x * sizeof(T), cudaMemcpyHostToDevice);
cudaMemcpy(y_ptr, y.data(), size_y * sizeof(T), cudaMemcpyHostToDevice);
framework::AttributeMap attrs;
auto op = framework::OpRegistry::CreateOp("elementwise_add",
{{"X", {"X"}}, {"Y", {"Y"}}},
{{"Out", {"Out"}}},
attrs);
op->Run(scope, ctx.GetPlace());
cudaMemcpy(out->data(), z_ptr, size_z * sizeof(T), cudaMemcpyDeviceToHost);
ctx.Wait();
}
// get paddle matmul_grad op results as baseline
template <typename T>
void GetLinearOpGrad(const std::vector<T> &x_vec,
const std::vector<T> &y_vec,
const std::vector<T> &dout_vec,
const framework::DDim &x_dim,
const framework::DDim &y_dim,
const framework::DDim &out_dim,
const phi::GPUContext &ctx,
bool transpose_a,
bool transpose_b,
float alpha,
std::vector<T> *dinput_vec,
std::vector<T> *dweight_vec) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_dout = scope.Var("DOut");
auto tensor_dout = var_dout->GetMutable<framework::LoDTensor>();
tensor_x->Resize(x_dim);
tensor_y->Resize(y_dim);
tensor_dout->Resize(out_dim);
auto var_dx = scope.Var("DX");
auto tensor_dx = var_dx->GetMutable<framework::LoDTensor>();
auto var_dy = scope.Var("DY");
auto tensor_dy = var_dy->GetMutable<framework::LoDTensor>();
tensor_dx->Resize(x_dim);
tensor_dy->Resize(y_dim);
auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
auto dout_ptr = tensor_dout->mutable_data<T>(ctx.GetPlace());
auto dinput_ptr = tensor_dx->mutable_data<T>(ctx.GetPlace());
auto dweight_ptr = tensor_dy->mutable_data<T>(ctx.GetPlace());
auto size_x = static_cast<size_t>(phi::product(x_dim));
auto size_y = static_cast<size_t>(phi::product(y_dim));
auto size_z = x_dim[0] * x_dim[1] * y_dim[0];
cudaMemcpy(x_ptr, x_vec.data(), size_x * sizeof(T), cudaMemcpyHostToDevice);
cudaMemcpy(y_ptr, y_vec.data(), size_y * sizeof(T), cudaMemcpyHostToDevice);
cudaMemcpy(
dout_ptr, dout_vec.data(), size_z * sizeof(T), cudaMemcpyHostToDevice);
bool use_mkldnn = false;
std::vector<int> fused_reshape_X = {};
std::vector<int> fused_reshape_Y = {};
std::vector<int> fused_reshape_Out = {};
std::vector<int> fused_transpose_X = {};
std::vector<int> fused_transpose_Y = {};
std::vector<int> fused_transpose_Out = {};
bool use_quantizer = false, force_fp32_output = false;
std::string mkldnn_data_type = "float32";
float Scale_x = 1.0, Scale_y = 1.0, Scale_out = 1.0;
framework::AttributeMap attrs;
attrs.insert({"transpose_X", transpose_a});
attrs.insert({"transpose_Y", transpose_b});
attrs.insert({"alpha", alpha});
attrs.insert({"use_mkldnn", use_mkldnn});
attrs.insert({"fused_reshape_X", fused_reshape_X});
attrs.insert({"fused_reshape_Y", fused_reshape_Y});
attrs.insert({"fused_reshape_Out", fused_reshape_Out});
attrs.insert({"fused_transpose_X", fused_transpose_X});
attrs.insert({"fused_transpose_Y", fused_transpose_Y});
attrs.insert({"fused_transpose_Out", fused_transpose_Out});
attrs.insert({"use_quantizer", use_quantizer});
attrs.insert({"mkldnn_data_type", mkldnn_data_type});
attrs.insert({"Scale_x", Scale_x});
attrs.insert({"Scale_y", Scale_y});
attrs.insert({"Scale_out", Scale_out});
attrs.insert({"force_fp32_output", force_fp32_output});
auto op = framework::OpRegistry::CreateOp(
"matmul_grad",
{{"Out@GRAD", {"DOut"}}, {"X", {"X"}}, {"Y", {"Y"}}},
{{"X@GRAD", {"DX"}}, {"Y@GRAD", {"DY"}}},
attrs);
op->Run(scope, ctx.GetPlace());
cudaMemcpy(dinput_vec->data(),
dinput_ptr,
size_x * sizeof(T),
cudaMemcpyDeviceToHost);
cudaMemcpy(dweight_vec->data(),
dweight_ptr,
size_y * sizeof(T),
cudaMemcpyDeviceToHost);
ctx.Wait();
}
// get paddle elementwise_add_grad op results as baseline
template <typename T>
void GetElementwiseAddOpGrad(const std::vector<T> &dout_vec,
const int bsz_seq,
const int output_size,
const phi::GPUContext &ctx,
std::vector<T> *dy_vec) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_dout = scope.Var("DOut");
auto tensor_dout = var_dout->GetMutable<framework::LoDTensor>();
tensor_x->Resize({bsz_seq, output_size});
tensor_y->Resize({output_size});
tensor_dout->Resize({bsz_seq, output_size});
auto var_dx = scope.Var("DX");
auto tensor_dx = var_dx->GetMutable<framework::LoDTensor>();
auto var_dy = scope.Var("DY");
auto tensor_dy = var_dy->GetMutable<framework::LoDTensor>();
tensor_dx->Resize({bsz_seq, output_size});
tensor_dy->Resize({output_size});
auto dout_ptr = tensor_dout->mutable_data<T>(ctx.GetPlace());
auto tensor_dy_ptr = tensor_dy->mutable_data<T>(ctx.GetPlace());
auto size_z = static_cast<size_t>(bsz_seq * output_size);
cudaMemcpy(
dout_ptr, dout_vec.data(), size_z * sizeof(T), cudaMemcpyHostToDevice);
int axis = -1;
bool use_mkldnn = false, use_quantizer = false;
std::string mkldnn_data_type = "float32";
std::string x_data_format = "", y_data_format = "";
float Scale_x = 1.0, Scale_y = 1.0, Scale_out = 1.0;
framework::AttributeMap attrs;
attrs.insert({"axis", axis});
attrs.insert({"use_mkldnn", use_mkldnn});
attrs.insert({"x_data_format", x_data_format});
attrs.insert({"y_data_format", y_data_format});
attrs.insert({"use_quantizer", use_quantizer});
attrs.insert({"mkldnn_data_type", mkldnn_data_type});
attrs.insert({"Scale_x", Scale_x});
attrs.insert({"Scale_y", Scale_y});
attrs.insert({"Scale_out", Scale_out});
auto op = framework::OpRegistry::CreateOp(
"elementwise_add_grad",
{{"Out@GRAD", {"DOut"}}, {"X", {"X"}}, {"Y", {"Y"}}},
{{"X@GRAD", {"DX"}}, {"Y@GRAD", {"DY"}}},
attrs);
op->Run(scope, ctx.GetPlace());
auto size_y = static_cast<size_t>(output_size);
cudaMemcpy(dy_vec->data(),
tensor_dy_ptr,
size_y * sizeof(T),
cudaMemcpyDeviceToHost);
ctx.Wait();
}
template <typename T>
class TestFeedForward {
public:
TestFeedForward() {
batch_size_ = 16;
seq_len_ = 128;
num_head_ = 16;
dim_head_ = 64;
dim_embed_ = 1024;
has_bias_ = false;
}
TestFeedForward(int batch_size,
int seq_len,
int num_head,
int dim_head,
int dim_embed,
bool has_bias) {
batch_size_ = batch_size;
seq_len_ = seq_len;
num_head_ = num_head;
dim_head_ = dim_head;
dim_embed_ = dim_embed;
has_bias_ = has_bias;
}
~TestFeedForward() { delete ctx_; }
void SetUp() {
bsz_seq_ = batch_size_ * seq_len_;
output_size_ = 3 * num_head_ * dim_head_;
input_size_ = dim_embed_;
ctx_ = new phi::GPUContext(place_);
ctx_->SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
.GetAllocator(place_, ctx_->stream())
.get());
ctx_->SetHostAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetAllocator(paddle::platform::CPUPlace())
.get());
ctx_->SetZeroAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetZeroAllocator(place_)
.get());
ctx_->SetPinnedAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetAllocator(paddle::platform::CUDAPinnedPlace())
.get());
ctx_->PartialInitWithAllocator();
size_src_ = bsz_seq_ * dim_embed_; // src: [bs, seq_len, em_dim]
size_weight_ = dim_embed_ * output_size_; // weight: [output_size, em_dim]
size_output_ =
bsz_seq_ * output_size_; // output: [bs, seq_len, output_size]
size_bias_ = output_size_;
base_out_vec_.resize(size_output_);
base_bias_out_vec_.resize(size_output_);
base_dinput_vec_.resize(size_src_);
base_dweight_vec_.resize(size_weight_);
base_dbias_vec_.resize(size_bias_);
src_vec_.resize(size_src_);
weight_vec_.resize(size_weight_);
bias_vec_.resize(size_bias_);
doutput_vec_.resize(size_output_);
std::default_random_engine random(time(NULL));
std::uniform_real_distribution<float> dis(0.0, 1.0);
for (int i = 0; i < size_src_; i++) {
src_vec_[i] = static_cast<T>(dis(random));
}
for (int i = 0; i < size_weight_; i++) {
weight_vec_[i] = static_cast<T>(dis(random));
}
for (int i = 0; i < size_bias_; i++) {
bias_vec_[i] = static_cast<T>(dis(random));
}
for (int i = 0; i < size_output_; i++) {
doutput_vec_[i] = static_cast<T>(dis(random));
}
framework::TensorFromVector<T>(src_vec_, *ctx_, &src_);
src_.Resize({batch_size_, seq_len_, dim_embed_});
framework::TensorFromVector<T>(weight_vec_, *ctx_, &weight_);
weight_.Resize({output_size_, dim_embed_});
out_.Resize({batch_size_, seq_len_, output_size_});
out_.mutable_data<T>(place_);
if (has_bias_) {
framework::TensorFromVector<T>(bias_vec_, *ctx_, &bias_);
bias_.Resize({output_size_});
bias_out_.Resize({batch_size_, seq_len_, output_size_});
bias_out_.mutable_data<T>(place_);
}
framework::TensorFromVector<T>(doutput_vec_, *ctx_, &doutput_);
doutput_.Resize({batch_size_, seq_len_, output_size_});
dinput_.Resize({batch_size_, seq_len_, dim_embed_});
dinput_.mutable_data<T>(place_);
dweight_.Resize({output_size_, dim_embed_});
dweight_.mutable_data<T>(place_);
if (has_bias_) {
dbias_.Resize({output_size_});
dbias_.mutable_data<T>(place_);
}
}
void BaselineForward() {
bool transpose_a = false, transpose_b = true;
float alpha = 1;
GetLinearOp(src_vec_,
weight_vec_,
src_.dims(),
weight_.dims(),
*ctx_,
transpose_a,
transpose_b,
alpha,
&base_out_vec_);
if (has_bias_) {
GetElementwiseAddOp(base_out_vec_,
bias_vec_,
bsz_seq_,
output_size_,
*ctx_,
&base_bias_out_vec_);
}
ctx_->Wait();
}
// get forward results of feedforward.
void FusedForward() {
T *p_weight = weight_.data<T>();
T *p_src = src_.data<T>();
T *p_output = out_.data<T>();
T *p_bias = nullptr;
T *p_bias_output = nullptr;
if (has_bias_) {
p_bias = bias_.data<T>();
p_bias_output = bias_out_.data<T>();
}
auto qkv_compute = paddle::operators::FeedForward<T>(
*ctx_, bsz_seq_, output_size_, input_size_, has_bias_);
qkv_compute.ComputeForward(
p_weight, p_src, p_bias, p_output, p_bias_output);
ctx_->Wait();
}
void BaselineBackward() {
bool transpose_a = false, transpose_b = true;
float alpha = 1;
GetLinearOpGrad(src_vec_,
weight_vec_,
doutput_vec_,
src_.dims(),
weight_.dims(),
out_.dims(),
*ctx_,
transpose_a,
transpose_b,
alpha,
&base_dinput_vec_,
&base_dweight_vec_);
if (has_bias_) {
GetElementwiseAddOpGrad(
doutput_vec_, bsz_seq_, output_size_, *ctx_, &base_dbias_vec_);
}
ctx_->Wait();
}
// get backward results of feedforward.
void FusedBackward() {
T *p_weight = weight_.data<T>();
T *p_src = src_.data<T>();
T *p_doutput = doutput_.data<T>();
T *p_dinput = dinput_.data<T>();
T *p_dweight = dweight_.data<T>();
T *bias_ptr = nullptr;
if (has_bias_) {
bias_ptr = dbias_.data<T>();
}
auto qkv_compute = paddle::operators::FeedForward<T>(
*ctx_, bsz_seq_, output_size_, input_size_, has_bias_);
qkv_compute.ComputeBackward(
p_src, p_weight, p_doutput, p_dinput, p_dweight, bias_ptr);
ctx_->Wait();
}
void Run() {
SetUp();
BaselineForward();
FusedForward();
BaselineBackward();
FusedBackward();
}
// check forward correctness between baseline and results of feedforward.
void CheckOut(const T diff, bool is_relative_atol = false) {
std::vector<T> out(size_output_);
std::vector<T> bias_out(size_output_);
paddle::framework::TensorToVector(out_, *ctx_, &out);
if (has_bias_) {
paddle::framework::TensorToVector(bias_out_, *ctx_, &bias_out);
}
ctx_->Wait();
for (int i = 0; i < size_output_; i++) {
if (is_relative_atol) {
EXPECT_LT(std::abs((out[i] - base_out_vec_[i]) / base_out_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(out[i] - base_out_vec_[i]), diff);
}
if (has_bias_) {
if (is_relative_atol) {
EXPECT_LT(std::abs((bias_out[i] - base_bias_out_vec_[i]) /
base_bias_out_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(bias_out[i] - base_bias_out_vec_[i]), diff);
}
}
}
}
// check backward correctness between baseline and results of feedforward.
void CheckGrad(const T diff, bool is_relative_atol = false) {
std::vector<T> h_dinput(size_src_);
paddle::framework::TensorToVector(dinput_, *ctx_, &h_dinput);
for (int i = 0; i < size_src_; i++) {
if (is_relative_atol) {
EXPECT_LT(
std::abs((h_dinput[i] - base_dinput_vec_[i]) / base_dinput_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(h_dinput[i] - base_dinput_vec_[i]), diff);
}
}
std::vector<T> h_dweight(size_weight_);
paddle::framework::TensorToVector(dweight_, *ctx_, &h_dweight);
for (int i = 0; i < size_weight_; i++) {
if (is_relative_atol) {
EXPECT_LT(std::abs((h_dweight[i] - base_dweight_vec_[i]) /
base_dweight_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(h_dweight[i] - base_dweight_vec_[i]), diff);
}
}
if (has_bias_) {
std::vector<T> h_dbias(size_bias_);
paddle::framework::TensorToVector(dbias_, *ctx_, &h_dbias);
for (int i = 0; i < size_bias_; i++) {
if (is_relative_atol) {
EXPECT_LT(
std::abs((h_dbias[i] - base_dbias_vec_[i]) / base_dbias_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(h_dbias[i] - base_dbias_vec_[i]), diff);
}
}
}
}
private:
int batch_size_, seq_len_, num_head_, dim_head_, dim_embed_;
int bsz_seq_, output_size_, input_size_;
bool has_bias_;
int size_src_, size_weight_, size_bias_, size_output_;
framework::Tensor src_, weight_, bias_, out_, bias_out_;
framework::Tensor dinput_, dweight_, dbias_, doutput_;
std::vector<T> src_vec_, weight_vec_, bias_vec_, out_vec_, bias_out_vec_;
std::vector<T> dinput_vec_, dweight_vec_, dbias_vec_, doutput_vec_;
// results of baseline.
std::vector<T> base_out_vec_, base_bias_out_vec_;
std::vector<T> base_dinput_vec_, base_dweight_vec_, base_dbias_vec_;
platform::CUDAPlace place_;
phi::GPUContext *ctx_;
};
// test for fp32, fp16, fp32+bias and fp16+bias
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp32) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = false;
TestFeedForward<float> test(
batch_size, seq_len, num_head, dim_head, dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<float>(1e-5));
test.CheckGrad(static_cast<float>(1e-5));
}
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp16) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = false;
TestFeedForward<paddle::platform::float16> test(
batch_size, seq_len, num_head, dim_head, dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<paddle::platform::float16>(1e-5));
test.CheckGrad(static_cast<paddle::platform::float16>(1e-5));
}
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp32Bias) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = true;
TestFeedForward<float> test(
batch_size, seq_len, num_head, dim_head, dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<float>(1e-5));
test.CheckGrad(static_cast<float>(1e-3));
}
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp16Bias) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = true;
TestFeedForward<paddle::platform::float16> test(
batch_size, seq_len, num_head, dim_head, dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<paddle::platform::float16>(1e-2));
test.CheckGrad(static_cast<paddle::platform::float16>(1e-2), true);
}
|
ba3954727e068a0132c6bbef3eeb3df56f48f588.hip | // !!! This is a file automatically generated by hipify!!!
/* Includes, system */
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
/* DEVICE CODE */
__global__ void suma_2_enteros(int *d1, int *d2, int *sum){
*sum = *d1 + *d2;
}
/* HOST CODE*/
int main(int argc, char** argv)
{
int DeviceCount = 0;
int h_d1,h_d2,h_sum;
int *d_d1,*d_d2,*d_sum;
h_d1 = 2; h_d2 = 3;
/* Initialize CUDA */
if (hipInit(0) != 0){
printf("ERROR de inicializacion\n");
exit(0);
}
hipGetDeviceCount(&DeviceCount);
if (DeviceCount == 0){
printf("ERROR ningun dispositivo soporta CUDA\n");
exit(0);
}
hipMalloc((void**)&d_d1, sizeof(d_d1));
hipMalloc((void**)&d_d2, sizeof(d_d2));
hipMalloc((void**)&d_sum, sizeof(d_sum));
hipMemcpy(d_d1,&h_d1,sizeof(h_d1),hipMemcpyHostToDevice);
hipMemcpy(d_d2,&h_d2,sizeof(h_d2),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( suma_2_enteros), dim3(1),dim3(1), 0, 0, d_d1,d_d2,d_sum);
hipMemcpy(&h_sum,d_sum,sizeof(h_sum),hipMemcpyDeviceToHost);
printf("Resultado: %d \n",h_sum);
hipFree(d_d1);hipFree(d_d2);hipFree(d_sum);
}
| ba3954727e068a0132c6bbef3eeb3df56f48f588.cu |
/* Includes, system */
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
/* DEVICE CODE */
__global__ void suma_2_enteros(int *d1, int *d2, int *sum){
*sum = *d1 + *d2;
}
/* HOST CODE*/
int main(int argc, char** argv)
{
int DeviceCount = 0;
int h_d1,h_d2,h_sum;
int *d_d1,*d_d2,*d_sum;
h_d1 = 2; h_d2 = 3;
/* Initialize CUDA */
if (cuInit(0) != 0){
printf("ERROR de inicializacion\n");
exit(0);
}
cuDeviceGetCount(&DeviceCount);
if (DeviceCount == 0){
printf("ERROR ningun dispositivo soporta CUDA\n");
exit(0);
}
cudaMalloc((void**)&d_d1, sizeof(d_d1));
cudaMalloc((void**)&d_d2, sizeof(d_d2));
cudaMalloc((void**)&d_sum, sizeof(d_sum));
cudaMemcpy(d_d1,&h_d1,sizeof(h_d1),cudaMemcpyHostToDevice);
cudaMemcpy(d_d2,&h_d2,sizeof(h_d2),cudaMemcpyHostToDevice);
suma_2_enteros<<<1,1>>>(d_d1,d_d2,d_sum);
cudaMemcpy(&h_sum,d_sum,sizeof(h_sum),cudaMemcpyDeviceToHost);
printf("Resultado: %d \n",h_sum);
cudaFree(d_d1);cudaFree(d_d2);cudaFree(d_sum);
}
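// Not part of the original example: a minimal error-checking sketch for the CUDA runtime
// calls used above, which this example leaves unchecked. The macro name and layout are just
// one common convention, not an official API; the commented usage shows where it would go.
#define CUDA_CHECK(call) \
do { \
cudaError_t err_ = (call); \
if (err_ != cudaSuccess) \
printf("CUDA error: %s (%s:%d)\n", cudaGetErrorString(err_), __FILE__, __LINE__); \
} while (0)
// CUDA_CHECK(cudaMemcpy(d_d1, &h_d1, sizeof(h_d1), cudaMemcpyHostToDevice));
// suma_2_enteros<<<1,1>>>(d_d1, d_d2, d_sum);
// CUDA_CHECK(cudaGetLastError());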
|
9dfd10ce119a20f4a4d7e7252d6c375780a81306.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*This program calculates pi using a Simpson's Rule estimation of the
integral of arctangent from 0 to 1. When inputting the number of
iterations to perform, more iterations = more precision. The number of
iterations is given as a command line argument. If no argument is
provided, a default value of 20,000 is used. At 20,000 iterations, the
value of pi is guaranteed to be accurate up to 8 decimal places. This
version uses NVIDIA CUDA to perform parallel computation of the
partial sums on a GPU.
The amount of work each core does is given by the two #defines below.
These values will need to be tuned for each device this code runs on in
order to get maximum performance. For example, on the Oakley cluster of
the Ohio Supercomputer Center, which uses the Tesla M2070, there are
14 streaming multiprocessors (SM's), with 32 cores each, for a total of
448 cores. Thus, 448 threads will be created, with each thread performing
multiple iterations (total_iterations / (NUM_BLOCKS * THREADS_PER_BLOCK)
to be precise). Thus, the more iterations given, the more work each thread
does. The number of threads is kept constant in order to make clean-up
easier and to not exceed the capabilities (max number of threads or blocks)
of any particular GPU device. The last thread might have a few extra
iterations if that number doesn't divide evenly.
The number of decimal digits to use as the precision of the calculations is
also given as a command-line argument. Obviously, the higher the number, the
more digits you can successfully calculate. Accuracy still relies on the number
of iterations, though: a high number of digits but low number of iterations
will still result in a low number of digits of precision. Thus, you should
only increase the number of digits when your iterations get too high and
you find that your calculations are no longer precise due to internal
rounding error. You'll probably find that increasing the digits will decrease
performance severely. It is recommended, though, that since error accumulates,
the more digits you want to find, the more padding you'll need to add to the
end of the word to absorb that error. As a general rule of thumb, if you
want to calculate x digits, make your words 2x long. Of course, this also
increases the runtime by 2x.
Compilation on OSC is a little complicated. First, log into oakley.osc.edu and
request an interactive computation node since it has the GPU:
qsub -I -l walltime=0:59:00 -l nodes=1:gpus=1
Once you have acquired a node, first load the CUDA module:
module load cuda
Then compile it with the NVIDIA compiler:
nvcc -O -arch sm_20 -o make_pi_5 make_pi_5.cu
Finally, to run it, just give it the iterations and precision arguments:
./make_pi_5 20000 25
The -arch option is needed because we use dynamic memory allocation on the GPU.
Only second-generation and higher CUDA devices support this, so this code won't
run on the oldest NVIDIA graphics cards.
*/
// Includes. Optimum values for OSC are:
// NUM_BLOCKS 14
// THREADS_PER_BLOCK 32
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#define NUM_BLOCKS 14
#define THREADS_PER_BLOCK 32
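// With these defaults the kernel is always launched with NUM_BLOCKS * THREADS_PER_BLOCK
// = 14 * 32 = 448 threads in total, matching the 448-core Tesla M2070 configuration
// described in the comment above; each thread then loops over its share of the iterations.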
// A bignum is stored as all its decimal digits, separated into an array.
// Really, it's quite terrible for performance, but it allows infinite digits.
// Or at least as many as we can store in memory. The power tells us where to
// put the decimal point, and the number of significant digits tells us how
// many of the digits in the number are actually used. The precision tells us
// the maximum number of digits possible for this particular instance.
typedef struct {
signed long int power;
unsigned long int sig_digs;
char * digits;
unsigned long int precision;
} bignum;
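// Illustrative reading of this layout (an assumption added for clarity, not taken from the
// original code): with precision 25, a value such as 3.14159 would plausibly be held as
// digits = {3,1,4,1,5,9,0,...}, sig_digs = 6 and power = 1, i.e. the digit string 0.314159
// scaled by 10^1, with the remaining digits up to the precision left unused.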
// Function pointers, mostly for bignum operations. Note that in our use
// below, we assume most of the arithmetic functions don't fail and thus
// don't check their return values. Hope they're tested well... Notice
// now that we have mirrored versions for the GPU, most of which just
// have to call the GPU memory allocation functions.
__global__ void calculate (long *, long *, char *, long *, long *, char *, long, long);
__host__ bignum * bignum_init(long int);
__host__ void bignum_reset(bignum *);
__host__ void bignum_clear(bignum *);
__host__ int bignum_set_int(bignum *, long int);
__host__ void bignum_set(bignum *, bignum *);
__host__ void bignum_print(bignum *, long int);
__host__ int bignum_add(bignum *, bignum *, bignum *);
__host__ int bignum_add_int(bignum *, bignum *, long int);
__host__ int bignum_mult(bignum *, bignum *, bignum *);
__host__ int bignum_mult_int(bignum *, bignum *, long int);
__host__ int bignum_divide(bignum *, bignum *, bignum *);
__host__ int bignum_int_divide(bignum *, long int, bignum *);
__host__ int bignum_divide_int(bignum *, bignum *, long int);
__device__ bignum * bignum_init_gpu(long int);
__device__ void bignum_reset_gpu(bignum *);
__device__ void bignum_clear_gpu(bignum *);
__device__ int bignum_set_int_gpu(bignum *, long int);
__device__ void bignum_set_gpu(bignum *, bignum *);
__device__ int bignum_add_gpu(bignum *, bignum *, bignum *);
__device__ int bignum_add_int_gpu(bignum *, bignum *, long int);
__device__ int bignum_mult_gpu(bignum *, bignum *, bignum *);
__device__ int bignum_mult_int_gpu(bignum *, bignum *, long int);
__device__ int bignum_divide_gpu(bignum *, bignum *, bignum *);
__device__ int bignum_int_divide_gpu(bignum *, long int, bignum *);
__device__ int bignum_divide_int_gpu(bignum *, bignum *, long int);
// Main function
int main (int argc, char * argv[])
{
// Obtain command line arguments
long iterations = 20000L;
if (argc > 1) {
iterations = atol (argv[1]);
if (iterations < 1L) {
iterations = 20000L;
}
}
long max_digits = 25L;
if (argc > 2) {
max_digits = atoi (argv[2]);
if (max_digits < 1L) {
max_digits = 25L;
}
}
// Initialize global storage. Notice that we now need extra arrays for data
// transfer between the GPU and regular RAM. These will hold the partial
// sums that each of the threads calculate. Unfortunately, due to the way
// bignums are structured, each of their arguments has to be transferred
// separately. Luckily, this only happens once.
long clock_start = (long)clock();
long int i, j;
if (hipDeviceSetLimit(hipLimitMallocHeapSize, (NUM_BLOCKS * THREADS_PER_BLOCK * 16384))
!= hipSuccess) { printf("\nError setting GPU heap size.\n"); return 1; }
hipDeviceSynchronize();
long * hosttrappower = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long));
long * hosttrapsig_digs = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long));
char * hosttrapdigits = (char *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits), sizeof(char));
long * hostmidpower = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long));
long * hostmidsig_digs = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long));
char * hostmiddigits = (char *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits), sizeof(char));
if ((hosttrappower == 0) || (hosttrapsig_digs == 0) || (hosttrapdigits == 0) ||
(hostmidpower == 0) || (hostmidsig_digs == 0) || (hostmiddigits == 0)) {
printf("\nError allocating memory on the CPU.\n");
return 1;
}
long * devicetrappower;
long * devicetrapsig_digs;
char * devicetrapdigits;
long * devicemidpower;
long * devicemidsig_digs;
char * devicemiddigits;
if (hipMalloc((void**)&devicetrappower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)))
!= hipSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; }
if (hipMalloc((void**)&devicetrapsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)))
!= hipSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; }
if (hipMalloc((void**)&devicetrapdigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char)))
!= hipSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; }
if (hipMalloc((void**)&devicemidpower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)))
!= hipSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; }
if (hipMalloc((void**)&devicemidsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)))
!= hipSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; }
if (hipMalloc((void**)&devicemiddigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char)))
!= hipSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; }
hipDeviceSynchronize();
char * accepted_pi = "3.14159265358979323846264338327950288419716939937510"
"58209749445923078164062862089986280348253421170679\0";
char pi_printer[2];
pi_printer[0] = '0';
pi_printer[1] = '\0';
// Split off worker threads. When dividing the work, if the number of
// threads does not evenly divide into the desired number of iterations,
// give any extra iterations to the final thread. This gives the final
// thread at most (num_threads - 1) extra iterations. Notice that this
// is a 1D-grid of work, and we use function arguments this time. Also,
// remember the number of threads is held constant, thanks to #defines,
// at NUM_BLOCKS * THREADS_PER_BLOCK.
dim3 numBlocks(NUM_BLOCKS);
dim3 threadsPerBlock(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( calculate), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, devicetrappower, devicetrapsig_digs,
devicetrapdigits, devicemidpower, devicemidsig_digs, devicemiddigits, iterations, max_digits);
hipDeviceSynchronize();
// Copy results back from GPU
if (hipMemcpy(hosttrappower, devicetrappower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)),
hipMemcpyDeviceToHost) != hipSuccess) { printf("\nError copying memory from GPU.\n"); return 3; }
if (hipMemcpy(hosttrapsig_digs, devicetrapsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)),
hipMemcpyDeviceToHost) != hipSuccess) { printf("\nError copying memory from GPU.\n"); return 3; }
if (hipMemcpy(hosttrapdigits, devicetrapdigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char)),
hipMemcpyDeviceToHost) != hipSuccess) { printf("\nError copying memory from GPU.\n"); return 3; }
if (hipMemcpy(hostmidpower, devicemidpower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)),
hipMemcpyDeviceToHost) != hipSuccess) { printf("\nError copying memory from GPU.\n"); return 3; }
if (hipMemcpy(hostmidsig_digs, devicemidsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)),
hipMemcpyDeviceToHost) != hipSuccess) { printf("\nError copying memory from GPU.\n"); return 3; }
if (hipMemcpy(hostmiddigits, devicemiddigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char)),
hipMemcpyDeviceToHost) != hipSuccess) { printf("\nError copying memory from GPU.\n"); return 3; }
hipDeviceSynchronize();
if (hipFree(devicetrappower) != hipSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
if (hipFree(devicetrapsig_digs) != hipSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
if (hipFree(devicetrapdigits) != hipSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
if (hipFree(devicemidpower) != hipSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
if (hipFree(devicemidsig_digs) != hipSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
if (hipFree(devicemiddigits) != hipSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
// After worker threads end, clean up each of the partial sums
bignum * trap = bignum_init(max_digits);
bignum * mid = bignum_init(max_digits);
bignum * temp = bignum_init(max_digits);
bignum * simp = bignum_init(max_digits);
if (trap == 0 || mid == 0 || temp == 0 || simp == 0) {
printf("Error allocating memory. Now exiting.\n");
return -1;
}
for (i = 0L; i < (NUM_BLOCKS * THREADS_PER_BLOCK); i++) {
simp->power = hosttrappower[i];
simp->sig_digs = hosttrapsig_digs[i];
for (j = 0L; j < max_digits; j++) {
simp->digits[(int)j] = hosttrapdigits[(int)((i * max_digits) + j)];
}
bignum_add(temp, trap, simp);
bignum_reset(trap);
bignum_reset(simp);
bignum_set(trap, temp);
bignum_reset(temp);
simp->power = hostmidpower[i];
simp->sig_digs = hostmidsig_digs[i];
for (j = 0L; j < max_digits; j++) {
simp->digits[(int)j] = hostmiddigits[(int)((i * max_digits) + j)];
}
bignum_add(temp, mid, simp);
bignum_reset(mid);
bignum_reset(simp);
bignum_set(mid, temp);
bignum_reset(temp);
}
// Finally, Simpson's Rule is applied
bignum_mult_int(temp, mid, 2L);
bignum_reset(mid);
bignum_set(mid, temp);
bignum_reset(temp);
bignum_add(temp, trap, mid);
bignum_reset(trap);
bignum_set(trap, temp);
bignum_reset(temp);
bignum_divide_int(temp, trap, 3L);
bignum_reset(trap);
bignum_set(trap, temp);
bignum_reset(temp);
bignum_mult_int(simp, trap, 4L);
long clock_end = (long)clock();
printf("The calculated value of pi is ");
bignum_print(simp, 0L);
printf("\nThe actual value of pi is 3.");
for (i = 0L; i < (max_digits - 1L); i++) {
// This may print an extra digit or two because, somewhere down in the
// code, we're losing our last sig dig during normal math, but it's
// bubbling back up, and causing the final result to lose a place or
// two. It's not a big deal, and I don't want to do anything about it,
// so we'll just have the ends of the numbers not line up. Whatever.
pi_printer[0] = accepted_pi[(int)(i + 2L)];
printf("%s", pi_printer);
}
printf("\nThe time taken to calculate this was %.2f seconds\n",
((float)(clock_end - clock_start)) / (float)CLOCKS_PER_SEC);
// Free global storage
free(hosttrappower);
free(hosttrapsig_digs);
free(hosttrapdigits);
free(hostmidpower);
free(hostmidsig_digs);
free(hostmiddigits);
bignum_clear(trap);
bignum_clear(mid);
bignum_clear(simp);
bignum_clear(temp);
return 0;
}
// Function executed by each thread to incrementally calculate the overall value
__global__ void calculate (long * devicetrappower, long * devicetrapsig_digs,
char * devicetrapdigits, long * devicemidpower, long * devicemidsig_digs,
char * devicemiddigits, long iterations, long max_digits)
{
// Initialize needed variables and check for errors
long threadid = threadIdx.x + (blockIdx.x * THREADS_PER_BLOCK);
long lowlimit = threadid * (iterations / (NUM_BLOCKS * THREADS_PER_BLOCK));
long highlimit = (((threadid + 1L) == (NUM_BLOCKS * THREADS_PER_BLOCK)) ? iterations :
((threadid + 1L) * (iterations / (NUM_BLOCKS * THREADS_PER_BLOCK))));
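// Worked example of this split: with the default 20000 iterations and
// NUM_BLOCKS * THREADS_PER_BLOCK = 448 threads, iterations / 448 = 44, so thread t covers
// [44*t, 44*(t+1)); the last thread (t = 447) covers [19668, 20000) and therefore absorbs
// the 288 leftover iterations on top of its base 44.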
bignum * trap = bignum_init_gpu(max_digits);
bignum * mid = bignum_init_gpu(max_digits);
bignum * inverseiterations = bignum_init_gpu(max_digits);
bignum * temp_holder = bignum_init_gpu(max_digits);
bignum * temp_holder2 = bignum_init_gpu(max_digits);
bignum * inc = bignum_init_gpu(max_digits);
bignum * leftrect = bignum_init_gpu(max_digits);
bignum * rightrect = bignum_init_gpu(max_digits);
if (trap == 0 || mid == 0 || inverseiterations == 0 || temp_holder == 0 ||
temp_holder2 == 0 || inc == 0 || leftrect == 0 || rightrect == 0) {
return;
}
// Initialize values of needed variables
bignum_set_int_gpu(temp_holder, iterations);
bignum_int_divide_gpu(inverseiterations, 1L, temp_holder);
bignum_reset_gpu(temp_holder);
long i;
long k = lowlimit;
bignum_divide_int_gpu(temp_holder, inverseiterations, 2L);
bignum_set_int_gpu(inc, k);
bignum_mult_gpu(temp_holder2, inc, inverseiterations);
bignum_reset_gpu(inc);
bignum_set_gpu(inc, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_add_gpu(temp_holder2, inc, temp_holder);
bignum_reset_gpu(inc);
bignum_set_gpu(inc, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_reset_gpu(temp_holder);
// Main iteration loop. Note that the values of inverseiterations, inc,
// mid, and trap are preserved across loop iterations, as is counter k.
// inverseiterations is a constant that is stored for simplicity. Man,
// this is looking more and more like assembly...
for (i = lowlimit; i < highlimit; i++) {
// First, the trapezoid rule is used to estimate pi
bignum_reset_gpu(leftrect);
bignum_set_int_gpu(leftrect, k);
bignum_mult_gpu(temp_holder2, leftrect, inverseiterations);
bignum_reset_gpu(leftrect);
bignum_set_gpu(leftrect, temp_holder2);
bignum_reset_gpu(temp_holder2);
k++;
bignum_reset_gpu(rightrect);
bignum_set_int_gpu(rightrect, k);
bignum_mult_gpu(temp_holder2, rightrect, inverseiterations);
bignum_reset_gpu(rightrect);
bignum_set_gpu(rightrect, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_add_gpu(temp_holder, leftrect, rightrect);
bignum_divide_int_gpu(temp_holder2, temp_holder, 2L);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_mult_gpu(temp_holder2, temp_holder, temp_holder);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_add_int_gpu(temp_holder2, temp_holder, 1L);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_int_divide_gpu(temp_holder2, 1L, temp_holder);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_mult_gpu(temp_holder2, temp_holder, inverseiterations);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_add_gpu(temp_holder2, trap, temp_holder);
bignum_reset_gpu(trap);
bignum_set_gpu(trap, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_reset_gpu(temp_holder);
// Next, the midpoint rule is also used to estimate pi
bignum_set_gpu(temp_holder, inc);
bignum_add_gpu(temp_holder2, inc, inverseiterations);
bignum_reset_gpu(inc);
bignum_set_gpu(inc, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_mult_gpu(temp_holder2, temp_holder, temp_holder);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_add_int_gpu(temp_holder2, temp_holder, 1L);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_int_divide_gpu(temp_holder2, 1L, temp_holder);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_mult_gpu(temp_holder2, temp_holder, inverseiterations);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_add_gpu(temp_holder2, mid, temp_holder);
bignum_reset_gpu(mid);
bignum_set_gpu(mid, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_reset_gpu(temp_holder);
}
// Save partial result, clear memory, and exit
devicetrappower[threadid] = trap->power;
devicetrapsig_digs[threadid] = trap->sig_digs;
for (i = 0; i < max_digits; i++) {
devicetrapdigits[(threadid * max_digits) + i] = trap->digits[i];
}
devicemidpower[threadid] = mid->power;
devicemidsig_digs[threadid] = mid->sig_digs;
for (i = 0; i < max_digits; i++) {
devicemiddigits[(threadid * max_digits) + i] = mid->digits[i];
}
bignum_clear_gpu(trap);
bignum_clear_gpu(mid);
bignum_clear_gpu(inverseiterations);
bignum_clear_gpu(temp_holder);
bignum_clear_gpu(temp_holder2);
bignum_clear_gpu(inc);
bignum_clear_gpu(leftrect);
bignum_clear_gpu(rightrect);
}
// Create space for a bignum with the specified precision.
// Technically, it's also initialized if we interpret having zero
// significant digits as the number having a value of zero.
__host__ bignum * bignum_init(long int precision) {
bignum * temp_ptr = (bignum *)calloc(1, sizeof(bignum));
if (temp_ptr == 0) { return 0; }
temp_ptr->digits = (char *)calloc((int)precision, sizeof(char));
if ((temp_ptr->digits) == 0) { free(temp_ptr); return 0; }
temp_ptr->precision = precision;
return temp_ptr;
}
// Resets a bignum's value to zero. memcpy isn't used because
// why bring the string library into this just for this use?
__host__ void bignum_reset(bignum * numval) {
if ((numval->sig_digs) > 0L) {
long int i;
for (i = 0L; i < numval->precision; i++) { numval->digits[(int)i] = '\0'; }
numval->power = 0L;
numval->sig_digs = 0L;
}
return;
}
// Free memory used by a bignum when we're done with it
__host__ void bignum_clear(bignum * oldnum) {
free(oldnum->digits);
free(oldnum);
return;
}
// Set an instance of a bignum to an integer value. Note that if we can't
// initialize the temp word we need for copying, we return false (value = 0).
// We also assume that the number is non-negative since we only store
// unsigned numbers. We assume the result is initialized/reset. Finally,
// we handle zero specially by just resetting (again?) the result. Note that
// we explicitly assume the number to convert fits within the max number of
// digits. If we try to convert a number bigger than we can store, it won't work.
__host__ int bignum_set_int(bignum * numval, long int intval) {
if (intval > 0L) {
// Separate out the individual digits (stored backwards)
char * temp_word = (char *)calloc((int)(numval->precision), sizeof(char));
if (temp_word == 0) { return 0; }
long int temp_int = intval;
long int counter = 0L;
while (temp_int > 0L) {
temp_word[(int)counter] = (char)(temp_int % 10L);
temp_int = temp_int / 10L;
counter++;
}
// Detect any trailing zeros that we don't need to store
numval->power = counter - 1L;
long int leadingzeros = 0L;
int hasleading = 1;
while (hasleading == 1) {
if (temp_word[(int)leadingzeros] != 0) { hasleading = 0; }
else { leadingzeros++; }
}
// Store final result into actual bignum variable
for (temp_int = 0L; temp_int < (counter - leadingzeros); temp_int++) {
numval->digits[(int)temp_int] = temp_word[(int)(counter - temp_int - 1L)];
}
numval->sig_digs = counter - leadingzeros;
free(temp_word);
return 1;
}
else { bignum_reset(numval); return 1; }
}
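// Illustrative helper (a hedged sketch, never called by this program): shows
// the digit layout bignum_set_int produces for a value with trailing zeros.
// 1200 is stored most-significant-digit first as {1, 2} with power 3 and
// sig_digs 2; the precision of 25 digits is an arbitrary choice here.
__host__ static void bignum_set_int_example(void) {
    bignum * n = bignum_init(25L);
    if (n == 0) { return; }
    bignum_set_int(n, 1200L);
    printf("power=%ld sig_digs=%lu lead=%d\n",
        n->power, n->sig_digs, (int)(n->digits[0]));
    bignum_clear(n);
}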
// Set an instance of a bignum to the value of another bignum. We don't assume
// they're both the same precision; just use the precision of the new number.
// We do assume that the new number has already been initialized, though.
// strncpy is not used since it quits after seeing the first zero.
__host__ void bignum_set(bignum * newnum, bignum * oldnum) {
if ((oldnum->sig_digs) > 0L) {
newnum->power = oldnum->power;
newnum->sig_digs = ((oldnum->sig_digs > newnum->precision) ?
(newnum->precision) : (oldnum->sig_digs));
long int i;
for (i = 0L; i < newnum->sig_digs; i++) {
newnum->digits[(int)i] = oldnum->digits[(int)i];
}
}
else { bignum_reset(newnum); }
return;
}
// Use printf to print the number one digit at a time. There are a few cases:
// power > significant digits: pad end with zeros
// significant digits > power: fractional digit (non-integer)
// power is negative: total value less than 1
// The second argument is the maximum number of significant digits to print.
// If it's zero, then all available digits will be printed, maxing out at
// the precision of the number (the total amount it could possibly store).
// Note that this is different from total digits printed: zeroes after a
// decimal point but before the first significant digit don't count, and we
// make sure we print at least the integral part of the number (we only
// chop off fractional portions).
__host__ void bignum_print(bignum * numval, long int maxdigits) {
long int i;
long int limit = numval->sig_digs;
if (numval->sig_digs == 0L) { printf("0"); } else {
if ((maxdigits > 0L) && (maxdigits < numval->sig_digs)) {
limit = maxdigits;
}
if (numval->power < 0L) {
printf("0.");
for (i = 1L; i < (-1L * (numval->power)); i++) { printf("0"); }
for (i = 0L; i < limit; i++) {
printf("%d", (int)(numval->digits[(int)i]));
}
}
else if (numval->sig_digs > (numval->power + 1L)) {
for (i = 0L; i <= numval->power; i++) {
printf("%d", (int)(numval->digits[(int)i]));
}
if (limit > (numval->power + 1L)) { printf("."); }
for (i = (numval->power + 1L); i < limit; i++) {
printf("%d", (int)(numval->digits[(int)i]));
}
}
else { for (i = 0L; i < numval->sig_digs; i++) {
printf("%d", (int)(numval->digits[(int)i])); }
}
if ((numval->power > 0L) && ((numval->power + 1L) > numval->sig_digs)) {
for (i = 0L; i < ((numval->power + 1L) - numval->sig_digs); i++) {
printf("0");
}
} }
fflush(stdout);
return;
}
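// Examples of what bignum_print produces for a few layouts (a sketch of the
// cases handled above; digits are listed most-significant first):
//   digits {3,1,4,1,5}, power  0, sig_digs 5  ->  "3.1415"
//   digits {3,1,4},     power  2, sig_digs 3  ->  "314"
//   digits {5},         power -2, sig_digs 1  ->  "0.05"
//   digits {7},         power  3, sig_digs 1  ->  "7000"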
// Adds two bignums together and stores the result. Uses the functions to
// reset and set the location of the result internally, so current contents of
// result operand will be overwritten. Like bignum_set_int, returns 1 if
// addition was successful or 0 if an error occurred. A special shortcut is
// taken if either (or both) of the operands are zero. Note that it is possible
// for large additions to cause underflow to zero. In that case, special care is
// taken to make sure the proper input operand is used. Note that we assume the
// precision of all three operands is the same. If it's not, something terrible
// like a seg fault or incorrect answer will probably occur. Most importantly,
// the result operand CANNOT be the same as one of the input operands, since
// the result is clobbered immediately and used as a scratchpad. Note that this
// is also unsigned addition: not only does it not accept negative numbers, it
// also doesn't do subtraction (which, for that matter, isn't commutative).
__host__ int bignum_add(bignum * resultnum, bignum * leftnum, bignum * rightnum) {
bignum_reset(resultnum);
if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs > 0L)) {
bignum_set(resultnum, rightnum);
return 1;
}
else if ((rightnum->sig_digs == 0L) && (leftnum->sig_digs > 0L)) {
bignum_set(resultnum, leftnum);
return 1;
}
else if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs == 0L)) { return 1; }
else {
// First check for overshift: if the larger number's power is too much
// bigger than the smaller number's, the smaller will be completely lost,
// and we'll just end up with the large number as the result.
if ((((leftnum->power - rightnum->power) > 0) &&
((leftnum->power - rightnum->power) > resultnum->precision))) {
bignum_set(resultnum, leftnum);
return 1;
}
if ((((rightnum->power - leftnum->power) > 0) &&
((rightnum->power - leftnum->power) > resultnum->precision))) {
bignum_set(resultnum, rightnum);
return 1;
}
// Next, shift the smaller operand to match the larger one by copying
// it into the result operand as a partial sum. Also copy over the
// power and total significant digits into the result.
bignum * bigger;
bignum * smaller;
if ((leftnum->power - rightnum->power) >= 0L) {
bigger = leftnum;
smaller = rightnum;
}
else {
bigger = rightnum;
smaller = leftnum;
}
long int difference = bigger->power - smaller->power;
long int startdigit = smaller->sig_digs + difference;
long int transfertotal = smaller->sig_digs;
if (startdigit > resultnum->precision) {
startdigit = resultnum->precision - difference;
transfertotal = startdigit;
}
long int startdigitcopy = startdigit;
startdigit--;
long int i;
for (i = 0L; i < transfertotal; i++) {
if ((startdigit - difference) >= 0L) {
resultnum->digits[(int)startdigit] =
smaller->digits[(int)(startdigit - difference)];
}
startdigit--;
}
// Now the main addition loop: loop through each digit and add it.
// The carry from the previous digit will add to the current one.
// Note that we detect any trailing zeros to take from the sig_digs.
// Also, copy over the power and significant digits
resultnum->power = bigger->power;
resultnum->sig_digs = startdigitcopy;
if (bigger->sig_digs > resultnum->sig_digs) {
resultnum->sig_digs = bigger->sig_digs;
startdigitcopy = resultnum->sig_digs;
}
int trailingzeros = 1;
long int zerocount = 0L;
char carry = 0;
for (i = 0L; i < resultnum->sig_digs; i++) {
resultnum->digits[(int)(startdigitcopy - i - 1L)] +=
(bigger->digits[(int)(startdigitcopy - i - 1L)] + carry);
if (resultnum->digits[(int)(startdigitcopy - i - 1L)] >= 10) {
resultnum->digits[(int)(startdigitcopy - i - 1L)] -= 10;
carry = 1;
} else { carry = 0; }
if (trailingzeros == 1) {
if (resultnum->digits[(int)(startdigitcopy - i - 1L)] == '\0') {
zerocount++;
} else { trailingzeros = 0; }
}
}
// If we've got trailing zeros, subtract them from the final count of
// sig_digs. Also, if we have a carry, we need to shift everything...
resultnum->sig_digs -= zerocount;
if (carry > 0) {
transfertotal = resultnum->sig_digs;
if (transfertotal == resultnum->precision) { transfertotal--; }
startdigitcopy = transfertotal - 1L;
for (i = 0L; i < transfertotal; i++) {
if (startdigitcopy >= 0L) {
resultnum->digits[(int)(startdigitcopy + 1L)] =
resultnum->digits[(int)startdigitcopy];
}
else if ((startdigitcopy + 1L) >= 0L) {
resultnum->digits[(int)(startdigitcopy + 1L)] = 0;
}
startdigitcopy--;
}
resultnum->digits[0] = carry;
resultnum->power++;
resultnum->sig_digs++;
}
if (resultnum->sig_digs > resultnum->precision) {
resultnum->sig_digs = resultnum->precision;
}
return 1;
}
}
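// Usage sketch (mirrors how main() accumulates the partial sums): because the
// result operand must not alias an input, sums are built in a scratch bignum
// and then copied back:
//   bignum_add(temp, sum, term);   // temp = sum + term
//   bignum_reset(sum);
//   bignum_set(sum, temp);         // sum = temp
//   bignum_reset(temp);
// As a worked value, adding 999 (digits {9,9,9}, power 2) and 1 (digits {1},
// power 0) carries all the way out and yields digits {1}, power 3, sig_digs 1,
// i.e. 1000.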
// A convenience wrapper that temporarily creates a new bignum out of the
// given integer, calls bignum_add with it and the other operand, and deletes
// the temporary bignum before exiting. Any problems that bignum_add encounters
// are passed back up through this function and returned to the caller.
__host__ int bignum_add_int(bignum * resultnum, bignum * leftnum, long int rightint) {
bignum_reset(resultnum);
if ((rightint == 0L) && (leftnum->sig_digs > 0L)) {
bignum_set(resultnum, leftnum);
return 1;
}
else if ((leftnum->sig_digs == 0L) && (rightint > 0L)) {
return bignum_set_int(resultnum, rightint);
}
else if ((leftnum->sig_digs == 0L) && (rightint == 0L)) { return 1; }
else {
bignum * tempnum = bignum_init(resultnum->precision);
if (tempnum == 0) { return 0; }
if (bignum_set_int(tempnum, rightint) == 0) {
bignum_clear(tempnum);
return 0;
}
int retval = bignum_add(resultnum, leftnum, tempnum);
bignum_clear(tempnum);
return retval;
}
}
// Multiplies two bignums together and stores the result. Like add, uses
// functions to reset and set the location of the result, and returns 1 upon
// success or 0 if an error occurred. A special shortcut is taken if either
// operand is zero, since the result will thus also be zero. Note that we assume
// the precision of all three operands is the same. If it's not, something
// terrible like a seg fault or incorrect answer will probably occur. Most
// importantly, the result operand CANNOT be the same as one of the input
// operands, since the result is clobbered immediately and used as a scratchpad.
// Also, note that this is unsigned: it assumes both operands are positive.
__host__ int bignum_mult(bignum * resultnum, bignum * leftnum, bignum * rightnum) {
bignum_reset(resultnum);
if ((leftnum->sig_digs == 0L) || (rightnum->sig_digs == 0L)) { return 1; }
else {
// Initialize the scratchpad and find the digit limits
char * temp_word = (char *)calloc((int)(2L * (resultnum->precision)), sizeof(char));
if (temp_word == 0) { return 0; }
bignum * bigger;
bignum * smaller;
if (((signed long int)leftnum->sig_digs - (signed long int)rightnum->sig_digs) >= 0L) {
bigger = leftnum;
smaller = rightnum;
}
else if ((rightnum->sig_digs - leftnum->sig_digs) > 0L) {
bigger = rightnum;
smaller = leftnum;
}
long int bigstart = (bigger->sig_digs) - 1L;
long int smallstart = (smaller->sig_digs) - 1L;
long int bigcounter, smallcounter;
char carry = 0;
// Perform the shift-addition loop. We choose to loop over each
// digit of the smaller number for fewer overall iterations. If
// the current bigloop has a zero, we can just skip that iteration.
// Also, record the final carry, power, and sig_digs values.
for (bigcounter = 0L; bigcounter < (smaller->sig_digs); bigcounter++) {
if (smaller->digits[(int)(smallstart - bigcounter)] != '\0') {
carry = 0;
for(smallcounter = 0L; smallcounter < (bigger->sig_digs); smallcounter++) {
temp_word[(int)((2L * (resultnum->precision)) - smallcounter -
bigcounter - 1L)] += (carry + (smaller->digits[(int)(smallstart -
bigcounter)] * bigger->digits[(int)(bigstart - smallcounter)]));
carry = temp_word[(int)((2L * (resultnum->precision)) -
smallcounter - bigcounter - 1L)] / 10;
temp_word[(int)((2L * (resultnum->precision)) - smallcounter -
bigcounter - 1L)] %= 10;
}
temp_word[(int)((2L * (resultnum->precision)) - bigcounter -
(bigger->sig_digs) - 1L)] = carry;
}
}
resultnum->power = ((bigger->power) + (smaller->power));
resultnum->sig_digs = ((bigger->sig_digs) + (smaller->sig_digs));
// Adjust for lack of a final carry or trailing zeros.
if (carry < 1) {
(resultnum->sig_digs)--;
(resultnum->power)--;
}
(resultnum->power)++;
int trailingzeros = 1;
long int zerocount = 0L;
long int i = (2L * (resultnum->precision) - 1L);
while (trailingzeros == 1) {
if (temp_word[(int)i] == '\0') {
zerocount++;
} else { trailingzeros = 0; }
i--;
}
resultnum->sig_digs -= zerocount;
if ((resultnum->sig_digs) > (resultnum->precision)) {
resultnum->sig_digs = (resultnum->precision);
}
// Finally, copy from the temp word into the result, taking into
// account any digits we may lose due to precision.
long int tempstart = (2L * (resultnum->precision)) - ((bigger->sig_digs) +
(smaller->sig_digs));
if (carry < 1) { tempstart++; }
for (i = 0L; i < (resultnum->sig_digs); i++) {
resultnum->digits[(int)i] = temp_word[(int)(tempstart + i)];
}
free(temp_word);
return 1;
}
}
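// Worked example (a sketch, not executed here): multiplying 0.25 (digits
// {2,5}, power -1) by 12 (digits {1,2}, power 1) produces digits {3}, power 0,
// sig_digs 1, i.e. exactly 3; the powers add, and the two trailing zeros of
// 25 * 12 = 300 are dropped from sig_digs.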
// Like bignum_add_int, a convenience wrapper that creates a temporary bignum
// out of the integer and passes it to bignum_mult. Any problems encountered
// in client functions are passed back up to the original caller.
__host__ int bignum_mult_int(bignum * resultnum, bignum * leftnum, long int rightint) {
bignum_reset(resultnum);
if ((leftnum->sig_digs == 0L) || (rightint == 0L)) { return 1; }
else {
bignum * tempnum = bignum_init(resultnum->precision);
if (tempnum == 0) { return 0; }
if (bignum_set_int(tempnum, rightint) == 0) {
bignum_clear(tempnum);
return 0;
}
int retval = bignum_mult(resultnum, leftnum, tempnum);
bignum_clear(tempnum);
return retval;
}
}
// Divides two bignums. Taken in terms of a fraction, leftnum is the numerator
// and rightnum is the denominator. Performs an explicit check to make sure
// the denominator is not zero, and returns 0 (an error) if it is. Returns 1 upon
// success or 0 if an error occurs. A special shortcut is taken if the numerator is
// zero. Note that we assume the precision of all three operands is the same. If it's
// not, something terrible like a seg fault or incorrect answer will probably occur.
// Most importantly, the result operand CANNOT be the same as one of the input
// operands, since the result is clobbered immediately and used as a scratchpad.
// Also, note that this is unsigned: it assumes both operands are positive.
__host__ int bignum_divide(bignum * resultnum, bignum * numerator, bignum * denominator) {
bignum_reset(resultnum);
if (denominator->sig_digs == 0L) { return 0; }
else if (numerator->sig_digs == 0L) { return 1; }
else {
// Initialize the scratchpad and initially copy the numerator into it.
// Also initialize the result's power.
char * temp_word = (char *)calloc((int)(2L *
(resultnum->precision) + 2L), sizeof(char)); // May only need to be + 1L
if (temp_word == 0) { return 0; }
long int i;
for (i = 0L; i < numerator->sig_digs; i++) {
temp_word[(int)(i + 1L)] = numerator->digits[(int)i];
}
resultnum->power = (numerator->power - denominator->power);
long int sigdigctr = 0L;
long int numeratorindex = 0L;
// First see if we need to "shift" the numerator by comparing it.
i = ((denominator->sig_digs) - 1L);
int denom_bigger = 1;
while ((i >= 0L) && (denom_bigger == 1)) {
if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) >
(temp_word[(int)((denominator->sig_digs) - i)])) {
i = 0L;
}
else if ((denominator->digits[(int)((denominator->sig_digs) -
i - 1L)]) < (temp_word[(int)((denominator->sig_digs) - i)])) {
denom_bigger = 0;
}
else if (((denominator->digits[(int)((denominator->sig_digs) - i -
1L)]) == (temp_word[(int)((denominator->sig_digs) - i)])) && (i == 0L)) {
denom_bigger = 0;
}
i--;
}
if (denom_bigger == 1) {
numeratorindex++;
(resultnum->power)--;
}
// Now the main division loop. Note that there are two ways to terminate:
// either we've filled the entire precision of the result word and are
// forced to truncate our result, or our answer divides exactly. In the
// second case, once we've exhausted the numerator's significant digits
// and our temp word contains nothing but zeros, we can end early since
// all subsequent iterations would contribute only zeros as well. Note
// that special care will be taken to detect extra zeros at the end of
// the result so that the sig_digs is recorded correctly. Also, we don't
// round, we truncate, which doesn't minimize error.
int nonzero = 1;
while ((sigdigctr < (resultnum->precision)) && (nonzero == 1)) {
// First run the subtraction loop.
char current_digit = 0;
int numer_bigger = 1;
while (numer_bigger == 1) {
// To subtract, first run a comparison to see if the numerator
// is bigger. If it is, increment the counter and subtract.
i = ((denominator->sig_digs) - 1L);
denom_bigger = 1;
if (temp_word[(int)numeratorindex] > 0) { denom_bigger = 0; }
while ((i >= 0L) && (denom_bigger == 1)) {
if ((denominator->digits[(int)((denominator->sig_digs) -
i - 1L)]) > (temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)])) {
i = 0L;
}
else if ((denominator->digits[(int)((denominator->sig_digs) -
i - 1L)]) < (temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)])) {
denom_bigger = 0;
}
else if (((denominator->digits[(int)((denominator->sig_digs) -
i - 1L)]) == (temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)])) && (i == 0L)) {
denom_bigger = 0;
}
i--;
}
if (denom_bigger == 1) {
numer_bigger = 0;
}
// Increment counter and perform subtraction loop.
if (numer_bigger == 1) {
current_digit++;
for (i = 0L; i < (denominator->sig_digs); i++) {
temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)] -= (denominator->digits[
(int)((denominator->sig_digs) - i - 1L)]);
if ((temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)]) < 0) {
temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)] += 10L;
(temp_word[(int)((denominator->sig_digs) +
numeratorindex - i - 1L)]) -= 1L;
}
}
}
}
// If we're past all of the numerator's significant digits, run
// zero detection on it to see if we can end early.
if (sigdigctr > (numerator->sig_digs)) { // May only need to be >=
long int zerocounter = 0L;
i = 0L;
while ((i == zerocounter) && (i <= (denominator->sig_digs))) {
if ((temp_word[(int)(numeratorindex + i)]) < 1) { zerocounter++; }
i++;
}
if (zerocounter == ((denominator->sig_digs) + 1L)) { nonzero = 0; }
}
// Once we have obtained the proper digit in the result, save it.
if (sigdigctr < resultnum->precision) {
resultnum->digits[(int)sigdigctr] = current_digit;
}
sigdigctr++;
numeratorindex++;
}
// Record the result's sig digs, taking care to detect trailing zeros.
resultnum->sig_digs = sigdigctr;
int trailingzeros = 1;
long int zerocount = 0L;
i = sigdigctr - 1L;
while (trailingzeros == 1) {
if (resultnum->digits[(int)i] == '\0') {
zerocount++;
} else { trailingzeros = 0; }
i--;
}
(resultnum->sig_digs) -= zerocount;
free (temp_word);
return 1;
}
}
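// Illustrative helper (a hedged sketch, never called by this program): shows
// the quotient layout bignum_divide produces for 1 / 8. The precision of 25
// digits is an arbitrary choice, and error handling is kept minimal.
__host__ static void bignum_divide_example(void) {
    bignum * num = bignum_init(25L);
    bignum * den = bignum_init(25L);
    bignum * quot = bignum_init(25L);
    if (num == 0 || den == 0 || quot == 0) { return; }
    bignum_set_int(num, 1L);
    bignum_set_int(den, 8L);
    bignum_divide(quot, num, den);   // quot becomes digits {1,2,5}, power -1
    bignum_print(quot, 0L);          // prints "0.125"
    printf("\n");
    bignum_clear(num);
    bignum_clear(den);
    bignum_clear(quot);
}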
// A convenience wrapper that creates a temporary bignum out of the integer.
// Since division is not commutative, two wrappers are given. Any problems
// encountered in client functions are passed back up to the original caller.
__host__ int bignum_int_divide(bignum * resultnum, long int leftint, bignum * rightnum) {
bignum_reset(resultnum);
if (rightnum->sig_digs == 0L) { return 0; }
else if (leftint == 0L) { return 1; }
else {
bignum * tempnum = bignum_init(resultnum->precision);
if (tempnum == 0) { return 0; }
if (bignum_set_int(tempnum, leftint) == 0) {
bignum_clear(tempnum);
return 0;
}
int retval = bignum_divide(resultnum, tempnum, rightnum);
bignum_clear(tempnum);
return retval;
}
}
// A convenience wrapper that creates a temporary bignum out of the integer.
// Since division is not commutative, two wrappers are given. Any problems
// encountered in client functions are passed back up to the original caller.
__host__ int bignum_divide_int(bignum * resultnum, bignum * leftnum, long int rightint) {
bignum_reset(resultnum);
if (rightint == 0L) { return 0; }
else if (leftnum->sig_digs == 0L) { return 1; }
else {
bignum * tempnum = bignum_init(resultnum->precision);
if (tempnum == 0) { return 0; }
if (bignum_set_int(tempnum, rightint) == 0) {
bignum_clear(tempnum);
return 0;
}
int retval = bignum_divide(resultnum, leftnum, tempnum);
bignum_clear(tempnum);
return retval;
}
}
// Create space for a bignum with the specified precision.
// Technically, it's also initialized if we interpret having zero
// significant digits as the number having a value of zero.
__device__ bignum * bignum_init_gpu(long int precision) {
bignum * temp_ptr = (bignum *)malloc(sizeof(bignum));
if (temp_ptr == 0) { return temp_ptr; }
temp_ptr->digits = (char *)malloc((int)(precision * sizeof(char)));
if ((temp_ptr->digits) == 0) { free(temp_ptr); return 0; }
int i;
for (i = 0; i < precision; i++) { temp_ptr->digits[i] = '\0'; }
temp_ptr->power = 0L;
temp_ptr->sig_digs = 0L;
temp_ptr->precision = precision;
return temp_ptr;
}
// Resets a bignum's value to zero. memcpy isn't used because
// why bring the string library into this just for this use?
__device__ void bignum_reset_gpu(bignum * numval) {
if ((numval->sig_digs) > 0L) {
long int i;
for (i = 0L; i < numval->precision; i++) { numval->digits[(int)i] = '\0'; }
numval->power = 0L;
numval->sig_digs = 0L;
}
return;
}
// Free memory used by a bignum when we're done with it
__device__ void bignum_clear_gpu(bignum * oldnum) {
free(oldnum->digits);
free(oldnum);
return;
}
// Set an instance of a bignum to an integer value. Note that if we can't
// initialize the temp word we need for copying, we return false (value = 0).
// We also assume that the number is non-negative since we only store
// unsigned numbers. We assume the result is initialized/reset. Finally,
// we handle zero specially by just resetting (again?) the result. Note that
// we explicitly assume the number to convert fits within the max number of
// digits. If we try to convert a number bigger than we can store, it won't work.
__device__ int bignum_set_int_gpu(bignum * numval, long int intval) {
if (intval > 0L) {
// Separate out the individual digits (stored backwards)
char * temp_word = (char *)malloc((int)(numval->precision * sizeof(char)));
if (temp_word == 0) { return 0; }
long int i;
for (i = 0; i < numval->precision; i++) { temp_word[(int)i] = '\0'; }
long int temp_int = intval;
long int counter = 0L;
while (temp_int > 0L) {
temp_word[(int)counter] = (char)(temp_int % 10L);
temp_int = temp_int / 10L;
counter++;
}
// Detect any trailing zeros that we don't need to store
numval->power = counter - 1L;
long int leadingzeros = 0L;
int hasleading = 1;
while (hasleading == 1) {
if (temp_word[(int)leadingzeros] != 0) { hasleading = 0; }
else { leadingzeros++; }
}
// Store final result into actual bignum variable
for (temp_int = 0L; temp_int < (counter - leadingzeros); temp_int++) {
numval->digits[(int)temp_int] = temp_word[(int)(counter - temp_int - 1L)];
}
numval->sig_digs = counter - leadingzeros;
free(temp_word);
return 1;
}
else { bignum_reset_gpu(numval); return 1; }
}
// Set an instance of a bignum to the value of another bignum. We don't assume
// they're both the same precision; just use the precision of the new number.
// We do assume that the new number has already been initialized, though.
// strncpy is not used since it quits after seeing the first zero.
__device__ void bignum_set_gpu(bignum * newnum, bignum * oldnum) {
if ((oldnum->sig_digs) > 0L) {
newnum->power = oldnum->power;
newnum->sig_digs = ((oldnum->sig_digs > newnum->precision) ?
(newnum->precision) : (oldnum->sig_digs));
long int i;
for (i = 0L; i < newnum->sig_digs; i++) {
newnum->digits[(int)i] = oldnum->digits[(int)i];
}
}
else { bignum_reset_gpu(newnum); }
return;
}
// Adds two bignums together and stores the result. Uses the functions to
// reset and set the location of the result internally, so current contents of
// result operand will be overwritten. Like bignum_set_int, returns 1 if
// addition was successful or 0 if an error occurred. A special shortcut is
// taken if either (or both) of the operands are zero. Note that it is possible
// for large additions to cause underflow to zero. In that case, special care is
// taken to make sure the proper input operand is used. Note that we assume the
// precision of all three operands is the same. If it's not, something terrible
// like a seg fault or incorrect answer will probably occur. Most importantly,
// the result operand CANNOT be the same as one of the input operands, since
// the result is clobbered immediately and used as a scratchpad. Note that this
// is also unsigned addition: not only does it not accept negative numbers, it
// also doesn't do subtraction (which, for that matter, isn't commutative).
__device__ int bignum_add_gpu(bignum * resultnum, bignum * leftnum, bignum * rightnum) {
bignum_reset_gpu(resultnum);
if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs > 0L)) {
bignum_set_gpu(resultnum, rightnum);
return 1;
}
else if ((rightnum->sig_digs == 0L) && (leftnum->sig_digs > 0L)) {
bignum_set_gpu(resultnum, leftnum);
return 1;
}
else if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs == 0L)) { return 1; }
else {
// First check for overshift: if the larger number's power is too much
// bigger than the smaller number's, the smaller will be completely lost,
// and we'll just end up with the large number as the result.
if ((((leftnum->power - rightnum->power) > 0) &&
((leftnum->power - rightnum->power) > resultnum->precision))) {
bignum_set_gpu(resultnum, leftnum);
return 1;
}
if ((((rightnum->power - leftnum->power) > 0) &&
((rightnum->power - leftnum->power) > resultnum->precision))) {
bignum_set_gpu(resultnum, rightnum);
return 1;
}
// Next, shift the smaller operand to match the larger one by copying
// it into the result operand as a partial sum. Also copy over the
// power and total significant digits into the result.
bignum * bigger;
bignum * smaller;
if ((leftnum->power - rightnum->power) >= 0L) {
bigger = leftnum;
smaller = rightnum;
}
else {
bigger = rightnum;
smaller = leftnum;
}
long int difference = bigger->power - smaller->power;
long int startdigit = smaller->sig_digs + difference;
long int transfertotal = smaller->sig_digs;
if (startdigit > resultnum->precision) {
startdigit = resultnum->precision - difference;
transfertotal = startdigit;
}
long int startdigitcopy = startdigit;
startdigit--;
long int i;
for (i = 0L; i < transfertotal; i++) {
if ((startdigit - difference) >= 0L) {
resultnum->digits[(int)startdigit] =
smaller->digits[(int)(startdigit - difference)];
}
startdigit--;
}
// Now the main addition loop: loop through each digit and add it.
// The carry from the previous digit will add to the current one.
// Note that we detect any trailing zeros to take from the sig_digs.
// Also, copy over the power and significant digits
resultnum->power = bigger->power;
resultnum->sig_digs = startdigitcopy;
if (bigger->sig_digs > resultnum->sig_digs) {
resultnum->sig_digs = bigger->sig_digs;
startdigitcopy = resultnum->sig_digs;
}
int trailingzeros = 1;
long int zerocount = 0L;
char carry = 0;
for (i = 0L; i < resultnum->sig_digs; i++) {
resultnum->digits[(int)(startdigitcopy - i - 1L)] +=
(bigger->digits[(int)(startdigitcopy - i - 1L)] + carry);
if (resultnum->digits[(int)(startdigitcopy - i - 1L)] >= 10) {
resultnum->digits[(int)(startdigitcopy - i - 1L)] -= 10;
carry = 1;
} else { carry = 0; }
if (trailingzeros == 1) {
if (resultnum->digits[(int)(startdigitcopy - i - 1L)] == '\0') {
zerocount++;
} else { trailingzeros = 0; }
}
}
// If we've got trailing zeros, subtract them from the final count of
// sig_digs. Also, if we have a carry, we need to shift everything...
resultnum->sig_digs -= zerocount;
if (carry > 0) {
transfertotal = resultnum->sig_digs;
if (transfertotal == resultnum->precision) { transfertotal--; }
startdigitcopy = transfertotal - 1L;
for (i = 0L; i < transfertotal; i++) {
if (startdigitcopy >= 0L) {
resultnum->digits[(int)(startdigitcopy + 1L)] =
resultnum->digits[(int)startdigitcopy];
}
else if ((startdigitcopy + 1L) >= 0L) {
resultnum->digits[(int)(startdigitcopy + 1L)] = '\0';
}
startdigitcopy--;
}
resultnum->digits[0] = carry;
resultnum->power++;
resultnum->sig_digs++;
}
if (resultnum->sig_digs > resultnum->precision) {
resultnum->sig_digs = resultnum->precision;
}
return 1;
}
}
// A convenience wrapper that temporarily creates a new bignum out of the
// given integer, calls bignum_add with it and the other operand, and deletes
// the temporary bignum before exiting. Any problems that bignum_add encounters
// are passed back up through this function and returned to the caller.
__device__ int bignum_add_int_gpu(bignum * resultnum, bignum * leftnum, long int rightint) {
bignum_reset_gpu(resultnum);
if ((rightint == 0L) && (leftnum->sig_digs > 0L)) {
bignum_set_gpu(resultnum, leftnum);
return 1;
}
else if ((leftnum->sig_digs == 0L) && (rightint > 0L)) {
return bignum_set_int_gpu(resultnum, rightint);
}
else if ((leftnum->sig_digs == 0L) && (rightint == 0L)) { return 1; }
else {
bignum * tempnum = bignum_init_gpu(resultnum->precision);
if (tempnum == 0) { return 0; }
if (bignum_set_int_gpu(tempnum, rightint) == 0) {
bignum_clear_gpu(tempnum);
return 0;
}
int retval = bignum_add_gpu(resultnum, leftnum, tempnum);
bignum_clear_gpu(tempnum);
return retval;
}
}
// Multiplies two bignums together and stores the result. Like add, uses
// functions to reset and set the location of the result, and returns 1 upon
// success or 0 if an error occurred. A special shortcut is taken if either
// operand is zero, since the result will thus also be zero. Note that we assume
// the precision of all three operands is the same. If it's not, something
// terrible like a seg fault or incorrect answer will probably occur. Most
// importantly, the result operand CANNOT be the same as one of the input
// operands, since the result is clobbered immediately and used as a scratchpad.
// Also, note that this is unsigned: it assumes both operands are positive.
__device__ int bignum_mult_gpu(bignum * resultnum, bignum * leftnum, bignum * rightnum) {
bignum_reset_gpu(resultnum);
if ((leftnum->sig_digs == 0L) || (rightnum->sig_digs == 0L)) { return 1; }
else {
// Initialize the scratchpad and find the digit limits
char * temp_word = (char *)malloc((int)(2L * (resultnum->precision) * sizeof(char)));
if (temp_word == 0) { return 0; }
long int i;
for (i = 0; i < (2L * resultnum->precision); i++) { temp_word[(int)i] = '\0'; }
bignum * bigger;
bignum * smaller;
if (((signed long int)leftnum->sig_digs - (signed long int)rightnum->sig_digs) >= 0L) {
bigger = leftnum;
smaller = rightnum;
}
else if ((rightnum->sig_digs - leftnum->sig_digs) > 0L) {
bigger = rightnum;
smaller = leftnum;
}
long int bigstart = (bigger->sig_digs) - 1L;
long int smallstart = (smaller->sig_digs) - 1L;
long int bigcounter, smallcounter;
char carry = 0;
// Perform the shift-addition loop. We choose to loop over each
// digit of the smaller number for fewer overall iterations. If
// the current bigloop has a zero, we can just skip that iteration.
// Also, record the final carry, power, and sig_digs values.
for (bigcounter = 0L; bigcounter < (smaller->sig_digs); bigcounter++) {
if (smaller->digits[(int)(smallstart - bigcounter)] != '\0') {
carry = 0;
for(smallcounter = 0L; smallcounter < (bigger->sig_digs); smallcounter++) {
temp_word[(int)((2L * (resultnum->precision)) - smallcounter -
bigcounter - 1L)] += (carry + (smaller->digits[(int)(smallstart -
bigcounter)] * bigger->digits[(int)(bigstart - smallcounter)]));
carry = temp_word[(int)((2L * (resultnum->precision)) -
smallcounter - bigcounter - 1L)] / 10;
temp_word[(int)((2L * (resultnum->precision)) - smallcounter -
bigcounter - 1L)] %= 10;
}
temp_word[(int)((2L * (resultnum->precision)) - bigcounter -
(bigger->sig_digs) - 1L)] = carry;
}
}
resultnum->power = ((bigger->power) + (smaller->power));
resultnum->sig_digs = ((bigger->sig_digs) + (smaller->sig_digs));
// Adjust for lack of a final carry or trailing zeros.
if (carry < 1) {
(resultnum->sig_digs)--;
(resultnum->power)--;
}
(resultnum->power)++;
int trailingzeros = 1;
long int zerocount = 0L;
i = (2L * (resultnum->precision) - 1L);
while (trailingzeros == 1) {
if (temp_word[(int)i] == '\0') {
zerocount++;
} else { trailingzeros = 0; }
i--;
}
resultnum->sig_digs -= zerocount;
if ((resultnum->sig_digs) > (resultnum->precision)) {
resultnum->sig_digs = (resultnum->precision);
}
// Finally, copy from the temp word into the result, taking into
// account any digits we may lose due to precision.
long int tempstart = (2L * (resultnum->precision)) - ((bigger->sig_digs) +
(smaller->sig_digs));
if (carry < 1) { tempstart++; }
for (i = 0L; i < (resultnum->sig_digs); i++) {
resultnum->digits[(int)i] = temp_word[(int)(tempstart + i)];
}
free(temp_word);
return 1;
}
}
// Like bignum_add_int, a convenience wrapper that creates a temporary bignum
// out of the integer and passes it to bignum_mult. Any problems encountered
// in client functions are passed back up to the original caller.
__device__ int bignum_mult_int_gpu(bignum * resultnum, bignum * leftnum, long int rightint) {
bignum_reset_gpu(resultnum);
if ((leftnum->sig_digs == 0L) || (rightint == 0L)) { return 1; }
else {
bignum * tempnum = bignum_init_gpu(resultnum->precision);
if (tempnum == 0) { return 0; }
if (bignum_set_int_gpu(tempnum, rightint) == 0) {
bignum_clear_gpu(tempnum);
return 0;
}
int retval = bignum_mult_gpu(resultnum, leftnum, tempnum);
bignum_clear_gpu(tempnum);
return retval;
}
}
// Divides two bignums. Taken in terms of a fraction, leftnum is the numerator
// and rightnum is the denominator. Performs an explicit check to make sure
// the denominator is not zero, and returns 0 (an error) if it is. Returns 1 upon
// success or 0 if an error occurs. A special shortcut is taken if the numerator is
// zero. Note that we assume the precision of all three operands is the same. If it's
// not, something terrible like a seg fault or incorrect answer will probably occur.
// Most importantly, the result operand CANNOT be the same as one of the input
// operands, since the result is clobbered immediately and used as a scratchpad.
// Also, note that this is unsigned: it assumes both operands are positive.
__device__ int bignum_divide_gpu(bignum * resultnum, bignum * numerator, bignum * denominator) {
bignum_reset_gpu(resultnum);
if (denominator->sig_digs == 0L) { return 0; }
else if (numerator->sig_digs == 0L) { return 1; }
else {
// Initialize the scratchpad and initially copy the numerator into it.
// Also initialize the result's power.
char * temp_word = (char *)malloc((int)((2L *
(resultnum->precision) + 2L) * sizeof(char))); // May only need to be + 1L
if (temp_word == 0) { return 0; }
long int i;
temp_word[0] = '\0';
for (i = 0L; i < numerator->sig_digs; i++) {
temp_word[(int)(i + 1L)] = numerator->digits[(int)i];
}
for (i = (1L + numerator->sig_digs); i <
(2L * resultnum->precision + 2L); i++) { temp_word[(int)i] = '\0'; }
resultnum->power = (numerator->power - denominator->power);
long int sigdigctr = 0L;
long int numeratorindex = 0L;
// First see if we need to "shift" the numerator by comparing it.
i = ((denominator->sig_digs) - 1L);
int denom_bigger = 1;
while ((i >= 0L) && (denom_bigger == 1)) {
if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) >
(temp_word[(int)((denominator->sig_digs) - i)])) {
i = 0L;
}
else if ((denominator->digits[(int)((denominator->sig_digs) -
i - 1L)]) < (temp_word[(int)((denominator->sig_digs) - i)])) {
denom_bigger = 0;
}
else if (((denominator->digits[(int)((denominator->sig_digs) - i -
1L)]) == (temp_word[(int)((denominator->sig_digs) - i)])) && (i == 0L)) {
denom_bigger = 0;
}
i--;
}
if (denom_bigger == 1) {
numeratorindex++;
(resultnum->power)--;
}
// Now the main division loop. Note that there are two ways to terminate:
// either we've filled the entire precision of the result word and are
// forced to truncate our result, or our answer divides exactly. In the
// second case, once we've exhausted the numerator's significant digits
// and our temp word contains nothing but zeros, we can end early since
// all subsequent iterations would contribute only zeros as well. Note
// that special care will be taken to detect extra zeros at the end of
// the result so that the sig_digs is recorded correctly. Also, we don't
// round, we truncate, which doesn't minimize error.
int nonzero = 1;
while ((sigdigctr < (resultnum->precision)) && (nonzero == 1)) {
// First run the subtraction loop.
char current_digit = 0;
int numer_bigger = 1;
while (numer_bigger == 1) {
// To subtract, first run a comparison to see if the numerator
// is bigger. If it is, increment the counter and subtract.
i = ((denominator->sig_digs) - 1L);
denom_bigger = 1;
if (temp_word[(int)numeratorindex] > 0) { denom_bigger = 0; }
while ((i >= 0L) && (denom_bigger == 1)) {
if ((denominator->digits[(int)((denominator->sig_digs) -
i - 1L)]) > (temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)])) {
i = 0L;
}
else if ((denominator->digits[(int)((denominator->sig_digs) -
i - 1L)]) < (temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)])) {
denom_bigger = 0;
}
else if (((denominator->digits[(int)((denominator->sig_digs) -
i - 1L)]) == (temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)])) && (i == 0L)) {
denom_bigger = 0;
}
i--;
}
if (denom_bigger == 1) {
numer_bigger = 0;
}
// Increment counter and perform subtraction loop.
if (numer_bigger == 1) {
current_digit++;
for (i = 0L; i < (denominator->sig_digs); i++) {
temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)] -= (denominator->digits[
(int)((denominator->sig_digs) - i - 1L)]);
if ((temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)]) < 0) {
temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)] += 10L;
(temp_word[(int)((denominator->sig_digs) +
numeratorindex - i - 1L)]) -= 1L;
}
}
}
}
// If we're past all of the numerator's significant digits, run
// zero detection on it to see if we can end early.
if (sigdigctr > (numerator->sig_digs)) { // May only need to be >=
long int zerocounter = 0L;
i = 0L;
while ((i == zerocounter) && (i <= (denominator->sig_digs))) {
if ((temp_word[(int)(numeratorindex + i)]) < 1) { zerocounter++; }
i++;
}
if (zerocounter == ((denominator->sig_digs) + 1L)) { nonzero = 0; }
}
// Once we have obtained the proper digit in the result, save it.
if (sigdigctr < resultnum->precision) {
resultnum->digits[(int)sigdigctr] = current_digit;
}
sigdigctr++;
numeratorindex++;
}
// Record the result's sig digs, taking care to detect trailing zeros.
resultnum->sig_digs = sigdigctr;
int trailingzeros = 1;
long int zerocount = 0L;
i = sigdigctr - 1L;
while (trailingzeros == 1) {
if (resultnum->digits[(int)i] == '\0') {
zerocount++;
} else { trailingzeros = 0; }
i--;
}
(resultnum->sig_digs) -= zerocount;
free(temp_word);
return 1;
}
}
// A convenience wrapper that creates a temporary bignum out of the integer.
// Since division is not commutative, two wrappers are given. Any problems
// encountered in client functions are passed back up to the original caller.
__device__ int bignum_int_divide_gpu(bignum * resultnum, long int leftint, bignum * rightnum) {
bignum_reset_gpu(resultnum);
if (rightnum->sig_digs == 0L) { return 0; }
else if (leftint == 0L) { return 1; }
else {
bignum * tempnum = bignum_init_gpu(resultnum->precision);
if (tempnum == 0) { return 0; }
if (bignum_set_int_gpu(tempnum, leftint) == 0) {
bignum_clear_gpu(tempnum);
return 0;
}
int retval = bignum_divide_gpu(resultnum, tempnum, rightnum);
bignum_clear_gpu(tempnum);
return retval;
}
}
// A convenience wrapper that creates a temporary bignum out of the integer.
// Since division is not commutative, two wrappers are given. Any problems
// encountered in client functions are passed back up to the original caller.
__device__ int bignum_divide_int_gpu(bignum * resultnum, bignum * leftnum, long int rightint) {
bignum_reset_gpu(resultnum);
if (rightint == 0L) { return 0; }
else if (leftnum->sig_digs == 0L) { return 1; }
else {
bignum * tempnum = bignum_init_gpu(resultnum->precision);
if (tempnum == 0) { return 0; }
if (bignum_set_int_gpu(tempnum, rightint) == 0) {
bignum_clear_gpu(tempnum);
return 0;
}
int retval = bignum_divide_gpu(resultnum, leftnum, tempnum);
bignum_clear_gpu(tempnum);
return retval;
}
}
| 9dfd10ce119a20f4a4d7e7252d6c375780a81306.cu | /*This program calculates pi using a Simpson's Rule estimation of the
integral of the derivative of arctangent, 1/(1 + x^2), from 0 to 1 (which equals pi/4). When inputting the number of
iterations to perform, more iterations = more precision. The number of
iterations is given as a command line argument. If no argument is
provided, a default value of 20,000 is used. At 20,000 iterations, the
value of pi is guaranteed to be accurate up to 8 decimal places. This
version uses NVIDIA CUDA to perform parallel computation of the
partial sums on a GPU.
The amount of work each core does is given by the two #defines below.
These values will need to be tuned for each device this code runs on in
order to get maximum performance. For example, on the Oakley cluster of
the Ohio Supercomputer Center, which uses the Tesla M2070, there are
14 streaming multiprocessors (SM's), with 32 cores each, for a total of
448 cores. Thus, 448 threads will be created, with each thread performing
multiple iterations (total_iterations / (NUM_BLOCKS * THREADS_PER_BLOCK)
to be precise). Thus, the more iterations given, the more work each thread
does. The number of threads is kept constant in order to make clean-up
easier and to not exceed the capabilities (max number of threads or blocks)
of any particular GPU device. The last thread might have a few extra
iterations if that number doesn't divide evenly.
The number of decimal digits to use as the precision of the calculations is
also given as a command-line argument. Obviously, the higher the number, the
more digits you can successfully calculate. Accuracy still relies on the number
of iterations, though: a high number of digits but low number of iterations
will still result in a low number of digits of precision. Thus, you should
only increase the number of digits when your iterations get too high and
you find that your calculations are no longer precise due to internal
rounding error. You'll probably find that increasing the digits will decrease
performance severely. It is recommended, though, that since error accumulates,
the more digits you want to find, the more padding you'll need to add to the
end of the word to absorb that error. As a general rule of thumb, if you
want to calculate x digits, make your words 2x long. Of course, this also
increases the runtime by 2x.
Compilation on OSC is a little complicated. First, log into oakley.osc.edu and
request an interactive computation node since it has the GPU:
qsub -I -l walltime=0:59:00 -l nodes=1:gpus=1
Once you have acquired a node, first load the CUDA module:
module load cuda
Then compile it with the NVIDIA compiler:
nvcc -O -arch sm_20 -o make_pi_5 make_pi_5.cu
Finally, to run it, just give it the iterations and precision arguments:
./make_pi_5 20000 25
The -arch option is needed because we use dynamic memory allocation on the GPU.
Only second-generation and higher CUDA devices support this, so this code won't
run on the oldest NVIDIA graphics cards.
*/
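// A sketch of the estimate being formed (notation mine, not taken from the
// comments above): with N iterations, h = 1/N, and f(x) = 1/(1 + x*x), the
// integral of f from 0 to 1 equals pi/4. Each thread accumulates partial sums
// T ("trap") and M ("mid") of h * f(...) over its slice of [0, 1], and the
// host combines them at the end as
//   pi ~= 4 * (T + 2*M) / 3
// which is the composite Simpson's Rule written in terms of the trapezoid and
// midpoint estimates.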
// Includes. Optimum values for OSC are:
// NUM_BLOCKS 14
// THREADS_PER_BLOCK 32
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#define NUM_BLOCKS 14
#define THREADS_PER_BLOCK 32
// A bignum is stored as all its decimal digits, separated into an array.
// Really, it's quite terrible for performance, but it allows infinite digits.
// Or at least as many as we can store in memory. The power tells us where to
// put the decimal point, and the number of significant digits tells us how
// many of the digits in the number are actually used. The precision tells us
// the maximum number of digits possible for this particular instance.
typedef struct {
signed long int power;
unsigned long int sig_digs;
char * digits;
unsigned long int precision;
} bignum;
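// Representation example (a sketch): with precision 25, the value 3.1415 would
// be stored as digits = {3,1,4,1,5,0,...}, sig_digs = 5, power = 0, since
// power is the decimal exponent of the leading digit; 314.15 would differ only
// in having power = 2.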
// Function prototypes, mostly for bignum operations. Note that in our use
// below, we assume most of the arithmetic functions don't fail and thus
// don't check their return values. Hope they're tested well... Notice
// now that we have mirrored versions for the GPU, most of which just
// have to call the GPU memory allocation functions.
__global__ void calculate (long *, long *, char *, long *, long *, char *, long, long);
__host__ bignum * bignum_init(long int);
__host__ void bignum_reset(bignum *);
__host__ void bignum_clear(bignum *);
__host__ int bignum_set_int(bignum *, long int);
__host__ void bignum_set(bignum *, bignum *);
__host__ void bignum_print(bignum *, long int);
__host__ int bignum_add(bignum *, bignum *, bignum *);
__host__ int bignum_add_int(bignum *, bignum *, long int);
__host__ int bignum_mult(bignum *, bignum *, bignum *);
__host__ int bignum_mult_int(bignum *, bignum *, long int);
__host__ int bignum_divide(bignum *, bignum *, bignum *);
__host__ int bignum_int_divide(bignum *, long int, bignum *);
__host__ int bignum_divide_int(bignum *, bignum *, long int);
__device__ bignum * bignum_init_gpu(long int);
__device__ void bignum_reset_gpu(bignum *);
__device__ void bignum_clear_gpu(bignum *);
__device__ int bignum_set_int_gpu(bignum *, long int);
__device__ void bignum_set_gpu(bignum *, bignum *);
__device__ int bignum_add_gpu(bignum *, bignum *, bignum *);
__device__ int bignum_add_int_gpu(bignum *, bignum *, long int);
__device__ int bignum_mult_gpu(bignum *, bignum *, bignum *);
__device__ int bignum_mult_int_gpu(bignum *, bignum *, long int);
__device__ int bignum_divide_gpu(bignum *, bignum *, bignum *);
__device__ int bignum_int_divide_gpu(bignum *, long int, bignum *);
__device__ int bignum_divide_int_gpu(bignum *, bignum *, long int);
// Main function
int main (int argc, char * argv[])
{
// Obtain command line arguments
long iterations = 20000L;
if (argc > 1) {
iterations = atol (argv[1]);
if (iterations < 1L) {
iterations = 20000L;
}
}
long max_digits = 25L;
if (argc > 2) {
max_digits = atoi (argv[2]);
if (max_digits < 1L) {
max_digits = 25L;
}
}
// Initialize global storage. Notice that we now need extra arrays for data
// transfer between the GPU and regular RAM. These will hold the partial
// sums that each of the threads calculate. Unfortunately, due to the way
// bignums are structured, each of their arguments has to be transferred
// separately. Luckily, this only happens once.
long clock_start = (long)clock();
long int i, j;
if (cudaDeviceSetLimit(cudaLimitMallocHeapSize, (NUM_BLOCKS * THREADS_PER_BLOCK * 16384))
!= cudaSuccess) { printf("\nError setting GPU heap size.\n"); return 1; }
cudaDeviceSynchronize();
long * hosttrappower = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long));
long * hosttrapsig_digs = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long));
char * hosttrapdigits = (char *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits), sizeof(char));
long * hostmidpower = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long));
long * hostmidsig_digs = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long));
char * hostmiddigits = (char *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits), sizeof(char));
if ((hosttrappower == 0) || (hosttrapsig_digs == 0) || (hosttrapdigits == 0) ||
(hostmidpower == 0) || (hostmidsig_digs == 0) || (hostmiddigits == 0)) {
printf("\nError allocating memory on the CPU.\n");
return 1;
}
long * devicetrappower;
long * devicetrapsig_digs;
char * devicetrapdigits;
long * devicemidpower;
long * devicemidsig_digs;
char * devicemiddigits;
if (cudaMalloc((void**)&devicetrappower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)))
!= cudaSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; }
if (cudaMalloc((void**)&devicetrapsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)))
!= cudaSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; }
if (cudaMalloc((void**)&devicetrapdigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char)))
!= cudaSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; }
if (cudaMalloc((void**)&devicemidpower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)))
!= cudaSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; }
if (cudaMalloc((void**)&devicemidsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)))
!= cudaSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; }
if (cudaMalloc((void**)&devicemiddigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char)))
!= cudaSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; }
cudaDeviceSynchronize();
char * accepted_pi = "3.14159265358979323846264338327950288419716939937510"
"58209749445923078164062862089986280348253421170679\0";
char pi_printer[2];
pi_printer[0] = '0';
pi_printer[1] = '\0';
// Split off worker threads. When dividing the work, if the number of
// threads does not evenly divide into the desired number of iterations,
// give any extra iterations to the final thread. This gives the final
// thread at most (num_threads - 1) extra iterations. Notice that this
// is a 1D-grid of work, and we use function arguments this time. Also,
// remember the number of threads is held constant, thanks to #defines,
// at NUM_BLOCKS * THREADS_PER_BLOCK.
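// Worked example of the split (hypothetical default input): with 20000
// iterations and 14 * 32 = 448 threads, each thread gets 20000 / 448 = 44
// iterations, and the last thread's highlimit is clamped to 20000, so it
// performs 44 + 288 = 332 iterations (the remainder).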
dim3 numBlocks(NUM_BLOCKS);
dim3 threadsPerBlock(THREADS_PER_BLOCK);
calculate<<<numBlocks, threadsPerBlock>>>(devicetrappower, devicetrapsig_digs,
devicetrapdigits, devicemidpower, devicemidsig_digs, devicemiddigits, iterations, max_digits);
cudaDeviceSynchronize();
// Copy results back from GPU
if (cudaMemcpy(hosttrappower, devicetrappower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)),
cudaMemcpyDeviceToHost) != cudaSuccess) { printf("\nError copying memory from GPU.\n"); return 3; }
if (cudaMemcpy(hosttrapsig_digs, devicetrapsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)),
cudaMemcpyDeviceToHost) != cudaSuccess) { printf("\nError copying memory from GPU.\n"); return 3; }
if (cudaMemcpy(hosttrapdigits, devicetrapdigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char)),
cudaMemcpyDeviceToHost) != cudaSuccess) { printf("\nError copying memory from GPU.\n"); return 3; }
if (cudaMemcpy(hostmidpower, devicemidpower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)),
cudaMemcpyDeviceToHost) != cudaSuccess) { printf("\nError copying memory from GPU.\n"); return 3; }
if (cudaMemcpy(hostmidsig_digs, devicemidsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)),
cudaMemcpyDeviceToHost) != cudaSuccess) { printf("\nError copying memory from GPU.\n"); return 3; }
if (cudaMemcpy(hostmiddigits, devicemiddigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char)),
cudaMemcpyDeviceToHost) != cudaSuccess) { printf("\nError copying memory from GPU.\n"); return 3; }
cudaDeviceSynchronize();
if (cudaFree(devicetrappower) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
if (cudaFree(devicetrapsig_digs) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
if (cudaFree(devicetrapdigits) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
if (cudaFree(devicemidpower) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
if (cudaFree(devicemidsig_digs) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
if (cudaFree(devicemiddigits) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
// After worker threads end, clean up each of the partial sums
bignum * trap = bignum_init(max_digits);
bignum * mid = bignum_init(max_digits);
bignum * temp = bignum_init(max_digits);
bignum * simp = bignum_init(max_digits);
if (trap == 0 || mid == 0 || temp == 0 || simp == 0) {
printf("Error allocating memory. Now exiting.\n");
return -1;
}
for (i = 0L; i < (NUM_BLOCKS * THREADS_PER_BLOCK); i++) {
simp->power = hosttrappower[i];
simp->sig_digs = hosttrapsig_digs[i];
for (j = 0L; j < max_digits; j++) {
simp->digits[(int)j] = hosttrapdigits[(int)((i * max_digits) + j)];
}
bignum_add(temp, trap, simp);
bignum_reset(trap);
bignum_reset(simp);
bignum_set(trap, temp);
bignum_reset(temp);
simp->power = hostmidpower[i];
simp->sig_digs = hostmidsig_digs[i];
for (j = 0L; j < max_digits; j++) {
simp->digits[(int)j] = hostmiddigits[(int)((i * max_digits) + j)];
}
bignum_add(temp, mid, simp);
bignum_reset(mid);
bignum_reset(simp);
bignum_set(mid, temp);
bignum_reset(temp);
}
// Finally, Simpson's Rule is applied
bignum_mult_int(temp, mid, 2L);
bignum_reset(mid);
bignum_set(mid, temp);
bignum_reset(temp);
bignum_add(temp, trap, mid);
bignum_reset(trap);
bignum_set(trap, temp);
bignum_reset(temp);
bignum_divide_int(temp, trap, 3L);
bignum_reset(trap);
bignum_set(trap, temp);
bignum_reset(temp);
bignum_mult_int(simp, trap, 4L);
long clock_end = (long)clock();
printf("The calculated value of pi is ");
bignum_print(simp, 0L);
printf("\nThe actual value of pi is 3.");
for (i = 0L; i < (max_digits - 1L); i++) {
// This may print an extra digit or two: somewhere in the arithmetic the
// last significant digit is occasionally lost, and that loss propagates
// up so the final result comes up a place or two short. The effect is
// purely cosmetic, so the ends of the two printed numbers may simply
// not line up.
pi_printer[0] = accepted_pi[(int)(i + 2L)];
printf("%s", pi_printer);
}
printf("\nThe time taken to calculate this was %.2f seconds\n",
((float)(clock_end - clock_start)) / (float)CLOCKS_PER_SEC);
// Free global storage
free(hosttrappower);
free(hosttrapsig_digs);
free(hosttrapdigits);
free(hostmidpower);
free(hostmidsig_digs);
free(hostmiddigits);
bignum_clear(trap);
bignum_clear(mid);
bignum_clear(simp);
bignum_clear(temp);
return 0;
}
// Function executed by each thread to incrementally calculate the overall value
__global__ void calculate (long * devicetrappower, long * devicetrapsig_digs,
char * devicetrapdigits, long * devicemidpower, long * devicemidsig_digs,
char * devicemiddigits, long iterations, long max_digits)
{
// Initialize needed variables and check for errors
long threadid = threadIdx.x + (blockIdx.x * THREADS_PER_BLOCK);
long lowlimit = threadid * (iterations / (NUM_BLOCKS * THREADS_PER_BLOCK));
long highlimit = (((threadid + 1L) == (NUM_BLOCKS * THREADS_PER_BLOCK)) ? iterations :
((threadid + 1L) * (iterations / (NUM_BLOCKS * THREADS_PER_BLOCK))));
bignum * trap = bignum_init_gpu(max_digits);
bignum * mid = bignum_init_gpu(max_digits);
bignum * inverseiterations = bignum_init_gpu(max_digits);
bignum * temp_holder = bignum_init_gpu(max_digits);
bignum * temp_holder2 = bignum_init_gpu(max_digits);
bignum * inc = bignum_init_gpu(max_digits);
bignum * leftrect = bignum_init_gpu(max_digits);
bignum * rightrect = bignum_init_gpu(max_digits);
if (trap == 0 || mid == 0 || inverseiterations == 0 || temp_holder == 0 ||
temp_holder2 == 0 || inc == 0 || leftrect == 0 || rightrect == 0) {
return;
}
// Initialize values of needed variables
bignum_set_int_gpu(temp_holder, iterations);
bignum_int_divide_gpu(inverseiterations, 1L, temp_holder);
bignum_reset_gpu(temp_holder);
long i;
long k = lowlimit;
bignum_divide_int_gpu(temp_holder, inverseiterations, 2L);
bignum_set_int_gpu(inc, k);
bignum_mult_gpu(temp_holder2, inc, inverseiterations);
bignum_reset_gpu(inc);
bignum_set_gpu(inc, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_add_gpu(temp_holder2, inc, temp_holder);
bignum_reset_gpu(inc);
bignum_set_gpu(inc, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_reset_gpu(temp_holder);
// Main iteration loop. Note that the values of inverseiterations, inc,
// mid, and trap are preserved across loop iterations, as is counter k.
// inverseiterations is a constant that is stored once for simplicity.
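// In ordinary floating-point terms, each pass below accumulates one slice of
// width 1/iterations of the integrand f(x) = 1/(1 + x^2) into each running
// sum (inferred from the bignum call sequence: the trapezoid-style sum
// evaluates f at the average of the slice's two x endpoints, and the midpoint
// sum evaluates f at the running midpoint inc).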
for (i = lowlimit; i < highlimit; i++) {
// First, the trapezoid rule is used to estimate pi
bignum_reset_gpu(leftrect);
bignum_set_int_gpu(leftrect, k);
bignum_mult_gpu(temp_holder2, leftrect, inverseiterations);
bignum_reset_gpu(leftrect);
bignum_set_gpu(leftrect, temp_holder2);
bignum_reset_gpu(temp_holder2);
k++;
bignum_reset_gpu(rightrect);
bignum_set_int_gpu(rightrect, k);
bignum_mult_gpu(temp_holder2, rightrect, inverseiterations);
bignum_reset_gpu(rightrect);
bignum_set_gpu(rightrect, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_add_gpu(temp_holder, leftrect, rightrect);
bignum_divide_int_gpu(temp_holder2, temp_holder, 2L);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_mult_gpu(temp_holder2, temp_holder, temp_holder);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_add_int_gpu(temp_holder2, temp_holder, 1L);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_int_divide_gpu(temp_holder2, 1L, temp_holder);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_mult_gpu(temp_holder2, temp_holder, inverseiterations);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_add_gpu(temp_holder2, trap, temp_holder);
bignum_reset_gpu(trap);
bignum_set_gpu(trap, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_reset_gpu(temp_holder);
// Next, the midpoint rule is also used to estimate pi
bignum_set_gpu(temp_holder, inc);
bignum_add_gpu(temp_holder2, inc, inverseiterations);
bignum_reset_gpu(inc);
bignum_set_gpu(inc, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_mult_gpu(temp_holder2, temp_holder, temp_holder);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_add_int_gpu(temp_holder2, temp_holder, 1L);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_int_divide_gpu(temp_holder2, 1L, temp_holder);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_mult_gpu(temp_holder2, temp_holder, inverseiterations);
bignum_reset_gpu(temp_holder);
bignum_set_gpu(temp_holder, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_add_gpu(temp_holder2, mid, temp_holder);
bignum_reset_gpu(mid);
bignum_set_gpu(mid, temp_holder2);
bignum_reset_gpu(temp_holder2);
bignum_reset_gpu(temp_holder);
}
// Save partial result, clear memory, and exit
devicetrappower[threadid] = trap->power;
devicetrapsig_digs[threadid] = trap->sig_digs;
for (i = 0; i < max_digits; i++) {
devicetrapdigits[(threadid * max_digits) + i] = trap->digits[i];
}
devicemidpower[threadid] = mid->power;
devicemidsig_digs[threadid] = mid->sig_digs;
for (i = 0; i < max_digits; i++) {
devicemiddigits[(threadid * max_digits) + i] = mid->digits[i];
}
bignum_clear_gpu(trap);
bignum_clear_gpu(mid);
bignum_clear_gpu(inverseiterations);
bignum_clear_gpu(temp_holder);
bignum_clear_gpu(temp_holder2);
bignum_clear_gpu(inc);
bignum_clear_gpu(leftrect);
bignum_clear_gpu(rightrect);
}
// Create space for a bignum with the specified precision.
// Technically, it's also initialized if we interpret having zero
// significant digits as the number having a value of zero.
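// Illustrative examples of the representation used throughout this file
// (digits holds the significant digits most-significant first, and the value
// is digits[0].digits[1]digits[2]... x 10^power):
//   250  -> digits = {2,5}, power = 2,  sig_digs = 2
//   0.25 -> digits = {2,5}, power = -1, sig_digs = 2
//   3    -> digits = {3},   power = 0,  sig_digs = 1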
__host__ bignum * bignum_init(long int precision) {
bignum * temp_ptr = (bignum *)calloc(1, sizeof(bignum));
if (temp_ptr == 0) { return temp_ptr; }
temp_ptr->digits = (char *)calloc((int)precision, sizeof(char));
if ((temp_ptr->digits) == 0) { free(temp_ptr); return 0; }
temp_ptr->precision = precision;
return temp_ptr;
}
// Resets a bignum's value to zero. memcpy isn't used because
// why bring the string library into this just for this use?
__host__ void bignum_reset(bignum * numval) {
if ((numval->sig_digs) > 0L) {
long int i;
for (i = 0L; i < numval->precision; i++) { numval->digits[(int)i] = '\0'; }
numval->power = 0L;
numval->sig_digs = 0L;
}
return;
}
// Free memory used by a bignum when we're done with it
__host__ void bignum_clear(bignum * oldnum) {
free(oldnum->digits);
free(oldnum);
return;
}
// Set an instance of a bignum to an integer value. Note that if we can't
// initialize the temp word we need for copying, we return false (value = 0).
// We also assume that the number is non-negative since we only store
// unsigned numbers. We assume the result is initialized/reset. Finally,
// we handle zero specially by just resetting (again?) the result. Note that
// we explicitly assume the number to convert fits within the max number of
// digits. If we try to convert a number bigger than we can store, it won't work.
__host__ int bignum_set_int(bignum * numval, long int intval) {
if (intval > 0L) {
// Separate out the individual digits (stored backwards)
char * temp_word = (char *)calloc((int)(numval->precision), sizeof(char));
if (temp_word == 0) { return 0; }
long int temp_int = intval;
long int counter = 0L;
while (temp_int > 0L) {
temp_word[(int)counter] = (char)(temp_int % 10L);
temp_int = temp_int / 10L;
counter++;
}
// Detect any trailing zeros of the value that we don't need to store
// (they sit at the front of the reversed temp word, hence the 'leading' counter below)
numval->power = counter - 1L;
long int leadingzeros = 0L;
int hasleading = 1;
while (hasleading == 1) {
if (temp_word[(int)leadingzeros] != 0) { hasleading = 0; }
else { leadingzeros++; }
}
// Store final result into actual bignum variable
for (temp_int = 0L; temp_int < (counter - leadingzeros); temp_int++) {
numval->digits[(int)temp_int] = temp_word[(int)(counter - temp_int - 1L)];
}
numval->sig_digs = counter - leadingzeros;
free(temp_word);
return 1;
}
else { bignum_reset(numval); return 1; }
}
// Set an instance of a bignum to the value of another bignum. We don't assume
// they're both the same precision; just use the precision of the new number.
// We do assume that the new number has already been initialized, though.
// strncpy is not used since it quits after seeing the first zero.
__host__ void bignum_set(bignum * newnum, bignum * oldnum) {
if ((oldnum->sig_digs) > 0L) {
newnum->power = oldnum->power;
newnum->sig_digs = ((oldnum->sig_digs > newnum->precision) ?
(newnum->precision) : (oldnum->sig_digs));
long int i;
for (i = 0L; i < newnum->sig_digs; i++) {
newnum->digits[(int)i] = oldnum->digits[(int)i];
}
}
else { bignum_reset(newnum); }
return;
}
// Use printf to print the number one digit at a time. There are a few cases:
// power > significant digits: pad end with zeros
// significant digits > power: fractional digit (non-integer)
// power is negative: total value less than 1
// The second argument is the maximum number of significant digits to print.
// If it's zero, then all available digits will be printed, maxing out at
// the precision of the number (the total amount is could possibly store).
// Note that this is different from total digits printed: zeroes after a
// decimal point but before the first significant digit don't count, and we
// make sure we print at least the integral part of the number (we only
// chop off fractional portions).
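// Illustrative examples of the three cases handled below:
//   digits = {2,5},   power = 2,  sig_digs = 2 -> prints "250"  (padded with zeros)
//   digits = {1,2,5}, power = 1,  sig_digs = 3 -> prints "12.5" (fractional part)
//   digits = {2,5},   power = -1, sig_digs = 2 -> prints "0.25" (value below 1)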
__host__ void bignum_print(bignum * numval, long int maxdigits) {
long int i;
long int limit = numval->sig_digs;
if (numval->sig_digs == 0L) { printf("0"); } else {
if ((maxdigits > 0L) && (maxdigits < numval->sig_digs)) {
limit = maxdigits;
}
if (numval->power < 0L) {
printf("0.");
for (i = 1L; i < (-1L * (numval->power)); i++) { printf("0"); }
for (i = 0L; i < limit; i++) {
printf("%d", (int)(numval->digits[(int)i]));
}
}
else if (numval->sig_digs > (numval->power + 1L)) {
for (i = 0L; i <= numval->power; i++) {
printf("%d", (int)(numval->digits[(int)i]));
}
if (limit > (numval->power + 1L)) { printf("."); }
for (i = (numval->power + 1L); i < limit; i++) {
printf("%d", (int)(numval->digits[(int)i]));
}
}
else { for (i = 0L; i < numval->sig_digs; i++) {
printf("%d", (int)(numval->digits[(int)i])); }
}
if ((numval->power > 0L) && ((numval->power + 1L) > numval->sig_digs)) {
for (i = 0L; i < ((numval->power + 1L) - numval->sig_digs); i++) {
printf("0");
}
} }
fflush(stdout);
return;
}
// Adds two bignums together and stores the result. Uses the functions to
// reset and set the location of the result internally, so current contents of
// result operand will be overwritten. Like bignum_set_int, returns 1 if
// addition was successful or 0 if an error occurred. A special shortcut is
// taken if either (or both) of the operands are zero. Note that it is possible
// for large additions to cause underflow to zero. In that case, special care is
// taken to make sure the proper input operand is used. Note that we assume the
// precision of all three operands is the same. If it's not, something terrible
// like a seg fault or incorrect answer will probably occur. Most importantly,
// the result operand CANNOT be the same as one of the input operands, since
// the result is clobbered immediately and used as a scratchpad. Note that this
// is also unsigned addition: not only does it not accept negative numbers, it
// also doesn't do subtraction (which, for that matter, isn't commutative).
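// A small worked example (illustrative): adding 12.5 (digits {1,2,5}, power 1)
// and 0.75 (digits {7,5}, power -1). The powers differ by 2, so the smaller
// operand is first copied into the result shifted two digit places to the
// right, and the digit-by-digit add with carry then produces 13.25
// (digits {1,3,2,5}, power 1, sig_digs 4).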
__host__ int bignum_add(bignum * resultnum, bignum * leftnum, bignum * rightnum) {
bignum_reset(resultnum);
if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs > 0L)) {
bignum_set(resultnum, rightnum);
return 1;
}
else if ((rightnum->sig_digs == 0L) && (leftnum->sig_digs > 0L)) {
bignum_set(resultnum, leftnum);
return 1;
}
else if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs == 0L)) { return 1; }
else {
// First check for overshift: if the larger number's power is too much
// bigger than the smaller number's, the smaller will be completely lost,
// and we'll just end up with the large number as the result.
if ((((leftnum->power - rightnum->power) > 0) &&
((leftnum->power - rightnum->power) > resultnum->precision))) {
bignum_set(resultnum, leftnum);
return 1;
}
if ((((rightnum->power - leftnum->power) > 0) &&
((rightnum->power - leftnum->power) > resultnum->precision))) {
bignum_set(resultnum, rightnum);
return 1;
}
// Next, shift the smaller operand to match the larger one by copying
// it into the result operand as a partial sum. Also copy over the
// power and total significant digits into the result.
bignum * bigger;
bignum * smaller;
if ((leftnum->power - rightnum->power) >= 0L) {
bigger = leftnum;
smaller = rightnum;
}
else {
bigger = rightnum;
smaller = leftnum;
}
long int difference = bigger->power - smaller->power;
long int startdigit = smaller->sig_digs + difference;
long int transfertotal = smaller->sig_digs;
if (startdigit > resultnum->precision) {
startdigit = resultnum->precision - difference;
transfertotal = startdigit;
}
long int startdigitcopy = startdigit;
startdigit--;
long int i;
for (i = 0L; i < transfertotal; i++) {
if ((startdigit - difference) >= 0L) {
resultnum->digits[(int)startdigit] =
smaller->digits[(int)(startdigit - difference)];
}
startdigit--;
}
// Now the main addition loop: loop through each digit and add it.
// The carry from the previous digit will add to the current one.
// Note that we detect any trailing zeros to take from the sig_digs.
// Also, copy over the power and significant digits
resultnum->power = bigger->power;
resultnum->sig_digs = startdigitcopy;
if (bigger->sig_digs > resultnum->sig_digs) {
resultnum->sig_digs = bigger->sig_digs;
startdigitcopy = resultnum->sig_digs;
}
int trailingzeros = 1;
long int zerocount = 0L;
char carry = 0;
for (i = 0L; i < resultnum->sig_digs; i++) {
resultnum->digits[(int)(startdigitcopy - i - 1L)] +=
(bigger->digits[(int)(startdigitcopy - i - 1L)] + carry);
if (resultnum->digits[(int)(startdigitcopy - i - 1L)] >= 10) {
resultnum->digits[(int)(startdigitcopy - i - 1L)] -= 10;
carry = 1;
} else { carry = 0; }
if (trailingzeros == 1) {
if (resultnum->digits[(int)(startdigitcopy - i - 1L)] == '\0') {
zerocount++;
} else { trailingzeros = 0; }
}
}
// If we've got trailing zeros, subtract them from the final count of
// sig_digs. Also, if we have a carry, we need to shift everything...
resultnum->sig_digs -= zerocount;
if (carry > 0) {
transfertotal = resultnum->sig_digs;
if (transfertotal == resultnum->precision) { transfertotal--; }
startdigitcopy = transfertotal - 1L;
for (i = 0L; i < transfertotal; i++) {
if (startdigitcopy >= 0L) {
resultnum->digits[(int)(startdigitcopy + 1L)] =
resultnum->digits[(int)startdigitcopy];
}
else if ((startdigitcopy + 1L) >= 0L) {
resultnum->digits[(int)(startdigitcopy + 1L)] = 0;
}
startdigitcopy--;
}
resultnum->digits[0] = carry;
resultnum->power++;
resultnum->sig_digs++;
}
if (resultnum->sig_digs > resultnum->precision) {
resultnum->sig_digs = resultnum->precision;
}
return 1;
}
}
// A convenience wrapper that temporarily creates a new bignum out of the
// given integer, calls bignum_add with it and the other operand, and deletes
// the temporary bignum before exiting. Any problems that bignum_add encounters
// are passed back up through this function and returned to the caller.
__host__ int bignum_add_int(bignum * resultnum, bignum * leftnum, long int rightint) {
bignum_reset(resultnum);
if ((rightint == 0L) && (leftnum->sig_digs > 0L)) {
bignum_set(resultnum, leftnum);
return 1;
}
else if ((leftnum->sig_digs == 0L) && (rightint > 0L)) {
return bignum_set_int(resultnum, rightint);
}
else if ((leftnum->sig_digs == 0L) && (rightint == 0L)) { return 1; }
else {
bignum * tempnum = bignum_init(resultnum->precision);
if (tempnum == 0) { return 0; }
if (bignum_set_int(tempnum, rightint) == 0) {
bignum_clear(tempnum);
return 0;
}
int retval = bignum_add(resultnum, leftnum, tempnum);
bignum_clear(tempnum);
return retval;
}
}
// Multiplies two bignums together and stores the result. Like add, uses
// functions to reset and set the location of the result, and returns 1 upon
// success or 0 if an error occurred. A special shortcut is taken if either
// operand is zero, since the result will thus also be zero. Note that we assume
// the precision of all three operands is the same. If it's not, something
// terrible like a seg fault or incorrect answer will probably occur. Most
// importantly, the result operand CANNOT be the same as one of the input
// operands, since the result is clobbered immediately and used as a scratchpad.
// Also, note that this is unsigned: it assumes both operands are positive.
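// The routine below is ordinary long multiplication on a double-width
// scratchpad: each digit of the smaller operand multiplies the whole bigger
// operand and the partial products are accumulated with carries. Illustrative
// example: 12 * 34 accumulates 4*12 = 48 and, shifted one place, 3*12 = 36,
// giving 408; the powers of the operands simply add, as in scientific notation.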
__host__ int bignum_mult(bignum * resultnum, bignum * leftnum, bignum * rightnum) {
bignum_reset(resultnum);
if ((leftnum->sig_digs == 0L) || (rightnum->sig_digs == 0L)) { return 1; }
else {
// Initialize the scratchpad and find the digit limits
char * temp_word = (char *)calloc((int)(2L * (resultnum->precision)), sizeof(char));
if (temp_word == 0) { return 0; }
bignum * bigger;
bignum * smaller;
if (((signed long int)leftnum->sig_digs - (signed long int)rightnum->sig_digs) >= 0L) {
bigger = leftnum;
smaller = rightnum;
}
else {
bigger = rightnum;
smaller = leftnum;
}
long int bigstart = (bigger->sig_digs) - 1L;
long int smallstart = (smaller->sig_digs) - 1L;
long int bigcounter, smallcounter;
char carry = 0;
// Perform the shift-addition loop. We choose to loop over each
// digit of the smaller number for fewer overall iterations. If
// the current bigloop has a zero, we can just skip that iteration.
// Also, record the final carry, power, and sig_digs values.
for (bigcounter = 0L; bigcounter < (smaller->sig_digs); bigcounter++) {
if (smaller->digits[(int)(smallstart - bigcounter)] != '\0') {
carry = 0;
for(smallcounter = 0L; smallcounter < (bigger->sig_digs); smallcounter++) {
temp_word[(int)((2L * (resultnum->precision)) - smallcounter -
bigcounter - 1L)] += (carry + (smaller->digits[(int)(smallstart -
bigcounter)] * bigger->digits[(int)(bigstart - smallcounter)]));
carry = temp_word[(int)((2L * (resultnum->precision)) -
smallcounter - bigcounter - 1L)] / 10;
temp_word[(int)((2L * (resultnum->precision)) - smallcounter -
bigcounter - 1L)] %= 10;
}
temp_word[(int)((2L * (resultnum->precision)) - bigcounter -
(bigger->sig_digs) - 1L)] = carry;
}
}
resultnum->power = ((bigger->power) + (smaller->power));
resultnum->sig_digs = ((bigger->sig_digs) + (smaller->sig_digs));
// Adjust for lack of a final carry or trailing zeros.
if (carry < 1) {
(resultnum->sig_digs)--;
(resultnum->power)--;
}
(resultnum->power)++;
int trailingzeros = 1;
long int zerocount = 0L;
long int i = (2L * (resultnum->precision) - 1L);
while (trailingzeros == 1) {
if (temp_word[(int)i] == '\0') {
zerocount++;
} else { trailingzeros = 0; }
i--;
}
resultnum->sig_digs -= zerocount;
if ((resultnum->sig_digs) > (resultnum->precision)) {
resultnum->sig_digs = (resultnum->precision);
}
// Finally, copy from the temp word into the result, taking into
// account any digits we may lose due to precision.
long int tempstart = (2L * (resultnum->precision)) - ((bigger->sig_digs) +
(smaller->sig_digs));
if (carry < 1) { tempstart++; }
for (i = 0L; i < (resultnum->sig_digs); i++) {
resultnum->digits[(int)i] = temp_word[(int)(tempstart + i)];
}
free(temp_word);
return 1;
}
}
// Like bignum_add_int, a convenience wrapper that creates a temporary bignum
// out of the integer and passes it to bignum_mult. Any problems encountered
// in client functions are passed back up to the original caller.
__host__ int bignum_mult_int(bignum * resultnum, bignum * leftnum, long int rightint) {
bignum_reset(resultnum);
if ((leftnum->sig_digs == 0L) || (rightint == 0L)) { return 1; }
else {
bignum * tempnum = bignum_init(resultnum->precision);
if (tempnum == 0) { return 0; }
if (bignum_set_int(tempnum, rightint) == 0) {
bignum_clear(tempnum);
return 0;
}
int retval = bignum_mult(resultnum, leftnum, tempnum);
bignum_clear(tempnum);
return retval;
}
}
// Divides two bignums. Taken in terms of a fraction, leftnum is the numerator
// and rightnum is the denominator. Performs an explicit check to make sure
// the denominator is not zero, and returns 0 (an error) if it is. Returns 1 upon
// success or 0 if an error occurs. A special shortcut is taken if the numerator is
// zero. Note that we assume the precision of all three operands is the same. If it's
// not, something terrible like a seg fault or incorrect answer will probably occur.
// Most importantly, the result operand CANNOT be the same as one of the input
// operands, since the result is clobbered immediately and used as a scratchpad.
// Also, note that this is unsigned: it assumes both operands are positive.
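// The routine below is long division by repeated subtraction: for each result
// digit, the denominator is subtracted from the current window of the
// scratchpad until it no longer fits, and the number of subtractions becomes
// that digit. Illustrative example: 1 / 3 yields the digit 3 at every
// position (10 - 3 - 3 - 3 leaves 1), so the result is digits {3,3,3,...}
// with power -1, i.e. 0.333..., truncated at the result's precision.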
__host__ int bignum_divide(bignum * resultnum, bignum * numerator, bignum * denominator) {
bignum_reset(resultnum);
if (denominator->sig_digs == 0L) { return 0; }
else if (numerator->sig_digs == 0L) { return 1; }
else {
// Initialize the scratchpad and initially copy the numerator into it.
// Also initialize the result's power.
char * temp_word = (char *)calloc((int)(2L *
(resultnum->precision) + 2L), sizeof(char)); // May only need to be + 1L
if (temp_word == 0) { return 0; }
long int i;
for (i = 0L; i < numerator->sig_digs; i++) {
temp_word[(int)(i + 1L)] = numerator->digits[(int)i];
}
resultnum->power = (numerator->power - denominator->power);
long int sigdigctr = 0L;
long int numeratorindex = 0L;
// First see if we need to "shift" the numerator by comparing it.
i = ((denominator->sig_digs) - 1L);
int denom_bigger = 1;
while ((i >= 0L) && (denom_bigger == 1)) {
if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) >
(temp_word[(int)((denominator->sig_digs) - i)])) {
i = 0L;
}
else if ((denominator->digits[(int)((denominator->sig_digs) -
i - 1L)]) < (temp_word[(int)((denominator->sig_digs) - i)])) {
denom_bigger = 0;
}
else if (((denominator->digits[(int)((denominator->sig_digs) - i -
1L)]) == (temp_word[(int)((denominator->sig_digs) - i)])) && (i == 0L)) {
denom_bigger = 0;
}
i--;
}
if (denom_bigger == 1) {
numeratorindex++;
(resultnum->power)--;
}
// Now the main division loop. Note that there's two ways to terminate:
// either we've filled the entire precision of the result word and are
// forced to truncate our result, or our answer divides exactly. In the
// second case, once we've exhausted the numerator's significant digits
// and our temp word contains nothing but zeros, we can end early since
// all subsequent iterations would contribute only zeros as well. Note
// that special care will be taken to detect extra zeros at the end of
// the result so that the sig_digs is recorded correctly. Also, we don't
// round, we truncate, which doesn't minimize error.
int nonzero = 1;
while ((sigdigctr < (resultnum->precision)) && (nonzero == 1)) {
// First run the subtraction loop.
char current_digit = 0;
int numer_bigger = 1;
while (numer_bigger == 1) {
// To subtract, first run a comparison to see if the numerator
// is bigger. If it is, increment the counter and subtract.
i = ((denominator->sig_digs) - 1L);
denom_bigger = 1;
if (temp_word[(int)numeratorindex] > 0) { denom_bigger = 0; }
while ((i >= 0L) && (denom_bigger == 1)) {
if ((denominator->digits[(int)((denominator->sig_digs) -
i - 1L)]) > (temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)])) {
i = 0L;
}
else if ((denominator->digits[(int)((denominator->sig_digs) -
i - 1L)]) < (temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)])) {
denom_bigger = 0;
}
else if (((denominator->digits[(int)((denominator->sig_digs) -
i - 1L)]) == (temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)])) && (i == 0L)) {
denom_bigger = 0;
}
i--;
}
if (denom_bigger == 1) {
numer_bigger = 0;
}
// Increment counter and perform subtraction loop.
if (numer_bigger == 1) {
current_digit++;
for (i = 0L; i < (denominator->sig_digs); i++) {
temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)] -= (denominator->digits[
(int)((denominator->sig_digs) - i - 1L)]);
if ((temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)]) < 0) {
temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)] += 10L;
(temp_word[(int)((denominator->sig_digs) +
numeratorindex - i - 1L)]) -= 1L;
}
}
}
}
// If we're past all of the numerator's significant digits, run
// zero detection on it to see if we can end early.
if (sigdigctr > (numerator->sig_digs)) { // May only need to be >=
long int zerocounter = 0L;
i = 0L;
while ((i == zerocounter) && (i <= (denominator->sig_digs))) {
if ((temp_word[(int)(numeratorindex + i)]) < 1) { zerocounter++; }
i++;
}
if (zerocounter == ((denominator->sig_digs) + 1L)) { nonzero = 0; }
}
// Once we have obtained the proper digit in the result, save it.
if (sigdigctr < resultnum->precision) {
resultnum->digits[(int)sigdigctr] = current_digit;
}
sigdigctr++;
numeratorindex++;
}
// Record the result's sig digs, taking care to detect trailing zeros.
resultnum->sig_digs = sigdigctr;
int trailingzeros = 1;
long int zerocount = 0L;
i = sigdigctr - 1L;
while (trailingzeros == 1) {
if (resultnum->digits[(int)i] == '\0') {
zerocount++;
} else { trailingzeros = 0; }
i--;
}
(resultnum->sig_digs) -= zerocount;
free (temp_word);
return 1;
}
}
// A convenience wrapper that creates a temporary bignum out of the integer.
// Since division is not commutative, two wrappers are given. Any problems
// encountered in client functions are passed back up to the original caller.
__host__ int bignum_int_divide(bignum * resultnum, long int leftint, bignum * rightnum) {
bignum_reset(resultnum);
if (rightnum->sig_digs == 0L) { return 0; }
else if (leftint == 0L) { return 1; }
else {
bignum * tempnum = bignum_init(resultnum->precision);
if (tempnum == 0) { return 0; }
if (bignum_set_int(tempnum, leftint) == 0) {
bignum_clear(tempnum);
return 0;
}
int retval = bignum_divide(resultnum, tempnum, rightnum);
bignum_clear(tempnum);
return retval;
}
}
// A convenience wrapper that creates a temporary bignum out of the integer.
// Since division is not commutative, two wrappers are given. Any problems
// encountered in client functions are passed back up to the original caller.
__host__ int bignum_divide_int(bignum * resultnum, bignum * leftnum, long int rightint) {
bignum_reset(resultnum);
if (rightint == 0L) { return 0; }
else if (leftnum->sig_digs == 0L) { return 1; }
else {
bignum * tempnum = bignum_init(resultnum->precision);
if (tempnum == 0) { return 0; }
if (bignum_set_int(tempnum, rightint) == 0) {
bignum_clear(tempnum);
return 0;
}
int retval = bignum_divide(resultnum, leftnum, tempnum);
bignum_clear(tempnum);
return retval;
}
}
// Create space for a bignum with the specified precision.
// Technically, it's also initialized if we interpret having zero
// significant digits as the number having a value of zero.
__device__ bignum * bignum_init_gpu(long int precision) {
bignum * temp_ptr = (bignum *)malloc(sizeof(bignum));
if (temp_ptr == 0) { return temp_ptr; }
temp_ptr->digits = (char *)malloc((int)(precision * sizeof(char)));
if ((temp_ptr->digits) == 0) { temp_ptr = 0; return temp_ptr; }
int i;
for (i = 0; i < precision; i++) { temp_ptr->digits[i] = '\0'; }
temp_ptr->power = 0L;
temp_ptr->sig_digs = 0L;
temp_ptr->precision = precision;
return temp_ptr;
}
// Resets a bignum's value to zero. memcpy isn't used because
// why bring the string library into this just for this use?
__device__ void bignum_reset_gpu(bignum * numval) {
if ((numval->sig_digs) > 0L) {
long int i;
for (i = 0L; i < numval->precision; i++) { numval->digits[(int)i] = '\0'; }
numval->power = 0L;
numval->sig_digs = 0L;
}
return;
}
// Free memory used by a bignum when we're done with it
__device__ void bignum_clear_gpu(bignum * oldnum) {
free(oldnum->digits);
free(oldnum);
return;
}
// Set an instance of a bignum to an integer value. Note that if we can't
// initialize the temp word we need for copying, we return false (value = 0).
// We also assume that the number is non-negative since we only store
// unsigned numbers. We assume the result is initialized/reset. Finally,
// we handle zero specially by just resetting (again?) the result. Note that
// we explicitly assume the number to convert fits within the max number of
// digits. If we try to convert a number bigger than we can store, it won't work.
__device__ int bignum_set_int_gpu(bignum * numval, long int intval) {
if (intval > 0L) {
// Separate out the individual digits (stored backwards)
char * temp_word = (char *)malloc((int)(numval->precision * sizeof(char)));
if (temp_word == 0) { return 0; }
long int i;
for (i = 0; i < numval->precision; i++) { temp_word[(int)i] = '\0'; }
long int temp_int = intval;
long int counter = 0L;
while (temp_int > 0L) {
temp_word[(int)counter] = (char)(temp_int % 10L);
temp_int = temp_int / 10L;
counter++;
}
// Detect any trailing zeros of the value that we don't need to store
// (they sit at the front of the reversed temp word, hence the 'leading' counter below)
numval->power = counter - 1L;
long int leadingzeros = 0L;
int hasleading = 1;
while (hasleading == 1) {
if (temp_word[(int)leadingzeros] != 0) { hasleading = 0; }
else { leadingzeros++; }
}
// Store final result into actual bignum variable
for (temp_int = 0L; temp_int < (counter - leadingzeros); temp_int++) {
numval->digits[(int)temp_int] = temp_word[(int)(counter - temp_int - 1L)];
}
numval->sig_digs = counter - leadingzeros;
free(temp_word);
return 1;
}
else { bignum_reset_gpu(numval); return 1; }
}
// Set an instance of a bignum to the value of another bignum. We don't assume
// they're both the same precision; just use the precision of the new number.
// We do assume that the new number has already been initialized, though.
// strncpy is not used since it quits after seeing the first zero.
__device__ void bignum_set_gpu(bignum * newnum, bignum * oldnum) {
if ((oldnum->sig_digs) > 0L) {
newnum->power = oldnum->power;
newnum->sig_digs = ((oldnum->sig_digs > newnum->precision) ?
(newnum->precision) : (oldnum->sig_digs));
long int i;
for (i = 0L; i < newnum->sig_digs; i++) {
newnum->digits[(int)i] = oldnum->digits[(int)i];
}
}
else { bignum_reset_gpu(newnum); }
return;
}
// Adds two bignums together and stores the result. Uses the functions to
// reset and set the location of the result internally, so current contents of
// result operand will be overwritten. Like bignum_set_int, returns 1 if
// addition was successful or 0 if an error occurred. A special shortcut is
// taken if either (or both) of the operands are zero. Note that it is possible
// for large additions to cause underflow to zero. In that case, special care is
// taken to make sure the proper input operand is used. Note that we assume the
// precision of all three operands is the same. If it's not, something terrible
// like a seg fault or incorrect answer will probably occur. Most importantly,
// the result operand CANNOT be the same as one of the input operands, since
// the result is clobbered immediately and used as a scratchpad. Note that this
// is also unsigned addition: not only does it not accept negative numbers, it
// also doesn't do subtraction (which, for that matter, isn't commutative).
__device__ int bignum_add_gpu(bignum * resultnum, bignum * leftnum, bignum * rightnum) {
bignum_reset_gpu(resultnum);
if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs > 0L)) {
bignum_set_gpu(resultnum, rightnum);
return 1;
}
else if ((rightnum->sig_digs == 0L) && (leftnum->sig_digs > 0L)) {
bignum_set_gpu(resultnum, leftnum);
return 1;
}
else if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs == 0L)) { return 1; }
else {
// First check for overshift: if the larger number's power is too much
// bigger than the smaller number's, the smaller will be completely lost,
// and we'll just end up with the large number as the result.
if ((((leftnum->power - rightnum->power) > 0) &&
((leftnum->power - rightnum->power) > resultnum->precision))) {
bignum_set_gpu(resultnum, leftnum);
return 1;
}
if ((((rightnum->power - leftnum->power) > 0) &&
((rightnum->power - leftnum->power) > resultnum->precision))) {
bignum_set_gpu(resultnum, rightnum);
return 1;
}
// Next, shift the smaller operand to match the larger one by copying
// it into the result operand as a partial sum. Also copy over the
// power and total significant digits into the result.
bignum * bigger;
bignum * smaller;
if ((leftnum->power - rightnum->power) >= 0L) {
bigger = leftnum;
smaller = rightnum;
}
else {
bigger = rightnum;
smaller = leftnum;
}
long int difference = bigger->power - smaller->power;
long int startdigit = smaller->sig_digs + difference;
long int transfertotal = smaller->sig_digs;
if (startdigit > resultnum->precision) {
startdigit = resultnum->precision - difference;
transfertotal = startdigit;
}
long int startdigitcopy = startdigit;
startdigit--;
long int i;
for (i = 0L; i < transfertotal; i++) {
if ((startdigit - difference) >= 0L) {
resultnum->digits[(int)startdigit] =
smaller->digits[(int)(startdigit - difference)];
}
startdigit--;
}
// Now the main addition loop: loop through each digit and add it.
// The carry from the previous digit will add to the current one.
// Note that we detect any trailing zeros to take from the sig_digs.
// Also, copy over the power and significant digits
resultnum->power = bigger->power;
resultnum->sig_digs = startdigitcopy;
if (bigger->sig_digs > resultnum->sig_digs) {
resultnum->sig_digs = bigger->sig_digs;
startdigitcopy = resultnum->sig_digs;
}
int trailingzeros = 1;
long int zerocount = 0L;
char carry = 0;
for (i = 0L; i < resultnum->sig_digs; i++) {
resultnum->digits[(int)(startdigitcopy - i - 1L)] +=
(bigger->digits[(int)(startdigitcopy - i - 1L)] + carry);
if (resultnum->digits[(int)(startdigitcopy - i - 1L)] >= 10) {
resultnum->digits[(int)(startdigitcopy - i - 1L)] -= 10;
carry = 1;
} else { carry = 0; }
if (trailingzeros == 1) {
if (resultnum->digits[(int)(startdigitcopy - i - 1L)] == '\0') {
zerocount++;
} else { trailingzeros = 0; }
}
}
// If we've got trailing zeros, subtract them from the final count of
// sig_digs. Also, if we have a carry, we need to shift everything...
resultnum->sig_digs -= zerocount;
if (carry > 0) {
transfertotal = resultnum->sig_digs;
if (transfertotal == resultnum->precision) { transfertotal--; }
startdigitcopy = transfertotal - 1L;
for (i = 0L; i < transfertotal; i++) {
if (startdigitcopy >= 0L) {
resultnum->digits[(int)(startdigitcopy + 1L)] =
resultnum->digits[(int)startdigitcopy];
}
else if ((startdigitcopy + 1L) >= 0L) {
resultnum->digits[(int)(startdigitcopy + 1L)] = '\0';
}
startdigitcopy--;
}
resultnum->digits[0] = carry;
resultnum->power++;
resultnum->sig_digs++;
}
if (resultnum->sig_digs > resultnum->precision) {
resultnum->sig_digs = resultnum->precision;
}
return 1;
}
}
// A convenience wrapper that temporarily creates a new bignum out of the
// given integer, calls bignum_add with it and the other operand, and deletes
// the temporary bignum before exiting. Any problems that bignum_add encounters
// are passed back up through this function and returned to the caller.
__device__ int bignum_add_int_gpu(bignum * resultnum, bignum * leftnum, long int rightint) {
bignum_reset_gpu(resultnum);
if ((rightint == 0L) && (leftnum->sig_digs > 0L)) {
bignum_set_gpu(resultnum, leftnum);
return 1;
}
else if ((leftnum->sig_digs == 0L) && (rightint > 0L)) {
return bignum_set_int_gpu(resultnum, rightint);
}
else if ((leftnum->sig_digs == 0L) && (rightint == 0L)) { return 1; }
else {
bignum * tempnum = bignum_init_gpu(resultnum->precision);
if (tempnum == 0) { return 0; }
if (bignum_set_int_gpu(tempnum, rightint) == 0) {
bignum_clear_gpu(tempnum);
return 0;
}
int retval = bignum_add_gpu(resultnum, leftnum, tempnum);
bignum_clear_gpu(tempnum);
return retval;
}
}
// Multiplies two bignums together and stores the result. Like add, uses
// functions to reset and set the location of the result, and returns 1 upon
// success or 0 if an error occurred. A special shortcut is taken if either
// operand is zero, since the result will thus also be zero. Note that we assume
// the precision of all three operands is the same. If it's not, something
// terrible like a seg fault or incorrect answer will probably occur. Most
// importantly, the result operand CANNOT be the same as one of the input
// operands, since the result is clobbered immediately and used as a scratchpad.
// Also, note that this is unsigned: it assumes both operands are positive.
__device__ int bignum_mult_gpu(bignum * resultnum, bignum * leftnum, bignum * rightnum) {
bignum_reset_gpu(resultnum);
if ((leftnum->sig_digs == 0L) || (rightnum->sig_digs == 0L)) { return 1; }
else {
// Initialize the scratchpad and find the digit limits
char * temp_word = (char *)malloc((int)(2L * (resultnum->precision) * sizeof(char)));
if (temp_word == 0) { return 0; }
long int i;
for (i = 0; i < (2L * resultnum->precision); i++) { temp_word[(int)i] = '\0'; }
bignum * bigger;
bignum * smaller;
if (((signed long int)leftnum->sig_digs - (signed long int)rightnum->sig_digs) >= 0L) {
bigger = leftnum;
smaller = rightnum;
}
else {
bigger = rightnum;
smaller = leftnum;
}
long int bigstart = (bigger->sig_digs) - 1L;
long int smallstart = (smaller->sig_digs) - 1L;
long int bigcounter, smallcounter;
char carry = 0;
// Perform the shift-addition loop. We choose to loop over each
// digit of the smaller number for fewer overall iterations. If
// the current bigloop has a zero, we can just skip that iteration.
// Also, record the final carry, power, and sig_digs values.
for (bigcounter = 0L; bigcounter < (smaller->sig_digs); bigcounter++) {
if (smaller->digits[(int)(smallstart - bigcounter)] != '\0') {
carry = 0;
for(smallcounter = 0L; smallcounter < (bigger->sig_digs); smallcounter++) {
temp_word[(int)((2L * (resultnum->precision)) - smallcounter -
bigcounter - 1L)] += (carry + (smaller->digits[(int)(smallstart -
bigcounter)] * bigger->digits[(int)(bigstart - smallcounter)]));
carry = temp_word[(int)((2L * (resultnum->precision)) -
smallcounter - bigcounter - 1L)] / 10;
temp_word[(int)((2L * (resultnum->precision)) - smallcounter -
bigcounter - 1L)] %= 10;
}
temp_word[(int)((2L * (resultnum->precision)) - bigcounter -
(bigger->sig_digs) - 1L)] = carry;
}
}
resultnum->power = ((bigger->power) + (smaller->power));
resultnum->sig_digs = ((bigger->sig_digs) + (smaller->sig_digs));
// Adjust for lack of a final carry or trailing zeros.
if (carry < 1) {
(resultnum->sig_digs)--;
(resultnum->power)--;
}
(resultnum->power)++;
int trailingzeros = 1;
long int zerocount = 0L;
i = (2L * (resultnum->precision) - 1L);
while (trailingzeros == 1) {
if (temp_word[(int)i] == '\0') {
zerocount++;
} else { trailingzeros = 0; }
i--;
}
resultnum->sig_digs -= zerocount;
if ((resultnum->sig_digs) > (resultnum->precision)) {
resultnum->sig_digs = (resultnum->precision);
}
// Finally, copy from the temp word into the result, taking into
// account any digits we may lose due to precision.
long int tempstart = (2L * (resultnum->precision)) - ((bigger->sig_digs) +
(smaller->sig_digs));
if (carry < 1) { tempstart++; }
for (i = 0L; i < (resultnum->sig_digs); i++) {
resultnum->digits[(int)i] = temp_word[(int)(tempstart + i)];
}
free(temp_word);
return 1;
}
}
// Like bignum_add_int, a convenience wrapper that creates a temporary bignum
// out of the integer and passes it to bignum_mult. Any problems encountered
// in client functions are passed back up to the original caller.
__device__ int bignum_mult_int_gpu(bignum * resultnum, bignum * leftnum, long int rightint) {
bignum_reset_gpu(resultnum);
if ((leftnum->sig_digs == 0L) || (rightint == 0L)) { return 1; }
else {
bignum * tempnum = bignum_init_gpu(resultnum->precision);
if (tempnum == 0) { return 0; }
if (bignum_set_int_gpu(tempnum, rightint) == 0) {
bignum_clear_gpu(tempnum);
return 0;
}
int retval = bignum_mult_gpu(resultnum, leftnum, tempnum);
bignum_clear_gpu(tempnum);
return retval;
}
}
// Divides two bignums. Taken in terms of a fraction, leftnum is the numerator
// and rightnum is the denominator. Performs an explicit check to make sure
// the denominator is not zero, and returns 0 (an error) if it is. Returns 1 upon
// success or 0 if an error occurs. A special shortcut is taken if the numerator is
// zero. Note that we assume the precision of all three operands is the same. If it's
// not, something terrible like a seg fault or incorrect answer will probably occur.
// Most importantly, the result operand CANNOT be the same as one of the input
// operands, since the result is clobbered immediately and used as a scratchpad.
// Also, note that this is unsigned: it assumes both operands are positive.
__device__ int bignum_divide_gpu(bignum * resultnum, bignum * numerator, bignum * denominator) {
bignum_reset_gpu(resultnum);
if (denominator->sig_digs == 0L) { return 0; }
else if (numerator->sig_digs == 0L) { return 1; }
else {
// Initialize the scratchpad and initially copy the numerator into it.
// Also initialize the result's power.
char * temp_word = (char *)malloc((int)(2L *
(resultnum->precision) + 2L * sizeof(char))); // May only need to be + 1L
if (temp_word == 0) { return 0; }
long int i;
temp_word[0] = '\0';
for (i = 0L; i < numerator->sig_digs; i++) {
temp_word[(int)(i + 1L)] = numerator->digits[(int)i];
}
for (i = (1L + numerator->sig_digs); i <
(2L * resultnum->precision + 2L); i++) { temp_word[(int)i] = '\0'; }
resultnum->power = (numerator->power - denominator->power);
long int sigdigctr = 0L;
long int numeratorindex = 0L;
// First see if we need to "shift" the numerator by comparing it.
i = ((denominator->sig_digs) - 1L);
int denom_bigger = 1;
while ((i >= 0L) && (denom_bigger == 1)) {
if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) >
(temp_word[(int)((denominator->sig_digs) - i)])) {
i = 0L;
}
else if ((denominator->digits[(int)((denominator->sig_digs) -
i - 1L)]) < (temp_word[(int)((denominator->sig_digs) - i)])) {
denom_bigger = 0;
}
else if (((denominator->digits[(int)((denominator->sig_digs) - i -
1L)]) == (temp_word[(int)((denominator->sig_digs) - i)])) && (i == 0L)) {
denom_bigger = 0;
}
i--;
}
if (denom_bigger == 1) {
numeratorindex++;
(resultnum->power)--;
}
// Now the main division loop. Note that there's two ways to terminate:
// either we've filled the entire precision of the result word and are
// forced to truncate our result, or our answer divides exactly. In the
// second case, once we've exhausted the numerator's significant digits
// and our temp word contains nothing but zeros, we can end early since
// all subsequent iterations would contribute only zeros as well. Note
// that special care will be taken to detect extra zeros at the end of
// the result so that the sig_digs is recorded correctly. Also, we don't
// round, we truncate, which doesn't minimize error.
int nonzero = 1;
while ((sigdigctr < (resultnum->precision)) && (nonzero == 1)) {
// First run the subtraction loop.
char current_digit = 0;
int numer_bigger = 1;
while (numer_bigger == 1) {
// To subtract, first run a comparison to see if the numerator
// is bigger. If it is, increment the counter and subtract.
i = ((denominator->sig_digs) - 1L);
denom_bigger = 1;
if (temp_word[(int)numeratorindex] > 0) { denom_bigger = 0; }
while ((i >= 0L) && (denom_bigger == 1)) {
if ((denominator->digits[(int)((denominator->sig_digs) -
i - 1L)]) > (temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)])) {
i = 0L;
}
else if ((denominator->digits[(int)((denominator->sig_digs) -
i - 1L)]) < (temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)])) {
denom_bigger = 0;
}
else if (((denominator->digits[(int)((denominator->sig_digs) -
i - 1L)]) == (temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)])) && (i == 0L)) {
denom_bigger = 0;
}
i--;
}
if (denom_bigger == 1) {
numer_bigger = 0;
}
// Increment counter and perform subtraction loop.
if (numer_bigger == 1) {
current_digit++;
for (i = 0L; i < (denominator->sig_digs); i++) {
temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)] -= (denominator->digits[
(int)((denominator->sig_digs) - i - 1L)]);
if ((temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)]) < 0) {
temp_word[(int)((denominator->sig_digs) +
numeratorindex - i)] += 10L;
(temp_word[(int)((denominator->sig_digs) +
numeratorindex - i - 1L)]) -= 1L;
}
}
}
}
// If we're past all of the numerator's significant digits, run
// zero detection on it to see if we can end early.
if (sigdigctr > (numerator->sig_digs)) { // May only need to be >=
long int zerocounter = 0L;
i = 0L;
while ((i == zerocounter) && (i <= (denominator->sig_digs))) {
if ((temp_word[(int)(numeratorindex + i)]) < 1) { zerocounter++; }
i++;
}
if (zerocounter == ((denominator->sig_digs) + 1L)) { nonzero = 0; }
}
// Once we have obtained the proper digit in the result, save it.
if (sigdigctr < resultnum->precision) {
resultnum->digits[(int)sigdigctr] = current_digit;
}
sigdigctr++;
numeratorindex++;
}
// Record the result's sig digs, taking care to detect trailing zeros.
resultnum->sig_digs = sigdigctr;
int trailingzeros = 1;
long int zerocount = 0L;
i = sigdigctr - 1L;
while (trailingzeros == 1) {
if (resultnum->digits[(int)i] == '\0') {
zerocount++;
} else { trailingzeros = 0; }
i--;
}
(resultnum->sig_digs) -= zerocount;
free(temp_word);
return 1;
}
}
// A convenience wrapper that creates a temporary bignum out of the integer.
// Since division is not commutative, two wrappers are given. Any problems
// encountered in client functions are passed back up to the original caller.
__device__ int bignum_int_divide_gpu(bignum * resultnum, long int leftint, bignum * rightnum) {
bignum_reset_gpu(resultnum);
if (rightnum->sig_digs == 0L) { return 0; }
else if (leftint == 0L) { return 1; }
else {
bignum * tempnum = bignum_init_gpu(resultnum->precision);
if (tempnum == 0) { return 0; }
if (bignum_set_int_gpu(tempnum, leftint) == 0) {
bignum_clear_gpu(tempnum);
return 0;
}
int retval = bignum_divide_gpu(resultnum, tempnum, rightnum);
bignum_clear_gpu(tempnum);
return retval;
}
}
// A convenience wrapper that creates a temporary bignum out of the integer.
// Since division is not commutative, two wrappers are given. Any problems
// encountered in client functions are passed back up to the original caller.
__device__ int bignum_divide_int_gpu(bignum * resultnum, bignum * leftnum, long int rightint) {
bignum_reset_gpu(resultnum);
if (rightint == 0L) { return 0; }
else if (leftnum->sig_digs == 0L) { return 1; }
else {
bignum * tempnum = bignum_init_gpu(resultnum->precision);
if (tempnum == 0) { return 0; }
if (bignum_set_int_gpu(tempnum, rightint) == 0) {
bignum_clear_gpu(tempnum);
return 0;
}
int retval = bignum_divide_gpu(resultnum, leftnum, tempnum);
bignum_clear_gpu(tempnum);
return retval;
}
}
|
45de96d1694f48702b6200c715f14b15b54bfe33.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Felipe Aramburu <[email protected]>
* Copyright 2018 Alexander Ocsa <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gdf/gdf.h>
#include <gdf/utils.h>
#include <gdf/errorutils.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/remove.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/iterator_adaptor.h>
#include <thrust/iterator/transform_iterator.h>
#include "thrust_rmm_allocator.h"
//std lib
#include <map>
// thrust::device_vector set to use rmmAlloc and rmmFree.
template <typename T>
using Vector = thrust::device_vector<T, rmm_allocator<T>>;
// Adapted from the repeat_iterator example in Thrust's iterator_adaptor documentation.
template<typename Iterator>
class repeat_iterator
: public thrust::iterator_adaptor<
repeat_iterator<Iterator>, // the first template parameter is the name of the iterator we're creating
Iterator // the second template parameter is the name of the iterator we're adapting
// we can use the default for the additional template parameters
>
{
public:
// shorthand for the name of the iterator_adaptor we're deriving from
typedef thrust::iterator_adaptor<
repeat_iterator<Iterator>,
Iterator
> super_t;
__host__ __device__
repeat_iterator(const Iterator &x, int n) : super_t(x), begin(x), n(n) {}
// befriend thrust::iterator_core_access to allow it access to the private interface below
friend class thrust::iterator_core_access;
private:
// repeat each element of the adapted range n times
unsigned int n;
// used to keep track of where we began
const Iterator begin;
// it is private because only thrust::iterator_core_access needs access to it
__host__ __device__
typename super_t::reference dereference() const
{
return *(begin + (this->base() - begin) / n);
}
};
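// Illustrative usage (hypothetical values): given a device_vector v holding
// {a, b, c}, repeat_iterator<decltype(v.begin())>(v.begin(), 3) dereferences
// as a, a, a, b, b, b, c, c, c as it is advanced one step at a time.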
typedef repeat_iterator<thrust::detail::normal_iterator<thrust::device_ptr<gdf_valid_type> > > gdf_valid_iterator;
gdf_size_type get_number_of_bytes_for_valid (gdf_size_type column_size) {
return sizeof(gdf_valid_type) * (column_size + GDF_VALID_BITSIZE - 1) / GDF_VALID_BITSIZE;
}
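// For example, assuming GDF_VALID_BITSIZE is 8 and gdf_valid_type is one byte,
// a column of 100 rows needs (100 + 8 - 1) / 8 = 13 valid bytes.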
// note: functor inherits from unary_function
struct modulus_bit_width : public thrust::unary_function<gdf_size_type,gdf_size_type>
{
gdf_size_type n_bytes;
gdf_size_type column_size;
modulus_bit_width (gdf_size_type n_bytes, gdf_size_type column_size) {
this->n_bytes = n_bytes;
this->column_size = column_size;
}
__host__ __device__
gdf_size_type operator()(gdf_size_type x) const
{
gdf_size_type col_position = x / 8;
gdf_size_type length_col = n_bytes != col_position+1 ? GDF_VALID_BITSIZE : column_size - GDF_VALID_BITSIZE * (n_bytes - 1);
//return x % GDF_VALID_BITSIZE;
return (length_col - 1) - (x % 8);
// x <<
}
};
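// Illustrative example (assuming GDF_VALID_BITSIZE is 8): with
// column_size = 10 and n_bytes = 2, rows 0-7 fall in the first valid byte and
// map to bit positions 7,6,...,0, while rows 8 and 9 fall in the final,
// partially filled byte (length 2) and map to bit positions 1 and 0.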
struct shift_left: public thrust::unary_function<gdf_valid_type,gdf_valid_type>
{
gdf_valid_type num_bits;
shift_left(gdf_valid_type num_bits): num_bits(num_bits){
}
__host__ __device__
gdf_valid_type operator()(gdf_valid_type x) const
{
return x << num_bits;
}
};
struct shift_right: public thrust::unary_function<gdf_valid_type,gdf_valid_type>
{
gdf_valid_type num_bits;
bool not_too_many;
shift_right(gdf_valid_type num_bits, bool not_too_many)
: num_bits(num_bits), not_too_many(not_too_many){
}
__host__ __device__
gdf_valid_type operator()(gdf_valid_type x) const
{
//if you want to force the shift to be fill bits with 0 you need to use an unsigned type
/*if (not_too_many) { // is the last
return x;
}*/
return *((unsigned char *) &x) >> num_bits;
}
};
struct bit_or: public thrust::unary_function<thrust::tuple<gdf_valid_type,gdf_valid_type>,gdf_valid_type>
{
__host__ __device__
gdf_valid_type operator()(thrust::tuple<gdf_valid_type,gdf_valid_type> x) const
{
return thrust::get<0>(x) | thrust::get<1>(x);
}
};
typedef thrust::transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> > bit_position_iterator;
template<typename stencil_type>
struct is_stencil_true
{
__host__ __device__
bool operator()(const thrust::tuple<stencil_type, gdf_valid_iterator::value_type, bit_position_iterator::value_type> value)
{
gdf_size_type position = thrust::get<2>(value);
return ((thrust::get<1>(value) >> position) & 1) && (thrust::get<0>(value) != 0);
}
};
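// In other words, a row is kept by the copy_if below only when its validity
// bit is set AND its stencil value is nonzero; for example, a row whose
// stencil byte is 1 but whose valid bit is 0 is dropped.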
struct is_bit_set
{
__host__ __device__
bool operator()(const thrust::tuple< gdf_valid_iterator::value_type, bit_position_iterator::value_type> value)
{
gdf_size_type position = thrust::get<1>(value);
return ((thrust::get<0>(value) >> position) & 1);
}
};
struct bit_mask_pack_op : public thrust::unary_function<int64_t,gdf_valid_type>
{
__host__ __device__
gdf_valid_type operator()(const int64_t expanded)
{
gdf_valid_type result = 0;
for(unsigned int i = 0; i < GDF_VALID_BITSIZE; i++){
// 0, 8, 16, ....,48, 56
unsigned char byte = (expanded >> ( (GDF_VALID_BITSIZE - 1 - i ) * 8));
result |= (byte & 1) << i;
}
return (result);
}
};
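// Illustrative example: reading the expanded mask eight bytes at a time as an
// int64, if the low bits of its bytes from most to least significant are
// {1,0,1,1,0,0,0,1}, the packed result is 0b10001101: the most significant
// byte contributes bit 0 of the valid byte and the least significant byte
// contributes bit 7.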
std::map<gdf_dtype, int16_t> column_type_width = {{GDF_INT8, sizeof(int8_t)}, {GDF_INT16, sizeof(int16_t)},{GDF_INT32, sizeof(int32_t)}, {GDF_INT64, sizeof(int64_t)},
{GDF_FLOAT32, sizeof(float)}, {GDF_FLOAT64, sizeof(double)} };
//because applying a stencil only needs to know the WIDTH of a type for copying to output, we won't be making a bunch of templated versions to store this but rather
//storing a map from gdf_type to width
//TODO: add a way for the space where we store temp bitmaps for compaction be allocated
//on the outside
gdf_error gpu_apply_stencil(gdf_column *lhs, gdf_column * stencil, gdf_column * output){
//Require that output and lhs are the same size
GDF_REQUIRE(output->size == lhs->size, GDF_COLUMN_SIZE_MISMATCH);
GDF_REQUIRE(lhs->dtype == output->dtype, GDF_DTYPE_MISMATCH);
GDF_REQUIRE(!lhs->valid, GDF_VALIDITY_UNSUPPORTED);
//find the width in bytes of this data type
auto searched_item = column_type_width.find(lhs->dtype);
int16_t width = searched_item->second; //width in bytes
searched_item = column_type_width.find(stencil->dtype);
int16_t stencil_width= searched_item->second; //width in bytes
hipStream_t stream;
hipStreamCreate(&stream);
rmm_temp_allocator allocator(stream);
auto exec = thrust::hip::par(allocator).on(stream);
size_t n_bytes = get_number_of_bytes_for_valid(stencil->size);
bit_position_iterator bit_position_iter(thrust::make_counting_iterator<gdf_size_type>(0), modulus_bit_width(n_bytes, stencil->size));
gdf_valid_iterator valid_iterator(thrust::detail::make_normal_iterator(thrust::device_pointer_cast(stencil->valid)),GDF_VALID_BITSIZE);
//TODO: can probably make this happen with some kind of iterator so it can work on any width size
//zip the stencil and the valid iterator together
typedef thrust::tuple<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >,gdf_valid_iterator, bit_position_iterator > zipped_stencil_tuple;
typedef thrust::zip_iterator<zipped_stencil_tuple> zipped_stencil_iterator;
//What is being built here? Basically we are zipping up an iterator over the stencil values, one over the valid bit mask bytes,
//and one which lets us get the bit position within a byte based on our index
zipped_stencil_iterator zipped_stencil_iter(
thrust::make_tuple(
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t * )stencil->data)),
valid_iterator,
thrust::make_transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> >(
thrust::make_counting_iterator<gdf_size_type>(0),
modulus_bit_width(n_bytes, stencil->size))
));
//NOTE!!!! the output column's size is set below, but the allocation is NOT compacted;
//the caller is responsible for handling that
if(width == 1){
thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > input_start =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t *) lhs->data));
thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > output_start =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t *) output->data));
thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > output_end =
thrust::copy_if(exec,input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >());
output->size = output_end - output_start;
}else if(width == 2){
thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > input_start =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int16_t *) lhs->data));
thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > output_start =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int16_t *) output->data));
thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > output_end =
thrust::copy_if(exec,input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >());
output->size = output_end - output_start;
}else if(width == 4){
thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > input_start =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int32_t *) lhs->data));
thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > output_start =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int32_t *) output->data));
thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > output_end =
thrust::copy_if(exec,input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >());
output->size = output_end - output_start;
}else if(width == 8){
thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > input_start =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *) lhs->data));
thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > output_start =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *) output->data));
thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > output_end =
thrust::copy_if(exec,input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >());
output->size = output_end - output_start;
}
gdf_size_type num_values = lhs->size;
//TODO:BRING OVER THE BITMASK!!!
//need to store a prefix sum
//align to size 8
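//the plan: expand each validity bit into one byte of valid_bit_mask, remove the bytes whose row
//fails the stencil using the same predicate as above, then pack every 8 surviving bytes back into
//a single bitmask byte of output->valid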
Vector<gdf_valid_type> valid_bit_mask; //we are expanding the bit mask to an int8 because I can't envision an algorithm that operates on the bitmask that
if(num_values % GDF_VALID_BITSIZE != 0){
valid_bit_mask.resize(num_values + (GDF_VALID_BITSIZE - (num_values % GDF_VALID_BITSIZE))); //align this allocation on GDF_VALID_BITSIZE so we don't have to bounds check
}else{
valid_bit_mask.resize(num_values);
}
// doesn't require the use of a prefix sum, which would have size 8 * num rows and be much larger than this
typedef thrust::tuple<gdf_valid_iterator, bit_position_iterator > mask_tuple;
typedef thrust::zip_iterator<mask_tuple> zipped_mask;
zipped_mask zipped_mask_iter(
thrust::make_tuple(
valid_iterator,
thrust::make_transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> >(
thrust::make_counting_iterator<gdf_size_type>(0),
modulus_bit_width(n_bytes, stencil->size))
)
);
typedef thrust::transform_iterator<is_bit_set, zipped_mask > bit_set_iterator;
bit_set_iterator bit_set_iter = thrust::make_transform_iterator<is_bit_set,zipped_mask>(
zipped_mask_iter,
is_bit_set()
);
//copy the bitmask to device_vector of int8
thrust::copy(exec, bit_set_iter, bit_set_iter + num_values, valid_bit_mask.begin());
//remove the values that don't pass the stencil
thrust::remove_if(exec,valid_bit_mask.begin(), valid_bit_mask.begin() + num_values,zipped_stencil_iter, is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >());
//recompact the values and store them in the output bitmask
//we can group them into pieces of 8 because we aligned this earlier on when we made the device_vector
thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > valid_bit_mask_group_8_iter =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *) valid_bit_mask.data().get()));
//you may notice that we can write out more bytes than our valid_num_bytes, this only happens when we are not aligned to GDF_VALID_BITSIZE bytes, because the
//arrow standard requires 64 byte alignment, this is a safe assumption to make
thrust::transform(exec, valid_bit_mask_group_8_iter, valid_bit_mask_group_8_iter + ((num_values + GDF_VALID_BITSIZE - 1) / GDF_VALID_BITSIZE),
thrust::detail::make_normal_iterator(thrust::device_pointer_cast(output->valid)),bit_mask_pack_op());
hipStreamSynchronize(stream);
hipStreamDestroy(stream);
return GDF_SUCCESS;
}
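//returns how many bits of the last bitmask byte are actually used by a column of column_size rows,
//e.g. column_size == 19 -> 3 bitmask bytes, and the last byte holds 3 bits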
size_t get_last_byte_length(size_t column_size) {
size_t n_bytes = get_number_of_bytes_for_valid(column_size);
size_t length = column_size - GDF_VALID_BITSIZE * (n_bytes - 1);
if (n_bytes == 1 ) {
length = column_size;
}
return length;
}
size_t get_right_byte_length(size_t column_size, size_t iter, size_t left_length) {
size_t n_bytes = get_number_of_bytes_for_valid(column_size);
size_t length = column_size - GDF_VALID_BITSIZE * (n_bytes - 1);
if (iter == n_bytes - 1) { // the last one
if (left_length + length > GDF_VALID_BITSIZE) {
length = GDF_VALID_BITSIZE - left_length;
}
}
else {
length = GDF_VALID_BITSIZE - left_length;
}
return length;
}
bool last_with_too_many_bits(size_t column_size, size_t iter, size_t left_length) {
size_t n_bytes = get_number_of_bytes_for_valid(column_size);
size_t length = column_size - GDF_VALID_BITSIZE * (n_bytes - 1);
if (iter == n_bytes) { // the last one
// the last one has too many bits
if (left_length + length > GDF_VALID_BITSIZE) {
return true;
}
}
return false;
}
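//merges two partial bitmask bytes into one: A's bits are shifted up by len_b and B supplies the
//low len_b bits; has_next tells us whether B is followed by more bytes, which changes how B's bits
//are aligned before the or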
gdf_valid_type concat_bins (gdf_valid_type A, gdf_valid_type B, int len_a, int len_b, bool has_next, size_t right_length){
A = A << len_b;
if (!has_next) {
B = B << len_a;
B = B >> len_a;
} else {
B = B >> (right_length - len_b);
}
return (A | B);
}
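//gpu_concat appends rhs below lhs into output: the data buffers are copied back to back with two
//async device-to-device copies, then the validity bitmasks are stitched together, shifting the rhs
//bits whenever lhs->size is not a multiple of GDF_VALID_BITSIZE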
gdf_error gpu_concat(gdf_column *lhs, gdf_column *rhs, gdf_column *output)
{
GDF_REQUIRE( (lhs->dtype == output->dtype ) && ( rhs->dtype == output->dtype), GDF_VALIDITY_MISSING);
GDF_REQUIRE(output->size == lhs->size + rhs->size, GDF_COLUMN_SIZE_MISMATCH);
hipStream_t stream;
hipStreamCreate(&stream);
int type_width = column_type_width[ lhs->dtype ];
hipMemcpyAsync(output->data, lhs->data, type_width * lhs->size, hipMemcpyDeviceToDevice, stream);
hipMemcpyAsync( (void *)( (int8_t*) (output->data) + type_width * lhs->size), rhs->data, type_width * rhs->size, hipMemcpyDeviceToDevice, stream);
int left_num_chars = get_number_of_bytes_for_valid(lhs->size);
int right_num_chars = get_number_of_bytes_for_valid(rhs->size);
int output_num_chars = get_number_of_bytes_for_valid(output->size);
thrust::device_ptr<gdf_valid_type> left_device_bits = thrust::device_pointer_cast((gdf_valid_type *)lhs->valid);
thrust::device_ptr<gdf_valid_type> right_device_bits = thrust::device_pointer_cast((gdf_valid_type *)rhs->valid);
thrust::device_ptr<gdf_valid_type> output_device_bits = thrust::device_pointer_cast((gdf_valid_type *)output->valid);
thrust::copy(left_device_bits, left_device_bits + left_num_chars, output_device_bits);
gdf_valid_type shift_bits = (GDF_VALID_BITSIZE - (lhs->size % GDF_VALID_BITSIZE));
if(shift_bits == 8){
shift_bits = 0;
}
if (right_num_chars > 0) {
size_t prev_len = get_last_byte_length(lhs->size);
// copy all right_num_chars bitmask bytes from the right column
if (shift_bits == 0) {
thrust::copy(right_device_bits, right_device_bits + right_num_chars, output_device_bits + left_num_chars);
}
else {
thrust::host_vector<gdf_valid_type> last_byte (2);
thrust::copy (left_device_bits + left_num_chars - 1, left_device_bits + left_num_chars, last_byte.begin());
thrust::copy (right_device_bits, right_device_bits + 1, last_byte.begin() + 1);
size_t curr_len = get_right_byte_length(rhs->size, 0, prev_len);
if (1 != right_num_chars) {
last_byte[1] = last_byte[1] >> prev_len;
}
auto flag = last_with_too_many_bits(rhs->size, 0 + 1, prev_len);
size_t last_right_byte_length = rhs->size - GDF_VALID_BITSIZE * (right_num_chars - 1);
last_byte[0] = concat_bins(last_byte[0], last_byte[1], prev_len, curr_len, flag, last_right_byte_length);
thrust::copy( last_byte.begin(), last_byte.begin() + 1, output_device_bits + left_num_chars - 1);
if(right_num_chars > 1) {
using first_iterator_type = thrust::transform_iterator<shift_left,Vector<gdf_valid_type>::iterator>;
using second_iterator_type = thrust::transform_iterator<shift_right,Vector<gdf_valid_type>::iterator>;
using offset_tuple = thrust::tuple<first_iterator_type, second_iterator_type>;
using zipped_offset = thrust::zip_iterator<offset_tuple>;
auto too_many_bits = last_with_too_many_bits(rhs->size, right_num_chars, prev_len);
size_t last_byte_length = get_last_byte_length(rhs->size);
if (last_byte_length >= (GDF_VALID_BITSIZE - shift_bits)) { //
thrust::host_vector<gdf_valid_type> last_byte (right_device_bits + right_num_chars - 1, right_device_bits + right_num_chars);
last_byte[0] = last_byte[0] << (GDF_VALID_BITSIZE - last_byte_length);
thrust::copy( last_byte.begin(), last_byte.begin() + 1, right_device_bits + right_num_chars - 1);
}
zipped_offset zipped_offset_iter(
thrust::make_tuple(
thrust::make_transform_iterator<shift_left, Vector<gdf_valid_type>::iterator >(
right_device_bits,
shift_left(shift_bits)),
thrust::make_transform_iterator<shift_right, Vector<gdf_valid_type>::iterator >(
right_device_bits + 1,
shift_right(GDF_VALID_BITSIZE - shift_bits, !too_many_bits))
)
);
//this gives an iterator that yields a tuple of the current char and the char after it, so we can recover the last bits
using transformed_or = thrust::transform_iterator<bit_or, zipped_offset>;
//now we want to make a transform iterator that ors these values together
transformed_or ored_offset_iter =
thrust::make_transform_iterator<bit_or,zipped_offset> (
zipped_offset_iter,
bit_or()
);
//because one of the iterators is + 1 we don't want to read the last char here since it could be past the end of our allocation
thrust::copy( ored_offset_iter, ored_offset_iter + right_num_chars - 1, output_device_bits + left_num_chars);
thrust::host_vector<gdf_valid_type> last_byte (right_device_bits + right_num_chars - 1, right_device_bits + right_num_chars);
last_byte[0] = last_byte[0] >> (GDF_VALID_BITSIZE - last_byte_length);
thrust::copy( last_byte.begin(), last_byte.begin() + 1, right_device_bits + right_num_chars - 1);
if ( !too_many_bits ) {
thrust::host_vector<gdf_valid_type> last_byte (2);
thrust::copy (right_device_bits + right_num_chars - 2, right_device_bits + right_num_chars - 1, last_byte.begin());
thrust::copy (right_device_bits + right_num_chars - 1, right_device_bits + right_num_chars, last_byte.begin() + 1);
last_byte[0] = last_byte[0] << last_byte_length | last_byte[1];
thrust::copy( last_byte.begin(), last_byte.begin() + 1, output_device_bits + output_num_chars - 1);
}
}
}
if( last_with_too_many_bits(rhs->size, right_num_chars, prev_len)){
thrust::host_vector<gdf_valid_type> last_byte (right_device_bits + right_num_chars - 1, right_device_bits + right_num_chars);
size_t prev_len = get_last_byte_length(lhs->size);
size_t curr_len = get_right_byte_length(rhs->size, right_num_chars - 1, prev_len);
last_byte[0] = last_byte[0] << curr_len;
last_byte[0] = last_byte[0] >> curr_len;
thrust::copy( last_byte.begin(), last_byte.begin() + 1, output_device_bits + output_num_chars - 1);
}
}
hipStreamSynchronize(stream);
hipStreamDestroy(stream);
return GDF_SUCCESS;
}
| 45de96d1694f48702b6200c715f14b15b54bfe33.cu | /*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Felipe Aramburu <[email protected]>
* Copyright 2018 Alexander Ocsa <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gdf/gdf.h>
#include <gdf/utils.h>
#include <gdf/errorutils.h>
#include <cuda_runtime.h>
#include <vector>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/remove.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/iterator_adaptor.h>
#include <thrust/iterator/transform_iterator.h>
#include "thrust_rmm_allocator.h"
//std lib
#include <map>
// thrust::device_vector set to use rmmAlloc and rmmFree.
template <typename T>
using Vector = thrust::device_vector<T, rmm_allocator<T>>;
//adapted from the thrust iterator_adaptor example
template<typename Iterator>
class repeat_iterator
: public thrust::iterator_adaptor<
repeat_iterator<Iterator>, // the first template parameter is the name of the iterator we're creating
Iterator // the second template parameter is the name of the iterator we're adapting
// we can use the default for the additional template parameters
>
{
public:
// shorthand for the name of the iterator_adaptor we're deriving from
typedef thrust::iterator_adaptor<
repeat_iterator<Iterator>,
Iterator
> super_t;
__host__ __device__
repeat_iterator(const Iterator &x, int n) : super_t(x), begin(x), n(n) {}
// befriend thrust::iterator_core_access to allow it access to the private interface below
friend class thrust::iterator_core_access;
private:
// repeat each element of the adapted range n times
unsigned int n;
// used to keep track of where we began
const Iterator begin;
// it is private because only thrust::iterator_core_access needs access to it
__host__ __device__
typename super_t::reference dereference() const
{
return *(begin + (this->base() - begin) / n);
}
};
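//repeat_iterator dereferences each element of the adapted range n consecutive times,
//e.g. over {a, b} with n == 2 it yields a, a, b, b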
typedef repeat_iterator<thrust::detail::normal_iterator<thrust::device_ptr<gdf_valid_type> > > gdf_valid_iterator;
gdf_size_type get_number_of_bytes_for_valid (gdf_size_type column_size) {
return sizeof(gdf_valid_type) * (column_size + GDF_VALID_BITSIZE - 1) / GDF_VALID_BITSIZE;
}
// note: functor inherits from unary_function
struct modulus_bit_width : public thrust::unary_function<gdf_size_type,gdf_size_type>
{
gdf_size_type n_bytes;
gdf_size_type column_size;
modulus_bit_width (gdf_size_type n_bytes, gdf_size_type column_size) {
this->n_bytes = n_bytes;
this->column_size = column_size;
}
__host__ __device__
gdf_size_type operator()(gdf_size_type x) const
{
gdf_size_type col_position = x / 8;
gdf_size_type length_col = n_bytes != col_position+1 ? GDF_VALID_BITSIZE : column_size - GDF_VALID_BITSIZE * (n_bytes - 1);
//return x % GDF_VALID_BITSIZE;
return (length_col - 1) - (x % 8);
// x <<
}
};
struct shift_left: public thrust::unary_function<gdf_valid_type,gdf_valid_type>
{
gdf_valid_type num_bits;
shift_left(gdf_valid_type num_bits): num_bits(num_bits){
}
__host__ __device__
gdf_valid_type operator()(gdf_valid_type x) const
{
return x << num_bits;
}
};
struct shift_right: public thrust::unary_function<gdf_valid_type,gdf_valid_type>
{
gdf_valid_type num_bits;
bool not_too_many;
shift_right(gdf_valid_type num_bits, bool not_too_many)
: num_bits(num_bits), not_too_many(not_too_many){
}
__host__ __device__
gdf_valid_type operator()(gdf_valid_type x) const
{
//if you want to force the shift to fill bits with 0 you need to use an unsigned type
/*if (not_too_many) { // is the last
return x;
}*/
return *((unsigned char *) &x) >> num_bits;
}
};
struct bit_or: public thrust::unary_function<thrust::tuple<gdf_valid_type,gdf_valid_type>,gdf_valid_type>
{
__host__ __device__
gdf_valid_type operator()(thrust::tuple<gdf_valid_type,gdf_valid_type> x) const
{
return thrust::get<0>(x) | thrust::get<1>(x);
}
};
typedef thrust::transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> > bit_position_iterator;
template<typename stencil_type>
struct is_stencil_true
{
__host__ __device__
bool operator()(const thrust::tuple<stencil_type, gdf_valid_iterator::value_type, bit_position_iterator::value_type> value)
{
gdf_size_type position = thrust::get<2>(value);
return ((thrust::get<1>(value) >> position) & 1) && (thrust::get<0>(value) != 0);
}
};
struct is_bit_set
{
__host__ __device__
bool operator()(const thrust::tuple< gdf_valid_iterator::value_type, bit_position_iterator::value_type> value)
{
gdf_size_type position = thrust::get<1>(value);
return ((thrust::get<0>(value) >> position) & 1);
}
};
struct bit_mask_pack_op : public thrust::unary_function<int64_t,gdf_valid_type>
{
__host__ __device__
gdf_valid_type operator()(const int64_t expanded)
{
gdf_valid_type result = 0;
for(unsigned int i = 0; i < GDF_VALID_BITSIZE; i++){
// 0, 8, 16, ....,48, 56
unsigned char byte = (expanded >> ( (GDF_VALID_BITSIZE - 1 - i ) * 8));
result |= (byte & 1) << i;
}
return (result);
}
};
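//bit_mask_pack_op folds an int64_t holding 8 expanded bytes back into one bitmask byte:
//bit i of the result is the low bit of byte i, counting bytes from the most significant end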
std::map<gdf_dtype, int16_t> column_type_width = {{GDF_INT8, sizeof(int8_t)}, {GDF_INT16, sizeof(int16_t)},{GDF_INT32, sizeof(int32_t)}, {GDF_INT64, sizeof(int64_t)},
{GDF_FLOAT32, sizeof(float)}, {GDF_FLOAT64, sizeof(double)} };
//because applying a stencil only needs to know the WIDTH of a type for copying to output, we won't be making a bunch of templated version to store this but rather
//storing a map from gdf_type to width
//TODO: add a way for the space where we store temp bitmaps for compaction be allocated
//on the outside
gdf_error gpu_apply_stencil(gdf_column *lhs, gdf_column * stencil, gdf_column * output){
//OK: add a requirement here that output and lhs are the same size
GDF_REQUIRE(output->size == lhs->size, GDF_COLUMN_SIZE_MISMATCH);
GDF_REQUIRE(lhs->dtype == output->dtype, GDF_DTYPE_MISMATCH);
GDF_REQUIRE(!lhs->valid, GDF_VALIDITY_UNSUPPORTED);
//find the width in bytes of this data type
auto searched_item = column_type_width.find(lhs->dtype);
int16_t width = searched_item->second; //width in bytes
searched_item = column_type_width.find(stencil->dtype);
int16_t stencil_width= searched_item->second; //width in bytes
cudaStream_t stream;
cudaStreamCreate(&stream);
rmm_temp_allocator allocator(stream);
auto exec = thrust::cuda::par(allocator).on(stream);
size_t n_bytes = get_number_of_bytes_for_valid(stencil->size);
bit_position_iterator bit_position_iter(thrust::make_counting_iterator<gdf_size_type>(0), modulus_bit_width(n_bytes, stencil->size));
gdf_valid_iterator valid_iterator(thrust::detail::make_normal_iterator(thrust::device_pointer_cast(stencil->valid)),GDF_VALID_BITSIZE);
//TODO: can probably make this happen with some kind of iterator so it can work on any width size
//zip the stencil and the valid iterator together
typedef thrust::tuple<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >,gdf_valid_iterator, bit_position_iterator > zipped_stencil_tuple;
typedef thrust::zip_iterator<zipped_stencil_tuple> zipped_stencil_iterator;
//what is this iterator, you might wonder?
//well basically we are zipping up an iterator to the stencil, one to the bit masks, and one which lets us get the bit position based on our index
zipped_stencil_iterator zipped_stencil_iter(
thrust::make_tuple(
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t * )stencil->data)),
valid_iterator,
thrust::make_transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> >(
thrust::make_counting_iterator<gdf_size_type>(0),
modulus_bit_width(n_bytes, stencil->size))
));
//NOTE!!!! the output column is getting set to a specific size but we are NOT compacting the allocation,
//whoever calls that should handle that
if(width == 1){
thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > input_start =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t *) lhs->data));
thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > output_start =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int8_t *) output->data));
thrust::detail::normal_iterator<thrust::device_ptr<int8_t> > output_end =
thrust::copy_if(exec,input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >());
output->size = output_end - output_start;
}else if(width == 2){
thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > input_start =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int16_t *) lhs->data));
thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > output_start =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int16_t *) output->data));
thrust::detail::normal_iterator<thrust::device_ptr<int16_t> > output_end =
thrust::copy_if(exec,input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >());
output->size = output_end - output_start;
}else if(width == 4){
thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > input_start =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int32_t *) lhs->data));
thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > output_start =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int32_t *) output->data));
thrust::detail::normal_iterator<thrust::device_ptr<int32_t> > output_end =
thrust::copy_if(exec,input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >());
output->size = output_end - output_start;
}else if(width == 8){
thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > input_start =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *) lhs->data));
thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > output_start =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *) output->data));
thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > output_end =
thrust::copy_if(exec,input_start,input_start + lhs->size,zipped_stencil_iter,output_start,is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >());
output->size = output_end - output_start;
}
gdf_size_type num_values = lhs->size;
//TODO:BRING OVER THE BITMASK!!!
//need to store a prefix sum
//align to size 8
Vector<gdf_valid_type> valid_bit_mask; //we are expanding the bit mask to an int8 because I can't envision an algorithm that operates on the bitmask that
if(num_values % GDF_VALID_BITSIZE != 0){
valid_bit_mask.resize(num_values + (GDF_VALID_BITSIZE - (num_values % GDF_VALID_BITSIZE))); //align this allocation on GDF_VALID_BITSIZE so we don't have to bounds check
}else{
valid_bit_mask.resize(num_values);
}
// doesn't require the use for a prefix sum which will have size 8 * num rows which is much larger than this
typedef thrust::tuple<gdf_valid_iterator, bit_position_iterator > mask_tuple;
typedef thrust::zip_iterator<mask_tuple> zipped_mask;
zipped_mask zipped_mask_iter(
thrust::make_tuple(
valid_iterator,
thrust::make_transform_iterator<modulus_bit_width, thrust::counting_iterator<gdf_size_type> >(
thrust::make_counting_iterator<gdf_size_type>(0),
modulus_bit_width(n_bytes, stencil->size))
)
);
typedef thrust::transform_iterator<is_bit_set, zipped_mask > bit_set_iterator;
bit_set_iterator bit_set_iter = thrust::make_transform_iterator<is_bit_set,zipped_mask>(
zipped_mask_iter,
is_bit_set()
);
//copy the bitmask to device_vector of int8
thrust::copy(exec, bit_set_iter, bit_set_iter + num_values, valid_bit_mask.begin());
//remove the values that don't pass the stencil
thrust::remove_if(exec,valid_bit_mask.begin(), valid_bit_mask.begin() + num_values,zipped_stencil_iter, is_stencil_true<thrust::detail::normal_iterator<thrust::device_ptr<int8_t> >::value_type >());
//recompact the values and store them in the output bitmask
//we can group them into pieces of 8 because we aligned this earlier on when we made the device_vector
thrust::detail::normal_iterator<thrust::device_ptr<int64_t> > valid_bit_mask_group_8_iter =
thrust::detail::make_normal_iterator(thrust::device_pointer_cast((int64_t *) valid_bit_mask.data().get()));
//you may notice that we can write out more bytes than our valid_num_bytes, this only happens when we are not aligned to GDF_VALID_BITSIZE bytes, because the
//arrow standard requires 64 byte alignment, this is a safe assumption to make
thrust::transform(exec, valid_bit_mask_group_8_iter, valid_bit_mask_group_8_iter + ((num_values + GDF_VALID_BITSIZE - 1) / GDF_VALID_BITSIZE),
thrust::detail::make_normal_iterator(thrust::device_pointer_cast(output->valid)),bit_mask_pack_op());
cudaStreamSynchronize(stream);
cudaStreamDestroy(stream);
return GDF_SUCCESS;
}
size_t get_last_byte_length(size_t column_size) {
size_t n_bytes = get_number_of_bytes_for_valid(column_size);
size_t length = column_size - GDF_VALID_BITSIZE * (n_bytes - 1);
if (n_bytes == 1 ) {
length = column_size;
}
return length;
}
size_t get_right_byte_length(size_t column_size, size_t iter, size_t left_length) {
size_t n_bytes = get_number_of_bytes_for_valid(column_size);
size_t length = column_size - GDF_VALID_BITSIZE * (n_bytes - 1);
if (iter == n_bytes - 1) { // the last one
if (left_length + length > GDF_VALID_BITSIZE) {
length = GDF_VALID_BITSIZE - left_length;
}
}
else {
length = GDF_VALID_BITSIZE - left_length;
}
return length;
}
bool last_with_too_many_bits(size_t column_size, size_t iter, size_t left_length) {
size_t n_bytes = get_number_of_bytes_for_valid(column_size);
size_t length = column_size - GDF_VALID_BITSIZE * (n_bytes - 1);
if (iter == n_bytes) { // the last one
// the last one has too many bits
if (left_length + length > GDF_VALID_BITSIZE) {
return true;
}
}
return false;
}
gdf_valid_type concat_bins (gdf_valid_type A, gdf_valid_type B, int len_a, int len_b, bool has_next, size_t right_length){
A = A << len_b;
if (!has_next) {
B = B << len_a;
B = B >> len_a;
} else {
B = B >> (right_length - len_b);
}
return (A | B);
}
gdf_error gpu_concat(gdf_column *lhs, gdf_column *rhs, gdf_column *output)
{
GDF_REQUIRE( (lhs->dtype == output->dtype ) && ( rhs->dtype == output->dtype), GDF_VALIDITY_MISSING);
GDF_REQUIRE(output->size == lhs->size + rhs->size, GDF_COLUMN_SIZE_MISMATCH);
cudaStream_t stream;
cudaStreamCreate(&stream);
int type_width = column_type_width[ lhs->dtype ];
cudaMemcpyAsync(output->data, lhs->data, type_width * lhs->size, cudaMemcpyDeviceToDevice, stream);
cudaMemcpyAsync( (void *)( (int8_t*) (output->data) + type_width * lhs->size), rhs->data, type_width * rhs->size, cudaMemcpyDeviceToDevice, stream);
int left_num_chars = get_number_of_bytes_for_valid(lhs->size);
int right_num_chars = get_number_of_bytes_for_valid(rhs->size);
int output_num_chars = get_number_of_bytes_for_valid(output->size);
thrust::device_ptr<gdf_valid_type> left_device_bits = thrust::device_pointer_cast((gdf_valid_type *)lhs->valid);
thrust::device_ptr<gdf_valid_type> right_device_bits = thrust::device_pointer_cast((gdf_valid_type *)rhs->valid);
thrust::device_ptr<gdf_valid_type> output_device_bits = thrust::device_pointer_cast((gdf_valid_type *)output->valid);
thrust::copy(left_device_bits, left_device_bits + left_num_chars, output_device_bits);
gdf_valid_type shift_bits = (GDF_VALID_BITSIZE - (lhs->size % GDF_VALID_BITSIZE));
if(shift_bits == 8){
shift_bits = 0;
}
if (right_num_chars > 0) {
size_t prev_len = get_last_byte_length(lhs->size);
// copy all right_num_chars bitmask bytes from the right column
if (shift_bits == 0) {
thrust::copy(right_device_bits, right_device_bits + right_num_chars, output_device_bits + left_num_chars);
}
else {
thrust::host_vector<gdf_valid_type> last_byte (2);
thrust::copy (left_device_bits + left_num_chars - 1, left_device_bits + left_num_chars, last_byte.begin());
thrust::copy (right_device_bits, right_device_bits + 1, last_byte.begin() + 1);
size_t curr_len = get_right_byte_length(rhs->size, 0, prev_len);
if (1 != right_num_chars) {
last_byte[1] = last_byte[1] >> prev_len;
}
auto flag = last_with_too_many_bits(rhs->size, 0 + 1, prev_len);
size_t last_right_byte_length = rhs->size - GDF_VALID_BITSIZE * (right_num_chars - 1);
last_byte[0] = concat_bins(last_byte[0], last_byte[1], prev_len, curr_len, flag, last_right_byte_length);
thrust::copy( last_byte.begin(), last_byte.begin() + 1, output_device_bits + left_num_chars - 1);
if(right_num_chars > 1) {
using first_iterator_type = thrust::transform_iterator<shift_left,Vector<gdf_valid_type>::iterator>;
using second_iterator_type = thrust::transform_iterator<shift_right,Vector<gdf_valid_type>::iterator>;
using offset_tuple = thrust::tuple<first_iterator_type, second_iterator_type>;
using zipped_offset = thrust::zip_iterator<offset_tuple>;
auto too_many_bits = last_with_too_many_bits(rhs->size, right_num_chars, prev_len);
size_t last_byte_length = get_last_byte_length(rhs->size);
if (last_byte_length >= (GDF_VALID_BITSIZE - shift_bits)) { //
thrust::host_vector<gdf_valid_type> last_byte (right_device_bits + right_num_chars - 1, right_device_bits + right_num_chars);
last_byte[0] = last_byte[0] << (GDF_VALID_BITSIZE - last_byte_length);
thrust::copy( last_byte.begin(), last_byte.begin() + 1, right_device_bits + right_num_chars - 1);
}
zipped_offset zipped_offset_iter(
thrust::make_tuple(
thrust::make_transform_iterator<shift_left, Vector<gdf_valid_type>::iterator >(
right_device_bits,
shift_left(shift_bits)),
thrust::make_transform_iterator<shift_right, Vector<gdf_valid_type>::iterator >(
right_device_bits + 1,
shift_right(GDF_VALID_BITSIZE - shift_bits, !too_many_bits))
)
);
//this gives an iterator that yields a tuple of the current char and the char after it, so we can recover the last bits
using transformed_or = thrust::transform_iterator<bit_or, zipped_offset>;
//now we want to make a transform iterator that ors these values together
transformed_or ored_offset_iter =
thrust::make_transform_iterator<bit_or,zipped_offset> (
zipped_offset_iter,
bit_or()
);
//because one of the iterators is + 1 we don't want to read the last char here since it could be past the end of our allocation
thrust::copy( ored_offset_iter, ored_offset_iter + right_num_chars - 1, output_device_bits + left_num_chars);
thrust::host_vector<gdf_valid_type> last_byte (right_device_bits + right_num_chars - 1, right_device_bits + right_num_chars);
last_byte[0] = last_byte[0] >> (GDF_VALID_BITSIZE - last_byte_length);
thrust::copy( last_byte.begin(), last_byte.begin() + 1, right_device_bits + right_num_chars - 1);
if ( !too_many_bits ) {
thrust::host_vector<gdf_valid_type> last_byte (2);
thrust::copy (right_device_bits + right_num_chars - 2, right_device_bits + right_num_chars - 1, last_byte.begin());
thrust::copy (right_device_bits + right_num_chars - 1, right_device_bits + right_num_chars, last_byte.begin() + 1);
last_byte[0] = last_byte[0] << last_byte_length | last_byte[1];
thrust::copy( last_byte.begin(), last_byte.begin() + 1, output_device_bits + output_num_chars - 1);
}
}
}
if( last_with_too_many_bits(rhs->size, right_num_chars, prev_len)){
thrust::host_vector<gdf_valid_type> last_byte (right_device_bits + right_num_chars - 1, right_device_bits + right_num_chars);
size_t prev_len = get_last_byte_length(lhs->size);
size_t curr_len = get_right_byte_length(rhs->size, right_num_chars - 1, prev_len);
last_byte[0] = last_byte[0] << curr_len;
last_byte[0] = last_byte[0] >> curr_len;
thrust::copy( last_byte.begin(), last_byte.begin() + 1, output_device_bits + output_num_chars - 1);
}
}
cudaStreamSynchronize(stream);
cudaStreamDestroy(stream);
return GDF_SUCCESS;
}
|
b198dac4182eeb6560ecebab6393daaa11347a01.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ------------
* This code is provided solely for the personal and private use of
* students taking the CSC367H1 course at the University of Toronto.
* Copying for purposes other than this use is expressly prohibited.
* All forms of distribution of this code, whether as given or with
* any changes, are expressly prohibited.
*
* Authors: Bogdan Simion, Felipe de Azevedo Piovezan
*
* All of the files in this directory and all subdirectories are:
* Copyright (c) 2017 Bogdan Simion
* -------------
*/
#include "kernels.h"
#include <stdio.h>
extern __constant__ int8_t cst_filter[81];
/* This is your own kernel, you should decide which parameters to add
here*/
/*
__global__ void kernel5(int32_t dimension,
const int32_t *input, int32_t *output, int32_t width, int32_t height, int32_t *g_max, int32_t *g_min)
{
__shared__ int32_t sdata_min[1024];
__shared__ int32_t sdata_max[1024];
__shared__ int32_t bordered_input[36][36];
int tid = threadIdx.x + threadIdx.y * 32;
//printf("ThreadIDX %d: %d x %d\n",tid, threadIdx.x, threadIdx.y);
int y = blockIdx.y * 32 + threadIdx.y;
int x = blockIdx.x * 32 + threadIdx.x;
int shared_x = threadIdx.x + 2;
int shared_y = threadIdx.y + 2;
int input_pos = y * width + x;
bordered_input[shared_y][shared_x] = 0;
if(x < width && y < height){
bordered_input[shared_y][shared_x] = input[input_pos];
}
if(threadIdx.x == 0 || threadIdx.x == 1){
bordered_input[shared_y][shared_x - 2] = 0;
if(blockIdx.x > 0 && y < height){ bordered_input[shared_y][shared_x - 2] = input[input_pos - 2];}
}
if(threadIdx.y == 0 || threadIdx.y == 1){
bordered_input[shared_y - 2][shared_x] = 0;
if(blockIdx.y > 0 && x < width){ bordered_input[shared_y - 2][shared_x] = input[input_pos - 2 * width];}
}
if(threadIdx.x == 0 && threadIdx.y == 0){
bordered_input[shared_y - 1][shared_x - 1] = 0;
bordered_input[shared_y - 2][shared_x - 2] = 0;
if(blockIdx.x > 0 && blockIdx.y > 0){
bordered_input[shared_y - 1][shared_x - 1] = input[input_pos - 1 - 1 * width];
bordered_input[shared_y - 1][shared_x - 2] = input[input_pos - 2 - 1 * width];
bordered_input[shared_y - 2][shared_x - 1] = input[input_pos - 1 - 2 * width];
bordered_input[shared_y - 2][shared_x - 2] = input[input_pos - 2 - 2 * width];
}
}
if(threadIdx.x == 31 && threadIdx.y == 0){
bordered_input[shared_y - 1][shared_x + 1] = 0;
bordered_input[shared_y - 2][shared_x + 2] = 0;
if(x + 2 < width && blockIdx.y > 0){
bordered_input[shared_y - 1][shared_x + 2] = input[input_pos + 2 - 1 * width];
bordered_input[shared_y - 2][shared_x + 2] = input[input_pos + 2 - 2 * width];
}
if(x + 1 < width && blockIdx.y > 0){
bordered_input[shared_y - 1][shared_x + 1] = input[input_pos + 1 - 1 * width];
bordered_input[shared_y - 2][shared_x + 1] = input[input_pos + 1 - 2 * width];
}
}
if(threadIdx.x == 0 && threadIdx.y == 31){
bordered_input[shared_y + 1][shared_x - 1] = 0;
bordered_input[shared_y + 2][shared_x - 2] = 0;
if(blockIdx.x > 0 && y + 2 < height){
bordered_input[shared_y + 2][shared_x - 1] = input[input_pos - 1 + 2 * width];
bordered_input[shared_y + 2][shared_x - 2] = input[input_pos - 2 + 2 * width];
}
if(blockIdx.x > 0 && y + 1 < height){
bordered_input[shared_y + 1][shared_x - 1] = input[input_pos - 1 + 1 * width];
bordered_input[shared_y + 1][shared_x - 2] = input[input_pos - 2 + 1 * width];
}
}
if(threadIdx.x == 31 && threadIdx.y == 31){
bordered_input[shared_y + 1][shared_x + 1] = 0;
bordered_input[shared_y + 2][shared_x + 2] = 0;
if(x + 2 < width && y + 2 < height){
bordered_input[shared_y + 1][shared_x + 2] = input[input_pos + 2 + 1 * width];
bordered_input[shared_y + 2][shared_x + 1] = input[input_pos + 1 + 2 * width];
bordered_input[shared_y + 2][shared_x + 2] = input[input_pos + 2 + 2 * width];
}
if(x + 1 < width && y + 1 < height ){
bordered_input[shared_y + 1][shared_x + 1] = input[input_pos + 1 + 1 * width];
}
}
if(threadIdx.x == 30 || threadIdx.x == 31){
bordered_input[shared_y][shared_x + 2] = 0;
if(x + 2 < width && y < height){
bordered_input[shared_y][shared_x + 2] = input[input_pos + 2];
}
}
if(threadIdx.y == 30 || threadIdx.y == 31){
bordered_input[shared_y + 2][shared_x] = 0;
if(y + 2 < height && x < width){bordered_input[shared_y + 2 ][shared_x] = input[input_pos + 2 * width];}
}
sdata_min[tid] = 999999;
sdata_max[tid] = -999999;
__syncthreads();
*/
/*
if(tid == 0 && blockIdx.y == 1 && blockIdx.x == 1){
printf("Block %d %d\n",blockIdx.x, blockIdx.y);
for(int p_y = 0; p_y < 36; p_y++){
for(int p_x = 0; p_x < 36; p_x++){
printf("%d ", bordered_input[p_y][p_x]);
}
printf("\n");
}
}
*/
// if (height % devProp.maxThreadsDim[0] > 0) {rows += 1;}
/*
if(x < width && y < height){
int32_t sum = 0;
// int initial_off = dimension / 2;
int img_x = shared_x - dimension / 2;
int img_y = shared_y - dimension / 2;
for(int f_y = 0; f_y < dimension; f_y++){
for(int f_x = 0; f_x < dimension; f_x ++){
int fil_pos = dimension * f_y + f_x;
sum += bordered_input[img_y][img_x] * cst_filter[fil_pos];
img_x++;
}
img_y++;
img_x = shared_x - dimension / 2;
}
output[input_pos] = sum;
sdata_min[tid] = sum;
sdata_max[tid] = sum;
}
__syncthreads();
for (unsigned int s = 512; s > 0; s >>= 1) {
if (tid < s) {
if(sdata_max[tid] < sdata_max[tid + s]){
sdata_max[tid] = sdata_max[tid + s];
}
if(sdata_min[tid + s] < sdata_min[tid]){
sdata_min[tid] = sdata_min[tid + s];
}
}
__syncthreads();
}
if (tid == 0) {
g_max[blockIdx.x + gridDim.x * blockIdx.y] = sdata_max[0];
g_min[blockIdx.x + gridDim.x * blockIdx.y] = sdata_min[0];
}
}
__global__ void normalize5(int32_t *image, int32_t width, int32_t height,
int32_t smallest, int32_t biggest)
{
int y = blockIdx.y * 32 + threadIdx.y;
int x = blockIdx.x * 32 + threadIdx.x;
int idx = y * width + x;
if(smallest != biggest && x < width && y < height){
image[idx] = ((image[idx] - smallest) * 255) / (biggest - smallest);
}
}
*/
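// The commented-out kernel above is a tiled variant that stages a 36x36 tile (a 32x32 block plus
// a 2-pixel border) of the input in shared memory; it is disabled in favor of the grid-stride
// version below.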
__global__ void kernel5(int32_t dimension,
const int32_t *input, int32_t *output, int32_t width, int32_t height, int32_t *g_max, int32_t *g_min)
{
int start = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int32_t sdata_min[512];
__shared__ int32_t sdata_max[512];
unsigned int tid = threadIdx.x;
sdata_min[tid] = 999999;
sdata_max[tid] = -999999;
int32_t min_v = 999999;
int32_t max_v = -999999;
// if (height % devProp.maxThreadsDim[0] > 0) {rows += 1;}
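// Grid-stride loop: each thread walks the image with stride gridDim.x * blockDim.x, convolves
// every pixel it owns with the dimension x dimension filter held in constant memory (out-of-bounds
// taps are skipped), writes the raw sum to output, and tracks the smallest/largest sum it has
// produced for the reduction below.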
for(int idx = start; idx < height * width; idx += gridDim.x * blockDim.x){
int32_t sum = 0;
// int initial_off = dimension / 2;
int img_x = idx % width - dimension / 2;
int img_y = idx / width - dimension / 2;
for(int y = 0; y < dimension; y++){
for(int x = 0; x < dimension; x ++){
if(img_x >= 0 && img_x < width && img_y >= 0 && img_y < height){
int fil_pos = dimension * y + x;
int img_pos = width * img_y + img_x;
sum += input[img_pos] * cst_filter[fil_pos];
}
img_x++;
}
img_y++;
img_x = idx % width - dimension / 2;
}
output[idx] = sum;
if(sum > max_v) {max_v = sum;}
if(sum < min_v) {min_v = sum;}
}
sdata_min[tid] = min_v;
sdata_max[tid] = max_v;
__syncthreads();
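// Block-wide min/max reduction: the shared arrays are folded in half each step until 64 entries
// remain, then a single warp finishes the last six steps through volatile pointers without
// further __syncthreads().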
for (unsigned int s = blockDim.x/2; s > 32; s >>= 1) {
if (tid < s) {
if(sdata_max[tid] < sdata_max[tid + s]){
sdata_max[tid] = sdata_max[tid + s];
}
if(sdata_min[tid + s] < sdata_min[tid]){
sdata_min[tid] = sdata_min[tid + s];
}
}
__syncthreads();
}
unsigned int blockSize = blockDim.x;
if (tid < 32) {
volatile int32_t* smem_max = sdata_max;
volatile int32_t* smem_min = sdata_min;
if (blockSize >= 64) {
if(smem_max[tid] < smem_max[tid + 32]){
smem_max[tid] = smem_max[tid + 32];
}
if(smem_min[tid + 32] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 32];
}
}
if (blockSize >= 32) {
if(smem_max[tid] < smem_max[tid + 16]){
smem_max[tid] = smem_max[tid + 16];
}
if(smem_min[tid + 16] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 16];
}
}
if (blockSize >= 16) {
if(smem_max[tid] < smem_max[tid + 8]){
smem_max[tid] = smem_max[tid + 8];
}
if(smem_min[tid + 8] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 8];
}
}
if (blockSize >= 8) {
if(smem_max[tid] < smem_max[tid + 4]){
smem_max[tid] = smem_max[tid + 4];
}
if(smem_min[tid + 4] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 4];
}
}
if (blockSize >= 4) {
if(smem_max[tid] < smem_max[tid + 2]){
smem_max[tid] = smem_max[tid + 2];
}
if(smem_min[tid + 2] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 2];
}
}
if (blockSize >= 2) {
if(smem_max[tid] < smem_max[tid + 1]){
smem_max[tid] = smem_max[tid + 1];
}
if(smem_min[tid + 1] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 1];
}
}
}
if (tid == 0) {
g_max[blockIdx.x] = sdata_max[0];
g_min[blockIdx.x] = sdata_min[0];
}
}
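// normalize5 linearly rescales every filtered value from [smallest, biggest] to [0, 255];
// it does nothing when the range is empty, so we never divide by zero.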
__global__ void normalize5(int32_t *image, int32_t width, int32_t height,
int32_t smallest, int32_t biggest)
{
if(smallest != biggest){
int start = threadIdx.x + blockIdx.x * blockDim.x;
for(int idx = start; idx < height * width; idx += gridDim.x * blockDim.x){
if (idx < width * height){
image[idx] = ((image[idx] - smallest) * 255) / (biggest - smallest);
}
}
}
}
| b198dac4182eeb6560ecebab6393daaa11347a01.cu | /* ------------
* This code is provided solely for the personal and private use of
* students taking the CSC367H1 course at the University of Toronto.
* Copying for purposes other than this use is expressly prohibited.
* All forms of distribution of this code, whether as given or with
* any changes, are expressly prohibited.
*
* Authors: Bogdan Simion, Felipe de Azevedo Piovezan
*
* All of the files in this directory and all subdirectories are:
* Copyright (c) 2017 Bogdan Simion
* -------------
*/
#include "kernels.h"
#include <stdio.h>
extern __constant__ int8_t cst_filter[81];
/* This is your own kernel, you should decide which parameters to add
here*/
/*
__global__ void kernel5(int32_t dimension,
const int32_t *input, int32_t *output, int32_t width, int32_t height, int32_t *g_max, int32_t *g_min)
{
__shared__ int32_t sdata_min[1024];
__shared__ int32_t sdata_max[1024];
__shared__ int32_t bordered_input[36][36];
int tid = threadIdx.x + threadIdx.y * 32;
//printf("ThreadIDX %d: %d x %d\n",tid, threadIdx.x, threadIdx.y);
int y = blockIdx.y * 32 + threadIdx.y;
int x = blockIdx.x * 32 + threadIdx.x;
int shared_x = threadIdx.x + 2;
int shared_y = threadIdx.y + 2;
int input_pos = y * width + x;
bordered_input[shared_y][shared_x] = 0;
if(x < width && y < height){
bordered_input[shared_y][shared_x] = input[input_pos];
}
if(threadIdx.x == 0 || threadIdx.x == 1){
bordered_input[shared_y][shared_x - 2] = 0;
if(blockIdx.x > 0 && y < height){ bordered_input[shared_y][shared_x - 2] = input[input_pos - 2];}
}
if(threadIdx.y == 0 || threadIdx.y == 1){
bordered_input[shared_y - 2][shared_x] = 0;
if(blockIdx.y > 0 && x < width){ bordered_input[shared_y - 2][shared_x] = input[input_pos - 2 * width];}
}
if(threadIdx.x == 0 && threadIdx.y == 0){
bordered_input[shared_y - 1][shared_x - 1] = 0;
bordered_input[shared_y - 2][shared_x - 2] = 0;
if(blockIdx.x > 0 && blockIdx.y > 0){
bordered_input[shared_y - 1][shared_x - 1] = input[input_pos - 1 - 1 * width];
bordered_input[shared_y - 1][shared_x - 2] = input[input_pos - 2 - 1 * width];
bordered_input[shared_y - 2][shared_x - 1] = input[input_pos - 1 - 2 * width];
bordered_input[shared_y - 2][shared_x - 2] = input[input_pos - 2 - 2 * width];
}
}
if(threadIdx.x == 31 && threadIdx.y == 0){
bordered_input[shared_y - 1][shared_x + 1] = 0;
bordered_input[shared_y - 2][shared_x + 2] = 0;
if(x + 2 < width && blockIdx.y > 0){
bordered_input[shared_y - 1][shared_x + 2] = input[input_pos + 2 - 1 * width];
bordered_input[shared_y - 2][shared_x + 2] = input[input_pos + 2 - 2 * width];
}
if(x + 1 < width && blockIdx.y > 0){
bordered_input[shared_y - 1][shared_x + 1] = input[input_pos + 1 - 1 * width];
bordered_input[shared_y - 2][shared_x + 1] = input[input_pos + 1 - 2 * width];
}
}
if(threadIdx.x == 0 && threadIdx.y == 31){
bordered_input[shared_y + 1][shared_x - 1] = 0;
bordered_input[shared_y + 2][shared_x - 2] = 0;
if(blockIdx.x > 0 && y + 2 < height){
bordered_input[shared_y + 2][shared_x - 1] = input[input_pos - 1 + 2 * width];
bordered_input[shared_y + 2][shared_x - 2] = input[input_pos - 2 + 2 * width];
}
if(blockIdx.x > 0 && y + 1 < height){
bordered_input[shared_y + 1][shared_x - 1] = input[input_pos - 1 + 1 * width];
bordered_input[shared_y + 1][shared_x - 2] = input[input_pos - 2 + 1 * width];
}
}
if(threadIdx.x == 31 && threadIdx.y == 31){
bordered_input[shared_y + 1][shared_x + 1] = 0;
bordered_input[shared_y + 2][shared_x + 2] = 0;
if(x + 2 < width && y + 2 < height){
bordered_input[shared_y + 1][shared_x + 2] = input[input_pos + 2 + 1 * width];
bordered_input[shared_y + 2][shared_x + 1] = input[input_pos + 1 + 2 * width];
bordered_input[shared_y + 2][shared_x + 2] = input[input_pos + 2 + 2 * width];
}
if(x + 1 < width && y + 1 < height ){
bordered_input[shared_y + 1][shared_x + 1] = input[input_pos + 1 + 1 * width];
}
}
if(threadIdx.x == 30 || threadIdx.x == 31){
bordered_input[shared_y][shared_x + 2] = 0;
if(x + 2 < width && y < height){
bordered_input[shared_y][shared_x + 2] = input[input_pos + 2];
}
}
if(threadIdx.y == 30 || threadIdx.y == 31){
bordered_input[shared_y + 2][shared_x] = 0;
if(y + 2 < height && x < width){bordered_input[shared_y + 2 ][shared_x] = input[input_pos + 2 * width];}
}
sdata_min[tid] = 999999;
sdata_max[tid] = -999999;
__syncthreads();
*/
/*
if(tid == 0 && blockIdx.y == 1 && blockIdx.x == 1){
printf("Block %d %d\n",blockIdx.x, blockIdx.y);
for(int p_y = 0; p_y < 36; p_y++){
for(int p_x = 0; p_x < 36; p_x++){
printf("%d ", bordered_input[p_y][p_x]);
}
printf("\n");
}
}
*/
// if (height % devProp.maxThreadsDim[0] > 0) {rows += 1;}
/*
if(x < width && y < height){
int32_t sum = 0;
// int initial_off = dimension / 2;
int img_x = shared_x - dimension / 2;
int img_y = shared_y - dimension / 2;
for(int f_y = 0; f_y < dimension; f_y++){
for(int f_x = 0; f_x < dimension; f_x ++){
int fil_pos = dimension * f_y + f_x;
sum += bordered_input[img_y][img_x] * cst_filter[fil_pos];
img_x++;
}
img_y++;
img_x = shared_x - dimension / 2;
}
output[input_pos] = sum;
sdata_min[tid] = sum;
sdata_max[tid] = sum;
}
__syncthreads();
for (unsigned int s = 512; s > 0; s >>= 1) {
if (tid < s) {
if(sdata_max[tid] < sdata_max[tid + s]){
sdata_max[tid] = sdata_max[tid + s];
}
if(sdata_min[tid + s] < sdata_min[tid]){
sdata_min[tid] = sdata_min[tid + s];
}
}
__syncthreads();
}
if (tid == 0) {
g_max[blockIdx.x + gridDim.x * blockIdx.y] = sdata_max[0];
g_min[blockIdx.x + gridDim.x * blockIdx.y] = sdata_min[0];
}
}
__global__ void normalize5(int32_t *image, int32_t width, int32_t height,
int32_t smallest, int32_t biggest)
{
int y = blockIdx.y * 32 + threadIdx.y;
int x = blockIdx.x * 32 + threadIdx.x;
int idx = y * width + x;
if(smallest != biggest && x < width && y < height){
image[idx] = ((image[idx] - smallest) * 255) / (biggest - smallest);
}
}
*/
__global__ void kernel5(int32_t dimension,
const int32_t *input, int32_t *output, int32_t width, int32_t height, int32_t *g_max, int32_t *g_min)
{
int start = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int32_t sdata_min[512];
__shared__ int32_t sdata_max[512];
unsigned int tid = threadIdx.x;
sdata_min[tid] = 999999;
sdata_max[tid] = -999999;
int32_t min_v = 999999;
int32_t max_v = -999999;
// if (height % devProp.maxThreadsDim[0] > 0) {rows += 1;}
for(int idx = start; idx < height * width; idx += gridDim.x * blockDim.x){
int32_t sum = 0;
// int initial_off = dimension / 2;
int img_x = idx % width - dimension / 2;
int img_y = idx / width - dimension / 2;
for(int y = 0; y < dimension; y++){
for(int x = 0; x < dimension; x ++){
if(img_x >= 0 && img_x < width && img_y >= 0 && img_y < height){
int fil_pos = dimension * y + x;
int img_pos = width * img_y + img_x;
sum += input[img_pos] * cst_filter[fil_pos];
}
img_x++;
}
img_y++;
img_x = idx % width - dimension / 2;
}
output[idx] = sum;
if(sum > max_v) {max_v = sum;}
if(sum < min_v) {min_v = sum;}
}
sdata_min[tid] = min_v;
sdata_max[tid] = max_v;
__syncthreads();
for (unsigned int s = blockDim.x/2; s > 32; s >>= 1) {
if (tid < s) {
if(sdata_max[tid] < sdata_max[tid + s]){
sdata_max[tid] = sdata_max[tid + s];
}
if(sdata_min[tid + s] < sdata_min[tid]){
sdata_min[tid] = sdata_min[tid + s];
}
}
__syncthreads();
}
unsigned int blockSize = blockDim.x;
if (tid < 32) {
volatile int32_t* smem_max = sdata_max;
volatile int32_t* smem_min = sdata_min;
if (blockSize >= 64) {
if(smem_max[tid] < smem_max[tid + 32]){
smem_max[tid] = smem_max[tid + 32];
}
if(smem_min[tid + 32] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 32];
}
}
if (blockSize >= 32) {
if(smem_max[tid] < smem_max[tid + 16]){
smem_max[tid] = smem_max[tid + 16];
}
if(smem_min[tid + 16] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 16];
}
}
if (blockSize >= 16) {
if(smem_max[tid] < smem_max[tid + 8]){
smem_max[tid] = smem_max[tid + 8];
}
if(smem_min[tid + 8] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 8];
}
}
if (blockSize >= 8) {
if(smem_max[tid] < smem_max[tid + 4]){
smem_max[tid] = smem_max[tid + 4];
}
if(smem_min[tid + 4] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 4];
}
}
if (blockSize >= 4) {
if(smem_max[tid] < smem_max[tid + 2]){
smem_max[tid] = smem_max[tid + 2];
}
if(smem_min[tid + 2] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 2];
}
}
if (blockSize >= 2) {
if(smem_max[tid] < smem_max[tid + 1]){
smem_max[tid] = smem_max[tid + 1];
}
if(smem_min[tid + 1] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 1];
}
}
}
if (tid == 0) {
g_max[blockIdx.x] = sdata_max[0];
g_min[blockIdx.x] = sdata_min[0];
}
}
__global__ void normalize5(int32_t *image, int32_t width, int32_t height,
int32_t smallest, int32_t biggest)
{
if(smallest != biggest){
int start = threadIdx.x + blockIdx.x * blockDim.x;
for(int idx = start; idx < height * width; idx += gridDim.x * blockDim.x){
if (idx < width * height){
image[idx] = ((image[idx] - smallest) * 255) / (biggest - smallest);
}
}
}
}
|
250705502e60c3efc01e8f4605b16c8ba54ba19d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2012-2017 VideoStitch SAS
// Copyright (c) 2018 stitchEm
#include "backend/common/imageOps.hpp"
#include "backend/common/vectorOps.hpp"
#include "cuda/util.hpp"
#include "../deviceBuffer.hpp"
#include "../deviceStream.hpp"
#include "backend/common/imageOps.hpp"
#define CUDABLOCKSIZE 512
namespace VideoStitch {
namespace Image {
namespace {
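// subtractRGBKernel subtracts toSubtract from dst channel by channel, but only where the
// subtrahend pixel is solid (non-zero alpha); dst keeps its own alpha.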
template <typename PixelTypeIn, typename PixelTypeOut>
__global__ void subtractRGBKernel(uint32_t *dst, const uint32_t *toSubtract, std::size_t size) {
std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < size) {
uint32_t vSrc = toSubtract[i];
uint32_t vDst = dst[i];
int32_t toSubtractIsSolid = !!PixelTypeIn::a(vSrc);
int32_t r = PixelTypeIn::r(vDst) - toSubtractIsSolid * PixelTypeIn::r(vSrc);
int32_t g = PixelTypeIn::g(vDst) - toSubtractIsSolid * PixelTypeIn::g(vSrc);
int32_t b = PixelTypeIn::b(vDst) - toSubtractIsSolid * PixelTypeIn::b(vSrc);
dst[i] = PixelTypeOut::pack(r, g, b, PixelTypeIn::a(vDst));
}
}
template <typename T>
__global__ void subtractKernel(T *dst, const T *toSubtract, std::size_t size) {
std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < size) {
dst[i] -= toSubtract[i];
}
}
} // namespace
Status subtractRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toSubtract, std::size_t size,
GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
hipLaunchKernelGGL(( subtractRGBKernel<RGBA, RGBA>), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), toSubtract.get(), size);
return CUDA_STATUS;
}
Status subtractRGB210(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toSubtract, std::size_t size,
GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
hipLaunchKernelGGL(( subtractRGBKernel<RGB210, RGB210>), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), toSubtract.get(), size);
return CUDA_STATUS;
}
Status subtract(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toSubtract, std::size_t size,
GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
hipLaunchKernelGGL(( subtractRGBKernel<RGBA, RGB210>), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), toSubtract.get(), size);
return CUDA_STATUS;
}
template <typename T>
Status subtractRaw(GPU::Buffer<T> dst, GPU::Buffer<const T> toSubtract, std::size_t size, GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
hipLaunchKernelGGL(( subtractKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), toSubtract.get(), size);
return CUDA_STATUS;
}
namespace {
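// addRGBKernel adds toAdd into dst where both pixels are solid; a transparent dst pixel forces
// the result to zero, and the clamp template flag clamps each channel to 8 bits.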
template <typename A, typename B, typename Result, bool clamp = false>
__global__ void addRGBKernel(uint32_t *dst, const uint32_t *toAdd, std::size_t size) {
std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < size) {
uint32_t vSrc = toAdd[i];
uint32_t vDst = dst[i];
int32_t srcIsSolid = !!B::a(vSrc);
int32_t dstIsSolid = !!A::a(vDst);
int32_t r = dstIsSolid * (A::r(vDst) + srcIsSolid * B::r(vSrc));
int32_t g = dstIsSolid * (A::g(vDst) + srcIsSolid * B::g(vSrc));
int32_t b = dstIsSolid * (A::b(vDst) + srcIsSolid * B::b(vSrc));
if (clamp) {
dst[i] = Result::pack(clamp8(r), clamp8(g), clamp8(b), dstIsSolid * 0xff);
} else {
dst[i] = Result::pack(r, g, b, dstIsSolid * 0xff);
}
}
}
__global__ void addRGB210Kernel(uint32_t *dst, const uint32_t *toAdd0, const uint32_t *toAdd1, std::size_t size) {
std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < size) {
uint32_t vSrc = toAdd0[i];
uint32_t vDst = toAdd1[i];
int32_t srcIsSolid = !!RGB210::a(vSrc);
int32_t dstIsSolid = !!RGB210::a(vDst);
int32_t r = dstIsSolid * (RGB210::r(vDst) + srcIsSolid * RGB210::r(vSrc));
int32_t g = dstIsSolid * (RGB210::g(vDst) + srcIsSolid * RGB210::g(vSrc));
int32_t b = dstIsSolid * (RGB210::b(vDst) + srcIsSolid * RGB210::b(vSrc));
dst[i] = RGB210::pack(r, g, b, dstIsSolid * 0xff);
}
}
template <typename T>
__global__ void addKernel(T *dst, const T *toAdd, std::size_t size) {
std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < size) {
dst[i] += toAdd[i];
}
}
template <typename T>
__global__ void addKernel(T *dst, const T *toAdd0, const T *toAdd1, std::size_t size) {
std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < size) {
dst[i] = toAdd0[i] + toAdd1[i];
}
}
} // namespace
template <typename A, typename B, typename Result>
Status add(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toAdd, std::size_t size, GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
hipLaunchKernelGGL(( addRGBKernel<A, B, Result>), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), toAdd.get(), size);
return CUDA_STATUS;
}
template <typename T>
Status addRaw(GPU::Buffer<T> dst, GPU::Buffer<const T> toAdd, std::size_t size, GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
hipLaunchKernelGGL(( addKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), toAdd.get(), size);
return CUDA_STATUS;
}
template <typename T>
Status addRaw(GPU::Buffer<T> dst, GPU::Buffer<const T> toAdd0, GPU::Buffer<const T> toAdd1, std::size_t size,
GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
hipLaunchKernelGGL(( addKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), toAdd0.get(), toAdd1.get(), size);
return CUDA_STATUS;
}
Status addRGB210(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toAdd0, GPU::Buffer<const uint32_t> toAdd1,
std::size_t size, GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
hipLaunchKernelGGL(( addRGB210Kernel), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), toAdd0.get(), toAdd1.get(), size);
return CUDA_STATUS;
}
Status add10(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toSubtract, std::size_t size, GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
hipLaunchKernelGGL(( addRGBKernel<RGB210, RGB210, RGB210>), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), toSubtract.get(), size);
return CUDA_STATUS;
}
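// Illustrative host-side sketch of these wrappers (buffer, size and stream names are hypothetical,
// not part of this file):
// GPU::Buffer<uint32_t> acc = ...; // accumulator in RGB210
// GPU::Buffer<const uint32_t> layer = ...; // source layer in RGBA
// Status s = add10n8(acc, layer, numPixels, stream);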
Status add10n8(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toSubtract, std::size_t size,
GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
hipLaunchKernelGGL(( addRGBKernel<RGB210, RGBA, RGB210>), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), toSubtract.get(), size);
return CUDA_STATUS;
}
Status add10n8Clamp(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toSubtract, std::size_t size,
GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
hipLaunchKernelGGL(( addRGBKernel<RGB210, RGBA, RGBA, true>), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), toSubtract.get(), size);
return CUDA_STATUS;
}
Status addClamp(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toSubtract, std::size_t size,
GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
hipLaunchKernelGGL(( addRGBKernel<RGB210, RGB210, RGBA, true>), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), toSubtract.get(), size);
return CUDA_STATUS;
}
namespace {
template <typename T>
__global__ void andOperatorKernel(T *dst, const T *toAnd, std::size_t size) {
std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < size) {
dst[i] = dst[i] & toAnd[i];
}
}
template <typename T>
__global__ void mulOperatorKernel(T *dst, const T toMul, std::size_t size) {
std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < size) {
dst[i] = dst[i] * toMul;
}
}
} // namespace
template <typename T>
Status andOperatorRaw(GPU::Buffer<T> dst, GPU::Buffer<const T> toAnd, std::size_t size, GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
hipLaunchKernelGGL(( andOperatorKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), toAnd.get(), size);
return CUDA_STATUS;
}
template Status andOperatorRaw(GPU::Buffer<unsigned char> dst, GPU::Buffer<const unsigned char> toAnd, std::size_t size,
GPU::Stream stream);
template <typename T>
Status mulOperatorRaw(GPU::Buffer<T> dst, const T toMul, std::size_t size, GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
hipLaunchKernelGGL(( mulOperatorKernel<T>), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), toMul, size);
return CUDA_STATUS;
}
template Status mulOperatorRaw(GPU::Buffer<float2> dst, const float2 toMul, std::size_t size, GPU::Stream stream);
#include "backend/common/image/imageOps.inst"
} // namespace Image
} // namespace VideoStitch
| 250705502e60c3efc01e8f4605b16c8ba54ba19d.cu | // Copyright (c) 2012-2017 VideoStitch SAS
// Copyright (c) 2018 stitchEm
#include "backend/common/imageOps.hpp"
#include "backend/common/vectorOps.hpp"
#include "cuda/util.hpp"
#include "../deviceBuffer.hpp"
#include "../deviceStream.hpp"
#include "backend/common/imageOps.hpp"
#define CUDABLOCKSIZE 512
namespace VideoStitch {
namespace Image {
namespace {
template <typename PixelTypeIn, typename PixelTypeOut>
__global__ void subtractRGBKernel(uint32_t *dst, const uint32_t *toSubtract, std::size_t size) {
std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < size) {
uint32_t vSrc = toSubtract[i];
uint32_t vDst = dst[i];
int32_t toSubtractIsSolid = !!PixelTypeIn::a(vSrc);
int32_t r = PixelTypeIn::r(vDst) - toSubtractIsSolid * PixelTypeIn::r(vSrc);
int32_t g = PixelTypeIn::g(vDst) - toSubtractIsSolid * PixelTypeIn::g(vSrc);
int32_t b = PixelTypeIn::b(vDst) - toSubtractIsSolid * PixelTypeIn::b(vSrc);
dst[i] = PixelTypeOut::pack(r, g, b, PixelTypeIn::a(vDst));
}
}
template <typename T>
__global__ void subtractKernel(T *dst, const T *toSubtract, std::size_t size) {
std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < size) {
dst[i] -= toSubtract[i];
}
}
} // namespace
Status subtractRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toSubtract, std::size_t size,
GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
subtractRGBKernel<RGBA, RGBA><<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), toSubtract.get(), size);
return CUDA_STATUS;
}
Status subtractRGB210(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toSubtract, std::size_t size,
GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
subtractRGBKernel<RGB210, RGB210><<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), toSubtract.get(), size);
return CUDA_STATUS;
}
Status subtract(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toSubtract, std::size_t size,
GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
subtractRGBKernel<RGBA, RGB210><<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), toSubtract.get(), size);
return CUDA_STATUS;
}
template <typename T>
Status subtractRaw(GPU::Buffer<T> dst, GPU::Buffer<const T> toSubtract, std::size_t size, GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
subtractKernel<T><<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), toSubtract.get(), size);
return CUDA_STATUS;
}
namespace {
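// Per-pixel add of toAdd into dst for packed colour formats: A is the packing of
// dst, B that of toAdd and Result that of the output. A transparent toAdd pixel
// (alpha == 0) contributes nothing, a transparent dst pixel forces a fully
// transparent result, and when clamp is set each channel is clamped to 8 bits.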
template <typename A, typename B, typename Result, bool clamp = false>
__global__ void addRGBKernel(uint32_t *dst, const uint32_t *toAdd, std::size_t size) {
std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < size) {
uint32_t vSrc = toAdd[i];
uint32_t vDst = dst[i];
int32_t srcIsSolid = !!B::a(vSrc);
int32_t dstIsSolid = !!A::a(vDst);
int32_t r = dstIsSolid * (A::r(vDst) + srcIsSolid * B::r(vSrc));
int32_t g = dstIsSolid * (A::g(vDst) + srcIsSolid * B::g(vSrc));
int32_t b = dstIsSolid * (A::b(vDst) + srcIsSolid * B::b(vSrc));
if (clamp) {
dst[i] = Result::pack(clamp8(r), clamp8(g), clamp8(b), dstIsSolid * 0xff);
} else {
dst[i] = Result::pack(r, g, b, dstIsSolid * 0xff);
}
}
}
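// Two-input RGB210 add: writes toAdd0[i] + toAdd1[i] into dst[i]. A transparent
// toAdd0 pixel contributes nothing and a transparent toAdd1 pixel yields a fully
// transparent result (the output alpha follows toAdd1).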
__global__ void addRGB210Kernel(uint32_t *dst, const uint32_t *toAdd0, const uint32_t *toAdd1, std::size_t size) {
std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < size) {
uint32_t vSrc = toAdd0[i];
uint32_t vDst = toAdd1[i];
int32_t srcIsSolid = !!RGB210::a(vSrc);
int32_t dstIsSolid = !!RGB210::a(vDst);
int32_t r = dstIsSolid * (RGB210::r(vDst) + srcIsSolid * RGB210::r(vSrc));
int32_t g = dstIsSolid * (RGB210::g(vDst) + srcIsSolid * RGB210::g(vSrc));
int32_t b = dstIsSolid * (RGB210::b(vDst) + srcIsSolid * RGB210::b(vSrc));
dst[i] = RGB210::pack(r, g, b, dstIsSolid * 0xff);
}
}
template <typename T>
__global__ void addKernel(T *dst, const T *toAdd, std::size_t size) {
std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < size) {
dst[i] += toAdd[i];
}
}
template <typename T>
__global__ void addKernel(T *dst, const T *toAdd0, const T *toAdd1, std::size_t size) {
std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < size) {
dst[i] = toAdd0[i] + toAdd1[i];
}
}
} // namespace
template <typename A, typename B, typename Result>
Status add(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toAdd, std::size_t size, GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
addRGBKernel<A, B, Result><<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), toAdd.get(), size);
return CUDA_STATUS;
}
template <typename T>
Status addRaw(GPU::Buffer<T> dst, GPU::Buffer<const T> toAdd, std::size_t size, GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
addKernel<T><<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), toAdd.get(), size);
return CUDA_STATUS;
}
template <typename T>
Status addRaw(GPU::Buffer<T> dst, GPU::Buffer<const T> toAdd0, GPU::Buffer<const T> toAdd1, std::size_t size,
GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
addKernel<T><<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), toAdd0.get(), toAdd1.get(), size);
return CUDA_STATUS;
}
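// Note on the fixed-signature wrappers below: judging by the template arguments,
// "10" selects the RGB210 packing and "8" the 8-bit RGBA packing, and the "Clamp"
// variants instantiate the clamping kernel so the sum fits an 8-bit RGBA result.
// The parameter name toSubtract is inherited from the subtract wrappers above;
// these functions add.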
Status addRGB210(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toAdd0, GPU::Buffer<const uint32_t> toAdd1,
std::size_t size, GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
addRGB210Kernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), toAdd0.get(), toAdd1.get(), size);
return CUDA_STATUS;
}
Status add10(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toSubtract, std::size_t size, GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
addRGBKernel<RGB210, RGB210, RGB210><<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), toSubtract.get(), size);
return CUDA_STATUS;
}
Status add10n8(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toSubtract, std::size_t size,
GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
addRGBKernel<RGB210, RGBA, RGB210><<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), toSubtract.get(), size);
return CUDA_STATUS;
}
Status add10n8Clamp(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toSubtract, std::size_t size,
GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
addRGBKernel<RGB210, RGBA, RGBA, true><<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), toSubtract.get(), size);
return CUDA_STATUS;
}
Status addClamp(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> toSubtract, std::size_t size,
GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
addRGBKernel<RGB210, RGB210, RGBA, true><<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), toSubtract.get(), size);
return CUDA_STATUS;
}
namespace {
template <typename T>
__global__ void andOperatorKernel(T *dst, const T *toAnd, std::size_t size) {
std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < size) {
dst[i] = dst[i] & toAnd[i];
}
}
template <typename T>
__global__ void mulOperatorKernel(T *dst, const T toMul, std::size_t size) {
std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i < size) {
dst[i] = dst[i] * toMul;
}
}
} // namespace
template <typename T>
Status andOperatorRaw(GPU::Buffer<T> dst, GPU::Buffer<const T> toAnd, std::size_t size, GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
andOperatorKernel<T><<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), toAnd.get(), size);
return CUDA_STATUS;
}
template Status andOperatorRaw(GPU::Buffer<unsigned char> dst, GPU::Buffer<const unsigned char> toAnd, std::size_t size,
GPU::Stream stream);
template <typename T>
Status mulOperatorRaw(GPU::Buffer<T> dst, const T toMul, std::size_t size, GPU::Stream stream) {
dim3 dimBlock(CUDABLOCKSIZE);
dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE));
mulOperatorKernel<T><<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), toMul, size);
return CUDA_STATUS;
}
template Status mulOperatorRaw(GPU::Buffer<float2> dst, const float2 toMul, std::size_t size, GPU::Stream stream);
#include "backend/common/image/imageOps.inst"
} // namespace Image
} // namespace VideoStitch
|
291161e86565658589273934763a3073489c89d0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it exposes only limited parallelism: the two
leading letters are spread across a 26x26 grid of threads, while each thread
still brute-forces all four trailing digits sequentially.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of a library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o cuda_crack cuda_crack.cu
./cuda_crack
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise, it returns 0.
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
char plain_password[] = "KB5234";
char *a = attempt;
char *p = plain_password;
while(*a == *p) {
if(*a == '\0') {
return 1;
}
a++;
p++;
}
return 0;
}
/****************************************************************************
The kernel is launched with a 26x26 grid: each thread fixes the two leading
letters of the candidate ('A' + threadIdx.x and 'A' + blockIdx.x) and uses
nested loops to enumerate the four trailing digits, testing each candidate
against the hidden password.
*****************************************************************************/
__global__ void kernel() {
int w, a, s, d, g;
char password[7]; /* six characters plus the terminating '\0' */
password[6] = '\0';
password[0] ='A' + threadIdx.x;
password[1] ='A' + blockIdx.x;
for(w = 0; w < 10; w++){
g = w + '0';
password[2] =g;
for(a = 0; a < 10; a++){
g = a + '0';
password[3] =g;
for(s = 0; s < 10; s++){
g = s + '0';
password[4] =g;
for(d = 0; d < 10; d++){
g = d + '0';
password[5] =g;
if(is_a_match(password)) {
printf("password found: %s\n", password);
}
}
}
}
}
}
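/* Stores finish - start in nanoseconds in *difference; returns 0 when the
difference is positive and non-zero otherwise. */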
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if (dn < 0) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL(kernel, dim3(26), dim3(26), 0, 0);
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed / 1.0e9));
return 0;
}
| 291161e86565658589273934763a3073489c89d0.cu | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it exposes only limited parallelism: the two
leading letters are spread across a 26x26 grid of threads, while each thread
still brute-forces all four trailing digits sequentially.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of a library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o cuda_crack cuda_crack.cu
./cuda_crack
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise, it returns 0.
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
char plain_password[] = "KB5234";
char *a = attempt;
char *p = plain_password;
while(*a == *p) {
if(*a == '\0') {
return 1;
}
a++;
p++;
}
return 0;
}
/****************************************************************************
The kernel is launched with a 26x26 grid: each thread fixes the two leading
letters of the candidate ('A' + threadIdx.x and 'A' + blockIdx.x) and uses
nested loops to enumerate the four trailing digits, testing each candidate
against the hidden password.
*****************************************************************************/
__global__ void kernel() {
int w, a, s, d, g;
char password[7]; /* six characters plus the terminating '\0' */
password[6] = '\0';
password[0] ='A' + threadIdx.x;
password[1] ='A' + blockIdx.x;
for(w = 0; w < 10; w++){
g = w + '0';
password[2] =g;
for(a = 0; a < 10; a++){
g = a + '0';
password[3] =g;
for(s = 0; s < 10; s++){
g = s + '0';
password[4] =g;
for(d = 0; d < 10; d++){
g = d + '0';
password[5] =g;
if(is_a_match(password)) {
printf("password found: %s\n", password);
}
}
}
}
}
}
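/* Stores finish - start in nanoseconds in *difference; returns 0 when the
difference is positive and non-zero otherwise. */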
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if (dn < 0) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
kernel <<<26, 26>>>();
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed / 1.0e9));
return 0;
}
|
6e1bb3b6864c0d538a6a9595d3ccfa07d9b30c60.hip | // !!! This is a file automatically generated by hipify!!!
// CUDA Dependencies
#include <hip/hip_runtime.h>
// GL dependencies
#define GLM_FORCE_CUDA
#include <glm/glm.hpp>
namespace CuKee
{
} | 6e1bb3b6864c0d538a6a9595d3ccfa07d9b30c60.cu | // CUDA Dependencies
#include <cuda.h>
// GL dependencies
#define GLM_FORCE_CUDA
#include <glm/glm.hpp>
namespace CuKee
{
} |
af170e71d7ea74ff0c3c804afc0ac54630beb9db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* 11.69:cc: entry function 'generate' used 63 registers, 0 bytes smem, 0 bytes lmem, 0 bytes cmem
* ... multiprocessor occupancy 50.0% : 1024 threads over 32 warps in 16 blocks
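* (i.e. presumably 32 resident warps out of an assumed 64-warp-per-SM limit;
* 16 blocks x 64 threads/block = 1024 threads = 32 warps)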
*
* benchmarking mandelbrot
* collecting 100 samples, 1 iterations each, in estimated 1193.660 s
* mean: 28.51441 ms, lb 28.44537 ms, ub 28.62653 ms, ci 0.950
* std dev: 441.4357 us, lb 308.6300 us, ub 656.7542 us, ci 0.950
* found 11 outliers among 100 samples (11.0%)
* 6 (6.0%) high mild
* 5 (5.0%) high severe
* variance introduced by outliers: 8.472%
* variance is slightly inflated by outliers
*/
#include <accelerate_cuda.h>
typedef DIM2 DimOut;
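/* Auto-generated escape-time kernel: each thread takes one output pixel (ix),
maps it to a point c = (v8, v9) of the complex plane spanned by the bounds held
in arrIn0, then applies fully unrolled z := z*z + c steps, carrying the real
part, imaginary part and iteration count through the v... temporaries. Once
|z|^2 > 4 the ternary selects freeze the state, so the surviving count is the
escape iteration (capped by the limit in v0), presumably written to arrOut_a0
after the unrolled sequence. */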
extern "C" __global__ void generate(const DIM0 shIn0, const float* __restrict__ arrIn0_a3, const float* __restrict__ arrIn0_a2, const float* __restrict__ arrIn0_a1, const float* __restrict__ arrIn0_a0, const DIM2 shOut, Word32* __restrict__ arrOut_a0)
{
const int shapeSize = size(shOut);
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const DimOut sh = fromIndex(shOut, ix);
const Int64 v0 = (Int64) 255;
const int v1 = toIndex(shIn0, shape());
const float v2 = arrIn0_a3[v1];
const float v3 = arrIn0_a2[v1];
const float v4 = arrIn0_a1[v1];
const float v5 = arrIn0_a0[v1];
const Int64 v6 = sh.a1;
const Int64 v7 = sh.a0;
const float v8 = v2 + (float) v7 * (v4 - v2) / 800.0f;
const float v9 = v3 + (float) v6 * (v5 - v3) / 600.0f;
const float v10 = v8;
const float v11 = v9;
const Int64 v12 = (Int64) 0;
const float v13 = v10 * v10 - v11 * v11;
const float v14 = v10 * v11 + v11 * v10;
const float v15 = v8 + v13;
const float v16 = v9 + v14;
const Word8 v17 = v15 * v15 + v16 * v16 > 4.0f;
const float v18 = v17 ? v10 : v15;
const float v19 = v17 ? v11 : v16;
const Int64 v20 = v17 ? v12 : (Int64) 1;
const float v21 = v18 * v18 - v19 * v19;
const float v22 = v18 * v19 + v19 * v18;
const float v23 = v8 + v21;
const float v24 = v9 + v22;
const Word8 v25 = v23 * v23 + v24 * v24 > 4.0f;
const float v26 = v25 ? v18 : v23;
const float v27 = v25 ? v19 : v24;
const Int64 v28 = v25 ? v20 : (Int64) 1 + v20;
const float v29 = v26 * v26 - v27 * v27;
const float v30 = v26 * v27 + v27 * v26;
const float v31 = v8 + v29;
const float v32 = v9 + v30;
const Word8 v33 = v31 * v31 + v32 * v32 > 4.0f;
const float v34 = v33 ? v26 : v31;
const float v35 = v33 ? v27 : v32;
const Int64 v36 = v33 ? v28 : (Int64) 1 + v28;
const float v37 = v34 * v34 - v35 * v35;
const float v38 = v34 * v35 + v35 * v34;
const float v39 = v8 + v37;
const float v40 = v9 + v38;
const Word8 v41 = v39 * v39 + v40 * v40 > 4.0f;
const float v42 = v41 ? v34 : v39;
const float v43 = v41 ? v35 : v40;
const Int64 v44 = v41 ? v36 : (Int64) 1 + v36;
const float v45 = v42 * v42 - v43 * v43;
const float v46 = v42 * v43 + v43 * v42;
const float v47 = v8 + v45;
const float v48 = v9 + v46;
const Word8 v49 = v47 * v47 + v48 * v48 > 4.0f;
const float v50 = v49 ? v42 : v47;
const float v51 = v49 ? v43 : v48;
const Int64 v52 = v49 ? v44 : (Int64) 1 + v44;
const float v53 = v50 * v50 - v51 * v51;
const float v54 = v50 * v51 + v51 * v50;
const float v55 = v8 + v53;
const float v56 = v9 + v54;
const Word8 v57 = v55 * v55 + v56 * v56 > 4.0f;
const float v58 = v57 ? v50 : v55;
const float v59 = v57 ? v51 : v56;
const Int64 v60 = v57 ? v52 : (Int64) 1 + v52;
const float v61 = v58 * v58 - v59 * v59;
const float v62 = v58 * v59 + v59 * v58;
const float v63 = v8 + v61;
const float v64 = v9 + v62;
const Word8 v65 = v63 * v63 + v64 * v64 > 4.0f;
const float v66 = v65 ? v58 : v63;
const float v67 = v65 ? v59 : v64;
const Int64 v68 = v65 ? v60 : (Int64) 1 + v60;
const float v69 = v66 * v66 - v67 * v67;
const float v70 = v66 * v67 + v67 * v66;
const float v71 = v8 + v69;
const float v72 = v9 + v70;
const Word8 v73 = v71 * v71 + v72 * v72 > 4.0f;
const float v74 = v73 ? v66 : v71;
const float v75 = v73 ? v67 : v72;
const Int64 v76 = v73 ? v68 : (Int64) 1 + v68;
const float v77 = v74 * v74 - v75 * v75;
const float v78 = v74 * v75 + v75 * v74;
const float v79 = v8 + v77;
const float v80 = v9 + v78;
const Word8 v81 = v79 * v79 + v80 * v80 > 4.0f;
const float v82 = v81 ? v74 : v79;
const float v83 = v81 ? v75 : v80;
const Int64 v84 = v81 ? v76 : (Int64) 1 + v76;
const float v85 = v82 * v82 - v83 * v83;
const float v86 = v82 * v83 + v83 * v82;
const float v87 = v8 + v85;
const float v88 = v9 + v86;
const Word8 v89 = v87 * v87 + v88 * v88 > 4.0f;
const float v90 = v89 ? v82 : v87;
const float v91 = v89 ? v83 : v88;
const Int64 v92 = v89 ? v84 : (Int64) 1 + v84;
const float v93 = v90 * v90 - v91 * v91;
const float v94 = v90 * v91 + v91 * v90;
const float v95 = v8 + v93;
const float v96 = v9 + v94;
const Word8 v97 = v95 * v95 + v96 * v96 > 4.0f;
const float v98 = v97 ? v90 : v95;
const float v99 = v97 ? v91 : v96;
const Int64 v100 = v97 ? v92 : (Int64) 1 + v92;
const float v101 = v98 * v98 - v99 * v99;
const float v102 = v98 * v99 + v99 * v98;
const float v103 = v8 + v101;
const float v104 = v9 + v102;
const Word8 v105 = v103 * v103 + v104 * v104 > 4.0f;
const float v106 = v105 ? v98 : v103;
const float v107 = v105 ? v99 : v104;
const Int64 v108 = v105 ? v100 : (Int64) 1 + v100;
const float v109 = v106 * v106 - v107 * v107;
const float v110 = v106 * v107 + v107 * v106;
const float v111 = v8 + v109;
const float v112 = v9 + v110;
const Word8 v113 = v111 * v111 + v112 * v112 > 4.0f;
const float v114 = v113 ? v106 : v111;
const float v115 = v113 ? v107 : v112;
const Int64 v116 = v113 ? v108 : (Int64) 1 + v108;
const float v117 = v114 * v114 - v115 * v115;
const float v118 = v114 * v115 + v115 * v114;
const float v119 = v8 + v117;
const float v120 = v9 + v118;
const Word8 v121 = v119 * v119 + v120 * v120 > 4.0f;
const float v122 = v121 ? v114 : v119;
const float v123 = v121 ? v115 : v120;
const Int64 v124 = v121 ? v116 : (Int64) 1 + v116;
const float v125 = v122 * v122 - v123 * v123;
const float v126 = v122 * v123 + v123 * v122;
const float v127 = v8 + v125;
const float v128 = v9 + v126;
const Word8 v129 = v127 * v127 + v128 * v128 > 4.0f;
const float v130 = v129 ? v122 : v127;
const float v131 = v129 ? v123 : v128;
const Int64 v132 = v129 ? v124 : (Int64) 1 + v124;
const float v133 = v130 * v130 - v131 * v131;
const float v134 = v130 * v131 + v131 * v130;
const float v135 = v8 + v133;
const float v136 = v9 + v134;
const Word8 v137 = v135 * v135 + v136 * v136 > 4.0f;
const float v138 = v137 ? v130 : v135;
const float v139 = v137 ? v131 : v136;
const Int64 v140 = v137 ? v132 : (Int64) 1 + v132;
const float v141 = v138 * v138 - v139 * v139;
const float v142 = v138 * v139 + v139 * v138;
const float v143 = v8 + v141;
const float v144 = v9 + v142;
const Word8 v145 = v143 * v143 + v144 * v144 > 4.0f;
const float v146 = v145 ? v138 : v143;
const float v147 = v145 ? v139 : v144;
const Int64 v148 = v145 ? v140 : (Int64) 1 + v140;
const float v149 = v146 * v146 - v147 * v147;
const float v150 = v146 * v147 + v147 * v146;
const float v151 = v8 + v149;
const float v152 = v9 + v150;
const Word8 v153 = v151 * v151 + v152 * v152 > 4.0f;
const float v154 = v153 ? v146 : v151;
const float v155 = v153 ? v147 : v152;
const Int64 v156 = v153 ? v148 : (Int64) 1 + v148;
const float v157 = v154 * v154 - v155 * v155;
const float v158 = v154 * v155 + v155 * v154;
const float v159 = v8 + v157;
const float v160 = v9 + v158;
const Word8 v161 = v159 * v159 + v160 * v160 > 4.0f;
const float v162 = v161 ? v154 : v159;
const float v163 = v161 ? v155 : v160;
const Int64 v164 = v161 ? v156 : (Int64) 1 + v156;
const float v165 = v162 * v162 - v163 * v163;
const float v166 = v162 * v163 + v163 * v162;
const float v167 = v8 + v165;
const float v168 = v9 + v166;
const Word8 v169 = v167 * v167 + v168 * v168 > 4.0f;
const float v170 = v169 ? v162 : v167;
const float v171 = v169 ? v163 : v168;
const Int64 v172 = v169 ? v164 : (Int64) 1 + v164;
const float v173 = v170 * v170 - v171 * v171;
const float v174 = v170 * v171 + v171 * v170;
const float v175 = v8 + v173;
const float v176 = v9 + v174;
const Word8 v177 = v175 * v175 + v176 * v176 > 4.0f;
const float v178 = v177 ? v170 : v175;
const float v179 = v177 ? v171 : v176;
const Int64 v180 = v177 ? v172 : (Int64) 1 + v172;
const float v181 = v178 * v178 - v179 * v179;
const float v182 = v178 * v179 + v179 * v178;
const float v183 = v8 + v181;
const float v184 = v9 + v182;
const Word8 v185 = v183 * v183 + v184 * v184 > 4.0f;
const float v186 = v185 ? v178 : v183;
const float v187 = v185 ? v179 : v184;
const Int64 v188 = v185 ? v180 : (Int64) 1 + v180;
const float v189 = v186 * v186 - v187 * v187;
const float v190 = v186 * v187 + v187 * v186;
const float v191 = v8 + v189;
const float v192 = v9 + v190;
const Word8 v193 = v191 * v191 + v192 * v192 > 4.0f;
const float v194 = v193 ? v186 : v191;
const float v195 = v193 ? v187 : v192;
const Int64 v196 = v193 ? v188 : (Int64) 1 + v188;
const float v197 = v194 * v194 - v195 * v195;
const float v198 = v194 * v195 + v195 * v194;
const float v199 = v8 + v197;
const float v200 = v9 + v198;
const Word8 v201 = v199 * v199 + v200 * v200 > 4.0f;
const float v202 = v201 ? v194 : v199;
const float v203 = v201 ? v195 : v200;
const Int64 v204 = v201 ? v196 : (Int64) 1 + v196;
const float v205 = v202 * v202 - v203 * v203;
const float v206 = v202 * v203 + v203 * v202;
const float v207 = v8 + v205;
const float v208 = v9 + v206;
const Word8 v209 = v207 * v207 + v208 * v208 > 4.0f;
const float v210 = v209 ? v202 : v207;
const float v211 = v209 ? v203 : v208;
const Int64 v212 = v209 ? v204 : (Int64) 1 + v204;
const float v213 = v210 * v210 - v211 * v211;
const float v214 = v210 * v211 + v211 * v210;
const float v215 = v8 + v213;
const float v216 = v9 + v214;
const Word8 v217 = v215 * v215 + v216 * v216 > 4.0f;
const float v218 = v217 ? v210 : v215;
const float v219 = v217 ? v211 : v216;
const Int64 v220 = v217 ? v212 : (Int64) 1 + v212;
const float v221 = v218 * v218 - v219 * v219;
const float v222 = v218 * v219 + v219 * v218;
const float v223 = v8 + v221;
const float v224 = v9 + v222;
const Word8 v225 = v223 * v223 + v224 * v224 > 4.0f;
const float v226 = v225 ? v218 : v223;
const float v227 = v225 ? v219 : v224;
const Int64 v228 = v225 ? v220 : (Int64) 1 + v220;
const float v229 = v226 * v226 - v227 * v227;
const float v230 = v226 * v227 + v227 * v226;
const float v231 = v8 + v229;
const float v232 = v9 + v230;
const Word8 v233 = v231 * v231 + v232 * v232 > 4.0f;
const float v234 = v233 ? v226 : v231;
const float v235 = v233 ? v227 : v232;
const Int64 v236 = v233 ? v228 : (Int64) 1 + v228;
const float v237 = v234 * v234 - v235 * v235;
const float v238 = v234 * v235 + v235 * v234;
const float v239 = v8 + v237;
const float v240 = v9 + v238;
const Word8 v241 = v239 * v239 + v240 * v240 > 4.0f;
const float v242 = v241 ? v234 : v239;
const float v243 = v241 ? v235 : v240;
const Int64 v244 = v241 ? v236 : (Int64) 1 + v236;
const float v245 = v242 * v242 - v243 * v243;
const float v246 = v242 * v243 + v243 * v242;
const float v247 = v8 + v245;
const float v248 = v9 + v246;
const Word8 v249 = v247 * v247 + v248 * v248 > 4.0f;
const float v250 = v249 ? v242 : v247;
const float v251 = v249 ? v243 : v248;
const Int64 v252 = v249 ? v244 : (Int64) 1 + v244;
const float v253 = v250 * v250 - v251 * v251;
const float v254 = v250 * v251 + v251 * v250;
const float v255 = v8 + v253;
const float v256 = v9 + v254;
const Word8 v257 = v255 * v255 + v256 * v256 > 4.0f;
const float v258 = v257 ? v250 : v255;
const float v259 = v257 ? v251 : v256;
const Int64 v260 = v257 ? v252 : (Int64) 1 + v252;
const float v261 = v258 * v258 - v259 * v259;
const float v262 = v258 * v259 + v259 * v258;
const float v263 = v8 + v261;
const float v264 = v9 + v262;
const Word8 v265 = v263 * v263 + v264 * v264 > 4.0f;
const float v266 = v265 ? v258 : v263;
const float v267 = v265 ? v259 : v264;
const Int64 v268 = v265 ? v260 : (Int64) 1 + v260;
const float v269 = v266 * v266 - v267 * v267;
const float v270 = v266 * v267 + v267 * v266;
const float v271 = v8 + v269;
const float v272 = v9 + v270;
const Word8 v273 = v271 * v271 + v272 * v272 > 4.0f;
const float v274 = v273 ? v266 : v271;
const float v275 = v273 ? v267 : v272;
const Int64 v276 = v273 ? v268 : (Int64) 1 + v268;
const float v277 = v274 * v274 - v275 * v275;
const float v278 = v274 * v275 + v275 * v274;
const float v279 = v8 + v277;
const float v280 = v9 + v278;
const Word8 v281 = v279 * v279 + v280 * v280 > 4.0f;
const float v282 = v281 ? v274 : v279;
const float v283 = v281 ? v275 : v280;
const Int64 v284 = v281 ? v276 : (Int64) 1 + v276;
const float v285 = v282 * v282 - v283 * v283;
const float v286 = v282 * v283 + v283 * v282;
const float v287 = v8 + v285;
const float v288 = v9 + v286;
const Word8 v289 = v287 * v287 + v288 * v288 > 4.0f;
const float v290 = v289 ? v282 : v287;
const float v291 = v289 ? v283 : v288;
const Int64 v292 = v289 ? v284 : (Int64) 1 + v284;
const float v293 = v290 * v290 - v291 * v291;
const float v294 = v290 * v291 + v291 * v290;
const float v295 = v8 + v293;
const float v296 = v9 + v294;
const Word8 v297 = v295 * v295 + v296 * v296 > 4.0f;
const float v298 = v297 ? v290 : v295;
const float v299 = v297 ? v291 : v296;
const Int64 v300 = v297 ? v292 : (Int64) 1 + v292;
const float v301 = v298 * v298 - v299 * v299;
const float v302 = v298 * v299 + v299 * v298;
const float v303 = v8 + v301;
const float v304 = v9 + v302;
const Word8 v305 = v303 * v303 + v304 * v304 > 4.0f;
const float v306 = v305 ? v298 : v303;
const float v307 = v305 ? v299 : v304;
const Int64 v308 = v305 ? v300 : (Int64) 1 + v300;
const float v309 = v306 * v306 - v307 * v307;
const float v310 = v306 * v307 + v307 * v306;
const float v311 = v8 + v309;
const float v312 = v9 + v310;
const Word8 v313 = v311 * v311 + v312 * v312 > 4.0f;
const float v314 = v313 ? v306 : v311;
const float v315 = v313 ? v307 : v312;
const Int64 v316 = v313 ? v308 : (Int64) 1 + v308;
const float v317 = v314 * v314 - v315 * v315;
const float v318 = v314 * v315 + v315 * v314;
const float v319 = v8 + v317;
const float v320 = v9 + v318;
const Word8 v321 = v319 * v319 + v320 * v320 > 4.0f;
const float v322 = v321 ? v314 : v319;
const float v323 = v321 ? v315 : v320;
const Int64 v324 = v321 ? v316 : (Int64) 1 + v316;
const float v325 = v322 * v322 - v323 * v323;
const float v326 = v322 * v323 + v323 * v322;
const float v327 = v8 + v325;
const float v328 = v9 + v326;
const Word8 v329 = v327 * v327 + v328 * v328 > 4.0f;
const float v330 = v329 ? v322 : v327;
const float v331 = v329 ? v323 : v328;
const Int64 v332 = v329 ? v324 : (Int64) 1 + v324;
const float v333 = v330 * v330 - v331 * v331;
const float v334 = v330 * v331 + v331 * v330;
const float v335 = v8 + v333;
const float v336 = v9 + v334;
const Word8 v337 = v335 * v335 + v336 * v336 > 4.0f;
const float v338 = v337 ? v330 : v335;
const float v339 = v337 ? v331 : v336;
const Int64 v340 = v337 ? v332 : (Int64) 1 + v332;
const float v341 = v338 * v338 - v339 * v339;
const float v342 = v338 * v339 + v339 * v338;
const float v343 = v8 + v341;
const float v344 = v9 + v342;
const Word8 v345 = v343 * v343 + v344 * v344 > 4.0f;
const float v346 = v345 ? v338 : v343;
const float v347 = v345 ? v339 : v344;
const Int64 v348 = v345 ? v340 : (Int64) 1 + v340;
const float v349 = v346 * v346 - v347 * v347;
const float v350 = v346 * v347 + v347 * v346;
const float v351 = v8 + v349;
const float v352 = v9 + v350;
const Word8 v353 = v351 * v351 + v352 * v352 > 4.0f;
const float v354 = v353 ? v346 : v351;
const float v355 = v353 ? v347 : v352;
const Int64 v356 = v353 ? v348 : (Int64) 1 + v348;
const float v357 = v354 * v354 - v355 * v355;
const float v358 = v354 * v355 + v355 * v354;
const float v359 = v8 + v357;
const float v360 = v9 + v358;
const Word8 v361 = v359 * v359 + v360 * v360 > 4.0f;
const float v362 = v361 ? v354 : v359;
const float v363 = v361 ? v355 : v360;
const Int64 v364 = v361 ? v356 : (Int64) 1 + v356;
const float v365 = v362 * v362 - v363 * v363;
const float v366 = v362 * v363 + v363 * v362;
const float v367 = v8 + v365;
const float v368 = v9 + v366;
const Word8 v369 = v367 * v367 + v368 * v368 > 4.0f;
const float v370 = v369 ? v362 : v367;
const float v371 = v369 ? v363 : v368;
const Int64 v372 = v369 ? v364 : (Int64) 1 + v364;
const float v373 = v370 * v370 - v371 * v371;
const float v374 = v370 * v371 + v371 * v370;
const float v375 = v8 + v373;
const float v376 = v9 + v374;
const Word8 v377 = v375 * v375 + v376 * v376 > 4.0f;
const float v378 = v377 ? v370 : v375;
const float v379 = v377 ? v371 : v376;
const Int64 v380 = v377 ? v372 : (Int64) 1 + v372;
const float v381 = v378 * v378 - v379 * v379;
const float v382 = v378 * v379 + v379 * v378;
const float v383 = v8 + v381;
const float v384 = v9 + v382;
const Word8 v385 = v383 * v383 + v384 * v384 > 4.0f;
const float v386 = v385 ? v378 : v383;
const float v387 = v385 ? v379 : v384;
const Int64 v388 = v385 ? v380 : (Int64) 1 + v380;
const float v389 = v386 * v386 - v387 * v387;
const float v390 = v386 * v387 + v387 * v386;
const float v391 = v8 + v389;
const float v392 = v9 + v390;
const Word8 v393 = v391 * v391 + v392 * v392 > 4.0f;
const float v394 = v393 ? v386 : v391;
const float v395 = v393 ? v387 : v392;
const Int64 v396 = v393 ? v388 : (Int64) 1 + v388;
const float v397 = v394 * v394 - v395 * v395;
const float v398 = v394 * v395 + v395 * v394;
const float v399 = v8 + v397;
const float v400 = v9 + v398;
const Word8 v401 = v399 * v399 + v400 * v400 > 4.0f;
const float v402 = v401 ? v394 : v399;
const float v403 = v401 ? v395 : v400;
const Int64 v404 = v401 ? v396 : (Int64) 1 + v396;
const float v405 = v402 * v402 - v403 * v403;
const float v406 = v402 * v403 + v403 * v402;
const float v407 = v8 + v405;
const float v408 = v9 + v406;
const Word8 v409 = v407 * v407 + v408 * v408 > 4.0f;
const float v410 = v409 ? v402 : v407;
const float v411 = v409 ? v403 : v408;
const Int64 v412 = v409 ? v404 : (Int64) 1 + v404;
const float v413 = v410 * v410 - v411 * v411;
const float v414 = v410 * v411 + v411 * v410;
const float v415 = v8 + v413;
const float v416 = v9 + v414;
const Word8 v417 = v415 * v415 + v416 * v416 > 4.0f;
const float v418 = v417 ? v410 : v415;
const float v419 = v417 ? v411 : v416;
const Int64 v420 = v417 ? v412 : (Int64) 1 + v412;
const float v421 = v418 * v418 - v419 * v419;
const float v422 = v418 * v419 + v419 * v418;
const float v423 = v8 + v421;
const float v424 = v9 + v422;
const Word8 v425 = v423 * v423 + v424 * v424 > 4.0f;
const float v426 = v425 ? v418 : v423;
const float v427 = v425 ? v419 : v424;
const Int64 v428 = v425 ? v420 : (Int64) 1 + v420;
const float v429 = v426 * v426 - v427 * v427;
const float v430 = v426 * v427 + v427 * v426;
const float v431 = v8 + v429;
const float v432 = v9 + v430;
const Word8 v433 = v431 * v431 + v432 * v432 > 4.0f;
const float v434 = v433 ? v426 : v431;
const float v435 = v433 ? v427 : v432;
const Int64 v436 = v433 ? v428 : (Int64) 1 + v428;
const float v437 = v434 * v434 - v435 * v435;
const float v438 = v434 * v435 + v435 * v434;
const float v439 = v8 + v437;
const float v440 = v9 + v438;
const Word8 v441 = v439 * v439 + v440 * v440 > 4.0f;
const float v442 = v441 ? v434 : v439;
const float v443 = v441 ? v435 : v440;
const Int64 v444 = v441 ? v436 : (Int64) 1 + v436;
const float v445 = v442 * v442 - v443 * v443;
const float v446 = v442 * v443 + v443 * v442;
const float v447 = v8 + v445;
const float v448 = v9 + v446;
const Word8 v449 = v447 * v447 + v448 * v448 > 4.0f;
const float v450 = v449 ? v442 : v447;
const float v451 = v449 ? v443 : v448;
const Int64 v452 = v449 ? v444 : (Int64) 1 + v444;
const float v453 = v450 * v450 - v451 * v451;
const float v454 = v450 * v451 + v451 * v450;
const float v455 = v8 + v453;
const float v456 = v9 + v454;
const Word8 v457 = v455 * v455 + v456 * v456 > 4.0f;
const float v458 = v457 ? v450 : v455;
const float v459 = v457 ? v451 : v456;
const Int64 v460 = v457 ? v452 : (Int64) 1 + v452;
const float v461 = v458 * v458 - v459 * v459;
const float v462 = v458 * v459 + v459 * v458;
const float v463 = v8 + v461;
const float v464 = v9 + v462;
const Word8 v465 = v463 * v463 + v464 * v464 > 4.0f;
const float v466 = v465 ? v458 : v463;
const float v467 = v465 ? v459 : v464;
const Int64 v468 = v465 ? v460 : (Int64) 1 + v460;
const float v469 = v466 * v466 - v467 * v467;
const float v470 = v466 * v467 + v467 * v466;
const float v471 = v8 + v469;
const float v472 = v9 + v470;
const Word8 v473 = v471 * v471 + v472 * v472 > 4.0f;
const float v474 = v473 ? v466 : v471;
const float v475 = v473 ? v467 : v472;
const Int64 v476 = v473 ? v468 : (Int64) 1 + v468;
const float v477 = v474 * v474 - v475 * v475;
const float v478 = v474 * v475 + v475 * v474;
const float v479 = v8 + v477;
const float v480 = v9 + v478;
const Word8 v481 = v479 * v479 + v480 * v480 > 4.0f;
const float v482 = v481 ? v474 : v479;
const float v483 = v481 ? v475 : v480;
const Int64 v484 = v481 ? v476 : (Int64) 1 + v476;
const float v485 = v482 * v482 - v483 * v483;
const float v486 = v482 * v483 + v483 * v482;
const float v487 = v8 + v485;
const float v488 = v9 + v486;
const Word8 v489 = v487 * v487 + v488 * v488 > 4.0f;
const float v490 = v489 ? v482 : v487;
const float v491 = v489 ? v483 : v488;
const Int64 v492 = v489 ? v484 : (Int64) 1 + v484;
const float v493 = v490 * v490 - v491 * v491;
const float v494 = v490 * v491 + v491 * v490;
const float v495 = v8 + v493;
const float v496 = v9 + v494;
const Word8 v497 = v495 * v495 + v496 * v496 > 4.0f;
const float v498 = v497 ? v490 : v495;
const float v499 = v497 ? v491 : v496;
const Int64 v500 = v497 ? v492 : (Int64) 1 + v492;
const float v501 = v498 * v498 - v499 * v499;
const float v502 = v498 * v499 + v499 * v498;
const float v503 = v8 + v501;
const float v504 = v9 + v502;
const Word8 v505 = v503 * v503 + v504 * v504 > 4.0f;
const float v506 = v505 ? v498 : v503;
const float v507 = v505 ? v499 : v504;
const Int64 v508 = v505 ? v500 : (Int64) 1 + v500;
const float v509 = v506 * v506 - v507 * v507;
const float v510 = v506 * v507 + v507 * v506;
const float v511 = v8 + v509;
const float v512 = v9 + v510;
const Word8 v513 = v511 * v511 + v512 * v512 > 4.0f;
const float v514 = v513 ? v506 : v511;
const float v515 = v513 ? v507 : v512;
const Int64 v516 = v513 ? v508 : (Int64) 1 + v508;
const float v517 = v514 * v514 - v515 * v515;
const float v518 = v514 * v515 + v515 * v514;
const float v519 = v8 + v517;
const float v520 = v9 + v518;
const Word8 v521 = v519 * v519 + v520 * v520 > 4.0f;
const float v522 = v521 ? v514 : v519;
const float v523 = v521 ? v515 : v520;
const Int64 v524 = v521 ? v516 : (Int64) 1 + v516;
const float v525 = v522 * v522 - v523 * v523;
const float v526 = v522 * v523 + v523 * v522;
const float v527 = v8 + v525;
const float v528 = v9 + v526;
const Word8 v529 = v527 * v527 + v528 * v528 > 4.0f;
const float v530 = v529 ? v522 : v527;
const float v531 = v529 ? v523 : v528;
const Int64 v532 = v529 ? v524 : (Int64) 1 + v524;
const float v533 = v530 * v530 - v531 * v531;
const float v534 = v530 * v531 + v531 * v530;
const float v535 = v8 + v533;
const float v536 = v9 + v534;
const Word8 v537 = v535 * v535 + v536 * v536 > 4.0f;
const float v538 = v537 ? v530 : v535;
const float v539 = v537 ? v531 : v536;
const Int64 v540 = v537 ? v532 : (Int64) 1 + v532;
const float v541 = v538 * v538 - v539 * v539;
const float v542 = v538 * v539 + v539 * v538;
const float v543 = v8 + v541;
const float v544 = v9 + v542;
const Word8 v545 = v543 * v543 + v544 * v544 > 4.0f;
const float v546 = v545 ? v538 : v543;
const float v547 = v545 ? v539 : v544;
const Int64 v548 = v545 ? v540 : (Int64) 1 + v540;
const float v549 = v546 * v546 - v547 * v547;
const float v550 = v546 * v547 + v547 * v546;
const float v551 = v8 + v549;
const float v552 = v9 + v550;
const Word8 v553 = v551 * v551 + v552 * v552 > 4.0f;
const float v554 = v553 ? v546 : v551;
const float v555 = v553 ? v547 : v552;
const Int64 v556 = v553 ? v548 : (Int64) 1 + v548;
const float v557 = v554 * v554 - v555 * v555;
const float v558 = v554 * v555 + v555 * v554;
const float v559 = v8 + v557;
const float v560 = v9 + v558;
const Word8 v561 = v559 * v559 + v560 * v560 > 4.0f;
const float v562 = v561 ? v554 : v559;
const float v563 = v561 ? v555 : v560;
const Int64 v564 = v561 ? v556 : (Int64) 1 + v556;
const float v565 = v562 * v562 - v563 * v563;
const float v566 = v562 * v563 + v563 * v562;
const float v567 = v8 + v565;
const float v568 = v9 + v566;
const Word8 v569 = v567 * v567 + v568 * v568 > 4.0f;
const float v570 = v569 ? v562 : v567;
const float v571 = v569 ? v563 : v568;
const Int64 v572 = v569 ? v564 : (Int64) 1 + v564;
const float v573 = v570 * v570 - v571 * v571;
const float v574 = v570 * v571 + v571 * v570;
const float v575 = v8 + v573;
const float v576 = v9 + v574;
const Word8 v577 = v575 * v575 + v576 * v576 > 4.0f;
const float v578 = v577 ? v570 : v575;
const float v579 = v577 ? v571 : v576;
const Int64 v580 = v577 ? v572 : (Int64) 1 + v572;
const float v581 = v578 * v578 - v579 * v579;
const float v582 = v578 * v579 + v579 * v578;
const float v583 = v8 + v581;
const float v584 = v9 + v582;
const Word8 v585 = v583 * v583 + v584 * v584 > 4.0f;
const float v586 = v585 ? v578 : v583;
const float v587 = v585 ? v579 : v584;
const Int64 v588 = v585 ? v580 : (Int64) 1 + v580;
const float v589 = v586 * v586 - v587 * v587;
const float v590 = v586 * v587 + v587 * v586;
const float v591 = v8 + v589;
const float v592 = v9 + v590;
const Word8 v593 = v591 * v591 + v592 * v592 > 4.0f;
const float v594 = v593 ? v586 : v591;
const float v595 = v593 ? v587 : v592;
const Int64 v596 = v593 ? v588 : (Int64) 1 + v588;
const float v597 = v594 * v594 - v595 * v595;
const float v598 = v594 * v595 + v595 * v594;
const float v599 = v8 + v597;
const float v600 = v9 + v598;
const Word8 v601 = v599 * v599 + v600 * v600 > 4.0f;
const float v602 = v601 ? v594 : v599;
const float v603 = v601 ? v595 : v600;
const Int64 v604 = v601 ? v596 : (Int64) 1 + v596;
const float v605 = v602 * v602 - v603 * v603;
const float v606 = v602 * v603 + v603 * v602;
const float v607 = v8 + v605;
const float v608 = v9 + v606;
const Word8 v609 = v607 * v607 + v608 * v608 > 4.0f;
const float v610 = v609 ? v602 : v607;
const float v611 = v609 ? v603 : v608;
const Int64 v612 = v609 ? v604 : (Int64) 1 + v604;
const float v613 = v610 * v610 - v611 * v611;
const float v614 = v610 * v611 + v611 * v610;
const float v615 = v8 + v613;
const float v616 = v9 + v614;
const Word8 v617 = v615 * v615 + v616 * v616 > 4.0f;
const float v618 = v617 ? v610 : v615;
const float v619 = v617 ? v611 : v616;
const Int64 v620 = v617 ? v612 : (Int64) 1 + v612;
const float v621 = v618 * v618 - v619 * v619;
const float v622 = v618 * v619 + v619 * v618;
const float v623 = v8 + v621;
const float v624 = v9 + v622;
const Word8 v625 = v623 * v623 + v624 * v624 > 4.0f;
const float v626 = v625 ? v618 : v623;
const float v627 = v625 ? v619 : v624;
const Int64 v628 = v625 ? v620 : (Int64) 1 + v620;
const float v629 = v626 * v626 - v627 * v627;
const float v630 = v626 * v627 + v627 * v626;
const float v631 = v8 + v629;
const float v632 = v9 + v630;
const Word8 v633 = v631 * v631 + v632 * v632 > 4.0f;
const float v634 = v633 ? v626 : v631;
const float v635 = v633 ? v627 : v632;
const Int64 v636 = v633 ? v628 : (Int64) 1 + v628;
const float v637 = v634 * v634 - v635 * v635;
const float v638 = v634 * v635 + v635 * v634;
const float v639 = v8 + v637;
const float v640 = v9 + v638;
const Word8 v641 = v639 * v639 + v640 * v640 > 4.0f;
const float v642 = v641 ? v634 : v639;
const float v643 = v641 ? v635 : v640;
const Int64 v644 = v641 ? v636 : (Int64) 1 + v636;
const float v645 = v642 * v642 - v643 * v643;
const float v646 = v642 * v643 + v643 * v642;
const float v647 = v8 + v645;
const float v648 = v9 + v646;
const Word8 v649 = v647 * v647 + v648 * v648 > 4.0f;
const float v650 = v649 ? v642 : v647;
const float v651 = v649 ? v643 : v648;
const Int64 v652 = v649 ? v644 : (Int64) 1 + v644;
const float v653 = v650 * v650 - v651 * v651;
const float v654 = v650 * v651 + v651 * v650;
const float v655 = v8 + v653;
const float v656 = v9 + v654;
const Word8 v657 = v655 * v655 + v656 * v656 > 4.0f;
const float v658 = v657 ? v650 : v655;
const float v659 = v657 ? v651 : v656;
const Int64 v660 = v657 ? v652 : (Int64) 1 + v652;
const float v661 = v658 * v658 - v659 * v659;
const float v662 = v658 * v659 + v659 * v658;
const float v663 = v8 + v661;
const float v664 = v9 + v662;
const Word8 v665 = v663 * v663 + v664 * v664 > 4.0f;
const float v666 = v665 ? v658 : v663;
const float v667 = v665 ? v659 : v664;
const Int64 v668 = v665 ? v660 : (Int64) 1 + v660;
const float v669 = v666 * v666 - v667 * v667;
const float v670 = v666 * v667 + v667 * v666;
const float v671 = v8 + v669;
const float v672 = v9 + v670;
const Word8 v673 = v671 * v671 + v672 * v672 > 4.0f;
const float v674 = v673 ? v666 : v671;
const float v675 = v673 ? v667 : v672;
const Int64 v676 = v673 ? v668 : (Int64) 1 + v668;
const float v677 = v674 * v674 - v675 * v675;
const float v678 = v674 * v675 + v675 * v674;
const float v679 = v8 + v677;
const float v680 = v9 + v678;
const Word8 v681 = v679 * v679 + v680 * v680 > 4.0f;
const float v682 = v681 ? v674 : v679;
const float v683 = v681 ? v675 : v680;
const Int64 v684 = v681 ? v676 : (Int64) 1 + v676;
const float v685 = v682 * v682 - v683 * v683;
const float v686 = v682 * v683 + v683 * v682;
const float v687 = v8 + v685;
const float v688 = v9 + v686;
const Word8 v689 = v687 * v687 + v688 * v688 > 4.0f;
const float v690 = v689 ? v682 : v687;
const float v691 = v689 ? v683 : v688;
const Int64 v692 = v689 ? v684 : (Int64) 1 + v684;
const float v693 = v690 * v690 - v691 * v691;
const float v694 = v690 * v691 + v691 * v690;
const float v695 = v8 + v693;
const float v696 = v9 + v694;
const Word8 v697 = v695 * v695 + v696 * v696 > 4.0f;
const float v698 = v697 ? v690 : v695;
const float v699 = v697 ? v691 : v696;
const Int64 v700 = v697 ? v692 : (Int64) 1 + v692;
const float v701 = v698 * v698 - v699 * v699;
const float v702 = v698 * v699 + v699 * v698;
const float v703 = v8 + v701;
const float v704 = v9 + v702;
const Word8 v705 = v703 * v703 + v704 * v704 > 4.0f;
const float v706 = v705 ? v698 : v703;
const float v707 = v705 ? v699 : v704;
const Int64 v708 = v705 ? v700 : (Int64) 1 + v700;
const float v709 = v706 * v706 - v707 * v707;
const float v710 = v706 * v707 + v707 * v706;
const float v711 = v8 + v709;
const float v712 = v9 + v710;
const Word8 v713 = v711 * v711 + v712 * v712 > 4.0f;
const float v714 = v713 ? v706 : v711;
const float v715 = v713 ? v707 : v712;
const Int64 v716 = v713 ? v708 : (Int64) 1 + v708;
const float v717 = v714 * v714 - v715 * v715;
const float v718 = v714 * v715 + v715 * v714;
const float v719 = v8 + v717;
const float v720 = v9 + v718;
const Word8 v721 = v719 * v719 + v720 * v720 > 4.0f;
const float v722 = v721 ? v714 : v719;
const float v723 = v721 ? v715 : v720;
const Int64 v724 = v721 ? v716 : (Int64) 1 + v716;
const float v725 = v722 * v722 - v723 * v723;
const float v726 = v722 * v723 + v723 * v722;
const float v727 = v8 + v725;
const float v728 = v9 + v726;
const Word8 v729 = v727 * v727 + v728 * v728 > 4.0f;
const float v730 = v729 ? v722 : v727;
const float v731 = v729 ? v723 : v728;
const Int64 v732 = v729 ? v724 : (Int64) 1 + v724;
const float v733 = v730 * v730 - v731 * v731;
const float v734 = v730 * v731 + v731 * v730;
const float v735 = v8 + v733;
const float v736 = v9 + v734;
const Word8 v737 = v735 * v735 + v736 * v736 > 4.0f;
const float v738 = v737 ? v730 : v735;
const float v739 = v737 ? v731 : v736;
const Int64 v740 = v737 ? v732 : (Int64) 1 + v732;
const float v741 = v738 * v738 - v739 * v739;
const float v742 = v738 * v739 + v739 * v738;
const float v743 = v8 + v741;
const float v744 = v9 + v742;
const Word8 v745 = v743 * v743 + v744 * v744 > 4.0f;
const float v746 = v745 ? v738 : v743;
const float v747 = v745 ? v739 : v744;
const Int64 v748 = v745 ? v740 : (Int64) 1 + v740;
const float v749 = v746 * v746 - v747 * v747;
const float v750 = v746 * v747 + v747 * v746;
const float v751 = v8 + v749;
const float v752 = v9 + v750;
const Word8 v753 = v751 * v751 + v752 * v752 > 4.0f;
const float v754 = v753 ? v746 : v751;
const float v755 = v753 ? v747 : v752;
const Int64 v756 = v753 ? v748 : (Int64) 1 + v748;
const float v757 = v754 * v754 - v755 * v755;
const float v758 = v754 * v755 + v755 * v754;
const float v759 = v8 + v757;
const float v760 = v9 + v758;
const Word8 v761 = v759 * v759 + v760 * v760 > 4.0f;
const float v762 = v761 ? v754 : v759;
const float v763 = v761 ? v755 : v760;
const Int64 v764 = v761 ? v756 : (Int64) 1 + v756;
const float v765 = v762 * v762 - v763 * v763;
const float v766 = v762 * v763 + v763 * v762;
const float v767 = v8 + v765;
const float v768 = v9 + v766;
const Word8 v769 = v767 * v767 + v768 * v768 > 4.0f;
const float v770 = v769 ? v762 : v767;
const float v771 = v769 ? v763 : v768;
const Int64 v772 = v769 ? v764 : (Int64) 1 + v764;
const float v773 = v770 * v770 - v771 * v771;
const float v774 = v770 * v771 + v771 * v770;
const float v775 = v8 + v773;
const float v776 = v9 + v774;
const Word8 v777 = v775 * v775 + v776 * v776 > 4.0f;
const float v778 = v777 ? v770 : v775;
const float v779 = v777 ? v771 : v776;
const Int64 v780 = v777 ? v772 : (Int64) 1 + v772;
const float v781 = v778 * v778 - v779 * v779;
const float v782 = v778 * v779 + v779 * v778;
const float v783 = v8 + v781;
const float v784 = v9 + v782;
const Word8 v785 = v783 * v783 + v784 * v784 > 4.0f;
const float v786 = v785 ? v778 : v783;
const float v787 = v785 ? v779 : v784;
const Int64 v788 = v785 ? v780 : (Int64) 1 + v780;
const float v789 = v786 * v786 - v787 * v787;
const float v790 = v786 * v787 + v787 * v786;
const float v791 = v8 + v789;
const float v792 = v9 + v790;
const Word8 v793 = v791 * v791 + v792 * v792 > 4.0f;
const float v794 = v793 ? v786 : v791;
const float v795 = v793 ? v787 : v792;
const Int64 v796 = v793 ? v788 : (Int64) 1 + v788;
const float v797 = v794 * v794 - v795 * v795;
const float v798 = v794 * v795 + v795 * v794;
const float v799 = v8 + v797;
const float v800 = v9 + v798;
const Word8 v801 = v799 * v799 + v800 * v800 > 4.0f;
const float v802 = v801 ? v794 : v799;
const float v803 = v801 ? v795 : v800;
const Int64 v804 = v801 ? v796 : (Int64) 1 + v796;
const float v805 = v802 * v802 - v803 * v803;
const float v806 = v802 * v803 + v803 * v802;
const float v807 = v8 + v805;
const float v808 = v9 + v806;
const Word8 v809 = v807 * v807 + v808 * v808 > 4.0f;
const float v810 = v809 ? v802 : v807;
const float v811 = v809 ? v803 : v808;
const Int64 v812 = v809 ? v804 : (Int64) 1 + v804;
const float v813 = v810 * v810 - v811 * v811;
const float v814 = v810 * v811 + v811 * v810;
const float v815 = v8 + v813;
const float v816 = v9 + v814;
const Word8 v817 = v815 * v815 + v816 * v816 > 4.0f;
const float v818 = v817 ? v810 : v815;
const float v819 = v817 ? v811 : v816;
const Int64 v820 = v817 ? v812 : (Int64) 1 + v812;
const float v821 = v818 * v818 - v819 * v819;
const float v822 = v818 * v819 + v819 * v818;
const float v823 = v8 + v821;
const float v824 = v9 + v822;
const Word8 v825 = v823 * v823 + v824 * v824 > 4.0f;
const float v826 = v825 ? v818 : v823;
const float v827 = v825 ? v819 : v824;
const Int64 v828 = v825 ? v820 : (Int64) 1 + v820;
const float v829 = v826 * v826 - v827 * v827;
const float v830 = v826 * v827 + v827 * v826;
const float v831 = v8 + v829;
const float v832 = v9 + v830;
const Word8 v833 = v831 * v831 + v832 * v832 > 4.0f;
const float v834 = v833 ? v826 : v831;
const float v835 = v833 ? v827 : v832;
const Int64 v836 = v833 ? v828 : (Int64) 1 + v828;
const float v837 = v834 * v834 - v835 * v835;
const float v838 = v834 * v835 + v835 * v834;
const float v839 = v8 + v837;
const float v840 = v9 + v838;
const Word8 v841 = v839 * v839 + v840 * v840 > 4.0f;
const float v842 = v841 ? v834 : v839;
const float v843 = v841 ? v835 : v840;
const Int64 v844 = v841 ? v836 : (Int64) 1 + v836;
const float v845 = v842 * v842 - v843 * v843;
const float v846 = v842 * v843 + v843 * v842;
const float v847 = v8 + v845;
const float v848 = v9 + v846;
const Word8 v849 = v847 * v847 + v848 * v848 > 4.0f;
const float v850 = v849 ? v842 : v847;
const float v851 = v849 ? v843 : v848;
const Int64 v852 = v849 ? v844 : (Int64) 1 + v844;
const float v853 = v850 * v850 - v851 * v851;
const float v854 = v850 * v851 + v851 * v850;
const float v855 = v8 + v853;
const float v856 = v9 + v854;
const Word8 v857 = v855 * v855 + v856 * v856 > 4.0f;
const float v858 = v857 ? v850 : v855;
const float v859 = v857 ? v851 : v856;
const Int64 v860 = v857 ? v852 : (Int64) 1 + v852;
const float v861 = v858 * v858 - v859 * v859;
const float v862 = v858 * v859 + v859 * v858;
const float v863 = v8 + v861;
const float v864 = v9 + v862;
const Word8 v865 = v863 * v863 + v864 * v864 > 4.0f;
const float v866 = v865 ? v858 : v863;
const float v867 = v865 ? v859 : v864;
const Int64 v868 = v865 ? v860 : (Int64) 1 + v860;
const float v869 = v866 * v866 - v867 * v867;
const float v870 = v866 * v867 + v867 * v866;
const float v871 = v8 + v869;
const float v872 = v9 + v870;
const Word8 v873 = v871 * v871 + v872 * v872 > 4.0f;
const float v874 = v873 ? v866 : v871;
const float v875 = v873 ? v867 : v872;
const Int64 v876 = v873 ? v868 : (Int64) 1 + v868;
const float v877 = v874 * v874 - v875 * v875;
const float v878 = v874 * v875 + v875 * v874;
const float v879 = v8 + v877;
const float v880 = v9 + v878;
const Word8 v881 = v879 * v879 + v880 * v880 > 4.0f;
const float v882 = v881 ? v874 : v879;
const float v883 = v881 ? v875 : v880;
const Int64 v884 = v881 ? v876 : (Int64) 1 + v876;
const float v885 = v882 * v882 - v883 * v883;
const float v886 = v882 * v883 + v883 * v882;
const float v887 = v8 + v885;
const float v888 = v9 + v886;
const Word8 v889 = v887 * v887 + v888 * v888 > 4.0f;
const float v890 = v889 ? v882 : v887;
const float v891 = v889 ? v883 : v888;
const Int64 v892 = v889 ? v884 : (Int64) 1 + v884;
const float v893 = v890 * v890 - v891 * v891;
const float v894 = v890 * v891 + v891 * v890;
const float v895 = v8 + v893;
const float v896 = v9 + v894;
const Word8 v897 = v895 * v895 + v896 * v896 > 4.0f;
const float v898 = v897 ? v890 : v895;
const float v899 = v897 ? v891 : v896;
const Int64 v900 = v897 ? v892 : (Int64) 1 + v892;
const float v901 = v898 * v898 - v899 * v899;
const float v902 = v898 * v899 + v899 * v898;
const float v903 = v8 + v901;
const float v904 = v9 + v902;
const Word8 v905 = v903 * v903 + v904 * v904 > 4.0f;
const float v906 = v905 ? v898 : v903;
const float v907 = v905 ? v899 : v904;
const Int64 v908 = v905 ? v900 : (Int64) 1 + v900;
const float v909 = v906 * v906 - v907 * v907;
const float v910 = v906 * v907 + v907 * v906;
const float v911 = v8 + v909;
const float v912 = v9 + v910;
const Word8 v913 = v911 * v911 + v912 * v912 > 4.0f;
const float v914 = v913 ? v906 : v911;
const float v915 = v913 ? v907 : v912;
const Int64 v916 = v913 ? v908 : (Int64) 1 + v908;
const float v917 = v914 * v914 - v915 * v915;
const float v918 = v914 * v915 + v915 * v914;
const float v919 = v8 + v917;
const float v920 = v9 + v918;
const Word8 v921 = v919 * v919 + v920 * v920 > 4.0f;
const float v922 = v921 ? v914 : v919;
const float v923 = v921 ? v915 : v920;
const Int64 v924 = v921 ? v916 : (Int64) 1 + v916;
const float v925 = v922 * v922 - v923 * v923;
const float v926 = v922 * v923 + v923 * v922;
const float v927 = v8 + v925;
const float v928 = v9 + v926;
const Word8 v929 = v927 * v927 + v928 * v928 > 4.0f;
const float v930 = v929 ? v922 : v927;
const float v931 = v929 ? v923 : v928;
const Int64 v932 = v929 ? v924 : (Int64) 1 + v924;
const float v933 = v930 * v930 - v931 * v931;
const float v934 = v930 * v931 + v931 * v930;
const float v935 = v8 + v933;
const float v936 = v9 + v934;
const Word8 v937 = v935 * v935 + v936 * v936 > 4.0f;
const float v938 = v937 ? v930 : v935;
const float v939 = v937 ? v931 : v936;
const Int64 v940 = v937 ? v932 : (Int64) 1 + v932;
const float v941 = v938 * v938 - v939 * v939;
const float v942 = v938 * v939 + v939 * v938;
const float v943 = v8 + v941;
const float v944 = v9 + v942;
const Word8 v945 = v943 * v943 + v944 * v944 > 4.0f;
const float v946 = v945 ? v938 : v943;
const float v947 = v945 ? v939 : v944;
const Int64 v948 = v945 ? v940 : (Int64) 1 + v940;
const float v949 = v946 * v946 - v947 * v947;
const float v950 = v946 * v947 + v947 * v946;
const float v951 = v8 + v949;
const float v952 = v9 + v950;
const Word8 v953 = v951 * v951 + v952 * v952 > 4.0f;
const float v954 = v953 ? v946 : v951;
const float v955 = v953 ? v947 : v952;
const Int64 v956 = v953 ? v948 : (Int64) 1 + v948;
const float v957 = v954 * v954 - v955 * v955;
const float v958 = v954 * v955 + v955 * v954;
const float v959 = v8 + v957;
const float v960 = v9 + v958;
const Word8 v961 = v959 * v959 + v960 * v960 > 4.0f;
const float v962 = v961 ? v954 : v959;
const float v963 = v961 ? v955 : v960;
const Int64 v964 = v961 ? v956 : (Int64) 1 + v956;
const float v965 = v962 * v962 - v963 * v963;
const float v966 = v962 * v963 + v963 * v962;
const float v967 = v8 + v965;
const float v968 = v9 + v966;
const Word8 v969 = v967 * v967 + v968 * v968 > 4.0f;
const float v970 = v969 ? v962 : v967;
const float v971 = v969 ? v963 : v968;
const Int64 v972 = v969 ? v964 : (Int64) 1 + v964;
const float v973 = v970 * v970 - v971 * v971;
const float v974 = v970 * v971 + v971 * v970;
const float v975 = v8 + v973;
const float v976 = v9 + v974;
const Word8 v977 = v975 * v975 + v976 * v976 > 4.0f;
const float v978 = v977 ? v970 : v975;
const float v979 = v977 ? v971 : v976;
const Int64 v980 = v977 ? v972 : (Int64) 1 + v972;
const float v981 = v978 * v978 - v979 * v979;
const float v982 = v978 * v979 + v979 * v978;
const float v983 = v8 + v981;
const float v984 = v9 + v982;
const Word8 v985 = v983 * v983 + v984 * v984 > 4.0f;
const float v986 = v985 ? v978 : v983;
const float v987 = v985 ? v979 : v984;
const Int64 v988 = v985 ? v980 : (Int64) 1 + v980;
const float v989 = v986 * v986 - v987 * v987;
const float v990 = v986 * v987 + v987 * v986;
const float v991 = v8 + v989;
const float v992 = v9 + v990;
const Word8 v993 = v991 * v991 + v992 * v992 > 4.0f;
const float v994 = v993 ? v986 : v991;
const float v995 = v993 ? v987 : v992;
const Int64 v996 = v993 ? v988 : (Int64) 1 + v988;
const float v997 = v994 * v994 - v995 * v995;
const float v998 = v994 * v995 + v995 * v994;
const float v999 = v8 + v997;
const float v1000 = v9 + v998;
const Word8 v1001 = v999 * v999 + v1000 * v1000 > 4.0f;
const float v1002 = v1001 ? v994 : v999;
const float v1003 = v1001 ? v995 : v1000;
const Int64 v1004 = v1001 ? v996 : (Int64) 1 + v996;
const float v1005 = v1002 * v1002 - v1003 * v1003;
const float v1006 = v1002 * v1003 + v1003 * v1002;
const float v1007 = v8 + v1005;
const float v1008 = v9 + v1006;
const Word8 v1009 = v1007 * v1007 + v1008 * v1008 > 4.0f;
const float v1010 = v1009 ? v1002 : v1007;
const float v1011 = v1009 ? v1003 : v1008;
const Int64 v1012 = v1009 ? v1004 : (Int64) 1 + v1004;
const float v1013 = v1010 * v1010 - v1011 * v1011;
const float v1014 = v1010 * v1011 + v1011 * v1010;
const float v1015 = v8 + v1013;
const float v1016 = v9 + v1014;
const Word8 v1017 = v1015 * v1015 + v1016 * v1016 > 4.0f;
const float v1018 = v1017 ? v1010 : v1015;
const float v1019 = v1017 ? v1011 : v1016;
const Int64 v1020 = v1017 ? v1012 : (Int64) 1 + v1012;
const float v1021 = v1018 * v1018 - v1019 * v1019;
const float v1022 = v1018 * v1019 + v1019 * v1018;
const float v1023 = v8 + v1021;
const float v1024 = v9 + v1022;
const Word8 v1025 = v1023 * v1023 + v1024 * v1024 > 4.0f;
const float v1026 = v1025 ? v1018 : v1023;
const float v1027 = v1025 ? v1019 : v1024;
const Int64 v1028 = v1025 ? v1020 : (Int64) 1 + v1020;
const float v1029 = v1026 * v1026 - v1027 * v1027;
const float v1030 = v1026 * v1027 + v1027 * v1026;
const float v1031 = v8 + v1029;
const float v1032 = v9 + v1030;
const Word8 v1033 = v1031 * v1031 + v1032 * v1032 > 4.0f;
const float v1034 = v1033 ? v1026 : v1031;
const float v1035 = v1033 ? v1027 : v1032;
const Int64 v1036 = v1033 ? v1028 : (Int64) 1 + v1028;
const float v1037 = v1034 * v1034 - v1035 * v1035;
const float v1038 = v1034 * v1035 + v1035 * v1034;
const float v1039 = v8 + v1037;
const float v1040 = v9 + v1038;
const Word8 v1041 = v1039 * v1039 + v1040 * v1040 > 4.0f;
const float v1042 = v1041 ? v1034 : v1039;
const float v1043 = v1041 ? v1035 : v1040;
const Int64 v1044 = v1041 ? v1036 : (Int64) 1 + v1036;
const float v1045 = v1042 * v1042 - v1043 * v1043;
const float v1046 = v1042 * v1043 + v1043 * v1042;
const float v1047 = v8 + v1045;
const float v1048 = v9 + v1046;
const Word8 v1049 = v1047 * v1047 + v1048 * v1048 > 4.0f;
const float v1050 = v1049 ? v1042 : v1047;
const float v1051 = v1049 ? v1043 : v1048;
const Int64 v1052 = v1049 ? v1044 : (Int64) 1 + v1044;
const float v1053 = v1050 * v1050 - v1051 * v1051;
const float v1054 = v1050 * v1051 + v1051 * v1050;
const float v1055 = v8 + v1053;
const float v1056 = v9 + v1054;
const Word8 v1057 = v1055 * v1055 + v1056 * v1056 > 4.0f;
const float v1058 = v1057 ? v1050 : v1055;
const float v1059 = v1057 ? v1051 : v1056;
const Int64 v1060 = v1057 ? v1052 : (Int64) 1 + v1052;
const float v1061 = v1058 * v1058 - v1059 * v1059;
const float v1062 = v1058 * v1059 + v1059 * v1058;
const float v1063 = v8 + v1061;
const float v1064 = v9 + v1062;
const Word8 v1065 = v1063 * v1063 + v1064 * v1064 > 4.0f;
const float v1066 = v1065 ? v1058 : v1063;
const float v1067 = v1065 ? v1059 : v1064;
const Int64 v1068 = v1065 ? v1060 : (Int64) 1 + v1060;
const float v1069 = v1066 * v1066 - v1067 * v1067;
const float v1070 = v1066 * v1067 + v1067 * v1066;
const float v1071 = v8 + v1069;
const float v1072 = v9 + v1070;
const Word8 v1073 = v1071 * v1071 + v1072 * v1072 > 4.0f;
const float v1074 = v1073 ? v1066 : v1071;
const float v1075 = v1073 ? v1067 : v1072;
const Int64 v1076 = v1073 ? v1068 : (Int64) 1 + v1068;
const float v1077 = v1074 * v1074 - v1075 * v1075;
const float v1078 = v1074 * v1075 + v1075 * v1074;
const float v1079 = v8 + v1077;
const float v1080 = v9 + v1078;
const Word8 v1081 = v1079 * v1079 + v1080 * v1080 > 4.0f;
const float v1082 = v1081 ? v1074 : v1079;
const float v1083 = v1081 ? v1075 : v1080;
const Int64 v1084 = v1081 ? v1076 : (Int64) 1 + v1076;
const float v1085 = v1082 * v1082 - v1083 * v1083;
const float v1086 = v1082 * v1083 + v1083 * v1082;
const float v1087 = v8 + v1085;
const float v1088 = v9 + v1086;
const Word8 v1089 = v1087 * v1087 + v1088 * v1088 > 4.0f;
const float v1090 = v1089 ? v1082 : v1087;
const float v1091 = v1089 ? v1083 : v1088;
const Int64 v1092 = v1089 ? v1084 : (Int64) 1 + v1084;
const float v1093 = v1090 * v1090 - v1091 * v1091;
const float v1094 = v1090 * v1091 + v1091 * v1090;
const float v1095 = v8 + v1093;
const float v1096 = v9 + v1094;
const Word8 v1097 = v1095 * v1095 + v1096 * v1096 > 4.0f;
const float v1098 = v1097 ? v1090 : v1095;
const float v1099 = v1097 ? v1091 : v1096;
const Int64 v1100 = v1097 ? v1092 : (Int64) 1 + v1092;
const float v1101 = v1098 * v1098 - v1099 * v1099;
const float v1102 = v1098 * v1099 + v1099 * v1098;
const float v1103 = v8 + v1101;
const float v1104 = v9 + v1102;
const Word8 v1105 = v1103 * v1103 + v1104 * v1104 > 4.0f;
const float v1106 = v1105 ? v1098 : v1103;
const float v1107 = v1105 ? v1099 : v1104;
const Int64 v1108 = v1105 ? v1100 : (Int64) 1 + v1100;
const float v1109 = v1106 * v1106 - v1107 * v1107;
const float v1110 = v1106 * v1107 + v1107 * v1106;
const float v1111 = v8 + v1109;
const float v1112 = v9 + v1110;
const Word8 v1113 = v1111 * v1111 + v1112 * v1112 > 4.0f;
const float v1114 = v1113 ? v1106 : v1111;
const float v1115 = v1113 ? v1107 : v1112;
const Int64 v1116 = v1113 ? v1108 : (Int64) 1 + v1108;
const float v1117 = v1114 * v1114 - v1115 * v1115;
const float v1118 = v1114 * v1115 + v1115 * v1114;
const float v1119 = v8 + v1117;
const float v1120 = v9 + v1118;
const Word8 v1121 = v1119 * v1119 + v1120 * v1120 > 4.0f;
const float v1122 = v1121 ? v1114 : v1119;
const float v1123 = v1121 ? v1115 : v1120;
const Int64 v1124 = v1121 ? v1116 : (Int64) 1 + v1116;
const float v1125 = v1122 * v1122 - v1123 * v1123;
const float v1126 = v1122 * v1123 + v1123 * v1122;
const float v1127 = v8 + v1125;
const float v1128 = v9 + v1126;
const Word8 v1129 = v1127 * v1127 + v1128 * v1128 > 4.0f;
const float v1130 = v1129 ? v1122 : v1127;
const float v1131 = v1129 ? v1123 : v1128;
const Int64 v1132 = v1129 ? v1124 : (Int64) 1 + v1124;
const float v1133 = v1130 * v1130 - v1131 * v1131;
const float v1134 = v1130 * v1131 + v1131 * v1130;
const float v1135 = v8 + v1133;
const float v1136 = v9 + v1134;
const Word8 v1137 = v1135 * v1135 + v1136 * v1136 > 4.0f;
const float v1138 = v1137 ? v1130 : v1135;
const float v1139 = v1137 ? v1131 : v1136;
const Int64 v1140 = v1137 ? v1132 : (Int64) 1 + v1132;
const float v1141 = v1138 * v1138 - v1139 * v1139;
const float v1142 = v1138 * v1139 + v1139 * v1138;
const float v1143 = v8 + v1141;
const float v1144 = v9 + v1142;
const Word8 v1145 = v1143 * v1143 + v1144 * v1144 > 4.0f;
const float v1146 = v1145 ? v1138 : v1143;
const float v1147 = v1145 ? v1139 : v1144;
const Int64 v1148 = v1145 ? v1140 : (Int64) 1 + v1140;
const float v1149 = v1146 * v1146 - v1147 * v1147;
const float v1150 = v1146 * v1147 + v1147 * v1146;
const float v1151 = v8 + v1149;
const float v1152 = v9 + v1150;
const Word8 v1153 = v1151 * v1151 + v1152 * v1152 > 4.0f;
const float v1154 = v1153 ? v1146 : v1151;
const float v1155 = v1153 ? v1147 : v1152;
const Int64 v1156 = v1153 ? v1148 : (Int64) 1 + v1148;
const float v1157 = v1154 * v1154 - v1155 * v1155;
const float v1158 = v1154 * v1155 + v1155 * v1154;
const float v1159 = v8 + v1157;
const float v1160 = v9 + v1158;
const Word8 v1161 = v1159 * v1159 + v1160 * v1160 > 4.0f;
const float v1162 = v1161 ? v1154 : v1159;
const float v1163 = v1161 ? v1155 : v1160;
const Int64 v1164 = v1161 ? v1156 : (Int64) 1 + v1156;
const float v1165 = v1162 * v1162 - v1163 * v1163;
const float v1166 = v1162 * v1163 + v1163 * v1162;
const float v1167 = v8 + v1165;
const float v1168 = v9 + v1166;
const Word8 v1169 = v1167 * v1167 + v1168 * v1168 > 4.0f;
const float v1170 = v1169 ? v1162 : v1167;
const float v1171 = v1169 ? v1163 : v1168;
const Int64 v1172 = v1169 ? v1164 : (Int64) 1 + v1164;
const float v1173 = v1170 * v1170 - v1171 * v1171;
const float v1174 = v1170 * v1171 + v1171 * v1170;
const float v1175 = v8 + v1173;
const float v1176 = v9 + v1174;
const Word8 v1177 = v1175 * v1175 + v1176 * v1176 > 4.0f;
const float v1178 = v1177 ? v1170 : v1175;
const float v1179 = v1177 ? v1171 : v1176;
const Int64 v1180 = v1177 ? v1172 : (Int64) 1 + v1172;
const float v1181 = v1178 * v1178 - v1179 * v1179;
const float v1182 = v1178 * v1179 + v1179 * v1178;
const float v1183 = v8 + v1181;
const float v1184 = v9 + v1182;
const Word8 v1185 = v1183 * v1183 + v1184 * v1184 > 4.0f;
const float v1186 = v1185 ? v1178 : v1183;
const float v1187 = v1185 ? v1179 : v1184;
const Int64 v1188 = v1185 ? v1180 : (Int64) 1 + v1180;
const float v1189 = v1186 * v1186 - v1187 * v1187;
const float v1190 = v1186 * v1187 + v1187 * v1186;
const float v1191 = v8 + v1189;
const float v1192 = v9 + v1190;
const Word8 v1193 = v1191 * v1191 + v1192 * v1192 > 4.0f;
const float v1194 = v1193 ? v1186 : v1191;
const float v1195 = v1193 ? v1187 : v1192;
const Int64 v1196 = v1193 ? v1188 : (Int64) 1 + v1188;
const float v1197 = v1194 * v1194 - v1195 * v1195;
const float v1198 = v1194 * v1195 + v1195 * v1194;
const float v1199 = v8 + v1197;
const float v1200 = v9 + v1198;
const Word8 v1201 = v1199 * v1199 + v1200 * v1200 > 4.0f;
const float v1202 = v1201 ? v1194 : v1199;
const float v1203 = v1201 ? v1195 : v1200;
const Int64 v1204 = v1201 ? v1196 : (Int64) 1 + v1196;
const float v1205 = v1202 * v1202 - v1203 * v1203;
const float v1206 = v1202 * v1203 + v1203 * v1202;
const float v1207 = v8 + v1205;
const float v1208 = v9 + v1206;
const Word8 v1209 = v1207 * v1207 + v1208 * v1208 > 4.0f;
const float v1210 = v1209 ? v1202 : v1207;
const float v1211 = v1209 ? v1203 : v1208;
const Int64 v1212 = v1209 ? v1204 : (Int64) 1 + v1204;
const float v1213 = v1210 * v1210 - v1211 * v1211;
const float v1214 = v1210 * v1211 + v1211 * v1210;
const float v1215 = v8 + v1213;
const float v1216 = v9 + v1214;
const Word8 v1217 = v1215 * v1215 + v1216 * v1216 > 4.0f;
const float v1218 = v1217 ? v1210 : v1215;
const float v1219 = v1217 ? v1211 : v1216;
const Int64 v1220 = v1217 ? v1212 : (Int64) 1 + v1212;
const float v1221 = v1218 * v1218 - v1219 * v1219;
const float v1222 = v1218 * v1219 + v1219 * v1218;
const float v1223 = v8 + v1221;
const float v1224 = v9 + v1222;
const Word8 v1225 = v1223 * v1223 + v1224 * v1224 > 4.0f;
const float v1226 = v1225 ? v1218 : v1223;
const float v1227 = v1225 ? v1219 : v1224;
const Int64 v1228 = v1225 ? v1220 : (Int64) 1 + v1220;
const float v1229 = v1226 * v1226 - v1227 * v1227;
const float v1230 = v1226 * v1227 + v1227 * v1226;
const float v1231 = v8 + v1229;
const float v1232 = v9 + v1230;
const Word8 v1233 = v1231 * v1231 + v1232 * v1232 > 4.0f;
const float v1234 = v1233 ? v1226 : v1231;
const float v1235 = v1233 ? v1227 : v1232;
const Int64 v1236 = v1233 ? v1228 : (Int64) 1 + v1228;
const float v1237 = v1234 * v1234 - v1235 * v1235;
const float v1238 = v1234 * v1235 + v1235 * v1234;
const float v1239 = v8 + v1237;
const float v1240 = v9 + v1238;
const Word8 v1241 = v1239 * v1239 + v1240 * v1240 > 4.0f;
const float v1242 = v1241 ? v1234 : v1239;
const float v1243 = v1241 ? v1235 : v1240;
const Int64 v1244 = v1241 ? v1236 : (Int64) 1 + v1236;
const float v1245 = v1242 * v1242 - v1243 * v1243;
const float v1246 = v1242 * v1243 + v1243 * v1242;
const float v1247 = v8 + v1245;
const float v1248 = v9 + v1246;
const Word8 v1249 = v1247 * v1247 + v1248 * v1248 > 4.0f;
const float v1250 = v1249 ? v1242 : v1247;
const float v1251 = v1249 ? v1243 : v1248;
const Int64 v1252 = v1249 ? v1244 : (Int64) 1 + v1244;
const float v1253 = v1250 * v1250 - v1251 * v1251;
const float v1254 = v1250 * v1251 + v1251 * v1250;
const float v1255 = v8 + v1253;
const float v1256 = v9 + v1254;
const Word8 v1257 = v1255 * v1255 + v1256 * v1256 > 4.0f;
const float v1258 = v1257 ? v1250 : v1255;
const float v1259 = v1257 ? v1251 : v1256;
const Int64 v1260 = v1257 ? v1252 : (Int64) 1 + v1252;
const float v1261 = v1258 * v1258 - v1259 * v1259;
const float v1262 = v1258 * v1259 + v1259 * v1258;
const float v1263 = v8 + v1261;
const float v1264 = v9 + v1262;
const Word8 v1265 = v1263 * v1263 + v1264 * v1264 > 4.0f;
const float v1266 = v1265 ? v1258 : v1263;
const float v1267 = v1265 ? v1259 : v1264;
const Int64 v1268 = v1265 ? v1260 : (Int64) 1 + v1260;
const float v1269 = v1266 * v1266 - v1267 * v1267;
const float v1270 = v1266 * v1267 + v1267 * v1266;
const float v1271 = v8 + v1269;
const float v1272 = v9 + v1270;
const Word8 v1273 = v1271 * v1271 + v1272 * v1272 > 4.0f;
const float v1274 = v1273 ? v1266 : v1271;
const float v1275 = v1273 ? v1267 : v1272;
const Int64 v1276 = v1273 ? v1268 : (Int64) 1 + v1268;
const float v1277 = v1274 * v1274 - v1275 * v1275;
const float v1278 = v1274 * v1275 + v1275 * v1274;
const float v1279 = v8 + v1277;
const float v1280 = v9 + v1278;
const Word8 v1281 = v1279 * v1279 + v1280 * v1280 > 4.0f;
const float v1282 = v1281 ? v1274 : v1279;
const float v1283 = v1281 ? v1275 : v1280;
const Int64 v1284 = v1281 ? v1276 : (Int64) 1 + v1276;
const float v1285 = v1282 * v1282 - v1283 * v1283;
const float v1286 = v1282 * v1283 + v1283 * v1282;
const float v1287 = v8 + v1285;
const float v1288 = v9 + v1286;
const Word8 v1289 = v1287 * v1287 + v1288 * v1288 > 4.0f;
const float v1290 = v1289 ? v1282 : v1287;
const float v1291 = v1289 ? v1283 : v1288;
const Int64 v1292 = v1289 ? v1284 : (Int64) 1 + v1284;
const float v1293 = v1290 * v1290 - v1291 * v1291;
const float v1294 = v1290 * v1291 + v1291 * v1290;
const float v1295 = v8 + v1293;
const float v1296 = v9 + v1294;
const Word8 v1297 = v1295 * v1295 + v1296 * v1296 > 4.0f;
const float v1298 = v1297 ? v1290 : v1295;
const float v1299 = v1297 ? v1291 : v1296;
const Int64 v1300 = v1297 ? v1292 : (Int64) 1 + v1292;
const float v1301 = v1298 * v1298 - v1299 * v1299;
const float v1302 = v1298 * v1299 + v1299 * v1298;
const float v1303 = v8 + v1301;
const float v1304 = v9 + v1302;
const Word8 v1305 = v1303 * v1303 + v1304 * v1304 > 4.0f;
const float v1306 = v1305 ? v1298 : v1303;
const float v1307 = v1305 ? v1299 : v1304;
const Int64 v1308 = v1305 ? v1300 : (Int64) 1 + v1300;
const float v1309 = v1306 * v1306 - v1307 * v1307;
const float v1310 = v1306 * v1307 + v1307 * v1306;
const float v1311 = v8 + v1309;
const float v1312 = v9 + v1310;
const Word8 v1313 = v1311 * v1311 + v1312 * v1312 > 4.0f;
const float v1314 = v1313 ? v1306 : v1311;
const float v1315 = v1313 ? v1307 : v1312;
const Int64 v1316 = v1313 ? v1308 : (Int64) 1 + v1308;
const float v1317 = v1314 * v1314 - v1315 * v1315;
const float v1318 = v1314 * v1315 + v1315 * v1314;
const float v1319 = v8 + v1317;
const float v1320 = v9 + v1318;
const Word8 v1321 = v1319 * v1319 + v1320 * v1320 > 4.0f;
const float v1322 = v1321 ? v1314 : v1319;
const float v1323 = v1321 ? v1315 : v1320;
const Int64 v1324 = v1321 ? v1316 : (Int64) 1 + v1316;
const float v1325 = v1322 * v1322 - v1323 * v1323;
const float v1326 = v1322 * v1323 + v1323 * v1322;
const float v1327 = v8 + v1325;
const float v1328 = v9 + v1326;
const Word8 v1329 = v1327 * v1327 + v1328 * v1328 > 4.0f;
const float v1330 = v1329 ? v1322 : v1327;
const float v1331 = v1329 ? v1323 : v1328;
const Int64 v1332 = v1329 ? v1324 : (Int64) 1 + v1324;
const float v1333 = v1330 * v1330 - v1331 * v1331;
const float v1334 = v1330 * v1331 + v1331 * v1330;
const float v1335 = v8 + v1333;
const float v1336 = v9 + v1334;
const Word8 v1337 = v1335 * v1335 + v1336 * v1336 > 4.0f;
const float v1338 = v1337 ? v1330 : v1335;
const float v1339 = v1337 ? v1331 : v1336;
const Int64 v1340 = v1337 ? v1332 : (Int64) 1 + v1332;
const float v1341 = v1338 * v1338 - v1339 * v1339;
const float v1342 = v1338 * v1339 + v1339 * v1338;
const float v1343 = v8 + v1341;
const float v1344 = v9 + v1342;
const Word8 v1345 = v1343 * v1343 + v1344 * v1344 > 4.0f;
const float v1346 = v1345 ? v1338 : v1343;
const float v1347 = v1345 ? v1339 : v1344;
const Int64 v1348 = v1345 ? v1340 : (Int64) 1 + v1340;
const float v1349 = v1346 * v1346 - v1347 * v1347;
const float v1350 = v1346 * v1347 + v1347 * v1346;
const float v1351 = v8 + v1349;
const float v1352 = v9 + v1350;
const Word8 v1353 = v1351 * v1351 + v1352 * v1352 > 4.0f;
const float v1354 = v1353 ? v1346 : v1351;
const float v1355 = v1353 ? v1347 : v1352;
const Int64 v1356 = v1353 ? v1348 : (Int64) 1 + v1348;
const float v1357 = v1354 * v1354 - v1355 * v1355;
const float v1358 = v1354 * v1355 + v1355 * v1354;
const float v1359 = v8 + v1357;
const float v1360 = v9 + v1358;
const Word8 v1361 = v1359 * v1359 + v1360 * v1360 > 4.0f;
const float v1362 = v1361 ? v1354 : v1359;
const float v1363 = v1361 ? v1355 : v1360;
const Int64 v1364 = v1361 ? v1356 : (Int64) 1 + v1356;
const float v1365 = v1362 * v1362 - v1363 * v1363;
const float v1366 = v1362 * v1363 + v1363 * v1362;
const float v1367 = v8 + v1365;
const float v1368 = v9 + v1366;
const Word8 v1369 = v1367 * v1367 + v1368 * v1368 > 4.0f;
const float v1370 = v1369 ? v1362 : v1367;
const float v1371 = v1369 ? v1363 : v1368;
const Int64 v1372 = v1369 ? v1364 : (Int64) 1 + v1364;
const float v1373 = v1370 * v1370 - v1371 * v1371;
const float v1374 = v1370 * v1371 + v1371 * v1370;
const float v1375 = v8 + v1373;
const float v1376 = v9 + v1374;
const Word8 v1377 = v1375 * v1375 + v1376 * v1376 > 4.0f;
const float v1378 = v1377 ? v1370 : v1375;
const float v1379 = v1377 ? v1371 : v1376;
const Int64 v1380 = v1377 ? v1372 : (Int64) 1 + v1372;
const float v1381 = v1378 * v1378 - v1379 * v1379;
const float v1382 = v1378 * v1379 + v1379 * v1378;
const float v1383 = v8 + v1381;
const float v1384 = v9 + v1382;
const Word8 v1385 = v1383 * v1383 + v1384 * v1384 > 4.0f;
const float v1386 = v1385 ? v1378 : v1383;
const float v1387 = v1385 ? v1379 : v1384;
const Int64 v1388 = v1385 ? v1380 : (Int64) 1 + v1380;
const float v1389 = v1386 * v1386 - v1387 * v1387;
const float v1390 = v1386 * v1387 + v1387 * v1386;
const float v1391 = v8 + v1389;
const float v1392 = v9 + v1390;
const Word8 v1393 = v1391 * v1391 + v1392 * v1392 > 4.0f;
const float v1394 = v1393 ? v1386 : v1391;
const float v1395 = v1393 ? v1387 : v1392;
const Int64 v1396 = v1393 ? v1388 : (Int64) 1 + v1388;
const float v1397 = v1394 * v1394 - v1395 * v1395;
const float v1398 = v1394 * v1395 + v1395 * v1394;
const float v1399 = v8 + v1397;
const float v1400 = v9 + v1398;
const Word8 v1401 = v1399 * v1399 + v1400 * v1400 > 4.0f;
const float v1402 = v1401 ? v1394 : v1399;
const float v1403 = v1401 ? v1395 : v1400;
const Int64 v1404 = v1401 ? v1396 : (Int64) 1 + v1396;
const float v1405 = v1402 * v1402 - v1403 * v1403;
const float v1406 = v1402 * v1403 + v1403 * v1402;
const float v1407 = v8 + v1405;
const float v1408 = v9 + v1406;
const Word8 v1409 = v1407 * v1407 + v1408 * v1408 > 4.0f;
const float v1410 = v1409 ? v1402 : v1407;
const float v1411 = v1409 ? v1403 : v1408;
const Int64 v1412 = v1409 ? v1404 : (Int64) 1 + v1404;
const float v1413 = v1410 * v1410 - v1411 * v1411;
const float v1414 = v1410 * v1411 + v1411 * v1410;
const float v1415 = v8 + v1413;
const float v1416 = v9 + v1414;
const Word8 v1417 = v1415 * v1415 + v1416 * v1416 > 4.0f;
const float v1418 = v1417 ? v1410 : v1415;
const float v1419 = v1417 ? v1411 : v1416;
const Int64 v1420 = v1417 ? v1412 : (Int64) 1 + v1412;
const float v1421 = v1418 * v1418 - v1419 * v1419;
const float v1422 = v1418 * v1419 + v1419 * v1418;
const float v1423 = v8 + v1421;
const float v1424 = v9 + v1422;
const Word8 v1425 = v1423 * v1423 + v1424 * v1424 > 4.0f;
const float v1426 = v1425 ? v1418 : v1423;
const float v1427 = v1425 ? v1419 : v1424;
const Int64 v1428 = v1425 ? v1420 : (Int64) 1 + v1420;
const float v1429 = v1426 * v1426 - v1427 * v1427;
const float v1430 = v1426 * v1427 + v1427 * v1426;
const float v1431 = v8 + v1429;
const float v1432 = v9 + v1430;
const Word8 v1433 = v1431 * v1431 + v1432 * v1432 > 4.0f;
const float v1434 = v1433 ? v1426 : v1431;
const float v1435 = v1433 ? v1427 : v1432;
const Int64 v1436 = v1433 ? v1428 : (Int64) 1 + v1428;
const float v1437 = v1434 * v1434 - v1435 * v1435;
const float v1438 = v1434 * v1435 + v1435 * v1434;
const float v1439 = v8 + v1437;
const float v1440 = v9 + v1438;
const Word8 v1441 = v1439 * v1439 + v1440 * v1440 > 4.0f;
const float v1442 = v1441 ? v1434 : v1439;
const float v1443 = v1441 ? v1435 : v1440;
const Int64 v1444 = v1441 ? v1436 : (Int64) 1 + v1436;
const float v1445 = v1442 * v1442 - v1443 * v1443;
const float v1446 = v1442 * v1443 + v1443 * v1442;
const float v1447 = v8 + v1445;
const float v1448 = v9 + v1446;
const Word8 v1449 = v1447 * v1447 + v1448 * v1448 > 4.0f;
const float v1450 = v1449 ? v1442 : v1447;
const float v1451 = v1449 ? v1443 : v1448;
const Int64 v1452 = v1449 ? v1444 : (Int64) 1 + v1444;
const float v1453 = v1450 * v1450 - v1451 * v1451;
const float v1454 = v1450 * v1451 + v1451 * v1450;
const float v1455 = v8 + v1453;
const float v1456 = v9 + v1454;
const Word8 v1457 = v1455 * v1455 + v1456 * v1456 > 4.0f;
const float v1458 = v1457 ? v1450 : v1455;
const float v1459 = v1457 ? v1451 : v1456;
const Int64 v1460 = v1457 ? v1452 : (Int64) 1 + v1452;
const float v1461 = v1458 * v1458 - v1459 * v1459;
const float v1462 = v1458 * v1459 + v1459 * v1458;
const float v1463 = v8 + v1461;
const float v1464 = v9 + v1462;
const Word8 v1465 = v1463 * v1463 + v1464 * v1464 > 4.0f;
const float v1466 = v1465 ? v1458 : v1463;
const float v1467 = v1465 ? v1459 : v1464;
const Int64 v1468 = v1465 ? v1460 : (Int64) 1 + v1460;
const float v1469 = v1466 * v1466 - v1467 * v1467;
const float v1470 = v1466 * v1467 + v1467 * v1466;
const float v1471 = v8 + v1469;
const float v1472 = v9 + v1470;
const Word8 v1473 = v1471 * v1471 + v1472 * v1472 > 4.0f;
const float v1474 = v1473 ? v1466 : v1471;
const float v1475 = v1473 ? v1467 : v1472;
const Int64 v1476 = v1473 ? v1468 : (Int64) 1 + v1468;
const float v1477 = v1474 * v1474 - v1475 * v1475;
const float v1478 = v1474 * v1475 + v1475 * v1474;
const float v1479 = v8 + v1477;
const float v1480 = v9 + v1478;
const Word8 v1481 = v1479 * v1479 + v1480 * v1480 > 4.0f;
const float v1482 = v1481 ? v1474 : v1479;
const float v1483 = v1481 ? v1475 : v1480;
const Int64 v1484 = v1481 ? v1476 : (Int64) 1 + v1476;
const float v1485 = v1482 * v1482 - v1483 * v1483;
const float v1486 = v1482 * v1483 + v1483 * v1482;
const float v1487 = v8 + v1485;
const float v1488 = v9 + v1486;
const Word8 v1489 = v1487 * v1487 + v1488 * v1488 > 4.0f;
const float v1490 = v1489 ? v1482 : v1487;
const float v1491 = v1489 ? v1483 : v1488;
const Int64 v1492 = v1489 ? v1484 : (Int64) 1 + v1484;
const float v1493 = v1490 * v1490 - v1491 * v1491;
const float v1494 = v1490 * v1491 + v1491 * v1490;
const float v1495 = v8 + v1493;
const float v1496 = v9 + v1494;
const Word8 v1497 = v1495 * v1495 + v1496 * v1496 > 4.0f;
const float v1498 = v1497 ? v1490 : v1495;
const float v1499 = v1497 ? v1491 : v1496;
const Int64 v1500 = v1497 ? v1492 : (Int64) 1 + v1492;
const float v1501 = v1498 * v1498 - v1499 * v1499;
const float v1502 = v1498 * v1499 + v1499 * v1498;
const float v1503 = v8 + v1501;
const float v1504 = v9 + v1502;
const Word8 v1505 = v1503 * v1503 + v1504 * v1504 > 4.0f;
const float v1506 = v1505 ? v1498 : v1503;
const float v1507 = v1505 ? v1499 : v1504;
const Int64 v1508 = v1505 ? v1500 : (Int64) 1 + v1500;
const float v1509 = v1506 * v1506 - v1507 * v1507;
const float v1510 = v1506 * v1507 + v1507 * v1506;
const float v1511 = v8 + v1509;
const float v1512 = v9 + v1510;
const Word8 v1513 = v1511 * v1511 + v1512 * v1512 > 4.0f;
const float v1514 = v1513 ? v1506 : v1511;
const float v1515 = v1513 ? v1507 : v1512;
const Int64 v1516 = v1513 ? v1508 : (Int64) 1 + v1508;
const float v1517 = v1514 * v1514 - v1515 * v1515;
const float v1518 = v1514 * v1515 + v1515 * v1514;
const float v1519 = v8 + v1517;
const float v1520 = v9 + v1518;
const Word8 v1521 = v1519 * v1519 + v1520 * v1520 > 4.0f;
const float v1522 = v1521 ? v1514 : v1519;
const float v1523 = v1521 ? v1515 : v1520;
const Int64 v1524 = v1521 ? v1516 : (Int64) 1 + v1516;
const float v1525 = v1522 * v1522 - v1523 * v1523;
const float v1526 = v1522 * v1523 + v1523 * v1522;
const float v1527 = v8 + v1525;
const float v1528 = v9 + v1526;
const Word8 v1529 = v1527 * v1527 + v1528 * v1528 > 4.0f;
const float v1530 = v1529 ? v1522 : v1527;
const float v1531 = v1529 ? v1523 : v1528;
const Int64 v1532 = v1529 ? v1524 : (Int64) 1 + v1524;
const float v1533 = v1530 * v1530 - v1531 * v1531;
const float v1534 = v1530 * v1531 + v1531 * v1530;
const float v1535 = v8 + v1533;
const float v1536 = v9 + v1534;
const Word8 v1537 = v1535 * v1535 + v1536 * v1536 > 4.0f;
const float v1538 = v1537 ? v1530 : v1535;
const float v1539 = v1537 ? v1531 : v1536;
const Int64 v1540 = v1537 ? v1532 : (Int64) 1 + v1532;
const float v1541 = v1538 * v1538 - v1539 * v1539;
const float v1542 = v1538 * v1539 + v1539 * v1538;
const float v1543 = v8 + v1541;
const float v1544 = v9 + v1542;
const Word8 v1545 = v1543 * v1543 + v1544 * v1544 > 4.0f;
const float v1546 = v1545 ? v1538 : v1543;
const float v1547 = v1545 ? v1539 : v1544;
const Int64 v1548 = v1545 ? v1540 : (Int64) 1 + v1540;
const float v1549 = v1546 * v1546 - v1547 * v1547;
const float v1550 = v1546 * v1547 + v1547 * v1546;
const float v1551 = v8 + v1549;
const float v1552 = v9 + v1550;
const Word8 v1553 = v1551 * v1551 + v1552 * v1552 > 4.0f;
const float v1554 = v1553 ? v1546 : v1551;
const float v1555 = v1553 ? v1547 : v1552;
const Int64 v1556 = v1553 ? v1548 : (Int64) 1 + v1548;
const float v1557 = v1554 * v1554 - v1555 * v1555;
const float v1558 = v1554 * v1555 + v1555 * v1554;
const float v1559 = v8 + v1557;
const float v1560 = v9 + v1558;
const Word8 v1561 = v1559 * v1559 + v1560 * v1560 > 4.0f;
const float v1562 = v1561 ? v1554 : v1559;
const float v1563 = v1561 ? v1555 : v1560;
const Int64 v1564 = v1561 ? v1556 : (Int64) 1 + v1556;
const float v1565 = v1562 * v1562 - v1563 * v1563;
const float v1566 = v1562 * v1563 + v1563 * v1562;
const float v1567 = v8 + v1565;
const float v1568 = v9 + v1566;
const Word8 v1569 = v1567 * v1567 + v1568 * v1568 > 4.0f;
const float v1570 = v1569 ? v1562 : v1567;
const float v1571 = v1569 ? v1563 : v1568;
const Int64 v1572 = v1569 ? v1564 : (Int64) 1 + v1564;
const float v1573 = v1570 * v1570 - v1571 * v1571;
const float v1574 = v1570 * v1571 + v1571 * v1570;
const float v1575 = v8 + v1573;
const float v1576 = v9 + v1574;
const Word8 v1577 = v1575 * v1575 + v1576 * v1576 > 4.0f;
const float v1578 = v1577 ? v1570 : v1575;
const float v1579 = v1577 ? v1571 : v1576;
const Int64 v1580 = v1577 ? v1572 : (Int64) 1 + v1572;
const float v1581 = v1578 * v1578 - v1579 * v1579;
const float v1582 = v1578 * v1579 + v1579 * v1578;
const float v1583 = v8 + v1581;
const float v1584 = v9 + v1582;
const Word8 v1585 = v1583 * v1583 + v1584 * v1584 > 4.0f;
const float v1586 = v1585 ? v1578 : v1583;
const float v1587 = v1585 ? v1579 : v1584;
const Int64 v1588 = v1585 ? v1580 : (Int64) 1 + v1580;
const float v1589 = v1586 * v1586 - v1587 * v1587;
const float v1590 = v1586 * v1587 + v1587 * v1586;
const float v1591 = v8 + v1589;
const float v1592 = v9 + v1590;
const Word8 v1593 = v1591 * v1591 + v1592 * v1592 > 4.0f;
const float v1594 = v1593 ? v1586 : v1591;
const float v1595 = v1593 ? v1587 : v1592;
const Int64 v1596 = v1593 ? v1588 : (Int64) 1 + v1588;
const float v1597 = v1594 * v1594 - v1595 * v1595;
const float v1598 = v1594 * v1595 + v1595 * v1594;
const float v1599 = v8 + v1597;
const float v1600 = v9 + v1598;
const Word8 v1601 = v1599 * v1599 + v1600 * v1600 > 4.0f;
const float v1602 = v1601 ? v1594 : v1599;
const float v1603 = v1601 ? v1595 : v1600;
const Int64 v1604 = v1601 ? v1596 : (Int64) 1 + v1596;
const float v1605 = v1602 * v1602 - v1603 * v1603;
const float v1606 = v1602 * v1603 + v1603 * v1602;
const float v1607 = v8 + v1605;
const float v1608 = v9 + v1606;
const Word8 v1609 = v1607 * v1607 + v1608 * v1608 > 4.0f;
const float v1610 = v1609 ? v1602 : v1607;
const float v1611 = v1609 ? v1603 : v1608;
const Int64 v1612 = v1609 ? v1604 : (Int64) 1 + v1604;
const float v1613 = v1610 * v1610 - v1611 * v1611;
const float v1614 = v1610 * v1611 + v1611 * v1610;
const float v1615 = v8 + v1613;
const float v1616 = v9 + v1614;
const Word8 v1617 = v1615 * v1615 + v1616 * v1616 > 4.0f;
const float v1618 = v1617 ? v1610 : v1615;
const float v1619 = v1617 ? v1611 : v1616;
const Int64 v1620 = v1617 ? v1612 : (Int64) 1 + v1612;
const float v1621 = v1618 * v1618 - v1619 * v1619;
const float v1622 = v1618 * v1619 + v1619 * v1618;
const float v1623 = v8 + v1621;
const float v1624 = v9 + v1622;
const Word8 v1625 = v1623 * v1623 + v1624 * v1624 > 4.0f;
const float v1626 = v1625 ? v1618 : v1623;
const float v1627 = v1625 ? v1619 : v1624;
const Int64 v1628 = v1625 ? v1620 : (Int64) 1 + v1620;
const float v1629 = v1626 * v1626 - v1627 * v1627;
const float v1630 = v1626 * v1627 + v1627 * v1626;
const float v1631 = v8 + v1629;
const float v1632 = v9 + v1630;
const Word8 v1633 = v1631 * v1631 + v1632 * v1632 > 4.0f;
const float v1634 = v1633 ? v1626 : v1631;
const float v1635 = v1633 ? v1627 : v1632;
const Int64 v1636 = v1633 ? v1628 : (Int64) 1 + v1628;
const float v1637 = v1634 * v1634 - v1635 * v1635;
const float v1638 = v1634 * v1635 + v1635 * v1634;
const float v1639 = v8 + v1637;
const float v1640 = v9 + v1638;
const Word8 v1641 = v1639 * v1639 + v1640 * v1640 > 4.0f;
const float v1642 = v1641 ? v1634 : v1639;
const float v1643 = v1641 ? v1635 : v1640;
const Int64 v1644 = v1641 ? v1636 : (Int64) 1 + v1636;
const float v1645 = v1642 * v1642 - v1643 * v1643;
const float v1646 = v1642 * v1643 + v1643 * v1642;
const float v1647 = v8 + v1645;
const float v1648 = v9 + v1646;
const Word8 v1649 = v1647 * v1647 + v1648 * v1648 > 4.0f;
const float v1650 = v1649 ? v1642 : v1647;
const float v1651 = v1649 ? v1643 : v1648;
const Int64 v1652 = v1649 ? v1644 : (Int64) 1 + v1644;
const float v1653 = v1650 * v1650 - v1651 * v1651;
const float v1654 = v1650 * v1651 + v1651 * v1650;
const float v1655 = v8 + v1653;
const float v1656 = v9 + v1654;
const Word8 v1657 = v1655 * v1655 + v1656 * v1656 > 4.0f;
const float v1658 = v1657 ? v1650 : v1655;
const float v1659 = v1657 ? v1651 : v1656;
const Int64 v1660 = v1657 ? v1652 : (Int64) 1 + v1652;
const float v1661 = v1658 * v1658 - v1659 * v1659;
const float v1662 = v1658 * v1659 + v1659 * v1658;
const float v1663 = v8 + v1661;
const float v1664 = v9 + v1662;
const Word8 v1665 = v1663 * v1663 + v1664 * v1664 > 4.0f;
const float v1666 = v1665 ? v1658 : v1663;
const float v1667 = v1665 ? v1659 : v1664;
const Int64 v1668 = v1665 ? v1660 : (Int64) 1 + v1660;
const float v1669 = v1666 * v1666 - v1667 * v1667;
const float v1670 = v1666 * v1667 + v1667 * v1666;
const float v1671 = v8 + v1669;
const float v1672 = v9 + v1670;
const Word8 v1673 = v1671 * v1671 + v1672 * v1672 > 4.0f;
const float v1674 = v1673 ? v1666 : v1671;
const float v1675 = v1673 ? v1667 : v1672;
const Int64 v1676 = v1673 ? v1668 : (Int64) 1 + v1668;
const float v1677 = v1674 * v1674 - v1675 * v1675;
const float v1678 = v1674 * v1675 + v1675 * v1674;
const float v1679 = v8 + v1677;
const float v1680 = v9 + v1678;
const Word8 v1681 = v1679 * v1679 + v1680 * v1680 > 4.0f;
const float v1682 = v1681 ? v1674 : v1679;
const float v1683 = v1681 ? v1675 : v1680;
const Int64 v1684 = v1681 ? v1676 : (Int64) 1 + v1676;
const float v1685 = v1682 * v1682 - v1683 * v1683;
const float v1686 = v1682 * v1683 + v1683 * v1682;
const float v1687 = v8 + v1685;
const float v1688 = v9 + v1686;
const Word8 v1689 = v1687 * v1687 + v1688 * v1688 > 4.0f;
const float v1690 = v1689 ? v1682 : v1687;
const float v1691 = v1689 ? v1683 : v1688;
const Int64 v1692 = v1689 ? v1684 : (Int64) 1 + v1684;
const float v1693 = v1690 * v1690 - v1691 * v1691;
const float v1694 = v1690 * v1691 + v1691 * v1690;
const float v1695 = v8 + v1693;
const float v1696 = v9 + v1694;
const Word8 v1697 = v1695 * v1695 + v1696 * v1696 > 4.0f;
const float v1698 = v1697 ? v1690 : v1695;
const float v1699 = v1697 ? v1691 : v1696;
const Int64 v1700 = v1697 ? v1692 : (Int64) 1 + v1692;
const float v1701 = v1698 * v1698 - v1699 * v1699;
const float v1702 = v1698 * v1699 + v1699 * v1698;
const float v1703 = v8 + v1701;
const float v1704 = v9 + v1702;
const Word8 v1705 = v1703 * v1703 + v1704 * v1704 > 4.0f;
const float v1706 = v1705 ? v1698 : v1703;
const float v1707 = v1705 ? v1699 : v1704;
const Int64 v1708 = v1705 ? v1700 : (Int64) 1 + v1700;
const float v1709 = v1706 * v1706 - v1707 * v1707;
const float v1710 = v1706 * v1707 + v1707 * v1706;
const float v1711 = v8 + v1709;
const float v1712 = v9 + v1710;
const Word8 v1713 = v1711 * v1711 + v1712 * v1712 > 4.0f;
const float v1714 = v1713 ? v1706 : v1711;
const float v1715 = v1713 ? v1707 : v1712;
const Int64 v1716 = v1713 ? v1708 : (Int64) 1 + v1708;
const float v1717 = v1714 * v1714 - v1715 * v1715;
const float v1718 = v1714 * v1715 + v1715 * v1714;
const float v1719 = v8 + v1717;
const float v1720 = v9 + v1718;
const Word8 v1721 = v1719 * v1719 + v1720 * v1720 > 4.0f;
const float v1722 = v1721 ? v1714 : v1719;
const float v1723 = v1721 ? v1715 : v1720;
const Int64 v1724 = v1721 ? v1716 : (Int64) 1 + v1716;
const float v1725 = v1722 * v1722 - v1723 * v1723;
const float v1726 = v1722 * v1723 + v1723 * v1722;
const float v1727 = v8 + v1725;
const float v1728 = v9 + v1726;
const Word8 v1729 = v1727 * v1727 + v1728 * v1728 > 4.0f;
const float v1730 = v1729 ? v1722 : v1727;
const float v1731 = v1729 ? v1723 : v1728;
const Int64 v1732 = v1729 ? v1724 : (Int64) 1 + v1724;
const float v1733 = v1730 * v1730 - v1731 * v1731;
const float v1734 = v1730 * v1731 + v1731 * v1730;
const float v1735 = v8 + v1733;
const float v1736 = v9 + v1734;
const Word8 v1737 = v1735 * v1735 + v1736 * v1736 > 4.0f;
const float v1738 = v1737 ? v1730 : v1735;
const float v1739 = v1737 ? v1731 : v1736;
const Int64 v1740 = v1737 ? v1732 : (Int64) 1 + v1732;
const float v1741 = v1738 * v1738 - v1739 * v1739;
const float v1742 = v1738 * v1739 + v1739 * v1738;
const float v1743 = v8 + v1741;
const float v1744 = v9 + v1742;
const Word8 v1745 = v1743 * v1743 + v1744 * v1744 > 4.0f;
const float v1746 = v1745 ? v1738 : v1743;
const float v1747 = v1745 ? v1739 : v1744;
const Int64 v1748 = v1745 ? v1740 : (Int64) 1 + v1740;
const float v1749 = v1746 * v1746 - v1747 * v1747;
const float v1750 = v1746 * v1747 + v1747 * v1746;
const float v1751 = v8 + v1749;
const float v1752 = v9 + v1750;
const Word8 v1753 = v1751 * v1751 + v1752 * v1752 > 4.0f;
const float v1754 = v1753 ? v1746 : v1751;
const float v1755 = v1753 ? v1747 : v1752;
const Int64 v1756 = v1753 ? v1748 : (Int64) 1 + v1748;
const float v1757 = v1754 * v1754 - v1755 * v1755;
const float v1758 = v1754 * v1755 + v1755 * v1754;
const float v1759 = v8 + v1757;
const float v1760 = v9 + v1758;
const Word8 v1761 = v1759 * v1759 + v1760 * v1760 > 4.0f;
const float v1762 = v1761 ? v1754 : v1759;
const float v1763 = v1761 ? v1755 : v1760;
const Int64 v1764 = v1761 ? v1756 : (Int64) 1 + v1756;
const float v1765 = v1762 * v1762 - v1763 * v1763;
const float v1766 = v1762 * v1763 + v1763 * v1762;
const float v1767 = v8 + v1765;
const float v1768 = v9 + v1766;
const Word8 v1769 = v1767 * v1767 + v1768 * v1768 > 4.0f;
const float v1770 = v1769 ? v1762 : v1767;
const float v1771 = v1769 ? v1763 : v1768;
const Int64 v1772 = v1769 ? v1764 : (Int64) 1 + v1764;
const float v1773 = v1770 * v1770 - v1771 * v1771;
const float v1774 = v1770 * v1771 + v1771 * v1770;
const float v1775 = v8 + v1773;
const float v1776 = v9 + v1774;
const Word8 v1777 = v1775 * v1775 + v1776 * v1776 > 4.0f;
const float v1778 = v1777 ? v1770 : v1775;
const float v1779 = v1777 ? v1771 : v1776;
const Int64 v1780 = v1777 ? v1772 : (Int64) 1 + v1772;
const float v1781 = v1778 * v1778 - v1779 * v1779;
const float v1782 = v1778 * v1779 + v1779 * v1778;
const float v1783 = v8 + v1781;
const float v1784 = v9 + v1782;
const Word8 v1785 = v1783 * v1783 + v1784 * v1784 > 4.0f;
const float v1786 = v1785 ? v1778 : v1783;
const float v1787 = v1785 ? v1779 : v1784;
const Int64 v1788 = v1785 ? v1780 : (Int64) 1 + v1780;
const float v1789 = v1786 * v1786 - v1787 * v1787;
const float v1790 = v1786 * v1787 + v1787 * v1786;
const float v1791 = v8 + v1789;
const float v1792 = v9 + v1790;
const Word8 v1793 = v1791 * v1791 + v1792 * v1792 > 4.0f;
const float v1794 = v1793 ? v1786 : v1791;
const float v1795 = v1793 ? v1787 : v1792;
const Int64 v1796 = v1793 ? v1788 : (Int64) 1 + v1788;
const float v1797 = v1794 * v1794 - v1795 * v1795;
const float v1798 = v1794 * v1795 + v1795 * v1794;
const float v1799 = v8 + v1797;
const float v1800 = v9 + v1798;
const Word8 v1801 = v1799 * v1799 + v1800 * v1800 > 4.0f;
const float v1802 = v1801 ? v1794 : v1799;
const float v1803 = v1801 ? v1795 : v1800;
const Int64 v1804 = v1801 ? v1796 : (Int64) 1 + v1796;
const float v1805 = v1802 * v1802 - v1803 * v1803;
const float v1806 = v1802 * v1803 + v1803 * v1802;
const float v1807 = v8 + v1805;
const float v1808 = v9 + v1806;
const Word8 v1809 = v1807 * v1807 + v1808 * v1808 > 4.0f;
const float v1810 = v1809 ? v1802 : v1807;
const float v1811 = v1809 ? v1803 : v1808;
const Int64 v1812 = v1809 ? v1804 : (Int64) 1 + v1804;
const float v1813 = v1810 * v1810 - v1811 * v1811;
const float v1814 = v1810 * v1811 + v1811 * v1810;
const float v1815 = v8 + v1813;
const float v1816 = v9 + v1814;
const Word8 v1817 = v1815 * v1815 + v1816 * v1816 > 4.0f;
const float v1818 = v1817 ? v1810 : v1815;
const float v1819 = v1817 ? v1811 : v1816;
const Int64 v1820 = v1817 ? v1812 : (Int64) 1 + v1812;
const float v1821 = v1818 * v1818 - v1819 * v1819;
const float v1822 = v1818 * v1819 + v1819 * v1818;
const float v1823 = v8 + v1821;
const float v1824 = v9 + v1822;
const Word8 v1825 = v1823 * v1823 + v1824 * v1824 > 4.0f;
const float v1826 = v1825 ? v1818 : v1823;
const float v1827 = v1825 ? v1819 : v1824;
const Int64 v1828 = v1825 ? v1820 : (Int64) 1 + v1820;
const float v1829 = v1826 * v1826 - v1827 * v1827;
const float v1830 = v1826 * v1827 + v1827 * v1826;
const float v1831 = v8 + v1829;
const float v1832 = v9 + v1830;
const Word8 v1833 = v1831 * v1831 + v1832 * v1832 > 4.0f;
const float v1834 = v1833 ? v1826 : v1831;
const float v1835 = v1833 ? v1827 : v1832;
const Int64 v1836 = v1833 ? v1828 : (Int64) 1 + v1828;
const float v1837 = v1834 * v1834 - v1835 * v1835;
const float v1838 = v1834 * v1835 + v1835 * v1834;
const float v1839 = v8 + v1837;
const float v1840 = v9 + v1838;
const Word8 v1841 = v1839 * v1839 + v1840 * v1840 > 4.0f;
const float v1842 = v1841 ? v1834 : v1839;
const float v1843 = v1841 ? v1835 : v1840;
const Int64 v1844 = v1841 ? v1836 : (Int64) 1 + v1836;
const float v1845 = v1842 * v1842 - v1843 * v1843;
const float v1846 = v1842 * v1843 + v1843 * v1842;
const float v1847 = v8 + v1845;
const float v1848 = v9 + v1846;
const Word8 v1849 = v1847 * v1847 + v1848 * v1848 > 4.0f;
const float v1850 = v1849 ? v1842 : v1847;
const float v1851 = v1849 ? v1843 : v1848;
const Int64 v1852 = v1849 ? v1844 : (Int64) 1 + v1844;
const float v1853 = v1850 * v1850 - v1851 * v1851;
const float v1854 = v1850 * v1851 + v1851 * v1850;
const float v1855 = v8 + v1853;
const float v1856 = v9 + v1854;
const Word8 v1857 = v1855 * v1855 + v1856 * v1856 > 4.0f;
const float v1858 = v1857 ? v1850 : v1855;
const float v1859 = v1857 ? v1851 : v1856;
const Int64 v1860 = v1857 ? v1852 : (Int64) 1 + v1852;
const float v1861 = v1858 * v1858 - v1859 * v1859;
const float v1862 = v1858 * v1859 + v1859 * v1858;
const float v1863 = v8 + v1861;
const float v1864 = v9 + v1862;
const Word8 v1865 = v1863 * v1863 + v1864 * v1864 > 4.0f;
const float v1866 = v1865 ? v1858 : v1863;
const float v1867 = v1865 ? v1859 : v1864;
const Int64 v1868 = v1865 ? v1860 : (Int64) 1 + v1860;
const float v1869 = v1866 * v1866 - v1867 * v1867;
const float v1870 = v1866 * v1867 + v1867 * v1866;
const float v1871 = v8 + v1869;
const float v1872 = v9 + v1870;
const Word8 v1873 = v1871 * v1871 + v1872 * v1872 > 4.0f;
const float v1874 = v1873 ? v1866 : v1871;
const float v1875 = v1873 ? v1867 : v1872;
const Int64 v1876 = v1873 ? v1868 : (Int64) 1 + v1868;
const float v1877 = v1874 * v1874 - v1875 * v1875;
const float v1878 = v1874 * v1875 + v1875 * v1874;
const float v1879 = v8 + v1877;
const float v1880 = v9 + v1878;
const Word8 v1881 = v1879 * v1879 + v1880 * v1880 > 4.0f;
const float v1882 = v1881 ? v1874 : v1879;
const float v1883 = v1881 ? v1875 : v1880;
const Int64 v1884 = v1881 ? v1876 : (Int64) 1 + v1876;
const float v1885 = v1882 * v1882 - v1883 * v1883;
const float v1886 = v1882 * v1883 + v1883 * v1882;
const float v1887 = v8 + v1885;
const float v1888 = v9 + v1886;
const Word8 v1889 = v1887 * v1887 + v1888 * v1888 > 4.0f;
const float v1890 = v1889 ? v1882 : v1887;
const float v1891 = v1889 ? v1883 : v1888;
const Int64 v1892 = v1889 ? v1884 : (Int64) 1 + v1884;
const float v1893 = v1890 * v1890 - v1891 * v1891;
const float v1894 = v1890 * v1891 + v1891 * v1890;
const float v1895 = v8 + v1893;
const float v1896 = v9 + v1894;
const Word8 v1897 = v1895 * v1895 + v1896 * v1896 > 4.0f;
const float v1898 = v1897 ? v1890 : v1895;
const float v1899 = v1897 ? v1891 : v1896;
const Int64 v1900 = v1897 ? v1892 : (Int64) 1 + v1892;
const float v1901 = v1898 * v1898 - v1899 * v1899;
const float v1902 = v1898 * v1899 + v1899 * v1898;
const float v1903 = v8 + v1901;
const float v1904 = v9 + v1902;
const Word8 v1905 = v1903 * v1903 + v1904 * v1904 > 4.0f;
const float v1906 = v1905 ? v1898 : v1903;
const float v1907 = v1905 ? v1899 : v1904;
const Int64 v1908 = v1905 ? v1900 : (Int64) 1 + v1900;
const float v1909 = v1906 * v1906 - v1907 * v1907;
const float v1910 = v1906 * v1907 + v1907 * v1906;
const float v1911 = v8 + v1909;
const float v1912 = v9 + v1910;
const Word8 v1913 = v1911 * v1911 + v1912 * v1912 > 4.0f;
const float v1914 = v1913 ? v1906 : v1911;
const float v1915 = v1913 ? v1907 : v1912;
const Int64 v1916 = v1913 ? v1908 : (Int64) 1 + v1908;
const float v1917 = v1914 * v1914 - v1915 * v1915;
const float v1918 = v1914 * v1915 + v1915 * v1914;
const float v1919 = v8 + v1917;
const float v1920 = v9 + v1918;
const Word8 v1921 = v1919 * v1919 + v1920 * v1920 > 4.0f;
const float v1922 = v1921 ? v1914 : v1919;
const float v1923 = v1921 ? v1915 : v1920;
const Int64 v1924 = v1921 ? v1916 : (Int64) 1 + v1916;
const float v1925 = v1922 * v1922 - v1923 * v1923;
const float v1926 = v1922 * v1923 + v1923 * v1922;
const float v1927 = v8 + v1925;
const float v1928 = v9 + v1926;
const Word8 v1929 = v1927 * v1927 + v1928 * v1928 > 4.0f;
const float v1930 = v1929 ? v1922 : v1927;
const float v1931 = v1929 ? v1923 : v1928;
const Int64 v1932 = v1929 ? v1924 : (Int64) 1 + v1924;
const float v1933 = v1930 * v1930 - v1931 * v1931;
const float v1934 = v1930 * v1931 + v1931 * v1930;
const float v1935 = v8 + v1933;
const float v1936 = v9 + v1934;
const Word8 v1937 = v1935 * v1935 + v1936 * v1936 > 4.0f;
const float v1938 = v1937 ? v1930 : v1935;
const float v1939 = v1937 ? v1931 : v1936;
const Int64 v1940 = v1937 ? v1932 : (Int64) 1 + v1932;
const float v1941 = v1938 * v1938 - v1939 * v1939;
const float v1942 = v1938 * v1939 + v1939 * v1938;
const float v1943 = v8 + v1941;
const float v1944 = v9 + v1942;
const Word8 v1945 = v1943 * v1943 + v1944 * v1944 > 4.0f;
const float v1946 = v1945 ? v1938 : v1943;
const float v1947 = v1945 ? v1939 : v1944;
const Int64 v1948 = v1945 ? v1940 : (Int64) 1 + v1940;
const float v1949 = v1946 * v1946 - v1947 * v1947;
const float v1950 = v1946 * v1947 + v1947 * v1946;
const float v1951 = v8 + v1949;
const float v1952 = v9 + v1950;
const Word8 v1953 = v1951 * v1951 + v1952 * v1952 > 4.0f;
const float v1954 = v1953 ? v1946 : v1951;
const float v1955 = v1953 ? v1947 : v1952;
const Int64 v1956 = v1953 ? v1948 : (Int64) 1 + v1948;
const float v1957 = v1954 * v1954 - v1955 * v1955;
const float v1958 = v1954 * v1955 + v1955 * v1954;
const float v1959 = v8 + v1957;
const float v1960 = v9 + v1958;
const Word8 v1961 = v1959 * v1959 + v1960 * v1960 > 4.0f;
const float v1962 = v1961 ? v1954 : v1959;
const float v1963 = v1961 ? v1955 : v1960;
const Int64 v1964 = v1961 ? v1956 : (Int64) 1 + v1956;
const float v1965 = v1962 * v1962 - v1963 * v1963;
const float v1966 = v1962 * v1963 + v1963 * v1962;
const float v1967 = v8 + v1965;
const float v1968 = v9 + v1966;
const Word8 v1969 = v1967 * v1967 + v1968 * v1968 > 4.0f;
const float v1970 = v1969 ? v1962 : v1967;
const float v1971 = v1969 ? v1963 : v1968;
const Int64 v1972 = v1969 ? v1964 : (Int64) 1 + v1964;
const float v1973 = v1970 * v1970 - v1971 * v1971;
const float v1974 = v1970 * v1971 + v1971 * v1970;
const float v1975 = v8 + v1973;
const float v1976 = v9 + v1974;
const Word8 v1977 = v1975 * v1975 + v1976 * v1976 > 4.0f;
const float v1978 = v1977 ? v1970 : v1975;
const float v1979 = v1977 ? v1971 : v1976;
const Int64 v1980 = v1977 ? v1972 : (Int64) 1 + v1972;
const float v1981 = v1978 * v1978 - v1979 * v1979;
const float v1982 = v1978 * v1979 + v1979 * v1978;
const float v1983 = v8 + v1981;
const float v1984 = v9 + v1982;
const Word8 v1985 = v1983 * v1983 + v1984 * v1984 > 4.0f;
const float v1986 = v1985 ? v1978 : v1983;
const float v1987 = v1985 ? v1979 : v1984;
const Int64 v1988 = v1985 ? v1980 : (Int64) 1 + v1980;
const float v1989 = v1986 * v1986 - v1987 * v1987;
const float v1990 = v1986 * v1987 + v1987 * v1986;
const float v1991 = v8 + v1989;
const float v1992 = v9 + v1990;
const Word8 v1993 = v1991 * v1991 + v1992 * v1992 > 4.0f;
const float v1994 = v1993 ? v1986 : v1991;
const float v1995 = v1993 ? v1987 : v1992;
const Int64 v1996 = v1993 ? v1988 : (Int64) 1 + v1988;
const float v1997 = v1994 * v1994 - v1995 * v1995;
const float v1998 = v1994 * v1995 + v1995 * v1994;
const float v1999 = v8 + v1997;
const float v2000 = v9 + v1998;
const Word8 v2001 = v1999 * v1999 + v2000 * v2000 > 4.0f;
const float v2002 = v2001 ? v1994 : v1999;
const float v2003 = v2001 ? v1995 : v2000;
const Int64 v2004 = v2001 ? v1996 : (Int64) 1 + v1996;
const float v2005 = v2002 * v2002 - v2003 * v2003;
const float v2006 = v2002 * v2003 + v2003 * v2002;
const float v2007 = v8 + v2005;
const float v2008 = v9 + v2006;
const Word8 v2009 = v2007 * v2007 + v2008 * v2008 > 4.0f;
const float v2010 = v2009 ? v2002 : v2007;
const float v2011 = v2009 ? v2003 : v2008;
const Int64 v2012 = v2009 ? v2004 : (Int64) 1 + v2004;
const float v2013 = v2010 * v2010 - v2011 * v2011;
const float v2014 = v2010 * v2011 + v2011 * v2010;
const float v2015 = v8 + v2013;
const float v2016 = v9 + v2014;
const Word8 v2017 = v2015 * v2015 + v2016 * v2016 > 4.0f;
const float v2018 = v2017 ? v2010 : v2015;
const float v2019 = v2017 ? v2011 : v2016;
const Int64 v2020 = v2017 ? v2012 : (Int64) 1 + v2012;
const float v2021 = v2018 * v2018 - v2019 * v2019;
const float v2022 = v2018 * v2019 + v2019 * v2018;
const float v2023 = v8 + v2021;
const float v2024 = v9 + v2022;
const Word8 v2025 = v2023 * v2023 + v2024 * v2024 > 4.0f;
const float v2026 = v2025 ? v2018 : v2023;
const float v2027 = v2025 ? v2019 : v2024;
const Int64 v2028 = v2025 ? v2020 : (Int64) 1 + v2020;
const float v2029 = v2026 * v2026 - v2027 * v2027;
const float v2030 = v2026 * v2027 + v2027 * v2026;
const float v2031 = v8 + v2029;
const float v2032 = v9 + v2030;
const Word8 v2033 = v2031 * v2031 + v2032 * v2032 > 4.0f;
const float v2034 = v2033 ? v2026 : v2031;
const float v2035 = v2033 ? v2027 : v2032;
const Int64 v2036 = v2033 ? v2028 : (Int64) 1 + v2028;
const float v2037 = v2034 * v2034 - v2035 * v2035;
const float v2038 = v2034 * v2035 + v2035 * v2034;
const float v2039 = v8 + v2037;
const float v2040 = v9 + v2038;
const Word8 v2041 = v2039 * v2039 + v2040 * v2040 > 4.0f;
const float v2042 = v2041 ? v2034 : v2039;
const float v2043 = v2041 ? v2035 : v2040;
const Int64 v2044 = v2041 ? v2036 : (Int64) 1 + v2036;
const float v2045 = v2042 * v2042 - v2043 * v2043;
const float v2046 = v2042 * v2043 + v2043 * v2042;
const float v2047 = v8 + v2045;
const float v2048 = v9 + v2046;
const Word8 v2049 = v2047 * v2047 + v2048 * v2048 > 4.0f;
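/* End of the fully unrolled escape-time recurrence (z <- z*z + c, with a
 * bail-out test |z|^2 > 4 after every step). v2050 below is the final
 * iteration count; pixels whose count reaches the limit v0 are painted
 * opaque black (0xFF000000), while escaped pixels take 0xFFFFFFFF minus
 * (v0 - count) scaled by 7, 5 and 3 in bytes 1-3 of the packed Word32
 * written to arrOut_a0.
 */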
const Int64 v2050 = (Int64) (v2049 ? v2044 : (Int64) 1 + v2044);
const Word8 v2051 = v0 == v2050;
const Int64 v2052 = v0 - v2050;
const Word8 v2053 = (Word8) 0;
const Word8 v2054 = (Word8) ((Int64) 7 * v2052);
const Word8 v2055 = (Word8) ((Int64) 5 * v2052);
const Word8 v2056 = (Word8) ((Int64) 3 * v2052);
arrOut_a0[ix] = v2051 ? (Word32) 4278190080 : (Word32) 4294967295 - ((Word32) v2053 + (Word32) 256 * (Word32) v2054 + (Word32) 65536 * (Word32) v2055 + (Word32) 16777216 * (Word32) v2056);
}
}
| af170e71d7ea74ff0c3c804afc0ac54630beb9db.cu | /*
* 11.69:cc: entry function 'generate' used 63 registers, 0 bytes smem, 0 bytes lmem, 0 bytes cmem
* ... multiprocessor occupancy 50.0% : 1024 threads over 32 warps in 16 blocks
*
* benchmarking mandelbrot
* collecting 100 samples, 1 iterations each, in estimated 1193.660 s
* mean: 28.51441 ms, lb 28.44537 ms, ub 28.62653 ms, ci 0.950
* std dev: 441.4357 us, lb 308.6300 us, ub 656.7542 us, ci 0.950
* found 11 outliers among 100 samples (11.0%)
* 6 (6.0%) high mild
* 5 (5.0%) high severe
* variance introduced by outliers: 8.472%
* variance is slightly inflated by outliers
*/
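/* Occupancy arithmetic behind the figures above (a sketch, assuming a
 * Kepler-class SM: 65536 registers, a 2048-thread / 64-warp limit, and
 * per-warp register granularity of 256): 63 registers/thread gives
 * 63 * 32 = 2016, rounded up to 2048 registers per warp, so
 * 65536 / 2048 = 32 resident warps = 1024 threads (16 blocks of 64
 * threads), and 1024 / 2048 = 50% occupancy.
 */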
#include <accelerate_cuda.h>
typedef DIM2 DimOut;
extern "C" __global__ void generate(const DIM0 shIn0, const float* __restrict__ arrIn0_a3, const float* __restrict__ arrIn0_a2, const float* __restrict__ arrIn0_a1, const float* __restrict__ arrIn0_a0, const DIM2 shOut, Word32* __restrict__ arrOut_a0)
{
const int shapeSize = size(shOut);
const int gridSize = blockDim.x * gridDim.x;
int ix;
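/* Grid-stride loop: each thread processes pixels ix, ix + gridSize, ...,
 * so any launch configuration covers the whole output array.
 */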
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const DimOut sh = fromIndex(shOut, ix);
const Int64 v0 = (Int64) 255;
const int v1 = toIndex(shIn0, shape());
const float v2 = arrIn0_a3[v1];
const float v3 = arrIn0_a2[v1];
const float v4 = arrIn0_a1[v1];
const float v5 = arrIn0_a0[v1];
const Int64 v6 = sh.a1;
const Int64 v7 = sh.a0;
const float v8 = v2 + (float) v7 * (v4 - v2) / 800.0f;
const float v9 = v3 + (float) v6 * (v5 - v3) / 600.0f;
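/* v0 is the iteration limit (255, matching the unrolled steps below);
 * v2..v5 are the complex-plane view bounds read from the four scalar
 * input arrays, and (v8, v9) is the complex constant c for this pixel of
 * the 800x600 grid. v10/v11 seed z with c and v12 zeroes the count; the
 * statements that follow are the escape-time recurrence z <- z*z + c,
 * fully unrolled, with a bail-out test |z|^2 > 4 at every step.
 */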
const float v10 = v8;
const float v11 = v9;
const Int64 v12 = (Int64) 0;
const float v13 = v10 * v10 - v11 * v11;
const float v14 = v10 * v11 + v11 * v10;
const float v15 = v8 + v13;
const float v16 = v9 + v14;
const Word8 v17 = v15 * v15 + v16 * v16 > 4.0f;
const float v18 = v17 ? v10 : v15;
const float v19 = v17 ? v11 : v16;
const Int64 v20 = v17 ? v12 : (Int64) 1;
const float v21 = v18 * v18 - v19 * v19;
const float v22 = v18 * v19 + v19 * v18;
const float v23 = v8 + v21;
const float v24 = v9 + v22;
const Word8 v25 = v23 * v23 + v24 * v24 > 4.0f;
const float v26 = v25 ? v18 : v23;
const float v27 = v25 ? v19 : v24;
const Int64 v28 = v25 ? v20 : (Int64) 1 + v20;
const float v29 = v26 * v26 - v27 * v27;
const float v30 = v26 * v27 + v27 * v26;
const float v31 = v8 + v29;
const float v32 = v9 + v30;
const Word8 v33 = v31 * v31 + v32 * v32 > 4.0f;
const float v34 = v33 ? v26 : v31;
const float v35 = v33 ? v27 : v32;
const Int64 v36 = v33 ? v28 : (Int64) 1 + v28;
const float v37 = v34 * v34 - v35 * v35;
const float v38 = v34 * v35 + v35 * v34;
const float v39 = v8 + v37;
const float v40 = v9 + v38;
const Word8 v41 = v39 * v39 + v40 * v40 > 4.0f;
const float v42 = v41 ? v34 : v39;
const float v43 = v41 ? v35 : v40;
const Int64 v44 = v41 ? v36 : (Int64) 1 + v36;
const float v45 = v42 * v42 - v43 * v43;
const float v46 = v42 * v43 + v43 * v42;
const float v47 = v8 + v45;
const float v48 = v9 + v46;
const Word8 v49 = v47 * v47 + v48 * v48 > 4.0f;
const float v50 = v49 ? v42 : v47;
const float v51 = v49 ? v43 : v48;
const Int64 v52 = v49 ? v44 : (Int64) 1 + v44;
const float v53 = v50 * v50 - v51 * v51;
const float v54 = v50 * v51 + v51 * v50;
const float v55 = v8 + v53;
const float v56 = v9 + v54;
const Word8 v57 = v55 * v55 + v56 * v56 > 4.0f;
const float v58 = v57 ? v50 : v55;
const float v59 = v57 ? v51 : v56;
const Int64 v60 = v57 ? v52 : (Int64) 1 + v52;
const float v61 = v58 * v58 - v59 * v59;
const float v62 = v58 * v59 + v59 * v58;
const float v63 = v8 + v61;
const float v64 = v9 + v62;
const Word8 v65 = v63 * v63 + v64 * v64 > 4.0f;
const float v66 = v65 ? v58 : v63;
const float v67 = v65 ? v59 : v64;
const Int64 v68 = v65 ? v60 : (Int64) 1 + v60;
const float v69 = v66 * v66 - v67 * v67;
const float v70 = v66 * v67 + v67 * v66;
const float v71 = v8 + v69;
const float v72 = v9 + v70;
const Word8 v73 = v71 * v71 + v72 * v72 > 4.0f;
const float v74 = v73 ? v66 : v71;
const float v75 = v73 ? v67 : v72;
const Int64 v76 = v73 ? v68 : (Int64) 1 + v68;
const float v77 = v74 * v74 - v75 * v75;
const float v78 = v74 * v75 + v75 * v74;
const float v79 = v8 + v77;
const float v80 = v9 + v78;
const Word8 v81 = v79 * v79 + v80 * v80 > 4.0f;
const float v82 = v81 ? v74 : v79;
const float v83 = v81 ? v75 : v80;
const Int64 v84 = v81 ? v76 : (Int64) 1 + v76;
const float v85 = v82 * v82 - v83 * v83;
const float v86 = v82 * v83 + v83 * v82;
const float v87 = v8 + v85;
const float v88 = v9 + v86;
const Word8 v89 = v87 * v87 + v88 * v88 > 4.0f;
const float v90 = v89 ? v82 : v87;
const float v91 = v89 ? v83 : v88;
const Int64 v92 = v89 ? v84 : (Int64) 1 + v84;
const float v93 = v90 * v90 - v91 * v91;
const float v94 = v90 * v91 + v91 * v90;
const float v95 = v8 + v93;
const float v96 = v9 + v94;
const Word8 v97 = v95 * v95 + v96 * v96 > 4.0f;
const float v98 = v97 ? v90 : v95;
const float v99 = v97 ? v91 : v96;
const Int64 v100 = v97 ? v92 : (Int64) 1 + v92;
const float v101 = v98 * v98 - v99 * v99;
const float v102 = v98 * v99 + v99 * v98;
const float v103 = v8 + v101;
const float v104 = v9 + v102;
const Word8 v105 = v103 * v103 + v104 * v104 > 4.0f;
const float v106 = v105 ? v98 : v103;
const float v107 = v105 ? v99 : v104;
const Int64 v108 = v105 ? v100 : (Int64) 1 + v100;
const float v109 = v106 * v106 - v107 * v107;
const float v110 = v106 * v107 + v107 * v106;
const float v111 = v8 + v109;
const float v112 = v9 + v110;
const Word8 v113 = v111 * v111 + v112 * v112 > 4.0f;
const float v114 = v113 ? v106 : v111;
const float v115 = v113 ? v107 : v112;
const Int64 v116 = v113 ? v108 : (Int64) 1 + v108;
const float v117 = v114 * v114 - v115 * v115;
const float v118 = v114 * v115 + v115 * v114;
const float v119 = v8 + v117;
const float v120 = v9 + v118;
const Word8 v121 = v119 * v119 + v120 * v120 > 4.0f;
const float v122 = v121 ? v114 : v119;
const float v123 = v121 ? v115 : v120;
const Int64 v124 = v121 ? v116 : (Int64) 1 + v116;
const float v125 = v122 * v122 - v123 * v123;
const float v126 = v122 * v123 + v123 * v122;
const float v127 = v8 + v125;
const float v128 = v9 + v126;
const Word8 v129 = v127 * v127 + v128 * v128 > 4.0f;
const float v130 = v129 ? v122 : v127;
const float v131 = v129 ? v123 : v128;
const Int64 v132 = v129 ? v124 : (Int64) 1 + v124;
const float v133 = v130 * v130 - v131 * v131;
const float v134 = v130 * v131 + v131 * v130;
const float v135 = v8 + v133;
const float v136 = v9 + v134;
const Word8 v137 = v135 * v135 + v136 * v136 > 4.0f;
const float v138 = v137 ? v130 : v135;
const float v139 = v137 ? v131 : v136;
const Int64 v140 = v137 ? v132 : (Int64) 1 + v132;
const float v141 = v138 * v138 - v139 * v139;
const float v142 = v138 * v139 + v139 * v138;
const float v143 = v8 + v141;
const float v144 = v9 + v142;
const Word8 v145 = v143 * v143 + v144 * v144 > 4.0f;
const float v146 = v145 ? v138 : v143;
const float v147 = v145 ? v139 : v144;
const Int64 v148 = v145 ? v140 : (Int64) 1 + v140;
const float v149 = v146 * v146 - v147 * v147;
const float v150 = v146 * v147 + v147 * v146;
const float v151 = v8 + v149;
const float v152 = v9 + v150;
const Word8 v153 = v151 * v151 + v152 * v152 > 4.0f;
const float v154 = v153 ? v146 : v151;
const float v155 = v153 ? v147 : v152;
const Int64 v156 = v153 ? v148 : (Int64) 1 + v148;
const float v157 = v154 * v154 - v155 * v155;
const float v158 = v154 * v155 + v155 * v154;
const float v159 = v8 + v157;
const float v160 = v9 + v158;
const Word8 v161 = v159 * v159 + v160 * v160 > 4.0f;
const float v162 = v161 ? v154 : v159;
const float v163 = v161 ? v155 : v160;
const Int64 v164 = v161 ? v156 : (Int64) 1 + v156;
const float v165 = v162 * v162 - v163 * v163;
const float v166 = v162 * v163 + v163 * v162;
const float v167 = v8 + v165;
const float v168 = v9 + v166;
const Word8 v169 = v167 * v167 + v168 * v168 > 4.0f;
const float v170 = v169 ? v162 : v167;
const float v171 = v169 ? v163 : v168;
const Int64 v172 = v169 ? v164 : (Int64) 1 + v164;
const float v173 = v170 * v170 - v171 * v171;
const float v174 = v170 * v171 + v171 * v170;
const float v175 = v8 + v173;
const float v176 = v9 + v174;
const Word8 v177 = v175 * v175 + v176 * v176 > 4.0f;
const float v178 = v177 ? v170 : v175;
const float v179 = v177 ? v171 : v176;
const Int64 v180 = v177 ? v172 : (Int64) 1 + v172;
const float v181 = v178 * v178 - v179 * v179;
const float v182 = v178 * v179 + v179 * v178;
const float v183 = v8 + v181;
const float v184 = v9 + v182;
const Word8 v185 = v183 * v183 + v184 * v184 > 4.0f;
const float v186 = v185 ? v178 : v183;
const float v187 = v185 ? v179 : v184;
const Int64 v188 = v185 ? v180 : (Int64) 1 + v180;
const float v189 = v186 * v186 - v187 * v187;
const float v190 = v186 * v187 + v187 * v186;
const float v191 = v8 + v189;
const float v192 = v9 + v190;
const Word8 v193 = v191 * v191 + v192 * v192 > 4.0f;
const float v194 = v193 ? v186 : v191;
const float v195 = v193 ? v187 : v192;
const Int64 v196 = v193 ? v188 : (Int64) 1 + v188;
const float v197 = v194 * v194 - v195 * v195;
const float v198 = v194 * v195 + v195 * v194;
const float v199 = v8 + v197;
const float v200 = v9 + v198;
const Word8 v201 = v199 * v199 + v200 * v200 > 4.0f;
const float v202 = v201 ? v194 : v199;
const float v203 = v201 ? v195 : v200;
const Int64 v204 = v201 ? v196 : (Int64) 1 + v196;
const float v205 = v202 * v202 - v203 * v203;
const float v206 = v202 * v203 + v203 * v202;
const float v207 = v8 + v205;
const float v208 = v9 + v206;
const Word8 v209 = v207 * v207 + v208 * v208 > 4.0f;
const float v210 = v209 ? v202 : v207;
const float v211 = v209 ? v203 : v208;
const Int64 v212 = v209 ? v204 : (Int64) 1 + v204;
const float v213 = v210 * v210 - v211 * v211;
const float v214 = v210 * v211 + v211 * v210;
const float v215 = v8 + v213;
const float v216 = v9 + v214;
const Word8 v217 = v215 * v215 + v216 * v216 > 4.0f;
const float v218 = v217 ? v210 : v215;
const float v219 = v217 ? v211 : v216;
const Int64 v220 = v217 ? v212 : (Int64) 1 + v212;
const float v221 = v218 * v218 - v219 * v219;
const float v222 = v218 * v219 + v219 * v218;
const float v223 = v8 + v221;
const float v224 = v9 + v222;
const Word8 v225 = v223 * v223 + v224 * v224 > 4.0f;
const float v226 = v225 ? v218 : v223;
const float v227 = v225 ? v219 : v224;
const Int64 v228 = v225 ? v220 : (Int64) 1 + v220;
const float v229 = v226 * v226 - v227 * v227;
const float v230 = v226 * v227 + v227 * v226;
const float v231 = v8 + v229;
const float v232 = v9 + v230;
const Word8 v233 = v231 * v231 + v232 * v232 > 4.0f;
const float v234 = v233 ? v226 : v231;
const float v235 = v233 ? v227 : v232;
const Int64 v236 = v233 ? v228 : (Int64) 1 + v228;
const float v237 = v234 * v234 - v235 * v235;
const float v238 = v234 * v235 + v235 * v234;
const float v239 = v8 + v237;
const float v240 = v9 + v238;
const Word8 v241 = v239 * v239 + v240 * v240 > 4.0f;
const float v242 = v241 ? v234 : v239;
const float v243 = v241 ? v235 : v240;
const Int64 v244 = v241 ? v236 : (Int64) 1 + v236;
const float v245 = v242 * v242 - v243 * v243;
const float v246 = v242 * v243 + v243 * v242;
const float v247 = v8 + v245;
const float v248 = v9 + v246;
const Word8 v249 = v247 * v247 + v248 * v248 > 4.0f;
const float v250 = v249 ? v242 : v247;
const float v251 = v249 ? v243 : v248;
const Int64 v252 = v249 ? v244 : (Int64) 1 + v244;
const float v253 = v250 * v250 - v251 * v251;
const float v254 = v250 * v251 + v251 * v250;
const float v255 = v8 + v253;
const float v256 = v9 + v254;
const Word8 v257 = v255 * v255 + v256 * v256 > 4.0f;
const float v258 = v257 ? v250 : v255;
const float v259 = v257 ? v251 : v256;
const Int64 v260 = v257 ? v252 : (Int64) 1 + v252;
const float v261 = v258 * v258 - v259 * v259;
const float v262 = v258 * v259 + v259 * v258;
const float v263 = v8 + v261;
const float v264 = v9 + v262;
const Word8 v265 = v263 * v263 + v264 * v264 > 4.0f;
const float v266 = v265 ? v258 : v263;
const float v267 = v265 ? v259 : v264;
const Int64 v268 = v265 ? v260 : (Int64) 1 + v260;
const float v269 = v266 * v266 - v267 * v267;
const float v270 = v266 * v267 + v267 * v266;
const float v271 = v8 + v269;
const float v272 = v9 + v270;
const Word8 v273 = v271 * v271 + v272 * v272 > 4.0f;
const float v274 = v273 ? v266 : v271;
const float v275 = v273 ? v267 : v272;
const Int64 v276 = v273 ? v268 : (Int64) 1 + v268;
const float v277 = v274 * v274 - v275 * v275;
const float v278 = v274 * v275 + v275 * v274;
const float v279 = v8 + v277;
const float v280 = v9 + v278;
const Word8 v281 = v279 * v279 + v280 * v280 > 4.0f;
const float v282 = v281 ? v274 : v279;
const float v283 = v281 ? v275 : v280;
const Int64 v284 = v281 ? v276 : (Int64) 1 + v276;
const float v285 = v282 * v282 - v283 * v283;
const float v286 = v282 * v283 + v283 * v282;
const float v287 = v8 + v285;
const float v288 = v9 + v286;
const Word8 v289 = v287 * v287 + v288 * v288 > 4.0f;
const float v290 = v289 ? v282 : v287;
const float v291 = v289 ? v283 : v288;
const Int64 v292 = v289 ? v284 : (Int64) 1 + v284;
const float v293 = v290 * v290 - v291 * v291;
const float v294 = v290 * v291 + v291 * v290;
const float v295 = v8 + v293;
const float v296 = v9 + v294;
const Word8 v297 = v295 * v295 + v296 * v296 > 4.0f;
const float v298 = v297 ? v290 : v295;
const float v299 = v297 ? v291 : v296;
const Int64 v300 = v297 ? v292 : (Int64) 1 + v292;
const float v301 = v298 * v298 - v299 * v299;
const float v302 = v298 * v299 + v299 * v298;
const float v303 = v8 + v301;
const float v304 = v9 + v302;
const Word8 v305 = v303 * v303 + v304 * v304 > 4.0f;
const float v306 = v305 ? v298 : v303;
const float v307 = v305 ? v299 : v304;
const Int64 v308 = v305 ? v300 : (Int64) 1 + v300;
const float v309 = v306 * v306 - v307 * v307;
const float v310 = v306 * v307 + v307 * v306;
const float v311 = v8 + v309;
const float v312 = v9 + v310;
const Word8 v313 = v311 * v311 + v312 * v312 > 4.0f;
const float v314 = v313 ? v306 : v311;
const float v315 = v313 ? v307 : v312;
const Int64 v316 = v313 ? v308 : (Int64) 1 + v308;
const float v317 = v314 * v314 - v315 * v315;
const float v318 = v314 * v315 + v315 * v314;
const float v319 = v8 + v317;
const float v320 = v9 + v318;
const Word8 v321 = v319 * v319 + v320 * v320 > 4.0f;
const float v322 = v321 ? v314 : v319;
const float v323 = v321 ? v315 : v320;
const Int64 v324 = v321 ? v316 : (Int64) 1 + v316;
const float v325 = v322 * v322 - v323 * v323;
const float v326 = v322 * v323 + v323 * v322;
const float v327 = v8 + v325;
const float v328 = v9 + v326;
const Word8 v329 = v327 * v327 + v328 * v328 > 4.0f;
const float v330 = v329 ? v322 : v327;
const float v331 = v329 ? v323 : v328;
const Int64 v332 = v329 ? v324 : (Int64) 1 + v324;
const float v333 = v330 * v330 - v331 * v331;
const float v334 = v330 * v331 + v331 * v330;
const float v335 = v8 + v333;
const float v336 = v9 + v334;
const Word8 v337 = v335 * v335 + v336 * v336 > 4.0f;
const float v338 = v337 ? v330 : v335;
const float v339 = v337 ? v331 : v336;
const Int64 v340 = v337 ? v332 : (Int64) 1 + v332;
const float v341 = v338 * v338 - v339 * v339;
const float v342 = v338 * v339 + v339 * v338;
const float v343 = v8 + v341;
const float v344 = v9 + v342;
const Word8 v345 = v343 * v343 + v344 * v344 > 4.0f;
const float v346 = v345 ? v338 : v343;
const float v347 = v345 ? v339 : v344;
const Int64 v348 = v345 ? v340 : (Int64) 1 + v340;
const float v349 = v346 * v346 - v347 * v347;
const float v350 = v346 * v347 + v347 * v346;
const float v351 = v8 + v349;
const float v352 = v9 + v350;
const Word8 v353 = v351 * v351 + v352 * v352 > 4.0f;
const float v354 = v353 ? v346 : v351;
const float v355 = v353 ? v347 : v352;
const Int64 v356 = v353 ? v348 : (Int64) 1 + v348;
const float v357 = v354 * v354 - v355 * v355;
const float v358 = v354 * v355 + v355 * v354;
const float v359 = v8 + v357;
const float v360 = v9 + v358;
const Word8 v361 = v359 * v359 + v360 * v360 > 4.0f;
const float v362 = v361 ? v354 : v359;
const float v363 = v361 ? v355 : v360;
const Int64 v364 = v361 ? v356 : (Int64) 1 + v356;
const float v365 = v362 * v362 - v363 * v363;
const float v366 = v362 * v363 + v363 * v362;
const float v367 = v8 + v365;
const float v368 = v9 + v366;
const Word8 v369 = v367 * v367 + v368 * v368 > 4.0f;
const float v370 = v369 ? v362 : v367;
const float v371 = v369 ? v363 : v368;
const Int64 v372 = v369 ? v364 : (Int64) 1 + v364;
const float v373 = v370 * v370 - v371 * v371;
const float v374 = v370 * v371 + v371 * v370;
const float v375 = v8 + v373;
const float v376 = v9 + v374;
const Word8 v377 = v375 * v375 + v376 * v376 > 4.0f;
const float v378 = v377 ? v370 : v375;
const float v379 = v377 ? v371 : v376;
const Int64 v380 = v377 ? v372 : (Int64) 1 + v372;
const float v381 = v378 * v378 - v379 * v379;
const float v382 = v378 * v379 + v379 * v378;
const float v383 = v8 + v381;
const float v384 = v9 + v382;
const Word8 v385 = v383 * v383 + v384 * v384 > 4.0f;
const float v386 = v385 ? v378 : v383;
const float v387 = v385 ? v379 : v384;
const Int64 v388 = v385 ? v380 : (Int64) 1 + v380;
const float v389 = v386 * v386 - v387 * v387;
const float v390 = v386 * v387 + v387 * v386;
const float v391 = v8 + v389;
const float v392 = v9 + v390;
const Word8 v393 = v391 * v391 + v392 * v392 > 4.0f;
const float v394 = v393 ? v386 : v391;
const float v395 = v393 ? v387 : v392;
const Int64 v396 = v393 ? v388 : (Int64) 1 + v388;
const float v397 = v394 * v394 - v395 * v395;
const float v398 = v394 * v395 + v395 * v394;
const float v399 = v8 + v397;
const float v400 = v9 + v398;
const Word8 v401 = v399 * v399 + v400 * v400 > 4.0f;
const float v402 = v401 ? v394 : v399;
const float v403 = v401 ? v395 : v400;
const Int64 v404 = v401 ? v396 : (Int64) 1 + v396;
const float v405 = v402 * v402 - v403 * v403;
const float v406 = v402 * v403 + v403 * v402;
const float v407 = v8 + v405;
const float v408 = v9 + v406;
const Word8 v409 = v407 * v407 + v408 * v408 > 4.0f;
const float v410 = v409 ? v402 : v407;
const float v411 = v409 ? v403 : v408;
const Int64 v412 = v409 ? v404 : (Int64) 1 + v404;
const float v413 = v410 * v410 - v411 * v411;
const float v414 = v410 * v411 + v411 * v410;
const float v415 = v8 + v413;
const float v416 = v9 + v414;
const Word8 v417 = v415 * v415 + v416 * v416 > 4.0f;
const float v418 = v417 ? v410 : v415;
const float v419 = v417 ? v411 : v416;
const Int64 v420 = v417 ? v412 : (Int64) 1 + v412;
const float v421 = v418 * v418 - v419 * v419;
const float v422 = v418 * v419 + v419 * v418;
const float v423 = v8 + v421;
const float v424 = v9 + v422;
const Word8 v425 = v423 * v423 + v424 * v424 > 4.0f;
const float v426 = v425 ? v418 : v423;
const float v427 = v425 ? v419 : v424;
const Int64 v428 = v425 ? v420 : (Int64) 1 + v420;
const float v429 = v426 * v426 - v427 * v427;
const float v430 = v426 * v427 + v427 * v426;
const float v431 = v8 + v429;
const float v432 = v9 + v430;
const Word8 v433 = v431 * v431 + v432 * v432 > 4.0f;
const float v434 = v433 ? v426 : v431;
const float v435 = v433 ? v427 : v432;
const Int64 v436 = v433 ? v428 : (Int64) 1 + v428;
const float v437 = v434 * v434 - v435 * v435;
const float v438 = v434 * v435 + v435 * v434;
const float v439 = v8 + v437;
const float v440 = v9 + v438;
const Word8 v441 = v439 * v439 + v440 * v440 > 4.0f;
const float v442 = v441 ? v434 : v439;
const float v443 = v441 ? v435 : v440;
const Int64 v444 = v441 ? v436 : (Int64) 1 + v436;
const float v445 = v442 * v442 - v443 * v443;
const float v446 = v442 * v443 + v443 * v442;
const float v447 = v8 + v445;
const float v448 = v9 + v446;
const Word8 v449 = v447 * v447 + v448 * v448 > 4.0f;
const float v450 = v449 ? v442 : v447;
const float v451 = v449 ? v443 : v448;
const Int64 v452 = v449 ? v444 : (Int64) 1 + v444;
const float v453 = v450 * v450 - v451 * v451;
const float v454 = v450 * v451 + v451 * v450;
const float v455 = v8 + v453;
const float v456 = v9 + v454;
const Word8 v457 = v455 * v455 + v456 * v456 > 4.0f;
const float v458 = v457 ? v450 : v455;
const float v459 = v457 ? v451 : v456;
const Int64 v460 = v457 ? v452 : (Int64) 1 + v452;
const float v461 = v458 * v458 - v459 * v459;
const float v462 = v458 * v459 + v459 * v458;
const float v463 = v8 + v461;
const float v464 = v9 + v462;
const Word8 v465 = v463 * v463 + v464 * v464 > 4.0f;
const float v466 = v465 ? v458 : v463;
const float v467 = v465 ? v459 : v464;
const Int64 v468 = v465 ? v460 : (Int64) 1 + v460;
const float v469 = v466 * v466 - v467 * v467;
const float v470 = v466 * v467 + v467 * v466;
const float v471 = v8 + v469;
const float v472 = v9 + v470;
const Word8 v473 = v471 * v471 + v472 * v472 > 4.0f;
const float v474 = v473 ? v466 : v471;
const float v475 = v473 ? v467 : v472;
const Int64 v476 = v473 ? v468 : (Int64) 1 + v468;
const float v477 = v474 * v474 - v475 * v475;
const float v478 = v474 * v475 + v475 * v474;
const float v479 = v8 + v477;
const float v480 = v9 + v478;
const Word8 v481 = v479 * v479 + v480 * v480 > 4.0f;
const float v482 = v481 ? v474 : v479;
const float v483 = v481 ? v475 : v480;
const Int64 v484 = v481 ? v476 : (Int64) 1 + v476;
const float v485 = v482 * v482 - v483 * v483;
const float v486 = v482 * v483 + v483 * v482;
const float v487 = v8 + v485;
const float v488 = v9 + v486;
const Word8 v489 = v487 * v487 + v488 * v488 > 4.0f;
const float v490 = v489 ? v482 : v487;
const float v491 = v489 ? v483 : v488;
const Int64 v492 = v489 ? v484 : (Int64) 1 + v484;
const float v493 = v490 * v490 - v491 * v491;
const float v494 = v490 * v491 + v491 * v490;
const float v495 = v8 + v493;
const float v496 = v9 + v494;
const Word8 v497 = v495 * v495 + v496 * v496 > 4.0f;
const float v498 = v497 ? v490 : v495;
const float v499 = v497 ? v491 : v496;
const Int64 v500 = v497 ? v492 : (Int64) 1 + v492;
const float v501 = v498 * v498 - v499 * v499;
const float v502 = v498 * v499 + v499 * v498;
const float v503 = v8 + v501;
const float v504 = v9 + v502;
const Word8 v505 = v503 * v503 + v504 * v504 > 4.0f;
const float v506 = v505 ? v498 : v503;
const float v507 = v505 ? v499 : v504;
const Int64 v508 = v505 ? v500 : (Int64) 1 + v500;
const float v509 = v506 * v506 - v507 * v507;
const float v510 = v506 * v507 + v507 * v506;
const float v511 = v8 + v509;
const float v512 = v9 + v510;
const Word8 v513 = v511 * v511 + v512 * v512 > 4.0f;
const float v514 = v513 ? v506 : v511;
const float v515 = v513 ? v507 : v512;
const Int64 v516 = v513 ? v508 : (Int64) 1 + v508;
const float v517 = v514 * v514 - v515 * v515;
const float v518 = v514 * v515 + v515 * v514;
const float v519 = v8 + v517;
const float v520 = v9 + v518;
const Word8 v521 = v519 * v519 + v520 * v520 > 4.0f;
const float v522 = v521 ? v514 : v519;
const float v523 = v521 ? v515 : v520;
const Int64 v524 = v521 ? v516 : (Int64) 1 + v516;
const float v525 = v522 * v522 - v523 * v523;
const float v526 = v522 * v523 + v523 * v522;
const float v527 = v8 + v525;
const float v528 = v9 + v526;
const Word8 v529 = v527 * v527 + v528 * v528 > 4.0f;
const float v530 = v529 ? v522 : v527;
const float v531 = v529 ? v523 : v528;
const Int64 v532 = v529 ? v524 : (Int64) 1 + v524;
const float v533 = v530 * v530 - v531 * v531;
const float v534 = v530 * v531 + v531 * v530;
const float v535 = v8 + v533;
const float v536 = v9 + v534;
const Word8 v537 = v535 * v535 + v536 * v536 > 4.0f;
const float v538 = v537 ? v530 : v535;
const float v539 = v537 ? v531 : v536;
const Int64 v540 = v537 ? v532 : (Int64) 1 + v532;
const float v541 = v538 * v538 - v539 * v539;
const float v542 = v538 * v539 + v539 * v538;
const float v543 = v8 + v541;
const float v544 = v9 + v542;
const Word8 v545 = v543 * v543 + v544 * v544 > 4.0f;
const float v546 = v545 ? v538 : v543;
const float v547 = v545 ? v539 : v544;
const Int64 v548 = v545 ? v540 : (Int64) 1 + v540;
const float v549 = v546 * v546 - v547 * v547;
const float v550 = v546 * v547 + v547 * v546;
const float v551 = v8 + v549;
const float v552 = v9 + v550;
const Word8 v553 = v551 * v551 + v552 * v552 > 4.0f;
const float v554 = v553 ? v546 : v551;
const float v555 = v553 ? v547 : v552;
const Int64 v556 = v553 ? v548 : (Int64) 1 + v548;
const float v557 = v554 * v554 - v555 * v555;
const float v558 = v554 * v555 + v555 * v554;
const float v559 = v8 + v557;
const float v560 = v9 + v558;
const Word8 v561 = v559 * v559 + v560 * v560 > 4.0f;
const float v562 = v561 ? v554 : v559;
const float v563 = v561 ? v555 : v560;
const Int64 v564 = v561 ? v556 : (Int64) 1 + v556;
const float v565 = v562 * v562 - v563 * v563;
const float v566 = v562 * v563 + v563 * v562;
const float v567 = v8 + v565;
const float v568 = v9 + v566;
const Word8 v569 = v567 * v567 + v568 * v568 > 4.0f;
const float v570 = v569 ? v562 : v567;
const float v571 = v569 ? v563 : v568;
const Int64 v572 = v569 ? v564 : (Int64) 1 + v564;
const float v573 = v570 * v570 - v571 * v571;
const float v574 = v570 * v571 + v571 * v570;
const float v575 = v8 + v573;
const float v576 = v9 + v574;
const Word8 v577 = v575 * v575 + v576 * v576 > 4.0f;
const float v578 = v577 ? v570 : v575;
const float v579 = v577 ? v571 : v576;
const Int64 v580 = v577 ? v572 : (Int64) 1 + v572;
const float v581 = v578 * v578 - v579 * v579;
const float v582 = v578 * v579 + v579 * v578;
const float v583 = v8 + v581;
const float v584 = v9 + v582;
const Word8 v585 = v583 * v583 + v584 * v584 > 4.0f;
const float v586 = v585 ? v578 : v583;
const float v587 = v585 ? v579 : v584;
const Int64 v588 = v585 ? v580 : (Int64) 1 + v580;
const float v589 = v586 * v586 - v587 * v587;
const float v590 = v586 * v587 + v587 * v586;
const float v591 = v8 + v589;
const float v592 = v9 + v590;
const Word8 v593 = v591 * v591 + v592 * v592 > 4.0f;
const float v594 = v593 ? v586 : v591;
const float v595 = v593 ? v587 : v592;
const Int64 v596 = v593 ? v588 : (Int64) 1 + v588;
const float v597 = v594 * v594 - v595 * v595;
const float v598 = v594 * v595 + v595 * v594;
const float v599 = v8 + v597;
const float v600 = v9 + v598;
const Word8 v601 = v599 * v599 + v600 * v600 > 4.0f;
const float v602 = v601 ? v594 : v599;
const float v603 = v601 ? v595 : v600;
const Int64 v604 = v601 ? v596 : (Int64) 1 + v596;
const float v605 = v602 * v602 - v603 * v603;
const float v606 = v602 * v603 + v603 * v602;
const float v607 = v8 + v605;
const float v608 = v9 + v606;
const Word8 v609 = v607 * v607 + v608 * v608 > 4.0f;
const float v610 = v609 ? v602 : v607;
const float v611 = v609 ? v603 : v608;
const Int64 v612 = v609 ? v604 : (Int64) 1 + v604;
const float v613 = v610 * v610 - v611 * v611;
const float v614 = v610 * v611 + v611 * v610;
const float v615 = v8 + v613;
const float v616 = v9 + v614;
const Word8 v617 = v615 * v615 + v616 * v616 > 4.0f;
const float v618 = v617 ? v610 : v615;
const float v619 = v617 ? v611 : v616;
const Int64 v620 = v617 ? v612 : (Int64) 1 + v612;
const float v621 = v618 * v618 - v619 * v619;
const float v622 = v618 * v619 + v619 * v618;
const float v623 = v8 + v621;
const float v624 = v9 + v622;
const Word8 v625 = v623 * v623 + v624 * v624 > 4.0f;
const float v626 = v625 ? v618 : v623;
const float v627 = v625 ? v619 : v624;
const Int64 v628 = v625 ? v620 : (Int64) 1 + v620;
const float v629 = v626 * v626 - v627 * v627;
const float v630 = v626 * v627 + v627 * v626;
const float v631 = v8 + v629;
const float v632 = v9 + v630;
const Word8 v633 = v631 * v631 + v632 * v632 > 4.0f;
const float v634 = v633 ? v626 : v631;
const float v635 = v633 ? v627 : v632;
const Int64 v636 = v633 ? v628 : (Int64) 1 + v628;
const float v637 = v634 * v634 - v635 * v635;
const float v638 = v634 * v635 + v635 * v634;
const float v639 = v8 + v637;
const float v640 = v9 + v638;
const Word8 v641 = v639 * v639 + v640 * v640 > 4.0f;
const float v642 = v641 ? v634 : v639;
const float v643 = v641 ? v635 : v640;
const Int64 v644 = v641 ? v636 : (Int64) 1 + v636;
const float v645 = v642 * v642 - v643 * v643;
const float v646 = v642 * v643 + v643 * v642;
const float v647 = v8 + v645;
const float v648 = v9 + v646;
const Word8 v649 = v647 * v647 + v648 * v648 > 4.0f;
const float v650 = v649 ? v642 : v647;
const float v651 = v649 ? v643 : v648;
const Int64 v652 = v649 ? v644 : (Int64) 1 + v644;
const float v653 = v650 * v650 - v651 * v651;
const float v654 = v650 * v651 + v651 * v650;
const float v655 = v8 + v653;
const float v656 = v9 + v654;
const Word8 v657 = v655 * v655 + v656 * v656 > 4.0f;
const float v658 = v657 ? v650 : v655;
const float v659 = v657 ? v651 : v656;
const Int64 v660 = v657 ? v652 : (Int64) 1 + v652;
const float v661 = v658 * v658 - v659 * v659;
const float v662 = v658 * v659 + v659 * v658;
const float v663 = v8 + v661;
const float v664 = v9 + v662;
const Word8 v665 = v663 * v663 + v664 * v664 > 4.0f;
const float v666 = v665 ? v658 : v663;
const float v667 = v665 ? v659 : v664;
const Int64 v668 = v665 ? v660 : (Int64) 1 + v660;
const float v669 = v666 * v666 - v667 * v667;
const float v670 = v666 * v667 + v667 * v666;
const float v671 = v8 + v669;
const float v672 = v9 + v670;
const Word8 v673 = v671 * v671 + v672 * v672 > 4.0f;
const float v674 = v673 ? v666 : v671;
const float v675 = v673 ? v667 : v672;
const Int64 v676 = v673 ? v668 : (Int64) 1 + v668;
const float v677 = v674 * v674 - v675 * v675;
const float v678 = v674 * v675 + v675 * v674;
const float v679 = v8 + v677;
const float v680 = v9 + v678;
const Word8 v681 = v679 * v679 + v680 * v680 > 4.0f;
const float v682 = v681 ? v674 : v679;
const float v683 = v681 ? v675 : v680;
const Int64 v684 = v681 ? v676 : (Int64) 1 + v676;
const float v685 = v682 * v682 - v683 * v683;
const float v686 = v682 * v683 + v683 * v682;
const float v687 = v8 + v685;
const float v688 = v9 + v686;
const Word8 v689 = v687 * v687 + v688 * v688 > 4.0f;
const float v690 = v689 ? v682 : v687;
const float v691 = v689 ? v683 : v688;
const Int64 v692 = v689 ? v684 : (Int64) 1 + v684;
const float v693 = v690 * v690 - v691 * v691;
const float v694 = v690 * v691 + v691 * v690;
const float v695 = v8 + v693;
const float v696 = v9 + v694;
const Word8 v697 = v695 * v695 + v696 * v696 > 4.0f;
const float v698 = v697 ? v690 : v695;
const float v699 = v697 ? v691 : v696;
const Int64 v700 = v697 ? v692 : (Int64) 1 + v692;
const float v701 = v698 * v698 - v699 * v699;
const float v702 = v698 * v699 + v699 * v698;
const float v703 = v8 + v701;
const float v704 = v9 + v702;
const Word8 v705 = v703 * v703 + v704 * v704 > 4.0f;
const float v706 = v705 ? v698 : v703;
const float v707 = v705 ? v699 : v704;
const Int64 v708 = v705 ? v700 : (Int64) 1 + v700;
const float v709 = v706 * v706 - v707 * v707;
const float v710 = v706 * v707 + v707 * v706;
const float v711 = v8 + v709;
const float v712 = v9 + v710;
const Word8 v713 = v711 * v711 + v712 * v712 > 4.0f;
const float v714 = v713 ? v706 : v711;
const float v715 = v713 ? v707 : v712;
const Int64 v716 = v713 ? v708 : (Int64) 1 + v708;
const float v717 = v714 * v714 - v715 * v715;
const float v718 = v714 * v715 + v715 * v714;
const float v719 = v8 + v717;
const float v720 = v9 + v718;
const Word8 v721 = v719 * v719 + v720 * v720 > 4.0f;
const float v722 = v721 ? v714 : v719;
const float v723 = v721 ? v715 : v720;
const Int64 v724 = v721 ? v716 : (Int64) 1 + v716;
const float v725 = v722 * v722 - v723 * v723;
const float v726 = v722 * v723 + v723 * v722;
const float v727 = v8 + v725;
const float v728 = v9 + v726;
const Word8 v729 = v727 * v727 + v728 * v728 > 4.0f;
const float v730 = v729 ? v722 : v727;
const float v731 = v729 ? v723 : v728;
const Int64 v732 = v729 ? v724 : (Int64) 1 + v724;
const float v733 = v730 * v730 - v731 * v731;
const float v734 = v730 * v731 + v731 * v730;
const float v735 = v8 + v733;
const float v736 = v9 + v734;
const Word8 v737 = v735 * v735 + v736 * v736 > 4.0f;
const float v738 = v737 ? v730 : v735;
const float v739 = v737 ? v731 : v736;
const Int64 v740 = v737 ? v732 : (Int64) 1 + v732;
const float v741 = v738 * v738 - v739 * v739;
const float v742 = v738 * v739 + v739 * v738;
const float v743 = v8 + v741;
const float v744 = v9 + v742;
const Word8 v745 = v743 * v743 + v744 * v744 > 4.0f;
const float v746 = v745 ? v738 : v743;
const float v747 = v745 ? v739 : v744;
const Int64 v748 = v745 ? v740 : (Int64) 1 + v740;
const float v749 = v746 * v746 - v747 * v747;
const float v750 = v746 * v747 + v747 * v746;
const float v751 = v8 + v749;
const float v752 = v9 + v750;
const Word8 v753 = v751 * v751 + v752 * v752 > 4.0f;
const float v754 = v753 ? v746 : v751;
const float v755 = v753 ? v747 : v752;
const Int64 v756 = v753 ? v748 : (Int64) 1 + v748;
const float v757 = v754 * v754 - v755 * v755;
const float v758 = v754 * v755 + v755 * v754;
const float v759 = v8 + v757;
const float v760 = v9 + v758;
const Word8 v761 = v759 * v759 + v760 * v760 > 4.0f;
const float v762 = v761 ? v754 : v759;
const float v763 = v761 ? v755 : v760;
const Int64 v764 = v761 ? v756 : (Int64) 1 + v756;
const float v765 = v762 * v762 - v763 * v763;
const float v766 = v762 * v763 + v763 * v762;
const float v767 = v8 + v765;
const float v768 = v9 + v766;
const Word8 v769 = v767 * v767 + v768 * v768 > 4.0f;
const float v770 = v769 ? v762 : v767;
const float v771 = v769 ? v763 : v768;
const Int64 v772 = v769 ? v764 : (Int64) 1 + v764;
const float v773 = v770 * v770 - v771 * v771;
const float v774 = v770 * v771 + v771 * v770;
const float v775 = v8 + v773;
const float v776 = v9 + v774;
const Word8 v777 = v775 * v775 + v776 * v776 > 4.0f;
const float v778 = v777 ? v770 : v775;
const float v779 = v777 ? v771 : v776;
const Int64 v780 = v777 ? v772 : (Int64) 1 + v772;
const float v781 = v778 * v778 - v779 * v779;
const float v782 = v778 * v779 + v779 * v778;
const float v783 = v8 + v781;
const float v784 = v9 + v782;
const Word8 v785 = v783 * v783 + v784 * v784 > 4.0f;
const float v786 = v785 ? v778 : v783;
const float v787 = v785 ? v779 : v784;
const Int64 v788 = v785 ? v780 : (Int64) 1 + v780;
const float v789 = v786 * v786 - v787 * v787;
const float v790 = v786 * v787 + v787 * v786;
const float v791 = v8 + v789;
const float v792 = v9 + v790;
const Word8 v793 = v791 * v791 + v792 * v792 > 4.0f;
const float v794 = v793 ? v786 : v791;
const float v795 = v793 ? v787 : v792;
const Int64 v796 = v793 ? v788 : (Int64) 1 + v788;
const float v797 = v794 * v794 - v795 * v795;
const float v798 = v794 * v795 + v795 * v794;
const float v799 = v8 + v797;
const float v800 = v9 + v798;
const Word8 v801 = v799 * v799 + v800 * v800 > 4.0f;
const float v802 = v801 ? v794 : v799;
const float v803 = v801 ? v795 : v800;
const Int64 v804 = v801 ? v796 : (Int64) 1 + v796;
const float v805 = v802 * v802 - v803 * v803;
const float v806 = v802 * v803 + v803 * v802;
const float v807 = v8 + v805;
const float v808 = v9 + v806;
const Word8 v809 = v807 * v807 + v808 * v808 > 4.0f;
const float v810 = v809 ? v802 : v807;
const float v811 = v809 ? v803 : v808;
const Int64 v812 = v809 ? v804 : (Int64) 1 + v804;
const float v813 = v810 * v810 - v811 * v811;
const float v814 = v810 * v811 + v811 * v810;
const float v815 = v8 + v813;
const float v816 = v9 + v814;
const Word8 v817 = v815 * v815 + v816 * v816 > 4.0f;
const float v818 = v817 ? v810 : v815;
const float v819 = v817 ? v811 : v816;
const Int64 v820 = v817 ? v812 : (Int64) 1 + v812;
const float v821 = v818 * v818 - v819 * v819;
const float v822 = v818 * v819 + v819 * v818;
const float v823 = v8 + v821;
const float v824 = v9 + v822;
const Word8 v825 = v823 * v823 + v824 * v824 > 4.0f;
const float v826 = v825 ? v818 : v823;
const float v827 = v825 ? v819 : v824;
const Int64 v828 = v825 ? v820 : (Int64) 1 + v820;
const float v829 = v826 * v826 - v827 * v827;
const float v830 = v826 * v827 + v827 * v826;
const float v831 = v8 + v829;
const float v832 = v9 + v830;
const Word8 v833 = v831 * v831 + v832 * v832 > 4.0f;
const float v834 = v833 ? v826 : v831;
const float v835 = v833 ? v827 : v832;
const Int64 v836 = v833 ? v828 : (Int64) 1 + v828;
const float v837 = v834 * v834 - v835 * v835;
const float v838 = v834 * v835 + v835 * v834;
const float v839 = v8 + v837;
const float v840 = v9 + v838;
const Word8 v841 = v839 * v839 + v840 * v840 > 4.0f;
const float v842 = v841 ? v834 : v839;
const float v843 = v841 ? v835 : v840;
const Int64 v844 = v841 ? v836 : (Int64) 1 + v836;
const float v845 = v842 * v842 - v843 * v843;
const float v846 = v842 * v843 + v843 * v842;
const float v847 = v8 + v845;
const float v848 = v9 + v846;
const Word8 v849 = v847 * v847 + v848 * v848 > 4.0f;
const float v850 = v849 ? v842 : v847;
const float v851 = v849 ? v843 : v848;
const Int64 v852 = v849 ? v844 : (Int64) 1 + v844;
const float v853 = v850 * v850 - v851 * v851;
const float v854 = v850 * v851 + v851 * v850;
const float v855 = v8 + v853;
const float v856 = v9 + v854;
const Word8 v857 = v855 * v855 + v856 * v856 > 4.0f;
const float v858 = v857 ? v850 : v855;
const float v859 = v857 ? v851 : v856;
const Int64 v860 = v857 ? v852 : (Int64) 1 + v852;
const float v861 = v858 * v858 - v859 * v859;
const float v862 = v858 * v859 + v859 * v858;
const float v863 = v8 + v861;
const float v864 = v9 + v862;
const Word8 v865 = v863 * v863 + v864 * v864 > 4.0f;
const float v866 = v865 ? v858 : v863;
const float v867 = v865 ? v859 : v864;
const Int64 v868 = v865 ? v860 : (Int64) 1 + v860;
const float v869 = v866 * v866 - v867 * v867;
const float v870 = v866 * v867 + v867 * v866;
const float v871 = v8 + v869;
const float v872 = v9 + v870;
const Word8 v873 = v871 * v871 + v872 * v872 > 4.0f;
const float v874 = v873 ? v866 : v871;
const float v875 = v873 ? v867 : v872;
const Int64 v876 = v873 ? v868 : (Int64) 1 + v868;
const float v877 = v874 * v874 - v875 * v875;
const float v878 = v874 * v875 + v875 * v874;
const float v879 = v8 + v877;
const float v880 = v9 + v878;
const Word8 v881 = v879 * v879 + v880 * v880 > 4.0f;
const float v882 = v881 ? v874 : v879;
const float v883 = v881 ? v875 : v880;
const Int64 v884 = v881 ? v876 : (Int64) 1 + v876;
const float v885 = v882 * v882 - v883 * v883;
const float v886 = v882 * v883 + v883 * v882;
const float v887 = v8 + v885;
const float v888 = v9 + v886;
const Word8 v889 = v887 * v887 + v888 * v888 > 4.0f;
const float v890 = v889 ? v882 : v887;
const float v891 = v889 ? v883 : v888;
const Int64 v892 = v889 ? v884 : (Int64) 1 + v884;
const float v893 = v890 * v890 - v891 * v891;
const float v894 = v890 * v891 + v891 * v890;
const float v895 = v8 + v893;
const float v896 = v9 + v894;
const Word8 v897 = v895 * v895 + v896 * v896 > 4.0f;
const float v898 = v897 ? v890 : v895;
const float v899 = v897 ? v891 : v896;
const Int64 v900 = v897 ? v892 : (Int64) 1 + v892;
const float v901 = v898 * v898 - v899 * v899;
const float v902 = v898 * v899 + v899 * v898;
const float v903 = v8 + v901;
const float v904 = v9 + v902;
const Word8 v905 = v903 * v903 + v904 * v904 > 4.0f;
const float v906 = v905 ? v898 : v903;
const float v907 = v905 ? v899 : v904;
const Int64 v908 = v905 ? v900 : (Int64) 1 + v900;
const float v909 = v906 * v906 - v907 * v907;
const float v910 = v906 * v907 + v907 * v906;
const float v911 = v8 + v909;
const float v912 = v9 + v910;
const Word8 v913 = v911 * v911 + v912 * v912 > 4.0f;
const float v914 = v913 ? v906 : v911;
const float v915 = v913 ? v907 : v912;
const Int64 v916 = v913 ? v908 : (Int64) 1 + v908;
const float v917 = v914 * v914 - v915 * v915;
const float v918 = v914 * v915 + v915 * v914;
const float v919 = v8 + v917;
const float v920 = v9 + v918;
const Word8 v921 = v919 * v919 + v920 * v920 > 4.0f;
const float v922 = v921 ? v914 : v919;
const float v923 = v921 ? v915 : v920;
const Int64 v924 = v921 ? v916 : (Int64) 1 + v916;
const float v925 = v922 * v922 - v923 * v923;
const float v926 = v922 * v923 + v923 * v922;
const float v927 = v8 + v925;
const float v928 = v9 + v926;
const Word8 v929 = v927 * v927 + v928 * v928 > 4.0f;
const float v930 = v929 ? v922 : v927;
const float v931 = v929 ? v923 : v928;
const Int64 v932 = v929 ? v924 : (Int64) 1 + v924;
const float v933 = v930 * v930 - v931 * v931;
const float v934 = v930 * v931 + v931 * v930;
const float v935 = v8 + v933;
const float v936 = v9 + v934;
const Word8 v937 = v935 * v935 + v936 * v936 > 4.0f;
const float v938 = v937 ? v930 : v935;
const float v939 = v937 ? v931 : v936;
const Int64 v940 = v937 ? v932 : (Int64) 1 + v932;
const float v941 = v938 * v938 - v939 * v939;
const float v942 = v938 * v939 + v939 * v938;
const float v943 = v8 + v941;
const float v944 = v9 + v942;
const Word8 v945 = v943 * v943 + v944 * v944 > 4.0f;
const float v946 = v945 ? v938 : v943;
const float v947 = v945 ? v939 : v944;
const Int64 v948 = v945 ? v940 : (Int64) 1 + v940;
const float v949 = v946 * v946 - v947 * v947;
const float v950 = v946 * v947 + v947 * v946;
const float v951 = v8 + v949;
const float v952 = v9 + v950;
const Word8 v953 = v951 * v951 + v952 * v952 > 4.0f;
const float v954 = v953 ? v946 : v951;
const float v955 = v953 ? v947 : v952;
const Int64 v956 = v953 ? v948 : (Int64) 1 + v948;
const float v957 = v954 * v954 - v955 * v955;
const float v958 = v954 * v955 + v955 * v954;
const float v959 = v8 + v957;
const float v960 = v9 + v958;
const Word8 v961 = v959 * v959 + v960 * v960 > 4.0f;
const float v962 = v961 ? v954 : v959;
const float v963 = v961 ? v955 : v960;
const Int64 v964 = v961 ? v956 : (Int64) 1 + v956;
const float v965 = v962 * v962 - v963 * v963;
const float v966 = v962 * v963 + v963 * v962;
const float v967 = v8 + v965;
const float v968 = v9 + v966;
const Word8 v969 = v967 * v967 + v968 * v968 > 4.0f;
const float v970 = v969 ? v962 : v967;
const float v971 = v969 ? v963 : v968;
const Int64 v972 = v969 ? v964 : (Int64) 1 + v964;
const float v973 = v970 * v970 - v971 * v971;
const float v974 = v970 * v971 + v971 * v970;
const float v975 = v8 + v973;
const float v976 = v9 + v974;
const Word8 v977 = v975 * v975 + v976 * v976 > 4.0f;
const float v978 = v977 ? v970 : v975;
const float v979 = v977 ? v971 : v976;
const Int64 v980 = v977 ? v972 : (Int64) 1 + v972;
const float v981 = v978 * v978 - v979 * v979;
const float v982 = v978 * v979 + v979 * v978;
const float v983 = v8 + v981;
const float v984 = v9 + v982;
const Word8 v985 = v983 * v983 + v984 * v984 > 4.0f;
const float v986 = v985 ? v978 : v983;
const float v987 = v985 ? v979 : v984;
const Int64 v988 = v985 ? v980 : (Int64) 1 + v980;
const float v989 = v986 * v986 - v987 * v987;
const float v990 = v986 * v987 + v987 * v986;
const float v991 = v8 + v989;
const float v992 = v9 + v990;
const Word8 v993 = v991 * v991 + v992 * v992 > 4.0f;
const float v994 = v993 ? v986 : v991;
const float v995 = v993 ? v987 : v992;
const Int64 v996 = v993 ? v988 : (Int64) 1 + v988;
const float v997 = v994 * v994 - v995 * v995;
const float v998 = v994 * v995 + v995 * v994;
const float v999 = v8 + v997;
const float v1000 = v9 + v998;
const Word8 v1001 = v999 * v999 + v1000 * v1000 > 4.0f;
const float v1002 = v1001 ? v994 : v999;
const float v1003 = v1001 ? v995 : v1000;
const Int64 v1004 = v1001 ? v996 : (Int64) 1 + v996;
const float v1005 = v1002 * v1002 - v1003 * v1003;
const float v1006 = v1002 * v1003 + v1003 * v1002;
const float v1007 = v8 + v1005;
const float v1008 = v9 + v1006;
const Word8 v1009 = v1007 * v1007 + v1008 * v1008 > 4.0f;
const float v1010 = v1009 ? v1002 : v1007;
const float v1011 = v1009 ? v1003 : v1008;
const Int64 v1012 = v1009 ? v1004 : (Int64) 1 + v1004;
const float v1013 = v1010 * v1010 - v1011 * v1011;
const float v1014 = v1010 * v1011 + v1011 * v1010;
const float v1015 = v8 + v1013;
const float v1016 = v9 + v1014;
const Word8 v1017 = v1015 * v1015 + v1016 * v1016 > 4.0f;
const float v1018 = v1017 ? v1010 : v1015;
const float v1019 = v1017 ? v1011 : v1016;
const Int64 v1020 = v1017 ? v1012 : (Int64) 1 + v1012;
const float v1021 = v1018 * v1018 - v1019 * v1019;
const float v1022 = v1018 * v1019 + v1019 * v1018;
const float v1023 = v8 + v1021;
const float v1024 = v9 + v1022;
const Word8 v1025 = v1023 * v1023 + v1024 * v1024 > 4.0f;
const float v1026 = v1025 ? v1018 : v1023;
const float v1027 = v1025 ? v1019 : v1024;
const Int64 v1028 = v1025 ? v1020 : (Int64) 1 + v1020;
const float v1029 = v1026 * v1026 - v1027 * v1027;
const float v1030 = v1026 * v1027 + v1027 * v1026;
const float v1031 = v8 + v1029;
const float v1032 = v9 + v1030;
const Word8 v1033 = v1031 * v1031 + v1032 * v1032 > 4.0f;
const float v1034 = v1033 ? v1026 : v1031;
const float v1035 = v1033 ? v1027 : v1032;
const Int64 v1036 = v1033 ? v1028 : (Int64) 1 + v1028;
const float v1037 = v1034 * v1034 - v1035 * v1035;
const float v1038 = v1034 * v1035 + v1035 * v1034;
const float v1039 = v8 + v1037;
const float v1040 = v9 + v1038;
const Word8 v1041 = v1039 * v1039 + v1040 * v1040 > 4.0f;
const float v1042 = v1041 ? v1034 : v1039;
const float v1043 = v1041 ? v1035 : v1040;
const Int64 v1044 = v1041 ? v1036 : (Int64) 1 + v1036;
const float v1045 = v1042 * v1042 - v1043 * v1043;
const float v1046 = v1042 * v1043 + v1043 * v1042;
const float v1047 = v8 + v1045;
const float v1048 = v9 + v1046;
const Word8 v1049 = v1047 * v1047 + v1048 * v1048 > 4.0f;
const float v1050 = v1049 ? v1042 : v1047;
const float v1051 = v1049 ? v1043 : v1048;
const Int64 v1052 = v1049 ? v1044 : (Int64) 1 + v1044;
const float v1053 = v1050 * v1050 - v1051 * v1051;
const float v1054 = v1050 * v1051 + v1051 * v1050;
const float v1055 = v8 + v1053;
const float v1056 = v9 + v1054;
const Word8 v1057 = v1055 * v1055 + v1056 * v1056 > 4.0f;
const float v1058 = v1057 ? v1050 : v1055;
const float v1059 = v1057 ? v1051 : v1056;
const Int64 v1060 = v1057 ? v1052 : (Int64) 1 + v1052;
const float v1061 = v1058 * v1058 - v1059 * v1059;
const float v1062 = v1058 * v1059 + v1059 * v1058;
const float v1063 = v8 + v1061;
const float v1064 = v9 + v1062;
const Word8 v1065 = v1063 * v1063 + v1064 * v1064 > 4.0f;
const float v1066 = v1065 ? v1058 : v1063;
const float v1067 = v1065 ? v1059 : v1064;
const Int64 v1068 = v1065 ? v1060 : (Int64) 1 + v1060;
const float v1069 = v1066 * v1066 - v1067 * v1067;
const float v1070 = v1066 * v1067 + v1067 * v1066;
const float v1071 = v8 + v1069;
const float v1072 = v9 + v1070;
const Word8 v1073 = v1071 * v1071 + v1072 * v1072 > 4.0f;
const float v1074 = v1073 ? v1066 : v1071;
const float v1075 = v1073 ? v1067 : v1072;
const Int64 v1076 = v1073 ? v1068 : (Int64) 1 + v1068;
const float v1077 = v1074 * v1074 - v1075 * v1075;
const float v1078 = v1074 * v1075 + v1075 * v1074;
const float v1079 = v8 + v1077;
const float v1080 = v9 + v1078;
const Word8 v1081 = v1079 * v1079 + v1080 * v1080 > 4.0f;
const float v1082 = v1081 ? v1074 : v1079;
const float v1083 = v1081 ? v1075 : v1080;
const Int64 v1084 = v1081 ? v1076 : (Int64) 1 + v1076;
const float v1085 = v1082 * v1082 - v1083 * v1083;
const float v1086 = v1082 * v1083 + v1083 * v1082;
const float v1087 = v8 + v1085;
const float v1088 = v9 + v1086;
const Word8 v1089 = v1087 * v1087 + v1088 * v1088 > 4.0f;
const float v1090 = v1089 ? v1082 : v1087;
const float v1091 = v1089 ? v1083 : v1088;
const Int64 v1092 = v1089 ? v1084 : (Int64) 1 + v1084;
const float v1093 = v1090 * v1090 - v1091 * v1091;
const float v1094 = v1090 * v1091 + v1091 * v1090;
const float v1095 = v8 + v1093;
const float v1096 = v9 + v1094;
const Word8 v1097 = v1095 * v1095 + v1096 * v1096 > 4.0f;
const float v1098 = v1097 ? v1090 : v1095;
const float v1099 = v1097 ? v1091 : v1096;
const Int64 v1100 = v1097 ? v1092 : (Int64) 1 + v1092;
const float v1101 = v1098 * v1098 - v1099 * v1099;
const float v1102 = v1098 * v1099 + v1099 * v1098;
const float v1103 = v8 + v1101;
const float v1104 = v9 + v1102;
const Word8 v1105 = v1103 * v1103 + v1104 * v1104 > 4.0f;
const float v1106 = v1105 ? v1098 : v1103;
const float v1107 = v1105 ? v1099 : v1104;
const Int64 v1108 = v1105 ? v1100 : (Int64) 1 + v1100;
const float v1109 = v1106 * v1106 - v1107 * v1107;
const float v1110 = v1106 * v1107 + v1107 * v1106;
const float v1111 = v8 + v1109;
const float v1112 = v9 + v1110;
const Word8 v1113 = v1111 * v1111 + v1112 * v1112 > 4.0f;
const float v1114 = v1113 ? v1106 : v1111;
const float v1115 = v1113 ? v1107 : v1112;
const Int64 v1116 = v1113 ? v1108 : (Int64) 1 + v1108;
const float v1117 = v1114 * v1114 - v1115 * v1115;
const float v1118 = v1114 * v1115 + v1115 * v1114;
const float v1119 = v8 + v1117;
const float v1120 = v9 + v1118;
const Word8 v1121 = v1119 * v1119 + v1120 * v1120 > 4.0f;
const float v1122 = v1121 ? v1114 : v1119;
const float v1123 = v1121 ? v1115 : v1120;
const Int64 v1124 = v1121 ? v1116 : (Int64) 1 + v1116;
const float v1125 = v1122 * v1122 - v1123 * v1123;
const float v1126 = v1122 * v1123 + v1123 * v1122;
const float v1127 = v8 + v1125;
const float v1128 = v9 + v1126;
const Word8 v1129 = v1127 * v1127 + v1128 * v1128 > 4.0f;
const float v1130 = v1129 ? v1122 : v1127;
const float v1131 = v1129 ? v1123 : v1128;
const Int64 v1132 = v1129 ? v1124 : (Int64) 1 + v1124;
const float v1133 = v1130 * v1130 - v1131 * v1131;
const float v1134 = v1130 * v1131 + v1131 * v1130;
const float v1135 = v8 + v1133;
const float v1136 = v9 + v1134;
const Word8 v1137 = v1135 * v1135 + v1136 * v1136 > 4.0f;
const float v1138 = v1137 ? v1130 : v1135;
const float v1139 = v1137 ? v1131 : v1136;
const Int64 v1140 = v1137 ? v1132 : (Int64) 1 + v1132;
const float v1141 = v1138 * v1138 - v1139 * v1139;
const float v1142 = v1138 * v1139 + v1139 * v1138;
const float v1143 = v8 + v1141;
const float v1144 = v9 + v1142;
const Word8 v1145 = v1143 * v1143 + v1144 * v1144 > 4.0f;
const float v1146 = v1145 ? v1138 : v1143;
const float v1147 = v1145 ? v1139 : v1144;
const Int64 v1148 = v1145 ? v1140 : (Int64) 1 + v1140;
const float v1149 = v1146 * v1146 - v1147 * v1147;
const float v1150 = v1146 * v1147 + v1147 * v1146;
const float v1151 = v8 + v1149;
const float v1152 = v9 + v1150;
const Word8 v1153 = v1151 * v1151 + v1152 * v1152 > 4.0f;
const float v1154 = v1153 ? v1146 : v1151;
const float v1155 = v1153 ? v1147 : v1152;
const Int64 v1156 = v1153 ? v1148 : (Int64) 1 + v1148;
const float v1157 = v1154 * v1154 - v1155 * v1155;
const float v1158 = v1154 * v1155 + v1155 * v1154;
const float v1159 = v8 + v1157;
const float v1160 = v9 + v1158;
const Word8 v1161 = v1159 * v1159 + v1160 * v1160 > 4.0f;
const float v1162 = v1161 ? v1154 : v1159;
const float v1163 = v1161 ? v1155 : v1160;
const Int64 v1164 = v1161 ? v1156 : (Int64) 1 + v1156;
const float v1165 = v1162 * v1162 - v1163 * v1163;
const float v1166 = v1162 * v1163 + v1163 * v1162;
const float v1167 = v8 + v1165;
const float v1168 = v9 + v1166;
const Word8 v1169 = v1167 * v1167 + v1168 * v1168 > 4.0f;
const float v1170 = v1169 ? v1162 : v1167;
const float v1171 = v1169 ? v1163 : v1168;
const Int64 v1172 = v1169 ? v1164 : (Int64) 1 + v1164;
const float v1173 = v1170 * v1170 - v1171 * v1171;
const float v1174 = v1170 * v1171 + v1171 * v1170;
const float v1175 = v8 + v1173;
const float v1176 = v9 + v1174;
const Word8 v1177 = v1175 * v1175 + v1176 * v1176 > 4.0f;
const float v1178 = v1177 ? v1170 : v1175;
const float v1179 = v1177 ? v1171 : v1176;
const Int64 v1180 = v1177 ? v1172 : (Int64) 1 + v1172;
const float v1181 = v1178 * v1178 - v1179 * v1179;
const float v1182 = v1178 * v1179 + v1179 * v1178;
const float v1183 = v8 + v1181;
const float v1184 = v9 + v1182;
const Word8 v1185 = v1183 * v1183 + v1184 * v1184 > 4.0f;
const float v1186 = v1185 ? v1178 : v1183;
const float v1187 = v1185 ? v1179 : v1184;
const Int64 v1188 = v1185 ? v1180 : (Int64) 1 + v1180;
const float v1189 = v1186 * v1186 - v1187 * v1187;
const float v1190 = v1186 * v1187 + v1187 * v1186;
const float v1191 = v8 + v1189;
const float v1192 = v9 + v1190;
const Word8 v1193 = v1191 * v1191 + v1192 * v1192 > 4.0f;
const float v1194 = v1193 ? v1186 : v1191;
const float v1195 = v1193 ? v1187 : v1192;
const Int64 v1196 = v1193 ? v1188 : (Int64) 1 + v1188;
const float v1197 = v1194 * v1194 - v1195 * v1195;
const float v1198 = v1194 * v1195 + v1195 * v1194;
const float v1199 = v8 + v1197;
const float v1200 = v9 + v1198;
const Word8 v1201 = v1199 * v1199 + v1200 * v1200 > 4.0f;
const float v1202 = v1201 ? v1194 : v1199;
const float v1203 = v1201 ? v1195 : v1200;
const Int64 v1204 = v1201 ? v1196 : (Int64) 1 + v1196;
const float v1205 = v1202 * v1202 - v1203 * v1203;
const float v1206 = v1202 * v1203 + v1203 * v1202;
const float v1207 = v8 + v1205;
const float v1208 = v9 + v1206;
const Word8 v1209 = v1207 * v1207 + v1208 * v1208 > 4.0f;
const float v1210 = v1209 ? v1202 : v1207;
const float v1211 = v1209 ? v1203 : v1208;
const Int64 v1212 = v1209 ? v1204 : (Int64) 1 + v1204;
const float v1213 = v1210 * v1210 - v1211 * v1211;
const float v1214 = v1210 * v1211 + v1211 * v1210;
const float v1215 = v8 + v1213;
const float v1216 = v9 + v1214;
const Word8 v1217 = v1215 * v1215 + v1216 * v1216 > 4.0f;
const float v1218 = v1217 ? v1210 : v1215;
const float v1219 = v1217 ? v1211 : v1216;
const Int64 v1220 = v1217 ? v1212 : (Int64) 1 + v1212;
const float v1221 = v1218 * v1218 - v1219 * v1219;
const float v1222 = v1218 * v1219 + v1219 * v1218;
const float v1223 = v8 + v1221;
const float v1224 = v9 + v1222;
const Word8 v1225 = v1223 * v1223 + v1224 * v1224 > 4.0f;
const float v1226 = v1225 ? v1218 : v1223;
const float v1227 = v1225 ? v1219 : v1224;
const Int64 v1228 = v1225 ? v1220 : (Int64) 1 + v1220;
const float v1229 = v1226 * v1226 - v1227 * v1227;
const float v1230 = v1226 * v1227 + v1227 * v1226;
const float v1231 = v8 + v1229;
const float v1232 = v9 + v1230;
const Word8 v1233 = v1231 * v1231 + v1232 * v1232 > 4.0f;
const float v1234 = v1233 ? v1226 : v1231;
const float v1235 = v1233 ? v1227 : v1232;
const Int64 v1236 = v1233 ? v1228 : (Int64) 1 + v1228;
const float v1237 = v1234 * v1234 - v1235 * v1235;
const float v1238 = v1234 * v1235 + v1235 * v1234;
const float v1239 = v8 + v1237;
const float v1240 = v9 + v1238;
const Word8 v1241 = v1239 * v1239 + v1240 * v1240 > 4.0f;
const float v1242 = v1241 ? v1234 : v1239;
const float v1243 = v1241 ? v1235 : v1240;
const Int64 v1244 = v1241 ? v1236 : (Int64) 1 + v1236;
const float v1245 = v1242 * v1242 - v1243 * v1243;
const float v1246 = v1242 * v1243 + v1243 * v1242;
const float v1247 = v8 + v1245;
const float v1248 = v9 + v1246;
const Word8 v1249 = v1247 * v1247 + v1248 * v1248 > 4.0f;
const float v1250 = v1249 ? v1242 : v1247;
const float v1251 = v1249 ? v1243 : v1248;
const Int64 v1252 = v1249 ? v1244 : (Int64) 1 + v1244;
const float v1253 = v1250 * v1250 - v1251 * v1251;
const float v1254 = v1250 * v1251 + v1251 * v1250;
const float v1255 = v8 + v1253;
const float v1256 = v9 + v1254;
const Word8 v1257 = v1255 * v1255 + v1256 * v1256 > 4.0f;
const float v1258 = v1257 ? v1250 : v1255;
const float v1259 = v1257 ? v1251 : v1256;
const Int64 v1260 = v1257 ? v1252 : (Int64) 1 + v1252;
const float v1261 = v1258 * v1258 - v1259 * v1259;
const float v1262 = v1258 * v1259 + v1259 * v1258;
const float v1263 = v8 + v1261;
const float v1264 = v9 + v1262;
const Word8 v1265 = v1263 * v1263 + v1264 * v1264 > 4.0f;
const float v1266 = v1265 ? v1258 : v1263;
const float v1267 = v1265 ? v1259 : v1264;
const Int64 v1268 = v1265 ? v1260 : (Int64) 1 + v1260;
const float v1269 = v1266 * v1266 - v1267 * v1267;
const float v1270 = v1266 * v1267 + v1267 * v1266;
const float v1271 = v8 + v1269;
const float v1272 = v9 + v1270;
const Word8 v1273 = v1271 * v1271 + v1272 * v1272 > 4.0f;
const float v1274 = v1273 ? v1266 : v1271;
const float v1275 = v1273 ? v1267 : v1272;
const Int64 v1276 = v1273 ? v1268 : (Int64) 1 + v1268;
const float v1277 = v1274 * v1274 - v1275 * v1275;
const float v1278 = v1274 * v1275 + v1275 * v1274;
const float v1279 = v8 + v1277;
const float v1280 = v9 + v1278;
const Word8 v1281 = v1279 * v1279 + v1280 * v1280 > 4.0f;
const float v1282 = v1281 ? v1274 : v1279;
const float v1283 = v1281 ? v1275 : v1280;
const Int64 v1284 = v1281 ? v1276 : (Int64) 1 + v1276;
const float v1285 = v1282 * v1282 - v1283 * v1283;
const float v1286 = v1282 * v1283 + v1283 * v1282;
const float v1287 = v8 + v1285;
const float v1288 = v9 + v1286;
const Word8 v1289 = v1287 * v1287 + v1288 * v1288 > 4.0f;
const float v1290 = v1289 ? v1282 : v1287;
const float v1291 = v1289 ? v1283 : v1288;
const Int64 v1292 = v1289 ? v1284 : (Int64) 1 + v1284;
const float v1293 = v1290 * v1290 - v1291 * v1291;
const float v1294 = v1290 * v1291 + v1291 * v1290;
const float v1295 = v8 + v1293;
const float v1296 = v9 + v1294;
const Word8 v1297 = v1295 * v1295 + v1296 * v1296 > 4.0f;
const float v1298 = v1297 ? v1290 : v1295;
const float v1299 = v1297 ? v1291 : v1296;
const Int64 v1300 = v1297 ? v1292 : (Int64) 1 + v1292;
const float v1301 = v1298 * v1298 - v1299 * v1299;
const float v1302 = v1298 * v1299 + v1299 * v1298;
const float v1303 = v8 + v1301;
const float v1304 = v9 + v1302;
const Word8 v1305 = v1303 * v1303 + v1304 * v1304 > 4.0f;
const float v1306 = v1305 ? v1298 : v1303;
const float v1307 = v1305 ? v1299 : v1304;
const Int64 v1308 = v1305 ? v1300 : (Int64) 1 + v1300;
const float v1309 = v1306 * v1306 - v1307 * v1307;
const float v1310 = v1306 * v1307 + v1307 * v1306;
const float v1311 = v8 + v1309;
const float v1312 = v9 + v1310;
const Word8 v1313 = v1311 * v1311 + v1312 * v1312 > 4.0f;
const float v1314 = v1313 ? v1306 : v1311;
const float v1315 = v1313 ? v1307 : v1312;
const Int64 v1316 = v1313 ? v1308 : (Int64) 1 + v1308;
const float v1317 = v1314 * v1314 - v1315 * v1315;
const float v1318 = v1314 * v1315 + v1315 * v1314;
const float v1319 = v8 + v1317;
const float v1320 = v9 + v1318;
const Word8 v1321 = v1319 * v1319 + v1320 * v1320 > 4.0f;
const float v1322 = v1321 ? v1314 : v1319;
const float v1323 = v1321 ? v1315 : v1320;
const Int64 v1324 = v1321 ? v1316 : (Int64) 1 + v1316;
const float v1325 = v1322 * v1322 - v1323 * v1323;
const float v1326 = v1322 * v1323 + v1323 * v1322;
const float v1327 = v8 + v1325;
const float v1328 = v9 + v1326;
const Word8 v1329 = v1327 * v1327 + v1328 * v1328 > 4.0f;
const float v1330 = v1329 ? v1322 : v1327;
const float v1331 = v1329 ? v1323 : v1328;
const Int64 v1332 = v1329 ? v1324 : (Int64) 1 + v1324;
const float v1333 = v1330 * v1330 - v1331 * v1331;
const float v1334 = v1330 * v1331 + v1331 * v1330;
const float v1335 = v8 + v1333;
const float v1336 = v9 + v1334;
const Word8 v1337 = v1335 * v1335 + v1336 * v1336 > 4.0f;
const float v1338 = v1337 ? v1330 : v1335;
const float v1339 = v1337 ? v1331 : v1336;
const Int64 v1340 = v1337 ? v1332 : (Int64) 1 + v1332;
const float v1341 = v1338 * v1338 - v1339 * v1339;
const float v1342 = v1338 * v1339 + v1339 * v1338;
const float v1343 = v8 + v1341;
const float v1344 = v9 + v1342;
const Word8 v1345 = v1343 * v1343 + v1344 * v1344 > 4.0f;
const float v1346 = v1345 ? v1338 : v1343;
const float v1347 = v1345 ? v1339 : v1344;
const Int64 v1348 = v1345 ? v1340 : (Int64) 1 + v1340;
const float v1349 = v1346 * v1346 - v1347 * v1347;
const float v1350 = v1346 * v1347 + v1347 * v1346;
const float v1351 = v8 + v1349;
const float v1352 = v9 + v1350;
const Word8 v1353 = v1351 * v1351 + v1352 * v1352 > 4.0f;
const float v1354 = v1353 ? v1346 : v1351;
const float v1355 = v1353 ? v1347 : v1352;
const Int64 v1356 = v1353 ? v1348 : (Int64) 1 + v1348;
const float v1357 = v1354 * v1354 - v1355 * v1355;
const float v1358 = v1354 * v1355 + v1355 * v1354;
const float v1359 = v8 + v1357;
const float v1360 = v9 + v1358;
const Word8 v1361 = v1359 * v1359 + v1360 * v1360 > 4.0f;
const float v1362 = v1361 ? v1354 : v1359;
const float v1363 = v1361 ? v1355 : v1360;
const Int64 v1364 = v1361 ? v1356 : (Int64) 1 + v1356;
const float v1365 = v1362 * v1362 - v1363 * v1363;
const float v1366 = v1362 * v1363 + v1363 * v1362;
const float v1367 = v8 + v1365;
const float v1368 = v9 + v1366;
const Word8 v1369 = v1367 * v1367 + v1368 * v1368 > 4.0f;
const float v1370 = v1369 ? v1362 : v1367;
const float v1371 = v1369 ? v1363 : v1368;
const Int64 v1372 = v1369 ? v1364 : (Int64) 1 + v1364;
const float v1373 = v1370 * v1370 - v1371 * v1371;
const float v1374 = v1370 * v1371 + v1371 * v1370;
const float v1375 = v8 + v1373;
const float v1376 = v9 + v1374;
const Word8 v1377 = v1375 * v1375 + v1376 * v1376 > 4.0f;
const float v1378 = v1377 ? v1370 : v1375;
const float v1379 = v1377 ? v1371 : v1376;
const Int64 v1380 = v1377 ? v1372 : (Int64) 1 + v1372;
const float v1381 = v1378 * v1378 - v1379 * v1379;
const float v1382 = v1378 * v1379 + v1379 * v1378;
const float v1383 = v8 + v1381;
const float v1384 = v9 + v1382;
const Word8 v1385 = v1383 * v1383 + v1384 * v1384 > 4.0f;
const float v1386 = v1385 ? v1378 : v1383;
const float v1387 = v1385 ? v1379 : v1384;
const Int64 v1388 = v1385 ? v1380 : (Int64) 1 + v1380;
const float v1389 = v1386 * v1386 - v1387 * v1387;
const float v1390 = v1386 * v1387 + v1387 * v1386;
const float v1391 = v8 + v1389;
const float v1392 = v9 + v1390;
const Word8 v1393 = v1391 * v1391 + v1392 * v1392 > 4.0f;
const float v1394 = v1393 ? v1386 : v1391;
const float v1395 = v1393 ? v1387 : v1392;
const Int64 v1396 = v1393 ? v1388 : (Int64) 1 + v1388;
const float v1397 = v1394 * v1394 - v1395 * v1395;
const float v1398 = v1394 * v1395 + v1395 * v1394;
const float v1399 = v8 + v1397;
const float v1400 = v9 + v1398;
const Word8 v1401 = v1399 * v1399 + v1400 * v1400 > 4.0f;
const float v1402 = v1401 ? v1394 : v1399;
const float v1403 = v1401 ? v1395 : v1400;
const Int64 v1404 = v1401 ? v1396 : (Int64) 1 + v1396;
const float v1405 = v1402 * v1402 - v1403 * v1403;
const float v1406 = v1402 * v1403 + v1403 * v1402;
const float v1407 = v8 + v1405;
const float v1408 = v9 + v1406;
const Word8 v1409 = v1407 * v1407 + v1408 * v1408 > 4.0f;
const float v1410 = v1409 ? v1402 : v1407;
const float v1411 = v1409 ? v1403 : v1408;
const Int64 v1412 = v1409 ? v1404 : (Int64) 1 + v1404;
const float v1413 = v1410 * v1410 - v1411 * v1411;
const float v1414 = v1410 * v1411 + v1411 * v1410;
const float v1415 = v8 + v1413;
const float v1416 = v9 + v1414;
const Word8 v1417 = v1415 * v1415 + v1416 * v1416 > 4.0f;
const float v1418 = v1417 ? v1410 : v1415;
const float v1419 = v1417 ? v1411 : v1416;
const Int64 v1420 = v1417 ? v1412 : (Int64) 1 + v1412;
const float v1421 = v1418 * v1418 - v1419 * v1419;
const float v1422 = v1418 * v1419 + v1419 * v1418;
const float v1423 = v8 + v1421;
const float v1424 = v9 + v1422;
const Word8 v1425 = v1423 * v1423 + v1424 * v1424 > 4.0f;
const float v1426 = v1425 ? v1418 : v1423;
const float v1427 = v1425 ? v1419 : v1424;
const Int64 v1428 = v1425 ? v1420 : (Int64) 1 + v1420;
const float v1429 = v1426 * v1426 - v1427 * v1427;
const float v1430 = v1426 * v1427 + v1427 * v1426;
const float v1431 = v8 + v1429;
const float v1432 = v9 + v1430;
const Word8 v1433 = v1431 * v1431 + v1432 * v1432 > 4.0f;
const float v1434 = v1433 ? v1426 : v1431;
const float v1435 = v1433 ? v1427 : v1432;
const Int64 v1436 = v1433 ? v1428 : (Int64) 1 + v1428;
const float v1437 = v1434 * v1434 - v1435 * v1435;
const float v1438 = v1434 * v1435 + v1435 * v1434;
const float v1439 = v8 + v1437;
const float v1440 = v9 + v1438;
const Word8 v1441 = v1439 * v1439 + v1440 * v1440 > 4.0f;
const float v1442 = v1441 ? v1434 : v1439;
const float v1443 = v1441 ? v1435 : v1440;
const Int64 v1444 = v1441 ? v1436 : (Int64) 1 + v1436;
const float v1445 = v1442 * v1442 - v1443 * v1443;
const float v1446 = v1442 * v1443 + v1443 * v1442;
const float v1447 = v8 + v1445;
const float v1448 = v9 + v1446;
const Word8 v1449 = v1447 * v1447 + v1448 * v1448 > 4.0f;
const float v1450 = v1449 ? v1442 : v1447;
const float v1451 = v1449 ? v1443 : v1448;
const Int64 v1452 = v1449 ? v1444 : (Int64) 1 + v1444;
const float v1453 = v1450 * v1450 - v1451 * v1451;
const float v1454 = v1450 * v1451 + v1451 * v1450;
const float v1455 = v8 + v1453;
const float v1456 = v9 + v1454;
const Word8 v1457 = v1455 * v1455 + v1456 * v1456 > 4.0f;
const float v1458 = v1457 ? v1450 : v1455;
const float v1459 = v1457 ? v1451 : v1456;
const Int64 v1460 = v1457 ? v1452 : (Int64) 1 + v1452;
const float v1461 = v1458 * v1458 - v1459 * v1459;
const float v1462 = v1458 * v1459 + v1459 * v1458;
const float v1463 = v8 + v1461;
const float v1464 = v9 + v1462;
const Word8 v1465 = v1463 * v1463 + v1464 * v1464 > 4.0f;
const float v1466 = v1465 ? v1458 : v1463;
const float v1467 = v1465 ? v1459 : v1464;
const Int64 v1468 = v1465 ? v1460 : (Int64) 1 + v1460;
const float v1469 = v1466 * v1466 - v1467 * v1467;
const float v1470 = v1466 * v1467 + v1467 * v1466;
const float v1471 = v8 + v1469;
const float v1472 = v9 + v1470;
const Word8 v1473 = v1471 * v1471 + v1472 * v1472 > 4.0f;
const float v1474 = v1473 ? v1466 : v1471;
const float v1475 = v1473 ? v1467 : v1472;
const Int64 v1476 = v1473 ? v1468 : (Int64) 1 + v1468;
const float v1477 = v1474 * v1474 - v1475 * v1475;
const float v1478 = v1474 * v1475 + v1475 * v1474;
const float v1479 = v8 + v1477;
const float v1480 = v9 + v1478;
const Word8 v1481 = v1479 * v1479 + v1480 * v1480 > 4.0f;
const float v1482 = v1481 ? v1474 : v1479;
const float v1483 = v1481 ? v1475 : v1480;
const Int64 v1484 = v1481 ? v1476 : (Int64) 1 + v1476;
const float v1485 = v1482 * v1482 - v1483 * v1483;
const float v1486 = v1482 * v1483 + v1483 * v1482;
const float v1487 = v8 + v1485;
const float v1488 = v9 + v1486;
const Word8 v1489 = v1487 * v1487 + v1488 * v1488 > 4.0f;
const float v1490 = v1489 ? v1482 : v1487;
const float v1491 = v1489 ? v1483 : v1488;
const Int64 v1492 = v1489 ? v1484 : (Int64) 1 + v1484;
const float v1493 = v1490 * v1490 - v1491 * v1491;
const float v1494 = v1490 * v1491 + v1491 * v1490;
const float v1495 = v8 + v1493;
const float v1496 = v9 + v1494;
const Word8 v1497 = v1495 * v1495 + v1496 * v1496 > 4.0f;
const float v1498 = v1497 ? v1490 : v1495;
const float v1499 = v1497 ? v1491 : v1496;
const Int64 v1500 = v1497 ? v1492 : (Int64) 1 + v1492;
const float v1501 = v1498 * v1498 - v1499 * v1499;
const float v1502 = v1498 * v1499 + v1499 * v1498;
const float v1503 = v8 + v1501;
const float v1504 = v9 + v1502;
const Word8 v1505 = v1503 * v1503 + v1504 * v1504 > 4.0f;
const float v1506 = v1505 ? v1498 : v1503;
const float v1507 = v1505 ? v1499 : v1504;
const Int64 v1508 = v1505 ? v1500 : (Int64) 1 + v1500;
const float v1509 = v1506 * v1506 - v1507 * v1507;
const float v1510 = v1506 * v1507 + v1507 * v1506;
const float v1511 = v8 + v1509;
const float v1512 = v9 + v1510;
const Word8 v1513 = v1511 * v1511 + v1512 * v1512 > 4.0f;
const float v1514 = v1513 ? v1506 : v1511;
const float v1515 = v1513 ? v1507 : v1512;
const Int64 v1516 = v1513 ? v1508 : (Int64) 1 + v1508;
const float v1517 = v1514 * v1514 - v1515 * v1515;
const float v1518 = v1514 * v1515 + v1515 * v1514;
const float v1519 = v8 + v1517;
const float v1520 = v9 + v1518;
const Word8 v1521 = v1519 * v1519 + v1520 * v1520 > 4.0f;
const float v1522 = v1521 ? v1514 : v1519;
const float v1523 = v1521 ? v1515 : v1520;
const Int64 v1524 = v1521 ? v1516 : (Int64) 1 + v1516;
const float v1525 = v1522 * v1522 - v1523 * v1523;
const float v1526 = v1522 * v1523 + v1523 * v1522;
const float v1527 = v8 + v1525;
const float v1528 = v9 + v1526;
const Word8 v1529 = v1527 * v1527 + v1528 * v1528 > 4.0f;
const float v1530 = v1529 ? v1522 : v1527;
const float v1531 = v1529 ? v1523 : v1528;
const Int64 v1532 = v1529 ? v1524 : (Int64) 1 + v1524;
const float v1533 = v1530 * v1530 - v1531 * v1531;
const float v1534 = v1530 * v1531 + v1531 * v1530;
const float v1535 = v8 + v1533;
const float v1536 = v9 + v1534;
const Word8 v1537 = v1535 * v1535 + v1536 * v1536 > 4.0f;
const float v1538 = v1537 ? v1530 : v1535;
const float v1539 = v1537 ? v1531 : v1536;
const Int64 v1540 = v1537 ? v1532 : (Int64) 1 + v1532;
const float v1541 = v1538 * v1538 - v1539 * v1539;
const float v1542 = v1538 * v1539 + v1539 * v1538;
const float v1543 = v8 + v1541;
const float v1544 = v9 + v1542;
const Word8 v1545 = v1543 * v1543 + v1544 * v1544 > 4.0f;
const float v1546 = v1545 ? v1538 : v1543;
const float v1547 = v1545 ? v1539 : v1544;
const Int64 v1548 = v1545 ? v1540 : (Int64) 1 + v1540;
const float v1549 = v1546 * v1546 - v1547 * v1547;
const float v1550 = v1546 * v1547 + v1547 * v1546;
const float v1551 = v8 + v1549;
const float v1552 = v9 + v1550;
const Word8 v1553 = v1551 * v1551 + v1552 * v1552 > 4.0f;
const float v1554 = v1553 ? v1546 : v1551;
const float v1555 = v1553 ? v1547 : v1552;
const Int64 v1556 = v1553 ? v1548 : (Int64) 1 + v1548;
const float v1557 = v1554 * v1554 - v1555 * v1555;
const float v1558 = v1554 * v1555 + v1555 * v1554;
const float v1559 = v8 + v1557;
const float v1560 = v9 + v1558;
const Word8 v1561 = v1559 * v1559 + v1560 * v1560 > 4.0f;
const float v1562 = v1561 ? v1554 : v1559;
const float v1563 = v1561 ? v1555 : v1560;
const Int64 v1564 = v1561 ? v1556 : (Int64) 1 + v1556;
const float v1565 = v1562 * v1562 - v1563 * v1563;
const float v1566 = v1562 * v1563 + v1563 * v1562;
const float v1567 = v8 + v1565;
const float v1568 = v9 + v1566;
const Word8 v1569 = v1567 * v1567 + v1568 * v1568 > 4.0f;
const float v1570 = v1569 ? v1562 : v1567;
const float v1571 = v1569 ? v1563 : v1568;
const Int64 v1572 = v1569 ? v1564 : (Int64) 1 + v1564;
const float v1573 = v1570 * v1570 - v1571 * v1571;
const float v1574 = v1570 * v1571 + v1571 * v1570;
const float v1575 = v8 + v1573;
const float v1576 = v9 + v1574;
const Word8 v1577 = v1575 * v1575 + v1576 * v1576 > 4.0f;
const float v1578 = v1577 ? v1570 : v1575;
const float v1579 = v1577 ? v1571 : v1576;
const Int64 v1580 = v1577 ? v1572 : (Int64) 1 + v1572;
const float v1581 = v1578 * v1578 - v1579 * v1579;
const float v1582 = v1578 * v1579 + v1579 * v1578;
const float v1583 = v8 + v1581;
const float v1584 = v9 + v1582;
const Word8 v1585 = v1583 * v1583 + v1584 * v1584 > 4.0f;
const float v1586 = v1585 ? v1578 : v1583;
const float v1587 = v1585 ? v1579 : v1584;
const Int64 v1588 = v1585 ? v1580 : (Int64) 1 + v1580;
const float v1589 = v1586 * v1586 - v1587 * v1587;
const float v1590 = v1586 * v1587 + v1587 * v1586;
const float v1591 = v8 + v1589;
const float v1592 = v9 + v1590;
const Word8 v1593 = v1591 * v1591 + v1592 * v1592 > 4.0f;
const float v1594 = v1593 ? v1586 : v1591;
const float v1595 = v1593 ? v1587 : v1592;
const Int64 v1596 = v1593 ? v1588 : (Int64) 1 + v1588;
const float v1597 = v1594 * v1594 - v1595 * v1595;
const float v1598 = v1594 * v1595 + v1595 * v1594;
const float v1599 = v8 + v1597;
const float v1600 = v9 + v1598;
const Word8 v1601 = v1599 * v1599 + v1600 * v1600 > 4.0f;
const float v1602 = v1601 ? v1594 : v1599;
const float v1603 = v1601 ? v1595 : v1600;
const Int64 v1604 = v1601 ? v1596 : (Int64) 1 + v1596;
const float v1605 = v1602 * v1602 - v1603 * v1603;
const float v1606 = v1602 * v1603 + v1603 * v1602;
const float v1607 = v8 + v1605;
const float v1608 = v9 + v1606;
const Word8 v1609 = v1607 * v1607 + v1608 * v1608 > 4.0f;
const float v1610 = v1609 ? v1602 : v1607;
const float v1611 = v1609 ? v1603 : v1608;
const Int64 v1612 = v1609 ? v1604 : (Int64) 1 + v1604;
const float v1613 = v1610 * v1610 - v1611 * v1611;
const float v1614 = v1610 * v1611 + v1611 * v1610;
const float v1615 = v8 + v1613;
const float v1616 = v9 + v1614;
const Word8 v1617 = v1615 * v1615 + v1616 * v1616 > 4.0f;
const float v1618 = v1617 ? v1610 : v1615;
const float v1619 = v1617 ? v1611 : v1616;
const Int64 v1620 = v1617 ? v1612 : (Int64) 1 + v1612;
const float v1621 = v1618 * v1618 - v1619 * v1619;
const float v1622 = v1618 * v1619 + v1619 * v1618;
const float v1623 = v8 + v1621;
const float v1624 = v9 + v1622;
const Word8 v1625 = v1623 * v1623 + v1624 * v1624 > 4.0f;
const float v1626 = v1625 ? v1618 : v1623;
const float v1627 = v1625 ? v1619 : v1624;
const Int64 v1628 = v1625 ? v1620 : (Int64) 1 + v1620;
const float v1629 = v1626 * v1626 - v1627 * v1627;
const float v1630 = v1626 * v1627 + v1627 * v1626;
const float v1631 = v8 + v1629;
const float v1632 = v9 + v1630;
const Word8 v1633 = v1631 * v1631 + v1632 * v1632 > 4.0f;
const float v1634 = v1633 ? v1626 : v1631;
const float v1635 = v1633 ? v1627 : v1632;
const Int64 v1636 = v1633 ? v1628 : (Int64) 1 + v1628;
const float v1637 = v1634 * v1634 - v1635 * v1635;
const float v1638 = v1634 * v1635 + v1635 * v1634;
const float v1639 = v8 + v1637;
const float v1640 = v9 + v1638;
const Word8 v1641 = v1639 * v1639 + v1640 * v1640 > 4.0f;
const float v1642 = v1641 ? v1634 : v1639;
const float v1643 = v1641 ? v1635 : v1640;
const Int64 v1644 = v1641 ? v1636 : (Int64) 1 + v1636;
const float v1645 = v1642 * v1642 - v1643 * v1643;
const float v1646 = v1642 * v1643 + v1643 * v1642;
const float v1647 = v8 + v1645;
const float v1648 = v9 + v1646;
const Word8 v1649 = v1647 * v1647 + v1648 * v1648 > 4.0f;
const float v1650 = v1649 ? v1642 : v1647;
const float v1651 = v1649 ? v1643 : v1648;
const Int64 v1652 = v1649 ? v1644 : (Int64) 1 + v1644;
const float v1653 = v1650 * v1650 - v1651 * v1651;
const float v1654 = v1650 * v1651 + v1651 * v1650;
const float v1655 = v8 + v1653;
const float v1656 = v9 + v1654;
const Word8 v1657 = v1655 * v1655 + v1656 * v1656 > 4.0f;
const float v1658 = v1657 ? v1650 : v1655;
const float v1659 = v1657 ? v1651 : v1656;
const Int64 v1660 = v1657 ? v1652 : (Int64) 1 + v1652;
const float v1661 = v1658 * v1658 - v1659 * v1659;
const float v1662 = v1658 * v1659 + v1659 * v1658;
const float v1663 = v8 + v1661;
const float v1664 = v9 + v1662;
const Word8 v1665 = v1663 * v1663 + v1664 * v1664 > 4.0f;
const float v1666 = v1665 ? v1658 : v1663;
const float v1667 = v1665 ? v1659 : v1664;
const Int64 v1668 = v1665 ? v1660 : (Int64) 1 + v1660;
const float v1669 = v1666 * v1666 - v1667 * v1667;
const float v1670 = v1666 * v1667 + v1667 * v1666;
const float v1671 = v8 + v1669;
const float v1672 = v9 + v1670;
const Word8 v1673 = v1671 * v1671 + v1672 * v1672 > 4.0f;
const float v1674 = v1673 ? v1666 : v1671;
const float v1675 = v1673 ? v1667 : v1672;
const Int64 v1676 = v1673 ? v1668 : (Int64) 1 + v1668;
const float v1677 = v1674 * v1674 - v1675 * v1675;
const float v1678 = v1674 * v1675 + v1675 * v1674;
const float v1679 = v8 + v1677;
const float v1680 = v9 + v1678;
const Word8 v1681 = v1679 * v1679 + v1680 * v1680 > 4.0f;
const float v1682 = v1681 ? v1674 : v1679;
const float v1683 = v1681 ? v1675 : v1680;
const Int64 v1684 = v1681 ? v1676 : (Int64) 1 + v1676;
const float v1685 = v1682 * v1682 - v1683 * v1683;
const float v1686 = v1682 * v1683 + v1683 * v1682;
const float v1687 = v8 + v1685;
const float v1688 = v9 + v1686;
const Word8 v1689 = v1687 * v1687 + v1688 * v1688 > 4.0f;
const float v1690 = v1689 ? v1682 : v1687;
const float v1691 = v1689 ? v1683 : v1688;
const Int64 v1692 = v1689 ? v1684 : (Int64) 1 + v1684;
const float v1693 = v1690 * v1690 - v1691 * v1691;
const float v1694 = v1690 * v1691 + v1691 * v1690;
const float v1695 = v8 + v1693;
const float v1696 = v9 + v1694;
const Word8 v1697 = v1695 * v1695 + v1696 * v1696 > 4.0f;
const float v1698 = v1697 ? v1690 : v1695;
const float v1699 = v1697 ? v1691 : v1696;
const Int64 v1700 = v1697 ? v1692 : (Int64) 1 + v1692;
const float v1701 = v1698 * v1698 - v1699 * v1699;
const float v1702 = v1698 * v1699 + v1699 * v1698;
const float v1703 = v8 + v1701;
const float v1704 = v9 + v1702;
const Word8 v1705 = v1703 * v1703 + v1704 * v1704 > 4.0f;
const float v1706 = v1705 ? v1698 : v1703;
const float v1707 = v1705 ? v1699 : v1704;
const Int64 v1708 = v1705 ? v1700 : (Int64) 1 + v1700;
const float v1709 = v1706 * v1706 - v1707 * v1707;
const float v1710 = v1706 * v1707 + v1707 * v1706;
const float v1711 = v8 + v1709;
const float v1712 = v9 + v1710;
const Word8 v1713 = v1711 * v1711 + v1712 * v1712 > 4.0f;
const float v1714 = v1713 ? v1706 : v1711;
const float v1715 = v1713 ? v1707 : v1712;
const Int64 v1716 = v1713 ? v1708 : (Int64) 1 + v1708;
const float v1717 = v1714 * v1714 - v1715 * v1715;
const float v1718 = v1714 * v1715 + v1715 * v1714;
const float v1719 = v8 + v1717;
const float v1720 = v9 + v1718;
const Word8 v1721 = v1719 * v1719 + v1720 * v1720 > 4.0f;
const float v1722 = v1721 ? v1714 : v1719;
const float v1723 = v1721 ? v1715 : v1720;
const Int64 v1724 = v1721 ? v1716 : (Int64) 1 + v1716;
const float v1725 = v1722 * v1722 - v1723 * v1723;
const float v1726 = v1722 * v1723 + v1723 * v1722;
const float v1727 = v8 + v1725;
const float v1728 = v9 + v1726;
const Word8 v1729 = v1727 * v1727 + v1728 * v1728 > 4.0f;
const float v1730 = v1729 ? v1722 : v1727;
const float v1731 = v1729 ? v1723 : v1728;
const Int64 v1732 = v1729 ? v1724 : (Int64) 1 + v1724;
const float v1733 = v1730 * v1730 - v1731 * v1731;
const float v1734 = v1730 * v1731 + v1731 * v1730;
const float v1735 = v8 + v1733;
const float v1736 = v9 + v1734;
const Word8 v1737 = v1735 * v1735 + v1736 * v1736 > 4.0f;
const float v1738 = v1737 ? v1730 : v1735;
const float v1739 = v1737 ? v1731 : v1736;
const Int64 v1740 = v1737 ? v1732 : (Int64) 1 + v1732;
const float v1741 = v1738 * v1738 - v1739 * v1739;
const float v1742 = v1738 * v1739 + v1739 * v1738;
const float v1743 = v8 + v1741;
const float v1744 = v9 + v1742;
const Word8 v1745 = v1743 * v1743 + v1744 * v1744 > 4.0f;
const float v1746 = v1745 ? v1738 : v1743;
const float v1747 = v1745 ? v1739 : v1744;
const Int64 v1748 = v1745 ? v1740 : (Int64) 1 + v1740;
const float v1749 = v1746 * v1746 - v1747 * v1747;
const float v1750 = v1746 * v1747 + v1747 * v1746;
const float v1751 = v8 + v1749;
const float v1752 = v9 + v1750;
const Word8 v1753 = v1751 * v1751 + v1752 * v1752 > 4.0f;
const float v1754 = v1753 ? v1746 : v1751;
const float v1755 = v1753 ? v1747 : v1752;
const Int64 v1756 = v1753 ? v1748 : (Int64) 1 + v1748;
const float v1757 = v1754 * v1754 - v1755 * v1755;
const float v1758 = v1754 * v1755 + v1755 * v1754;
const float v1759 = v8 + v1757;
const float v1760 = v9 + v1758;
const Word8 v1761 = v1759 * v1759 + v1760 * v1760 > 4.0f;
const float v1762 = v1761 ? v1754 : v1759;
const float v1763 = v1761 ? v1755 : v1760;
const Int64 v1764 = v1761 ? v1756 : (Int64) 1 + v1756;
const float v1765 = v1762 * v1762 - v1763 * v1763;
const float v1766 = v1762 * v1763 + v1763 * v1762;
const float v1767 = v8 + v1765;
const float v1768 = v9 + v1766;
const Word8 v1769 = v1767 * v1767 + v1768 * v1768 > 4.0f;
const float v1770 = v1769 ? v1762 : v1767;
const float v1771 = v1769 ? v1763 : v1768;
const Int64 v1772 = v1769 ? v1764 : (Int64) 1 + v1764;
const float v1773 = v1770 * v1770 - v1771 * v1771;
const float v1774 = v1770 * v1771 + v1771 * v1770;
const float v1775 = v8 + v1773;
const float v1776 = v9 + v1774;
const Word8 v1777 = v1775 * v1775 + v1776 * v1776 > 4.0f;
const float v1778 = v1777 ? v1770 : v1775;
const float v1779 = v1777 ? v1771 : v1776;
const Int64 v1780 = v1777 ? v1772 : (Int64) 1 + v1772;
const float v1781 = v1778 * v1778 - v1779 * v1779;
const float v1782 = v1778 * v1779 + v1779 * v1778;
const float v1783 = v8 + v1781;
const float v1784 = v9 + v1782;
const Word8 v1785 = v1783 * v1783 + v1784 * v1784 > 4.0f;
const float v1786 = v1785 ? v1778 : v1783;
const float v1787 = v1785 ? v1779 : v1784;
const Int64 v1788 = v1785 ? v1780 : (Int64) 1 + v1780;
const float v1789 = v1786 * v1786 - v1787 * v1787;
const float v1790 = v1786 * v1787 + v1787 * v1786;
const float v1791 = v8 + v1789;
const float v1792 = v9 + v1790;
const Word8 v1793 = v1791 * v1791 + v1792 * v1792 > 4.0f;
const float v1794 = v1793 ? v1786 : v1791;
const float v1795 = v1793 ? v1787 : v1792;
const Int64 v1796 = v1793 ? v1788 : (Int64) 1 + v1788;
const float v1797 = v1794 * v1794 - v1795 * v1795;
const float v1798 = v1794 * v1795 + v1795 * v1794;
const float v1799 = v8 + v1797;
const float v1800 = v9 + v1798;
const Word8 v1801 = v1799 * v1799 + v1800 * v1800 > 4.0f;
const float v1802 = v1801 ? v1794 : v1799;
const float v1803 = v1801 ? v1795 : v1800;
const Int64 v1804 = v1801 ? v1796 : (Int64) 1 + v1796;
const float v1805 = v1802 * v1802 - v1803 * v1803;
const float v1806 = v1802 * v1803 + v1803 * v1802;
const float v1807 = v8 + v1805;
const float v1808 = v9 + v1806;
const Word8 v1809 = v1807 * v1807 + v1808 * v1808 > 4.0f;
const float v1810 = v1809 ? v1802 : v1807;
const float v1811 = v1809 ? v1803 : v1808;
const Int64 v1812 = v1809 ? v1804 : (Int64) 1 + v1804;
const float v1813 = v1810 * v1810 - v1811 * v1811;
const float v1814 = v1810 * v1811 + v1811 * v1810;
const float v1815 = v8 + v1813;
const float v1816 = v9 + v1814;
const Word8 v1817 = v1815 * v1815 + v1816 * v1816 > 4.0f;
const float v1818 = v1817 ? v1810 : v1815;
const float v1819 = v1817 ? v1811 : v1816;
const Int64 v1820 = v1817 ? v1812 : (Int64) 1 + v1812;
const float v1821 = v1818 * v1818 - v1819 * v1819;
const float v1822 = v1818 * v1819 + v1819 * v1818;
const float v1823 = v8 + v1821;
const float v1824 = v9 + v1822;
const Word8 v1825 = v1823 * v1823 + v1824 * v1824 > 4.0f;
const float v1826 = v1825 ? v1818 : v1823;
const float v1827 = v1825 ? v1819 : v1824;
const Int64 v1828 = v1825 ? v1820 : (Int64) 1 + v1820;
const float v1829 = v1826 * v1826 - v1827 * v1827;
const float v1830 = v1826 * v1827 + v1827 * v1826;
const float v1831 = v8 + v1829;
const float v1832 = v9 + v1830;
const Word8 v1833 = v1831 * v1831 + v1832 * v1832 > 4.0f;
const float v1834 = v1833 ? v1826 : v1831;
const float v1835 = v1833 ? v1827 : v1832;
const Int64 v1836 = v1833 ? v1828 : (Int64) 1 + v1828;
const float v1837 = v1834 * v1834 - v1835 * v1835;
const float v1838 = v1834 * v1835 + v1835 * v1834;
const float v1839 = v8 + v1837;
const float v1840 = v9 + v1838;
const Word8 v1841 = v1839 * v1839 + v1840 * v1840 > 4.0f;
const float v1842 = v1841 ? v1834 : v1839;
const float v1843 = v1841 ? v1835 : v1840;
const Int64 v1844 = v1841 ? v1836 : (Int64) 1 + v1836;
const float v1845 = v1842 * v1842 - v1843 * v1843;
const float v1846 = v1842 * v1843 + v1843 * v1842;
const float v1847 = v8 + v1845;
const float v1848 = v9 + v1846;
const Word8 v1849 = v1847 * v1847 + v1848 * v1848 > 4.0f;
const float v1850 = v1849 ? v1842 : v1847;
const float v1851 = v1849 ? v1843 : v1848;
const Int64 v1852 = v1849 ? v1844 : (Int64) 1 + v1844;
const float v1853 = v1850 * v1850 - v1851 * v1851;
const float v1854 = v1850 * v1851 + v1851 * v1850;
const float v1855 = v8 + v1853;
const float v1856 = v9 + v1854;
const Word8 v1857 = v1855 * v1855 + v1856 * v1856 > 4.0f;
const float v1858 = v1857 ? v1850 : v1855;
const float v1859 = v1857 ? v1851 : v1856;
const Int64 v1860 = v1857 ? v1852 : (Int64) 1 + v1852;
const float v1861 = v1858 * v1858 - v1859 * v1859;
const float v1862 = v1858 * v1859 + v1859 * v1858;
const float v1863 = v8 + v1861;
const float v1864 = v9 + v1862;
const Word8 v1865 = v1863 * v1863 + v1864 * v1864 > 4.0f;
const float v1866 = v1865 ? v1858 : v1863;
const float v1867 = v1865 ? v1859 : v1864;
const Int64 v1868 = v1865 ? v1860 : (Int64) 1 + v1860;
const float v1869 = v1866 * v1866 - v1867 * v1867;
const float v1870 = v1866 * v1867 + v1867 * v1866;
const float v1871 = v8 + v1869;
const float v1872 = v9 + v1870;
const Word8 v1873 = v1871 * v1871 + v1872 * v1872 > 4.0f;
const float v1874 = v1873 ? v1866 : v1871;
const float v1875 = v1873 ? v1867 : v1872;
const Int64 v1876 = v1873 ? v1868 : (Int64) 1 + v1868;
const float v1877 = v1874 * v1874 - v1875 * v1875;
const float v1878 = v1874 * v1875 + v1875 * v1874;
const float v1879 = v8 + v1877;
const float v1880 = v9 + v1878;
const Word8 v1881 = v1879 * v1879 + v1880 * v1880 > 4.0f;
const float v1882 = v1881 ? v1874 : v1879;
const float v1883 = v1881 ? v1875 : v1880;
const Int64 v1884 = v1881 ? v1876 : (Int64) 1 + v1876;
const float v1885 = v1882 * v1882 - v1883 * v1883;
const float v1886 = v1882 * v1883 + v1883 * v1882;
const float v1887 = v8 + v1885;
const float v1888 = v9 + v1886;
const Word8 v1889 = v1887 * v1887 + v1888 * v1888 > 4.0f;
const float v1890 = v1889 ? v1882 : v1887;
const float v1891 = v1889 ? v1883 : v1888;
const Int64 v1892 = v1889 ? v1884 : (Int64) 1 + v1884;
const float v1893 = v1890 * v1890 - v1891 * v1891;
const float v1894 = v1890 * v1891 + v1891 * v1890;
const float v1895 = v8 + v1893;
const float v1896 = v9 + v1894;
const Word8 v1897 = v1895 * v1895 + v1896 * v1896 > 4.0f;
const float v1898 = v1897 ? v1890 : v1895;
const float v1899 = v1897 ? v1891 : v1896;
const Int64 v1900 = v1897 ? v1892 : (Int64) 1 + v1892;
const float v1901 = v1898 * v1898 - v1899 * v1899;
const float v1902 = v1898 * v1899 + v1899 * v1898;
const float v1903 = v8 + v1901;
const float v1904 = v9 + v1902;
const Word8 v1905 = v1903 * v1903 + v1904 * v1904 > 4.0f;
const float v1906 = v1905 ? v1898 : v1903;
const float v1907 = v1905 ? v1899 : v1904;
const Int64 v1908 = v1905 ? v1900 : (Int64) 1 + v1900;
const float v1909 = v1906 * v1906 - v1907 * v1907;
const float v1910 = v1906 * v1907 + v1907 * v1906;
const float v1911 = v8 + v1909;
const float v1912 = v9 + v1910;
const Word8 v1913 = v1911 * v1911 + v1912 * v1912 > 4.0f;
const float v1914 = v1913 ? v1906 : v1911;
const float v1915 = v1913 ? v1907 : v1912;
const Int64 v1916 = v1913 ? v1908 : (Int64) 1 + v1908;
const float v1917 = v1914 * v1914 - v1915 * v1915;
const float v1918 = v1914 * v1915 + v1915 * v1914;
const float v1919 = v8 + v1917;
const float v1920 = v9 + v1918;
const Word8 v1921 = v1919 * v1919 + v1920 * v1920 > 4.0f;
const float v1922 = v1921 ? v1914 : v1919;
const float v1923 = v1921 ? v1915 : v1920;
const Int64 v1924 = v1921 ? v1916 : (Int64) 1 + v1916;
const float v1925 = v1922 * v1922 - v1923 * v1923;
const float v1926 = v1922 * v1923 + v1923 * v1922;
const float v1927 = v8 + v1925;
const float v1928 = v9 + v1926;
const Word8 v1929 = v1927 * v1927 + v1928 * v1928 > 4.0f;
const float v1930 = v1929 ? v1922 : v1927;
const float v1931 = v1929 ? v1923 : v1928;
const Int64 v1932 = v1929 ? v1924 : (Int64) 1 + v1924;
const float v1933 = v1930 * v1930 - v1931 * v1931;
const float v1934 = v1930 * v1931 + v1931 * v1930;
const float v1935 = v8 + v1933;
const float v1936 = v9 + v1934;
const Word8 v1937 = v1935 * v1935 + v1936 * v1936 > 4.0f;
const float v1938 = v1937 ? v1930 : v1935;
const float v1939 = v1937 ? v1931 : v1936;
const Int64 v1940 = v1937 ? v1932 : (Int64) 1 + v1932;
const float v1941 = v1938 * v1938 - v1939 * v1939;
const float v1942 = v1938 * v1939 + v1939 * v1938;
const float v1943 = v8 + v1941;
const float v1944 = v9 + v1942;
const Word8 v1945 = v1943 * v1943 + v1944 * v1944 > 4.0f;
const float v1946 = v1945 ? v1938 : v1943;
const float v1947 = v1945 ? v1939 : v1944;
const Int64 v1948 = v1945 ? v1940 : (Int64) 1 + v1940;
const float v1949 = v1946 * v1946 - v1947 * v1947;
const float v1950 = v1946 * v1947 + v1947 * v1946;
const float v1951 = v8 + v1949;
const float v1952 = v9 + v1950;
const Word8 v1953 = v1951 * v1951 + v1952 * v1952 > 4.0f;
const float v1954 = v1953 ? v1946 : v1951;
const float v1955 = v1953 ? v1947 : v1952;
const Int64 v1956 = v1953 ? v1948 : (Int64) 1 + v1948;
const float v1957 = v1954 * v1954 - v1955 * v1955;
const float v1958 = v1954 * v1955 + v1955 * v1954;
const float v1959 = v8 + v1957;
const float v1960 = v9 + v1958;
const Word8 v1961 = v1959 * v1959 + v1960 * v1960 > 4.0f;
const float v1962 = v1961 ? v1954 : v1959;
const float v1963 = v1961 ? v1955 : v1960;
const Int64 v1964 = v1961 ? v1956 : (Int64) 1 + v1956;
const float v1965 = v1962 * v1962 - v1963 * v1963;
const float v1966 = v1962 * v1963 + v1963 * v1962;
const float v1967 = v8 + v1965;
const float v1968 = v9 + v1966;
const Word8 v1969 = v1967 * v1967 + v1968 * v1968 > 4.0f;
const float v1970 = v1969 ? v1962 : v1967;
const float v1971 = v1969 ? v1963 : v1968;
const Int64 v1972 = v1969 ? v1964 : (Int64) 1 + v1964;
const float v1973 = v1970 * v1970 - v1971 * v1971;
const float v1974 = v1970 * v1971 + v1971 * v1970;
const float v1975 = v8 + v1973;
const float v1976 = v9 + v1974;
const Word8 v1977 = v1975 * v1975 + v1976 * v1976 > 4.0f;
const float v1978 = v1977 ? v1970 : v1975;
const float v1979 = v1977 ? v1971 : v1976;
const Int64 v1980 = v1977 ? v1972 : (Int64) 1 + v1972;
const float v1981 = v1978 * v1978 - v1979 * v1979;
const float v1982 = v1978 * v1979 + v1979 * v1978;
const float v1983 = v8 + v1981;
const float v1984 = v9 + v1982;
const Word8 v1985 = v1983 * v1983 + v1984 * v1984 > 4.0f;
const float v1986 = v1985 ? v1978 : v1983;
const float v1987 = v1985 ? v1979 : v1984;
const Int64 v1988 = v1985 ? v1980 : (Int64) 1 + v1980;
const float v1989 = v1986 * v1986 - v1987 * v1987;
const float v1990 = v1986 * v1987 + v1987 * v1986;
const float v1991 = v8 + v1989;
const float v1992 = v9 + v1990;
const Word8 v1993 = v1991 * v1991 + v1992 * v1992 > 4.0f;
const float v1994 = v1993 ? v1986 : v1991;
const float v1995 = v1993 ? v1987 : v1992;
const Int64 v1996 = v1993 ? v1988 : (Int64) 1 + v1988;
const float v1997 = v1994 * v1994 - v1995 * v1995;
const float v1998 = v1994 * v1995 + v1995 * v1994;
const float v1999 = v8 + v1997;
const float v2000 = v9 + v1998;
const Word8 v2001 = v1999 * v1999 + v2000 * v2000 > 4.0f;
const float v2002 = v2001 ? v1994 : v1999;
const float v2003 = v2001 ? v1995 : v2000;
const Int64 v2004 = v2001 ? v1996 : (Int64) 1 + v1996;
const float v2005 = v2002 * v2002 - v2003 * v2003;
const float v2006 = v2002 * v2003 + v2003 * v2002;
const float v2007 = v8 + v2005;
const float v2008 = v9 + v2006;
const Word8 v2009 = v2007 * v2007 + v2008 * v2008 > 4.0f;
const float v2010 = v2009 ? v2002 : v2007;
const float v2011 = v2009 ? v2003 : v2008;
const Int64 v2012 = v2009 ? v2004 : (Int64) 1 + v2004;
const float v2013 = v2010 * v2010 - v2011 * v2011;
const float v2014 = v2010 * v2011 + v2011 * v2010;
const float v2015 = v8 + v2013;
const float v2016 = v9 + v2014;
const Word8 v2017 = v2015 * v2015 + v2016 * v2016 > 4.0f;
const float v2018 = v2017 ? v2010 : v2015;
const float v2019 = v2017 ? v2011 : v2016;
const Int64 v2020 = v2017 ? v2012 : (Int64) 1 + v2012;
const float v2021 = v2018 * v2018 - v2019 * v2019;
const float v2022 = v2018 * v2019 + v2019 * v2018;
const float v2023 = v8 + v2021;
const float v2024 = v9 + v2022;
const Word8 v2025 = v2023 * v2023 + v2024 * v2024 > 4.0f;
const float v2026 = v2025 ? v2018 : v2023;
const float v2027 = v2025 ? v2019 : v2024;
const Int64 v2028 = v2025 ? v2020 : (Int64) 1 + v2020;
const float v2029 = v2026 * v2026 - v2027 * v2027;
const float v2030 = v2026 * v2027 + v2027 * v2026;
const float v2031 = v8 + v2029;
const float v2032 = v9 + v2030;
const Word8 v2033 = v2031 * v2031 + v2032 * v2032 > 4.0f;
const float v2034 = v2033 ? v2026 : v2031;
const float v2035 = v2033 ? v2027 : v2032;
const Int64 v2036 = v2033 ? v2028 : (Int64) 1 + v2028;
const float v2037 = v2034 * v2034 - v2035 * v2035;
const float v2038 = v2034 * v2035 + v2035 * v2034;
const float v2039 = v8 + v2037;
const float v2040 = v9 + v2038;
const Word8 v2041 = v2039 * v2039 + v2040 * v2040 > 4.0f;
const float v2042 = v2041 ? v2034 : v2039;
const float v2043 = v2041 ? v2035 : v2040;
const Int64 v2044 = v2041 ? v2036 : (Int64) 1 + v2036;
const float v2045 = v2042 * v2042 - v2043 * v2043;
const float v2046 = v2042 * v2043 + v2043 * v2042;
const float v2047 = v8 + v2045;
const float v2048 = v9 + v2046;
const Word8 v2049 = v2047 * v2047 + v2048 * v2048 > 4.0f;
const Int64 v2050 = (Int64) (v2049 ? v2044 : (Int64) 1 + v2044);
const Word8 v2051 = v0 == v2050;
const Int64 v2052 = v0 - v2050;
const Word8 v2053 = (Word8) 0;
const Word8 v2054 = (Word8) ((Int64) 7 * v2052);
const Word8 v2055 = (Word8) ((Int64) 5 * v2052);
const Word8 v2056 = (Word8) ((Int64) 3 * v2052);
arrOut_a0[ix] = v2051 ? (Word32) 4278190080 : (Word32) 4294967295 - ((Word32) v2053 + (Word32) 256 * (Word32) v2054 + (Word32) 65536 * (Word32) v2055 + (Word32) 16777216 * (Word32) v2056);
}
}
|
e5541a292ac7cb9612afda9c365e19b5380cc3e9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* NAME: Tomas Vycas
*
* ASSIGNMENT GOALS ACHIEVED:
 ∗ Block scan
 ∗ Full scan for large vectors
 ∗ Bank conflict avoidance optimization (BCAO)
 *
 * TIMINGS (BLOCK_SIZE = 128):
 ∗ Block scan without BCAO = 1.10294 msecs
 ∗ Block scan with BCAO = 0.47206 msecs
 ∗ Full scan without BCAO = 1.39594 msecs
 ∗ Full scan with BCAO = 0.76058 msecs
 *
 * MACHINE:
 * CPU - Intel® Core™ i7-8700 CPU @ 3.20GHz × 12
* GPU - GeForce RTX 2060
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <math.h>
// A helper macro to simplify handling cuda error checking
#define CUDA_ERROR( err, msg ) { \
if (err != hipSuccess) {\
printf( "%s: %s in %s at line %d\n", msg, hipGetErrorString( err ), __FILE__, __LINE__);\
exit( EXIT_FAILURE );\
}\
}
// This block size achieved best results
#define BLOCK_SIZE 128
// For BCAO
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) ( ((n) >> LOG_NUM_BANKS) + ((n) >> (2 * LOG_NUM_BANKS)) )
#else
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS)
#endif
// You need extra shared memory space if using BCAO because of
// the padding. Note this is the number of WORDS of padding:
#define EXTRA (CONFLICT_FREE_OFFSET(BLOCK_SIZE * 2 - 1))
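// Worked example of the padding arithmetic above, using the NUM_BANKS = 32,
// LOG_NUM_BANKS = 5 and BLOCK_SIZE = 128 values defined in this file:
//   CONFLICT_FREE_OFFSET(31)  = (31 >> 5)  + (31 >> 10)  = 0
//   CONFLICT_FREE_OFFSET(32)  = (32 >> 5)  + (32 >> 10)  = 1
//   CONFLICT_FREE_OFFSET(128) = (128 >> 5) + (128 >> 10) = 4
//   CONFLICT_FREE_OFFSET(255) = (255 >> 5) + (255 >> 10) = 7
// so EXTRA evaluates to 7 padding words for one 256-element (BLOCK_SIZE * 2) block.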
// Compares two arrays and outputs if they match or prints the first element that failed the check otherwise
bool compareArrays(int *array1, int *array2, int numElements) {
for (int i = 0; i < numElements; ++i) {
if (array1[i] != array2[i]) {
printf("ARRAY CHECK FAIL at arr1 = %d, arr2 = %d, at index = %d\n", array1[i], array2[i], i);
return false;
}
}
return true;
}
// Sequential implementation of a full array scan
__host__
void hostFullScan(int *g_idata, int *g_odata, int n) {
g_odata[0] = 0;
for (int i = 1; i < n; i++) {
g_odata[i] = g_odata[i - 1] + g_idata[i - 1];
}
}
// Outputs the prescanned array, computed in independent blocks of BLOCK_SIZE * 2 elements
// Is unstable on large arrays
__host__
void hostBlockScan(const int *x, int *y , int numElements){
int num_blocks = 1 + (numElements - 1) / BLOCK_SIZE;
for (int blk = 0; blk < num_blocks; blk++){
int blk_start = blk * BLOCK_SIZE*2;
int blk_end = blk_start + BLOCK_SIZE*2;
if (blk_end > numElements){
blk_end = numElements;
}
y[blk_start] = 0; // since this is a prescan, not a scan
for(int j = blk_start + 1; j < blk_end; j++){
y[j] = x[j-1] + y[j-1];
}
}
}
// Takes the output array and for each block i, adds value i from INCR array to every element
__global__
void uniformAdd(int *outputArray, int numElements, int *INCR){
int index = threadIdx.x + (2 * BLOCK_SIZE) * blockIdx.x;
int valueToAdd = INCR[blockIdx.x];
// Each thread sums two elements
if (index < numElements){
outputArray[index] += valueToAdd;
}
if (index + BLOCK_SIZE < numElements){
outputArray[index + BLOCK_SIZE] += valueToAdd;
}
}
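// Illustrative trace of uniformAdd within the full scan (the numbers here are made
// up for the example): if the per-block prescans produced block sums SUMS = {10, 7, 12},
// then the prescan of SUMS gives INCR = {0, 10, 17}, so block 0 is left unchanged,
// every element of block 1 gets +10 and every element of block 2 gets +17, which are
// exactly the offsets needed to stitch the independent block prescans into one full prescan.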
// Block prescan that works for any array length, operating on blocks of BLOCK_SIZE * 2 elements
__global__
void blockPrescan(int *g_idata, int *g_odata, int n, int *SUM)
{
__shared__ int temp[BLOCK_SIZE << 1]; // allocated on invocation
int thid = threadIdx.x;
int offset = 1;
int blockOffset = BLOCK_SIZE * blockIdx.x * 2;
// Copy the correct elements from the global array
if (blockOffset + (thid * 2) < n){
temp[thid * 2] = g_idata[blockOffset + (thid * 2)];
}
if (blockOffset + (thid * 2) + 1 < n){
temp[(thid * 2)+1] = g_idata[blockOffset + (thid * 2)+1];
}
// Build sum in place up the tree
for (int d = BLOCK_SIZE; d > 0; d >>= 1){
__syncthreads();
if (thid < d){
int ai = offset*((thid * 2)+1)-1;
int bi = offset*((thid * 2)+2)-1;
temp[bi] += temp[ai];
}
offset <<= 1;
}
if (thid == 0) {
if(SUM != NULL){
// If doing a FULL scan, save the last value in the SUMS array for later processing
SUM[blockIdx.x] = temp[(BLOCK_SIZE << 1) - 1];
}
temp[(BLOCK_SIZE << 1) - 1] = 0; // clear the last element
}
// Traverse down tree & build scan
for (int d = 1; d < BLOCK_SIZE << 1; d <<= 1){
offset >>= 1;
__syncthreads();
if (thid < d){
int ai = offset*((thid * 2)+1)-1;
int bi = offset*((thid * 2)+2)-1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
// Copy the new array back to global array
__syncthreads();
if (blockOffset + (thid * 2) < n){
g_odata[blockOffset + (thid * 2)] = temp[(thid * 2)]; // write results to device memory
}
if (blockOffset + (thid * 2) + 1 < n){
g_odata[blockOffset + ((thid * 2)+1)] = temp[(thid * 2)+1];
}
}
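// Worked example of the up-sweep / down-sweep performed by blockPrescan above,
// shown for a single 8-element block:
//   input:              3 1 7 0  4 1 6  3
//   after up-sweep:     3 4 7 11 4 5 6 25   (the last element holds the block total)
//   after clearing the last element and the down-sweep:
//                       0 3 4 11 11 15 16 22   (the exclusive prescan of the input)
// When SUM != NULL, the block total (25) is written to SUM[blockIdx.x] before it is cleared.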
__host__
void fullPrescan(int *h_x, int *h_y, int numElements) {
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
size_t size = numElements * sizeof(int);
// The number of blocks it would take to process the array at each level
int blocksPerGridL1 = 1 + (numElements - 1) / (BLOCK_SIZE * 2);
int blocksPerGridL2 = 1 + blocksPerGridL1 / (BLOCK_SIZE * 2);
int blocksPerGridL3 = 1 + blocksPerGridL2 / (BLOCK_SIZE * 2);
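// For example, with the numElements = 10000000 used in main() and BLOCK_SIZE = 128
// (256 elements per block): blocksPerGridL1 = 39063, blocksPerGridL2 = 153 and
// blocksPerGridL3 = 1, so the level-3 branch below is the one that runs.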
int *d_x = NULL;
err = hipMalloc((void **) &d_x, size);
CUDA_ERROR(err, "Failed to allocate device array x");
int *d_y = NULL;
err = hipMalloc((void**) &d_y, size);
CUDA_ERROR(err, "Failed to allocate device array y");
// Only define in here and actually allocate memory to these arrays if needed
int *d_SUMS_LEVEL1 = NULL;
int *d_INCR_LEVEL1 = NULL;
int *d_SUMS_LEVEL2 = NULL;
int *d_INCR_LEVEL2 = NULL;
err = hipMemcpy(d_x, h_x, size, hipMemcpyHostToDevice);
CUDA_ERROR(err, "Failed to copy array x from host to device");
// Create the device timer
hipEvent_t d_start, d_stop;
float d_msecs;
hipEventCreate(&d_start);
hipEventCreate(&d_stop);
//-----------------Pick the correct level and execute the kernels----------
// The correct level is going to be where the SUMS array can be prescanned with only one block
if(blocksPerGridL1 == 1){
hipEventRecord(d_start, 0);
hipLaunchKernelGGL(( blockPrescan), dim3(blocksPerGridL1), dim3(BLOCK_SIZE), 0, 0, d_x, d_y, numElements, NULL);
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
hipDeviceSynchronize();
} else if (blocksPerGridL2 == 1) {
// SUMS and INCR arrays need to be allocated to store intermediate values
err = hipMalloc((void**) &d_SUMS_LEVEL1, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to free device array x");
err = hipMalloc((void**) &d_INCR_LEVEL1, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_INCR_LEVEL1");
// Start timer and the execution of kernels
hipEventRecord(d_start, 0);
hipLaunchKernelGGL(( blockPrescan), dim3(blocksPerGridL1), dim3(BLOCK_SIZE), 0, 0, d_x, d_y, numElements, d_SUMS_LEVEL1);
// Run a second prescan on the SUMS array
hipLaunchKernelGGL(( blockPrescan), dim3(blocksPerGridL2), dim3(BLOCK_SIZE), 0, 0, d_SUMS_LEVEL1, d_INCR_LEVEL1, blocksPerGridL1, NULL);
// Add the values of INCR array to the corresponding blocks of the d_y array
hipLaunchKernelGGL(( uniformAdd), dim3(blocksPerGridL1), dim3(BLOCK_SIZE), 0, 0, d_y, numElements, d_INCR_LEVEL1);
// Stop the timer
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
hipDeviceSynchronize();
} else if (blocksPerGridL3 == 1) {
// SUMS and INCR arrays need to be allocated to store intermediate values
err = hipMalloc((void**) &d_SUMS_LEVEL1, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_SUMS_LEVEL1");
err = hipMalloc((void**) &d_SUMS_LEVEL2, (BLOCK_SIZE * 2) * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_SUMS_LEVEL2");
err = hipMalloc((void**) &d_INCR_LEVEL1, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_INCR");
err = hipMalloc((void**) &d_INCR_LEVEL2, (BLOCK_SIZE * 2)* sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_INCR");
// Start timer and the execution of kernels
hipEventRecord(d_start, 0);
hipLaunchKernelGGL(( blockPrescan), dim3(blocksPerGridL1), dim3(BLOCK_SIZE), 0, 0, d_x, d_y, numElements, d_SUMS_LEVEL1);
hipLaunchKernelGGL(( blockPrescan), dim3(blocksPerGridL2), dim3(BLOCK_SIZE), 0, 0, d_SUMS_LEVEL1, d_INCR_LEVEL1, blocksPerGridL1, d_SUMS_LEVEL2);
hipLaunchKernelGGL(( blockPrescan), dim3(blocksPerGridL3), dim3(BLOCK_SIZE), 0, 0, d_SUMS_LEVEL2, d_INCR_LEVEL2, blocksPerGridL2, NULL);
hipLaunchKernelGGL(( uniformAdd), dim3(blocksPerGridL2), dim3(BLOCK_SIZE), 0, 0, d_INCR_LEVEL1, blocksPerGridL1, d_INCR_LEVEL2);
hipLaunchKernelGGL(( uniformAdd), dim3(blocksPerGridL1), dim3(BLOCK_SIZE), 0, 0, d_y, numElements, d_INCR_LEVEL1);
// Stop the timer
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
hipDeviceSynchronize();
}else {
printf("The array of length = %d is to large for a level 3 FULL prescan\n", numElements);
exit(EXIT_FAILURE);
}
//---------------------------Timing and verification-----------------------
err = hipGetLastError();
CUDA_ERROR(err, "Failed to launch fullPrescan");
err = hipEventElapsedTime(&d_msecs, d_start, d_stop);
CUDA_ERROR(err, "Failed to get elapsed time");
int *h_dOutput = (int *)malloc(size);
err = hipMemcpy(h_dOutput, d_y, size, hipMemcpyDeviceToHost);
CUDA_ERROR(err, "Failed to copy array y from device to host");
// Verify that the result vector is correct
if(compareArrays(h_dOutput, h_y, numElements)){
printf("DEVICE FULL non-BCAO prescan test passed, the scan took %.5f msecs\n", d_msecs);
}else{
printf("DEVICE FULL non-BCAO prescan test failed, the scan took %.5f msecs\n", d_msecs);
}
//-------------------------------Cleanup-----------------------------------
// Free device memory
err = hipFree(d_x);
CUDA_ERROR(err, "Failed to free device array x");
err = hipFree(d_y);
CUDA_ERROR(err, "Failed to free device array y");
// Only need to free these arrays if they were allocated
if(blocksPerGridL2 == 1 || blocksPerGridL3 == 1){
err = hipFree(d_SUMS_LEVEL1);
CUDA_ERROR(err, "Failed to free device array d_SUMS_LEVEL1");
err = hipFree(d_INCR_LEVEL1);
CUDA_ERROR(err, "Failed to free device array d_INCR_LEVEL1");
}
if(blocksPerGridL3 == 1){
err = hipFree(d_SUMS_LEVEL2);
CUDA_ERROR(err, "Failed to free device array d_SUMS_LEVEL2");
err = hipFree(d_INCR_LEVEL2);
CUDA_ERROR(err, "Failed to free device array d_INCR_LEVEL2");
}
// Destroy device timer events
hipEventDestroy(d_start);
hipEventDestroy(d_stop);
// Reset the device
err = hipDeviceReset();
CUDA_ERROR(err, "Failed to reset the device");
}
// BCAO block prescan that works for any array length, operating on blocks of BLOCK_SIZE * 2 elements
__global__
void BCAO_blockPrescan(int *g_idata, int *g_odata, int n, int *SUM)
{
__shared__ int temp[BLOCK_SIZE * 2 + (BLOCK_SIZE)]; // allocated on invocation
int thid = threadIdx.x;
int offset = 1;
int blockOffset = BLOCK_SIZE * blockIdx.x * 2;
// Create the correct offsets for BCAO
int ai = thid;
int bi = thid + BLOCK_SIZE;
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Copy the correct elements from the global array
if (blockOffset + ai < n){
temp[ai + bankOffsetA] = g_idata[blockOffset + ai]; // load input into shared memory
}
if (blockOffset + bi < n){
temp[bi + bankOffsetB] = g_idata[blockOffset + bi];
}
// Build sum in place up the tree
for (int d = BLOCK_SIZE; d > 0; d >>= 1){
__syncthreads();
if (thid < d){
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) {
if(SUM != NULL){
// If doing a FULL scan, save the last value in the SUMS array for later processing
SUM[blockIdx.x] = temp[(BLOCK_SIZE * 2) - 1 + CONFLICT_FREE_OFFSET((BLOCK_SIZE * 2) - 1)];
}
temp[(BLOCK_SIZE * 2) - 1 + CONFLICT_FREE_OFFSET((BLOCK_SIZE * 2) - 1)] = 0; // clear the last element
}
// Traverse down tree & build scan
for (int d = 1; d < BLOCK_SIZE * 2; d *= 2){
offset >>= 1;
__syncthreads();
if (thid < d){
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
// Copy the new array back to global array
__syncthreads();
if (blockOffset + ai < n){
g_odata[blockOffset + ai] = temp[ai + bankOffsetA]; // write results to device memory
}
if (blockOffset + bi < n){
g_odata[blockOffset + bi] = temp[bi + bankOffsetB];
}
}
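// Worked example of what the CONFLICT_FREE_OFFSET padding in the kernel above buys
// during the tree phases: at offset = 16 the eight active threads read
// ai = 16*(2*thid+1)-1 = 15, 47, 79, ..., 239, indices that are all congruent to
// 15 mod 32, so without padding every access hits shared-memory bank 15 (an 8-way
// conflict). With the padding they become 15, 48, 81, ..., 246, which fall in banks
// 15, 16, 17, ..., 22, so the accesses no longer collide.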
__host__
void BCAO_fullPrescan(int *h_x, int *h_y, int numElements) {
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
size_t size = numElements * sizeof(int);
// The number of blocks it would take to process the array at each level
int blocksPerGridL1 = 1 + (numElements - 1) / (BLOCK_SIZE * 2);
int blocksPerGridL2 = 1 + blocksPerGridL1 / (BLOCK_SIZE * 2);
int blocksPerGridL3 = 1 + blocksPerGridL2 / (BLOCK_SIZE * 2);
int *d_x = NULL;
err = hipMalloc((void **) &d_x, size);
CUDA_ERROR(err, "Failed to allocate device array x");
int *d_y = NULL;
err = hipMalloc((void**) &d_y, size);
CUDA_ERROR(err, "Failed to allocate device array y");
// Only define in here and actually allocate memory to these arrays if needed
int *d_SUMS_LEVEL1 = NULL;
int *d_INCR_LEVEL1 = NULL;
int *d_SUMS_LEVEL2 = NULL;
int *d_INCR_LEVEL2 = NULL;
err = hipMemcpy(d_x, h_x, size, hipMemcpyHostToDevice);
CUDA_ERROR(err, "Failed to copy array x from host to device");
// Create the device timer
hipEvent_t d_start, d_stop;
float d_msecs;
hipEventCreate(&d_start);
hipEventCreate(&d_stop);
//-----------------Pick the correct level and execute the kernels----------
// The correct level is going to be where the SUMS array can be prescanned with only one block
if(blocksPerGridL1 == 1){
hipEventRecord(d_start, 0);
hipLaunchKernelGGL(( BCAO_blockPrescan), dim3(blocksPerGridL1), dim3(BLOCK_SIZE), 0, 0, d_x, d_y, numElements, NULL);
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
hipDeviceSynchronize();
} else if (blocksPerGridL2 == 1) {
// SUMS and INCR arrays need to be allocated to store intermediate values
err = hipMalloc((void**) &d_SUMS_LEVEL1, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_SUMS_LEVEL1");
err = hipMalloc((void**) &d_INCR_LEVEL1, size);
CUDA_ERROR(err, "Failed to allocate device vector d_INCR");
// Start timer and the execution of kernels
hipEventRecord(d_start, 0);
hipLaunchKernelGGL(( BCAO_blockPrescan), dim3(blocksPerGridL1), dim3(BLOCK_SIZE), 0, 0, d_x, d_y, numElements, d_SUMS_LEVEL1);
// Run a second prescan on the SUMS array
hipLaunchKernelGGL(( BCAO_blockPrescan), dim3(blocksPerGridL2), dim3(BLOCK_SIZE), 0, 0, d_SUMS_LEVEL1, d_INCR_LEVEL1, blocksPerGridL1, NULL);
// Add the values of INCR array to the corresponding blocks of the d_y array
hipLaunchKernelGGL(( uniformAdd), dim3(blocksPerGridL1), dim3(BLOCK_SIZE), 0, 0, d_y, numElements, d_INCR_LEVEL1);
// Stop the timer
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
hipDeviceSynchronize();
} else if (blocksPerGridL3 == 1) {
// SUMS and INCR arrays need to be allocated to store intermediate values
err = hipMalloc((void**) &d_SUMS_LEVEL1, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_SUMS_LEVEL1");
err = hipMalloc((void**) &d_SUMS_LEVEL2, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_SUMS_LEVEL2");
err = hipMalloc((void**) &d_INCR_LEVEL1, size);
CUDA_ERROR(err, "Failed to allocate device vector d_INCR");
err = hipMalloc((void**) &d_INCR_LEVEL2, size);
CUDA_ERROR(err, "Failed to allocate device vector d_INCR");
// Start timer and the execution of kernels
hipEventRecord(d_start, 0);
hipLaunchKernelGGL(( BCAO_blockPrescan), dim3(blocksPerGridL1), dim3(BLOCK_SIZE), 0, 0, d_x, d_y, numElements, d_SUMS_LEVEL1);
hipLaunchKernelGGL(( BCAO_blockPrescan), dim3(blocksPerGridL2), dim3(BLOCK_SIZE), 0, 0, d_SUMS_LEVEL1, d_INCR_LEVEL1, blocksPerGridL1, d_SUMS_LEVEL2);
hipLaunchKernelGGL(( BCAO_blockPrescan), dim3(blocksPerGridL3), dim3(BLOCK_SIZE), 0, 0, d_SUMS_LEVEL2, d_INCR_LEVEL2, blocksPerGridL2, NULL);
hipLaunchKernelGGL(( uniformAdd), dim3(blocksPerGridL2), dim3(BLOCK_SIZE), 0, 0, d_INCR_LEVEL1, blocksPerGridL1, d_INCR_LEVEL2);
hipLaunchKernelGGL(( uniformAdd), dim3(blocksPerGridL1), dim3(BLOCK_SIZE), 0, 0, d_y, numElements, d_INCR_LEVEL1);
// Stop the timer
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
hipDeviceSynchronize();
}else {
printf("The array of length = %d is to large for a level 3 FULL prescan\n", numElements);
exit(EXIT_FAILURE);
}
//---------------------------Timing and verification-----------------------
err = hipGetLastError();
CUDA_ERROR(err, "Failed to launch block scan kernel");
err = hipEventElapsedTime(&d_msecs, d_start, d_stop);
CUDA_ERROR(err, "Failed to get elapsed time");
int *h_dOutput = (int *)malloc(size);
err = hipMemcpy(h_dOutput, d_y, size, hipMemcpyDeviceToHost);
CUDA_ERROR(err, "Failed to copy array y from device to host");
// Verify that the result vector is correct
if(compareArrays(h_dOutput, h_y, numElements)){
printf("DEVICE FULL BCAO prescan test passed, the scan took %.5f msecs\n", d_msecs);
}else{
printf("DEVICE FULL BCAO prescan test failed, the scan took %.5f msecs\n", d_msecs);
}
//-------------------------------Cleanup-----------------------------------
// Free device memory
err = hipFree(d_x);
CUDA_ERROR(err, "Failed to free device array x");
err = hipFree(d_y);
CUDA_ERROR(err, "Failed to free device array y");
// Only need to free these arrays if they were allocated
if(blocksPerGridL2 == 1 || blocksPerGridL3 == 1){
err = hipFree(d_SUMS_LEVEL1);
CUDA_ERROR(err, "Failed to free device array d_SUMS_LEVEL1");
err = hipFree(d_INCR_LEVEL1);
CUDA_ERROR(err, "Failed to free device array d_INCR_LEVEL1");
}
if(blocksPerGridL3 == 1){
err = hipFree(d_SUMS_LEVEL2);
CUDA_ERROR(err, "Failed to free device array d_SUMS_LEVEL2");
err = hipFree(d_INCR_LEVEL2);
CUDA_ERROR(err, "Failed to free device array d_INCR_LEVEL2");
}
// Destroy device timer events
hipEventDestroy(d_start);
hipEventDestroy(d_stop);
// Reset the device
err = hipDeviceReset();
CUDA_ERROR(err, "Failed to reset the device");
}
int main(void) {
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// For timing
StopWatchInterface * timer = NULL;
sdkCreateTimer(&timer);
double h_msecs;
// Number of elements in the array
int numElements = 10000000;
size_t size = numElements * sizeof(int);
printf("Prescans of arrays of size %d:\n\n", numElements);
int *h_x = (int *) malloc(size);
int *h_yBlock = (int *) malloc(size);
int *h_yFull = (int *) malloc(size);
int *h_dOutput = (int *) malloc(size);
if (h_x == NULL || h_yBlock == NULL || h_yFull == NULL || h_dOutput == NULL) {
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
unsigned int seed = 1;
// Initialize the host array to random integers
srand(seed);
for (int i = 0; i < numElements; i++) {
h_x[i] = rand() % 10;
}
//--------------------------Sequential Scans-------------------------------
sdkStartTimer(&timer);
hostBlockScan(h_x, h_yBlock, numElements);
sdkStopTimer(&timer);
h_msecs = sdkGetTimerValue(&timer);
printf("HOST sequential BLOCK scan on took = %.5fmSecs\n", h_msecs);
sdkStartTimer(&timer);
hostFullScan(h_x, h_yFull, numElements);
sdkStopTimer(&timer);
h_msecs = sdkGetTimerValue(&timer);
printf("HOST squential FULL scan took = %.5fmSecs\n\n", h_msecs);
//--------------------------Redo the input array---------------------------
// Create a new identical host input array
// This is needed because with large arrays (and only large arrays)
// the hostBlockScan() method overwrites some of the input array values.
int *h_xNew = (int *) malloc(size);
if (h_xNew == NULL) {
fprintf(stderr, "Failed to allocate host vector!\n");
exit(EXIT_FAILURE);
}
srand(seed);
for (int i = 0; i < numElements; i++) {
h_xNew[i] = rand() % 10;
}
//--------------------------Device Block Scans------------------------------
// Create the device timer
hipEvent_t d_start, d_stop;
float d_msecs;
hipEventCreate(&d_start);
hipEventCreate(&d_stop);
int *d_x = NULL;
err = hipMalloc((void **) &d_x, size);
CUDA_ERROR(err, "Failed to allocate device array x");
int *d_y = NULL;
err = hipMalloc((void**) &d_y, size);
CUDA_ERROR(err, "Failed to allocate device array y");
err = hipMemcpy(d_x, h_xNew, size, hipMemcpyHostToDevice);
CUDA_ERROR(err, "Failed to copy array xNew from host to device");
// Blocks per grid for the block scans
int blocksPerGrid = 1 + ((numElements - 1) / (BLOCK_SIZE * 2));
//----------------------Device Non BCAO Block Scan-------------------------
hipEventRecord(d_start, 0);
hipLaunchKernelGGL(( blockPrescan), dim3(blocksPerGrid), dim3(BLOCK_SIZE), 0, 0, d_x, d_y, numElements, NULL);
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
// Wait for device to finish
hipDeviceSynchronize();
err = hipGetLastError();
CUDA_ERROR(err, "Failed to launch blockPrescan kernel");
err = hipEventElapsedTime(&d_msecs, d_start, d_stop);
CUDA_ERROR(err, "Failed to get elapsed time");
err = hipMemcpy(h_dOutput, d_y, size, hipMemcpyDeviceToHost);
CUDA_ERROR(err, "Failed to copy array y from device to host");
// Verify that the result vector is correct
// printf("BLOCK non-BCAO prescan took %.5f msecs\n", d_msecs);
if(compareArrays(h_dOutput, h_yBlock, numElements)){
printf("DEVICE BLOCK non-BCAO prescan test passed, the scan took %.5f msecs\n", d_msecs);
}else{
printf("DEVICE BLOCK non-BCAO prescan test failed, the scan took %.5f msecs\n", d_msecs);
}
//----------------------Device BCAO Block Scan-----------------------------
hipEventRecord(d_start, 0);
hipLaunchKernelGGL(( BCAO_blockPrescan), dim3(blocksPerGrid), dim3(BLOCK_SIZE), 0, 0, d_x, d_y, numElements, NULL);
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
// Wait for device to finish
hipDeviceSynchronize();
err = hipGetLastError();
CUDA_ERROR(err, "Failed to launch BCAO_blockPrescan kernel");
err = hipEventElapsedTime(&d_msecs, d_start, d_stop);
CUDA_ERROR(err, "Failed to get elapsed time");
err = hipMemcpy(h_dOutput, d_y, size, hipMemcpyDeviceToHost);
CUDA_ERROR(err, "Failed to copy array y from device to host");
// Verify that the result vector is correct
// printf("BLOCK BCAO prescan took %.5f msecs\n", d_msecs);
if(compareArrays(h_dOutput, h_yBlock, numElements)){
printf("DEVICE BLOCK BCAO prescan test passed, the scan took %.5f msecs\n\n", d_msecs);
}else{
printf("DEVICE BLOCK BCAO prescan test failed, the scan took %.5f msecs\n\n", d_msecs);
}
// Free device memory as full scan methods will allocate their own memory
err = hipFree(d_x);
CUDA_ERROR(err, "Failed to free device array x");
err = hipFree(d_y);
CUDA_ERROR(err, "Failed to free device array y");
//--------------------------Device Full Scans------------------------------
fullPrescan(h_x, h_yFull, numElements);
BCAO_fullPrescan(h_x, h_yFull, numElements);
//--------------------------Cleanup----------------------------------------
// Destroy device timer events
hipEventDestroy(d_start);
hipEventDestroy(d_stop);
// Delete host timer
sdkDeleteTimer(&timer);
// Reset the device
err = hipDeviceReset();
CUDA_ERROR(err, "Failed to reset the device");
// Free host memory
free(h_x);
free(h_yBlock);
free(h_yFull);
free(h_dOutput);
printf("\nFinished");
return 0;
}
| e5541a292ac7cb9612afda9c365e19b5380cc3e9.cu | /*
* NAME: Tomas Vycas
*
* ASSIGNMENT GOALS ACHIEVED:
∗ Block scan
∗ Full scan for large vectors
∗ Bank conflict avoidance optimization (BCAO)
*
* TIMINGS (BLOCK_SIZE = 128):
∗ Block scan without BCAO = 1.10294 msecs
∗ Block scan with BCAO = 0.47206 msecs
∗ Full scan without BCAO = 1.39594 msecs
∗ Full scan with BCAO = 0.76058 msecs
*
* MACHINE:
* CPU - Intel® Core™ i7-8700 CPU @ 3.20GHz × 12
* GPU - GeForce RTX 2060
*/
#include <stdio.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <math.h>
// A helper macro to simplify handling cuda error checking
#define CUDA_ERROR( err, msg ) { \
if (err != cudaSuccess) {\
printf( "%s: %s in %s at line %d\n", msg, cudaGetErrorString( err ), __FILE__, __LINE__);\
exit( EXIT_FAILURE );\
}\
}
// This block size achieved best results
#define BLOCK_SIZE 128
// For BCAO
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) ( ((n) >> LOG_NUM_BANKS) + ((n) >> (2 * LOG_NUM_BANKS)) )
#else
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS)
#endif
// You need extra shared memory space if using BCAO because of
// the padding. Note this is the number of WORDS of padding:
#define EXTRA (CONFLICT_FREE_OFFSET(BLOCK_SIZE * 2 - 1))
// Compares two arrays and outputs if they match or prints the first element that failed the check otherwise
bool compareArrays(int *array1, int *array2, int numElements) {
for (int i = 0; i < numElements; ++i) {
if (array1[i] != array2[i]) {
printf("ARRAY CHECK FAIL at arr1 = %d, arr2 = %d, at index = %d\n", array1[i], array2[i], i);
return false;
}
}
return true;
}
// Sequential implementation of a full array scan
__host__
void hostFullScan(int *g_idata, int *g_odata, int n) {
g_odata[0] = 0;
for (int i = 1; i < n; i++) {
g_odata[i] = g_odata[i - 1] + g_idata[i - 1];
}
}
// Outputs the prescanned array, computed in independent blocks of BLOCK_SIZE * 2 elements
// Is unstable on large arrays
__host__
void hostBlockScan(const int *x, int *y , int numElements){
int num_blocks = 1 + (numElements - 1) / BLOCK_SIZE;
for (int blk = 0; blk < num_blocks; blk++){
int blk_start = blk * BLOCK_SIZE*2;
int blk_end = blk_start + BLOCK_SIZE*2;
if (blk_end > numElements){
blk_end = numElements;
}
y[blk_start] = 0; // since this is a prescan, not a scan
for(int j = blk_start + 1; j < blk_end; j++){
y[j] = x[j-1] + y[j-1];
}
}
}
// Takes the output array and for each block i, adds value i from INCR array to every element
__global__
void uniformAdd(int *outputArray, int numElements, int *INCR){
int index = threadIdx.x + (2 * BLOCK_SIZE) * blockIdx.x;
int valueToAdd = INCR[blockIdx.x];
// Each thread sums two elements
if (index < numElements){
outputArray[index] += valueToAdd;
}
if (index + BLOCK_SIZE < numElements){
outputArray[index + BLOCK_SIZE] += valueToAdd;
}
}
// Block prescan that works for any array length, operating on blocks of BLOCK_SIZE * 2 elements
__global__
void blockPrescan(int *g_idata, int *g_odata, int n, int *SUM)
{
__shared__ int temp[BLOCK_SIZE << 1]; // allocated on invocation
int thid = threadIdx.x;
int offset = 1;
int blockOffset = BLOCK_SIZE * blockIdx.x * 2;
// Copy the correct elements from the global array
if (blockOffset + (thid * 2) < n){
temp[thid * 2] = g_idata[blockOffset + (thid * 2)];
}
if (blockOffset + (thid * 2) + 1 < n){
temp[(thid * 2)+1] = g_idata[blockOffset + (thid * 2)+1];
}
// Build sum in place up the tree
for (int d = BLOCK_SIZE; d > 0; d >>= 1){
__syncthreads();
if (thid < d){
int ai = offset*((thid * 2)+1)-1;
int bi = offset*((thid * 2)+2)-1;
temp[bi] += temp[ai];
}
offset <<= 1;
}
if (thid == 0) {
if(SUM != NULL){
// If doing a FULL scan, save the last value in the SUMS array for later processing
SUM[blockIdx.x] = temp[(BLOCK_SIZE << 1) - 1];
}
temp[(BLOCK_SIZE << 1) - 1] = 0; // clear the last element
}
// Traverse down tree & build scan
for (int d = 1; d < BLOCK_SIZE << 1; d <<= 1){
offset >>= 1;
__syncthreads();
if (thid < d){
int ai = offset*((thid * 2)+1)-1;
int bi = offset*((thid * 2)+2)-1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
// Copy the new array back to global array
__syncthreads();
if (blockOffset + (thid * 2) < n){
g_odata[blockOffset + (thid * 2)] = temp[(thid * 2)]; // write results to device memory
}
if (blockOffset + (thid * 2) + 1 < n){
g_odata[blockOffset + ((thid * 2)+1)] = temp[(thid * 2)+1];
}
}
__host__
void fullPrescan(int *h_x, int *h_y, int numElements) {
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
size_t size = numElements * sizeof(int);
// The number of blocks it would take to process the array at each level
int blocksPerGridL1 = 1 + (numElements - 1) / (BLOCK_SIZE * 2);
int blocksPerGridL2 = 1 + blocksPerGridL1 / (BLOCK_SIZE * 2);
int blocksPerGridL3 = 1 + blocksPerGridL2 / (BLOCK_SIZE * 2);
int *d_x = NULL;
err = cudaMalloc((void **) &d_x, size);
CUDA_ERROR(err, "Failed to allocate device array x");
int *d_y = NULL;
err = cudaMalloc((void**) &d_y, size);
CUDA_ERROR(err, "Failed to allocate device array y");
// Only define in here and actually allocate memory to these arrays if needed
int *d_SUMS_LEVEL1 = NULL;
int *d_INCR_LEVEL1 = NULL;
int *d_SUMS_LEVEL2 = NULL;
int *d_INCR_LEVEL2 = NULL;
err = cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
CUDA_ERROR(err, "Failed to copy array x from host to device");
// Create the device timer
cudaEvent_t d_start, d_stop;
float d_msecs;
cudaEventCreate(&d_start);
cudaEventCreate(&d_stop);
//-----------------Pick the correct level and execute the kernels----------
// The correct level is going to be where the SUMS array can be prescanned with only one block
if(blocksPerGridL1 == 1){
cudaEventRecord(d_start, 0);
blockPrescan<<<blocksPerGridL1, BLOCK_SIZE>>>(d_x, d_y, numElements, NULL);
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
cudaDeviceSynchronize();
} else if (blocksPerGridL2 == 1) {
// SUMS and INCR arrays need to be allocated to store intermediate values
err = cudaMalloc((void**) &d_SUMS_LEVEL1, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to free device array x");
err = cudaMalloc((void**) &d_INCR_LEVEL1, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_INCR_LEVEL1");
// Start timer and the execution of kernels
cudaEventRecord(d_start, 0);
blockPrescan<<<blocksPerGridL1, BLOCK_SIZE>>>(d_x, d_y, numElements, d_SUMS_LEVEL1);
// Run a second prescan on the SUMS array
blockPrescan<<<blocksPerGridL2, BLOCK_SIZE>>>(d_SUMS_LEVEL1, d_INCR_LEVEL1, blocksPerGridL1, NULL);
// Add the values of INCR array to the corresponding blocks of the d_y array
uniformAdd<<<blocksPerGridL1, BLOCK_SIZE>>>(d_y, numElements, d_INCR_LEVEL1);
// Stop the timer
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
cudaDeviceSynchronize();
} else if (blocksPerGridL3 == 1) {
// SUMS and INCR arrays need to be allocated to store intermediate values
err = cudaMalloc((void**) &d_SUMS_LEVEL1, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_SUMS_LEVEL1");
err = cudaMalloc((void**) &d_SUMS_LEVEL2, (BLOCK_SIZE * 2) * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_SUMS_LEVEL2");
err = cudaMalloc((void**) &d_INCR_LEVEL1, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_INCR");
err = cudaMalloc((void**) &d_INCR_LEVEL2, (BLOCK_SIZE * 2)* sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_INCR");
// Start timer and the execution of kernels
cudaEventRecord(d_start, 0);
blockPrescan<<<blocksPerGridL1, BLOCK_SIZE>>>(d_x, d_y, numElements, d_SUMS_LEVEL1);
blockPrescan<<<blocksPerGridL2, BLOCK_SIZE>>>(d_SUMS_LEVEL1, d_INCR_LEVEL1, blocksPerGridL1, d_SUMS_LEVEL2);
blockPrescan<<<blocksPerGridL3, BLOCK_SIZE>>>(d_SUMS_LEVEL2, d_INCR_LEVEL2, blocksPerGridL2, NULL);
uniformAdd<<<blocksPerGridL2, BLOCK_SIZE>>>(d_INCR_LEVEL1, blocksPerGridL1, d_INCR_LEVEL2);
uniformAdd<<<blocksPerGridL1, BLOCK_SIZE>>>(d_y, numElements, d_INCR_LEVEL1);
// Stop the timer
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
cudaDeviceSynchronize();
}else {
printf("The array of length = %d is to large for a level 3 FULL prescan\n", numElements);
exit(EXIT_FAILURE);
}
//---------------------------Timing and verification-----------------------
err = cudaGetLastError();
CUDA_ERROR(err, "Failed to launch fullPrescan");
err = cudaEventElapsedTime(&d_msecs, d_start, d_stop);
CUDA_ERROR(err, "Failed to get elapsed time");
int *h_dOutput = (int *)malloc(size);
err = cudaMemcpy(h_dOutput, d_y, size, cudaMemcpyDeviceToHost);
CUDA_ERROR(err, "Failed to copy array y from device to host");
// Verify that the result vector is correct
if(compareArrays(h_dOutput, h_y, numElements)){
printf("DEVICE FULL non-BCAO prescan test passed, the scan took %.5f msecs\n", d_msecs);
}else{
printf("DEVICE FULL non-BCAO prescan test failed, the scan took %.5f msecs\n", d_msecs);
}
//-------------------------------Cleanup-----------------------------------
// Free device memory
err = cudaFree(d_x);
CUDA_ERROR(err, "Failed to free device array x");
err = cudaFree(d_y);
CUDA_ERROR(err, "Failed to free device array y");
// Only need to free these arrays if they were allocated
if(blocksPerGridL2 == 1 || blocksPerGridL3 == 1){
err = cudaFree(d_SUMS_LEVEL1);
CUDA_ERROR(err, "Failed to free device array d_SUMS_LEVEL1");
err = cudaFree(d_INCR_LEVEL1);
CUDA_ERROR(err, "Failed to free device array d_INCR_LEVEL1");
}
if(blocksPerGridL3 == 1){
err = cudaFree(d_SUMS_LEVEL2);
CUDA_ERROR(err, "Failed to free device array d_SUMS_LEVEL2");
err = cudaFree(d_INCR_LEVEL2);
CUDA_ERROR(err, "Failed to free device array d_INCR_LEVEL2");
	}
	// Free the host buffer used for verification
	free(h_dOutput);
// Destroy device timer events
cudaEventDestroy(d_start);
cudaEventDestroy(d_stop);
// Reset the device
err = cudaDeviceReset();
CUDA_ERROR(err, "Failed to reset the device");
}
// BCAO block prescan that works on arrays of any length, processing BLOCK_SIZE * 2 elements per block
__global__
void BCAO_blockPrescan(int *g_idata, int *g_odata, int n, int *SUM)
{
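	// Same work-efficient scan as blockPrescan, with bank conflict avoidance (BCAO): shared
	// memory indices are padded via CONFLICT_FREE_OFFSET so that threads of a warp hit
	// different banks, which is why temp carries an extra BLOCK_SIZE elements of padding.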
__shared__ int temp[BLOCK_SIZE * 2 + (BLOCK_SIZE)]; // allocated on invocation
int thid = threadIdx.x;
int offset = 1;
int blockOffset = BLOCK_SIZE * blockIdx.x * 2;
// Create the correct offsets for BCAO
int ai = thid;
int bi = thid + BLOCK_SIZE;
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
	// Copy the correct elements from the global array
if (blockOffset + ai < n){
temp[ai + bankOffsetA] = g_idata[blockOffset + ai]; // load input into shared memory
}
if (blockOffset + bi < n){
temp[bi + bankOffsetB] = g_idata[blockOffset + bi];
}
// Build sum in place up the tree
for (int d = BLOCK_SIZE; d > 0; d >>= 1){
__syncthreads();
if (thid < d){
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) {
if(SUM != NULL){
// If doing a FULL scan, save the last value in the SUMS array for later processing
SUM[blockIdx.x] = temp[(BLOCK_SIZE * 2) - 1 + CONFLICT_FREE_OFFSET((BLOCK_SIZE * 2) - 1)];
}
temp[(BLOCK_SIZE * 2) - 1 + CONFLICT_FREE_OFFSET((BLOCK_SIZE * 2) - 1)] = 0; // clear the last element
}
// Traverse down tree & build scan
for (int d = 1; d < BLOCK_SIZE * 2; d *= 2){
offset >>= 1;
__syncthreads();
if (thid < d){
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
// Copy the new array back to global array
__syncthreads();
if (blockOffset + ai < n){
g_odata[blockOffset + ai] = temp[ai + bankOffsetA]; // write results to device memory
}
if (blockOffset + bi < n){
g_odata[blockOffset + bi] = temp[bi + bankOffsetB];
}
}
__host__
void BCAO_fullPrescan(int *h_x, int *h_y, int numElements) {
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
size_t size = numElements * sizeof(int);
// The number of blocks it would take to process the array at each level
int blocksPerGridL1 = 1 + (numElements - 1) / (BLOCK_SIZE * 2);
int blocksPerGridL2 = 1 + blocksPerGridL1 / (BLOCK_SIZE * 2);
int blocksPerGridL3 = 1 + blocksPerGridL2 / (BLOCK_SIZE * 2);
int *d_x = NULL;
err = cudaMalloc((void **) &d_x, size);
CUDA_ERROR(err, "Failed to allocate device array x");
int *d_y = NULL;
err = cudaMalloc((void**) &d_y, size);
CUDA_ERROR(err, "Failed to allocate device array y");
	// These are only declared here; memory is allocated for them only if the scan needs the intermediate levels
int *d_SUMS_LEVEL1 = NULL;
int *d_INCR_LEVEL1 = NULL;
int *d_SUMS_LEVEL2 = NULL;
int *d_INCR_LEVEL2 = NULL;
err = cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
CUDA_ERROR(err, "Failed to copy array x from host to device");
// Create the device timer
cudaEvent_t d_start, d_stop;
float d_msecs;
cudaEventCreate(&d_start);
cudaEventCreate(&d_stop);
//-----------------Pick the correct level and execute the kernels----------
// The correct level is going to be where the SUMS array can be prescanned with only one block
if(blocksPerGridL1 == 1){
cudaEventRecord(d_start, 0);
BCAO_blockPrescan<<<blocksPerGridL1, BLOCK_SIZE>>>(d_x, d_y, numElements, NULL);
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
cudaDeviceSynchronize();
} else if (blocksPerGridL2 == 1) {
// SUMS and INCR arrays need to be allocated to store intermediate values
err = cudaMalloc((void**) &d_SUMS_LEVEL1, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_SUMS_LEVEL1");
err = cudaMalloc((void**) &d_INCR_LEVEL1, size);
CUDA_ERROR(err, "Failed to allocate device vector d_INCR");
// Start timer and the execution of kernels
cudaEventRecord(d_start, 0);
BCAO_blockPrescan<<<blocksPerGridL1, BLOCK_SIZE>>>(d_x, d_y, numElements, d_SUMS_LEVEL1);
// Run a second prescan on the SUMS array
BCAO_blockPrescan<<<blocksPerGridL2, BLOCK_SIZE>>>(d_SUMS_LEVEL1, d_INCR_LEVEL1, blocksPerGridL1, NULL);
// Add the values of INCR array to the corresponding blocks of the d_y array
uniformAdd<<<blocksPerGridL1, BLOCK_SIZE>>>(d_y, numElements, d_INCR_LEVEL1);
// Stop the timer
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
cudaDeviceSynchronize();
} else if (blocksPerGridL3 == 1) {
// SUMS and INCR arrays need to be allocated to store intermediate values
err = cudaMalloc((void**) &d_SUMS_LEVEL1, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_SUMS_LEVEL1");
err = cudaMalloc((void**) &d_SUMS_LEVEL2, blocksPerGridL1 * sizeof(int));
CUDA_ERROR(err, "Failed to allocate device vector d_SUMS_LEVEL2");
err = cudaMalloc((void**) &d_INCR_LEVEL1, size);
CUDA_ERROR(err, "Failed to allocate device vector d_INCR");
err = cudaMalloc((void**) &d_INCR_LEVEL2, size);
CUDA_ERROR(err, "Failed to allocate device vector d_INCR");
// Start timer and the execution of kernels
cudaEventRecord(d_start, 0);
BCAO_blockPrescan<<<blocksPerGridL1, BLOCK_SIZE>>>(d_x, d_y, numElements, d_SUMS_LEVEL1);
BCAO_blockPrescan<<<blocksPerGridL2, BLOCK_SIZE>>>(d_SUMS_LEVEL1, d_INCR_LEVEL1, blocksPerGridL1, d_SUMS_LEVEL2);
BCAO_blockPrescan<<<blocksPerGridL3, BLOCK_SIZE>>>(d_SUMS_LEVEL2, d_INCR_LEVEL2, blocksPerGridL2, NULL);
uniformAdd<<<blocksPerGridL2, BLOCK_SIZE>>>(d_INCR_LEVEL1, blocksPerGridL1, d_INCR_LEVEL2);
uniformAdd<<<blocksPerGridL1, BLOCK_SIZE>>>(d_y, numElements, d_INCR_LEVEL1);
// Stop the timer
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
cudaDeviceSynchronize();
}else {
printf("The array of length = %d is to large for a level 3 FULL prescan\n", numElements);
exit(EXIT_FAILURE);
}
//---------------------------Timing and verification-----------------------
err = cudaGetLastError();
CUDA_ERROR(err, "Failed to launch block scan kernel");
err = cudaEventElapsedTime(&d_msecs, d_start, d_stop);
CUDA_ERROR(err, "Failed to get elapsed time");
int *h_dOutput = (int *)malloc(size);
err = cudaMemcpy(h_dOutput, d_y, size, cudaMemcpyDeviceToHost);
CUDA_ERROR(err, "Failed to copy array y from device to host");
// Verify that the result vector is correct
if(compareArrays(h_dOutput, h_y, numElements)){
printf("DEVICE FULL BCAO prescan test passed, the scan took %.5f msecs\n", d_msecs);
}else{
printf("DEVICE FULL BCAO prescan test failed, the scan took %.5f msecs\n", d_msecs);
}
//-------------------------------Cleanup-----------------------------------
// Free device memory
err = cudaFree(d_x);
CUDA_ERROR(err, "Failed to free device array x");
err = cudaFree(d_y);
CUDA_ERROR(err, "Failed to free device array y");
// Only need to free these arrays if they were allocated
if(blocksPerGridL2 == 1 || blocksPerGridL3 == 1){
err = cudaFree(d_SUMS_LEVEL1);
CUDA_ERROR(err, "Failed to free device array d_SUMS_LEVEL1");
err = cudaFree(d_INCR_LEVEL1);
CUDA_ERROR(err, "Failed to free device array d_INCR_LEVEL1");
}
if(blocksPerGridL3 == 1){
err = cudaFree(d_SUMS_LEVEL2);
CUDA_ERROR(err, "Failed to free device array d_SUMS_LEVEL2");
err = cudaFree(d_INCR_LEVEL2);
CUDA_ERROR(err, "Failed to free device array d_INCR_LEVEL2");
	}
	// Free the host buffer used for verification
	free(h_dOutput);
// Destroy device timer events
cudaEventDestroy(d_start);
cudaEventDestroy(d_stop);
// Reset the device
err = cudaDeviceReset();
CUDA_ERROR(err, "Failed to reset the device");
}
int main(void) {
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// For timing
StopWatchInterface * timer = NULL;
sdkCreateTimer(&timer);
double h_msecs;
// Number of elements in the array
int numElements = 10000000;
size_t size = numElements * sizeof(int);
printf("Prescans of arrays of size %d:\n\n", numElements);
int *h_x = (int *) malloc(size);
int *h_yBlock = (int *) malloc(size);
int *h_yFull = (int *) malloc(size);
int *h_dOutput = (int *) malloc(size);
if (h_x == NULL || h_yBlock == NULL || h_yFull == NULL || h_dOutput == NULL) {
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
unsigned int seed = 1;
// Initialize the host array to random integers
srand(seed);
for (int i = 0; i < numElements; i++) {
h_x[i] = rand() % 10;
}
//--------------------------Sequential Scans-------------------------------
sdkStartTimer(&timer);
hostBlockScan(h_x, h_yBlock, numElements);
sdkStopTimer(&timer);
h_msecs = sdkGetTimerValue(&timer);
printf("HOST sequential BLOCK scan on took = %.5fmSecs\n", h_msecs);
sdkStartTimer(&timer);
hostFullScan(h_x, h_yFull, numElements);
sdkStopTimer(&timer);
h_msecs = sdkGetTimerValue(&timer);
printf("HOST squential FULL scan took = %.5fmSecs\n\n", h_msecs);
//--------------------------Redo the input array---------------------------
// Create a new identical host input array
// This is needed because with large arrays (and only large arrays)
	// the hostBlockScan() method overwrites some of the input array values.
int *h_xNew = (int *) malloc(size);
if (h_xNew == NULL) {
fprintf(stderr, "Failed to allocate host vector!\n");
exit(EXIT_FAILURE);
}
srand(seed);
for (int i = 0; i < numElements; i++) {
h_xNew[i] = rand() % 10;
}
//--------------------------Device Block Scans------------------------------
// Create the device timer
cudaEvent_t d_start, d_stop;
float d_msecs;
cudaEventCreate(&d_start);
cudaEventCreate(&d_stop);
int *d_x = NULL;
err = cudaMalloc((void **) &d_x, size);
CUDA_ERROR(err, "Failed to allocate device array x");
int *d_y = NULL;
err = cudaMalloc((void**) &d_y, size);
CUDA_ERROR(err, "Failed to allocate device array y");
err = cudaMemcpy(d_x, h_xNew, size, cudaMemcpyHostToDevice);
CUDA_ERROR(err, "Failed to copy array xNew from host to device");
// Blocks per grid for the block scans
int blocksPerGrid = 1 + ((numElements - 1) / (BLOCK_SIZE * 2));
//----------------------Device Non BCAO Block Scan-------------------------
cudaEventRecord(d_start, 0);
blockPrescan<<<blocksPerGrid, BLOCK_SIZE>>>(d_x, d_y, numElements, NULL);
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
// Wait for device to finish
cudaDeviceSynchronize();
err = cudaGetLastError();
CUDA_ERROR(err, "Failed to launch blockPrescan kernel");
err = cudaEventElapsedTime(&d_msecs, d_start, d_stop);
CUDA_ERROR(err, "Failed to get elapsed time");
err = cudaMemcpy(h_dOutput, d_y, size, cudaMemcpyDeviceToHost);
CUDA_ERROR(err, "Failed to copy array y from device to host");
// Verify that the result vector is correct
// printf("BLOCK non-BCAO prescan took %.5f msecs\n", d_msecs);
if(compareArrays(h_dOutput, h_yBlock, numElements)){
printf("DEVICE BLOCK non-BCAO prescan test passed, the scan took %.5f msecs\n", d_msecs);
}else{
printf("DEVICE BLOCK non-BCAO prescan test failed, the scan took %.5f msecs\n", d_msecs);
}
//----------------------Device BCAO Block Scan-----------------------------
cudaEventRecord(d_start, 0);
BCAO_blockPrescan<<<blocksPerGrid, BLOCK_SIZE>>>(d_x, d_y, numElements, NULL);
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
// Wait for device to finish
cudaDeviceSynchronize();
err = cudaGetLastError();
CUDA_ERROR(err, "Failed to launch BCAO_blockPrescan kernel");
err = cudaEventElapsedTime(&d_msecs, d_start, d_stop);
CUDA_ERROR(err, "Failed to get elapsed time");
err = cudaMemcpy(h_dOutput, d_y, size, cudaMemcpyDeviceToHost);
CUDA_ERROR(err, "Failed to copy array y from device to host");
// Verify that the result vector is correct
// printf("BLOCK BCAO prescan took %.5f msecs\n", d_msecs);
if(compareArrays(h_dOutput, h_yBlock, numElements)){
printf("DEVICE BLOCK BCAO prescan test passed, the scan took %.5f msecs\n\n", d_msecs);
}else{
printf("DEVICE BLOCK BCAO prescan test failed, the scan took %.5f msecs\n\n", d_msecs);
}
// Free device memory as full scan methods will allocate their own memory
err = cudaFree(d_x);
CUDA_ERROR(err, "Failed to free device array x");
err = cudaFree(d_y);
CUDA_ERROR(err, "Failed to free device array y");
//--------------------------Device Full Scans------------------------------
fullPrescan(h_x, h_yFull, numElements);
BCAO_fullPrescan(h_x, h_yFull, numElements);
//--------------------------Cleanup----------------------------------------
// Destroy device timer events
cudaEventDestroy(d_start);
cudaEventDestroy(d_stop);
// Delete host timer
sdkDeleteTimer(&timer);
// Reset the device
err = cudaDeviceReset();
CUDA_ERROR(err, "Failed to reset the device");
// Free host memory
	free(h_x);
	free(h_xNew);
	free(h_yBlock);
	free(h_yFull);
	free(h_dOutput);
	printf("\nFinished\n");
return 0;
}
|
f650bebc436cba5fb62d0e283e807751d3c9b872.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "debug.h"
#include "kernels_hip.cuh"
__device__ const int blockSize = 256;
__device__ const int warp = 32;
__device__ const int stackSize = 64;
__device__ const float eps2 = 0.025;
__device__ const float theta = 0.5;
__global__ void set_draw_array_kernel(float *ptr, float *x, float *y, int n)
{
int index = threadIdx.x + blockDim.x*blockIdx.x;
if(index < n){
ptr[2*index] = x[index];
ptr[2*index+1] = y[index];
}
}
__global__ void reset_arrays_kernel(int *mutex, float *x, float *y, float *mass, int *count, int *start, int *sorted, int *child, int *index, float *left, float *right, float *bottom, float *top, int n, int m)
{
int bodyIndex = threadIdx.x + blockDim.x*blockIdx.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
// reset quadtree arrays
while(bodyIndex + offset < m){
#pragma unroll 4
for(int i=0;i<4;i++){
child[(bodyIndex + offset)*4 + i] = -1;
}
if(bodyIndex + offset < n){
count[bodyIndex + offset] = 1;
}
else{
x[bodyIndex + offset] = 0;
y[bodyIndex + offset] = 0;
mass[bodyIndex + offset] = 0;
count[bodyIndex + offset] = 0;
}
start[bodyIndex + offset] = -1;
sorted[bodyIndex + offset] = 0;
offset += stride;
}
if(bodyIndex == 0){
*mutex = 0;
*index = n;
*left = 0;
*right = 0;
*bottom = 0;
*top = 0;
}
}
__global__ void compute_bounding_box_kernel(int *mutex, float *x, float *y, float *left, float *right, float *bottom, float *top, int n)
{
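	// Each thread takes a private min/max over a grid-strided slice of the bodies, the block
	// then reduces those partials in shared memory (blockDim.x must be a power of two), and
	// thread 0 folds the block result into the global bounding box while holding a spin lock
	// acquired with atomicCAS on *mutex.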
int index = threadIdx.x + blockDim.x*blockIdx.x;
int stride = blockDim.x*gridDim.x;
float x_min = x[index];
float x_max = x[index];
float y_min = y[index];
float y_max = y[index];
__shared__ float left_cache[blockSize];
__shared__ float right_cache[blockSize];
__shared__ float bottom_cache[blockSize];
__shared__ float top_cache[blockSize];
int offset = stride;
while(index + offset < n){
x_min = fminf(x_min, x[index + offset]);
x_max = fmaxf(x_max, x[index + offset]);
y_min = fminf(y_min, y[index + offset]);
y_max = fmaxf(y_max, y[index + offset]);
offset += stride;
}
left_cache[threadIdx.x] = x_min;
right_cache[threadIdx.x] = x_max;
bottom_cache[threadIdx.x] = y_min;
top_cache[threadIdx.x] = y_max;
__syncthreads();
// assumes blockDim.x is a power of 2!
int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
left_cache[threadIdx.x] = fminf(left_cache[threadIdx.x], left_cache[threadIdx.x + i]);
right_cache[threadIdx.x] = fmaxf(right_cache[threadIdx.x], right_cache[threadIdx.x + i]);
bottom_cache[threadIdx.x] = fminf(bottom_cache[threadIdx.x], bottom_cache[threadIdx.x + i]);
top_cache[threadIdx.x] = fmaxf(top_cache[threadIdx.x], top_cache[threadIdx.x + i]);
}
__syncthreads();
i /= 2;
}
if(threadIdx.x == 0){
while (atomicCAS(mutex, 0 ,1) != 0); // lock
*left = fminf(*left, left_cache[0]);
*right = fmaxf(*right, right_cache[0]);
*bottom = fminf(*bottom, bottom_cache[0]);
*top = fmaxf(*top, top_cache[0]);
atomicExch(mutex, 0); // unlock
}
}
__global__ void build_tree_kernel(float *x, float *y, float *mass, int *count, int *start, int *child, int *index, float *left, float *right, float *bottom, float *top, int n, int m)
{
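	// Iterative, lock-free quadtree construction: each body walks from the root towards a
	// leaf, atomically adding its mass, mass-weighted position and count to every internal
	// cell it passes. The target child slot is claimed with atomicCAS (-1 = empty, -2 =
	// locked); if the slot already holds another body, new cells are allocated with
	// atomicAdd on *index until the two bodies fall into different children, and the new
	// subtree is published by writing patch back to the slot after a __threadfence.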
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
bool newBody = true;
// build quadtree
float l;
float r;
float b;
float t;
int childPath;
int temp;
offset = 0;
while((bodyIndex + offset) < n){
if(newBody){
newBody = false;
l = *left;
r = *right;
b = *bottom;
t = *top;
temp = 0;
childPath = 0;
if(x[bodyIndex + offset] < 0.5*(l+r)){
childPath += 1;
r = 0.5*(l+r);
}
else{
l = 0.5*(l+r);
}
if(y[bodyIndex + offset] < 0.5*(b+t)){
childPath += 2;
t = 0.5*(t+b);
}
else{
b = 0.5*(t+b);
}
}
int childIndex = child[temp*4 + childPath];
// traverse tree until we hit leaf node
while(childIndex >= n){
temp = childIndex;
childPath = 0;
if(x[bodyIndex + offset] < 0.5*(l+r)){
childPath += 1;
r = 0.5*(l+r);
}
else{
l = 0.5*(l+r);
}
if(y[bodyIndex + offset] < 0.5*(b+t)){
childPath += 2;
t = 0.5*(t+b);
}
else{
b = 0.5*(t+b);
}
atomicAdd(&x[temp], mass[bodyIndex + offset]*x[bodyIndex + offset]);
atomicAdd(&y[temp], mass[bodyIndex + offset]*y[bodyIndex + offset]);
atomicAdd(&mass[temp], mass[bodyIndex + offset]);
atomicAdd(&count[temp], 1);
childIndex = child[4*temp + childPath];
}
if(childIndex != -2){
int locked = temp*4 + childPath;
if(atomicCAS(&child[locked], childIndex, -2) == childIndex){
if(childIndex == -1){
child[locked] = bodyIndex + offset;
}
else{
//int patch = 2*n;
int patch = 4*n;
while(childIndex >= 0 && childIndex < n){
int cell = atomicAdd(index,1);
patch = min(patch, cell);
if(patch != cell){
child[4*temp + childPath] = cell;
}
// insert old particle
childPath = 0;
if(x[childIndex] < 0.5*(l+r)){
childPath += 1;
}
if(y[childIndex] < 0.5*(b+t)){
childPath += 2;
}
if(DEBUG){
// if(cell >= 2*n){
if(cell >= m){
printf("%s\n", "error cell index is too large!!");
printf("cell: %d\n", cell);
}
}
x[cell] += mass[childIndex]*x[childIndex];
y[cell] += mass[childIndex]*y[childIndex];
mass[cell] += mass[childIndex];
count[cell] += count[childIndex];
child[4*cell + childPath] = childIndex;
start[cell] = -1;
// insert new particle
temp = cell;
childPath = 0;
if(x[bodyIndex + offset] < 0.5*(l+r)){
childPath += 1;
r = 0.5*(l+r);
}
else{
l = 0.5*(l+r);
}
if(y[bodyIndex + offset] < 0.5*(b+t)){
childPath += 2;
t = 0.5*(t+b);
}
else{
b = 0.5*(t+b);
}
x[cell] += mass[bodyIndex + offset]*x[bodyIndex + offset];
y[cell] += mass[bodyIndex + offset]*y[bodyIndex + offset];
mass[cell] += mass[bodyIndex + offset];
count[cell] += count[bodyIndex + offset];
childIndex = child[4*temp + childPath];
}
child[4*temp + childPath] = bodyIndex + offset;
__threadfence(); // we have been writing to global memory arrays (child, x, y, mass) thus need to fence
child[locked] = patch;
}
// __threadfence(); // we have been writing to global memory arrays (child, x, y, mass) thus need to fence
offset += stride;
newBody = true;
}
}
__syncthreads(); // not strictly needed
}
}
__global__ void centre_of_mass_kernel(float *x, float *y, float *mass, int *index, int n)
{
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
bodyIndex += n;
while(bodyIndex + offset < *index){
x[bodyIndex + offset] /= mass[bodyIndex + offset];
y[bodyIndex + offset] /= mass[bodyIndex + offset];
offset += stride;
}
}
__global__ void sort_kernel(int *count, int *start, int *sorted, int *child, int *index, int n)
{
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
int s = 0;
if(threadIdx.x == 0){
for(int i=0;i<4;i++){
int node = child[i];
if(node >= n){ // not a leaf node
start[node] = s;
s += count[node];
}
else if(node >= 0){ // leaf node
sorted[s] = node;
s++;
}
}
}
int cell = n + bodyIndex;
int ind = *index;
while((cell + offset) < ind){
s = start[cell + offset];
if(s >= 0){
for(int i=0;i<4;i++){
int node = child[4*(cell+offset) + i];
if(node >= n){ // not a leaf node
start[node] = s;
s += count[node];
}
else if(node >= 0){ // leaf node
sorted[s] = node;
s++;
}
}
offset += stride;
}
}
}
__global__ void compute_forces_kernel(float* x, float *y, float *vx, float *vy, float *ax, float *ay, float *mass, int *sorted, int *child, float *left, float *right, int n, float g)
{
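	// Barnes-Hut force evaluation: bodies are visited in spatially sorted order so that the
	// threads of a warp traverse similar parts of the tree, and each warp shares one explicit
	// stack (with a matching depth array) in shared memory. A cell is approximated as a
	// single pseudo-body when the whole warp agrees (via __all) that dp <= r, where dp is
	// derived from the cell size and theta and shrinks by a factor of four per level;
	// otherwise the cell's children are pushed onto the stack.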
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
__shared__ float depth[stackSize*blockSize/warp];
	__shared__ int stack[stackSize*blockSize/warp]; // stack controlled by one thread per warp
float radius = 0.5*(*right - (*left));
// need this in case some of the first four entries of child are -1 (otherwise jj = 3)
int jj = -1;
for(int i=0;i<4;i++){
if(child[i] != -1){
jj++;
}
}
int counter = threadIdx.x % warp;
int stackStartIndex = stackSize*(threadIdx.x / warp);
while(bodyIndex + offset < n){
int sortedIndex = sorted[bodyIndex + offset];
float pos_x = x[sortedIndex];
float pos_y = y[sortedIndex];
float acc_x = 0;
float acc_y = 0;
// initialize stack
int top = jj + stackStartIndex;
if(counter == 0){
int temp = 0;
for(int i=0;i<4;i++){
if(child[i] != -1){
stack[stackStartIndex + temp] = child[i];
depth[stackStartIndex + temp] = radius*radius/theta;
temp++;
}
// if(child[i] == -1){
// printf("%s %d %d %d %d %s %d\n", "THROW ERROR!!!!", child[0], child[1], child[2], child[3], "top: ",top);
// }
// else{
// stack[stackStartIndex + temp] = child[i];
// depth[stackStartIndex + temp] = radius*radius/theta;
// temp++;
// }
}
}
__syncthreads();
// while stack is not empty
while(top >= stackStartIndex){
int node = stack[top];
float dp = 0.25*depth[top];
// float dp = depth[top];
for(int i=0;i<4;i++){
int ch = child[4*node + i];
//__threadfence();
if(ch >= 0){
float dx = x[ch] - pos_x;
float dy = y[ch] - pos_y;
float r = dx*dx + dy*dy + eps2;
if(ch < n /*is leaf node*/ || __all(dp <= r)/*meets criterion*/){
r = rsqrt(r);
float f = mass[ch] * r * r * r;
acc_x += f*dx;
acc_y += f*dy;
}
else{
if(counter == 0){
stack[top] = ch;
depth[top] = dp;
// depth[top] = 0.25*dp;
}
top++;
//__threadfence();
}
}
}
top--;
}
ax[sortedIndex] = acc_x;
ay[sortedIndex] = acc_y;
offset += stride;
__syncthreads();
}
}
// __global__ void compute_forces_kernel(float* x, float *y, float *vx, float *vy, float *ax, float *ay, float *mass, int *sorted, int *child, float *left, float *right, int n, float g)
// {
// int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
// int stride = blockDim.x*gridDim.x;
// int offset = 0;
// __shared__ float depth[stackSize*blockSize/warp];
// 	__shared__ int stack[stackSize*blockSize/warp]; // stack controlled by one thread per warp
// int counter = threadIdx.x % warp;
// int stackStartIndex = stackSize*(threadIdx.x / warp);
// while(bodyIndex + offset < n){
// int sortedIndex = sorted[bodyIndex + offset];
// float pos_x = x[sortedIndex];
// float pos_y = y[sortedIndex];
// float acc_x = 0;
// float acc_y = 0;
// // initialize stack
// int top = 3 + stackStartIndex;
// float radius = 0.5*(*right - (*left));
// if(counter == 0){
// #pragma unroll 4
// for(int i=0;i<4;i++){
// if(child[i] == -1){
// printf("%s\n", "THROW ERROR!!!!");
// }
// stack[stackStartIndex + i] = child[i];
// depth[stackStartIndex + i] = radius;
// }
// }
// __syncthreads();
// // while stack is not empty
// while(top >= stackStartIndex){
// int node = stack[top];
// float dp = 0.5*depth[top];
// // float dp = depth[top];
// for(int i=0;i<4;i++){
// int ch = child[4*node + i];
// //__threadfence();
// if(ch >= 0){
// float dx = x[ch] - pos_x;
// float dy = y[ch] - pos_y;
// //float r = sqrt(dx*dx + dy*dy + eps2);
// float r = rsqrt(dx*dx + dy*dy + eps2);
// if(ch < n /*is leaf node*/ || __all(dp*r <= theta)/*meets criterion*/){
// //float f = mass[ch]/(r*r*r);
// float f = mass[ch] * r * r * r;
// acc_x += f*dx;
// acc_y += f*dy;
// }
// else{
// if(counter == 0){
// stack[top] = ch;
// depth[top] = dp;
// // depth[top] = 0.5*dp;
// }
// top++;
// //__threadfence();
// }
// }
// }
// top--;
// }
// ax[sortedIndex] = acc_x;
// ay[sortedIndex] = acc_y;
// offset += stride;
// __syncthreads();
// }
// }
__global__ void update_kernel(float *x, float *y, float *vx, float *vy, float *ax, float *ay, int n, float dt, float d)
{
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
while(bodyIndex + offset < n){
vx[bodyIndex + offset] += dt*ax[bodyIndex + offset];
vy[bodyIndex + offset] += dt*ay[bodyIndex + offset];
x[bodyIndex + offset] += d*dt*vx[bodyIndex + offset];
y[bodyIndex + offset] += d*dt*vy[bodyIndex + offset];
offset += stride;
}
}
__global__ void copy_kernel(float *x, float *y, float *out, int n)
{
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
while(bodyIndex + offset < n){
out[2*(bodyIndex + offset)] = x[bodyIndex + offset];
out[2*(bodyIndex + offset) + 1] = y[bodyIndex + offset];
offset += stride;
}
}
| f650bebc436cba5fb62d0e283e807751d3c9b872.cu |
#include <stdio.h>
#include "debug.h"
#include "kernels.cuh"
__device__ const int blockSize = 256;
__device__ const int warp = 32;
__device__ const int stackSize = 64;
__device__ const float eps2 = 0.025;
__device__ const float theta = 0.5;
__global__ void set_draw_array_kernel(float *ptr, float *x, float *y, int n)
{
int index = threadIdx.x + blockDim.x*blockIdx.x;
if(index < n){
ptr[2*index] = x[index];
ptr[2*index+1] = y[index];
}
}
__global__ void reset_arrays_kernel(int *mutex, float *x, float *y, float *mass, int *count, int *start, int *sorted, int *child, int *index, float *left, float *right, float *bottom, float *top, int n, int m)
{
int bodyIndex = threadIdx.x + blockDim.x*blockIdx.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
// reset quadtree arrays
while(bodyIndex + offset < m){
#pragma unroll 4
for(int i=0;i<4;i++){
child[(bodyIndex + offset)*4 + i] = -1;
}
if(bodyIndex + offset < n){
count[bodyIndex + offset] = 1;
}
else{
x[bodyIndex + offset] = 0;
y[bodyIndex + offset] = 0;
mass[bodyIndex + offset] = 0;
count[bodyIndex + offset] = 0;
}
start[bodyIndex + offset] = -1;
sorted[bodyIndex + offset] = 0;
offset += stride;
}
if(bodyIndex == 0){
*mutex = 0;
*index = n;
*left = 0;
*right = 0;
*bottom = 0;
*top = 0;
}
}
__global__ void compute_bounding_box_kernel(int *mutex, float *x, float *y, float *left, float *right, float *bottom, float *top, int n)
{
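	// Each thread takes a private min/max over a grid-strided slice of the bodies, the block
	// then reduces those partials in shared memory (blockDim.x must be a power of two), and
	// thread 0 folds the block result into the global bounding box while holding a spin lock
	// acquired with atomicCAS on *mutex.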
int index = threadIdx.x + blockDim.x*blockIdx.x;
int stride = blockDim.x*gridDim.x;
float x_min = x[index];
float x_max = x[index];
float y_min = y[index];
float y_max = y[index];
__shared__ float left_cache[blockSize];
__shared__ float right_cache[blockSize];
__shared__ float bottom_cache[blockSize];
__shared__ float top_cache[blockSize];
int offset = stride;
while(index + offset < n){
x_min = fminf(x_min, x[index + offset]);
x_max = fmaxf(x_max, x[index + offset]);
y_min = fminf(y_min, y[index + offset]);
y_max = fmaxf(y_max, y[index + offset]);
offset += stride;
}
left_cache[threadIdx.x] = x_min;
right_cache[threadIdx.x] = x_max;
bottom_cache[threadIdx.x] = y_min;
top_cache[threadIdx.x] = y_max;
__syncthreads();
// assumes blockDim.x is a power of 2!
int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
left_cache[threadIdx.x] = fminf(left_cache[threadIdx.x], left_cache[threadIdx.x + i]);
right_cache[threadIdx.x] = fmaxf(right_cache[threadIdx.x], right_cache[threadIdx.x + i]);
bottom_cache[threadIdx.x] = fminf(bottom_cache[threadIdx.x], bottom_cache[threadIdx.x + i]);
top_cache[threadIdx.x] = fmaxf(top_cache[threadIdx.x], top_cache[threadIdx.x + i]);
}
__syncthreads();
i /= 2;
}
if(threadIdx.x == 0){
while (atomicCAS(mutex, 0 ,1) != 0); // lock
*left = fminf(*left, left_cache[0]);
*right = fmaxf(*right, right_cache[0]);
*bottom = fminf(*bottom, bottom_cache[0]);
*top = fmaxf(*top, top_cache[0]);
atomicExch(mutex, 0); // unlock
}
}
__global__ void build_tree_kernel(float *x, float *y, float *mass, int *count, int *start, int *child, int *index, float *left, float *right, float *bottom, float *top, int n, int m)
{
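	// Iterative, lock-free quadtree construction: each body walks from the root towards a
	// leaf, atomically adding its mass, mass-weighted position and count to every internal
	// cell it passes. The target child slot is claimed with atomicCAS (-1 = empty, -2 =
	// locked); if the slot already holds another body, new cells are allocated with
	// atomicAdd on *index until the two bodies fall into different children, and the new
	// subtree is published by writing patch back to the slot after a __threadfence.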
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
bool newBody = true;
// build quadtree
float l;
float r;
float b;
float t;
int childPath;
int temp;
offset = 0;
while((bodyIndex + offset) < n){
if(newBody){
newBody = false;
l = *left;
r = *right;
b = *bottom;
t = *top;
temp = 0;
childPath = 0;
if(x[bodyIndex + offset] < 0.5*(l+r)){
childPath += 1;
r = 0.5*(l+r);
}
else{
l = 0.5*(l+r);
}
if(y[bodyIndex + offset] < 0.5*(b+t)){
childPath += 2;
t = 0.5*(t+b);
}
else{
b = 0.5*(t+b);
}
}
int childIndex = child[temp*4 + childPath];
// traverse tree until we hit leaf node
while(childIndex >= n){
temp = childIndex;
childPath = 0;
if(x[bodyIndex + offset] < 0.5*(l+r)){
childPath += 1;
r = 0.5*(l+r);
}
else{
l = 0.5*(l+r);
}
if(y[bodyIndex + offset] < 0.5*(b+t)){
childPath += 2;
t = 0.5*(t+b);
}
else{
b = 0.5*(t+b);
}
atomicAdd(&x[temp], mass[bodyIndex + offset]*x[bodyIndex + offset]);
atomicAdd(&y[temp], mass[bodyIndex + offset]*y[bodyIndex + offset]);
atomicAdd(&mass[temp], mass[bodyIndex + offset]);
atomicAdd(&count[temp], 1);
childIndex = child[4*temp + childPath];
}
if(childIndex != -2){
int locked = temp*4 + childPath;
if(atomicCAS(&child[locked], childIndex, -2) == childIndex){
if(childIndex == -1){
child[locked] = bodyIndex + offset;
}
else{
//int patch = 2*n;
int patch = 4*n;
while(childIndex >= 0 && childIndex < n){
int cell = atomicAdd(index,1);
patch = min(patch, cell);
if(patch != cell){
child[4*temp + childPath] = cell;
}
// insert old particle
childPath = 0;
if(x[childIndex] < 0.5*(l+r)){
childPath += 1;
}
if(y[childIndex] < 0.5*(b+t)){
childPath += 2;
}
if(DEBUG){
// if(cell >= 2*n){
if(cell >= m){
printf("%s\n", "error cell index is too large!!");
printf("cell: %d\n", cell);
}
}
x[cell] += mass[childIndex]*x[childIndex];
y[cell] += mass[childIndex]*y[childIndex];
mass[cell] += mass[childIndex];
count[cell] += count[childIndex];
child[4*cell + childPath] = childIndex;
start[cell] = -1;
// insert new particle
temp = cell;
childPath = 0;
if(x[bodyIndex + offset] < 0.5*(l+r)){
childPath += 1;
r = 0.5*(l+r);
}
else{
l = 0.5*(l+r);
}
if(y[bodyIndex + offset] < 0.5*(b+t)){
childPath += 2;
t = 0.5*(t+b);
}
else{
b = 0.5*(t+b);
}
x[cell] += mass[bodyIndex + offset]*x[bodyIndex + offset];
y[cell] += mass[bodyIndex + offset]*y[bodyIndex + offset];
mass[cell] += mass[bodyIndex + offset];
count[cell] += count[bodyIndex + offset];
childIndex = child[4*temp + childPath];
}
child[4*temp + childPath] = bodyIndex + offset;
__threadfence(); // we have been writing to global memory arrays (child, x, y, mass) thus need to fence
child[locked] = patch;
}
// __threadfence(); // we have been writing to global memory arrays (child, x, y, mass) thus need to fence
offset += stride;
newBody = true;
}
}
__syncthreads(); // not strictly needed
}
}
__global__ void centre_of_mass_kernel(float *x, float *y, float *mass, int *index, int n)
{
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
bodyIndex += n;
while(bodyIndex + offset < *index){
x[bodyIndex + offset] /= mass[bodyIndex + offset];
y[bodyIndex + offset] /= mass[bodyIndex + offset];
offset += stride;
}
}
__global__ void sort_kernel(int *count, int *start, int *sorted, int *child, int *index, int n)
{
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
int s = 0;
if(threadIdx.x == 0){
for(int i=0;i<4;i++){
int node = child[i];
if(node >= n){ // not a leaf node
start[node] = s;
s += count[node];
}
else if(node >= 0){ // leaf node
sorted[s] = node;
s++;
}
}
}
int cell = n + bodyIndex;
int ind = *index;
while((cell + offset) < ind){
s = start[cell + offset];
if(s >= 0){
for(int i=0;i<4;i++){
int node = child[4*(cell+offset) + i];
if(node >= n){ // not a leaf node
start[node] = s;
s += count[node];
}
else if(node >= 0){ // leaf node
sorted[s] = node;
s++;
}
}
offset += stride;
}
}
}
__global__ void compute_forces_kernel(float* x, float *y, float *vx, float *vy, float *ax, float *ay, float *mass, int *sorted, int *child, float *left, float *right, int n, float g)
{
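	// Barnes-Hut force evaluation: bodies are visited in spatially sorted order so that the
	// threads of a warp traverse similar parts of the tree, and each warp shares one explicit
	// stack (with a matching depth array) in shared memory. A cell is approximated as a
	// single pseudo-body when the whole warp agrees (via __all) that dp <= r, where dp is
	// derived from the cell size and theta and shrinks by a factor of four per level;
	// otherwise the cell's children are pushed onto the stack.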
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
__shared__ float depth[stackSize*blockSize/warp];
	__shared__ int stack[stackSize*blockSize/warp]; // stack controlled by one thread per warp
float radius = 0.5*(*right - (*left));
// need this in case some of the first four entries of child are -1 (otherwise jj = 3)
int jj = -1;
for(int i=0;i<4;i++){
if(child[i] != -1){
jj++;
}
}
int counter = threadIdx.x % warp;
int stackStartIndex = stackSize*(threadIdx.x / warp);
while(bodyIndex + offset < n){
int sortedIndex = sorted[bodyIndex + offset];
float pos_x = x[sortedIndex];
float pos_y = y[sortedIndex];
float acc_x = 0;
float acc_y = 0;
// initialize stack
int top = jj + stackStartIndex;
if(counter == 0){
int temp = 0;
for(int i=0;i<4;i++){
if(child[i] != -1){
stack[stackStartIndex + temp] = child[i];
depth[stackStartIndex + temp] = radius*radius/theta;
temp++;
}
// if(child[i] == -1){
// printf("%s %d %d %d %d %s %d\n", "THROW ERROR!!!!", child[0], child[1], child[2], child[3], "top: ",top);
// }
// else{
// stack[stackStartIndex + temp] = child[i];
// depth[stackStartIndex + temp] = radius*radius/theta;
// temp++;
// }
}
}
__syncthreads();
// while stack is not empty
while(top >= stackStartIndex){
int node = stack[top];
float dp = 0.25*depth[top];
// float dp = depth[top];
for(int i=0;i<4;i++){
int ch = child[4*node + i];
//__threadfence();
if(ch >= 0){
float dx = x[ch] - pos_x;
float dy = y[ch] - pos_y;
float r = dx*dx + dy*dy + eps2;
if(ch < n /*is leaf node*/ || __all(dp <= r)/*meets criterion*/){
r = rsqrt(r);
float f = mass[ch] * r * r * r;
acc_x += f*dx;
acc_y += f*dy;
}
else{
if(counter == 0){
stack[top] = ch;
depth[top] = dp;
// depth[top] = 0.25*dp;
}
top++;
//__threadfence();
}
}
}
top--;
}
ax[sortedIndex] = acc_x;
ay[sortedIndex] = acc_y;
offset += stride;
__syncthreads();
}
}
// __global__ void compute_forces_kernel(float* x, float *y, float *vx, float *vy, float *ax, float *ay, float *mass, int *sorted, int *child, float *left, float *right, int n, float g)
// {
// int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
// int stride = blockDim.x*gridDim.x;
// int offset = 0;
// __shared__ float depth[stackSize*blockSize/warp];
// 	__shared__ int stack[stackSize*blockSize/warp]; // stack controlled by one thread per warp
// int counter = threadIdx.x % warp;
// int stackStartIndex = stackSize*(threadIdx.x / warp);
// while(bodyIndex + offset < n){
// int sortedIndex = sorted[bodyIndex + offset];
// float pos_x = x[sortedIndex];
// float pos_y = y[sortedIndex];
// float acc_x = 0;
// float acc_y = 0;
// // initialize stack
// int top = 3 + stackStartIndex;
// float radius = 0.5*(*right - (*left));
// if(counter == 0){
// #pragma unroll 4
// for(int i=0;i<4;i++){
// if(child[i] == -1){
// printf("%s\n", "THROW ERROR!!!!");
// }
// stack[stackStartIndex + i] = child[i];
// depth[stackStartIndex + i] = radius;
// }
// }
// __syncthreads();
// // while stack is not empty
// while(top >= stackStartIndex){
// int node = stack[top];
// float dp = 0.5*depth[top];
// // float dp = depth[top];
// for(int i=0;i<4;i++){
// int ch = child[4*node + i];
// //__threadfence();
// if(ch >= 0){
// float dx = x[ch] - pos_x;
// float dy = y[ch] - pos_y;
// //float r = sqrt(dx*dx + dy*dy + eps2);
// float r = rsqrt(dx*dx + dy*dy + eps2);
// if(ch < n /*is leaf node*/ || __all(dp*r <= theta)/*meets criterion*/){
// //float f = mass[ch]/(r*r*r);
// float f = mass[ch] * r * r * r;
// acc_x += f*dx;
// acc_y += f*dy;
// }
// else{
// if(counter == 0){
// stack[top] = ch;
// depth[top] = dp;
// // depth[top] = 0.5*dp;
// }
// top++;
// //__threadfence();
// }
// }
// }
// top--;
// }
// ax[sortedIndex] = acc_x;
// ay[sortedIndex] = acc_y;
// offset += stride;
// __syncthreads();
// }
// }
__global__ void update_kernel(float *x, float *y, float *vx, float *vy, float *ax, float *ay, int n, float dt, float d)
{
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
while(bodyIndex + offset < n){
vx[bodyIndex + offset] += dt*ax[bodyIndex + offset];
vy[bodyIndex + offset] += dt*ay[bodyIndex + offset];
x[bodyIndex + offset] += d*dt*vx[bodyIndex + offset];
y[bodyIndex + offset] += d*dt*vy[bodyIndex + offset];
offset += stride;
}
}
__global__ void copy_kernel(float *x, float *y, float *out, int n)
{
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
while(bodyIndex + offset < n){
out[2*(bodyIndex + offset)] = x[bodyIndex + offset];
out[2*(bodyIndex + offset) + 1] = y[bodyIndex + offset];
offset += stride;
}
}
|
2cdc287d08be52f1ed30524f4e9f1f8332efd48d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
void CPUFunction() {
printf("Hello world from the CPU.\n");
}
__global__ void GPUFunction(int myid) {
printf("Hello world from GPU %d.\n", myid);
}
int main() {
// function to run on the cpu
CPUFunction();
int deviceCount;
hipGetDeviceCount(&deviceCount);
int device;
for (device=0; device < deviceCount; ++device) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
printf("Device %d has compute capability %d.%d.\n",
device, deviceProp.major, deviceProp.minor);
}
// run on gpu 0
int device_id = 0;
hipSetDevice(device_id);
hipLaunchKernelGGL(( GPUFunction), dim3(1), dim3(1), 0, 0, device_id);
// run on gpu 1
device_id = 1;
hipSetDevice(device_id);
hipLaunchKernelGGL(( GPUFunction), dim3(1), dim3(1), 0, 0, device_id);
// kernel execution is asynchronous so sync on its completion
hipDeviceSynchronize();
}
| 2cdc287d08be52f1ed30524f4e9f1f8332efd48d.cu | #include <stdio.h>
void CPUFunction() {
printf("Hello world from the CPU.\n");
}
__global__ void GPUFunction(int myid) {
printf("Hello world from GPU %d.\n", myid);
}
int main() {
// function to run on the cpu
CPUFunction();
int deviceCount;
cudaGetDeviceCount(&deviceCount);
int device;
for (device=0; device < deviceCount; ++device) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
printf("Device %d has compute capability %d.%d.\n",
device, deviceProp.major, deviceProp.minor);
}
// run on gpu 0
int device_id = 0;
cudaSetDevice(device_id);
GPUFunction<<<1, 1>>>(device_id);
// run on gpu 1
device_id = 1;
cudaSetDevice(device_id);
GPUFunction<<<1, 1>>>(device_id);
// kernel execution is asynchronous so sync on its completion
cudaDeviceSynchronize();
}
|
e2c4450802cc2b5ed713aba836f24ea8d2f2bbba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/device/cudnn_util.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/ndarray/ndarray_util.h"
#include "oneflow/core/cuda/atomic.cuh"
#include <hipcub/hipcub.hpp>
namespace oneflow {
namespace {
class LayerNormCudnnBnCtx final {
public:
LayerNormCudnnBnCtx(const ShapeView& data_shape, const ShapeView& param_shape,
DataType data_type) {
const int64_t cudnn_c = param_shape.elem_cnt();
CHECK_EQ(data_shape.elem_cnt() % cudnn_c, 0);
const int64_t cudnn_w = data_shape.elem_cnt() / cudnn_c;
CHECK_LT(cudnn_c, GetMaxVal<int32_t>());
CHECK_LT(cudnn_w, GetMaxVal<int32_t>());
data_tensor_desc_.reset(new CudnnTensorDesc(CUDNN_TENSOR_NCHW, data_type, 1,
static_cast<int32_t>(cudnn_c), 1,
static_cast<int32_t>(cudnn_w)));
DataType param_dtype = data_type == DataType::kFloat16 ? DataType::kFloat : data_type;
param_tensor_desc_.reset(new CudnnTensorDesc(CUDNN_TENSOR_NCHW, param_dtype, 1,
static_cast<int32_t>(cudnn_c), 1, 1));
#if (CUDNN_VERSION >= 7000)
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
#else
mode_ = CUDNN_BATCHNORM_SPATIAL;
#endif
}
~LayerNormCudnnBnCtx() = default;
const cudnnTensorDescriptor_t& data_tensor_desc() const { return data_tensor_desc_->Get(); }
const cudnnTensorDescriptor_t& param_tensor_desc() const { return param_tensor_desc_->Get(); }
cudnnBatchNormMode_t mode() const { return mode_; };
private:
std::unique_ptr<CudnnTensorDesc> data_tensor_desc_;
std::unique_ptr<CudnnTensorDesc> param_tensor_desc_;
cudnnBatchNormMode_t mode_;
};
template<typename T, bool do_scale, bool do_center>
__global__ void InstanceScaleCenterGpu(const int64_t elem_cnt, const int64_t instance_size,
const T* in, const T* gamma, const T* beta, T* out) {
CUDA_1D_KERNEL_LOOP_T(int64_t, i, elem_cnt) {
const int64_t elem_id = i % instance_size;
T v = in[i];
if (do_scale) { v *= gamma[elem_id]; }
if (do_center) { v += beta[elem_id]; }
out[i] = v;
}
}
template<bool do_scale, bool do_center>
__global__ void InstanceScaleCenterH2Gpu(const int64_t h2_elem_cnt, const int64_t h2_instance_size,
const half* in, const half* gamma, const half* beta,
half* out) {
const auto* in_h2 = reinterpret_cast<const half2*>(in);
const auto* gamma_h2 = reinterpret_cast<const half2*>(gamma);
const auto* beta_h2 = reinterpret_cast<const half2*>(beta);
auto* out_h2 = reinterpret_cast<half2*>(out);
CUDA_1D_KERNEL_LOOP_T(int64_t, i, h2_elem_cnt) {
const int64_t elem_id = i % h2_instance_size;
half2 v2 = in_h2[i];
if (do_scale) { v2 = __hmul2(v2, gamma_h2[elem_id]); }
if (do_center) { v2 = __hadd2(v2, beta_h2[elem_id]); }
out_h2[i] = v2;
}
}
template<typename T>
void InstanceScaleCenter(DeviceCtx* ctx, const int64_t batch_size, const int64_t instance_size,
const T* in, const T* gamma, const T* beta, T* out) {
const int64_t elem_cnt = batch_size * instance_size;
if (beta != nullptr && gamma != nullptr) { // scale and center
hipLaunchKernelGGL(( InstanceScaleCenterGpu<T, true, true>)
, dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
elem_cnt, instance_size, in, gamma, beta, out);
} else if (gamma != nullptr) { // scale only
hipLaunchKernelGGL(( InstanceScaleCenterGpu<T, true, false>)
, dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
elem_cnt, instance_size, in, gamma, nullptr, out);
} else if (beta != nullptr) { // center only
hipLaunchKernelGGL(( InstanceScaleCenterGpu<T, false, true>)
, dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
elem_cnt, instance_size, in, nullptr, beta, out);
} else {
UNIMPLEMENTED();
}
}
void InstanceScaleCenterH2(DeviceCtx* ctx, const int64_t batch_size, const int64_t instance_size,
const half* in, const half* gamma, const half* beta, half* out) {
CHECK_EQ(instance_size % 2, 0);
const int64_t elem_cnt_h2 = batch_size * instance_size / 2;
const int64_t instance_size_h2 = instance_size / 2;
if (beta != nullptr && gamma != nullptr) { // scale and center
hipLaunchKernelGGL(( InstanceScaleCenterH2Gpu<true, true>)
, dim3(BlocksNum4ThreadsNum(elem_cnt_h2)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
elem_cnt_h2, instance_size_h2, in, gamma, beta, out);
} else if (gamma != nullptr) { // scale only
hipLaunchKernelGGL(( InstanceScaleCenterH2Gpu<true, false>)
, dim3(BlocksNum4ThreadsNum(elem_cnt_h2)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
elem_cnt_h2, instance_size_h2, in, gamma, nullptr, out);
} else if (beta != nullptr) { // center only
hipLaunchKernelGGL(( InstanceScaleCenterH2Gpu<false, true>)
, dim3(BlocksNum4ThreadsNum(elem_cnt_h2)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
elem_cnt_h2, instance_size_h2, in, nullptr, beta, out);
} else {
UNIMPLEMENTED();
}
}
template<>
void InstanceScaleCenter<float16>(DeviceCtx* ctx, const int64_t batch_size,
const int64_t instance_size, const float16* in,
const float16* gamma, const float16* beta, float16* out) {
if (instance_size % 2 == 0) {
InstanceScaleCenterH2(ctx, batch_size, instance_size, reinterpret_cast<const half*>(in),
reinterpret_cast<const half*>(gamma), reinterpret_cast<const half*>(beta),
reinterpret_cast<half*>(out));
} else {
InstanceScaleCenter<half>(ctx, batch_size, instance_size, reinterpret_cast<const half*>(in),
reinterpret_cast<const half*>(gamma),
reinterpret_cast<const half*>(beta), reinterpret_cast<half*>(out));
}
}
constexpr int64_t kLayerNormForwardGpuBlockSize = 256;
template<typename T>
struct LayerNormUtil {
using ComputeType = T;
__device__ static ComputeType ToComputeType(T v) { return v; }
__device__ static T FromComputeType(ComputeType v) { return v; }
};
template<>
struct LayerNormUtil<half> {
using ComputeType = float;
__device__ static ComputeType ToComputeType(half v) { return __half2float(v); }
__device__ static half FromComputeType(ComputeType v) { return __float2half(v); }
};
template<typename T>
int GetForwardDynamicSharedMemorySize(const int norm_size) {
return norm_size * sizeof(typename LayerNormUtil<T>::ComputeType);
}
int GetLayerNormForwardBlockSize() { return kLayerNormForwardGpuBlockSize; }
int GetLayerNormForwardNumBlocks(const int num_instances) {
return ::min(static_cast<int>(num_instances), kCudaMaxBlocksNum);
}
template<typename T, typename ComputeType>
__global__ void LayerNormForwardImpl(const int num_instances, const int norm_size,
const double epsilon, const T* x, const T* gamma,
const T* beta, ComputeType* mean, ComputeType* inv_variance,
T* normalized, T* y) {
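  // One thread block handles one instance (row): each thread accumulates a partial sum and
  // sum of squares over the row while caching the loaded values in dynamic shared memory,
  // hipcub::BlockReduce combines the partials, and thread 0 derives the row mean and inverse
  // standard deviation. A second pass normalizes the cached values and optionally applies
  // gamma (scale) and beta (center).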
using LU = LayerNormUtil<T>;
extern __shared__ __align__(sizeof(double)) unsigned char fw_shared_buf[];
auto* compute_buf = reinterpret_cast<ComputeType*>(fw_shared_buf);
__shared__ ComputeType row_mean_shared;
__shared__ ComputeType row_inv_var_shared;
typedef hipcub::BlockReduce<ComputeType, kLayerNormForwardGpuBlockSize> BlockReduce;
__shared__ typename BlockReduce::TempStorage cub_mean_reduce_tmp_storage;
__shared__ typename BlockReduce::TempStorage cub_variance_reduce_tmp_storage;
ComputeType inv_norm_size = static_cast<ComputeType>(1.0) / static_cast<ComputeType>(norm_size);
for (int row = blockIdx.x; row < num_instances; row += gridDim.x) {
const int row_offset = row * norm_size;
const T* x_row = x + row_offset;
ComputeType thread_sum = 0;
ComputeType thread_square_sum = 0;
const int tid = threadIdx.x;
for (int col = tid; col < norm_size; col += blockDim.x) {
const ComputeType val = LU::ToComputeType(x_row[col]);
compute_buf[col] = val;
thread_sum += val;
thread_square_sum += val * val;
}
__syncthreads();
ComputeType block_sum = BlockReduce(cub_mean_reduce_tmp_storage).Reduce(thread_sum, hipcub::Sum());
ComputeType block_square_sum =
BlockReduce(cub_variance_reduce_tmp_storage).Reduce(thread_square_sum, hipcub::Sum());
if (tid == 0) {
ComputeType row_mean = block_sum * inv_norm_size;
row_mean_shared = row_mean;
mean[row] = row_mean;
ComputeType row_variance =
max(block_square_sum * inv_norm_size - row_mean * row_mean, static_cast<ComputeType>(0));
ComputeType row_inv_var = rsqrt(row_variance + static_cast<ComputeType>(epsilon));
row_inv_var_shared = row_inv_var;
inv_variance[row] = row_inv_var;
}
__syncthreads();
ComputeType mean = row_mean_shared;
ComputeType inv_var = row_inv_var_shared;
for (int col = threadIdx.x; col < norm_size; col += blockDim.x) {
int offset = row_offset + col;
ComputeType val = compute_buf[col];
val = (val - mean) * inv_var;
if (gamma != nullptr || beta != nullptr) {
int elem_id = col;
if (gamma != nullptr) {
normalized[offset] = LU::FromComputeType(val);
val *= LU::ToComputeType(gamma[elem_id]);
}
if (beta != nullptr) { val += LU::ToComputeType(beta[elem_id]); }
}
y[offset] = LU::FromComputeType(val);
}
}
}
template<typename T>
void LayerNormForwardGpu(DeviceCtx* ctx, const int num_instances, const int norm_size,
const double epsilon, const T* x_ptr, const T* gamma_ptr,
const T* beta_ptr, T* normalized_ptr, T* y_ptr, user_op::Tensor* mean,
user_op::Tensor* inv_variance) {
hipLaunchKernelGGL(( LayerNormForwardImpl<T, typename LayerNormUtil<T>::ComputeType>)
, dim3(GetLayerNormForwardNumBlocks(num_instances)), dim3(GetLayerNormForwardBlockSize()),
GetForwardDynamicSharedMemorySize<T>(norm_size), ctx->cuda_stream(),
num_instances, norm_size, epsilon, x_ptr, gamma_ptr, beta_ptr,
mean->mut_dptr<typename LayerNormUtil<T>::ComputeType>(),
inv_variance->mut_dptr<typename LayerNormUtil<T>::ComputeType>(), normalized_ptr, y_ptr);
}
template<>
void LayerNormForwardGpu<float16>(DeviceCtx* ctx, const int num_instances, const int norm_size,
const double epsilon, const float16* x_ptr,
const float16* gamma_ptr, const float16* beta_ptr,
float16* normalized_ptr, float16* y_ptr, user_op::Tensor* mean,
user_op::Tensor* inv_variance) {
hipLaunchKernelGGL(( LayerNormForwardImpl<half, typename LayerNormUtil<half>::ComputeType>)
, dim3(GetLayerNormForwardNumBlocks(num_instances)), dim3(GetLayerNormForwardBlockSize()),
GetForwardDynamicSharedMemorySize<half>(norm_size), ctx->cuda_stream(),
num_instances, norm_size, epsilon, reinterpret_cast<const half*>(x_ptr),
reinterpret_cast<const half*>(gamma_ptr), reinterpret_cast<const half*>(beta_ptr),
mean->mut_dptr<typename LayerNormUtil<half>::ComputeType>(),
inv_variance->mut_dptr<typename LayerNormUtil<half>::ComputeType>(),
reinterpret_cast<half*>(normalized_ptr), reinterpret_cast<half*>(y_ptr));
}
int GetForwardFusedKernelMinNormSize() { return 64; }
template<typename T>
int GetForwardFusedKernelMaxActiveBlocks(const int32_t norm_size) {
int max_active_blocks;
OF_CUDA_CHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, LayerNormForwardImpl<T, typename LayerNormUtil<T>::ComputeType>,
GetLayerNormForwardBlockSize(), GetForwardDynamicSharedMemorySize<T>(norm_size)));
return max_active_blocks;
}
template<>
int GetForwardFusedKernelMaxActiveBlocks<float16>(const int32_t norm_size) {
int max_active_blocks;
OF_CUDA_CHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, LayerNormForwardImpl<half, typename LayerNormUtil<half>::ComputeType>,
GetLayerNormForwardBlockSize(), GetForwardDynamicSharedMemorySize<half>(norm_size)));
return max_active_blocks;
}
template<typename T>
bool IsForwardFusedKernelSupported(const int32_t norm_size, const int32_t instance_size) {
if (norm_size >= GetForwardFusedKernelMinNormSize() && norm_size % 32 == 0
&& GetForwardFusedKernelMaxActiveBlocks<T>(norm_size) > 0
&& (instance_size == 0 || norm_size == instance_size)) {
return true;
} else {
return false;
}
}
constexpr int64_t kLayerNormParamGradGpuBlockSize = 512;
int64_t GetLayerNormParamGradBlockSize() { return kLayerNormParamGradGpuBlockSize; }
int64_t GetLayerNormParamGradNumBlocks(const int64_t elem_cnt) {
return ::min(static_cast<int>((elem_cnt + kLayerNormParamGradGpuBlockSize - 1)
/ kLayerNormParamGradGpuBlockSize),
256);
}
template<typename T>
int64_t GetParamGradDynamicSharedMemorySize(const int64_t instance_size) {
return 2 * instance_size * sizeof(T);
}
template<>
int64_t GetParamGradDynamicSharedMemorySize<float16>(const int64_t instance_size) {
return 2 * instance_size * sizeof(float);
}
template<typename T, typename I>
__global__ void LayerNormParamGradImpl(const I n, const I instance_size, const T* dy,
const T* normalized, const T* gamma, T* gamma_diff,
T* beta_diff, T* normalized_diff) {
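  // gamma/beta gradients are first accumulated per block in dynamic shared memory (one slot
  // per element of the instance), with atomics resolving collisions between threads of the
  // same block; the per-block partial sums are then atomically added into the global
  // gamma_diff / beta_diff buffers. normalized_diff is set to gamma * dy.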
extern __shared__ __align__(sizeof(double)) unsigned char bw_shared_buf[];
auto* gamma_diff_sum_buf = reinterpret_cast<T*>(bw_shared_buf);
auto* beta_diff_sum_buf = gamma_diff_sum_buf + instance_size;
const I tid = threadIdx.x;
for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) {
gamma_diff_sum_buf[elem_id] = 0;
beta_diff_sum_buf[elem_id] = 0;
}
__syncthreads();
CUDA_1D_KERNEL_LOOP_T(I, i, n) {
const I elem_id = i % instance_size;
T dy_val = dy[i];
T normalized_val = normalized[i];
cuda::atomic::Add(&gamma_diff_sum_buf[elem_id], dy_val * normalized_val);
cuda::atomic::Add(&beta_diff_sum_buf[elem_id], dy_val);
T gamma_val = gamma[elem_id];
normalized_diff[i] = gamma_val * dy_val;
}
__syncthreads();
for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) {
cuda::atomic::Add(gamma_diff + elem_id, gamma_diff_sum_buf[elem_id]);
cuda::atomic::Add(beta_diff + elem_id, beta_diff_sum_buf[elem_id]);
}
}
template<typename I>
__global__ void LayerNormParamGradHalfImpl(const I n, const I instance_size, const half* dy,
const half* normalized, const half* gamma,
half* tmp_gamma_diff, half* tmp_beta_diff,
half* normalized_diff) {
extern __shared__ __align__(sizeof(double)) unsigned char bw_shared_buf[];
auto* gamma_diff_sum_buf = reinterpret_cast<float*>(bw_shared_buf);
auto* beta_diff_sum_buf = gamma_diff_sum_buf + instance_size;
const I tid = threadIdx.x;
for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) {
gamma_diff_sum_buf[elem_id] = 0;
beta_diff_sum_buf[elem_id] = 0;
}
__syncthreads();
CUDA_1D_KERNEL_LOOP_T(I, i, n) {
const I elem_id = i % instance_size;
half dy_val = dy[i];
half normalized_val = normalized[i];
cuda::atomic::Add(&gamma_diff_sum_buf[elem_id],
__half2float(dy_val) * __half2float(normalized_val));
cuda::atomic::Add(&beta_diff_sum_buf[elem_id], __half2float(dy_val));
half gamma_val = gamma[elem_id];
normalized_diff[i] = __hmul(gamma_val, dy_val);
}
__syncthreads();
for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) {
const I offset = blockIdx.x * instance_size + elem_id;
tmp_gamma_diff[offset] = __float2half(gamma_diff_sum_buf[elem_id]);
tmp_beta_diff[offset] = __float2half(beta_diff_sum_buf[elem_id]);
}
}
} // namespace
template<typename T, typename BNParamT>
class LayerNormGpuKernel final : public user_op::OpKernel {
public:
LayerNormGpuKernel() = default;
~LayerNormGpuKernel() = default;
private:
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
user_op::Tensor* mean = ctx->Tensor4ArgNameAndIndex("mean", 0);
user_op::Tensor* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0);
const bool scale = ctx->Attr<bool>("scale");
const bool center = ctx->Attr<bool>("center");
user_op::Tensor* normalized = scale ? ctx->Tensor4ArgNameAndIndex("normalized", 0) : y;
const double epsilon = ctx->Attr<double>("epsilon");
CHECK_GE(epsilon, CUDNN_BN_MIN_EPSILON);
const int32_t num_instances = mean->shape().elem_cnt();
const int32_t norm_size = x->shape().elem_cnt() / num_instances;
int32_t instance_size = 0;
const T* gamma_ptr = nullptr;
const T* beta_ptr = nullptr;
if (scale || center) {
if (scale) {
const user_op::Tensor* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0);
instance_size = gamma->shape().elem_cnt();
gamma_ptr = gamma->dptr<T>();
}
if (center) {
const user_op::Tensor* beta = ctx->Tensor4ArgNameAndIndex("beta", 0);
if (gamma_ptr) {
CHECK_EQ(beta->shape().elem_cnt(), instance_size);
} else {
instance_size = beta->shape().elem_cnt();
}
beta_ptr = beta->dptr<T>();
}
CHECK_EQ(y->shape().elem_cnt() % instance_size, 0);
}
if (IsForwardFusedKernelSupported<T>(norm_size, instance_size)) {
LayerNormForwardGpu<T>(ctx->device_ctx(), num_instances, norm_size, epsilon, x->dptr<T>(),
gamma_ptr, beta_ptr, normalized->mut_dptr<T>(), y->mut_dptr<T>(), mean,
inv_variance);
} else {
LayerNormCudnnBnCtx bn_ctx(x->shape(), mean->shape(), x->data_type());
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const size_t aligned_buffer_size =
GetCudaAlignedSize(mean->shape().elem_cnt() * GetSizeOfDataType(mean->data_type()));
char* cudnn_bn_scale_ones_dptr = tmp_buffer->mut_dptr<char>();
char* cudnn_bn_bias_zeros_dptr = cudnn_bn_scale_ones_dptr + aligned_buffer_size;
NewKernelUtil<DeviceType::kGPU>::Fill(ctx->device_ctx(), mean->shape().elem_cnt(),
static_cast<BNParamT>(1),
reinterpret_cast<BNParamT*>(cudnn_bn_scale_ones_dptr));
NewKernelUtil<DeviceType::kGPU>::Fill(ctx->device_ctx(), mean->shape().elem_cnt(),
static_cast<BNParamT>(0),
reinterpret_cast<BNParamT*>(cudnn_bn_bias_zeros_dptr));
OF_CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
ctx->device_ctx()->cudnn_handle(), bn_ctx.mode(), CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(),
bn_ctx.data_tensor_desc(), x->dptr<T>(), bn_ctx.data_tensor_desc(),
normalized->mut_dptr<T>(), bn_ctx.param_tensor_desc(),
reinterpret_cast<BNParamT*>(cudnn_bn_scale_ones_dptr),
reinterpret_cast<BNParamT*>(cudnn_bn_bias_zeros_dptr), 1.0, nullptr, nullptr, epsilon,
mean->mut_dptr(), inv_variance->mut_dptr()));
if (scale || center) {
const int64_t batch_size = y->shape().elem_cnt() / instance_size;
InstanceScaleCenter<T>(ctx->device_ctx(), batch_size, instance_size, normalized->dptr<T>(),
gamma_ptr, beta_ptr, y->mut_dptr<T>());
}
}
};
};
#define REGISTER_LAYER_NORM_GPU_KERNEL(dtype, bn_param_dtype) \
REGISTER_USER_KERNEL("layer_norm") \
.SetCreateFn<LayerNormGpuKernel<dtype, bn_param_dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("x", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](oneflow::user_op::InferContext* ctx) { \
user_op::TensorDesc* mean = ctx->TensorDesc4ArgNameAndIndex("mean", 0); \
const DataType& data_type = mean->data_type(); \
const int64_t elem_cnt = mean->shape().elem_cnt(); \
return GetCudaAlignedSize(elem_cnt * GetSizeOfDataType(data_type)) * 2; \
});
REGISTER_LAYER_NORM_GPU_KERNEL(float, float)
REGISTER_LAYER_NORM_GPU_KERNEL(double, double)
REGISTER_LAYER_NORM_GPU_KERNEL(float16, float)
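// Data-grad kernel: reuses cudnnBatchNormalizationBackward with an all-ones scale
// (stored in tmp_buffer) so that dx only reflects the normalization; the scale/bias
// diff outputs required by cuDNN go to scratch space and are discarded. Optionally
// accumulates into dx when the "_add_to_output" input is present.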
template<typename T, typename BNParamT>
class LayerNormGradGpuKernel final : public user_op::OpKernel {
public:
LayerNormGradGpuKernel() = default;
~LayerNormGradGpuKernel() = default;
private:
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* mean = ctx->Tensor4ArgNameAndIndex("mean", 0);
const user_op::Tensor* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const size_t aligned_buffer_size =
GetCudaAlignedSize(mean->shape().elem_cnt() * GetSizeOfDataType(mean->data_type()));
char* cudnn_bn_scale_ones_dptr = tmp_buffer->mut_dptr<char>();
char* cudnn_bn_scale_diff_buf_dptr = cudnn_bn_scale_ones_dptr + aligned_buffer_size;
    // bias-diff scratch occupies the third aligned slot of tmp_buffer (sized 3x below)
    char* cudnn_bn_bias_diff_buf_dptr = cudnn_bn_scale_diff_buf_dptr + aligned_buffer_size;
NewKernelUtil<DeviceType::kGPU>::Fill(ctx->device_ctx(), mean->shape().elem_cnt(),
static_cast<BNParamT>(1),
reinterpret_cast<BNParamT*>(cudnn_bn_scale_ones_dptr));
const void* sp_alpha = CudnnSPOnePtr<T>();
const void* sp_beta;
if (ctx->has_input("_add_to_output", 0)) {
const user_op::Tensor* add_to_output = ctx->Tensor4ArgNameAndIndex("_add_to_output", 0);
CHECK_EQ(add_to_output->data_type(), dx->data_type());
CHECK_EQ(add_to_output->shape(), dx->shape());
Memcpy<DeviceType::kGPU>(
ctx->device_ctx(), dx->mut_dptr<void>(), add_to_output->dptr<void>(),
add_to_output->shape().elem_cnt() * GetSizeOfDataType(add_to_output->data_type()));
sp_beta = CudnnSPOnePtr<T>();
} else {
sp_beta = CudnnSPZeroPtr<T>();
}
const double epsilon = ctx->Attr<double>("epsilon");
CHECK_GE(epsilon, CUDNN_BN_MIN_EPSILON);
LayerNormCudnnBnCtx bn_ctx(x->shape(), mean->shape(), x->data_type());
OF_CUDNN_CHECK(cudnnBatchNormalizationBackward(
ctx->device_ctx()->cudnn_handle(), bn_ctx.mode(), sp_alpha, sp_beta, CudnnSPOnePtr<T>(),
CudnnSPZeroPtr<T>(), bn_ctx.data_tensor_desc(), x->dptr<T>(), bn_ctx.data_tensor_desc(),
dy->dptr<T>(), bn_ctx.data_tensor_desc(), dx->mut_dptr<T>(), bn_ctx.param_tensor_desc(),
reinterpret_cast<const BNParamT*>(cudnn_bn_scale_ones_dptr),
reinterpret_cast<BNParamT*>(cudnn_bn_scale_diff_buf_dptr),
reinterpret_cast<BNParamT*>(cudnn_bn_bias_diff_buf_dptr), epsilon, mean->dptr(),
inv_variance->dptr()));
};
};
#define REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(dtype, bn_param_dtype) \
REGISTER_USER_KERNEL("layer_norm_grad") \
.SetCreateFn<LayerNormGradGpuKernel<dtype, bn_param_dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dy", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](oneflow::user_op::InferContext* ctx) { \
user_op::TensorDesc* mean = ctx->TensorDesc4ArgNameAndIndex("mean", 0); \
const DataType& data_type = mean->data_type(); \
const int64_t elem_cnt = mean->shape().elem_cnt(); \
return GetCudaAlignedSize(elem_cnt * GetSizeOfDataType(data_type)) * 3; \
}) \
.SetInplaceProposalFn([](const user_op::InferContext& ctx, \
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
if (ctx.has_input("_add_to_output", 0)) { \
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "_add_to_output", 0, true)); \
} \
return Maybe<void>::Ok(); \
});
REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(float, float)
REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(double, double)
REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(float16, float)
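// Param-grad kernel: if gamma_diff, beta_diff and normalized_diff are all requested and
// the fused kernel can be resident (occupancy > 0 for the required shared memory), a
// single LayerNormParamGradImpl launch produces all three; otherwise each output is
// computed independently with NdarrayUtil broadcast/reduce primitives.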
template<typename T>
class LayerNormParamGradGpuKernel final : public user_op::OpKernel {
public:
LayerNormParamGradGpuKernel() = default;
~LayerNormParamGradGpuKernel() = default;
private:
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
void Compute(user_op::KernelComputeContext* ctx) const override {
using NdUtil = NdarrayUtil<DeviceType::kGPU, T>;
auto Val = NdUtil::GetValNdarrayBuilder();
auto Var = NdUtil::GetVarNdarrayBuilder();
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* beta_diff = ctx->Tensor4ArgNameAndIndex("beta_diff", 0);
user_op::Tensor* gamma_diff = ctx->Tensor4ArgNameAndIndex("gamma_diff", 0);
user_op::Tensor* normalized_diff = ctx->Tensor4ArgNameAndIndex("normalized_diff", 0);
user_op::Tensor* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0);
const bool has_beta_diff = beta_diff != nullptr;
const bool has_gamma_diff = gamma_diff != nullptr;
const bool has_normalized_diff = normalized_diff != nullptr;
const bool has_gamma = gamma != nullptr;
const int64_t begin_params_axis = ctx->Attr<int64_t>("begin_params_axis");
const int64_t elem_cnt = dy->shape().elem_cnt();
const int64_t m = dy->shape().Count(begin_params_axis);
int max_active_blocks;
OF_CUDA_CHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, LayerNormParamGradImpl<T, int64_t>, GetLayerNormParamGradBlockSize(),
GetParamGradDynamicSharedMemorySize<T>(m)));
if (has_gamma_diff && has_beta_diff && has_normalized_diff && max_active_blocks > 0) {
const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0);
Memset<DeviceType::kGPU>(ctx->device_ctx(), gamma_diff->mut_dptr<T>(), 0,
gamma_diff->shape().elem_cnt() * sizeof(T));
Memset<DeviceType::kGPU>(ctx->device_ctx(), beta_diff->mut_dptr<T>(), 0,
beta_diff->shape().elem_cnt() * sizeof(T));
if (elem_cnt > static_cast<int64_t>(GetMaxVal<int32_t>() / 2)) {
hipLaunchKernelGGL(( LayerNormParamGradImpl<T, int64_t>)
, dim3(GetLayerNormParamGradNumBlocks(elem_cnt)), dim3(GetLayerNormParamGradBlockSize()),
GetParamGradDynamicSharedMemorySize<T>(m), ctx->device_ctx()->cuda_stream(),
elem_cnt, m, dy->dptr<T>(), normalized->dptr<T>(), gamma->dptr<T>(),
gamma_diff->mut_dptr<T>(), beta_diff->mut_dptr<T>(),
normalized_diff->mut_dptr<T>());
} else {
hipLaunchKernelGGL(( LayerNormParamGradImpl<T, int32_t>)
, dim3(GetLayerNormParamGradNumBlocks(elem_cnt)), dim3(GetLayerNormParamGradBlockSize()),
GetParamGradDynamicSharedMemorySize<T>(m), ctx->device_ctx()->cuda_stream(),
static_cast<int32_t>(elem_cnt), static_cast<int32_t>(m), dy->dptr<T>(),
normalized->dptr<T>(), gamma->dptr<T>(), gamma_diff->mut_dptr<T>(),
beta_diff->mut_dptr<T>(), normalized_diff->mut_dptr<T>());
}
} else {
if (has_beta_diff) {
user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0);
CHECK_EQ(m, beta_diff->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, beta_diff->mut_dptr<T>()),
Val({n, m}, dy->dptr<T>()), Var({n, m}, reduce_buf->mut_dptr<T>()));
}
if (has_gamma_diff) {
const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0);
user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0);
CHECK_EQ(m, gamma_diff->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, reduce_buf->mut_dptr<T>()),
Val({n, m}, normalized->dptr<T>()), Val({n, m}, dy->dptr<T>()));
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, gamma_diff->mut_dptr<T>()),
Val({n, m}, reduce_buf->dptr<T>()),
Var({n, m}, reduce_buf->mut_dptr<T>()));
}
if (has_normalized_diff) {
if (has_gamma) {
CHECK_EQ(m, gamma->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, normalized_diff->mut_dptr<T>()),
Val({n, m}, dy->dptr<T>()), Val({1, m}, gamma->dptr<T>()));
} else {
Memcpy<DeviceType::kGPU>(ctx->device_ctx(), normalized_diff->mut_dptr<void>(),
dy->dptr<void>(),
dy->shape().elem_cnt() * GetSizeOfDataType(dy->data_type()));
}
}
}
};
};
#define REGISTER_LAYER_NORM_PARAM_GRAD_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("layer_norm_param_grad") \
.SetCreateFn<LayerNormParamGradGpuKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dy", 0) == GetDataType<dtype>::value));
REGISTER_LAYER_NORM_PARAM_GRAD_GPU_KERNEL(float)
REGISTER_LAYER_NORM_PARAM_GRAD_GPU_KERNEL(double)
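// float16 specialization of the param-grad kernel: the fused path writes per-block
// partial diffs into tmp_buffer (three aligned regions: tmp_gamma_diff, tmp_beta_diff,
// tmp_reduce_buf, sized in SetInferTmpSizeFn below) and reduces them afterwards.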
class LayerNormParamGradGpuHalfKernel final : public user_op::OpKernel {
public:
LayerNormParamGradGpuHalfKernel() = default;
~LayerNormParamGradGpuHalfKernel() = default;
private:
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
void Compute(user_op::KernelComputeContext* ctx) const override {
using NdUtil = NdarrayUtil<DeviceType::kGPU, float16>;
auto Val = NdUtil::GetValNdarrayBuilder();
auto Var = NdUtil::GetVarNdarrayBuilder();
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* beta_diff = ctx->Tensor4ArgNameAndIndex("beta_diff", 0);
user_op::Tensor* gamma_diff = ctx->Tensor4ArgNameAndIndex("gamma_diff", 0);
user_op::Tensor* normalized_diff = ctx->Tensor4ArgNameAndIndex("normalized_diff", 0);
user_op::Tensor* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0);
const bool has_beta_diff = beta_diff != nullptr;
const bool has_gamma_diff = gamma_diff != nullptr;
const bool has_normalized_diff = normalized_diff != nullptr;
const bool has_gamma = gamma != nullptr;
const int64_t begin_params_axis = ctx->Attr<int64_t>("begin_params_axis");
const int64_t elem_cnt = dy->shape().elem_cnt();
const int64_t m = dy->shape().Count(begin_params_axis);
int max_active_blocks;
OF_CUDA_CHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, LayerNormParamGradHalfImpl<int64_t>, GetLayerNormParamGradBlockSize(),
GetParamGradDynamicSharedMemorySize<float16>(m)));
if (has_gamma_diff && has_beta_diff && has_normalized_diff && max_active_blocks > 0) {
const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const int64_t num_blocks = GetLayerNormParamGradNumBlocks(dy->shape().elem_cnt());
const size_t tmp_diff_size = GetCudaAlignedSize(num_blocks * m * sizeof(float16));
float16* tmp_gamma_diff = tmp_buffer->mut_dptr<float16>();
float16* tmp_beta_diff =
reinterpret_cast<float16*>(tmp_buffer->mut_dptr<char>() + tmp_diff_size);
float16* tmp_reduce_buf =
reinterpret_cast<float16*>(tmp_buffer->mut_dptr<char>() + 2 * tmp_diff_size);
CHECK_GE(tmp_buffer->shape().elem_cnt(), 3 * tmp_diff_size);
if (elem_cnt > static_cast<int64_t>(GetMaxVal<int32_t>() / 2)) {
hipLaunchKernelGGL(( LayerNormParamGradHalfImpl<int64_t>)
, dim3(GetLayerNormParamGradNumBlocks(elem_cnt)), dim3(GetLayerNormParamGradBlockSize()),
GetParamGradDynamicSharedMemorySize<float16>(m), ctx->device_ctx()->cuda_stream(),
elem_cnt, m, dy->dptr<half>(), normalized->dptr<half>(), gamma->dptr<half>(),
reinterpret_cast<half*>(tmp_gamma_diff), reinterpret_cast<half*>(tmp_beta_diff),
normalized_diff->mut_dptr<half>());
} else {
hipLaunchKernelGGL(( LayerNormParamGradHalfImpl<int32_t>)
, dim3(GetLayerNormParamGradNumBlocks(elem_cnt)), dim3(GetLayerNormParamGradBlockSize()),
GetParamGradDynamicSharedMemorySize<float16>(m), ctx->device_ctx()->cuda_stream(),
static_cast<int32_t>(elem_cnt), static_cast<int32_t>(m), dy->dptr<half>(),
normalized->dptr<half>(), gamma->dptr<half>(),
reinterpret_cast<half*>(tmp_gamma_diff), reinterpret_cast<half*>(tmp_beta_diff),
normalized_diff->mut_dptr<half>());
}
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, gamma_diff->mut_dptr<float16>()),
Val({num_blocks, m}, tmp_gamma_diff), Var({num_blocks, m}, tmp_reduce_buf));
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, beta_diff->mut_dptr<float16>()),
Val({num_blocks, m}, tmp_beta_diff), Var({num_blocks, m}, tmp_reduce_buf));
} else {
if (has_beta_diff) {
user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0);
CHECK_EQ(m, beta_diff->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, beta_diff->mut_dptr<float16>()),
Val({n, m}, dy->dptr<float16>()),
Var({n, m}, reduce_buf->mut_dptr<float16>()));
}
if (has_gamma_diff) {
const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0);
user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0);
CHECK_EQ(m, gamma_diff->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, reduce_buf->mut_dptr<float16>()),
Val({n, m}, normalized->dptr<float16>()),
Val({n, m}, dy->dptr<float16>()));
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, gamma_diff->mut_dptr<float16>()),
Val({n, m}, reduce_buf->dptr<float16>()),
Var({n, m}, reduce_buf->mut_dptr<float16>()));
}
if (has_normalized_diff) {
if (has_gamma) {
CHECK_EQ(m, gamma->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, normalized_diff->mut_dptr<float16>()),
Val({n, m}, dy->dptr<float16>()),
Val({1, m}, gamma->dptr<float16>()));
} else {
Memcpy<DeviceType::kGPU>(ctx->device_ctx(), normalized_diff->mut_dptr<void>(),
dy->dptr<void>(),
dy->shape().elem_cnt() * GetSizeOfDataType(dy->data_type()));
}
}
}
}
};
REGISTER_USER_KERNEL("layer_norm_param_grad")
.SetCreateFn<LayerNormParamGradGpuHalfKernel>()
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")
& (user_op::HobDataType("dy", 0) == DataType::kFloat16))
.SetInferTmpSizeFn([](user_op::InferContext* ctx) {
const int64_t begin_params_axis = ctx->Attr<int64_t>("begin_params_axis");
const bool has_gamma_diff = ctx->has_output("gamma_diff", 0);
const bool has_beta_diff = ctx->has_output("beta_diff", 0);
const bool has_normalized_diff = ctx->has_output("normalized_diff", 0);
const auto* dy = ctx->TensorDesc4ArgNameAndIndex("dy", 0);
const int64_t instance_size = dy->shape().Count(begin_params_axis);
size_t tmp_buffer_size = 0;
if (has_gamma_diff && has_beta_diff && has_normalized_diff) {
const size_t tmp_gamma_diff =
GetCudaAlignedSize(GetLayerNormParamGradNumBlocks(dy->shape().elem_cnt())
* instance_size * sizeof(float16));
const size_t tmp_beta_diff = tmp_gamma_diff;
const size_t tmp_reduce_buf = tmp_gamma_diff;
tmp_buffer_size = tmp_gamma_diff + tmp_beta_diff + tmp_reduce_buf;
} else {
tmp_buffer_size = 0;
}
return tmp_buffer_size;
});
} // namespace oneflow
| e2c4450802cc2b5ed713aba836f24ea8d2f2bbba.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/device/cudnn_util.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/ndarray/ndarray_util.h"
#include "oneflow/core/cuda/atomic.cuh"
#include <cub/cub.cuh>
namespace oneflow {
namespace {
class LayerNormCudnnBnCtx final {
public:
LayerNormCudnnBnCtx(const ShapeView& data_shape, const ShapeView& param_shape,
DataType data_type) {
const int64_t cudnn_c = param_shape.elem_cnt();
CHECK_EQ(data_shape.elem_cnt() % cudnn_c, 0);
const int64_t cudnn_w = data_shape.elem_cnt() / cudnn_c;
CHECK_LT(cudnn_c, GetMaxVal<int32_t>());
CHECK_LT(cudnn_w, GetMaxVal<int32_t>());
data_tensor_desc_.reset(new CudnnTensorDesc(CUDNN_TENSOR_NCHW, data_type, 1,
static_cast<int32_t>(cudnn_c), 1,
static_cast<int32_t>(cudnn_w)));
DataType param_dtype = data_type == DataType::kFloat16 ? DataType::kFloat : data_type;
param_tensor_desc_.reset(new CudnnTensorDesc(CUDNN_TENSOR_NCHW, param_dtype, 1,
static_cast<int32_t>(cudnn_c), 1, 1));
#if (CUDNN_VERSION >= 7000)
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
#else
mode_ = CUDNN_BATCHNORM_SPATIAL;
#endif
}
~LayerNormCudnnBnCtx() = default;
const cudnnTensorDescriptor_t& data_tensor_desc() const { return data_tensor_desc_->Get(); }
const cudnnTensorDescriptor_t& param_tensor_desc() const { return param_tensor_desc_->Get(); }
cudnnBatchNormMode_t mode() const { return mode_; };
private:
std::unique_ptr<CudnnTensorDesc> data_tensor_desc_;
std::unique_ptr<CudnnTensorDesc> param_tensor_desc_;
cudnnBatchNormMode_t mode_;
};
template<typename T, bool do_scale, bool do_center>
__global__ void InstanceScaleCenterGpu(const int64_t elem_cnt, const int64_t instance_size,
const T* in, const T* gamma, const T* beta, T* out) {
CUDA_1D_KERNEL_LOOP_T(int64_t, i, elem_cnt) {
const int64_t elem_id = i % instance_size;
T v = in[i];
if (do_scale) { v *= gamma[elem_id]; }
if (do_center) { v += beta[elem_id]; }
out[i] = v;
}
}
template<bool do_scale, bool do_center>
__global__ void InstanceScaleCenterH2Gpu(const int64_t h2_elem_cnt, const int64_t h2_instance_size,
const half* in, const half* gamma, const half* beta,
half* out) {
const auto* in_h2 = reinterpret_cast<const half2*>(in);
const auto* gamma_h2 = reinterpret_cast<const half2*>(gamma);
const auto* beta_h2 = reinterpret_cast<const half2*>(beta);
auto* out_h2 = reinterpret_cast<half2*>(out);
CUDA_1D_KERNEL_LOOP_T(int64_t, i, h2_elem_cnt) {
const int64_t elem_id = i % h2_instance_size;
half2 v2 = in_h2[i];
if (do_scale) { v2 = __hmul2(v2, gamma_h2[elem_id]); }
if (do_center) { v2 = __hadd2(v2, beta_h2[elem_id]); }
out_h2[i] = v2;
}
}
template<typename T>
void InstanceScaleCenter(DeviceCtx* ctx, const int64_t batch_size, const int64_t instance_size,
const T* in, const T* gamma, const T* beta, T* out) {
const int64_t elem_cnt = batch_size * instance_size;
if (beta != nullptr && gamma != nullptr) { // scale and center
InstanceScaleCenterGpu<T, true, true>
<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
elem_cnt, instance_size, in, gamma, beta, out);
} else if (gamma != nullptr) { // scale only
InstanceScaleCenterGpu<T, true, false>
<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
elem_cnt, instance_size, in, gamma, nullptr, out);
} else if (beta != nullptr) { // center only
InstanceScaleCenterGpu<T, false, true>
<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
elem_cnt, instance_size, in, nullptr, beta, out);
} else {
UNIMPLEMENTED();
}
}
void InstanceScaleCenterH2(DeviceCtx* ctx, const int64_t batch_size, const int64_t instance_size,
const half* in, const half* gamma, const half* beta, half* out) {
CHECK_EQ(instance_size % 2, 0);
const int64_t elem_cnt_h2 = batch_size * instance_size / 2;
const int64_t instance_size_h2 = instance_size / 2;
if (beta != nullptr && gamma != nullptr) { // scale and center
InstanceScaleCenterH2Gpu<true, true>
<<<BlocksNum4ThreadsNum(elem_cnt_h2), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
elem_cnt_h2, instance_size_h2, in, gamma, beta, out);
} else if (gamma != nullptr) { // scale only
InstanceScaleCenterH2Gpu<true, false>
<<<BlocksNum4ThreadsNum(elem_cnt_h2), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
elem_cnt_h2, instance_size_h2, in, gamma, nullptr, out);
} else if (beta != nullptr) { // center only
InstanceScaleCenterH2Gpu<false, true>
<<<BlocksNum4ThreadsNum(elem_cnt_h2), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
elem_cnt_h2, instance_size_h2, in, nullptr, beta, out);
} else {
UNIMPLEMENTED();
}
}
template<>
void InstanceScaleCenter<float16>(DeviceCtx* ctx, const int64_t batch_size,
const int64_t instance_size, const float16* in,
const float16* gamma, const float16* beta, float16* out) {
if (instance_size % 2 == 0) {
InstanceScaleCenterH2(ctx, batch_size, instance_size, reinterpret_cast<const half*>(in),
reinterpret_cast<const half*>(gamma), reinterpret_cast<const half*>(beta),
reinterpret_cast<half*>(out));
} else {
InstanceScaleCenter<half>(ctx, batch_size, instance_size, reinterpret_cast<const half*>(in),
reinterpret_cast<const half*>(gamma),
reinterpret_cast<const half*>(beta), reinterpret_cast<half*>(out));
}
}
constexpr int64_t kLayerNormForwardGpuBlockSize = 256;
template<typename T>
struct LayerNormUtil {
using ComputeType = T;
__device__ static ComputeType ToComputeType(T v) { return v; }
__device__ static T FromComputeType(ComputeType v) { return v; }
};
template<>
struct LayerNormUtil<half> {
using ComputeType = float;
__device__ static ComputeType ToComputeType(half v) { return __half2float(v); }
__device__ static half FromComputeType(ComputeType v) { return __float2half(v); }
};
template<typename T>
int GetForwardDynamicSharedMemorySize(const int norm_size) {
return norm_size * sizeof(typename LayerNormUtil<T>::ComputeType);
}
int GetLayerNormForwardBlockSize() { return kLayerNormForwardGpuBlockSize; }
int GetLayerNormForwardNumBlocks(const int num_instances) {
return std::min(static_cast<int>(num_instances), kCudaMaxBlocksNum);
}
template<typename T, typename ComputeType>
__global__ void LayerNormForwardImpl(const int num_instances, const int norm_size,
const double epsilon, const T* x, const T* gamma,
const T* beta, ComputeType* mean, ComputeType* inv_variance,
T* normalized, T* y) {
using LU = LayerNormUtil<T>;
extern __shared__ __align__(sizeof(double)) unsigned char fw_shared_buf[];
auto* compute_buf = reinterpret_cast<ComputeType*>(fw_shared_buf);
__shared__ ComputeType row_mean_shared;
__shared__ ComputeType row_inv_var_shared;
typedef cub::BlockReduce<ComputeType, kLayerNormForwardGpuBlockSize> BlockReduce;
__shared__ typename BlockReduce::TempStorage cub_mean_reduce_tmp_storage;
__shared__ typename BlockReduce::TempStorage cub_variance_reduce_tmp_storage;
ComputeType inv_norm_size = static_cast<ComputeType>(1.0) / static_cast<ComputeType>(norm_size);
for (int row = blockIdx.x; row < num_instances; row += gridDim.x) {
const int row_offset = row * norm_size;
const T* x_row = x + row_offset;
ComputeType thread_sum = 0;
ComputeType thread_square_sum = 0;
const int tid = threadIdx.x;
for (int col = tid; col < norm_size; col += blockDim.x) {
const ComputeType val = LU::ToComputeType(x_row[col]);
compute_buf[col] = val;
thread_sum += val;
thread_square_sum += val * val;
}
__syncthreads();
ComputeType block_sum = BlockReduce(cub_mean_reduce_tmp_storage).Reduce(thread_sum, cub::Sum());
ComputeType block_square_sum =
BlockReduce(cub_variance_reduce_tmp_storage).Reduce(thread_square_sum, cub::Sum());
if (tid == 0) {
ComputeType row_mean = block_sum * inv_norm_size;
row_mean_shared = row_mean;
mean[row] = row_mean;
ComputeType row_variance =
max(block_square_sum * inv_norm_size - row_mean * row_mean, static_cast<ComputeType>(0));
ComputeType row_inv_var = rsqrt(row_variance + static_cast<ComputeType>(epsilon));
row_inv_var_shared = row_inv_var;
inv_variance[row] = row_inv_var;
}
__syncthreads();
ComputeType mean = row_mean_shared;
ComputeType inv_var = row_inv_var_shared;
for (int col = threadIdx.x; col < norm_size; col += blockDim.x) {
int offset = row_offset + col;
ComputeType val = compute_buf[col];
val = (val - mean) * inv_var;
if (gamma != nullptr || beta != nullptr) {
int elem_id = col;
if (gamma != nullptr) {
normalized[offset] = LU::FromComputeType(val);
val *= LU::ToComputeType(gamma[elem_id]);
}
if (beta != nullptr) { val += LU::ToComputeType(beta[elem_id]); }
}
y[offset] = LU::FromComputeType(val);
}
}
}
template<typename T>
void LayerNormForwardGpu(DeviceCtx* ctx, const int num_instances, const int norm_size,
const double epsilon, const T* x_ptr, const T* gamma_ptr,
const T* beta_ptr, T* normalized_ptr, T* y_ptr, user_op::Tensor* mean,
user_op::Tensor* inv_variance) {
LayerNormForwardImpl<T, typename LayerNormUtil<T>::ComputeType>
<<<GetLayerNormForwardNumBlocks(num_instances), GetLayerNormForwardBlockSize(),
GetForwardDynamicSharedMemorySize<T>(norm_size), ctx->cuda_stream()>>>(
num_instances, norm_size, epsilon, x_ptr, gamma_ptr, beta_ptr,
mean->mut_dptr<typename LayerNormUtil<T>::ComputeType>(),
inv_variance->mut_dptr<typename LayerNormUtil<T>::ComputeType>(), normalized_ptr, y_ptr);
}
template<>
void LayerNormForwardGpu<float16>(DeviceCtx* ctx, const int num_instances, const int norm_size,
const double epsilon, const float16* x_ptr,
const float16* gamma_ptr, const float16* beta_ptr,
float16* normalized_ptr, float16* y_ptr, user_op::Tensor* mean,
user_op::Tensor* inv_variance) {
LayerNormForwardImpl<half, typename LayerNormUtil<half>::ComputeType>
<<<GetLayerNormForwardNumBlocks(num_instances), GetLayerNormForwardBlockSize(),
GetForwardDynamicSharedMemorySize<half>(norm_size), ctx->cuda_stream()>>>(
num_instances, norm_size, epsilon, reinterpret_cast<const half*>(x_ptr),
reinterpret_cast<const half*>(gamma_ptr), reinterpret_cast<const half*>(beta_ptr),
mean->mut_dptr<typename LayerNormUtil<half>::ComputeType>(),
inv_variance->mut_dptr<typename LayerNormUtil<half>::ComputeType>(),
reinterpret_cast<half*>(normalized_ptr), reinterpret_cast<half*>(y_ptr));
}
int GetForwardFusedKernelMinNormSize() { return 64; }
template<typename T>
int GetForwardFusedKernelMaxActiveBlocks(const int32_t norm_size) {
int max_active_blocks;
OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, LayerNormForwardImpl<T, typename LayerNormUtil<T>::ComputeType>,
GetLayerNormForwardBlockSize(), GetForwardDynamicSharedMemorySize<T>(norm_size)));
return max_active_blocks;
}
template<>
int GetForwardFusedKernelMaxActiveBlocks<float16>(const int32_t norm_size) {
int max_active_blocks;
OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, LayerNormForwardImpl<half, typename LayerNormUtil<half>::ComputeType>,
GetLayerNormForwardBlockSize(), GetForwardDynamicSharedMemorySize<half>(norm_size)));
return max_active_blocks;
}
template<typename T>
bool IsForwardFusedKernelSupported(const int32_t norm_size, const int32_t instance_size) {
if (norm_size >= GetForwardFusedKernelMinNormSize() && norm_size % 32 == 0
&& GetForwardFusedKernelMaxActiveBlocks<T>(norm_size) > 0
&& (instance_size == 0 || norm_size == instance_size)) {
return true;
} else {
return false;
}
}
constexpr int64_t kLayerNormParamGradGpuBlockSize = 512;
int64_t GetLayerNormParamGradBlockSize() { return kLayerNormParamGradGpuBlockSize; }
int64_t GetLayerNormParamGradNumBlocks(const int64_t elem_cnt) {
return std::min(static_cast<int>((elem_cnt + kLayerNormParamGradGpuBlockSize - 1)
/ kLayerNormParamGradGpuBlockSize),
256);
}
template<typename T>
int64_t GetParamGradDynamicSharedMemorySize(const int64_t instance_size) {
return 2 * instance_size * sizeof(T);
}
template<>
int64_t GetParamGradDynamicSharedMemorySize<float16>(const int64_t instance_size) {
return 2 * instance_size * sizeof(float);
}
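// Fused param-grad kernel: each block accumulates per-parameter partial sums of
// dgamma (dy * normalized) and dbeta (dy) in dynamic shared memory, then flushes its
// partials into the global gamma_diff/beta_diff buffers with atomics. It also writes
// normalized_diff = gamma * dy as input for the data-grad pass.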
template<typename T, typename I>
__global__ void LayerNormParamGradImpl(const I n, const I instance_size, const T* dy,
const T* normalized, const T* gamma, T* gamma_diff,
T* beta_diff, T* normalized_diff) {
extern __shared__ __align__(sizeof(double)) unsigned char bw_shared_buf[];
auto* gamma_diff_sum_buf = reinterpret_cast<T*>(bw_shared_buf);
auto* beta_diff_sum_buf = gamma_diff_sum_buf + instance_size;
const I tid = threadIdx.x;
for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) {
gamma_diff_sum_buf[elem_id] = 0;
beta_diff_sum_buf[elem_id] = 0;
}
__syncthreads();
CUDA_1D_KERNEL_LOOP_T(I, i, n) {
const I elem_id = i % instance_size;
T dy_val = dy[i];
T normalized_val = normalized[i];
cuda::atomic::Add(&gamma_diff_sum_buf[elem_id], dy_val * normalized_val);
cuda::atomic::Add(&beta_diff_sum_buf[elem_id], dy_val);
T gamma_val = gamma[elem_id];
normalized_diff[i] = gamma_val * dy_val;
}
__syncthreads();
for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) {
cuda::atomic::Add(gamma_diff + elem_id, gamma_diff_sum_buf[elem_id]);
cuda::atomic::Add(beta_diff + elem_id, beta_diff_sum_buf[elem_id]);
}
}
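// Half-precision variant: partial sums are accumulated in float shared memory to limit
// rounding error, and each block writes its per-block partials to tmp_gamma_diff /
// tmp_beta_diff (laid out as [num_blocks, instance_size]); the caller reduces these to
// the final gamma_diff/beta_diff with NdarrayUtil::ReduceSum.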
template<typename I>
__global__ void LayerNormParamGradHalfImpl(const I n, const I instance_size, const half* dy,
const half* normalized, const half* gamma,
half* tmp_gamma_diff, half* tmp_beta_diff,
half* normalized_diff) {
extern __shared__ __align__(sizeof(double)) unsigned char bw_shared_buf[];
auto* gamma_diff_sum_buf = reinterpret_cast<float*>(bw_shared_buf);
auto* beta_diff_sum_buf = gamma_diff_sum_buf + instance_size;
const I tid = threadIdx.x;
for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) {
gamma_diff_sum_buf[elem_id] = 0;
beta_diff_sum_buf[elem_id] = 0;
}
__syncthreads();
CUDA_1D_KERNEL_LOOP_T(I, i, n) {
const I elem_id = i % instance_size;
half dy_val = dy[i];
half normalized_val = normalized[i];
cuda::atomic::Add(&gamma_diff_sum_buf[elem_id],
__half2float(dy_val) * __half2float(normalized_val));
cuda::atomic::Add(&beta_diff_sum_buf[elem_id], __half2float(dy_val));
half gamma_val = gamma[elem_id];
normalized_diff[i] = __hmul(gamma_val, dy_val);
}
__syncthreads();
for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) {
const I offset = blockIdx.x * instance_size + elem_id;
tmp_gamma_diff[offset] = __float2half(gamma_diff_sum_buf[elem_id]);
tmp_beta_diff[offset] = __float2half(beta_diff_sum_buf[elem_id]);
}
}
} // namespace
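// Forward kernel: when the fused single-pass kernel is supported for this norm_size it
// computes mean/inv_variance and the (optionally scaled/centered) output directly;
// otherwise normalization is delegated to cudnnBatchNormalizationForwardTraining with
// constant scale=1/bias=0 parameters held in tmp_buffer, followed by a separate
// per-instance scale/center pass.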
template<typename T, typename BNParamT>
class LayerNormGpuKernel final : public user_op::OpKernel {
public:
LayerNormGpuKernel() = default;
~LayerNormGpuKernel() = default;
private:
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
user_op::Tensor* mean = ctx->Tensor4ArgNameAndIndex("mean", 0);
user_op::Tensor* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0);
const bool scale = ctx->Attr<bool>("scale");
const bool center = ctx->Attr<bool>("center");
user_op::Tensor* normalized = scale ? ctx->Tensor4ArgNameAndIndex("normalized", 0) : y;
const double epsilon = ctx->Attr<double>("epsilon");
CHECK_GE(epsilon, CUDNN_BN_MIN_EPSILON);
const int32_t num_instances = mean->shape().elem_cnt();
const int32_t norm_size = x->shape().elem_cnt() / num_instances;
int32_t instance_size = 0;
const T* gamma_ptr = nullptr;
const T* beta_ptr = nullptr;
if (scale || center) {
if (scale) {
const user_op::Tensor* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0);
instance_size = gamma->shape().elem_cnt();
gamma_ptr = gamma->dptr<T>();
}
if (center) {
const user_op::Tensor* beta = ctx->Tensor4ArgNameAndIndex("beta", 0);
if (gamma_ptr) {
CHECK_EQ(beta->shape().elem_cnt(), instance_size);
} else {
instance_size = beta->shape().elem_cnt();
}
beta_ptr = beta->dptr<T>();
}
CHECK_EQ(y->shape().elem_cnt() % instance_size, 0);
}
if (IsForwardFusedKernelSupported<T>(norm_size, instance_size)) {
LayerNormForwardGpu<T>(ctx->device_ctx(), num_instances, norm_size, epsilon, x->dptr<T>(),
gamma_ptr, beta_ptr, normalized->mut_dptr<T>(), y->mut_dptr<T>(), mean,
inv_variance);
} else {
LayerNormCudnnBnCtx bn_ctx(x->shape(), mean->shape(), x->data_type());
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const size_t aligned_buffer_size =
GetCudaAlignedSize(mean->shape().elem_cnt() * GetSizeOfDataType(mean->data_type()));
char* cudnn_bn_scale_ones_dptr = tmp_buffer->mut_dptr<char>();
char* cudnn_bn_bias_zeros_dptr = cudnn_bn_scale_ones_dptr + aligned_buffer_size;
NewKernelUtil<DeviceType::kGPU>::Fill(ctx->device_ctx(), mean->shape().elem_cnt(),
static_cast<BNParamT>(1),
reinterpret_cast<BNParamT*>(cudnn_bn_scale_ones_dptr));
NewKernelUtil<DeviceType::kGPU>::Fill(ctx->device_ctx(), mean->shape().elem_cnt(),
static_cast<BNParamT>(0),
reinterpret_cast<BNParamT*>(cudnn_bn_bias_zeros_dptr));
OF_CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
ctx->device_ctx()->cudnn_handle(), bn_ctx.mode(), CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(),
bn_ctx.data_tensor_desc(), x->dptr<T>(), bn_ctx.data_tensor_desc(),
normalized->mut_dptr<T>(), bn_ctx.param_tensor_desc(),
reinterpret_cast<BNParamT*>(cudnn_bn_scale_ones_dptr),
reinterpret_cast<BNParamT*>(cudnn_bn_bias_zeros_dptr), 1.0, nullptr, nullptr, epsilon,
mean->mut_dptr(), inv_variance->mut_dptr()));
if (scale || center) {
const int64_t batch_size = y->shape().elem_cnt() / instance_size;
InstanceScaleCenter<T>(ctx->device_ctx(), batch_size, instance_size, normalized->dptr<T>(),
gamma_ptr, beta_ptr, y->mut_dptr<T>());
}
}
};
};
#define REGISTER_LAYER_NORM_GPU_KERNEL(dtype, bn_param_dtype) \
REGISTER_USER_KERNEL("layer_norm") \
.SetCreateFn<LayerNormGpuKernel<dtype, bn_param_dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("x", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](oneflow::user_op::InferContext* ctx) { \
user_op::TensorDesc* mean = ctx->TensorDesc4ArgNameAndIndex("mean", 0); \
const DataType& data_type = mean->data_type(); \
const int64_t elem_cnt = mean->shape().elem_cnt(); \
return GetCudaAlignedSize(elem_cnt * GetSizeOfDataType(data_type)) * 2; \
});
REGISTER_LAYER_NORM_GPU_KERNEL(float, float)
REGISTER_LAYER_NORM_GPU_KERNEL(double, double)
REGISTER_LAYER_NORM_GPU_KERNEL(float16, float)
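// Data-grad kernel: reuses cudnnBatchNormalizationBackward with an all-ones scale
// (stored in tmp_buffer) so that dx only reflects the normalization; the scale/bias
// diff outputs required by cuDNN go to scratch space and are discarded. Optionally
// accumulates into dx when the "_add_to_output" input is present.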
template<typename T, typename BNParamT>
class LayerNormGradGpuKernel final : public user_op::OpKernel {
public:
LayerNormGradGpuKernel() = default;
~LayerNormGradGpuKernel() = default;
private:
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* mean = ctx->Tensor4ArgNameAndIndex("mean", 0);
const user_op::Tensor* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const size_t aligned_buffer_size =
GetCudaAlignedSize(mean->shape().elem_cnt() * GetSizeOfDataType(mean->data_type()));
char* cudnn_bn_scale_ones_dptr = tmp_buffer->mut_dptr<char>();
char* cudnn_bn_scale_diff_buf_dptr = cudnn_bn_scale_ones_dptr + aligned_buffer_size;
    // bias-diff scratch occupies the third aligned slot of tmp_buffer (sized 3x below)
    char* cudnn_bn_bias_diff_buf_dptr = cudnn_bn_scale_diff_buf_dptr + aligned_buffer_size;
NewKernelUtil<DeviceType::kGPU>::Fill(ctx->device_ctx(), mean->shape().elem_cnt(),
static_cast<BNParamT>(1),
reinterpret_cast<BNParamT*>(cudnn_bn_scale_ones_dptr));
const void* sp_alpha = CudnnSPOnePtr<T>();
const void* sp_beta;
if (ctx->has_input("_add_to_output", 0)) {
const user_op::Tensor* add_to_output = ctx->Tensor4ArgNameAndIndex("_add_to_output", 0);
CHECK_EQ(add_to_output->data_type(), dx->data_type());
CHECK_EQ(add_to_output->shape(), dx->shape());
Memcpy<DeviceType::kGPU>(
ctx->device_ctx(), dx->mut_dptr<void>(), add_to_output->dptr<void>(),
add_to_output->shape().elem_cnt() * GetSizeOfDataType(add_to_output->data_type()));
sp_beta = CudnnSPOnePtr<T>();
} else {
sp_beta = CudnnSPZeroPtr<T>();
}
const double epsilon = ctx->Attr<double>("epsilon");
CHECK_GE(epsilon, CUDNN_BN_MIN_EPSILON);
LayerNormCudnnBnCtx bn_ctx(x->shape(), mean->shape(), x->data_type());
OF_CUDNN_CHECK(cudnnBatchNormalizationBackward(
ctx->device_ctx()->cudnn_handle(), bn_ctx.mode(), sp_alpha, sp_beta, CudnnSPOnePtr<T>(),
CudnnSPZeroPtr<T>(), bn_ctx.data_tensor_desc(), x->dptr<T>(), bn_ctx.data_tensor_desc(),
dy->dptr<T>(), bn_ctx.data_tensor_desc(), dx->mut_dptr<T>(), bn_ctx.param_tensor_desc(),
reinterpret_cast<const BNParamT*>(cudnn_bn_scale_ones_dptr),
reinterpret_cast<BNParamT*>(cudnn_bn_scale_diff_buf_dptr),
reinterpret_cast<BNParamT*>(cudnn_bn_bias_diff_buf_dptr), epsilon, mean->dptr(),
inv_variance->dptr()));
};
};
#define REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(dtype, bn_param_dtype) \
REGISTER_USER_KERNEL("layer_norm_grad") \
.SetCreateFn<LayerNormGradGpuKernel<dtype, bn_param_dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dy", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](oneflow::user_op::InferContext* ctx) { \
user_op::TensorDesc* mean = ctx->TensorDesc4ArgNameAndIndex("mean", 0); \
const DataType& data_type = mean->data_type(); \
const int64_t elem_cnt = mean->shape().elem_cnt(); \
return GetCudaAlignedSize(elem_cnt * GetSizeOfDataType(data_type)) * 3; \
}) \
.SetInplaceProposalFn([](const user_op::InferContext& ctx, \
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
if (ctx.has_input("_add_to_output", 0)) { \
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "_add_to_output", 0, true)); \
} \
return Maybe<void>::Ok(); \
});
REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(float, float)
REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(double, double)
REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(float16, float)
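// Param-grad kernel: if gamma_diff, beta_diff and normalized_diff are all requested and
// the fused kernel can be resident (occupancy > 0 for the required shared memory), a
// single LayerNormParamGradImpl launch produces all three; otherwise each output is
// computed independently with NdarrayUtil broadcast/reduce primitives.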
template<typename T>
class LayerNormParamGradGpuKernel final : public user_op::OpKernel {
public:
LayerNormParamGradGpuKernel() = default;
~LayerNormParamGradGpuKernel() = default;
private:
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
void Compute(user_op::KernelComputeContext* ctx) const override {
using NdUtil = NdarrayUtil<DeviceType::kGPU, T>;
auto Val = NdUtil::GetValNdarrayBuilder();
auto Var = NdUtil::GetVarNdarrayBuilder();
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* beta_diff = ctx->Tensor4ArgNameAndIndex("beta_diff", 0);
user_op::Tensor* gamma_diff = ctx->Tensor4ArgNameAndIndex("gamma_diff", 0);
user_op::Tensor* normalized_diff = ctx->Tensor4ArgNameAndIndex("normalized_diff", 0);
user_op::Tensor* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0);
const bool has_beta_diff = beta_diff != nullptr;
const bool has_gamma_diff = gamma_diff != nullptr;
const bool has_normalized_diff = normalized_diff != nullptr;
const bool has_gamma = gamma != nullptr;
const int64_t begin_params_axis = ctx->Attr<int64_t>("begin_params_axis");
const int64_t elem_cnt = dy->shape().elem_cnt();
const int64_t m = dy->shape().Count(begin_params_axis);
int max_active_blocks;
OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, LayerNormParamGradImpl<T, int64_t>, GetLayerNormParamGradBlockSize(),
GetParamGradDynamicSharedMemorySize<T>(m)));
if (has_gamma_diff && has_beta_diff && has_normalized_diff && max_active_blocks > 0) {
const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0);
Memset<DeviceType::kGPU>(ctx->device_ctx(), gamma_diff->mut_dptr<T>(), 0,
gamma_diff->shape().elem_cnt() * sizeof(T));
Memset<DeviceType::kGPU>(ctx->device_ctx(), beta_diff->mut_dptr<T>(), 0,
beta_diff->shape().elem_cnt() * sizeof(T));
if (elem_cnt > static_cast<int64_t>(GetMaxVal<int32_t>() / 2)) {
LayerNormParamGradImpl<T, int64_t>
<<<GetLayerNormParamGradNumBlocks(elem_cnt), GetLayerNormParamGradBlockSize(),
GetParamGradDynamicSharedMemorySize<T>(m), ctx->device_ctx()->cuda_stream()>>>(
elem_cnt, m, dy->dptr<T>(), normalized->dptr<T>(), gamma->dptr<T>(),
gamma_diff->mut_dptr<T>(), beta_diff->mut_dptr<T>(),
normalized_diff->mut_dptr<T>());
} else {
LayerNormParamGradImpl<T, int32_t>
<<<GetLayerNormParamGradNumBlocks(elem_cnt), GetLayerNormParamGradBlockSize(),
GetParamGradDynamicSharedMemorySize<T>(m), ctx->device_ctx()->cuda_stream()>>>(
static_cast<int32_t>(elem_cnt), static_cast<int32_t>(m), dy->dptr<T>(),
normalized->dptr<T>(), gamma->dptr<T>(), gamma_diff->mut_dptr<T>(),
beta_diff->mut_dptr<T>(), normalized_diff->mut_dptr<T>());
}
} else {
if (has_beta_diff) {
user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0);
CHECK_EQ(m, beta_diff->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, beta_diff->mut_dptr<T>()),
Val({n, m}, dy->dptr<T>()), Var({n, m}, reduce_buf->mut_dptr<T>()));
}
if (has_gamma_diff) {
const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0);
user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0);
CHECK_EQ(m, gamma_diff->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, reduce_buf->mut_dptr<T>()),
Val({n, m}, normalized->dptr<T>()), Val({n, m}, dy->dptr<T>()));
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, gamma_diff->mut_dptr<T>()),
Val({n, m}, reduce_buf->dptr<T>()),
Var({n, m}, reduce_buf->mut_dptr<T>()));
}
if (has_normalized_diff) {
if (has_gamma) {
CHECK_EQ(m, gamma->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, normalized_diff->mut_dptr<T>()),
Val({n, m}, dy->dptr<T>()), Val({1, m}, gamma->dptr<T>()));
} else {
Memcpy<DeviceType::kGPU>(ctx->device_ctx(), normalized_diff->mut_dptr<void>(),
dy->dptr<void>(),
dy->shape().elem_cnt() * GetSizeOfDataType(dy->data_type()));
}
}
}
};
};
#define REGISTER_LAYER_NORM_PARAM_GRAD_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("layer_norm_param_grad") \
.SetCreateFn<LayerNormParamGradGpuKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dy", 0) == GetDataType<dtype>::value));
REGISTER_LAYER_NORM_PARAM_GRAD_GPU_KERNEL(float)
REGISTER_LAYER_NORM_PARAM_GRAD_GPU_KERNEL(double)
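// float16 specialization of the param-grad kernel: the fused path writes per-block
// partial diffs into tmp_buffer (three aligned regions: tmp_gamma_diff, tmp_beta_diff,
// tmp_reduce_buf, sized in SetInferTmpSizeFn below) and reduces them afterwards.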
class LayerNormParamGradGpuHalfKernel final : public user_op::OpKernel {
public:
LayerNormParamGradGpuHalfKernel() = default;
~LayerNormParamGradGpuHalfKernel() = default;
private:
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
void Compute(user_op::KernelComputeContext* ctx) const override {
using NdUtil = NdarrayUtil<DeviceType::kGPU, float16>;
auto Val = NdUtil::GetValNdarrayBuilder();
auto Var = NdUtil::GetVarNdarrayBuilder();
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* beta_diff = ctx->Tensor4ArgNameAndIndex("beta_diff", 0);
user_op::Tensor* gamma_diff = ctx->Tensor4ArgNameAndIndex("gamma_diff", 0);
user_op::Tensor* normalized_diff = ctx->Tensor4ArgNameAndIndex("normalized_diff", 0);
user_op::Tensor* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0);
const bool has_beta_diff = beta_diff != nullptr;
const bool has_gamma_diff = gamma_diff != nullptr;
const bool has_normalized_diff = normalized_diff != nullptr;
const bool has_gamma = gamma != nullptr;
const int64_t begin_params_axis = ctx->Attr<int64_t>("begin_params_axis");
const int64_t elem_cnt = dy->shape().elem_cnt();
const int64_t m = dy->shape().Count(begin_params_axis);
int max_active_blocks;
OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, LayerNormParamGradHalfImpl<int64_t>, GetLayerNormParamGradBlockSize(),
GetParamGradDynamicSharedMemorySize<float16>(m)));
if (has_gamma_diff && has_beta_diff && has_normalized_diff && max_active_blocks > 0) {
const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const int64_t num_blocks = GetLayerNormParamGradNumBlocks(dy->shape().elem_cnt());
const size_t tmp_diff_size = GetCudaAlignedSize(num_blocks * m * sizeof(float16));
float16* tmp_gamma_diff = tmp_buffer->mut_dptr<float16>();
float16* tmp_beta_diff =
reinterpret_cast<float16*>(tmp_buffer->mut_dptr<char>() + tmp_diff_size);
float16* tmp_reduce_buf =
reinterpret_cast<float16*>(tmp_buffer->mut_dptr<char>() + 2 * tmp_diff_size);
CHECK_GE(tmp_buffer->shape().elem_cnt(), 3 * tmp_diff_size);
if (elem_cnt > static_cast<int64_t>(GetMaxVal<int32_t>() / 2)) {
LayerNormParamGradHalfImpl<int64_t>
<<<GetLayerNormParamGradNumBlocks(elem_cnt), GetLayerNormParamGradBlockSize(),
GetParamGradDynamicSharedMemorySize<float16>(m), ctx->device_ctx()->cuda_stream()>>>(
elem_cnt, m, dy->dptr<half>(), normalized->dptr<half>(), gamma->dptr<half>(),
reinterpret_cast<half*>(tmp_gamma_diff), reinterpret_cast<half*>(tmp_beta_diff),
normalized_diff->mut_dptr<half>());
} else {
LayerNormParamGradHalfImpl<int32_t>
<<<GetLayerNormParamGradNumBlocks(elem_cnt), GetLayerNormParamGradBlockSize(),
GetParamGradDynamicSharedMemorySize<float16>(m), ctx->device_ctx()->cuda_stream()>>>(
static_cast<int32_t>(elem_cnt), static_cast<int32_t>(m), dy->dptr<half>(),
normalized->dptr<half>(), gamma->dptr<half>(),
reinterpret_cast<half*>(tmp_gamma_diff), reinterpret_cast<half*>(tmp_beta_diff),
normalized_diff->mut_dptr<half>());
}
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, gamma_diff->mut_dptr<float16>()),
Val({num_blocks, m}, tmp_gamma_diff), Var({num_blocks, m}, tmp_reduce_buf));
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, beta_diff->mut_dptr<float16>()),
Val({num_blocks, m}, tmp_beta_diff), Var({num_blocks, m}, tmp_reduce_buf));
} else {
if (has_beta_diff) {
user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0);
CHECK_EQ(m, beta_diff->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, beta_diff->mut_dptr<float16>()),
Val({n, m}, dy->dptr<float16>()),
Var({n, m}, reduce_buf->mut_dptr<float16>()));
}
if (has_gamma_diff) {
const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0);
user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0);
CHECK_EQ(m, gamma_diff->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, reduce_buf->mut_dptr<float16>()),
Val({n, m}, normalized->dptr<float16>()),
Val({n, m}, dy->dptr<float16>()));
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, gamma_diff->mut_dptr<float16>()),
Val({n, m}, reduce_buf->dptr<float16>()),
Var({n, m}, reduce_buf->mut_dptr<float16>()));
}
if (has_normalized_diff) {
if (has_gamma) {
CHECK_EQ(m, gamma->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, normalized_diff->mut_dptr<float16>()),
Val({n, m}, dy->dptr<float16>()),
Val({1, m}, gamma->dptr<float16>()));
} else {
Memcpy<DeviceType::kGPU>(ctx->device_ctx(), normalized_diff->mut_dptr<void>(),
dy->dptr<void>(),
dy->shape().elem_cnt() * GetSizeOfDataType(dy->data_type()));
}
}
}
}
};
REGISTER_USER_KERNEL("layer_norm_param_grad")
.SetCreateFn<LayerNormParamGradGpuHalfKernel>()
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")
& (user_op::HobDataType("dy", 0) == DataType::kFloat16))
.SetInferTmpSizeFn([](user_op::InferContext* ctx) {
const int64_t begin_params_axis = ctx->Attr<int64_t>("begin_params_axis");
const bool has_gamma_diff = ctx->has_output("gamma_diff", 0);
const bool has_beta_diff = ctx->has_output("beta_diff", 0);
const bool has_normalized_diff = ctx->has_output("normalized_diff", 0);
const auto* dy = ctx->TensorDesc4ArgNameAndIndex("dy", 0);
const int64_t instance_size = dy->shape().Count(begin_params_axis);
size_t tmp_buffer_size = 0;
if (has_gamma_diff && has_beta_diff && has_normalized_diff) {
const size_t tmp_gamma_diff =
GetCudaAlignedSize(GetLayerNormParamGradNumBlocks(dy->shape().elem_cnt())
* instance_size * sizeof(float16));
const size_t tmp_beta_diff = tmp_gamma_diff;
const size_t tmp_reduce_buf = tmp_gamma_diff;
tmp_buffer_size = tmp_gamma_diff + tmp_beta_diff + tmp_reduce_buf;
} else {
tmp_buffer_size = 0;
}
return tmp_buffer_size;
});
} // namespace oneflow
|
a0afa0d27403dc927e432eb9215f2ac908c6f71c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define BLOCK_WIDTH 256
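// 128-bin ASCII histogram using shared-memory privatization: each block zeroes a private
// shared copy of the bins, accumulates its slice of the input with shared-memory atomics,
// then flushes the block totals to d_array_out with global atomics.
// Note (assumption): the global bins are zeroed only by the first block's threads at kernel
// start; correctness relies on that zeroing landing before other blocks flush their totals,
// which the launch does not explicitly guarantee.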
__global__ void histogram(char *d_array_in, int *d_array_out, int n)
{
__shared__ int shared_bin[128];
int i, index, blocks, iterations;
blocks = (n - 1) / BLOCK_WIDTH + 1;
iterations = 127 / (blocks * BLOCK_WIDTH) + 1;
for (i = 0; i < iterations; i++)
{
index = (blockIdx.x + i * blocks) * blockDim.x + threadIdx.x;
if (index < 128)
{
d_array_out[index] = 0;
}
}
iterations = 127 / BLOCK_WIDTH + 1;
for (i = 0; i < iterations; i++)
{
index = i * blockDim.x + threadIdx.x;
if (index < 128)
{
shared_bin[index] = 0;
}
__syncthreads();
}
index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < n)
{
atomicAdd(&shared_bin[d_array_in[index]], 1);
}
__syncthreads();
for (i = 0; i < iterations; i++)
{
index = i * blockDim.x + threadIdx.x;
if (index < 128)
{
atomicAdd(&d_array_out[index], shared_bin[index]);
}
__syncthreads();
}
return;
}
int main(int argc, char *argv[])
{
bool input_check = false;
bool expected_check = false;
bool output_check = false;
bool error_present = false;
bool expect_output = false;
bool output_pass;
char input_file_name[256];
char expected_file_name[256];
char output_file_name[256];
FILE *input_file = NULL;
FILE *expected_file = NULL;
FILE *output_file = NULL;
char *h_array_in = NULL;
int *h_array_out = NULL;
char *d_array_in = NULL;
int *d_array_out = NULL;
int *expectedOutput = NULL;
int i, n, num_bins, dataset_no;
for (i = 1; i < argc; i++)
{
if (strcmp(argv[i], "-i") == 0 && argc > i + 1)
{
if (argv[i + 1][0] != '-')
{
input_check = true;
strcpy(input_file_name, argv[i + 1]);
}
}
if (strcmp(argv[i], "-e") == 0 && argc > i + 1)
{
if (argv[i + 1][0] != '-')
{
expected_check = true;
strcpy(expected_file_name, argv[i + 1]);
}
}
if (strcmp(argv[i], "-o") == 0)
{
expect_output = true;
if (argc > i + 1)
{
if (argv[i + 1][0] != '-')
{
output_check = true;
strcpy(output_file_name, argv[i + 1]);
}
}
}
}
if (!input_check)
{
std::cout << "Execution command syntax error: \"Input\" filename required" << std::endl;
error_present = true;
}
else
{
input_file = fopen(input_file_name, "r");
if (!input_file)
{
std::cout << "Error: File " << input_file_name << " does not exist" << std::endl;
error_present = true;
}
}
if (!expected_check)
{
std::cout << "Execution command syntax error: \"Expected Output\" filename required" << std::endl;
error_present = true;
}
else
{
expected_file = fopen(expected_file_name, "r");
if (!expected_file)
{
std::cout << "Error: File " << expected_file_name << " does not exist" << std::endl;
error_present = true;
}
}
if (!output_check && expect_output)
{
std::cout << "Execution Command Syntax Warning: \"Output\" filename expected" << std::endl;
}
else if (output_check)
{
output_file = fopen(output_file_name, "w");
}
if (error_present)
{
std::cout << "Use the following command to run the program:\n\n"
"./<program> -e <expected> -i <input> -o <output>\n\n"
"Where <expected> is the expected output file, <input> is the input dataset files, and <output> is an optional path to store the results"
<< std::endl;
}
else
{
dataset_no = 0;
while (true)
{
h_array_in = (char *)malloc(1024 * sizeof(char));
if (fgets(h_array_in, 1024, input_file) == NULL)
{
break;
}
for (n = 0; h_array_in[n] != '\n'; n++)
{
continue;
}
h_array_in[n] = '\0';
if (fscanf(expected_file, "%d", &num_bins) == -1)
{
break;
}
expectedOutput = (int *)malloc(num_bins * sizeof(int));
for (i = 0; i < num_bins; i++)
{
fscanf(expected_file, "%d", &expectedOutput[i]);
}
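            // Assumption: the expected-output file always lists 128 bins; h_array_out and the
            // comparison below use a fixed size of 128 regardless of the num_bins value read above.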
h_array_out = (int *)malloc(128 * sizeof(int));
hipMalloc((void **)&d_array_in, n * sizeof(char));
hipMalloc((void **)&d_array_out, 128 * sizeof(int));
hipMemcpy(d_array_in, h_array_in, n * sizeof(char), hipMemcpyHostToDevice);
dim3 blocks((n - 1) / BLOCK_WIDTH + 1);
dim3 threads_per_block(BLOCK_WIDTH);
hipLaunchKernelGGL(( histogram), dim3(blocks), dim3(threads_per_block), 0, 0, d_array_in, d_array_out, n);
hipMemcpy(h_array_out, d_array_out, 128 * sizeof(int), hipMemcpyDeviceToHost);
if (output_check)
{
fprintf(output_file, "%d", 128);
for (i = 0; i < num_bins; i++)
{
fprintf(output_file, "\n%d", h_array_out[i]);
}
fprintf(output_file, "\n");
fflush(output_file);
}
output_pass = true;
for (i = 0; i < 128; i++)
{
if (expectedOutput[i] != h_array_out[i])
{
output_pass = false;
}
}
if (output_pass)
{
std::cout << "Dataset " << dataset_no << " PASSED" << std::endl;
}
else
{
std::cout << "Dataset " << dataset_no << " FAILED" << std::endl;
}
dataset_no++;
hipFree(d_array_in);
hipFree(d_array_out);
free(h_array_in);
free(h_array_out);
free(expectedOutput);
}
if (output_check)
{
std::cout << "Results stored in " << output_file_name << std::endl;
}
fclose(input_file);
fclose(expected_file);
if (output_check)
{
fclose(output_file);
}
}
return 0;
}
| a0afa0d27403dc927e432eb9215f2ac908c6f71c.cu | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define BLOCK_WIDTH 256
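// 128-bin ASCII histogram using shared-memory privatization: each block zeroes a private
// shared copy of the bins, accumulates its slice of the input with shared-memory atomics,
// then flushes the block totals to d_array_out with global atomics.
// Note (assumption): the global bins are zeroed only by the first block's threads at kernel
// start; correctness relies on that zeroing landing before other blocks flush their totals,
// which the launch does not explicitly guarantee.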
__global__ void histogram(char *d_array_in, int *d_array_out, int n)
{
__shared__ int shared_bin[128];
int i, index, blocks, iterations;
blocks = (n - 1) / BLOCK_WIDTH + 1;
iterations = 127 / (blocks * BLOCK_WIDTH) + 1;
for (i = 0; i < iterations; i++)
{
index = (blockIdx.x + i * blocks) * blockDim.x + threadIdx.x;
if (index < 128)
{
d_array_out[index] = 0;
}
}
iterations = 127 / BLOCK_WIDTH + 1;
for (i = 0; i < iterations; i++)
{
index = i * blockDim.x + threadIdx.x;
if (index < 128)
{
shared_bin[index] = 0;
}
__syncthreads();
}
index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < n)
{
atomicAdd(&shared_bin[d_array_in[index]], 1);
}
__syncthreads();
for (i = 0; i < iterations; i++)
{
index = i * blockDim.x + threadIdx.x;
if (index < 128)
{
atomicAdd(&d_array_out[index], shared_bin[index]);
}
__syncthreads();
}
return;
}
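// Command-line harness: parses -i/-e/-o, then for each input line runs the GPU
// histogram, compares it with the expected counts, and prints PASSED/FAILED
// (optionally writing the computed histogram to the output file).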
int main(int argc, char *argv[])
{
bool input_check = false;
bool expected_check = false;
bool output_check = false;
bool error_present = false;
bool expect_output = false;
bool output_pass;
char input_file_name[256];
char expected_file_name[256];
char output_file_name[256];
FILE *input_file = NULL;
FILE *expected_file = NULL;
FILE *output_file = NULL;
char *h_array_in = NULL;
int *h_array_out = NULL;
char *d_array_in = NULL;
int *d_array_out = NULL;
int *expectedOutput = NULL;
int i, n, num_bins, dataset_no;
for (i = 1; i < argc; i++)
{
if (strcmp(argv[i], "-i") == 0 && argc > i + 1)
{
if (argv[i + 1][0] != '-')
{
input_check = true;
strcpy(input_file_name, argv[i + 1]);
}
}
if (strcmp(argv[i], "-e") == 0 && argc > i + 1)
{
if (argv[i + 1][0] != '-')
{
expected_check = true;
strcpy(expected_file_name, argv[i + 1]);
}
}
if (strcmp(argv[i], "-o") == 0)
{
expect_output = true;
if (argc > i + 1)
{
if (argv[i + 1][0] != '-')
{
output_check = true;
strcpy(output_file_name, argv[i + 1]);
}
}
}
}
if (!input_check)
{
std::cout << "Execution command syntax error: \"Input\" filename required" << std::endl;
error_present = true;
}
else
{
input_file = fopen(input_file_name, "r");
if (!input_file)
{
std::cout << "Error: File " << input_file_name << " does not exist" << std::endl;
error_present = true;
}
}
if (!expected_check)
{
std::cout << "Execution command syntax error: \"Expected Output\" filename required" << std::endl;
error_present = true;
}
else
{
expected_file = fopen(expected_file_name, "r");
if (!expected_file)
{
std::cout << "Error: File " << expected_file_name << " does not exist" << std::endl;
error_present = true;
}
}
if (!output_check && expect_output)
{
std::cout << "Execution Command Syntax Warning: \"Output\" filename expected" << std::endl;
}
else if (output_check)
{
output_file = fopen(output_file_name, "w");
}
if (error_present)
{
std::cout << "Use the following command to run the program:\n\n"
"./<program> -e <expected> -i <input> -o <output>\n\n"
"Where <expected> is the expected output file, <input> is the input dataset files, and <output> is an optional path to store the results"
<< std::endl;
}
else
{
dataset_no = 0;
while (true)
{
h_array_in = (char *)malloc(1024 * sizeof(char));
if (fgets(h_array_in, 1024, input_file) == NULL)
{
break;
}
for (n = 0; h_array_in[n] != '\n'; n++)
{
continue;
}
h_array_in[n] = '\0';
if (fscanf(expected_file, "%d", &num_bins) == -1)
{
break;
}
expectedOutput = (int *)malloc(num_bins * sizeof(int));
for (i = 0; i < num_bins; i++)
{
fscanf(expected_file, "%d", &expectedOutput[i]);
}
h_array_out = (int *)malloc(128 * sizeof(int));
cudaMalloc((void **)&d_array_in, n * sizeof(char));
cudaMalloc((void **)&d_array_out, 128 * sizeof(int));
cudaMemcpy(d_array_in, h_array_in, n * sizeof(char), cudaMemcpyHostToDevice);
dim3 blocks((n - 1) / BLOCK_WIDTH + 1);
dim3 threads_per_block(BLOCK_WIDTH);
histogram<<<blocks, threads_per_block>>>(d_array_in, d_array_out, n);
cudaMemcpy(h_array_out, d_array_out, 128 * sizeof(int), cudaMemcpyDeviceToHost);
if (output_check)
{
fprintf(output_file, "%d", 128);
for (i = 0; i < num_bins; i++)
{
fprintf(output_file, "\n%d", h_array_out[i]);
}
fprintf(output_file, "\n");
fflush(output_file);
}
output_pass = true;
for (i = 0; i < 128; i++)
{
if (expectedOutput[i] != h_array_out[i])
{
output_pass = false;
}
}
if (output_pass)
{
std::cout << "Dataset " << dataset_no << " PASSED" << std::endl;
}
else
{
std::cout << "Dataset " << dataset_no << " FAILED" << std::endl;
}
dataset_no++;
cudaFree(d_array_in);
cudaFree(d_array_out);
free(h_array_in);
free(h_array_out);
free(expectedOutput);
}
if (output_check)
{
std::cout << "Results stored in " << output_file_name << std::endl;
}
fclose(input_file);
fclose(expected_file);
if (output_check)
{
fclose(output_file);
}
}
return 0;
}
|
3c2e99c9797a7db8904d0a6feebb42464ed50ba9.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_core.cuh"
#define INCLUDED_FROM_DCONST_DEFINER
#include "dconsts_core.cuh"
#include "errorhandler_cuda.cuh"
#include "common/config.h"
void print_gpu_config()
{
int n_devices;
if (hipGetDeviceCount(&n_devices) != hipSuccess) CRASH("No CUDA devices found!");
printf("Num CUDA devices found: %u\n", n_devices);
int initial_device;
hipGetDevice(&initial_device);
for (int i = 0; i < n_devices; i++) {
hipSetDevice(i);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GiB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/(1024*1024));
//Memory usage
size_t free_bytes, total_bytes;
CUDA_ERRCHK( hipMemGetInfo(&free_bytes, &total_bytes) );
const size_t used_bytes = total_bytes - free_bytes;
printf(" GPU memory used (MiB): %f\n", (double) used_bytes / (1024*1024));
printf(" GPU memory free (MiB): %f\n", (double) free_bytes / (1024*1024));
printf(" GPU memory total (MiB): %f\n", (double) total_bytes / (1024*1024));
}
hipSetDevice(initial_device);
if (n_devices < NUM_DEVICES) CRASH("Invalid number of devices requested!");
}
void load_forcing_dconsts_cuda_core(ForcingParams* forcing_params)
{
CUDA_ERRCHK( hipMemcpyToSymbol(d_FORCING_ENABLED, &forcing_params->forcing_enabled, sizeof(bool)) );
//Copy forcing coefficients to the device's constant memory
const size_t k_idx = forcing_params->k_idx;
CUDA_ERRCHK( hipMemcpyToSymbol(d_KK_VEC_X, &forcing_params->kk_x[k_idx], sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_KK_VEC_Y, &forcing_params->kk_y[k_idx], sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_KK_VEC_Z, &forcing_params->kk_z[k_idx], sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_FORCING_KK_PART_X, &forcing_params->kk_part_x, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_FORCING_KK_PART_Y, &forcing_params->kk_part_y, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_FORCING_KK_PART_Z, &forcing_params->kk_part_z, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_PHI, &forcing_params->phi, sizeof(real)) );
}
void load_hydro_dconsts_cuda_core(CParamConfig* cparams, RunConfig* run_params, const vec3i start_idx)
{
//Grid dimensions
CUDA_ERRCHK( hipMemcpyToSymbol(d_nx, &(cparams->nx), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_ny, &(cparams->ny), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_nz, &(cparams->nz), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_mx, &(cparams->mx), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_my, &(cparams->my), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_mz, &(cparams->mz), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_nx_min, &(cparams->nx_min), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_nx_max, &(cparams->nx_max), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_ny_min, &(cparams->ny_min), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_ny_max, &(cparams->ny_max), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_nz_min, &(cparams->nz_min), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_nz_max, &(cparams->nz_max), sizeof(int)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DSX, &(cparams->dsx), sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DSY, &(cparams->dsy), sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DSZ, &(cparams->dsz), sizeof(real)) );
const real dsx_offset = cparams->dsx*start_idx.x;
const real dsy_offset = cparams->dsy*start_idx.y;
const real dsz_offset = cparams->dsz*start_idx.z;
CUDA_ERRCHK( hipMemcpyToSymbol(d_DSX_OFFSET, &dsx_offset, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DSY_OFFSET, &dsy_offset, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DSZ_OFFSET, &dsz_offset, sizeof(real)) );
const real xorig = XORIG;
const real yorig = YORIG;
const real zorig = ZORIG;
CUDA_ERRCHK( hipMemcpyToSymbol(d_XORIG, &xorig, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_YORIG, &yorig, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_ZORIG, &zorig, sizeof(real)) );
//Diff constants
const real diff1_dx = 1.0/(60.0*cparams->dsx);
const real diff1_dy = 1.0/(60.0*cparams->dsy);
const real diff1_dz = 1.0/(60.0*cparams->dsz);
const real diff2_dx = 1.0/(180.0*cparams->dsx*cparams->dsx);
const real diff2_dy = 1.0/(180.0*cparams->dsy*cparams->dsy);
const real diff2_dz = 1.0/(180.0*cparams->dsz*cparams->dsz);
const real diffmn_dxdy = (1.0/720.0)*(1.0/cparams->dsx)*(1.0/cparams->dsy);
const real diffmn_dydz = (1.0/720.0)*(1.0/cparams->dsy)*(1.0/cparams->dsz);
const real diffmn_dxdz = (1.0/720.0)*(1.0/cparams->dsz)*(1.0/cparams->dsx);
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFF1_DX_DIV, &diff1_dx, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFF1_DY_DIV, &diff1_dy, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFF1_DZ_DIV, &diff1_dz, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFF2_DX_DIV, &diff2_dx, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFF2_DY_DIV, &diff2_dy, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFF2_DZ_DIV, &diff2_dz, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFFMN_DXDY_DIV, &diffmn_dxdy, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFFMN_DYDZ_DIV, &diffmn_dydz, sizeof(real)) );
CUDA_ERRCHK( hipMemcpyToSymbol(d_DIFFMN_DXDZ_DIV, &diffmn_dxdz, sizeof(real)) );
//Viscosity
CUDA_ERRCHK( hipMemcpyToSymbol(d_NU_VISC, &(run_params->nu_visc), sizeof(real)) );
//Speed of sound
const real cs2_sound = pow(run_params->cs_sound, 2.0);
CUDA_ERRCHK( hipMemcpyToSymbol(d_CS2_SOUND, &cs2_sound, sizeof(real)) );
//Induction
CUDA_ERRCHK( hipMemcpyToSymbol(d_ETA, &(run_params->eta), sizeof(real)) );
}
void init_grid_cuda_core(Grid* d_grid, Grid* d_grid_dst, CParamConfig* cparams)
{
//Print the GPU configuration
print_gpu_config();
const size_t grid_size_bytes = sizeof(real) * cparams->mx * cparams->my * cparams->mz;
//Init device arrays
for (int i=0; i < NUM_ARRS; ++i) {
CUDA_ERRCHK( hipMalloc(&(d_grid->arr[i]), grid_size_bytes) );
CUDA_ERRCHK( hipMemset(d_grid->arr[i], INT_MAX, grid_size_bytes) );
CUDA_ERRCHK( hipMalloc(&(d_grid_dst->arr[i]), grid_size_bytes) );
CUDA_ERRCHK( hipMemset(d_grid_dst->arr[i], INT_MAX, grid_size_bytes) );
}
}
void destroy_grid_cuda_core(Grid* d_grid, Grid* d_grid_dst)
{
for (int i=0; i < NUM_ARRS; ++i) {
CUDA_ERRCHK( hipFree(d_grid->arr[i]) );
CUDA_ERRCHK( hipFree(d_grid_dst->arr[i]) );
}
}
void load_grid_cuda_core(Grid* d_grid, CParamConfig* d_cparams, vec3i* h_start_idx, Grid* h_grid, CParamConfig* h_cparams)
{
//Create a host buffer to minimize the number of device-host-device memcpys (very high latency)
Grid buffer;
grid_malloc(&buffer, d_cparams);
const size_t slab_size_bytes = sizeof(real) * d_cparams->mx * d_cparams->my;
for (int w=0; w < NUM_ARRS; ++w)
for (int k=0; k < d_cparams->mz; ++k)
memcpy(&buffer.arr[w][k*d_cparams->mx*d_cparams->my], &h_grid->arr[w][h_start_idx->y*h_cparams->mx + k*h_cparams->mx*h_cparams->my], slab_size_bytes);
hipStream_t stream;
hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
const size_t grid_size_bytes = sizeof(real)* d_cparams->mx * d_cparams->my * d_cparams->mz;
for (int w=0; w < NUM_ARRS; ++w)
CUDA_ERRCHK( hipMemcpyAsync(d_grid->arr[w], buffer.arr[w], grid_size_bytes, hipMemcpyHostToDevice, stream) );
hipStreamDestroy(stream);
grid_free(&buffer);
}
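// Copies the whole device grid into a host staging buffer, then scatters its
// interior (non-halo) rows into the correct y-offset of the full host grid.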
void store_grid_cuda_core(Grid* h_grid, CParamConfig* h_cparams, Grid* d_grid, CParamConfig* d_cparams, vec3i* h_start_idx)
{
Grid buffer;
grid_malloc(&buffer, d_cparams);
hipStream_t stream;
hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
const size_t grid_size_bytes = sizeof(real) * d_cparams->mx * d_cparams->my * d_cparams->mz;
for (int w=0; w < NUM_ARRS; ++w)
hipMemcpyAsync(buffer.arr[w], d_grid->arr[w], grid_size_bytes, hipMemcpyDeviceToHost, stream);
const size_t row_size_bytes = sizeof(real) * d_cparams->nx;
for (int w=0; w < NUM_ARRS; ++w) {
for (int k=d_cparams->nz_min; k < d_cparams->nz_max; ++k)
for (int j=d_cparams->ny_min; j < d_cparams->ny_max; ++j)
memcpy(&h_grid->arr[w][h_cparams->nx_min + (j+h_start_idx->y)*h_cparams->mx + k*h_cparams->mx*h_cparams->my],
&buffer.arr[w][d_cparams->nx_min + j*d_cparams->mx + k*d_cparams->mx*d_cparams->my], row_size_bytes);
}
hipStreamDestroy(stream);
grid_free(&buffer);
}
void store_slice_cuda_core(Slice* h_slice, CParamConfig* h_cparams, RunConfig* h_run_params, Slice* d_slice, CParamConfig* d_cparams, vec3i* h_start_idx)
{
if (h_run_params->slice_axis != 'z') CRASH("Slice axis other than z not yet supported!");
Slice buffer;
slice_malloc(&buffer, d_cparams, h_run_params);
hipStream_t stream;
hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
const size_t slice_size_bytes = sizeof(real) * d_cparams->mx * d_cparams->my;
for (int w=0; w < NUM_SLICES; ++w)
CUDA_ERRCHK( hipMemcpyAsync(buffer.arr[w], d_slice->arr[w], slice_size_bytes, hipMemcpyDeviceToHost, stream) );
const size_t row_size_bytes = sizeof(real) * d_cparams->nx;
for (int w=0; w < NUM_SLICES; ++w)
for (int j=d_cparams->ny_min; j < d_cparams->ny_max; ++j)
memcpy(&h_slice->arr[w][h_cparams->nx_min + (j+h_start_idx->y)*h_cparams->mx],
&buffer.arr[w][d_cparams->nx_min + j*d_cparams->mx], row_size_bytes);
hipStreamDestroy(stream);
slice_free(&buffer);
}
| 3c2e99c9797a7db8904d0a6feebb42464ed50ba9.cu | #include "cuda_core.cuh"
#define INCLUDED_FROM_DCONST_DEFINER
#include "dconsts_core.cuh"
#include "errorhandler_cuda.cuh"
#include "common/config.h"
void print_gpu_config()
{
int n_devices;
if (cudaGetDeviceCount(&n_devices) != cudaSuccess) CRASH("No CUDA devices found!");
printf("Num CUDA devices found: %u\n", n_devices);
int initial_device;
cudaGetDevice(&initial_device);
for (int i = 0; i < n_devices; i++) {
cudaSetDevice(i);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GiB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/(1024*1024));
//Memory usage
size_t free_bytes, total_bytes;
CUDA_ERRCHK( cudaMemGetInfo(&free_bytes, &total_bytes) );
const size_t used_bytes = total_bytes - free_bytes;
printf(" GPU memory used (MiB): %f\n", (double) used_bytes / (1024*1024));
printf(" GPU memory free (MiB): %f\n", (double) free_bytes / (1024*1024));
printf(" GPU memory total (MiB): %f\n", (double) total_bytes / (1024*1024));
}
cudaSetDevice(initial_device);
if (n_devices < NUM_DEVICES) CRASH("Invalid number of devices requested!");
}
void load_forcing_dconsts_cuda_core(ForcingParams* forcing_params)
{
CUDA_ERRCHK( cudaMemcpyToSymbol(d_FORCING_ENABLED, &forcing_params->forcing_enabled, sizeof(bool)) );
//Copy forcing coefficients to the device's constant memory
const size_t k_idx = forcing_params->k_idx;
CUDA_ERRCHK( cudaMemcpyToSymbol(d_KK_VEC_X, &forcing_params->kk_x[k_idx], sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_KK_VEC_Y, &forcing_params->kk_y[k_idx], sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_KK_VEC_Z, &forcing_params->kk_z[k_idx], sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_FORCING_KK_PART_X, &forcing_params->kk_part_x, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_FORCING_KK_PART_Y, &forcing_params->kk_part_y, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_FORCING_KK_PART_Z, &forcing_params->kk_part_z, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_PHI, &forcing_params->phi, sizeof(real)) );
}
void load_hydro_dconsts_cuda_core(CParamConfig* cparams, RunConfig* run_params, const vec3i start_idx)
{
//Grid dimensions
CUDA_ERRCHK( cudaMemcpyToSymbol(d_nx, &(cparams->nx), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_ny, &(cparams->ny), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_nz, &(cparams->nz), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_mx, &(cparams->mx), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_my, &(cparams->my), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_mz, &(cparams->mz), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_nx_min, &(cparams->nx_min), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_nx_max, &(cparams->nx_max), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_ny_min, &(cparams->ny_min), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_ny_max, &(cparams->ny_max), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_nz_min, &(cparams->nz_min), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_nz_max, &(cparams->nz_max), sizeof(int)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DSX, &(cparams->dsx), sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DSY, &(cparams->dsy), sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DSZ, &(cparams->dsz), sizeof(real)) );
const real dsx_offset = cparams->dsx*start_idx.x;
const real dsy_offset = cparams->dsy*start_idx.y;
const real dsz_offset = cparams->dsz*start_idx.z;
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DSX_OFFSET, &dsx_offset, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DSY_OFFSET, &dsy_offset, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DSZ_OFFSET, &dsz_offset, sizeof(real)) );
const real xorig = XORIG;
const real yorig = YORIG;
const real zorig = ZORIG;
CUDA_ERRCHK( cudaMemcpyToSymbol(d_XORIG, &xorig, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_YORIG, &yorig, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_ZORIG, &zorig, sizeof(real)) );
//Diff constants
const real diff1_dx = 1.0/(60.0*cparams->dsx);
const real diff1_dy = 1.0/(60.0*cparams->dsy);
const real diff1_dz = 1.0/(60.0*cparams->dsz);
const real diff2_dx = 1.0/(180.0*cparams->dsx*cparams->dsx);
const real diff2_dy = 1.0/(180.0*cparams->dsy*cparams->dsy);
const real diff2_dz = 1.0/(180.0*cparams->dsz*cparams->dsz);
const real diffmn_dxdy = (1.0/720.0)*(1.0/cparams->dsx)*(1.0/cparams->dsy);
const real diffmn_dydz = (1.0/720.0)*(1.0/cparams->dsy)*(1.0/cparams->dsz);
const real diffmn_dxdz = (1.0/720.0)*(1.0/cparams->dsz)*(1.0/cparams->dsx);
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFF1_DX_DIV, &diff1_dx, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFF1_DY_DIV, &diff1_dy, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFF1_DZ_DIV, &diff1_dz, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFF2_DX_DIV, &diff2_dx, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFF2_DY_DIV, &diff2_dy, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFF2_DZ_DIV, &diff2_dz, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFFMN_DXDY_DIV, &diffmn_dxdy, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFFMN_DYDZ_DIV, &diffmn_dydz, sizeof(real)) );
CUDA_ERRCHK( cudaMemcpyToSymbol(d_DIFFMN_DXDZ_DIV, &diffmn_dxdz, sizeof(real)) );
//Viscosity
CUDA_ERRCHK( cudaMemcpyToSymbol(d_NU_VISC, &(run_params->nu_visc), sizeof(real)) );
//Speed of sound
const real cs2_sound = pow(run_params->cs_sound, 2.0);
CUDA_ERRCHK( cudaMemcpyToSymbol(d_CS2_SOUND, &cs2_sound, sizeof(real)) );
//Induction
CUDA_ERRCHK( cudaMemcpyToSymbol(d_ETA, &(run_params->eta), sizeof(real)) );
}
void init_grid_cuda_core(Grid* d_grid, Grid* d_grid_dst, CParamConfig* cparams)
{
//Print the GPU configuration
print_gpu_config();
const size_t grid_size_bytes = sizeof(real) * cparams->mx * cparams->my * cparams->mz;
//Init device arrays
for (int i=0; i < NUM_ARRS; ++i) {
CUDA_ERRCHK( cudaMalloc(&(d_grid->arr[i]), grid_size_bytes) );
CUDA_ERRCHK( cudaMemset(d_grid->arr[i], INT_MAX, grid_size_bytes) );
CUDA_ERRCHK( cudaMalloc(&(d_grid_dst->arr[i]), grid_size_bytes) );
CUDA_ERRCHK( cudaMemset(d_grid_dst->arr[i], INT_MAX, grid_size_bytes) );
}
}
void destroy_grid_cuda_core(Grid* d_grid, Grid* d_grid_dst)
{
for (int i=0; i < NUM_ARRS; ++i) {
CUDA_ERRCHK( cudaFree(d_grid->arr[i]) );
CUDA_ERRCHK( cudaFree(d_grid_dst->arr[i]) );
}
}
void load_grid_cuda_core(Grid* d_grid, CParamConfig* d_cparams, vec3i* h_start_idx, Grid* h_grid, CParamConfig* h_cparams)
{
//Create a host buffer to minimize the number of device-host-device memcpys (very high latency)
Grid buffer;
grid_malloc(&buffer, d_cparams);
const size_t slab_size_bytes = sizeof(real) * d_cparams->mx * d_cparams->my;
for (int w=0; w < NUM_ARRS; ++w)
for (int k=0; k < d_cparams->mz; ++k)
memcpy(&buffer.arr[w][k*d_cparams->mx*d_cparams->my], &h_grid->arr[w][h_start_idx->y*h_cparams->mx + k*h_cparams->mx*h_cparams->my], slab_size_bytes);
cudaStream_t stream;
cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
const size_t grid_size_bytes = sizeof(real)* d_cparams->mx * d_cparams->my * d_cparams->mz;
for (int w=0; w < NUM_ARRS; ++w)
CUDA_ERRCHK( cudaMemcpyAsync(d_grid->arr[w], buffer.arr[w], grid_size_bytes, cudaMemcpyHostToDevice, stream) );
cudaStreamDestroy(stream);
grid_free(&buffer);
}
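// Copies the whole device grid into a host staging buffer, then scatters its
// interior (non-halo) rows into the correct y-offset of the full host grid.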
void store_grid_cuda_core(Grid* h_grid, CParamConfig* h_cparams, Grid* d_grid, CParamConfig* d_cparams, vec3i* h_start_idx)
{
Grid buffer;
grid_malloc(&buffer, d_cparams);
cudaStream_t stream;
cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
const size_t grid_size_bytes = sizeof(real) * d_cparams->mx * d_cparams->my * d_cparams->mz;
for (int w=0; w < NUM_ARRS; ++w)
cudaMemcpyAsync(buffer.arr[w], d_grid->arr[w], grid_size_bytes, cudaMemcpyDeviceToHost, stream);
const size_t row_size_bytes = sizeof(real) * d_cparams->nx;
for (int w=0; w < NUM_ARRS; ++w) {
for (int k=d_cparams->nz_min; k < d_cparams->nz_max; ++k)
for (int j=d_cparams->ny_min; j < d_cparams->ny_max; ++j)
memcpy(&h_grid->arr[w][h_cparams->nx_min + (j+h_start_idx->y)*h_cparams->mx + k*h_cparams->mx*h_cparams->my],
&buffer.arr[w][d_cparams->nx_min + j*d_cparams->mx + k*d_cparams->mx*d_cparams->my], row_size_bytes);
}
cudaStreamDestroy(stream);
grid_free(&buffer);
}
void store_slice_cuda_core(Slice* h_slice, CParamConfig* h_cparams, RunConfig* h_run_params, Slice* d_slice, CParamConfig* d_cparams, vec3i* h_start_idx)
{
if (h_run_params->slice_axis != 'z') CRASH("Slice axis other than z not yet supported!");
Slice buffer;
slice_malloc(&buffer, d_cparams, h_run_params);
cudaStream_t stream;
cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
const size_t slice_size_bytes = sizeof(real) * d_cparams->mx * d_cparams->my;
for (int w=0; w < NUM_SLICES; ++w)
CUDA_ERRCHK( cudaMemcpyAsync(buffer.arr[w], d_slice->arr[w], slice_size_bytes, cudaMemcpyDeviceToHost, stream) );
const size_t row_size_bytes = sizeof(real) * d_cparams->nx;
for (int w=0; w < NUM_SLICES; ++w)
for (int j=d_cparams->ny_min; j < d_cparams->ny_max; ++j)
memcpy(&h_slice->arr[w][h_cparams->nx_min + (j+h_start_idx->y)*h_cparams->mx],
&buffer.arr[w][d_cparams->nx_min + j*d_cparams->mx], row_size_bytes);
cudaStreamDestroy(stream);
slice_free(&buffer);
}
|
239cae4189dc2c94c185c5038a251e385d4e4141.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/find_op.h"
namespace caffe2 {
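// One block per needle: each block scans idx and keeps the largest position j
// with idx[j] == needle (via a block-wide max reduction), writing missing_value
// when the needle does not occur.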
template <typename T>
__global__ void FindKernel(
int num_needles,
int idx_size,
const T* idx,
const T* needles,
int* out,
int missing_value) {
int needle_idx = blockIdx.x; // One cuda block per needle
T q = needles[needle_idx];
int res = (-1);
for (int j = threadIdx.x; j < idx_size; j += CAFFE_CUDA_NUM_THREADS) {
if (idx[j] == q) {
res = max(res, j);
}
}
typedef hipcub::BlockReduce<int, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int min_res = BlockReduce(temp_storage).Reduce(res, hipcub::Max());
if (threadIdx.x == 0) {
out[needle_idx] = min_res == (-1) ? missing_value : min_res;
}
}
template <>
template <typename T>
bool FindOp<CUDAContext>::DoRunWithType() {
auto& idx = Input(0);
auto& needles = Input(1);
auto* res_indices = Output(0, needles.sizes(), at::dtype<int>());
const T* idx_data = idx.data<T>();
const T* needles_data = needles.data<T>();
int* res_data = res_indices->template mutable_data<int>();
hipLaunchKernelGGL(( FindKernel<T>), dim3(needles.numel()), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
needles.numel(),
idx.numel(),
idx_data,
needles_data,
res_data,
missing_value_);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(Find, FindOp<CUDAContext>)
} // namespace caffe2
| 239cae4189dc2c94c185c5038a251e385d4e4141.cu | #include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/find_op.h"
namespace caffe2 {
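// One block per needle: each block scans idx and keeps the largest position j
// with idx[j] == needle (via a block-wide max reduction), writing missing_value
// when the needle does not occur.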
template <typename T>
__global__ void FindKernel(
int num_needles,
int idx_size,
const T* idx,
const T* needles,
int* out,
int missing_value) {
int needle_idx = blockIdx.x; // One cuda block per needle
T q = needles[needle_idx];
int res = (-1);
for (int j = threadIdx.x; j < idx_size; j += CAFFE_CUDA_NUM_THREADS) {
if (idx[j] == q) {
res = max(res, j);
}
}
typedef cub::BlockReduce<int, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int min_res = BlockReduce(temp_storage).Reduce(res, cub::Max());
if (threadIdx.x == 0) {
out[needle_idx] = min_res == (-1) ? missing_value : min_res;
}
}
template <>
template <typename T>
bool FindOp<CUDAContext>::DoRunWithType() {
auto& idx = Input(0);
auto& needles = Input(1);
auto* res_indices = Output(0, needles.sizes(), at::dtype<int>());
const T* idx_data = idx.data<T>();
const T* needles_data = needles.data<T>();
int* res_data = res_indices->template mutable_data<int>();
FindKernel<
T><<<needles.numel(), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
needles.numel(),
idx.numel(),
idx_data,
needles_data,
res_data,
missing_value_);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(Find, FindOp<CUDAContext>)
} // namespace caffe2
|
4de9bc4f4f7a3865dc790a0b9ccd2de719738ad5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_rndrd_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *buffer = NULL;
hipMalloc(&buffer, XSIZE*YSIZE);
size_t reps = 1;
size_t steps = 1;
size_t elements = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(gpu_rndrd_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, buffer, reps, steps, elements);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(gpu_rndrd_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, buffer, reps, steps, elements);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(gpu_rndrd_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, buffer, reps, steps, elements);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4de9bc4f4f7a3865dc790a0b9ccd2de719738ad5.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_rndrd_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *buffer = NULL;
cudaMalloc(&buffer, XSIZE*YSIZE);
size_t reps = 1;
size_t steps = 1;
size_t elements = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gpu_rndrd_kernel<<<gridBlock,threadBlock>>>(buffer,reps,steps,elements);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gpu_rndrd_kernel<<<gridBlock,threadBlock>>>(buffer,reps,steps,elements);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gpu_rndrd_kernel<<<gridBlock,threadBlock>>>(buffer,reps,steps,elements);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
f439b30ddb542a70cff063f8a5f5b58b21d345bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHDeviceUtils.cuh>
#include <vector>
#include <iostream>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
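// Each block compares a 64-box "row" chunk against a 64-box "column" chunk:
// every thread owns one row-chunk box and records a 64-bit mask of the
// column-chunk boxes whose IoU with it exceeds the threshold.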
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// boxes is a N x 5 tensor
extern "C" at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) {
using scalar_t = float;
AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
auto scores = boxes.select(1, 4);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
THCCeilDiv(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
THCudaCheck(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
THCudaFree(state, mask_dev);
// TODO improve this part
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}
| f439b30ddb542a70cff063f8a5f5b58b21d345bb.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>
#include <vector>
#include <iostream>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
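// Each block compares a 64-box "row" chunk against a 64-box "column" chunk:
// every thread owns one row-chunk box and records a 64-bit mask of the
// column-chunk boxes whose IoU with it exceeds the threshold.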
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// boxes is a N x 5 tensor
extern "C" at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) {
using scalar_t = float;
AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
auto scores = boxes.select(1, 4);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
THCCeilDiv(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
THCudaCheck(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
THCudaFree(state, mask_dev);
// TODO improve this part
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}
|
3467a0bcf57efa531f066c5a0a5513e923a29058.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by igor on 17.05.2021.
//
#define BLOCK_X 32
#define BLOCK_Y 32
#define EPSILON 0.0000001
#define BATCH_SIZE 1024
#define raysPerPixel 100
#include "Renderer.cuh"
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand.h>
#define clip(number) ( (number) = (number) < 0 ? 0 : (number) > 255 ? 255 : (number) )
__constant__ struct C_Camera {
unsigned int x, y;
Vector3 topLeft;
Vector3 pixelDx, pixelDy;
Vector3 origin;
} c_camera;
__device__ Vector3 directionFromXY(C_Camera c, unsigned int x, unsigned int y, hiprandState_t *state){
return (c.topLeft + (x + hiprand_uniform (state)) * c.pixelDx + (y + hiprand_uniform (state)) * c.pixelDy).normalize();
}
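// Moller-Trumbore ray/triangle intersection over all triangles: returns the
// index of the closest hit (or -1 on a miss) and stores its distance in
// *closest_distance.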
__device__ int cast_ray(const Vector3 direction,
const Vector3 origin,
const Vector3* as,
const Vector3* edges1,
const Vector3* edges2,
const unsigned int triangles_n,
float* closest_distance){
*closest_distance = 10000000.0f;
int closest_id = -1;
for (int t = 0; t < triangles_n; ++t) {
Vector3 a = as[t];
Vector3 edge1 = edges1[t];
Vector3 edge2 = edges2[t];
Vector3 h = direction.cross(edge2);
float a_ = edge1.dot(h);
// The ray is parallel to this triangle.
if (a_ > -EPSILON && a_ < EPSILON) continue;
float f = 1.0f / a_;
Vector3 s = origin - a;
float u = f * s.dot(h);
Vector3 q = s.cross(edge1);
float v = f * direction.dot(q);
// The ray intercepts the plane outside the triangle;
if (v < 0.0 || u + v > 1.0 || u < 0.0 || u > 1.0) continue;
// At this stage we can compute t to find out where the intersection point is on the line.
float distance = f * edge2.dot(q);
if (distance > EPSILON && distance < *closest_distance) {
closest_id = t;
*closest_distance = distance;
}
}
return closest_id;
}
void Renderer::uploadScene() {
hipMalloc(&d_vectors, scene.vertices.size() * sizeof(Vector3));
hipMalloc(&d_materials, scene.materials.size() * sizeof(Material));
hipMalloc(&d_triangles, scene.triangles.size() * sizeof(Triangle));
hipMemcpy(d_vectors, scene.vertices.data(), scene.vertices.size() * sizeof(Vector3), hipMemcpyHostToDevice);
hipMemcpy(d_materials, scene.materials.data(), scene.materials.size() * sizeof(Material), hipMemcpyHostToDevice);
hipMemcpy(d_triangles, scene.triangles.data(), scene.triangles.size() * sizeof(Triangle), hipMemcpyHostToDevice);
// hipMalloc(&d_skybox_tex, scene.skybox.y * scene.skybox.x * sizeof(ColorF));
// hipMemcpy(d_skybox_tex, scene.skybox.img, scene.skybox.y * scene.skybox.x * sizeof(ColorF), hipMemcpyHostToDevice);
}
__global__ void random_init(hiprandState_t *dev_random){
unsigned int id = threadIdx.x + threadIdx.y * blockDim.x;
hiprand_init(id, id, 0, &dev_random[id]);
}
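// One thread per pixel: the block first stages triangle vertices and edges in
// shared memory, then each thread traces raysPerPixel jittered rays, adding the
// hit material's emission plus one specular reflection bounce (or a sky
// gradient on a miss), and writes the averaged color to the frame buffer.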
__global__ void calculatePixel(
const Screen screen,
const Vector3* d_vertices,
const Material* d_materials,
const Triangle* d_triangles,
const unsigned int triangles_n,
hiprandState_t *dev_random
){
unsigned int x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int y = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int id = threadIdx.x + threadIdx.y * blockDim.x;
extern __shared__ Vector3 triangles[];
Vector3* as = &triangles[0];
Vector3* edges1 = &triangles[BATCH_SIZE];
Vector3* edges2 = &triangles[BATCH_SIZE * 2];
if(id < triangles_n){
as[id] = d_vertices[d_triangles[id].a];
edges1[id] = d_vertices[d_triangles[id].b] - as[id];
edges2[id] = d_vertices[d_triangles[id].c] - as[id];
}
__syncthreads();
ColorF final_color{};
for(int i = 0; i < raysPerPixel; ++i) {
float closest_distance;
Vector3 ray = directionFromXY(c_camera, x, y, &dev_random[id]);
int closest_id = cast_ray(ray, c_camera.origin, as, edges1, edges2, triangles_n, &closest_distance);
if (closest_id >= 0) {
const Material& material = d_materials[d_triangles[closest_id].material];
//emission color
final_color += material.emit_color * material.emit;
//specular color
Vector3 normal = edges1[closest_id].cross(edges2[closest_id]).normalize();
Vector3 reflection_dir = -2.0f * ray.dot(normal) * normal + ray;
Vector3 reflection_origin = c_camera.origin + closest_distance * ray;
int reflection_id = cast_ray(reflection_dir, reflection_origin, as, edges1, edges2, triangles_n, &closest_distance);
if (reflection_id >= 0) {
const Material &ref_material = d_materials[d_triangles[reflection_id].material];
final_color += ref_material.emit_color * material.specular * ref_material.emit;
} else {
final_color += ColorF{0, 0, 255.0f * (0.5f + asin(reflection_dir.y) / 3.1415f)} * material.specular;
}
} else {
final_color += ColorF{0, 0, 255.0f * (0.5f + asin(ray.y)/3.1415f)};
}
}
screen.d_image[y * screen.sizeX + x] = final_color * (1.0f/raysPerPixel);
}
void Renderer::render() {
uploadScene();
hiprandState_t *dev_random;
hipMalloc((void**)&dev_random, BLOCK_X * BLOCK_Y * sizeof(hiprandState_t));
C_Camera c {
camera.x,
camera.y,
camera.topLeft,
camera.pixelDx,
camera.pixelDy,
camera.origin
};
hipMemcpyToSymbol(c_camera, &c, sizeof(C_Camera));
hipError_t error = hipGetLastError();
if (error != hipSuccess) std::cout << "Error before cudaExecute: " << hipGetErrorString(error);
hipLaunchKernelGGL(( random_init), dim3(1), dim3(dim3(BLOCK_X, BLOCK_Y)), 0, 0, dev_random);
hipLaunchKernelGGL(( calculatePixel), dim3((screen.sizeX + BLOCK_X - 1)/BLOCK_X,
(screen.sizeY + BLOCK_Y - 1)/BLOCK_Y),
dim3(dim3(BLOCK_X, BLOCK_Y)), BATCH_SIZE * sizeof(Vector3) * 3, 0,
screen,
d_vectors,
d_materials,
d_triangles,
scene.triangles.size(),
dev_random
);
hipDeviceSynchronize();
error = hipGetLastError();
if (error != hipSuccess) std::cout << "Error in cudaExecute: " << hipGetErrorString(error);
hipFree(dev_random);
hipFree(d_vectors);
hipFree(d_materials);
hipFree(d_triangles);
// hipFree(&d_skybox_tex);
}
Renderer::Renderer(Screen &&screen, Scene &&scene, Camera &&camera) : screen(screen), scene(scene),
camera(camera) {}
| 3467a0bcf57efa531f066c5a0a5513e923a29058.cu | //
// Created by igor on 17.05.2021.
//
#define BLOCK_X 32
#define BLOCK_Y 32
#define EPSILON 0.0000001
#define BATCH_SIZE 1024
#define raysPerPixel 100
#include "Renderer.cuh"
#include <curand_kernel.h>
#include <curand.h>
#define clip(number) ( (number) = (number) < 0 ? 0 : (number) > 255 ? 255 : (number) )
__constant__ struct C_Camera {
unsigned int x, y;
Vector3 topLeft;
Vector3 pixelDx, pixelDy;
Vector3 origin;
} c_camera;
__device__ Vector3 directionFromXY(C_Camera c, unsigned int x, unsigned int y, curandState *state){
return (c.topLeft + (x + curand_uniform (state)) * c.pixelDx + (y + curand_uniform (state)) * c.pixelDy).normalize();
}
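// Moller-Trumbore ray/triangle intersection over all triangles: returns the
// index of the closest hit (or -1 on a miss) and stores its distance in
// *closest_distance.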
__device__ int cast_ray(const Vector3 direction,
const Vector3 origin,
const Vector3* as,
const Vector3* edges1,
const Vector3* edges2,
const unsigned int triangles_n,
float* closest_distance){
*closest_distance = 10000000.0f;
int closest_id = -1;
for (int t = 0; t < triangles_n; ++t) {
Vector3 a = as[t];
Vector3 edge1 = edges1[t];
Vector3 edge2 = edges2[t];
Vector3 h = direction.cross(edge2);
float a_ = edge1.dot(h);
// The ray is parallel to this triangle.
if (a_ > -EPSILON && a_ < EPSILON) continue;
float f = 1.0f / a_;
Vector3 s = origin - a;
float u = f * s.dot(h);
Vector3 q = s.cross(edge1);
float v = f * direction.dot(q);
// The ray intercepts the plane outside the triangle;
if (v < 0.0 || u + v > 1.0 || u < 0.0 || u > 1.0) continue;
// At this stage we can compute t to find out where the intersection point is on the line.
float distance = f * edge2.dot(q);
if (distance > EPSILON && distance < *closest_distance) {
closest_id = t;
*closest_distance = distance;
}
}
return closest_id;
}
void Renderer::uploadScene() {
cudaMalloc(&d_vectors, scene.vertices.size() * sizeof(Vector3));
cudaMalloc(&d_materials, scene.materials.size() * sizeof(Material));
cudaMalloc(&d_triangles, scene.triangles.size() * sizeof(Triangle));
cudaMemcpy(d_vectors, scene.vertices.data(), scene.vertices.size() * sizeof(Vector3), cudaMemcpyHostToDevice);
cudaMemcpy(d_materials, scene.materials.data(), scene.materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMemcpy(d_triangles, scene.triangles.data(), scene.triangles.size() * sizeof(Triangle), cudaMemcpyHostToDevice);
// cudaMalloc(&d_skybox_tex, scene.skybox.y * scene.skybox.x * sizeof(ColorF));
// cudaMemcpy(d_skybox_tex, scene.skybox.img, scene.skybox.y * scene.skybox.x * sizeof(ColorF), cudaMemcpyHostToDevice);
}
__global__ void random_init(curandState *dev_random){
unsigned int id = threadIdx.x + threadIdx.y * blockDim.x;
curand_init(id, id, 0, &dev_random[id]);
}
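// One thread per pixel: the block first stages triangle vertices and edges in
// shared memory, then each thread traces raysPerPixel jittered rays, adding the
// hit material's emission plus one specular reflection bounce (or a sky
// gradient on a miss), and writes the averaged color to the frame buffer.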
__global__ void calculatePixel(
const Screen screen,
const Vector3* d_vertices,
const Material* d_materials,
const Triangle* d_triangles,
const unsigned int triangles_n,
curandState *dev_random
){
unsigned int x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int y = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int id = threadIdx.x + threadIdx.y * blockDim.x;
extern __shared__ Vector3 triangles[];
Vector3* as = &triangles[0];
Vector3* edges1 = &triangles[BATCH_SIZE];
Vector3* edges2 = &triangles[BATCH_SIZE * 2];
if(id < triangles_n){
as[id] = d_vertices[d_triangles[id].a];
edges1[id] = d_vertices[d_triangles[id].b] - as[id];
edges2[id] = d_vertices[d_triangles[id].c] - as[id];
}
__syncthreads();
ColorF final_color{};
for(int i = 0; i < raysPerPixel; ++i) {
float closest_distance;
Vector3 ray = directionFromXY(c_camera, x, y, &dev_random[id]);
int closest_id = cast_ray(ray, c_camera.origin, as, edges1, edges2, triangles_n, &closest_distance);
if (closest_id >= 0) {
const Material& material = d_materials[d_triangles[closest_id].material];
//emission color
final_color += material.emit_color * material.emit;
//specular color
Vector3 normal = edges1[closest_id].cross(edges2[closest_id]).normalize();
Vector3 reflection_dir = -2.0f * ray.dot(normal) * normal + ray;
Vector3 reflection_origin = c_camera.origin + closest_distance * ray;
int reflection_id = cast_ray(reflection_dir, reflection_origin, as, edges1, edges2, triangles_n, &closest_distance);
if (reflection_id >= 0) {
const Material &ref_material = d_materials[d_triangles[reflection_id].material];
final_color += ref_material.emit_color * material.specular * ref_material.emit;
} else {
final_color += ColorF{0, 0, 255.0f * (0.5f + asin(reflection_dir.y) / 3.1415f)} * material.specular;
}
} else {
final_color += ColorF{0, 0, 255.0f * (0.5f + asin(ray.y)/3.1415f)};
}
}
screen.d_image[y * screen.sizeX + x] = final_color * (1.0f/raysPerPixel);
}
void Renderer::render() {
uploadScene();
curandState *dev_random;
cudaMalloc((void**)&dev_random, BLOCK_X * BLOCK_Y * sizeof(curandState));
C_Camera c {
camera.x,
camera.y,
camera.topLeft,
camera.pixelDx,
camera.pixelDy,
camera.origin
};
cudaMemcpyToSymbol(c_camera, &c, sizeof(C_Camera));
cudaError error = cudaGetLastError();
if (error != cudaSuccess) std::cout << "Error before cudaExecute: " << cudaGetErrorString(error);
random_init<<<1, dim3(BLOCK_X, BLOCK_Y)>>>(dev_random);
calculatePixel<<<dim3((screen.sizeX + BLOCK_X - 1)/BLOCK_X,
(screen.sizeY + BLOCK_Y - 1)/BLOCK_Y),
dim3(BLOCK_X, BLOCK_Y), BATCH_SIZE * sizeof(Vector3) * 3>>>
(
screen,
d_vectors,
d_materials,
d_triangles,
scene.triangles.size(),
dev_random
);
cudaDeviceSynchronize();
error = cudaGetLastError();
if (error != cudaSuccess) std::cout << "Error in cudaExecute: " << cudaGetErrorString(error);
cudaFree(dev_random);
cudaFree(d_vectors);
cudaFree(d_materials);
cudaFree(d_triangles);
// cudaFree(&d_skybox_tex);
}
Renderer::Renderer(Screen &&screen, Scene &&scene, Camera &&camera) : screen(screen), scene(scene),
camera(camera) {}
|
684edebb7c0d2a64d54b991f5df99c9859b1c946.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Ahmad Abdelfattah
@author Azzam Haidar
@generated from magmablas/zgeqrf_batched_smallsq.cu, normal z -> c, Thu Oct 8 23:05:36 2020
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "sync.cuh"
#include "batched_kernel_param.h"
#define SLDA(N) ( (N==15||N==23||N==31)? (N+2) : (N+1) )
extern __shared__ magmaFloatComplex zdata[];
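// Each y-slot of the thread block factors one N x N matrix: thread tx keeps row
// tx of A in registers, and every panel step builds a Householder reflector
// (its tau stored in dtau) in shared memory and applies it to the trailing columns.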
template<int N>
__global__ void
cgeqrf_batched_sq1d_reg_kernel(
magmaFloatComplex **dA_array, magma_int_t ldda,
magmaFloatComplex **dtau_array, magma_int_t *info_array,
magma_int_t batchCount)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int batchid = blockIdx.x * blockDim.y + ty;
if(batchid >= batchCount) return;
if(tx >= N) return;
const int slda = SLDA(N);
magmaFloatComplex* dA = dA_array[batchid];
magmaFloatComplex* dtau = dtau_array[batchid];
magma_int_t* info = &info_array[batchid];
// shared memory pointers
magmaFloatComplex* sA = (magmaFloatComplex*)(zdata + ty * slda * N);
float* sdw = (float*)(zdata + blockDim.y * slda * N);
sdw += ty * N;
magmaFloatComplex rA[N] = {MAGMA_C_ZERO};
magmaFloatComplex alpha, tau, tmp, zsum, scale = MAGMA_C_ZERO;
float sum = MAGMA_D_ZERO, norm = MAGMA_D_ZERO, beta;
if( tx == 0 ){
(*info) = 0;
}
// init tau
dtau[tx] = MAGMA_C_ZERO;
// read
#pragma unroll
for(int i = 0; i < N; i++){
rA[i] = dA[ i * ldda + tx ];
}
#pragma unroll
for(int i = 0; i < N-1; i++){
sA[ i * slda + tx] = rA[i];
sdw[tx] = ( MAGMA_C_REAL(rA[i]) * MAGMA_C_REAL(rA[i]) + MAGMA_C_IMAG(rA[i]) * MAGMA_C_IMAG(rA[i]) );
magmablas_syncwarp();
alpha = sA[i * slda + i];
sum = MAGMA_D_ZERO;
#pragma unroll
for(int j = i; j < N; j++){
sum += sdw[j];
}
norm = sqrt(sum);
beta = -copysign(norm, real(alpha));
scale = MAGMA_C_DIV( MAGMA_C_ONE, alpha - MAGMA_C_MAKE(beta, 0));
tau = MAGMA_C_MAKE( (beta - real(alpha)) / beta, -imag(alpha) / beta );
if(tx == i){
dtau[i] = tau;
}
tmp = (tx == i)? MAGMA_C_MAKE(beta, MAGMA_D_ZERO) : rA[i] * scale;
if(tx >= i){
rA[i] = tmp;
}
dA[ i * ldda + tx ] = rA[i];
rA[i] = (tx == i) ? MAGMA_C_ONE : rA[i];
rA[i] = (tx < i ) ? MAGMA_C_ZERO : rA[i];
tmp = MAGMA_C_CONJ( rA[i] ) * MAGMA_C_CONJ( tau );
magmablas_syncwarp();
#pragma unroll
for(int j = i+1; j < N; j++){
sA[j * slda + tx] = rA[j] * tmp;
}
magmablas_syncwarp();
zsum = MAGMA_C_ZERO;
#pragma unroll
for(int j = i; j < N; j++){
zsum += sA[tx * slda + j];
}
sA[tx * slda + N] = zsum;
magmablas_syncwarp();
#pragma unroll
for(int j = i+1; j < N; j++){
rA[j] -= rA[i] * sA[j * slda + N];
}
magmablas_syncwarp();
}
// write the last column
dA[ (N-1) * ldda + tx ] = rA[N-1];
}
/***************************************************************************//**
Purpose
-------
CGEQRF computes a QR factorization of a complex M-by-N matrix A:
A = Q * R.
This is a batched version of the routine, and works only for small
square matrices of size up to 32.
Arguments
---------
@param[in]
n INTEGER
The size of the matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array on the GPU, dimension (LDDA,N)
On entry, the M-by-N matrix A.
On exit, the elements on and above the diagonal of the array
contain the min(M,N)-by-N upper trapezoidal matrix R (R is
upper triangular if m >= n); the elements below the diagonal,
with the array TAU, represent the orthogonal matrix Q as a
product of min(m,n) elementary reflectors (see Further
Details).
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
To benefit from coalescent memory accesses LDDA must be
divisible by 16.
@param[out]
dtau_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array, dimension (min(M,N))
The scalar factors of the elementary reflectors (see Further
Details).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
Further Details
---------------
The matrix Q is represented as a product of elementary reflectors
Q = H(1) H(2) . . . H(k), where k = min(m,n).
Each H(i) has the form
H(i) = I - tau * v * v'
where tau is a complex scalar, and v is a complex vector with
v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
and tau in TAU(i).
@ingroup magma_geqrf_batched
*******************************************************************************/
extern "C" magma_int_t
magma_cgeqrf_batched_smallsq(
magma_int_t n,
magmaFloatComplex** dA_array, magma_int_t ldda,
magmaFloatComplex **dtau_array, magma_int_t* info_array,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t arginfo = 0;
magma_int_t m = n;
if( (m < 0) || ( m > 32 ) ){
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
if( m == 0 || n == 0) return 0;
const magma_int_t ntcol = magma_get_cgeqrf_batched_ntcol(m, n);
magma_int_t shmem = ( SLDA(m) * m * sizeof(magmaFloatComplex) );
shmem += ( m * sizeof(float) );
shmem *= ntcol;
magma_int_t nth = magma_ceilpow2(m);
magma_int_t gridx = magma_ceildiv(batchCount, ntcol);
dim3 grid(gridx, 1, 1);
dim3 threads(nth, ntcol, 1);
switch(m){
case 1:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel< 1>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 2:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel< 2>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 3:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel< 3>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 4:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel< 4>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 5:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel< 5>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 6:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel< 6>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 7:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel< 7>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 8:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel< 8>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 9:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel< 9>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 10:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<10>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 11:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<11>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 12:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<12>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 13:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<13>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 14:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<14>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 15:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<15>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 16:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<16>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 17:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<17>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 18:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<18>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 19:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<19>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 20:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<20>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 21:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<21>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 22:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<22>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 23:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<23>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 24:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<24>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 25:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<25>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 26:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<26>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 27:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<27>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 28:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<28>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 29:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<29>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 30:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<30>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 31:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<31>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
case 32:hipLaunchKernelGGL(( cgeqrf_batched_sq1d_reg_kernel<32>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break;
default: printf("error: size %lld is not supported\n", (long long) m);
}
return arginfo;
}
| 684edebb7c0d2a64d54b991f5df99c9859b1c946.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Ahmad Abdelfattah
@author Azzam Haidar
@generated from magmablas/zgeqrf_batched_smallsq.cu, normal z -> c, Thu Oct 8 23:05:36 2020
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "sync.cuh"
#include "batched_kernel_param.h"
#define SLDA(N) ( (N==15||N==23||N==31)? (N+2) : (N+1) )
extern __shared__ magmaFloatComplex zdata[];
template<int N>
__global__ void
cgeqrf_batched_sq1d_reg_kernel(
magmaFloatComplex **dA_array, magma_int_t ldda,
magmaFloatComplex **dtau_array, magma_int_t *info_array,
magma_int_t batchCount)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int batchid = blockIdx.x * blockDim.y + ty;
if(batchid >= batchCount) return;
if(tx >= N) return;
const int slda = SLDA(N);
magmaFloatComplex* dA = dA_array[batchid];
magmaFloatComplex* dtau = dtau_array[batchid];
magma_int_t* info = &info_array[batchid];
// shared memory pointers
magmaFloatComplex* sA = (magmaFloatComplex*)(zdata + ty * slda * N);
float* sdw = (float*)(zdata + blockDim.y * slda * N);
sdw += ty * N;
magmaFloatComplex rA[N] = {MAGMA_C_ZERO};
magmaFloatComplex alpha, tau, tmp, zsum, scale = MAGMA_C_ZERO;
float sum = MAGMA_D_ZERO, norm = MAGMA_D_ZERO, beta;
if( tx == 0 ){
(*info) = 0;
}
// init tau
dtau[tx] = MAGMA_C_ZERO;
// read
#pragma unroll
for(int i = 0; i < N; i++){
rA[i] = dA[ i * ldda + tx ];
}
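    // Main loop: for each column i, build the Householder reflector (v, tau) from the
    // trailing part of column i using shared memory, write the transformed column back
    // to dA, and apply H(i) = I - tau * v * v' to the remaining columns kept in registers.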
#pragma unroll
for(int i = 0; i < N-1; i++){
sA[ i * slda + tx] = rA[i];
sdw[tx] = ( MAGMA_C_REAL(rA[i]) * MAGMA_C_REAL(rA[i]) + MAGMA_C_IMAG(rA[i]) * MAGMA_C_IMAG(rA[i]) );
magmablas_syncwarp();
alpha = sA[i * slda + i];
sum = MAGMA_D_ZERO;
#pragma unroll
for(int j = i; j < N; j++){
sum += sdw[j];
}
norm = sqrt(sum);
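    // LARFG-style Householder generation: beta = -sign(Re(alpha)) * ||A(i:m,i)||,
    // tau = (beta - alpha)/beta (split into real/imaginary parts below),
    // and v is scaled by 1/(alpha - beta).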
beta = -copysign(norm, real(alpha));
scale = MAGMA_C_DIV( MAGMA_C_ONE, alpha - MAGMA_C_MAKE(beta, 0));
tau = MAGMA_C_MAKE( (beta - real(alpha)) / beta, -imag(alpha) / beta );
if(tx == i){
dtau[i] = tau;
}
tmp = (tx == i)? MAGMA_C_MAKE(beta, MAGMA_D_ZERO) : rA[i] * scale;
if(tx >= i){
rA[i] = tmp;
}
dA[ i * ldda + tx ] = rA[i];
rA[i] = (tx == i) ? MAGMA_C_ONE : rA[i];
rA[i] = (tx < i ) ? MAGMA_C_ZERO : rA[i];
tmp = MAGMA_C_CONJ( rA[i] ) * MAGMA_C_CONJ( tau );
magmablas_syncwarp();
#pragma unroll
for(int j = i+1; j < N; j++){
sA[j * slda + tx] = rA[j] * tmp;
}
magmablas_syncwarp();
zsum = MAGMA_C_ZERO;
#pragma unroll
for(int j = i; j < N; j++){
zsum += sA[tx * slda + j];
}
sA[tx * slda + N] = zsum;
magmablas_syncwarp();
#pragma unroll
for(int j = i+1; j < N; j++){
rA[j] -= rA[i] * sA[j * slda + N];
}
magmablas_syncwarp();
}
// write the last column
dA[ (N-1) * ldda + tx ] = rA[N-1];
}
/***************************************************************************//**
Purpose
-------
CGEQRF computes a QR factorization of a complex M-by-N matrix A:
A = Q * R.
This is a batched version of the routine, and works only for small
square matrices of size up to 32.
Arguments
---------
@param[in]
n INTEGER
The size of the matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array on the GPU, dimension (LDDA,N)
On entry, the M-by-N matrix A.
On exit, the elements on and above the diagonal of the array
contain the min(M,N)-by-N upper trapezoidal matrix R (R is
upper triangular if m >= n); the elements below the diagonal,
with the array TAU, represent the orthogonal matrix Q as a
product of min(m,n) elementary reflectors (see Further
Details).
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
To benefit from coalesced memory accesses, LDDA must be
divisible by 16.
@param[out]
dtau_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array, dimension (min(M,N))
The scalar factors of the elementary reflectors (see Further
Details).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
Further Details
---------------
The matrix Q is represented as a product of elementary reflectors
Q = H(1) H(2) . . . H(k), where k = min(m,n).
Each H(i) has the form
H(i) = I - tau * v * v'
where tau is a complex scalar, and v is a complex vector with
v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
and tau in TAU(i).
@ingroup magma_geqrf_batched
*******************************************************************************/
extern "C" magma_int_t
magma_cgeqrf_batched_smallsq(
magma_int_t n,
magmaFloatComplex** dA_array, magma_int_t ldda,
magmaFloatComplex **dtau_array, magma_int_t* info_array,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t arginfo = 0;
magma_int_t m = n;
if( (m < 0) || ( m > 32 ) ){
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
if( m == 0 || n == 0) return 0;
const magma_int_t ntcol = magma_get_cgeqrf_batched_ntcol(m, n);
magma_int_t shmem = ( SLDA(m) * m * sizeof(magmaFloatComplex) );
shmem += ( m * sizeof(float) );
shmem *= ntcol;
magma_int_t nth = magma_ceilpow2(m);
magma_int_t gridx = magma_ceildiv(batchCount, ntcol);
dim3 grid(gridx, 1, 1);
dim3 threads(nth, ntcol, 1);
switch(m){
case 1: cgeqrf_batched_sq1d_reg_kernel< 1><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 2: cgeqrf_batched_sq1d_reg_kernel< 2><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 3: cgeqrf_batched_sq1d_reg_kernel< 3><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 4: cgeqrf_batched_sq1d_reg_kernel< 4><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 5: cgeqrf_batched_sq1d_reg_kernel< 5><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 6: cgeqrf_batched_sq1d_reg_kernel< 6><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 7: cgeqrf_batched_sq1d_reg_kernel< 7><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 8: cgeqrf_batched_sq1d_reg_kernel< 8><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 9: cgeqrf_batched_sq1d_reg_kernel< 9><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 10: cgeqrf_batched_sq1d_reg_kernel<10><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 11: cgeqrf_batched_sq1d_reg_kernel<11><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 12: cgeqrf_batched_sq1d_reg_kernel<12><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 13: cgeqrf_batched_sq1d_reg_kernel<13><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 14: cgeqrf_batched_sq1d_reg_kernel<14><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 15: cgeqrf_batched_sq1d_reg_kernel<15><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 16: cgeqrf_batched_sq1d_reg_kernel<16><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 17: cgeqrf_batched_sq1d_reg_kernel<17><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 18: cgeqrf_batched_sq1d_reg_kernel<18><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 19: cgeqrf_batched_sq1d_reg_kernel<19><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 20: cgeqrf_batched_sq1d_reg_kernel<20><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 21: cgeqrf_batched_sq1d_reg_kernel<21><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 22: cgeqrf_batched_sq1d_reg_kernel<22><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 23: cgeqrf_batched_sq1d_reg_kernel<23><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 24: cgeqrf_batched_sq1d_reg_kernel<24><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 25: cgeqrf_batched_sq1d_reg_kernel<25><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 26: cgeqrf_batched_sq1d_reg_kernel<26><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 27: cgeqrf_batched_sq1d_reg_kernel<27><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 28: cgeqrf_batched_sq1d_reg_kernel<28><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 29: cgeqrf_batched_sq1d_reg_kernel<29><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 30: cgeqrf_batched_sq1d_reg_kernel<30><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 31: cgeqrf_batched_sq1d_reg_kernel<31><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
case 32: cgeqrf_batched_sq1d_reg_kernel<32><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break;
default: printf("error: size %lld is not supported\n", (long long) m);
}
return arginfo;
}
|
66bfcefd7182cd5e33db3fbde74d12bfb1e6565c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <gd.h>
#include <assert.h>
#include <time.h>
#include <string.h>
//array index to km
#define XIND2KM(x) ((x)*(dimx/nx))
#define YIND2KM(y) ((y)*(dimy/ny))
#define FRAMEDELAY 0
#define PHEIGHT 1
#define PWIDTH 1
#define MAXCOLORS 256
#define PRESSURERANGE 2
#define MAXBLOCKS 512
#define MAXTHREAD 512
//globals
double c = 1.5; //speed of wave (km/s)
double lambda = 3; //wave length (km)
double sigma = 4; //width of gaussian disturbance
int dimx = 100; //metric distance of x in km
int dimy = 100; //metric distance of y in km
int nx, ny, sx, sy;
int timesteps = 5;
double *p_arr;
int radius = 50;
FILE *giffile;
gdImagePtr im, previm;
int *colors;
int framecount = 0;
double *buf;
int fps = 10;
int maxdist;
int nthreads = 512;
__device__ int d_nx, d_ny, d_sx, d_sy, d_dimx, d_dimy;
__device__ double d_c, d_lambda, d_sigma;
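//device-side copies of the host globals above; main() fills them with hipMemcpyToSymbol before launching the kernel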
__global__ void calculateWaveProp(double *arr, double time) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
double range = sqrt(pow((x-d_sx)*d_dimx/ d_nx, 2) + pow((y-d_sy)*d_dimy/d_ny, 2));
//gaussian pulse generation based on range and time
double p0 = exp(-0.5 * pow((((d_c * time)-range) / d_sigma ),2));
double p = p0 * cos(2 * M_PI * ((d_c * time)-range)/ d_lambda);
//printf("p[%d][%d]=%3.3f, range=%3.3f, p0=%3.3f,xdim=%d,ydim=%d\n",x,y,p,sqrt(pow(x-d_sx,2) + pow(y-d_sy,2)),p0,d_dimx,d_dimy);
arr[x*d_nx + y] = p;
}
void printPressureArray();
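//write_frame: maps each pressure value (expected in [-1,1], i.e. a span of PRESSURERANGE)
//to one of MAXCOLORS palette entries and appends the frame to the animated gif;
//the first frame also allocates the palette and starts the animation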
void write_frame(double *p_arr, double time) {
im = gdImageCreate(nx*PWIDTH,ny*PHEIGHT);
if (time == 0) {
colors = (int *)malloc(MAXCOLORS*sizeof(int));
for (int j = 0; j < MAXCOLORS; j++) {
colors[j] = gdImageColorAllocate(im, j, 0, MAXCOLORS-j-1);
}
gdImageGifAnimBegin(im,giffile,1,-1);
} else {
gdImagePaletteCopy(im, previm);
}
for (int i = 0; i < nx; i++) {
for (int j = 0; j < ny; j++) {
int color = (int)(((1+p_arr[i*nx + j])*MAXCOLORS)/PRESSURERANGE);
assert(color >= 0);
if (color >= MAXCOLORS) color = MAXCOLORS-1;
gdImageFilledRectangle(im, i*PWIDTH, j*PHEIGHT, (i+1)*PWIDTH-1, (j+1)*PHEIGHT-1, colors[color]);
}
}
if (time == 0) {
//use a large frame delay to give buffer time for eog to open .gif file
gdImageGifAnimAdd(im,giffile, 0, 0, 0, 200, gdDisposalNone, NULL);
} else {
gdImageSetPixel(im, 0, 0, framecount%2);
gdImageGifAnimAdd(im, giffile, 0, 0, 0, FRAMEDELAY , gdDisposalNone, previm);
gdImageDestroy(previm);
}
previm = im;
im = NULL;
framecount++;
#ifdef DEBUG
if (framecount < 10) printPressureArray();
#endif
}
void printArray(double* arr) {
for (int i = 0; i < nx; i++) {
for (int j = 0; j < ny; j++) {
printf("%6.2f ",arr[i*nx + j]);
}
printf("\n");
}
printf("\n");
}
void init(int argc, char *argv[]) {
nx = atoi(argv[1]);
ny = atoi(argv[2]);
sx = atoi(argv[3]);
sy = atoi(argv[4]);
timesteps = atoi(argv[5]);
maxdist = atoi(argv[6]);
if (nx >= ny) {
dimx = maxdist;
dimy = ny*maxdist/nx;
} else {
dimy = maxdist;
dimx = nx*maxdist/ny;
}
long int totalPoints = (long int)nx*ny*timesteps*fps;
printf("Total Points: %ld\n",totalPoints);
//skip the output-size sanity check when the gif will not be opened
char *filename = argv[7];
if (strcmp(filename, "DONOTOPEN.gif") != 0) assert(totalPoints < 500000000);
giffile = fopen(argv[7],"wb");
}
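//three ways to choose the CUDA block decomposition (selected by argv[8] in main):
// 0 - roughly square: about sqrt(nx) x sqrt(ny) blocks
// 1 - as many blocks per dimension as divide the grid evenly, up to MAXBLOCKS
// 2 - favor cache reuse: blocks cover single rows, split maximally (see comment below)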
void calculateEvenSquareDistribution(int *nxb, int *nyb) {
int xb = (int)(sqrt(nx));
while (xb > 0) {
if (nx % xb == 0) break;
xb--;
}
int yb = (int)(sqrt(ny));
while (yb > 0) {
if (ny % yb == 0) break;
yb--;
}
*nxb = xb; *nyb = yb;
}
void calculateEvenMaxDistribution(int *nxb, int *nyb) {
int xb = MAXBLOCKS;
while (xb > 0) {
if (nx % xb == 0 && xb < MAXBLOCKS) break;
xb--;
}
int yb = MAXBLOCKS;
while (yb > 0) {
if (ny % yb == 0 && yb < MAXBLOCKS) break;
yb--;
}
*nxb = xb; *nyb = yb;
}
void calculateMaxCacheHitsDistribution(int *nxb, int *nyb) {
//maximize cache (ie blocks are 1 row but split maximally)
*nxb = nx;
int yb;
if (MAXBLOCKS > nx) {
yb = (int) (sqrt(nx));
} else {yb = nx;}
while (yb > 0) {
if (ny % yb == 0 && yb < MAXBLOCKS) break;
yb--;
}
*nyb = yb;
}
int main(int argc, char *argv[]) {
init(argc, argv);
double *d_p_arr, *p_arr;
//allocate array on CPU
p_arr = (double *)malloc(ny*nx*sizeof(double));
//allocate and copy to GPU
hipMalloc((void**)&d_p_arr, nx*ny*sizeof(double));
//copy CPU globals to GPU variables
hipMemcpyToSymbol(d_c, &c, sizeof(double), 0,hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_nx, &nx, sizeof(int), 0,hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_ny, &ny, sizeof(int), 0,hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_sx, &sx, sizeof(int), 0,hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_sy, &sy, sizeof(int), 0,hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_dimx, &dimx, sizeof(int), 0,hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_dimy, &dimy, sizeof(int), 0,hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_lambda, &lambda, sizeof(double),0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_sigma, &sigma, sizeof(double), 0,hipMemcpyHostToDevice);
//run wave propagation
double timeinc = (double)(1.0/fps); //time increments
int nxblocks = 0;
int nyblocks = 0;
//determine which block distribution scheme to use
int dist_type = atoi(argv[8]);
if (dist_type == 0) calculateEvenSquareDistribution(&nxblocks,&nyblocks);
else if (dist_type == 1) calculateEvenMaxDistribution(&nxblocks,&nyblocks);
else if (dist_type == 2) calculateMaxCacheHitsDistribution(&nxblocks, &nyblocks);
else assert(0);
dim3 numBlocks(nxblocks,nyblocks,1);
dim3 numThreadsPerBlock(nx/nxblocks,ny/nyblocks,1);
clock_t start,end;
start = clock();
for (double t = 0; t < timesteps; t += timeinc) {
hipLaunchKernelGGL(( calculateWaveProp), dim3(numBlocks),dim3(numThreadsPerBlock), 0, 0, d_p_arr, t);
hipDeviceSynchronize();
hipMemcpy(p_arr, d_p_arr, nx*ny*sizeof(double),hipMemcpyDeviceToHost);
write_frame(p_arr, t);
}
//print timings and results
end = clock();
printf("Time: %3.5f\n",((double)(end-start)/CLOCKS_PER_SEC));
printf("Gif of size %dx%d created:\n\tNumber of Frames: %d \n\tFPS: %d \n\tTotal Time: %d\n",nx,ny,timesteps*fps,fps,timesteps);
printf("\tLength of X Dim: %d km\n\tLength of Y Dim: %d km\n", dimx, dimy);
printf("\tX-Dim Blocks: %d\n\tY-Dim Blocks: %d\n\tX-Dim of Threads: %d\n\tY-Dim of Threads: %d\n\n\n",nxblocks, nyblocks, nx/nxblocks, ny/nyblocks);
//free
hipFree(d_p_arr);
gdImageGifAnimEnd(giffile);
fclose(giffile);
free(p_arr);
free(colors);
gdImageDestroy(previm);
return 0;
}
| 66bfcefd7182cd5e33db3fbde74d12bfb1e6565c.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <gd.h>
#include <assert.h>
#include <time.h>
#include <string.h>
//array index to km
#define XIND2KM(x) ((x)*(dimx/nx))
#define YIND2KM(y) ((y)*(dimy/ny))
#define FRAMEDELAY 0
#define PHEIGHT 1
#define PWIDTH 1
#define MAXCOLORS 256
#define PRESSURERANGE 2
#define MAXBLOCKS 512
#define MAXTHREAD 512
//globals
double c = 1.5; //speed of wave (km/s)
double lambda = 3; //wave length (km)
double sigma = 4; //width of gaussian disturbance
int dimx = 100; //metric distance of x in km
int dimy = 100; //metric distance of y in km
int nx, ny, sx, sy;
int timesteps = 5;
double *p_arr;
int radius = 50;
FILE *giffile;
gdImagePtr im, previm;
int *colors;
int framecount = 0;
double *buf;
int fps = 10;
int maxdist;
int nthreads = 512;
__device__ int d_nx, d_ny, d_sx, d_sy, d_dimx, d_dimy;
__device__ double d_c, d_lambda, d_sigma;
__global__ void calculateWaveProp(double *arr, double time) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
double range = sqrt(pow((x-d_sx)*d_dimx/ d_nx, 2) + pow((y-d_sy)*d_dimy/d_ny, 2));
//gaussian pulse generation based on range and time
double p0 = exp(-0.5 * pow((((d_c * time)-range) / d_sigma ),2));
double p = p0 * cos(2 * M_PI * ((d_c * time)-range)/ d_lambda);
//printf("p[%d][%d]=%3.3f, range=%3.3f, p0=%3.3f,xdim=%d,ydim=%d\n",x,y,p,sqrt(pow(x-d_sx,2) + pow(y-d_sy,2)),p0,d_dimx,d_dimy);
arr[x*d_nx + y] = p;
}
void printPressureArray();
void write_frame(double *p_arr, double time) {
im = gdImageCreate(nx*PWIDTH,ny*PHEIGHT);
if (time == 0) {
colors = (int *)malloc(MAXCOLORS*sizeof(int));
for (int j = 0; j < MAXCOLORS; j++) {
colors[j] = gdImageColorAllocate(im, j, 0, MAXCOLORS-j-1);
}
gdImageGifAnimBegin(im,giffile,1,-1);
} else {
gdImagePaletteCopy(im, previm);
}
for (int i = 0; i < nx; i++) {
for (int j = 0; j < ny; j++) {
int color = (int)(((1+p_arr[i*nx + j])*MAXCOLORS)/PRESSURERANGE);
assert(color >= 0);
if (color >= MAXCOLORS) color = MAXCOLORS-1;
gdImageFilledRectangle(im, i*PWIDTH, j*PHEIGHT, (i+1)*PWIDTH-1, (j+1)*PHEIGHT-1, colors[color]);
}
}
if (time == 0) {
//use a large frame delay to give buffer time for eog to open .gif file
gdImageGifAnimAdd(im,giffile, 0, 0, 0, 200, gdDisposalNone, NULL);
} else {
gdImageSetPixel(im, 0, 0, framecount%2);
gdImageGifAnimAdd(im, giffile, 0, 0, 0, FRAMEDELAY , gdDisposalNone, previm);
gdImageDestroy(previm);
}
previm = im;
im = NULL;
framecount++;
#ifdef DEBUG
if (framecount < 10) printPressureArray();
#endif
}
void printArray(double* arr) {
for (int i = 0; i < nx; i++) {
for (int j = 0; j < ny; j++) {
printf("%6.2f ",arr[i*nx + j]);
}
printf("\n");
}
printf("\n");
}
void init(int argc, char *argv[]) {
nx = atoi(argv[1]);
ny = atoi(argv[2]);
sx = atoi(argv[3]);
sy = atoi(argv[4]);
timesteps = atoi(argv[5]);
maxdist = atoi(argv[6]);
if (nx >= ny) {
dimx = maxdist;
dimy = ny*maxdist/nx;
} else {
dimy = maxdist;
dimx = nx*maxdist/ny;
}
long int totalPoints = (long int)nx*ny*timesteps*fps;
printf("Total Points: %ld\n",totalPoints);
//skip the output-size sanity check when the gif will not be opened
char *filename = argv[7];
if (strcmp(filename, "DONOTOPEN.gif") != 0) assert(totalPoints < 500000000);
giffile = fopen(argv[7],"wb");
}
void calculateEvenSquareDistribution(int *nxb, int *nyb) {
int xb = (int)(sqrt(nx));
while (xb > 0) {
if (nx % xb == 0) break;
xb--;
}
int yb = (int)(sqrt(ny));
while (yb > 0) {
if (ny % yb == 0) break;
yb--;
}
*nxb = xb; *nyb = yb;
}
void calculateEvenMaxDistribution(int *nxb, int *nyb) {
int xb = MAXBLOCKS;
while (xb > 0) {
if (nx % xb == 0 && xb < MAXBLOCKS) break;
xb--;
}
int yb = MAXBLOCKS;
while (yb > 0) {
if (ny % yb == 0 && yb < MAXBLOCKS) break;
yb--;
}
*nxb = xb; *nyb = yb;
}
void calculateMaxCacheHitsDistribution(int *nxb, int *nyb) {
//maximize cache (ie blocks are 1 row but split maximally)
*nxb = nx;
int yb;
if (MAXBLOCKS > nx) {
yb = (int) (sqrt(nx));
} else {yb = nx;}
while (yb > 0) {
if (ny % yb == 0 && yb < MAXBLOCKS) break;
yb--;
}
*nyb = yb;
}
int main(int argc, char *argv[]) {
init(argc, argv);
double *d_p_arr, *p_arr;
//allocate array on CPU
p_arr = (double *)malloc(ny*nx*sizeof(double));
//allocate and copy to GPU
cudaMalloc((void**)&d_p_arr, nx*ny*sizeof(double));
//copy CPU globals to GPU variables
cudaMemcpyToSymbol(d_c, &c, sizeof(double), 0,cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_nx, &nx, sizeof(int), 0,cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_ny, &ny, sizeof(int), 0,cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_sx, &sx, sizeof(int), 0,cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_sy, &sy, sizeof(int), 0,cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_dimx, &dimx, sizeof(int), 0,cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_dimy, &dimy, sizeof(int), 0,cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_lambda, &lambda, sizeof(double),0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_sigma, &sigma, sizeof(double), 0,cudaMemcpyHostToDevice);
//run wave propagation
double timeinc = (double)(1.0/fps); //time increments
int nxblocks = 0;
int nyblocks = 0;
//determine which block distribution scheme to use
int dist_type = atoi(argv[8]);
if (dist_type == 0) calculateEvenSquareDistribution(&nxblocks,&nyblocks);
else if (dist_type == 1) calculateEvenMaxDistribution(&nxblocks,&nyblocks);
else if (dist_type == 2) calculateMaxCacheHitsDistribution(&nxblocks, &nyblocks);
else assert(0);
dim3 numBlocks(nxblocks,nyblocks,1);
dim3 numThreadsPerBlock(nx/nxblocks,ny/nyblocks,1);
clock_t start,end;
start = clock();
for (double t = 0; t < timesteps; t += timeinc) {
calculateWaveProp<<<numBlocks,numThreadsPerBlock>>>(d_p_arr, t);
cudaDeviceSynchronize();
cudaMemcpy(p_arr, d_p_arr, nx*ny*sizeof(double),cudaMemcpyDeviceToHost);
write_frame(p_arr, t);
}
//print timings and results
end = clock();
printf("Time: %3.5f\n",((double)(end-start)/CLOCKS_PER_SEC));
printf("Gif of size %dx%d created:\n\tNumber of Frames: %d \n\tFPS: %d \n\tTotal Time: %d\n",nx,ny,timesteps*fps,fps,timesteps);
printf("\tLength of X Dim: %d km\n\tLength of Y Dim: %d km\n", dimx, dimy);
printf("\tX-Dim Blocks: %d\n\tY-Dim Blocks: %d\n\tX-Dim of Threads: %d\n\tY-Dim of Threads: %d\n\n\n",nxblocks, nyblocks, nx/nxblocks, ny/nyblocks);
//free
cudaFree(d_p_arr);
gdImageGifAnimEnd(giffile);
fclose(giffile);
free(p_arr);
free(colors);
gdImageDestroy(previm);
return 0;
}
|
83b5385815ac6bc074b315e98785ee27dfe5a01e.hip | // !!! This is a file automatically generated by hipify!!!
//xfail:BOOGIE_ERROR
//--blockDim=64 --gridDim=64 --no-inline
//
#include "hip/hip_runtime.h"
__global__ void foo() {
__shared__ int a;
a = threadIdx.x;
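// every thread writes the shared variable a - presumably the write-write race that the xfail:BOOGIE_ERROR annotation above expects the verifier to report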
}
| 83b5385815ac6bc074b315e98785ee27dfe5a01e.cu | //xfail:BOOGIE_ERROR
//--blockDim=64 --gridDim=64 --no-inline
//
#include "cuda.h"
__global__ void foo() {
__shared__ int a;
a = threadIdx.x;
}
|
a2d43dfe0010430492c1da0a5cea719150522ec8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file
* nd convolution transform on the GPU with CUDA.
*
* \todo support different boundary conditions
* \todo support other filter types (it's nice to have integer arithmentic sometimes)
* \todo respect stride vs shape differences
* \todo relax warp alignment constraints for problem shape.
*/
#include "common.h"
#define MAX_FILTER_WIDTH 32 // max size of kernel (allocated in constant memory)
// actual max depends on kernel launch parameters
/**
* Treat the nd data as 3d around idim:
* \a idim is the dimension to be convolved.
* Dimensions less than \a idim are contiguous, and may be collapsed together.
* Dimensions greater than \a idim may also be collapased together.
*/
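// Example: a 4-d array of shape {s0,s1,s2,s3} convolved along idim==2 collapses to
// ncols = s0*s1 (the contiguous dimensions), nrows = s2 (the convolved dimension),
// and nplanes = s3, matching the stride arithmetic in the constructor below.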
struct arg_t
{ unsigned char *restrict data;
int nrows, // It's important these are signed
ncols,
nplanes,
rstride, ///< row stride. strides are in elements (not bytes)
cstride, ///< column stride
pstride; ///< plane stride
#undef LOG
#define LOG(...) ndLogError(a,__VA_ARGS__)
arg_t(nd_t a,unsigned idim):
data(0),nrows(0),ncols(0),nplanes(0),rstride(0),cstride(0)
{
TRY(ndstrides(a)[0]==ndbpp(a)); // only support unit element strides
TRY(data=(unsigned char*)nddata(a));
cstride=1;
if(idim==0)
{ rstride=(int)(ndstrides(a)[1]/ndstrides(a)[0]);
pstride=1; //won't be used
ncols=(int)ndshape(a)[0];
nrows=(int)(ndstrides(a)[ndndim(a)]/ndstrides(a)[1]);
nplanes=1;
}
else // idim>0
{ rstride=(int)(ndstrides(a)[idim] /ndstrides(a)[0]);
pstride=(int)(ndstrides(a)[idim+1] /ndstrides(a)[0]);
ncols =(int)(ndstrides(a)[idim] /ndstrides(a)[0]);
nrows =(int)(ndstrides(a)[idim+1] /ndstrides(a)[idim]);
nplanes=(int)(ndstrides(a)[ndndim(a)]/ndstrides(a)[idim+1]);
}
Error:
return; // data will be 0 if there was an error
}
bool isok() { return data!=0;}
};
__constant__ float c_kernel[MAX_FILTER_WIDTH];
#undef LOG
#define LOG(...) printf(__VA_ARGS__)
static unsigned upload_kernel(float *kernel,unsigned n)
{ TRY(n<MAX_FILTER_WIDTH);
CUTRY(hipMemcpyToSymbol(c_kernel,kernel,n*sizeof(float)));
return 1;
Error:
return 0;
}
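/* conv1_rows: convolution along the contiguous (row) direction. Each BX x BY thread
 * block stages WORK*BX elements per row in shared memory plus HALO*BX halo elements
 * on each side; out-of-range halo reads are clamped to the edge value, i.e. the
 * replicate boundary condition checked for in ndconv1_cuda(). */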
template<
typename T,
unsigned BX ,//=32, // should be 1 warp's worth
unsigned BY ,//=4, // the number of warps to schedule for a single block
unsigned HALO,//=1, // the number of halo elements to load per thread per side
unsigned WORK //=8, // each thread processes this many elements in a row
>
__global__ void
__launch_bounds__(BX*BY,1) /*max threads,min blocks*/
conv1_rows(arg_t dst_,arg_t src_,int radius, const nd_conv_params_t param)
{
__shared__ T buf[BY][(WORK+2*HALO)*BX];
T *dst=(T*)dst_.data,
*src=(T*)src_.data;
const int ox=threadIdx.x+(blockIdx.x*WORK-HALO)*BX,
oy=threadIdx.y+ blockIdx.y *BY;
if(oy<dst_.nrows)
{ // LOAD
src+=ox+oy*(int)src_.rstride+(int)(blockIdx.z*src_.pstride);
dst+=ox+oy*(int)dst_.rstride+(int)(blockIdx.z*dst_.pstride);
#pragma unroll
for(int i=0 ;i<HALO ;++i) buf[threadIdx.y][threadIdx.x+i*BX]=(ox>=-i*(int)BX)?src[i*BX]:src[-ox]; // clamp to edge boundary condition
switch(gridDim.x-blockIdx.x)
{
case 1:// last block...might hang off an unaligned edge
#pragma unroll
for(int i=HALO ;i<WORK+2*HALO;++i) buf[threadIdx.y][threadIdx.x+i*BX]=(src_.ncols-ox>i*BX)?src[i*BX]:src[src_.ncols-ox-1]; // clamp to edge boundary condition
break;
case 2:// next to last block: bounds check end halo
#pragma unroll
for(int i=HALO ;i<WORK+ HALO;++i) buf[threadIdx.y][threadIdx.x+i*BX]=src[i*BX];
#pragma unroll
for(int i=HALO+WORK;i<WORK+2*HALO;++i) buf[threadIdx.y][threadIdx.x+i*BX]=(src_.ncols-ox>i*BX)?src[i*BX]:src[src_.ncols-ox-1]; // clamp to edge boundary condition
break;
default:// not last block...everything should be in bounds
#pragma unroll
for(int i=HALO ;i<WORK+2*HALO;++i) buf[threadIdx.y][threadIdx.x+i*BX]=src[i*BX];
}
// COMPUTE
__syncthreads();
if(blockIdx.x!=(gridDim.x-1))
{
#pragma unroll
for(int i=HALO;i<HALO+WORK;++i)
{ float sum=0.0f;
sum+=c_kernel[radius]*buf[threadIdx.y][threadIdx.x+i*BX];
for(int j=1;j<=radius;++j)
{ sum+=c_kernel[radius-j]*buf[threadIdx.y][threadIdx.x+i*BX-j];
sum+=c_kernel[radius+j]*buf[threadIdx.y][threadIdx.x+i*BX+j];
}
dst[i*BX]=sum;
}
} else
{ // last block
#pragma unroll
for(int i=HALO;i<HALO+WORK;++i)
{ if(dst_.ncols-ox>i*BX)
{ float sum=0.0f;
sum+=c_kernel[radius]*buf[threadIdx.y][threadIdx.x+i*BX];
for(int j=1;j<=radius;++j)
{ sum+=c_kernel[radius-j]*buf[threadIdx.y][threadIdx.x+i*BX-j];
sum+=c_kernel[radius+j]*buf[threadIdx.y][threadIdx.x+i*BX+j];
}
dst[i*BX]=sum;
}
}
}
}
}
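/* conv1_cols: the same scheme for the strided direction (used when idim > 0). Each
 * thread walks WORK elements along rstride; the tile is staged transposed in shared
 * memory with a +1 pad on the fast index, a common trick to keep the strided
 * accesses from hitting a single shared-memory bank. */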
template<
typename T,
unsigned BX , // should be 1 warp's worth
unsigned BY , // the number of warps to schedule for a single block
unsigned HALO, // the number of halo elements to load per thread per side
unsigned WORK // each thread processes this many elements in a row
>
__global__ void
__launch_bounds__(BX*BY,1) /*max threads,min blocks*/
conv1_cols(arg_t dst_, arg_t src_, int radius, const nd_conv_params_t param)
{ __shared__ T buf[BX][(WORK+2*HALO)*BY+1];
T *dst=(T*)dst_.data,
*src=(T*)src_.data;
const int ox=threadIdx.x+ blockIdx.x *BX,
oy=threadIdx.y+(blockIdx.y*WORK-HALO)*BY;
if(ox<dst_.ncols)
{ src+=ox+oy*src_.rstride+(int)(blockIdx.z*src_.pstride);
dst+=ox+oy*dst_.rstride+(int)(blockIdx.z*dst_.pstride);
}else
{ src+=(src_.ncols-1)+oy*src_.rstride+(int)(blockIdx.z*src_.pstride); // clamp to edge boundary condition
dst+=(dst_.ncols-1)+oy*dst_.rstride+(int)(blockIdx.z*dst_.pstride); // clamp to edge boundary condition
}
// LOAD
#pragma unroll
for(int i=0 ;i<HALO ;++i) buf[threadIdx.x][threadIdx.y+i*BY]=(oy>=-i*(int)BY) ?src[i*BY*src_.rstride]:src[-oy*src_.rstride]; // clamp to edge boundary condition
switch(gridDim.y-blockIdx.y)
{ case 1: // last block: bounds check every access
#pragma unroll
for(int i=HALO ;i<WORK+2*HALO;++i) buf[threadIdx.x][threadIdx.y+i*BY]=(src_.nrows-oy>i*BY)?src[i*BY*src_.rstride]:src[(src_.nrows-oy-1)*src_.rstride]; // clamp to edge boundary condition
break;
case 2: // next to last block: bounds check end halo
#pragma unroll
for(int i=HALO ;i<WORK+ HALO;++i) buf[threadIdx.x][threadIdx.y+i*BY]=src[i*BY*src_.rstride];
#pragma unroll
for(int i=WORK+HALO;i<WORK+2*HALO;++i) buf[threadIdx.x][threadIdx.y+i*BY]=(src_.nrows-oy>i*BY)?src[i*BY*src_.rstride]:src[(src_.nrows-oy-1)*src_.rstride]; // clamp to edge boundary condition
break;
default: // no bounds checking
#pragma unroll
for(int i=HALO ;i<WORK+2*HALO;++i) buf[threadIdx.x][threadIdx.y+i*BY]=src[i*BY*src_.rstride];
}
// COMPUTE
__syncthreads();
if(blockIdx.y!=(gridDim.y-1))
{
#pragma unroll
for(int i=HALO;i<HALO+WORK;++i)
{ float sum=0.0f;
sum+=c_kernel[radius]*buf[threadIdx.x][threadIdx.y+i*BY];
for(int j=1;j<=radius;++j)
{ sum+=c_kernel[radius-j]*buf[threadIdx.x][threadIdx.y+i*BY-j];
sum+=c_kernel[radius+j]*buf[threadIdx.x][threadIdx.y+i*BY+j];
}
dst[i*BY*dst_.rstride]=sum;
}
} else // last block
{
#pragma unroll
for(int i=HALO;i<HALO+WORK;++i)
{ if(dst_.nrows-oy>i*BY)
{ float sum=0.0f;
sum+=c_kernel[radius]*buf[threadIdx.x][threadIdx.y+i*BY];
for(int j=1;j<=radius;++j)
{ sum+=c_kernel[radius-j]*buf[threadIdx.x][threadIdx.y+i*BY-j];
sum+=c_kernel[radius+j]*buf[threadIdx.x][threadIdx.y+i*BY+j];
}
dst[i*BY*dst_.rstride]=sum;
}
}
}
}
//
// === Interface ===
//
#undef LOG
#define LOG(...) ndLogError(dst_, __VA_ARGS__)
/**
* Assume the ndkind() of \a src_ and \a dst_ have already been checked.
*/
extern "C" unsigned ndconv1_cuda(nd_t dst_,nd_t src_,const nd_t filter_, const unsigned idim, const nd_conv_params_t *param)
{ arg_t dst(dst_,idim),
src(src_,idim);
unsigned radius;
CUTRY(hipGetLastError());
// check args
TRY(param->boundary_condition==nd_boundary_replicate); // only support this boundary condition for now
TRY(dst.isok());
TRY(ndtype(filter_)==nd_f32); // only float kernels supported at the moment
radius=(int)(ndnelem(filter_)/2);
TRY(2*radius+1==ndnelem(filter_)); // filter has odd size
TRY(ndnelem(filter_)<MAX_FILTER_WIDTH);
TRY(upload_kernel((float*)nddata(filter_),(unsigned)ndnelem(filter_))); /// \todo Ideally I'd only upload the kernel once and then do the seperable convolution on each dimension
/// @cond DEFINES
if(idim==0)
{ //
// ROW-WISE
//
// Max ncols=(WORK*BX)*MAX_BLOCK=2^8*2^16=2^24 -- max src->shape[0]
const unsigned BX=32,BY=8,HALO=1,WORK=8;
dim3 blocks((unsigned)ceil(src.ncols/(float)(WORK*BX)), (unsigned)ceil(src.nrows/(float)BY), src.nplanes);
dim3 threads(BX,BY);
int maxGridY, rem=blocks.y;
CUTRY(hipDeviceGetAttribute(&maxGridY,hipDeviceAttributeMaxGridDimY,0/*device id*/));
while(rem) //Process as many rows as possible per launch
{ blocks.y=min(maxGridY,rem);
#define CASE(T)hipLaunchKernelGGL(( conv1_rows<T,BX,BY,HALO,WORK>), dim3(blocks),dim3(threads),0,(hipStream_t)ndCudaStream(src_), dst,src,radius,*param); break
{TYPECASE(ndtype(src_));}
#undef CASE
rem-=blocks.y;
src.data+=blocks.y*src.rstride*ndstrides(src_)[0];
dst.data+=blocks.y*dst.rstride*ndstrides(dst_)[0];
}
} else
{ //
// COLUMN-WISE
//
// MAX ncols = BX *MAX_BLOCKS=2^5*2^16=2M -- prod(src->shape[0:i])
// MAX nrows =(WORK*BY)*MAX_BLOCKS=2^6*2^16=4M -- src->shape[i]
// MAX nplanes= MAX_BLOCKS=2^6*2^16=4M -- prod(src->shape[i:end])
const unsigned BX=32,BY=8,WORK=8,HALO=4;
dim3 blocks((unsigned)ceil(src.ncols/(float)BX), (unsigned)ceil(src.nrows/(float)(WORK*BY)), src.nplanes);
dim3 threads(BX,BY);
TRY(BY*HALO>=radius); // radius can't be too big
int maxGridX, rem=blocks.x;
CUTRY(hipDeviceGetAttribute(&maxGridX,hipDeviceAttributeMaxGridDimX,0/*device id*/));
while(rem) // Process as many columns as possible per launch
{ blocks.x=min(maxGridX,rem);
#define CASE(T)hipLaunchKernelGGL(( conv1_cols<T,BX,BY,HALO,WORK>), dim3(blocks),dim3(threads), 0, 0, dst,src,radius,*param); break
TYPECASE(ndtype(dst_));
#undef CASE
rem-=blocks.x;
src.data+=blocks.x*ndstrides(src_)[0];
dst.data+=blocks.x*ndstrides(dst_)[0];
}
}
/// @endcond
CUTRY(hipGetLastError());
return 1;
Error:
return 0;
} | a2d43dfe0010430492c1da0a5cea719150522ec8.cu | /**
* \file
* nd convolution transform on the GPU with CUDA.
*
* \todo support different boundary conditions
* \todo support other filter types (it's nice to have integer arithmentic sometimes)
* \todo respect stride vs shape differences
* \todo relax warp alignment constraints for problem shape.
*/
#include "common.h"
#define MAX_FILTER_WIDTH 32 // max size of kernel (allocated in constant memory)
// actual max depends on kernel launch parameters
/**
* Treat the nd data as 3d around idim:
* \a idim is the dimension to be convolved.
* Dimensions less than \a idim are contiguous, and may be collapsed together.
* Dimensions greater than \a idim may also be collapased together.
*/
struct arg_t
{ unsigned char *restrict data;
int nrows, // It's important these are signed
ncols,
nplanes,
rstride, ///< row stride. strides are in elements (not bytes)
cstride, ///< column stride
pstride; ///< plane stride
#undef LOG
#define LOG(...) ndLogError(a,__VA_ARGS__)
arg_t(nd_t a,unsigned idim):
data(0),nrows(0),ncols(0),nplanes(0),rstride(0),cstride(0)
{
TRY(ndstrides(a)[0]==ndbpp(a)); // only support unit element strides
TRY(data=(unsigned char*)nddata(a));
cstride=1;
if(idim==0)
{ rstride=(int)(ndstrides(a)[1]/ndstrides(a)[0]);
pstride=1; //won't be used
ncols=(int)ndshape(a)[0];
nrows=(int)(ndstrides(a)[ndndim(a)]/ndstrides(a)[1]);
nplanes=1;
}
else // idim>0
{ rstride=(int)(ndstrides(a)[idim] /ndstrides(a)[0]);
pstride=(int)(ndstrides(a)[idim+1] /ndstrides(a)[0]);
ncols =(int)(ndstrides(a)[idim] /ndstrides(a)[0]);
nrows =(int)(ndstrides(a)[idim+1] /ndstrides(a)[idim]);
nplanes=(int)(ndstrides(a)[ndndim(a)]/ndstrides(a)[idim+1]);
}
Error:
return; // data will be 0 if there was an error
}
bool isok() { return data!=0;}
};
__constant__ float c_kernel[MAX_FILTER_WIDTH];
#undef LOG
#define LOG(...) printf(__VA_ARGS__)
static unsigned upload_kernel(float *kernel,unsigned n)
{ TRY(n<MAX_FILTER_WIDTH);
CUTRY(cudaMemcpyToSymbol(c_kernel,kernel,n*sizeof(float)));
return 1;
Error:
return 0;
}
template<
typename T,
unsigned BX ,//=32, // should be 1 warp's worth
unsigned BY ,//=4, // the number of warps to schedule for a single block
unsigned HALO,//=1, // the number of halo elements to load per thread per side
unsigned WORK //=8, // each thread processes this many elements in a row
>
__global__ void
__launch_bounds__(BX*BY,1) /*max threads,min blocks*/
conv1_rows(arg_t dst_,arg_t src_,int radius, const nd_conv_params_t param)
{
__shared__ T buf[BY][(WORK+2*HALO)*BX];
T *dst=(T*)dst_.data,
*src=(T*)src_.data;
const int ox=threadIdx.x+(blockIdx.x*WORK-HALO)*BX,
oy=threadIdx.y+ blockIdx.y *BY;
if(oy<dst_.nrows)
{ // LOAD
src+=ox+oy*(int)src_.rstride+(int)(blockIdx.z*src_.pstride);
dst+=ox+oy*(int)dst_.rstride+(int)(blockIdx.z*dst_.pstride);
#pragma unroll
for(int i=0 ;i<HALO ;++i) buf[threadIdx.y][threadIdx.x+i*BX]=(ox>=-i*(int)BX)?src[i*BX]:src[-ox]; // clamp to edge boundary condition
switch(gridDim.x-blockIdx.x)
{
case 1:// last block...might hang off an unaligned edge
#pragma unroll
for(int i=HALO ;i<WORK+2*HALO;++i) buf[threadIdx.y][threadIdx.x+i*BX]=(src_.ncols-ox>i*BX)?src[i*BX]:src[src_.ncols-ox-1]; // clamp to edge boundary condition
break;
case 2:// next to last block: bounds check end halo
#pragma unroll
for(int i=HALO ;i<WORK+ HALO;++i) buf[threadIdx.y][threadIdx.x+i*BX]=src[i*BX];
#pragma unroll
for(int i=HALO+WORK;i<WORK+2*HALO;++i) buf[threadIdx.y][threadIdx.x+i*BX]=(src_.ncols-ox>i*BX)?src[i*BX]:src[src_.ncols-ox-1]; // clamp to edge boundary condition
break;
default:// not last block...everything should be in bounds
#pragma unroll
for(int i=HALO ;i<WORK+2*HALO;++i) buf[threadIdx.y][threadIdx.x+i*BX]=src[i*BX];
}
// COMPUTE
__syncthreads();
if(blockIdx.x!=(gridDim.x-1))
{
#pragma unroll
for(int i=HALO;i<HALO+WORK;++i)
{ float sum=0.0f;
sum+=c_kernel[radius]*buf[threadIdx.y][threadIdx.x+i*BX];
for(int j=1;j<=radius;++j)
{ sum+=c_kernel[radius-j]*buf[threadIdx.y][threadIdx.x+i*BX-j];
sum+=c_kernel[radius+j]*buf[threadIdx.y][threadIdx.x+i*BX+j];
}
dst[i*BX]=sum;
}
} else
{ // last block
#pragma unroll
for(int i=HALO;i<HALO+WORK;++i)
{ if(dst_.ncols-ox>i*BX)
{ float sum=0.0f;
sum+=c_kernel[radius]*buf[threadIdx.y][threadIdx.x+i*BX];
for(int j=1;j<=radius;++j)
{ sum+=c_kernel[radius-j]*buf[threadIdx.y][threadIdx.x+i*BX-j];
sum+=c_kernel[radius+j]*buf[threadIdx.y][threadIdx.x+i*BX+j];
}
dst[i*BX]=sum;
}
}
}
}
}
template<
typename T,
unsigned BX , // should be 1 warp's worth
unsigned BY , // the number of warps to schedule for a single block
unsigned HALO, // the number of halo elements to load per thread per side
unsigned WORK // each thread processes this many elements in a row
>
__global__ void
__launch_bounds__(BX*BY,1) /*max threads,min blocks*/
conv1_cols(arg_t dst_, arg_t src_, int radius, const nd_conv_params_t param)
{ __shared__ T buf[BX][(WORK+2*HALO)*BY+1];
T *dst=(T*)dst_.data,
*src=(T*)src_.data;
const int ox=threadIdx.x+ blockIdx.x *BX,
oy=threadIdx.y+(blockIdx.y*WORK-HALO)*BY;
if(ox<dst_.ncols)
{ src+=ox+oy*src_.rstride+(int)(blockIdx.z*src_.pstride);
dst+=ox+oy*dst_.rstride+(int)(blockIdx.z*dst_.pstride);
}else
{ src+=(src_.ncols-1)+oy*src_.rstride+(int)(blockIdx.z*src_.pstride); // clamp to edge boundary condition
dst+=(dst_.ncols-1)+oy*dst_.rstride+(int)(blockIdx.z*dst_.pstride); // clamp to edge boundary condition
}
// LOAD
#pragma unroll
for(int i=0 ;i<HALO ;++i) buf[threadIdx.x][threadIdx.y+i*BY]=(oy>=-i*(int)BY) ?src[i*BY*src_.rstride]:src[-oy*src_.rstride]; // clamp to edge boundary condition
switch(gridDim.y-blockIdx.y)
{ case 1: // last block: bounds check every access
#pragma unroll
for(int i=HALO ;i<WORK+2*HALO;++i) buf[threadIdx.x][threadIdx.y+i*BY]=(src_.nrows-oy>i*BY)?src[i*BY*src_.rstride]:src[(src_.nrows-oy-1)*src_.rstride]; // clamp to edge boundary condition
break;
case 2: // next to last block: bounds check end halo
#pragma unroll
for(int i=HALO ;i<WORK+ HALO;++i) buf[threadIdx.x][threadIdx.y+i*BY]=src[i*BY*src_.rstride];
#pragma unroll
for(int i=WORK+HALO;i<WORK+2*HALO;++i) buf[threadIdx.x][threadIdx.y+i*BY]=(src_.nrows-oy>i*BY)?src[i*BY*src_.rstride]:src[(src_.nrows-oy-1)*src_.rstride]; // clamp to edge boundary condition
break;
default: // no bounds checking
#pragma unroll
for(int i=HALO ;i<WORK+2*HALO;++i) buf[threadIdx.x][threadIdx.y+i*BY]=src[i*BY*src_.rstride];
}
// COMPUTE
__syncthreads();
if(blockIdx.y!=(gridDim.y-1))
{
#pragma unroll
for(int i=HALO;i<HALO+WORK;++i)
{ float sum=0.0f;
sum+=c_kernel[radius]*buf[threadIdx.x][threadIdx.y+i*BY];
for(int j=1;j<=radius;++j)
{ sum+=c_kernel[radius-j]*buf[threadIdx.x][threadIdx.y+i*BY-j];
sum+=c_kernel[radius+j]*buf[threadIdx.x][threadIdx.y+i*BY+j];
}
dst[i*BY*dst_.rstride]=sum;
}
} else // last block
{
#pragma unroll
for(int i=HALO;i<HALO+WORK;++i)
{ if(dst_.nrows-oy>i*BY)
{ float sum=0.0f;
sum+=c_kernel[radius]*buf[threadIdx.x][threadIdx.y+i*BY];
for(int j=1;j<=radius;++j)
{ sum+=c_kernel[radius-j]*buf[threadIdx.x][threadIdx.y+i*BY-j];
sum+=c_kernel[radius+j]*buf[threadIdx.x][threadIdx.y+i*BY+j];
}
dst[i*BY*dst_.rstride]=sum;
}
}
}
}
//
// === Interface ===
//
#undef LOG
#define LOG(...) ndLogError(dst_, __VA_ARGS__)
/**
* Assume the ndkind() of \a src_ and \a dst_ have already been checked.
*/
extern "C" unsigned ndconv1_cuda(nd_t dst_,nd_t src_,const nd_t filter_, const unsigned idim, const nd_conv_params_t *param)
{ arg_t dst(dst_,idim),
src(src_,idim);
unsigned radius;
CUTRY(cudaGetLastError());
// check args
TRY(param->boundary_condition==nd_boundary_replicate); // only support this boundary condition for now
TRY(dst.isok());
TRY(ndtype(filter_)==nd_f32); // only float kernels supported at the moment
radius=(int)(ndnelem(filter_)/2);
TRY(2*radius+1==ndnelem(filter_)); // filter has odd size
TRY(ndnelem(filter_)<MAX_FILTER_WIDTH);
TRY(upload_kernel((float*)nddata(filter_),(unsigned)ndnelem(filter_))); /// \todo Ideally I'd only upload the kernel once and then do the seperable convolution on each dimension
/// @cond DEFINES
if(idim==0)
{ //
// ROW-WISE
//
// Max ncols=(WORK*BX)*MAX_BLOCK=2^8*2^16=2^24 -- max src->shape[0]
const unsigned BX=32,BY=8,HALO=1,WORK=8;
dim3 blocks((unsigned)ceil(src.ncols/(float)(WORK*BX)), (unsigned)ceil(src.nrows/(float)BY), src.nplanes);
dim3 threads(BX,BY);
int maxGridY, rem=blocks.y;
CUTRY(cudaDeviceGetAttribute(&maxGridY,cudaDevAttrMaxGridDimY,0/*device id*/));
while(rem) //Process as many rows as possible per launch
{ blocks.y=min(maxGridY,rem);
#define CASE(T) conv1_rows<T,BX,BY,HALO,WORK><<<blocks,threads,0,(cudaStream_t)ndCudaStream(src_)>>>(dst,src,radius,*param); break
{TYPECASE(ndtype(src_));}
#undef CASE
rem-=blocks.y;
src.data+=blocks.y*src.rstride*ndstrides(src_)[0];
dst.data+=blocks.y*dst.rstride*ndstrides(dst_)[0];
}
} else
{ //
// COLUMN-WISE
//
// MAX ncols = BX *MAX_BLOCKS=2^5*2^16=2M -- prod(src->shape[0:i])
// MAX nrows =(WORK*BY)*MAX_BLOCKS=2^6*2^16=4M -- src->shape[i]
// MAX nplanes= MAX_BLOCKS=2^6*2^16=4M -- prod(src->shape[i:end])
const unsigned BX=32,BY=8,WORK=8,HALO=4;
dim3 blocks((unsigned)ceil(src.ncols/(float)BX), (unsigned)ceil(src.nrows/(float)(WORK*BY)), src.nplanes);
dim3 threads(BX,BY);
TRY(BY*HALO>=radius); // radius can't be too big
int maxGridX, rem=blocks.x;
CUTRY(cudaDeviceGetAttribute(&maxGridX,cudaDevAttrMaxGridDimX,0/*device id*/));
while(rem) // Process as many columns as possible per launch
{ blocks.x=min(maxGridX,rem);
#define CASE(T) conv1_cols<T,BX,BY,HALO,WORK><<<blocks,threads>>>(dst,src,radius,*param); break
TYPECASE(ndtype(dst_));
#undef CASE
rem-=blocks.x;
src.data+=blocks.x*ndstrides(src_)[0];
dst.data+=blocks.x*ndstrides(dst_)[0];
}
}
/// @endcond
CUTRY(cudaGetLastError());
return 1;
Error:
return 0;
} |
7f40d5549ba2ce2da0fb7cba99b45d0643ac5ddd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cuda_radiation_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((cuda_radiation_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
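//warm-up launches, excluded from the timing below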
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((cuda_radiation_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, );
}
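//timed region: 1000 kernel launches (asynchronous; no synchronize before 'end', so this times launch submission)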
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((cuda_radiation_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7f40d5549ba2ce2da0fb7cba99b45d0643ac5ddd.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cuda_radiation_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cuda_radiation_kernel<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cuda_radiation_kernel<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cuda_radiation_kernel<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
da4e72de496579ff55cd4a19d450f28bfd35a1ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Tingxing Dong
@author Azzam Haidar
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define PRECISION_z
#define NB 256 //NB is the 1st level blocking in recursive blocking, NUM_THREADS is the 2nd level, NB=256, NUM_THREADS=64 is optimal for batched
#define NUM_THREADS 128 //64 //128
#define BLOCK_SIZE_N 128
#define DIM_X_N 128
#define DIM_Y_N 1
#define BLOCK_SIZE_T 32
#define DIM_X_T 16
#define DIM_Y_T 8
#include "ztrsv_template_device.cuh"
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
extern __shared__ magmaDoubleComplex shared_data[];
//==============================================================================
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag>
__global__ void
ztrsv_notrans_kernel_outplace(
int n,
const magmaDoubleComplex * __restrict__ A, int lda,
magmaDoubleComplex *b, int incb,
magmaDoubleComplex *x)
{
ztrsv_notrans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
//==============================================================================
template<const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag>
__global__ void
ztrsv_trans_kernel_outplace(
int n,
const magmaDoubleComplex * __restrict__ A, int lda,
magmaDoubleComplex *b, int incb,
magmaDoubleComplex *x)
{
ztrsv_trans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
//==============================================================================
extern "C" void
magmablas_ztrsv_outofplace(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaDoubleComplex_const_ptr A, magma_int_t lda,
magmaDoubleComplex_ptr b, magma_int_t incb,
magmaDoubleComplex_ptr x,
magma_queue_t queue,
magma_int_t flag=0)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
dim3 threads( NUM_THREADS );
dim3 blocks( 1, 1, 1 );
size_t shmem = n * sizeof(magmaDoubleComplex);
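    // A single thread block (grid 1x1x1) solves the whole triangular system, using an
    // n-element shared workspace. The nested branches below pick one template
    // instantiation per (uplo, trans, diag, flag) combination; flag != 0 means the
    // kernel sees an already-updated x (see the note at the end of the file).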
if (trans == MagmaNoTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit)
{
if (flag == 0) {
hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0) {
hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
else //Lower
{
if (diag == MagmaNonUnit)
{
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaUnit>)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaUnit>)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
}
else if (trans == MagmaTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
}
else if (trans == MagmaConjTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
else {
hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A, lda, b, incb, x);
}
}
}
}
}
/*
    README: flag decides whether ztrsv_outofplace sees an updated x or not. 0: No; other: Yes
    In the recursive version, flag must be nonzero except for the 1st call
*/
extern "C" void
magmablas_ztrsv_recursive_outofplace(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaDoubleComplex_const_ptr A, magma_int_t lda,
magmaDoubleComplex_ptr b, magma_int_t incb,
magmaDoubleComplex_ptr x,
magma_queue_t queue)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
//Init x with zero
//magmablas_zlaset( MagmaFull, n, incb, MAGMA_Z_ZERO, MAGMA_Z_ZERO, x, n, queue );
magma_int_t col = n;
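    // Blocked substitution: for each NB-wide diagonal block, a gemv first applies the
    // already-solved part of the vector to the workspace x, then the single-block
    // out-of-place kernel solves the triangular tile A(col, col).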
if (trans == MagmaNoTrans)
{
for (magma_int_t i=0; i < n; i+= NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaUpper)
{
col -= jb;
//assume x_array contains zero elements, magmablas_zgemv will cause slow down
magma_zgemv( MagmaNoTrans, jb, i, MAGMA_Z_ONE, A(col, col+jb), lda,
x+col+jb, 1, MAGMA_Z_ONE, x+col, 1, queue );
}
else
{
col = i;
magma_zgemv( MagmaNoTrans, jb, i, MAGMA_Z_ONE, A(col, 0), lda,
x, 1, MAGMA_Z_ONE, x+col, 1, queue );
}
magmablas_ztrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
}
}
else
{
for (magma_int_t i=0; i < n; i += NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaLower)
{
col -= jb;
magma_zgemv( MagmaConjTrans, i, jb, MAGMA_Z_ONE, A(col+jb, col), lda, x+col+jb, 1, MAGMA_Z_ONE, x+col, 1, queue );
}
else
{
col = i;
magma_zgemv( MagmaConjTrans, i, jb, MAGMA_Z_ONE, A(0, col), lda, x, 1, MAGMA_Z_ONE, x+col, 1, queue );
}
magmablas_ztrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
}
}
}
//==============================================================================
/**
Purpose
-------
ztrsv solves one of the matrix equations on gpu
op(A)*x = B, or
x*op(A) = B,
    where x and B are n-element vectors, A is a unit, or
non-unit, upper or lower triangular matrix and op(A) is one of
op(A) = A, or
op(A) = A^T, or
op(A) = A^H.
The vector x is overwritten on b.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
trans magma_trans_t.
On entry, trans specifies the form of op(A) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op(A) = A.
- = MagmaTrans: op(A) = A^T.
- = MagmaConjTrans: op(A) = A^H.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
            On entry, n specifies the order of the matrix A. n >= 0.
@param[in]
dA COMPLEX_16 array of dimension ( lda, n )
Before entry with uplo = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular matrix and the strictly lower triangular part of
A is not referenced.
Before entry with uplo = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular matrix and the strictly upper triangular part of
A is not referenced.
Note that when diag = MagmaUnit, the diagonal elements of
A are not referenced either, but are assumed to be unity.
@param[in]
ldda INTEGER.
On entry, lda specifies the first dimension of A.
lda >= max( 1, n ).
@param[in]
db COMPLEX_16 array of dimension n
On exit, b is overwritten with the solution vector X.
@param[in]
incb INTEGER.
On entry, incb specifies the increment for the elements of
b. incb must not be zero.
Unchanged on exit.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zblas2
********************************************************************/
extern "C" void
magmablas_ztrsv(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_ptr db, magma_int_t incb,
magma_queue_t queue)
{
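    // Allocate a zero-initialized device workspace dx, solve out of place into dx,
    // then copy the solution back over the right-hand side db.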
magma_int_t size_x = n * incb;
magmaDoubleComplex_ptr dx=NULL;
magma_zmalloc( &dx, size_x );
magmablas_zlaset( MagmaFull, n, 1, MAGMA_Z_ZERO, MAGMA_Z_ZERO, dx, n, queue );
magmablas_ztrsv_recursive_outofplace( uplo, trans, diag, n, dA, ldda, db, incb, dx, queue );
magmablas_zlacpy( MagmaFull, n, 1, dx, n, db, n, queue );
magma_free( dx );
}
| da4e72de496579ff55cd4a19d450f28bfd35a1ef.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Tingxing Dong
@author Azzam Haidar
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define PRECISION_z
#define NB 256 //NB is the 1st level blocking in recursive blocking, NUM_THREADS is the 2ed level, NB=256, NUM_THREADS=64 is optimal for batched
#define NUM_THREADS 128 //64 //128
#define BLOCK_SIZE_N 128
#define DIM_X_N 128
#define DIM_Y_N 1
#define BLOCK_SIZE_T 32
#define DIM_X_T 16
#define DIM_Y_T 8
#include "ztrsv_template_device.cuh"
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
extern __shared__ magmaDoubleComplex shared_data[];
//==============================================================================
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag>
__global__ void
ztrsv_notrans_kernel_outplace(
int n,
const magmaDoubleComplex * __restrict__ A, int lda,
magmaDoubleComplex *b, int incb,
magmaDoubleComplex *x)
{
ztrsv_notrans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
//==============================================================================
template<const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag>
__global__ void
ztrsv_trans_kernel_outplace(
int n,
const magmaDoubleComplex * __restrict__ A, int lda,
magmaDoubleComplex *b, int incb,
magmaDoubleComplex *x)
{
ztrsv_trans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x);
}
//==============================================================================
extern "C" void
magmablas_ztrsv_outofplace(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaDoubleComplex_const_ptr A, magma_int_t lda,
magmaDoubleComplex_ptr b, magma_int_t incb,
magmaDoubleComplex_ptr x,
magma_queue_t queue,
magma_int_t flag=0)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
dim3 threads( NUM_THREADS );
dim3 blocks( 1, 1, 1 );
size_t shmem = n * sizeof(magmaDoubleComplex);
if (trans == MagmaNoTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit)
{
if (flag == 0) {
ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0) {
ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
else //Lower
{
if (diag == MagmaNonUnit)
{
if (flag == 0)
{
ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0)
{
ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaUnit>
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaUnit>
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
}
else if (trans == MagmaTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
}
else if (trans == MagmaConjTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
else {
ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A, lda, b, incb, x);
}
}
}
}
}
/*
    README: flag decides whether ztrsv_outofplace sees an updated x or not. 0: No; other: Yes
    In the recursive version, flag must be nonzero except for the 1st call
*/
extern "C" void
magmablas_ztrsv_recursive_outofplace(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaDoubleComplex_const_ptr A, magma_int_t lda,
magmaDoubleComplex_ptr b, magma_int_t incb,
magmaDoubleComplex_ptr x,
magma_queue_t queue)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
//Init x with zero
//magmablas_zlaset( MagmaFull, n, incb, MAGMA_Z_ZERO, MAGMA_Z_ZERO, x, n, queue );
magma_int_t col = n;
if (trans == MagmaNoTrans)
{
for (magma_int_t i=0; i < n; i+= NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaUpper)
{
col -= jb;
//assume x_array contains zero elements, magmablas_zgemv will cause slow down
magma_zgemv( MagmaNoTrans, jb, i, MAGMA_Z_ONE, A(col, col+jb), lda,
x+col+jb, 1, MAGMA_Z_ONE, x+col, 1, queue );
}
else
{
col = i;
magma_zgemv( MagmaNoTrans, jb, i, MAGMA_Z_ONE, A(col, 0), lda,
x, 1, MAGMA_Z_ONE, x+col, 1, queue );
}
magmablas_ztrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
}
}
else
{
for (magma_int_t i=0; i < n; i += NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaLower)
{
col -= jb;
magma_zgemv( MagmaConjTrans, i, jb, MAGMA_Z_ONE, A(col+jb, col), lda, x+col+jb, 1, MAGMA_Z_ONE, x+col, 1, queue );
}
else
{
col = i;
magma_zgemv( MagmaConjTrans, i, jb, MAGMA_Z_ONE, A(0, col), lda, x, 1, MAGMA_Z_ONE, x+col, 1, queue );
}
magmablas_ztrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i );
}
}
}
//==============================================================================
/**
Purpose
-------
ztrsv solves one of the matrix equations on gpu
op(A)*x = B, or
x*op(A) = B,
    where x and B are n-element vectors, A is a unit, or
non-unit, upper or lower triangular matrix and op(A) is one of
op(A) = A, or
op(A) = A^T, or
op(A) = A^H.
The vector x is overwritten on b.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
trans magma_trans_t.
On entry, trans specifies the form of op(A) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op(A) = A.
- = MagmaTrans: op(A) = A^T.
- = MagmaConjTrans: op(A) = A^H.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
            On entry, n specifies the order of the matrix A. n >= 0.
@param[in]
dA COMPLEX_16 array of dimension ( lda, n )
Before entry with uplo = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular matrix and the strictly lower triangular part of
A is not referenced.
Before entry with uplo = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular matrix and the strictly upper triangular part of
A is not referenced.
Note that when diag = MagmaUnit, the diagonal elements of
A are not referenced either, but are assumed to be unity.
@param[in]
ldda INTEGER.
On entry, lda specifies the first dimension of A.
lda >= max( 1, n ).
@param[in]
db COMPLEX_16 array of dimension n
On exit, b is overwritten with the solution vector X.
@param[in]
incb INTEGER.
On entry, incb specifies the increment for the elements of
b. incb must not be zero.
Unchanged on exit.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zblas2
********************************************************************/
extern "C" void
magmablas_ztrsv(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_ptr db, magma_int_t incb,
magma_queue_t queue)
{
magma_int_t size_x = n * incb;
magmaDoubleComplex_ptr dx=NULL;
magma_zmalloc( &dx, size_x );
magmablas_zlaset( MagmaFull, n, 1, MAGMA_Z_ZERO, MAGMA_Z_ZERO, dx, n, queue );
magmablas_ztrsv_recursive_outofplace( uplo, trans, diag, n, dA, ldda, db, incb, dx, queue );
magmablas_zlacpy( MagmaFull, n, 1, dx, n, db, n, queue );
magma_free( dx );
}
|
e19bb57f2340538e2bb28cc71b9a708177b62c98.hip | // !!! This is a file automatically generated by hipify!!!
#include "luaT.h"
#include "TH.h"
#include "THH.h"
#include "THLogAdd.h" /* DEBUG: WTF */
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include "utils.h"
LUA_EXTERNC DLL_EXPORT int luaopen_libcunn(lua_State *L);
int luaopen_libcunn(lua_State *L)
{
lua_newtable(L);
cunn_SpatialCrossMapLRN_init(L);
cunn_Tanh_init(L);
cunn_ELU_init(L);
cunn_Sigmoid_init(L);
cunn_LogSoftMax_init(L);
cunn_SoftMax_init(L);
cunn_TemporalConvolution_init(L);
cunn_TemporalMaxPooling_init(L);
cunn_SpatialConvolutionMM_init(L);
cunn_SpatialFullConvolution_init(L);
cunn_SpatialMaxPooling_init(L);
cunn_SpatialFractionalMaxPooling_init(L);
cunn_SpatialAdaptiveMaxPooling_init(L);
cunn_SpatialSubSampling_init(L);
cunn_SpatialAveragePooling_init(L);
cunn_MultiMarginCriterion_init(L);
cunn_MarginCriterion_init(L);
cunn_Square_init(L);
cunn_Sqrt_init(L);
cunn_Threshold_init(L);
cunn_MSECriterion_init(L);
cunn_SmoothL1Criterion_init(L);
cunn_SoftPlus_init(L);
cunn_SpatialUpSamplingNearest_init(L);
cunn_VolumetricConvolution_init(L);
cunn_VolumetricFullConvolution_init(L);
cunn_VolumetricMaxPooling_init(L);
cunn_VolumetricAveragePooling_init(L);
cunn_LogSigmoid_init(L);
cunn_PReLU_init(L);
cunn_RReLU_init(L);
cunn_LeakyReLU_init(L);
cunn_LookupTable_init(L);
return 1;
}
| e19bb57f2340538e2bb28cc71b9a708177b62c98.cu | #include "luaT.h"
#include "TH.h"
#include "THC.h"
#include "THLogAdd.h" /* DEBUG: WTF */
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include "utils.h"
LUA_EXTERNC DLL_EXPORT int luaopen_libcunn(lua_State *L);
int luaopen_libcunn(lua_State *L)
{
lua_newtable(L);
cunn_SpatialCrossMapLRN_init(L);
cunn_Tanh_init(L);
cunn_ELU_init(L);
cunn_Sigmoid_init(L);
cunn_LogSoftMax_init(L);
cunn_SoftMax_init(L);
cunn_TemporalConvolution_init(L);
cunn_TemporalMaxPooling_init(L);
cunn_SpatialConvolutionMM_init(L);
cunn_SpatialFullConvolution_init(L);
cunn_SpatialMaxPooling_init(L);
cunn_SpatialFractionalMaxPooling_init(L);
cunn_SpatialAdaptiveMaxPooling_init(L);
cunn_SpatialSubSampling_init(L);
cunn_SpatialAveragePooling_init(L);
cunn_MultiMarginCriterion_init(L);
cunn_MarginCriterion_init(L);
cunn_Square_init(L);
cunn_Sqrt_init(L);
cunn_Threshold_init(L);
cunn_MSECriterion_init(L);
cunn_SmoothL1Criterion_init(L);
cunn_SoftPlus_init(L);
cunn_SpatialUpSamplingNearest_init(L);
cunn_VolumetricConvolution_init(L);
cunn_VolumetricFullConvolution_init(L);
cunn_VolumetricMaxPooling_init(L);
cunn_VolumetricAveragePooling_init(L);
cunn_LogSigmoid_init(L);
cunn_PReLU_init(L);
cunn_RReLU_init(L);
cunn_LeakyReLU_init(L);
cunn_LookupTable_init(L);
return 1;
}
|
77d94483716e31e85d56ce730451fbbe74f57f92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
The CUDA Kernel for Applying BFS on a loaded Graph. Created By Pawan Harish
**********************************************************************************/
#ifndef _KERNEL_H_
#define _KERNEL_H_
__global__ void
Kernel( Node* g_graph_nodes, int* g_graph_edges, int* g_graph_mask, int* g_updating_graph_mask, int *g_graph_visited, int* g_cost, int no_of_nodes)
{
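	// One thread per node: each node on the current frontier (mask set) clears its mask,
	// scans its adjacency list, and marks unvisited neighbors for the next level with
	// cost = cost[tid] + 1.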
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if( tid<no_of_nodes && g_graph_mask[tid])
{
g_graph_mask[tid]=false;
for(int i=g_graph_nodes[tid].starting; i<(g_graph_nodes[tid].no_of_edges + g_graph_nodes[tid].starting); i++)
{
int id = g_graph_edges[i];
if(!g_graph_visited[id])
{
g_cost[id]=g_cost[tid]+1;
g_updating_graph_mask[id]=true;
}
}
}
}
#endif
| 77d94483716e31e85d56ce730451fbbe74f57f92.cu | /*********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
The CUDA Kernel for Applying BFS on a loaded Graph. Created By Pawan Harish
**********************************************************************************/
#ifndef _KERNEL_H_
#define _KERNEL_H_
__global__ void
Kernel( Node* g_graph_nodes, int* g_graph_edges, int* g_graph_mask, int* g_updating_graph_mask, int *g_graph_visited, int* g_cost, int no_of_nodes)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if( tid<no_of_nodes && g_graph_mask[tid])
{
g_graph_mask[tid]=false;
for(int i=g_graph_nodes[tid].starting; i<(g_graph_nodes[tid].no_of_edges + g_graph_nodes[tid].starting); i++)
{
int id = g_graph_edges[i];
if(!g_graph_visited[id])
{
g_cost[id]=g_cost[tid]+1;
g_updating_graph_mask[id]=true;
}
}
}
}
#endif
|
83319e60dafe408b00baa8c5254c819c896da10d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the next power of 2 number that is larger than x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* special type of reduction to account for floating point error */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
__global__ void
kernel3(dtype *g_idata, dtype *g_odata, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
unsigned int i = bid * blockDim.x + threadIdx.x;
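  // First add during load: each thread sums one element from each half of the input
  // while filling shared memory, then the block tree-reduces the partial sums.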
if(i < (n>>1)) {
scratch[threadIdx.x] = g_idata[i]+g_idata[i+(n>>1)];
} else {
scratch[threadIdx.x] = 0;
}
__syncthreads ();
for(unsigned int s = blockDim.x >> 1; s >= 1; s = s >> 1) {
if(threadIdx.x < s) {
scratch[threadIdx.x] += scratch[threadIdx.x + s];
}
__syncthreads ();
}
if(threadIdx.x == 0) {
g_odata[bid] = scratch[0]; // the blocks overwrite the first "numOfBlocks" elements in the output array. each block writes at the block idx location.
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_3, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 3;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype),
hipMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(16, ((blocks + 16 - 1) / 16), 1);
dim3 tb(threads, 1, 1);
/* warm up */
hipLaunchKernelGGL(( kernel3) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
hipDeviceSynchronize ();
stopwatch_start (timer);
/* execute kernel */
hipLaunchKernelGGL(( kernel3) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
int s = blocks;
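  /* keep reducing the per-block partial sums on the device until a single value remains */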
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(16, (blocks + 16 - 1) / 16, 1);
dim3 tb(threads, 1, 1);
hipLaunchKernelGGL(( kernel3) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s);
s = (s + threads * 2 - 1) / (threads * 2);
}
hipDeviceSynchronize ();
t_kernel_3 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute first add GPU reduction kernel: %Lg secs\n", t_kernel_3);
double bw = (N * sizeof(dtype)) / (t_kernel_3 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype),
hipMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
  if(fabs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
| 83319e60dafe408b00baa8c5254c819c896da10d.cu | #include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the next power of 2 number that is larger than x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* special type of reduction to account for floating point error */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
__global__ void
kernel3(dtype *g_idata, dtype *g_odata, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
unsigned int i = bid * blockDim.x + threadIdx.x;
if(i < (n>>1)) {
scratch[threadIdx.x] = g_idata[i]+g_idata[i+(n>>1)];
} else {
scratch[threadIdx.x] = 0;
}
__syncthreads ();
for(unsigned int s = blockDim.x >> 1; s >= 1; s = s >> 1) {
if(threadIdx.x < s) {
scratch[threadIdx.x] += scratch[threadIdx.x + s];
}
__syncthreads ();
}
if(threadIdx.x == 0) {
g_odata[bid] = scratch[0]; // the blocks overwrite the first "numOfBlocks" elements in the output array. each block writes at the block idx location.
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_3, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 3;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype),
cudaMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(16, ((blocks + 16 - 1) / 16), 1);
dim3 tb(threads, 1, 1);
/* warm up */
kernel3 <<<gb, tb>>> (d_idata, d_odata, N);
cudaThreadSynchronize ();
stopwatch_start (timer);
/* execute kernel */
kernel3 <<<gb, tb>>> (d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(16, (blocks + 16 - 1) / 16, 1);
dim3 tb(threads, 1, 1);
kernel3 <<<gb, tb>>> (d_odata, d_odata, s);
s = (s + threads * 2 - 1) / (threads * 2);
}
cudaThreadSynchronize ();
t_kernel_3 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute first add GPU reduction kernel: %Lg secs\n", t_kernel_3);
double bw = (N * sizeof(dtype)) / (t_kernel_3 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype),
cudaMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
  if(fabs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
|
1c3455d6a1b718a060635eab204387aeffe309b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ImageBase.cuh"
namespace imagebase {
__global__ void resetImage_kernel(uint * pix,
float * depth,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
pix[ind] = 0;
depth[ind] = 0.f;
}
__global__ void resetImage2_kernel(uint * pix,
float * nearDepth,
float * farDepth,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
pix[ind] = 0;
nearDepth[ind] = 1.f;
farDepth[ind] = 1e28f;
}
void resetImage(uint * pix,
float * depth,
int blockx,
uint n)
{
dim3 block(blockx, 1, 1);
int nblk = getNumBlock(n, blockx);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( resetImage_kernel), dim3(grid), dim3(block) , 0, 0, pix,
depth,
n);
}
void resetImage(uint * pix,
float * nearDepth,
float * farDepth,
int blockx,
uint n)
{
dim3 block(blockx, 1, 1);
int nblk = getNumBlock(n, blockx);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( resetImage2_kernel), dim3(grid), dim3(block) , 0, 0, pix,
nearDepth,
farDepth,
n);
}
}
| 1c3455d6a1b718a060635eab204387aeffe309b1.cu | #include "ImageBase.cuh"
namespace imagebase {
__global__ void resetImage_kernel(uint * pix,
float * depth,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
pix[ind] = 0;
depth[ind] = 0.f;
}
__global__ void resetImage2_kernel(uint * pix,
float * nearDepth,
float * farDepth,
uint maxInd)
{
unsigned ind = blockIdx.x*blockDim.x + threadIdx.x;
if(ind >= maxInd) return;
pix[ind] = 0;
nearDepth[ind] = 1.f;
farDepth[ind] = 1e28f;
}
void resetImage(uint * pix,
float * depth,
int blockx,
uint n)
{
dim3 block(blockx, 1, 1);
int nblk = getNumBlock(n, blockx);
dim3 grid(nblk, 1, 1);
resetImage_kernel<<< grid, block >>>(pix,
depth,
n);
}
void resetImage(uint * pix,
float * nearDepth,
float * farDepth,
int blockx,
uint n)
{
dim3 block(blockx, 1, 1);
int nblk = getNumBlock(n, blockx);
dim3 grid(nblk, 1, 1);
resetImage2_kernel<<< grid, block >>>(pix,
nearDepth,
farDepth,
n);
}
}
|
bd4828201ac8bd6fad62498d6d715d26cd59ff6f.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <fstream>
#include <string>
#include <sstream>
#include <iostream>
#define BLOCK_WIDTH 512
/*
extern __shared__ stands for shared memory on device, which has two "warps" of 32 threads.
Google CUDA shared memory and warps.
To replace extern __shared__ int __smem[]; which requires you to explicitly
know the data type is integer in advance. But input file could be int, float, or double.
Since we don't know the data type of shared memory __smem[], we use
template<class T> where T stands for all possible data types. We also
need to instantiate all possible data types later
In return (T *) __smem; it is data type conversion
Suggest to figure out difference between overload, override, redefine
*/
template<class T>
struct SharedMemory {
__device__ inline operator T *() {
extern __shared__ int __smem[];
return (T *) __smem;
}
__device__ inline operator const T *() const {
extern __shared__ int __smem[];
return (T *) __smem;
}
};
/////////////////////////////////////////////////////////////////////////////
// CUDA Kernel: Global memory
/////////////////////////////////////////////////////////////////////////////
template<class T, int blockSize>
__global__ void countGlobalMem(T *g_idata, int *g_odata, int N) {
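	//global-memory histogram: each value is mapped to one of ten 100-wide bins
	//(1000 falls into the last bin) and the bin counter is incremented with atomicAdd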
unsigned int i = blockSize * blockIdx.x + threadIdx.x;
int gi = 0;
if (i < N) {
if (g_idata[i] == 1000) {
atomicAdd(&g_odata[9], 1);
} else {
gi = (int) g_idata[i] / 100;
atomicAdd(&g_odata[gi], 1);
}
}
}
/////////////////////////////////////////////////////////////////////////////
// CUDA Kernel: shared memory
/////////////////////////////////////////////////////////////////////////////
template<class T, int blockSize>
__global__ void countSharedMem(T *g_idata, int *g_odata, int N, int maxNum, int barrelSize) {
/*
Each block has a sdata
*/
	extern __shared__ int sdata[];
	unsigned int tid = threadIdx.x;
	int numBarrel = maxNum/barrelSize;
	//shared memory is not zero-initialized: clear the per-block histogram before accumulating
	if (tid < numBarrel) {
		sdata[tid] = 0;
	}
	__syncthreads();
	unsigned int i = blockSize * blockIdx.x + threadIdx.x;
//gi is group/barrel index
int gi = 0;
if (i < N) {
if (g_idata[i] == maxNum) {
atomicAdd(&sdata[numBarrel-1], 1);
} else {
gi = (int) g_idata[i] / barrelSize;
atomicAdd(&sdata[gi], 1);
}
}
//wait until sdata[0~9] in all blocks are ready
__syncthreads();
/*
every block has threadIdx.x from 0 to 511
size of g_odata is numBarrel * blocks
sum of all blocks is done in myCountTest(), note there
is += when output to "q2b.txt"
*/
if (tid < numBarrel) {
g_odata[blockIdx.x * numBarrel + tid] = sdata[tid];
}
}
//////////////////////////////////////////////////////////////////////////////
// CUDA Kernel: prefix sum (Naive)
///////////////////////////////////////////////////////////////////////////////
int nextPowerOf2(int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
__global__ void scan(int *d_idata, int *d_odata, int N) {
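	//naive Hillis-Steele inclusive scan within one block: each thread repeatedly adds
	//the value 'stride' positions to its left, doubling the stride each pass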
extern __shared__ int sdata[];
//cunyi
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
sdata[threadIdx.x] = d_idata[i];
//printf("\n sdata[%d]: %d", i, sdata[threadIdx.x]);
}
for (unsigned int stride = 1; stride <= threadIdx.x; stride *= 2) {
__syncthreads();
int in1 = sdata[threadIdx.x - stride];
__syncthreads();
sdata[threadIdx.x] += in1;
}
__syncthreads();
if(i < N) {
d_odata[threadIdx.x] = sdata[threadIdx.x];
//printf("\n sdata[%d]: %d", i, d_odata[threadIdx.x]);
}
}
///////////////////////////////////////////////////////////////////////////////
// Wrapper for countGlobalMem
///////////////////////////////////////////////////////////////////////////////
template<class T>
void countGMWrapper(int threads, int blocks, T *g_idata, int *g_odata ,int N) {
/*
1D block and 1D grid
*/
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = threads * sizeof (T);
hipLaunchKernelGGL(( countGlobalMem<T, BLOCK_WIDTH>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, g_idata, g_odata, N);
}
///////////////////////////////////////////////////////////////////////////////
// Wrapper for countSharedMem
///////////////////////////////////////////////////////////////////////////////
template<class T>
void countSWrapper(int threads, int blocks, T *g_idata, int *g_odata ,int N, int maxNum, int barrelSize) {
/*
1D block and 1D grid
*/
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = threads * sizeof (T);
hipLaunchKernelGGL(( countSharedMem<T, BLOCK_WIDTH>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, g_idata, g_odata, N, maxNum, barrelSize);
}
/////////////////////////////////////////////////////////////////////////////////
// Instantiate Template
/////////////////////////////////////////////////////////////////////////////////
template void
countGMWrapper<int>(int threads, int blocks, int *g_idata, int *g_odata, int N);
template void
countGMWrapper<float>(int threads, int blocks, float *g_idata, int *g_odata, int N);
template void
countGMWrapper<double>(int threads, int blocks, double *g_idata, int *g_odata, int N);
template void
countSWrapper<int>(int threads, int blocks, int *g_idata, int *g_odata ,int N, int maxNum, int barrelSize);
//////////////////////////////////////////////////////////////////////////////////
// Test Function
//////////////////////////////////////////////////////////////////////////////////
void myCountTest(const char* filename) {
int numBarrel = 10;
//read test file and decide size of array
std::vector<int> data;
std::string line_;
std::ifstream file_(filename);
if(file_.is_open()) {
while (getline(file_, line_)) {
std::stringstream ss(line_);
int i;
while(ss>>i) {
data.push_back(i);
if (ss.peek() == ',' || ss.peek() == ' ') {
ss.ignore();
}
}
}
file_.close();
}
int num_els = data.size();
int numBlocks = num_els/BLOCK_WIDTH + 1;
//Start to run Kernel_a
int *d_in = NULL;
int *d_out = NULL;
hipMalloc( (void **) &d_in, num_els * sizeof(int));
hipMalloc( (void **) &d_out, numBarrel * sizeof(int));
int *in = (int *) malloc(num_els * sizeof(int));
int *out = (int *) malloc(numBarrel * sizeof(int));
	std::copy(data.begin(), data.end(), in);
std::vector<int> v(10);
std::fill(v.begin(), v.end(), 0);
std::copy(v.begin(), v.end(), out);
hipMemcpy(d_in, in, num_els * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_out, out, numBarrel * sizeof(int), hipMemcpyHostToDevice);
countGMWrapper(BLOCK_WIDTH, numBlocks, d_in, d_out, num_els);
hipMemcpy(out, d_out, numBarrel * sizeof(int), hipMemcpyDeviceToHost);
std::ofstream fout1("q2a.txt", std::ios::app);
for(int i = 0; i < numBarrel; i++) {
if(fout1.is_open()) {
fout1 << "\n Count[" <<i<<"]: " <<out[i];
}
}
fout1.close();
fout1.clear();
hipFree(d_out);
//free(out);
//d_in is not cleaned because we are going to run more cuda kernels using d_in
free(in);
//hipFree(d_in);
//Start to run Kernel_b, almost the same as kernel_a
int *d_out_b = NULL;
hipMalloc( (void **) &d_out_b, numBarrel * numBlocks * sizeof(int));
int *out_b = (int *) malloc(numBarrel * numBlocks * sizeof(int));
//size of out_b is changed
v.resize(numBarrel * numBlocks);
std::fill(v.begin(), v.end(), 0);
std::copy(v.begin(), v.end(), out_b);
hipMemcpy(d_out_b, out_b, numBarrel * numBlocks * sizeof(int), hipMemcpyHostToDevice);
	countSWrapper(BLOCK_WIDTH, numBlocks, d_in, d_out_b, num_els, 1000, 100);
hipMemcpy(out_b, d_out_b, numBarrel * numBlocks * sizeof(int), hipMemcpyDeviceToHost);
std::ofstream fout2("q2b.txt", std::ios::app);
int out_b_all;
//int B[numBarrel];
for(int i = 0; i < numBarrel; i++) {
out_b_all = 0;
for (int j = 0; j < numBlocks; j++)
out_b_all += out_b[i + j * numBarrel];
//B[i] = out_b_all;
if(fout2.is_open()) {
fout2 << "\n Count[" <<i<<"]: " <<out_b_all;
}
}
fout2.close();
fout2.clear();
hipFree(d_out_b);
free(out_b);
hipFree(d_in);
//start to run Kernel_c
int n3 = nextPowerOf2(numBarrel);
int *d_out_c = NULL;
int *d_in_c = NULL;
int *out_c = (int *) malloc(n3 * sizeof(int));
v.resize(n3);
std::fill(v.begin(), v.end(), 0);
std::copy(v.begin(), v.end(), out_c);
hipMalloc( (void **) &d_in_c, n3 * sizeof(int));
hipMalloc( (void **) &d_out_c, n3 * sizeof(int));
int *in_test = (int *) malloc(n3 * sizeof(int));
std::vector<int> in_c;
for (int i = 0; i < n3; i++) {
if (i < numBarrel) {
in_c.push_back(out[i]);
} else {
in_c.push_back(0);
}
//printf("\n c: %d", in_c[i]);
}
in_test = &in_c[0];
hipMemcpy(d_in_c, in_test, n3 * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_out_c, out_c, n3 * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( scan), dim3(1), dim3(n3), n3*sizeof(int), 0, d_in_c, d_out_c, n3);
hipMemcpy(out_c, d_out_c, n3 * sizeof(int), hipMemcpyDeviceToHost);
std::ofstream fout3("q2c.txt", std::ios::app);
for(int i = 0; i < numBarrel; i++) {
if(fout3.is_open()) {
fout3 << "\n prescan[" <<i<<"]: " <<out_c[i];
}
}
fout3.close();
fout3.clear();
}
int main(int argc, char **argv) {
myCountTest("inp.txt");
return 0;
}
| bd4828201ac8bd6fad62498d6d715d26cd59ff6f.cu | #include <math.h>
#include <cuda.h>
#include <vector>
#include <fstream>
#include <string>
#include <sstream>
#include <iostream>
#define BLOCK_WIDTH 512
/*
extern __shared__ stands for shared memory on device, which has two "warps" of 32 threads.
Google CUDA shared memory and warps.
To replace extern __shared__ int __smem[]; which requires you to explicitly
know the data type is integer in advance. But input file could be int, float, or double.
Since we don't know the data type of shared memory __smem[], we use
template<class T> where T stands for all possible data types. We also
need to instantiate all possible data types later
In return (T *) __smem; it is data type conversion
Suggest to figure out difference between overload, override, redefine
*/
template<class T>
struct SharedMemory {
__device__ inline operator T *() {
extern __shared__ int __smem[];
return (T *) __smem;
}
__device__ inline operator const T *() const {
extern __shared__ int __smem[];
return (T *) __smem;
}
};
/////////////////////////////////////////////////////////////////////////////
// CUDA Kernel: Global memory
/////////////////////////////////////////////////////////////////////////////
template<class T, int blockSize>
__global__ void countGlobalMem(T *g_idata, int *g_odata, int N) {
unsigned int i = blockSize * blockIdx.x + threadIdx.x;
int gi = 0;
if (i < N) {
if (g_idata[i] == 1000) {
atomicAdd(&g_odata[9], 1);
} else {
gi = (int) g_idata[i] / 100;
atomicAdd(&g_odata[gi], 1);
}
}
}
/////////////////////////////////////////////////////////////////////////////
// CUDA Kernel: shared memory
/////////////////////////////////////////////////////////////////////////////
template<class T, int blockSize>
__global__ void countSharedMem(T *g_idata, int *g_odata, int N, int maxNum, int barrelSize) {
/*
Each block has a sdata
*/
	extern __shared__ int sdata[];
	unsigned int tid = threadIdx.x;
	int numBarrel = maxNum/barrelSize;
	//shared memory is not zero-initialized: clear the per-block histogram before accumulating
	if (tid < numBarrel) {
		sdata[tid] = 0;
	}
	__syncthreads();
	unsigned int i = blockSize * blockIdx.x + threadIdx.x;
//gi is group/barrel index
int gi = 0;
if (i < N) {
if (g_idata[i] == maxNum) {
atomicAdd(&sdata[numBarrel-1], 1);
} else {
gi = (int) g_idata[i] / barrelSize;
atomicAdd(&sdata[gi], 1);
}
}
//wait until sdata[0~9] in all blocks are ready
__syncthreads();
/*
every block has threadIdx.x from 0 to 511
size of g_odata is numBarrel * blocks
sum of all blocks is done in myCountTest(), note there
is += when output to "q2b.txt"
*/
if (tid < numBarrel) {
g_odata[blockIdx.x * numBarrel + tid] = sdata[tid];
}
}
//////////////////////////////////////////////////////////////////////////////
// CUDA Kernel: prefix sum (Naive)
///////////////////////////////////////////////////////////////////////////////
int nextPowerOf2(int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
__global__ void scan(int *d_idata, int *d_odata, int N) {
extern __shared__ int sdata[];
//cunyi
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
sdata[threadIdx.x] = d_idata[i];
//printf("\n sdata[%d]: %d", i, sdata[threadIdx.x]);
}
for (unsigned int stride = 1; stride <= threadIdx.x; stride *= 2) {
__syncthreads();
int in1 = sdata[threadIdx.x - stride];
__syncthreads();
sdata[threadIdx.x] += in1;
}
__syncthreads();
if(i < N) {
d_odata[threadIdx.x] = sdata[threadIdx.x];
//printf("\n sdata[%d]: %d", i, d_odata[threadIdx.x]);
}
}
///////////////////////////////////////////////////////////////////////////////
// Wrapper for countGlobalMem
///////////////////////////////////////////////////////////////////////////////
template<class T>
void countGMWrapper(int threads, int blocks, T *g_idata, int *g_odata ,int N) {
/*
1D block and 1D grid
*/
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = threads * sizeof (T);
countGlobalMem<T, BLOCK_WIDTH><<<dimGrid, dimBlock, smemSize>>>(g_idata, g_odata, N);
}
///////////////////////////////////////////////////////////////////////////////
// Wrapper for countSharedMem
///////////////////////////////////////////////////////////////////////////////
template<class T>
void countSWrapper(int threads, int blocks, T *g_idata, int *g_odata ,int N, int maxNum, int barrelSize) {
/*
1D block and 1D grid
*/
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = threads * sizeof (T);
countSharedMem<T, BLOCK_WIDTH><<<dimGrid, dimBlock, smemSize>>>(g_idata, g_odata, N, maxNum, barrelSize);
}
/////////////////////////////////////////////////////////////////////////////////
// Instantiate Template
/////////////////////////////////////////////////////////////////////////////////
template void
countGMWrapper<int>(int threads, int blocks, int *g_idata, int *g_odata, int N);
template void
countGMWrapper<float>(int threads, int blocks, float *g_idata, int *g_odata, int N);
template void
countGMWrapper<double>(int threads, int blocks, double *g_idata, int *g_odata, int N);
template void
countSWrapper<int>(int threads, int blocks, int *g_idata, int *g_odata ,int N, int maxNum, int barrelSize);
//////////////////////////////////////////////////////////////////////////////////
// Test Function
//////////////////////////////////////////////////////////////////////////////////
void myCountTest(const char* filename) {
int numBarrel = 10;
//read test file and decide size of array
std::vector<int> data;
std::string line_;
std::ifstream file_(filename);
if(file_.is_open()) {
while (getline(file_, line_)) {
std::stringstream ss(line_);
int i;
while(ss>>i) {
data.push_back(i);
if (ss.peek() == ',' || ss.peek() == ' ') {
ss.ignore();
}
}
}
file_.close();
}
int num_els = data.size();
int numBlocks = num_els/BLOCK_WIDTH + 1;
//Start to run Kernel_a
int *d_in = NULL;
int *d_out = NULL;
cudaMalloc( (void **) &d_in, num_els * sizeof(int));
cudaMalloc( (void **) &d_out, numBarrel * sizeof(int));
int *in = (int *) malloc(num_els * sizeof(int));
int *out = (int *) malloc(numBarrel * sizeof(int));
	std::copy(data.begin(), data.end(), in);
std::vector<int> v(10);
std::fill(v.begin(), v.end(), 0);
std::copy(v.begin(), v.end(), out);
cudaMemcpy(d_in, in, num_els * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_out, out, numBarrel * sizeof(int), cudaMemcpyHostToDevice);
countGMWrapper(BLOCK_WIDTH, numBlocks, d_in, d_out, num_els);
cudaMemcpy(out, d_out, numBarrel * sizeof(int), cudaMemcpyDeviceToHost);
std::ofstream fout1("q2a.txt", std::ios::app);
for(int i = 0; i < numBarrel; i++) {
if(fout1.is_open()) {
fout1 << "\n Count[" <<i<<"]: " <<out[i];
}
}
fout1.close();
fout1.clear();
cudaFree(d_out);
//free(out);
//d_in is not cleaned because we are going to run more cuda kernels using d_in
free(in);
//cudaFree(d_in);
//Start to run Kernel_b, almost the same as kernel_a
int *d_out_b = NULL;
cudaMalloc( (void **) &d_out_b, numBarrel * numBlocks * sizeof(int));
int *out_b = (int *) malloc(numBarrel * numBlocks * sizeof(int));
//size of out_b is changed
v.resize(numBarrel * numBlocks);
std::fill(v.begin(), v.end(), 0);
std::copy(v.begin(), v.end(), out_b);
cudaMemcpy(d_out_b, out_b, numBarrel * numBlocks * sizeof(int), cudaMemcpyHostToDevice);
	countSWrapper(BLOCK_WIDTH, numBlocks, d_in, d_out_b, num_els, 1000, 100);
cudaMemcpy(out_b, d_out_b, numBarrel * numBlocks * sizeof(int), cudaMemcpyDeviceToHost);
std::ofstream fout2("q2b.txt", std::ios::app);
int out_b_all;
//int B[numBarrel];
for(int i = 0; i < numBarrel; i++) {
out_b_all = 0;
for (int j = 0; j < numBlocks; j++)
out_b_all += out_b[i + j * numBarrel];
//B[i] = out_b_all;
if(fout2.is_open()) {
fout2 << "\n Count[" <<i<<"]: " <<out_b_all;
}
}
fout2.close();
fout2.clear();
cudaFree(d_out_b);
free(out_b);
cudaFree(d_in);
//start to run Kernel_c
int n3 = nextPowerOf2(numBarrel);
int *d_out_c = NULL;
int *d_in_c = NULL;
int *out_c = (int *) malloc(n3 * sizeof(int));
v.resize(n3);
std::fill(v.begin(), v.end(), 0);
std::copy(v.begin(), v.end(), out_c);
cudaMalloc( (void **) &d_in_c, n3 * sizeof(int));
cudaMalloc( (void **) &d_out_c, n3 * sizeof(int));
int *in_test = (int *) malloc(n3 * sizeof(int));
std::vector<int> in_c;
for (int i = 0; i < n3; i++) {
if (i < numBarrel) {
in_c.push_back(out[i]);
} else {
in_c.push_back(0);
}
//printf("\n c: %d", in_c[i]);
}
in_test = &in_c[0];
cudaMemcpy(d_in_c, in_test, n3 * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_out_c, out_c, n3 * sizeof(int), cudaMemcpyHostToDevice);
scan<<<1, n3, n3*sizeof(int)>>>(d_in_c, d_out_c, n3);
cudaMemcpy(out_c, d_out_c, n3 * sizeof(int), cudaMemcpyDeviceToHost);
std::ofstream fout3("q2c.txt", std::ios::app);
for(int i = 0; i < numBarrel; i++) {
if(fout3.is_open()) {
fout3 << "\n prescan[" <<i<<"]: " <<out_c[i];
}
}
fout3.close();
fout3.clear();
}
int main(int argc, char **argv) {
myCountTest("inp.txt");
return 0;
}
|
8644f7a8200eb3dfdcace883e9fe9ddd57debd0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define MATRIX_SIZE 256
#define BLOCK_SIZE 16
using namespace std;
__global__ void matMul(float *x, float *y, float *z, int matrixSize){
float zTemp = 0.0f;
__shared__ float xblkMat[BLOCK_SIZE*BLOCK_SIZE], yblkMat[BLOCK_SIZE*BLOCK_SIZE];
const int global_x = threadIdx.x + blockIdx.x * blockDim.x;
const int global_y = threadIdx.y + blockIdx.y * blockDim.y;
const int blocked_x = blockIdx.x;
const int blocked_y = blockIdx.y;
const int blocked_x_id = threadIdx.x;
const int blocked_y_id = threadIdx.y;
const int numBlocks = matrixSize / BLOCK_SIZE;
int xStart = blocked_y * matrixSize * BLOCK_SIZE;
int yStart = blocked_x * BLOCK_SIZE;
for (int block = 0; block < numBlocks; block++) {
xblkMat[blocked_x_id + (blocked_y_id*BLOCK_SIZE)] = x[xStart + ((blocked_y_id*matrixSize) + blocked_x_id)];
yblkMat[blocked_x_id + (blocked_y_id*BLOCK_SIZE)] = y[yStart + ((blocked_y_id*matrixSize) + blocked_x_id)];
__syncthreads();
for (int k = 0;k < BLOCK_SIZE;k++) {
zTemp += xblkMat[k + (blocked_y_id * BLOCK_SIZE)] * yblkMat[blocked_x_id + (k * BLOCK_SIZE)];
}
__syncthreads();
xStart += BLOCK_SIZE;
yStart += BLOCK_SIZE;
}
z[global_x + (global_y * matrixSize)] = zTemp;
}
int main(){
float *x,*y,*z;
struct timeval start;
struct timeval end;
double elapsedTime;
double numOps;
float gFLOPS;
hipMallocManaged(&x, MATRIX_SIZE * MATRIX_SIZE * sizeof(float));
hipMallocManaged(&y, MATRIX_SIZE * MATRIX_SIZE * sizeof(float));
hipMallocManaged(&z, MATRIX_SIZE * MATRIX_SIZE * sizeof(float));
for(int i=0;i<MATRIX_SIZE;i++){
for(int j=0;j<MATRIX_SIZE;j++){
*(x + i*MATRIX_SIZE + j) = 1.0f;
*(y + i*MATRIX_SIZE + j) = 1.0f;
*(z + i*MATRIX_SIZE + j) = 0.0f;
}
}
// Keep track of when we start doing work
gettimeofday(&start, NULL);
dim3 threads(BLOCK_SIZE,BLOCK_SIZE);
dim3 grid(MATRIX_SIZE/threads.x,MATRIX_SIZE/threads.y);
hipLaunchKernelGGL(( matMul), dim3(grid),dim3(threads), 0, 0, x,y,z,MATRIX_SIZE);
hipDeviceSynchronize();
// Keep track of when we finish our work
gettimeofday(&end, NULL);
// Calculate the time it took to do the above task
elapsedTime = (end.tv_sec - start.tv_sec) * 1000.0;
elapsedTime += (end.tv_usec - start.tv_usec) / 1000.0;
elapsedTime /= 1000;
//Calculate the GFLOPS obtained and print it along with the execution time
numOps = 2 * pow(MATRIX_SIZE, 3);
gFLOPS = float(1.0e-9 * numOps / elapsedTime);
printf("CUDA : %.3f seconds ( %f GFLOPS )\n",elapsedTime,gFLOPS);
/*cout << "X[23][65] : " << *(x + 23*MATRIX_SIZE + 65) << endl;
cout << "Y[23][65] : " << *(y + 23*MATRIX_SIZE + 65) << endl;
cout << "Z[23][65] : " << *(z + 23*MATRIX_SIZE + 65) << endl;*/
hipFree(x);
hipFree(y);
hipFree(z);
return 0;
}
| 8644f7a8200eb3dfdcace883e9fe9ddd57debd0d.cu | #include <iostream>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define MATRIX_SIZE 256
#define BLOCK_SIZE 16
using namespace std;
__global__ void matMul(float *x, float *y, float *z, int matrixSize){
float zTemp = 0.0f;
__shared__ float xblkMat[BLOCK_SIZE*BLOCK_SIZE], yblkMat[BLOCK_SIZE*BLOCK_SIZE];
const int global_x = threadIdx.x + blockIdx.x * blockDim.x;
const int global_y = threadIdx.y + blockIdx.y * blockDim.y;
const int blocked_x = blockIdx.x;
const int blocked_y = blockIdx.y;
const int blocked_x_id = threadIdx.x;
const int blocked_y_id = threadIdx.y;
const int numBlocks = matrixSize / BLOCK_SIZE;
int xStart = blocked_y * matrixSize * BLOCK_SIZE;
int yStart = blocked_x * BLOCK_SIZE;
for (int block = 0; block < numBlocks; block++) {
xblkMat[blocked_x_id + (blocked_y_id*BLOCK_SIZE)] = x[xStart + ((blocked_y_id*matrixSize) + blocked_x_id)];
yblkMat[blocked_x_id + (blocked_y_id*BLOCK_SIZE)] = y[yStart + ((blocked_y_id*matrixSize) + blocked_x_id)];
__syncthreads();
for (int k = 0;k < BLOCK_SIZE;k++) {
zTemp += xblkMat[k + (blocked_y_id * BLOCK_SIZE)] * yblkMat[blocked_x_id + (k * BLOCK_SIZE)];
}
__syncthreads();
xStart += BLOCK_SIZE;
yStart += BLOCK_SIZE;
}
z[global_x + (global_y * matrixSize)] = zTemp;
}
int main(){
float *x,*y,*z;
struct timeval start;
struct timeval end;
double elapsedTime;
double numOps;
float gFLOPS;
cudaMallocManaged(&x, MATRIX_SIZE * MATRIX_SIZE * sizeof(float));
cudaMallocManaged(&y, MATRIX_SIZE * MATRIX_SIZE * sizeof(float));
cudaMallocManaged(&z, MATRIX_SIZE * MATRIX_SIZE * sizeof(float));
for(int i=0;i<MATRIX_SIZE;i++){
for(int j=0;j<MATRIX_SIZE;j++){
*(x + i*MATRIX_SIZE + j) = 1.0f;
*(y + i*MATRIX_SIZE + j) = 1.0f;
*(z + i*MATRIX_SIZE + j) = 0.0f;
}
}
// Keep track of when we start doing work
gettimeofday(&start, NULL);
dim3 threads(BLOCK_SIZE,BLOCK_SIZE);
dim3 grid(MATRIX_SIZE/threads.x,MATRIX_SIZE/threads.y);
matMul<<<grid,threads>>>(x,y,z,MATRIX_SIZE);
cudaDeviceSynchronize();
// Keep track of when we finish our work
gettimeofday(&end, NULL);
// Calculate the time it took to do the above task
elapsedTime = (end.tv_sec - start.tv_sec) * 1000.0;
elapsedTime += (end.tv_usec - start.tv_usec) / 1000.0;
elapsedTime /= 1000;
//Calculate the GFLOPS obtained and print it along with the execution time
numOps = 2 * pow(MATRIX_SIZE, 3);
gFLOPS = float(1.0e-9 * numOps / elapsedTime);
printf("CUDA : %.3f seconds ( %f GFLOPS )\n",elapsedTime,gFLOPS);
/*cout << "X[23][65] : " << *(x + 23*MATRIX_SIZE + 65) << endl;
cout << "Y[23][65] : " << *(y + 23*MATRIX_SIZE + 65) << endl;
cout << "Z[23][65] : " << *(z + 23*MATRIX_SIZE + 65) << endl;*/
cudaFree(x);
cudaFree(y);
cudaFree(z);
return 0;
}
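// A minimal host-side sanity check one could add (hypothetical helper, not called above): since x
// and y are initialized to all ones, every entry of z should equal MATRIX_SIZE after the kernel runs.
static bool verifyAllOnesProduct(const float *z, int matrixSize) {
    for (int i = 0; i < matrixSize * matrixSize; i++) {
        if (z[i] != (float)matrixSize) return false; // each dot product of all-ones vectors of length N is N
    }
    return true;
}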
|
97b5332af331330643416043766b70341f94ce85.hip | // !!! This is a file automatically generated by hipify!!!
// Fast CUDA implementation of the Hungarian algorithm.
// (maximum pay version)
//
// Satyendra Yadav and Paulo Lopes
//
// Annex to the paper: Paulo Lopes, Satyendra Yadav et al., "Fast CUDA Implementation of the Hungarian Algorithm."
//
//
// Classical version of the Hungarian algorithm:
// (This algorithm was modified to result in an efficient GPU implementation, see paper)
//
// Initialize the slack matrix with the cost matrix, and then work with the slack matrix.
//
// STEP 1: Subtract the row minimum from each row. Subtract the column minimum from each column.
//
// STEP 2: Find a zero of the slack matrix. If there are no starred zeros in its column or row star the zero.
// Repeat for each zero.
//
// STEP 3: Cover each column with a starred zero. If all the columns are
// covered then the matching is maximum.
//
// STEP 4: Find a non-covered zero and prime it. If there is no starred zero in the row containing this primed zero,
// Go to Step 5. Otherwise, cover this row and uncover the column containing the starred zero.
// Continue in this manner until there are no uncovered zeros left.
// Save the smallest uncovered value and Go to Step 6.
//
// STEP 5: Construct a series of alternating primed and starred zeros as follows:
// Let Z0 represent the uncovered primed zero found in Step 4.
// Let Z1 denote the starred zero in the column of Z0(if any).
// Let Z2 denote the primed zero in the row of Z1(there will always be one).
// Continue until the series terminates at a primed zero that has no starred zero in its column.
// Un-star each starred zero of the series, star each primed zero of the series,
// erase all primes and uncover every row in the matrix. Return to Step 3.
//
// STEP 6: Add the minimum uncovered value to every element of each covered row,
// and subtract it from every element of each uncovered column.
// Return to Step 4 without altering any stars, primes, or covered rows.
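//
// Informal summary of the control flow implemented below (see Hungarian_Algorithm()):
// step 1 (normalize rows and columns); compress the zeros; step 2 (greedy starring);
// loop {
// step 3 (cover the columns that contain a starred zero); if all columns are covered -> done;
// loop { step 4 (prime/cover); if an augmenting path was found -> break; step 6 (adjust slack); recompress the zeros; }
// step 5 (augment along the alternating path);
// }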
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <random>
#include <assert.h>
// Uncomment to use on SM20, (without dynamic parallelism).
// #define SM20
// Uncomment to use chars as the data type, otherwise use int
// #define CHAR_DATA_TYPE
// Uncomment to use a 4x4 predefined matrix for testing
// #define USE_TEST_MATRIX
#ifndef USE_TEST_MATRIX
// User inputs: These values should be changed by the user
const int n = 1024; // size of the cost/pay matrix
const int range = n; // defines the range of the random matrix.
// Below are the resulting total pays for matrices of sizes 128, 256, 512, 1024, 2048, 4096, 8192
// for range of 0 to 0.1n: 1408, 6144, 25600, 103423, 415744, 1671167, 6701055
// for range of 0 to n: 16130, 64959, 261071, 1046350, 4189849, 16768289, 67091170
// for range of 0 to 10n: 161909, 651152, 2613373, 10468839, 41907855, 167703929, 670949133
const int log2_n = 10; // log2(n) needs to be entered manually
const int n_threads = 64; // Number of threads used in the small kernels (grid size typically equal to n)
// Used in steps 3ini, 3, 4ini, 4a, 4b, 5a and 5b
const int n_threads_reduction = 256; // Number of threads used in the reduction kernels in steps 1 and 6
const int n_blocks_reduction = 256; // Number of blocks used in the reduction kernels in steps 1 and 6
const int n_threads_full = 512; // Number of threads used in the largest grid sizes (typically grid size equal to n*n)
// Used in steps 2 and 6
// End of user inputs
#else
const int n = 4;
const int log2_n = 2;
const int n_threads = 2;
const int n_threads_reduction = 2;
const int n_blocks_reduction = 2;
const int n_threads_full = 2;
#endif
const int n_blocks = n / n_threads; // Number of blocks used in the small kernels (grid size typically equal to n)
const int n_blocks_full = n * n / n_threads_full; // Number of blocks used in the largest grid sizes (typically grid size equal to n*n)
const int row_mask = (1 << log2_n) - 1; // Used to extract the row from the matrix position index (matrices are column-wise)
const int nrow = n, ncol = n; // The matrix is square so the number of rows and columns is equal to n
const int max_threads_per_block = 1024; // The maximum number of threads per block
const int seed = 45345; // Initialization for the random number generator
// For the selection of the used data type
#ifndef CHAR_DATA_TYPE
typedef int data;
#define MAX_DATA INT_MAX
#define MIN_DATA INT_MIN
#else
typedef unsigned char data;
#define MAX_DATA 255
#define MIN_DATA 0
#endif
// Host Variables
// Some host variables start with h_ to distinguish them from the corresponding device variables
// Device variables have no prefix.
#ifndef USE_TEST_MATRIX
data pay[ncol][nrow];
#else
data pay[n][n] = { { 1, 2, 3, 4 }, { 2, 4, 6, 8 }, { 3, 6, 9, 12 }, { 4, 8, 12, 16 } };
#endif
int h_column_of_star_at_row[nrow];
int h_zeros_vector_size;
int h_n_matches;
bool h_found;
bool h_goto_5;
// Device Variables
__device__ data slack[nrow*ncol]; // The slack matrix
__device__ int zeros[nrow*ncol]; // A vector with the position of the zeros in the slack matrix
__device__ int zeros_vector_size; // The size of the zeros vector
__device__ int row_of_star_at_column[ncol]; // A vector that given the column j gives the row of the star at that column (or -1, no star)
__device__ int column_of_star_at_row[nrow]; // A vector that given the row i gives the column of the star at that row (or -1, no star)
__device__ int cover_row[nrow]; // A vector that given the row i indicates if it is covered (1- covered, 0- uncovered)
__device__ int cover_column[ncol]; // A vector that given the column j indicates if it is covered (1- covered, 0- uncovered)
__device__ int column_of_prime_at_row[nrow]; // A vector that given the row i gives the column of the prime at that row (or -1, no prime)
__device__ int row_of_green_at_column[ncol]; // A vector that given the column j gives the row of the green at that column (or -1, no green)
__device__ int column_of_zero_at_row[nrow]; // The column of the zero at row i, found on step 4a
__device__ int n_matches; // Used in step 3 to count the number of matches found
__device__ bool goto_5; // After step 4b, goto step 5?
__device__ bool found = false; // Found a zero in step 4a?
__device__ data max_in_mat_row[nrow]; // Used in step 1 to store the maximum of each row
__device__ data min_in_mat_col[ncol]; // Used in step 1 to store the minimum of each column
__device__ data d_min_in_mat_vect[n_blocks_reduction]; // Used in step 6 to store the intermediate results from the first reduction kernel
__device__ data d_min_in_mat; // Used in step 6 to store the minimum
__shared__ extern data sdata[]; // For access to shared memory
// -------------------------------------------------------------------------------------
// Device code
// -------------------------------------------------------------------------------------
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
#ifndef SM20
__device__
#endif
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
__global__ void Init()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
// initializations
//for step 2
if (i < nrow){
cover_row[i] = 0;
column_of_star_at_row[i] = -1;
}
if (i < ncol){
cover_column[i] = 0;
row_of_star_at_column[i] = -1;
}
}
// STEP 1.
// a) Find the maximum of each row, then subtract every entry from its row maximum
const int n_rows_per_block = n / n_blocks_reduction;
__device__ void max_in_rows_warp_reduce(volatile data* sdata, int tid) {
if (n_threads_reduction >= 64 && n_rows_per_block < 64) sdata[tid] = max(sdata[tid], sdata[tid + 32]);
if (n_threads_reduction >= 32 && n_rows_per_block < 32) sdata[tid] = max(sdata[tid], sdata[tid + 16]);
if (n_threads_reduction >= 16 && n_rows_per_block < 16) sdata[tid] = max(sdata[tid], sdata[tid + 8]);
if (n_threads_reduction >= 8 && n_rows_per_block < 8) sdata[tid] = max(sdata[tid], sdata[tid + 4]);
if (n_threads_reduction >= 4 && n_rows_per_block < 4) sdata[tid] = max(sdata[tid], sdata[tid + 2]);
if (n_threads_reduction >= 2 && n_rows_per_block < 2) sdata[tid] = max(sdata[tid], sdata[tid + 1]);
}
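// Note: this warp-level reduction (and the similar ones below) relies on implicit warp-synchronous
// execution of the last 32 threads, hence the volatile pointer and the absence of __syncthreads();
// on architectures with independent thread scheduling this pattern generally also needs __syncwarp().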
__global__ void max_in_rows()
{
__shared__ data sdata[n_threads_reduction]; // One temporary result for each thread.
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
// One gets the line and column from the blockID and threadID.
unsigned int l = bid * n_rows_per_block + tid % n_rows_per_block;
unsigned int c = tid / n_rows_per_block;
unsigned int i = c * nrow + l;
const unsigned int gridSize = n_threads_reduction * n_blocks_reduction;
data thread_min = MIN_DATA;
while (i < n * n) {
thread_min = max(thread_min, slack[i]);
i += gridSize; // go to the next piece of the matrix...
// gridSize = 2^k * n, so that each thread always processes the same line or column
}
sdata[tid] = thread_min;
__syncthreads();
if (n_threads_reduction >= 1024 && n_rows_per_block < 1024) { if (tid < 512) { sdata[tid] = max(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
if (n_threads_reduction >= 512 && n_rows_per_block < 512) { if (tid < 256) { sdata[tid] = max(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (n_threads_reduction >= 256 && n_rows_per_block < 256) { if (tid < 128) { sdata[tid] = max(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (n_threads_reduction >= 128 && n_rows_per_block < 128) { if (tid < 64) { sdata[tid] = max(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) max_in_rows_warp_reduce(sdata, tid);
if (tid < n_rows_per_block) max_in_mat_row[bid*n_rows_per_block + tid] = sdata[tid];
}
// b) Find the minimum of each column, then subtract it from that column
const int n_cols_per_block = n / n_blocks_reduction;
__device__ void min_in_cols_warp_reduce(volatile data* sdata, int tid) {
if (n_threads_reduction >= 64 && n_cols_per_block < 64) sdata[tid] = min(sdata[tid], sdata[tid + 32]);
if (n_threads_reduction >= 32 && n_cols_per_block < 32) sdata[tid] = min(sdata[tid], sdata[tid + 16]);
if (n_threads_reduction >= 16 && n_cols_per_block < 16) sdata[tid] = min(sdata[tid], sdata[tid + 8]);
if (n_threads_reduction >= 8 && n_cols_per_block < 8) sdata[tid] = min(sdata[tid], sdata[tid + 4]);
if (n_threads_reduction >= 4 && n_cols_per_block < 4) sdata[tid] = min(sdata[tid], sdata[tid + 2]);
if (n_threads_reduction >= 2 && n_cols_per_block < 2) sdata[tid] = min(sdata[tid], sdata[tid + 1]);
}
__global__ void min_in_cols()
{
__shared__ data sdata[n_threads_reduction]; // One temporary result for each thread
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
// One gets the line and column from the blockID and threadID.
unsigned int c = bid * n_cols_per_block + tid % n_cols_per_block;
unsigned int l = tid / n_cols_per_block;
const unsigned int gridSize = n_threads_reduction * n_blocks_reduction;
data thread_min = MAX_DATA;
while (l < n) {
unsigned int i = c * nrow + l;
thread_min = min(thread_min, slack[i]);
l += gridSize / n; // go to the next piece of the matrix...
// gridSize = 2^k * n, so that each thread always processes the same line or column
}
sdata[tid] = thread_min;
__syncthreads();
if (n_threads_reduction >= 1024 && n_cols_per_block < 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
if (n_threads_reduction >= 512 && n_cols_per_block < 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (n_threads_reduction >= 256 && n_cols_per_block < 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (n_threads_reduction >= 128 && n_cols_per_block < 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) min_in_cols_warp_reduce(sdata, tid);
if (tid < n_cols_per_block) min_in_mat_col[bid*n_cols_per_block + tid] = sdata[tid];
}
__global__ void step_1_sub_row()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int l = i & row_mask;
slack[i] = max_in_mat_row[l] - slack[i]; // subtract the entry from its row maximum
}
__global__ void step_1_col_sub()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int c = i >> log2_n;
slack[i] = slack[i] - min_in_mat_col[c]; // subtract the column minimum from that column
if (i == 0) zeros_vector_size = 0;
}
// Compress matrix
__global__ void compress_matrix(){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (slack[i] == 0) {
int j = atomicAdd(&zeros_vector_size, 1);
zeros[j] = i;
}
}
// STEP 2
// Find a zero of slack. If there are no starred zeros in its
// column or row star the zero. Repeat for each zero.
__global__ void step_2()
{
int i = threadIdx.x;
bool repeat;
do {
repeat = false;
__syncthreads();
for (int j = i; j < zeros_vector_size; j += blockDim.x)
{
int z = zeros[j];
int l = z & row_mask;
int c = z >> log2_n;
if (cover_row[l] == 0 && cover_column[c] == 0) {
// the thread tries to acquire the line
if (!atomicExch(&(cover_row[l]), 1)){
// only one thread gets the line
if (!atomicExch(&(cover_column[c]), 1)){
// only one thread gets the column
row_of_star_at_column[c] = l;
column_of_star_at_row[l] = c;
}
else {
cover_row[l] = 0;
repeat = true;
}
}
}
}
__syncthreads();
} while (repeat);
}
// STEP 3
// uncover all the rows and columns before going to step 3
__global__ void step_3ini()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
cover_row[i] = 0;
cover_column[i] = 0;
if (i == 0) n_matches = 0;
}
// Cover each column with a starred zero. If all the columns are
// covered then the matching is maximum
__global__ void step_3()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (row_of_star_at_column[i]>=0)
{
cover_column[i] = 1;
atomicAdd((int*)&n_matches, 1);
}
}
// STEP 4
// Find a noncovered zero and prime it. If there is no starred
// zero in the row containing this primed zero, go to Step 5.
// Otherwise, cover this row and uncover the column containing
// the starred zero. Continue in this manner until there are no
// uncovered zeros left. Save the smallest uncovered value and
// Go to Step 6.
__global__ void step_4_init()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
column_of_prime_at_row[i] =-1;
row_of_green_at_column[i] = -1;
column_of_zero_at_row[i] = -1;
}
// Maps the uncovered zeros into column_of_zero_at_row
__global__ void step_4a(){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < zeros_vector_size) {
int z = zeros[i];
int l = z & row_mask;
int c = z >> log2_n;
if (!cover_row[l] && !cover_column[c]){
column_of_zero_at_row[l] = c;
found = true; // This is set to false in 4b and at initialization.
}
}
if (i == 0) {
goto_5 = false;
}
}
// The rest of step 4
__global__ void step_4b(){
int l = blockDim.x * blockIdx.x + threadIdx.x;
int c0 = column_of_zero_at_row[l];
if (c0>=0)
{
column_of_prime_at_row[l] = c0;
int c = column_of_star_at_row[l];
if (c >= 0) {
cover_row[l] = 1;
cover_column[c] = 0;
found = false;
}
else
goto_5 = true;
}
column_of_zero_at_row[l] = -1;
}
/* STEP 5:
Construct a series of alternating primed and starred zeros as
follows:
Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0(if any).
Let Z2 denote the primed zero in the row of Z1(there will always
be one). Continue until the series terminates at a primed zero
that has no starred zero in its column. Unstar each starred
zero of the series, star each primed zero of the series, erase
all primes and uncover every line in the matrix. Return to Step 3.*/
// Eliminates joining paths
__global__ void step_5a()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int r_Z0, c_Z0;
c_Z0 = column_of_prime_at_row[i];
if (c_Z0 >= 0 && column_of_star_at_row[i] < 0){
row_of_green_at_column[c_Z0] = i;
while ((r_Z0 = row_of_star_at_column[c_Z0]) >= 0){
c_Z0 = column_of_prime_at_row[r_Z0];
row_of_green_at_column[c_Z0] = r_Z0;
}
}
}
// Applies the alternating paths
__global__ void step_5b()
{
int j = blockDim.x * blockIdx.x + threadIdx.x;
int r_Z0, c_Z0, c_Z2;
r_Z0 = row_of_green_at_column[j];
if (r_Z0 >= 0 && row_of_star_at_column[j] < 0){
c_Z2 = column_of_star_at_row[r_Z0];
column_of_star_at_row[r_Z0] = j;
row_of_star_at_column[j] = r_Z0;
while (c_Z2 >= 0) {
r_Z0 = row_of_green_at_column[c_Z2]; // row of Z2
c_Z0 = c_Z2; // col of Z2
c_Z2 = column_of_star_at_row[r_Z0]; // col of Z4
// star Z2
column_of_star_at_row[r_Z0] = c_Z0;
row_of_star_at_column[c_Z0] = r_Z0;
}
}
}
// STEP 6
// Add the minimum uncovered value to every element of each covered
// row, and subtract it from every element of each uncovered column.
// Return to Step 4 without altering any stars, primes, or covered lines.
template <unsigned int blockSize>
__device__ void min_warp_reduce(volatile data* sdata, int tid) {
if (blockSize >= 64) sdata[tid] = min(sdata[tid], sdata[tid + 32]);
if (blockSize >= 32) sdata[tid] = min(sdata[tid], sdata[tid + 16]);
if (blockSize >= 16) sdata[tid] = min(sdata[tid], sdata[tid + 8]);
if (blockSize >= 8) sdata[tid] = min(sdata[tid], sdata[tid + 4]);
if (blockSize >= 4) sdata[tid] = min(sdata[tid], sdata[tid + 2]);
if (blockSize >= 2) sdata[tid] = min(sdata[tid], sdata[tid + 1]);
}
template <unsigned int blockSize> // blockSize is the size of a block of threads
__device__ void min_reduce1(volatile data *g_idata, volatile data *g_odata, unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize * 2) + tid;
unsigned int gridSize = blockSize * 2 * gridDim.x;
sdata[tid] = MAX_DATA;
while (i < n) {
int i1 = i;
int i2 = i + blockSize;
int l1 = i1 & row_mask;
int c1 = i1 >> log2_n;
int g1;
if (cover_row[l1] == 1 || cover_column[c1] == 1) g1 = MAX_DATA;
else g1 = g_idata[i1];
int l2 = i2 & row_mask;
int c2 = i2 >> log2_n;
int g2;
if (cover_row[l2] == 1 || cover_column[c2] == 1) g2 = MAX_DATA;
else g2 = g_idata[i2];
sdata[tid] = min(sdata[tid], min(g1, g2));
i += gridSize;
}
__syncthreads();
if (blockSize >= 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) min_warp_reduce<blockSize>(sdata, tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <unsigned int blockSize>
__device__ void min_reduce2(volatile data *g_idata, volatile data *g_odata, unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize * 2) + tid;
sdata[tid] = min(g_idata[i], g_idata[i + blockSize]);
__syncthreads();
if (blockSize >= 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) min_warp_reduce<blockSize>(sdata, tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
__global__ void step_6_add_sub()
{
// STEP 6:
// /*STEP 6: Add the minimum uncovered value to every element of each covered
// row, and subtract it from every element of each uncovered column.
// Return to Step 4 without altering any stars, primes, or covered lines. */
int i = blockDim.x * blockIdx.x + threadIdx.x;
int l = i & row_mask;
int c = i >> log2_n;
if (cover_row[l] == 1 && cover_column[c] == 1)
slack[i] += d_min_in_mat;
if (cover_row[l] == 0 && cover_column[c] == 0)
slack[i] -= d_min_in_mat;
if (i == 0) zeros_vector_size = 0;
}
__global__ void min_reduce_kernel1() {
min_reduce1<n_threads_reduction>(slack, d_min_in_mat_vect, nrow*ncol);
}
__global__ void min_reduce_kernel2() {
min_reduce2<n_threads_reduction / 2>(d_min_in_mat_vect, &d_min_in_mat, n_blocks_reduction);
}
// Hungarian_Algorithm
// This function runs on the device if dynamic parallelism is enabled,
// or on the host if it is disabled (SM20).
#ifdef SM20
void Hungarian_Algorithm()
#else
__global__ void Hungarian_Algorithm()
#endif
{
// Initialization
Init << < n_blocks, n_threads >> > ();
checkCuda(hipDeviceSynchronize());
// Step 1 kernels
max_in_rows << < n_blocks_reduction, n_threads_reduction >> >();
checkCuda(hipDeviceSynchronize());
step_1_sub_row << <n_blocks_full, n_threads_full >> >();
checkCuda(hipDeviceSynchronize());
min_in_cols << < n_blocks_reduction, n_threads_reduction >> >();
checkCuda(hipDeviceSynchronize());
step_1_col_sub << <n_blocks_full, n_threads_full >> >();
checkCuda(hipDeviceSynchronize());
// compress_matrix
compress_matrix << < n_blocks_full, n_threads_full >> > ();
checkCuda(hipDeviceSynchronize());
// Step 2 kernels
step_2 << <1, min(max_threads_per_block, nrow) >> > ();
checkCuda(hipDeviceSynchronize());
while (1) { // repeat steps 3 to 6
// Step 3 kernels
step_3ini << <n_blocks, n_threads >> >();
checkCuda(hipDeviceSynchronize());
step_3 << <n_blocks, n_threads >> > ();
checkCuda(hipDeviceSynchronize());
#ifdef SM20
hipMemcpyFromSymbol(&h_n_matches, n_matches, sizeof(int));
if (h_n_matches >= ncol) return; // It's done
#else
if (n_matches >= ncol) return; // It's done
#endif
//step 4_kernels
step_4_init << <n_blocks, n_threads >> > ();
checkCuda(hipDeviceSynchronize());
while (1) // repeat step 4 and 6
{
do { // step 4 loop
#ifdef SM20
hipMemcpyFromSymbol(&h_zeros_vector_size, zeros_vector_size, sizeof(int));
if (h_zeros_vector_size > 100 * n)
step_4a << < n_blocks_full, n_threads_full >> > ();
else if (h_zeros_vector_size > 10 * n)
step_4a << < 100 * n / n_threads, n_threads >> > ();
else
step_4a << < 10 * n / n_threads, n_threads >> > ();
#else
if (zeros_vector_size>100 * n)
step_4a << < n_blocks_full, n_threads_full >> > ();
else if (zeros_vector_size>10 * n)
step_4a << < 100 * n / n_threads, n_threads >> > ();
else
step_4a << < 10 * n / n_threads, n_threads >> > ();
#endif
checkCuda(hipDeviceSynchronize());
#ifdef SM20
hipMemcpyFromSymbol(&h_found, found, sizeof(bool));
if (!h_found) break;
#else
if (!found) break;
#endif
step_4b << < n_blocks, n_threads >> >();
checkCuda(hipDeviceSynchronize());
#ifdef SM20
hipMemcpyFromSymbol(&h_goto_5, goto_5, sizeof(bool));
} while (!h_goto_5);
#else
} while (!goto_5);
#endif
#ifdef SM20
hipMemcpyFromSymbol(&h_goto_5, goto_5, sizeof(bool));
if (h_goto_5) break; // Or if (!h_found)
#else
if (goto_5) break; // Or if (!found)
#endif
//step 6_kernel
min_reduce_kernel1 << <n_blocks_reduction, n_threads_reduction, n_threads_reduction*sizeof(int) >> >();
checkCuda(hipDeviceSynchronize());
min_reduce_kernel2 << <1, n_blocks_reduction / 2, (n_blocks_reduction / 2) * sizeof(int) >> >();
checkCuda(hipDeviceSynchronize());
step_6_add_sub << <n_blocks_full, n_threads_full >> >();
checkCuda(hipDeviceSynchronize());
//compress_matrix
compress_matrix << < n_blocks_full, n_threads_full >> > ();
checkCuda(hipDeviceSynchronize());
} // repeat step 4 and 6
step_5a << < n_blocks, n_threads >> > ();
checkCuda(hipDeviceSynchronize());
step_5b << < n_blocks, n_threads >> > ();
checkCuda(hipDeviceSynchronize());
} // repeat steps 3 to 6
}
// -------------------------------------------------------------------------------------
// Host code
// -------------------------------------------------------------------------------------
// Used to make sure some constants are properly set
void check(bool val, const char *str){
if (!val) {
printf("Check failed: %s!\n", str);
getchar();
exit(-1);
}
}
int main()
{
// Constant checks:
check(n == (1 << log2_n), "Incorrect log2_n!");
check(n_threads*n_blocks == n, "n_threads*n_blocks != n\n");
// step 1
check(n_blocks_reduction <= n, "Step 1: Should have several lines per block!");
check(n % n_blocks_reduction == 0, "Step 1: Number of lines per block should be integer!");
check((n_blocks_reduction*n_threads_reduction) % n == 0, "Step 1: The grid size must be a multiple of the line size!");
check(n_threads_reduction*n_blocks_reduction <= n*n, "Step 1: The grid size is bigger than the matrix size!");
// step 6
check(n_threads_full*n_blocks_full <= n*n, "Step 6: The grid size is bigger than the matrix size!");
#ifndef USE_TEST_MATRIX
std::default_random_engine generator(seed);
std::uniform_int_distribution<int> distribution(0, range-1);
for (int c = 0; c < ncol; c++)
for (int r = 0; r < nrow; r++) {
pay[c][r] = distribution(generator);
}
#endif
// Copy vectors from host memory to device memory
hipMemcpyToSymbol(slack, pay, sizeof(data)*nrow*ncol); // symbol refers to the device memory hence "To" means from Host to Device
// Invoke kernels
time_t start_time = clock();
#ifdef SM20
Hungarian_Algorithm();
#else
Hungarian_Algorithm << < 1, 1 >> > ();
#endif
hipDeviceSynchronize();
time_t stop_time = clock();
// Copy assignments from Device to Host and calculate the total Cost.
hipMemcpyFromSymbol(h_column_of_star_at_row, column_of_star_at_row, nrow*sizeof(int));
int total_pay = 0;
for (int r = 0; r < nrow; r++) {
int c = h_column_of_star_at_row[r];
if (c >= 0) total_pay += pay[c][r];
}
printf("Total pay is: %d \n", total_pay);
printf("Elapsed time: %f ms\n", 1000.0*(double)(stop_time - start_time) / CLOCKS_PER_SEC);
printf("Note: This time measurment is portable but very inaccurate!\n");
}
| 97b5332af331330643416043766b70341f94ce85.cu | // Fast CUDA implementation of the Hungarian algorithm.
// (maximum pay version)
//
// Satyendra Yadav and Paulo Lopes
//
// Annex to the paper: Paulo Lopes, Satyendra Yadav et al., "Fast CUDA Implementation of the Hungarian Algorithm."
//
//
// Classical version of the Hungarian algorithm:
// (This algorithm was modified to result in an efficient GPU implementation, see paper)
//
// Initialize the slack matrix with the cost matrix, and then work with the slack matrix.
//
// STEP 1: Subtract the row minimum from each row. Subtract the column minimum from each column.
//
// STEP 2: Find a zero of the slack matrix. If there are no starred zeros in its column or row star the zero.
// Repeat for each zero.
//
// STEP 3: Cover each column with a starred zero. If all the columns are
// covered then the matching is maximum.
//
// STEP 4: Find a non-covered zero and prime it. If there is no starred zero in the row containing this primed zero,
// Go to Step 5. Otherwise, cover this row and uncover the column containing the starred zero.
// Continue in this manner until there are no uncovered zeros left.
// Save the smallest uncovered value and Go to Step 6.
//
// STEP 5: Construct a series of alternating primed and starred zeros as follows:
// Let Z0 represent the uncovered primed zero found in Step 4.
// Let Z1 denote the starred zero in the column of Z0(if any).
// Let Z2 denote the primed zero in the row of Z1(there will always be one).
// Continue until the series terminates at a primed zero that has no starred zero in its column.
// Un-star each starred zero of the series, star each primed zero of the series,
// erase all primes and uncover every row in the matrix. Return to Step 3.
//
// STEP 6: Add the minimum uncovered value to every element of each covered row,
// and subtract it from every element of each uncovered column.
// Return to Step 4 without altering any stars, primes, or covered rows.
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <random>
#include <assert.h>
// Uncomment to use on SM20, (without dynamic parallelism).
// #define SM20
// Uncomment to use chars as the data type, otherwise use int
// #define CHAR_DATA_TYPE
// Uncomment to use a 4x4 predefined matrix for testing
// #define USE_TEST_MATRIX
#ifndef USE_TEST_MATRIX
// User inputs: These values should be changed by the user
const int n = 1024; // size of the cost/pay matrix
const int range = n; // defines the range of the random matrix.
// Below are the resulting total pays for matrices of sizes 128, 256, 512, 1024, 2048, 4096, 8192
// for range of 0 to 0.1n: 1408, 6144, 25600, 103423, 415744, 1671167, 6701055
// for range of 0 to n: 16130, 64959, 261071, 1046350, 4189849, 16768289, 67091170
// for range of 0 to 10n: 161909, 651152, 2613373, 10468839, 41907855, 167703929, 670949133
const int log2_n = 10; // log2(n) needs to be entered manually
const int n_threads = 64; // Number of threads used in the small kernels (grid size typically equal to n)
// Used in steps 3ini, 3, 4ini, 4a, 4b, 5a and 5b
const int n_threads_reduction = 256; // Number of threads used in the reduction kernels in steps 1 and 6
const int n_blocks_reduction = 256; // Number of blocks used in the reduction kernels in steps 1 and 6
const int n_threads_full = 512; // Number of threads used in the largest grid sizes (typically grid size equal to n*n)
// Used in steps 2 and 6
// End of user inputs
#else
const int n = 4;
const int log2_n = 2;
const int n_threads = 2;
const int n_threads_reduction = 2;
const int n_blocks_reduction = 2;
const int n_threads_full = 2;
#endif
const int n_blocks = n / n_threads; // Number of blocks used in the small kernels (grid size typically equal to n)
const int n_blocks_full = n * n / n_threads_full; // Number of blocks used in the largest grid sizes (typically grid size equal to n*n)
const int row_mask = (1 << log2_n) - 1; // Used to extract the row from the matrix position index (matrices are column-wise)
const int nrow = n, ncol = n; // The matrix is square so the number of rows and columns is equal to n
const int max_threads_per_block = 1024; // The maximum number of threads per block
const int seed = 45345; // Initialization for the random number generator
// For the selection of the used data type
#ifndef CHAR_DATA_TYPE
typedef int data;
#define MAX_DATA INT_MAX
#define MIN_DATA INT_MIN
#else
typedef unsigned char data;
#define MAX_DATA 255
#define MIN_DATA 0
#endif
// Host Variables
// Some host variables start with h_ to distinguish them from the corresponding device variables
// Device variables have no prefix.
#ifndef USE_TEST_MATRIX
data pay[ncol][nrow];
#else
data pay[n][n] = { { 1, 2, 3, 4 }, { 2, 4, 6, 8 }, { 3, 6, 9, 12 }, { 4, 8, 12, 16 } };
#endif
int h_column_of_star_at_row[nrow];
int h_zeros_vector_size;
int h_n_matches;
bool h_found;
bool h_goto_5;
// Device Variables
__device__ data slack[nrow*ncol]; // The slack matrix
__device__ int zeros[nrow*ncol]; // A vector with the position of the zeros in the slack matrix
__device__ int zeros_vector_size; // The size of the zeros vector
__device__ int row_of_star_at_column[ncol]; // A vector that given the column j gives the row of the star at that column (or -1, no star)
__device__ int column_of_star_at_row[nrow]; // A vector that given the row i gives the column of the star at that row (or -1, no star)
__device__ int cover_row[nrow]; // A vector that given the row i indicates if it is covered (1- covered, 0- uncovered)
__device__ int cover_column[ncol]; // A vector that given the column j indicates if it is covered (1- covered, 0- uncovered)
__device__ int column_of_prime_at_row[nrow]; // A vector that given the row i gives the column of the prime at that row (or -1, no prime)
__device__ int row_of_green_at_column[ncol]; // A vector that given the column j gives the row of the green at that column (or -1, no green)
__device__ int column_of_zero_at_row[nrow]; // The column of the zero at row i, found on step 4a
__device__ int n_matches; // Used in step 3 to count the number of matches found
__device__ bool goto_5; // After step 4b, goto step 5?
__device__ bool found = false; // Found a zero in step 4a?
__device__ data max_in_mat_row[nrow]; // Used in step 1 to store the maximum of each row
__device__ data min_in_mat_col[ncol]; // Used in step 1 to store the minimum of each column
__device__ data d_min_in_mat_vect[n_blocks_reduction]; // Used in step 6 to store the intermediate results from the first reduction kernel
__device__ data d_min_in_mat; // Used in step 6 to store the minimum
__shared__ extern data sdata[]; // For access to shared memory
// -------------------------------------------------------------------------------------
// Device code
// -------------------------------------------------------------------------------------
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
#ifndef SM20
__device__
#endif
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
__global__ void Init()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
// initializations
//for step 2
if (i < nrow){
cover_row[i] = 0;
column_of_star_at_row[i] = -1;
}
if (i < ncol){
cover_column[i] = 0;
row_of_star_at_column[i] = -1;
}
}
// STEP 1.
// a) Find the maximum of each row, then subtract every entry from its row maximum
const int n_rows_per_block = n / n_blocks_reduction;
__device__ void max_in_rows_warp_reduce(volatile data* sdata, int tid) {
if (n_threads_reduction >= 64 && n_rows_per_block < 64) sdata[tid] = max(sdata[tid], sdata[tid + 32]);
if (n_threads_reduction >= 32 && n_rows_per_block < 32) sdata[tid] = max(sdata[tid], sdata[tid + 16]);
if (n_threads_reduction >= 16 && n_rows_per_block < 16) sdata[tid] = max(sdata[tid], sdata[tid + 8]);
if (n_threads_reduction >= 8 && n_rows_per_block < 8) sdata[tid] = max(sdata[tid], sdata[tid + 4]);
if (n_threads_reduction >= 4 && n_rows_per_block < 4) sdata[tid] = max(sdata[tid], sdata[tid + 2]);
if (n_threads_reduction >= 2 && n_rows_per_block < 2) sdata[tid] = max(sdata[tid], sdata[tid + 1]);
}
__global__ void max_in_rows()
{
__shared__ data sdata[n_threads_reduction]; // One temporary result for each thread.
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
// One gets the line and column from the blockID and threadID.
unsigned int l = bid * n_rows_per_block + tid % n_rows_per_block;
unsigned int c = tid / n_rows_per_block;
unsigned int i = c * nrow + l;
const unsigned int gridSize = n_threads_reduction * n_blocks_reduction;
data thread_min = MIN_DATA;
while (i < n * n) {
thread_min = max(thread_min, slack[i]);
i += gridSize; // go to the next piece of the matrix...
// gridSize = 2^k * n, so that each thread always processes the same line or column
}
sdata[tid] = thread_min;
__syncthreads();
if (n_threads_reduction >= 1024 && n_rows_per_block < 1024) { if (tid < 512) { sdata[tid] = max(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
if (n_threads_reduction >= 512 && n_rows_per_block < 512) { if (tid < 256) { sdata[tid] = max(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (n_threads_reduction >= 256 && n_rows_per_block < 256) { if (tid < 128) { sdata[tid] = max(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (n_threads_reduction >= 128 && n_rows_per_block < 128) { if (tid < 64) { sdata[tid] = max(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) max_in_rows_warp_reduce(sdata, tid);
if (tid < n_rows_per_block) max_in_mat_row[bid*n_rows_per_block + tid] = sdata[tid];
}
// b) Find the minimum of each column, then subtract it from that column
const int n_cols_per_block = n / n_blocks_reduction;
__device__ void min_in_cols_warp_reduce(volatile data* sdata, int tid) {
if (n_threads_reduction >= 64 && n_cols_per_block < 64) sdata[tid] = min(sdata[tid], sdata[tid + 32]);
if (n_threads_reduction >= 32 && n_cols_per_block < 32) sdata[tid] = min(sdata[tid], sdata[tid + 16]);
if (n_threads_reduction >= 16 && n_cols_per_block < 16) sdata[tid] = min(sdata[tid], sdata[tid + 8]);
if (n_threads_reduction >= 8 && n_cols_per_block < 8) sdata[tid] = min(sdata[tid], sdata[tid + 4]);
if (n_threads_reduction >= 4 && n_cols_per_block < 4) sdata[tid] = min(sdata[tid], sdata[tid + 2]);
if (n_threads_reduction >= 2 && n_cols_per_block < 2) sdata[tid] = min(sdata[tid], sdata[tid + 1]);
}
__global__ void min_in_cols()
{
__shared__ data sdata[n_threads_reduction]; // One temporary result for each thread
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
// One gets the line and column from the blockID and threadID.
unsigned int c = bid * n_cols_per_block + tid % n_cols_per_block;
unsigned int l = tid / n_cols_per_block;
const unsigned int gridSize = n_threads_reduction * n_blocks_reduction;
data thread_min = MAX_DATA;
while (l < n) {
unsigned int i = c * nrow + l;
thread_min = min(thread_min, slack[i]);
l += gridSize / n; // go to the next piece of the matrix...
// gridSize = 2^k * n, so that each thread always processes the same line or column
}
sdata[tid] = thread_min;
__syncthreads();
if (n_threads_reduction >= 1024 && n_cols_per_block < 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
if (n_threads_reduction >= 512 && n_cols_per_block < 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (n_threads_reduction >= 256 && n_cols_per_block < 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (n_threads_reduction >= 128 && n_cols_per_block < 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) min_in_cols_warp_reduce(sdata, tid);
if (tid < n_cols_per_block) min_in_mat_col[bid*n_cols_per_block + tid] = sdata[tid];
}
__global__ void step_1_sub_row()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int l = i & row_mask;
slack[i] = max_in_mat_row[l] - slack[i]; // subtract the entry from its row maximum
}
__global__ void step_1_col_sub()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int c = i >> log2_n;
slack[i] = slack[i] - min_in_mat_col[c]; // subtract the column minimum from that column
if (i == 0) zeros_vector_size = 0;
}
// Compress matrix
__global__ void compress_matrix(){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (slack[i] == 0) {
int j = atomicAdd(&zeros_vector_size, 1);
zeros[j] = i;
}
}
// STEP 2
// Find a zero of slack. If there are no starred zeros in its
// column or row star the zero. Repeat for each zero.
__global__ void step_2()
{
int i = threadIdx.x;
bool repeat;
do {
repeat = false;
__syncthreads();
for (int j = i; j < zeros_vector_size; j += blockDim.x)
{
int z = zeros[j];
int l = z & row_mask;
int c = z >> log2_n;
if (cover_row[l] == 0 && cover_column[c] == 0) {
// the thread tries to acquire the line
if (!atomicExch(&(cover_row[l]), 1)){
// only one thread gets the line
if (!atomicExch(&(cover_column[c]), 1)){
// only one thread gets the column
row_of_star_at_column[c] = l;
column_of_star_at_row[l] = c;
}
else {
cover_row[l] = 0;
repeat = true;
}
}
}
}
__syncthreads();
} while (repeat);
}
// STEP 3
// uncover all the rows and columns before going to step 3
__global__ void step_3ini()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
cover_row[i] = 0;
cover_column[i] = 0;
if (i == 0) n_matches = 0;
}
// Cover each column with a starred zero. If all the columns are
// covered then the matching is maximum
__global__ void step_3()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (row_of_star_at_column[i]>=0)
{
cover_column[i] = 1;
atomicAdd((int*)&n_matches, 1);
}
}
// STEP 4
// Find a noncovered zero and prime it. If there is no starred
// zero in the row containing this primed zero, go to Step 5.
// Otherwise, cover this row and uncover the column containing
// the starred zero. Continue in this manner until there are no
// uncovered zeros left. Save the smallest uncovered value and
// Go to Step 6.
__global__ void step_4_init()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
column_of_prime_at_row[i] =-1;
row_of_green_at_column[i] = -1;
column_of_zero_at_row[i] = -1;
}
// Maps the uncovered zeros into column_of_zero_at_row
__global__ void step_4a(){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < zeros_vector_size) {
int z = zeros[i];
int l = z & row_mask;
int c = z >> log2_n;
if (!cover_row[l] && !cover_column[c]){
column_of_zero_at_row[l] = c;
found = true; // This is set to false in 4b and at initialization.
}
}
if (i == 0) {
goto_5 = false;
}
}
// The rest of step 4
__global__ void step_4b(){
int l = blockDim.x * blockIdx.x + threadIdx.x;
int c0 = column_of_zero_at_row[l];
if (c0>=0)
{
column_of_prime_at_row[l] = c0;
int c = column_of_star_at_row[l];
if (c >= 0) {
cover_row[l] = 1;
cover_column[c] = 0;
found = false;
}
else
goto_5 = true;
}
column_of_zero_at_row[l] = -1;
}
/* STEP 5:
Construct a series of alternating primed and starred zeros as
follows:
Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0(if any).
Let Z2 denote the primed zero in the row of Z1(there will always
be one). Continue until the series terminates at a primed zero
that has no starred zero in its column. Unstar each starred
zero of the series, star each primed zero of the series, erase
all primes and uncover every line in the matrix. Return to Step 3.*/
// Eliminates joining paths
__global__ void step_5a()
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int r_Z0, c_Z0;
c_Z0 = column_of_prime_at_row[i];
if (c_Z0 >= 0 && column_of_star_at_row[i] < 0){
row_of_green_at_column[c_Z0] = i;
while ((r_Z0 = row_of_star_at_column[c_Z0]) >= 0){
c_Z0 = column_of_prime_at_row[r_Z0];
row_of_green_at_column[c_Z0] = r_Z0;
}
}
}
// Applies the alternating paths
__global__ void step_5b()
{
int j = blockDim.x * blockIdx.x + threadIdx.x;
int r_Z0, c_Z0, c_Z2;
r_Z0 = row_of_green_at_column[j];
if (r_Z0 >= 0 && row_of_star_at_column[j] < 0){
c_Z2 = column_of_star_at_row[r_Z0];
column_of_star_at_row[r_Z0] = j;
row_of_star_at_column[j] = r_Z0;
while (c_Z2 >= 0) {
r_Z0 = row_of_green_at_column[c_Z2]; // row of Z2
c_Z0 = c_Z2; // col of Z2
c_Z2 = column_of_star_at_row[r_Z0]; // col of Z4
// star Z2
column_of_star_at_row[r_Z0] = c_Z0;
row_of_star_at_column[c_Z0] = r_Z0;
}
}
}
// STEP 6
// Add the minimum uncovered value to every element of each covered
// row, and subtract it from every element of each uncovered column.
// Return to Step 4 without altering any stars, primes, or covered lines.
template <unsigned int blockSize>
__device__ void min_warp_reduce(volatile data* sdata, int tid) {
if (blockSize >= 64) sdata[tid] = min(sdata[tid], sdata[tid + 32]);
if (blockSize >= 32) sdata[tid] = min(sdata[tid], sdata[tid + 16]);
if (blockSize >= 16) sdata[tid] = min(sdata[tid], sdata[tid + 8]);
if (blockSize >= 8) sdata[tid] = min(sdata[tid], sdata[tid + 4]);
if (blockSize >= 4) sdata[tid] = min(sdata[tid], sdata[tid + 2]);
if (blockSize >= 2) sdata[tid] = min(sdata[tid], sdata[tid + 1]);
}
template <unsigned int blockSize> // blockSize is the size of a block of threads
__device__ void min_reduce1(volatile data *g_idata, volatile data *g_odata, unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize * 2) + tid;
unsigned int gridSize = blockSize * 2 * gridDim.x;
sdata[tid] = MAX_DATA;
while (i < n) {
int i1 = i;
int i2 = i + blockSize;
int l1 = i1 & row_mask;
int c1 = i1 >> log2_n;
int g1;
if (cover_row[l1] == 1 || cover_column[c1] == 1) g1 = MAX_DATA;
else g1 = g_idata[i1];
int l2 = i2 & row_mask;
int c2 = i2 >> log2_n;
int g2;
if (cover_row[l2] == 1 || cover_column[c2] == 1) g2 = MAX_DATA;
else g2 = g_idata[i2];
sdata[tid] = min(sdata[tid], min(g1, g2));
i += gridSize;
}
__syncthreads();
if (blockSize >= 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) min_warp_reduce<blockSize>(sdata, tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <unsigned int blockSize>
__device__ void min_reduce2(volatile data *g_idata, volatile data *g_odata, unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize * 2) + tid;
sdata[tid] = min(g_idata[i], g_idata[i + blockSize]);
__syncthreads();
if (blockSize >= 1024) { if (tid < 512) { sdata[tid] = min(sdata[tid], sdata[tid + 512]); } __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = min(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = min(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = min(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) min_warp_reduce<blockSize>(sdata, tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
__global__ void step_6_add_sub()
{
// STEP 6:
// /*STEP 6: Add the minimum uncovered value to every element of each covered
// row, and subtract it from every element of each uncovered column.
// Return to Step 4 without altering any stars, primes, or covered lines. */
int i = blockDim.x * blockIdx.x + threadIdx.x;
int l = i & row_mask;
int c = i >> log2_n;
if (cover_row[l] == 1 && cover_column[c] == 1)
slack[i] += d_min_in_mat;
if (cover_row[l] == 0 && cover_column[c] == 0)
slack[i] -= d_min_in_mat;
if (i == 0) zeros_vector_size = 0;
}
__global__ void min_reduce_kernel1() {
min_reduce1<n_threads_reduction>(slack, d_min_in_mat_vect, nrow*ncol);
}
__global__ void min_reduce_kernel2() {
min_reduce2<n_threads_reduction / 2>(d_min_in_mat_vect, &d_min_in_mat, n_blocks_reduction);
}
// Hungarian_Algorithm
// This function runs on the device if dynamic parallelism is enabled,
// or on the host if it is disabled (SM20).
#ifdef SM20
void Hungarian_Algorithm()
#else
__global__ void Hungarian_Algorithm()
#endif
{
// Initialization
Init << < n_blocks, n_threads >> > ();
checkCuda(cudaDeviceSynchronize());
// Step 1 kernels
max_in_rows << < n_blocks_reduction, n_threads_reduction >> >();
checkCuda(cudaDeviceSynchronize());
step_1_sub_row << <n_blocks_full, n_threads_full >> >();
checkCuda(cudaDeviceSynchronize());
min_in_cols << < n_blocks_reduction, n_threads_reduction >> >();
checkCuda(cudaDeviceSynchronize());
step_1_col_sub << <n_blocks_full, n_threads_full >> >();
checkCuda(cudaDeviceSynchronize());
// compress_matrix
compress_matrix << < n_blocks_full, n_threads_full >> > ();
checkCuda(cudaDeviceSynchronize());
// Step 2 kernels
step_2 << <1, min(max_threads_per_block, nrow) >> > ();
checkCuda(cudaDeviceSynchronize());
while (1) { // repeat steps 3 to 6
// Step 3 kernels
step_3ini << <n_blocks, n_threads >> >();
checkCuda(cudaDeviceSynchronize());
step_3 << <n_blocks, n_threads >> > ();
checkCuda(cudaDeviceSynchronize());
#ifdef SM20
cudaMemcpyFromSymbol(&h_n_matches, n_matches, sizeof(int));
if (h_n_matches >= ncol) return; // It's done
#else
if (n_matches >= ncol) return; // It's done
#endif
//step 4_kernels
step_4_init << <n_blocks, n_threads >> > ();
checkCuda(cudaDeviceSynchronize());
while (1) // repeat step 4 and 6
{
do { // step 4 loop
#ifdef SM20
cudaMemcpyFromSymbol(&h_zeros_vector_size, zeros_vector_size, sizeof(int));
if (h_zeros_vector_size > 100 * n)
step_4a << < n_blocks_full, n_threads_full >> > ();
else if (h_zeros_vector_size > 10 * n)
step_4a << < 100 * n / n_threads, n_threads >> > ();
else
step_4a << < 10 * n / n_threads, n_threads >> > ();
#else
if (zeros_vector_size>100 * n)
step_4a << < n_blocks_full, n_threads_full >> > ();
else if (zeros_vector_size>10 * n)
step_4a << < 100 * n / n_threads, n_threads >> > ();
else
step_4a << < 10 * n / n_threads, n_threads >> > ();
#endif
checkCuda(cudaDeviceSynchronize());
#ifdef SM20
cudaMemcpyFromSymbol(&h_found, found, sizeof(bool));
if (!h_found) break;
#else
if (!found) break;
#endif
step_4b << < n_blocks, n_threads >> >();
checkCuda(cudaDeviceSynchronize());
#ifdef SM20
cudaMemcpyFromSymbol(&h_goto_5, goto_5, sizeof(bool));
} while (!h_goto_5);
#else
} while (!goto_5);
#endif
#ifdef SM20
cudaMemcpyFromSymbol(&h_goto_5, goto_5, sizeof(bool));
if (h_goto_5) break; // Or if (!h_found)
#else
if (goto_5) break; // Or if (!found)
#endif
//step 6_kernel
min_reduce_kernel1 << <n_blocks_reduction, n_threads_reduction, n_threads_reduction*sizeof(int) >> >();
checkCuda(cudaDeviceSynchronize());
min_reduce_kernel2 << <1, n_blocks_reduction / 2, (n_blocks_reduction / 2) * sizeof(int) >> >();
checkCuda(cudaDeviceSynchronize());
step_6_add_sub << <n_blocks_full, n_threads_full >> >();
checkCuda(cudaDeviceSynchronize());
//compress_matrix
compress_matrix << < n_blocks_full, n_threads_full >> > ();
checkCuda(cudaDeviceSynchronize());
} // repeat step 4 and 6
step_5a << < n_blocks, n_threads >> > ();
checkCuda(cudaDeviceSynchronize());
step_5b << < n_blocks, n_threads >> > ();
checkCuda(cudaDeviceSynchronize());
} // repeat steps 3 to 6
}
// -------------------------------------------------------------------------------------
// Host code
// -------------------------------------------------------------------------------------
// Used to make sure some constants are properly set
void check(bool val, const char *str){
if (!val) {
printf("Check failed: %s!\n", str);
getchar();
exit(-1);
}
}
int main()
{
// Constant checks:
check(n == (1 << log2_n), "Incorrect log2_n!");
check(n_threads*n_blocks == n, "n_threads*n_blocks != n\n");
// step 1
check(n_blocks_reduction <= n, "Step 1: Should have several lines per block!");
check(n % n_blocks_reduction == 0, "Step 1: Number of lines per block should be integer!");
check((n_blocks_reduction*n_threads_reduction) % n == 0, "Step 1: The grid size must be a multiple of the line size!");
check(n_threads_reduction*n_blocks_reduction <= n*n, "Step 1: The grid size is bigger than the matrix size!");
// step 6
check(n_threads_full*n_blocks_full <= n*n, "Step 6: The grid size is bigger than the matrix size!");
#ifndef USE_TEST_MATRIX
std::default_random_engine generator(seed);
std::uniform_int_distribution<int> distribution(0, range-1);
for (int c = 0; c < ncol; c++)
for (int r = 0; r < nrow; r++) {
pay[c][r] = distribution(generator);
}
#endif
// Copy vectors from host memory to device memory
cudaMemcpyToSymbol(slack, pay, sizeof(data)*nrow*ncol); // symbol refers to the device memory hence "To" means from Host to Device
// Invoke kernels
time_t start_time = clock();
#ifdef SM20
Hungarian_Algorithm();
#else
Hungarian_Algorithm << < 1, 1 >> > ();
#endif
cudaDeviceSynchronize();
time_t stop_time = clock();
// Copy assignments from Device to Host and calculate the total Cost.
cudaMemcpyFromSymbol(h_column_of_star_at_row, column_of_star_at_row, nrow*sizeof(int));
int total_pay = 0;
for (int r = 0; r < nrow; r++) {
int c = h_column_of_star_at_row[r];
if (c >= 0) total_pay += pay[c][r];
}
printf("Total pay is: %d \n", total_pay);
printf("Elapsed time: %f ms\n", 1000.0*(double)(stop_time - start_time) / CLOCKS_PER_SEC);
printf("Note: This time measurment is portable but very inaccurate!\n");
}
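// A minimal host-side sanity check one could add (hypothetical helper, not called above): a valid
// result assigns every row to a distinct column, so h_column_of_star_at_row should be a permutation.
static bool isValidAssignment(const int *col_of_star_at_row, int n_rows) {
    for (int r = 0; r < n_rows; r++) {
        int c = col_of_star_at_row[r];
        if (c < 0 || c >= n_rows) return false;            // every row must receive a column
        for (int r2 = r + 1; r2 < n_rows; r2++)
            if (col_of_star_at_row[r2] == c) return false; // no column may be assigned twice
    }
    return true;
}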
|
d515b366dc1d1b17a8140aeaad531c4630b8eb34.hip | // !!! This is a file automatically generated by hipify!!!
#include "Region.h"
#include <hip/hip_runtime.h>
// our kernel for edge updates
// parameters:
// g: graph
// epsilon: epsilon
// numThreadUpdates: number of updates performed by each thread
// lambdaGlobal: global lambda array
// runFlag: a flag that signals when the kernel should terminate
// numThreads: number of launched threads (the range of random edge indices is taken from the graph)
template<typename T, typename S>
__global__ void EdgeUpdateKernel(MPGraph<T, S>* g, T epsilon, size_t* numThreadUpdates, T* lambdaGlobal, volatile int* runFlag, int numThreads)
{
int tx = threadIdx.x + blockIdx.x * blockDim.x;
if(tx < numThreads)
{
int uid;
hiprandState_t state;
hiprand_init(clock64(),tx,0,&state);
// allocate space for edge workspace
typename MPGraph<T, S>::REdgeWorkspaceID rew;
rew = g->AllocateReparameterizeEdgeWorkspaceMem(epsilon);
// allocate an array that will act as our base
size_t msgSize = g->GetLambdaSize();
T* devLambdaBase = (T*)malloc(msgSize * sizeof(T));
//memset(devLambdaBase, T(0), sizeof(T) * msgSize);
int rangeRandNums = g->NumberOfEdges();
while(true)
{
if(!*runFlag)
{
break;
}
uid = floorf(hiprand_uniform(&state) * rangeRandNums);
g->CopyMessagesForEdge(lambdaGlobal, devLambdaBase, uid);
g->ReparameterizeEdge(devLambdaBase, uid, epsilon, false, rew);
g->UpdateEdge(devLambdaBase, lambdaGlobal, uid, false);
//`
numThreadUpdates[tx]++;
// __syncthreads();
}
g->DeAllocateReparameterizeEdgeWorkspaceMem(rew);
free(devLambdaBase);
}
}
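// Both update kernels poll runFlag, which the host overwrites in device memory while they are
// still running; reading it through a volatile pointer (the kernel parameter above, and
// checkFlag below) keeps the compiler from caching the flag in a register, so every loop
// iteration re-reads the current value.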
__device__ bool checkFlag(volatile bool* flag)
{
return *flag;
}
template<typename T, typename S>
__global__ void RegionUpdateKernel(MPGraph<T, S>* g, T epsilon, size_t* numThreadUpdates, T* lambdaGlobal, T* lambdaBase, volatile bool* runFlag, int numThreads)
{
int tx = threadIdx.x + blockIdx.x * blockDim.x;
if(tx < numThreads)
{
int uid;
hiprandState_t state;
hiprand_init(clock64(),tx,0,&state);
// allocate space for edge workspace
typename MPGraph<T, S>::RRegionWorkspaceID rew;
rew = g->AllocateReparameterizeRegionWorkspaceMem(epsilon);
// allocate an array that will act as our base
size_t msgSize = g->GetLambdaSize();
T* devLambdaBase = &(lambdaBase[tx*msgSize]);
memset(devLambdaBase, T(0), sizeof(T) * msgSize);
int rangeRandNums = g->NumberOfRegionsWithParents();
// for(int i = 0; i < rangeRandNums; i++)
// {
while(true)
{
if(!checkFlag(runFlag))
{
break;
}
uid = floorf(hiprand_uniform(&state) * rangeRandNums);
g->CopyMessagesForStar(lambdaGlobal, devLambdaBase, uid);
g->ReparameterizeRegion(devLambdaBase, uid, epsilon, false, rew);
g->UpdateRegion(devLambdaBase, lambdaGlobal, uid, false);
//`
numThreadUpdates[tx]++;
}
//
// // free device pointers
g->DeAllocateReparameterizeRegionWorkspaceMem(rew);
free(devLambdaBase);
//
//atomicAdd(runFlag, numThreads);
//
}
//
}
template<typename T, typename S>
int CudaAsyncRMPThread<T,S>::CudaRunMP(MPGraph<T, S>& g, T epsilon, int numIterations, int numThreads, int WaitTimeInMS) {
size_t msgSize = g.HostGetLambdaSize();
std::cout << "Num threads " << numThreads << std::endl;
	// handle this case later.
if (msgSize == 0) {
typename MPGraph<T, S>::DualWorkspaceID dw = g.HostAllocateDualWorkspaceMem(epsilon);
std::cout << "0: " << g.HostComputeDual(NULL, epsilon, dw) << std::endl;
g.HostDeAllocateDualWorkspaceMem(dw);
return 0;
}
std::cout << std::setprecision(15);
// allocate device pointers for lambda global
T* devLambdaGlobal = NULL;
gpuErrchk(hipMalloc((void**)&devLambdaGlobal, sizeof(T) * msgSize));
gpuErrchk(hipMemset((void*)devLambdaGlobal, T(0), sizeof(T)*msgSize));
// allocate on host memory for cuda streaming
T* lambdaGlob = NULL;
gpuErrchk(hipHostMalloc((void**)&lambdaGlob, sizeof(T)*msgSize));
gpuErrchk(hipMemset((void*)lambdaGlob, T(0), sizeof(T)*msgSize));
// allocate space and copy graph to GPU
MPGraph<T,S>* gPtr = NULL;
gpuErrchk(hipMalloc((void**)&gPtr, sizeof(g)));
gpuErrchk(hipMemcpy(gPtr, &g, sizeof(g), hipMemcpyHostToDevice));
// initialize the number of region updates
size_t* numThreadUpdates = NULL;
size_t* hostThreadUpdates = new size_t[numThreads];
gpuErrchk(hipMalloc((void**)&numThreadUpdates, numThreads * sizeof(size_t)));
gpuErrchk(hipMemset((void*)numThreadUpdates, 0, numThreads * sizeof(size_t)));
// allocate all the base lambdas
T* indivLambda;
gpuErrchk(hipMalloc((void**)&indivLambda, sizeof(T)*msgSize*numThreads));
gpuErrchk(hipMemset((void*)indivLambda, 0, sizeof(T)*msgSize*numThreads));
// allocate run flag
bool* devRunFlag = NULL;
bool tmpTest = true;
gpuErrchk(hipMalloc((void**)&devRunFlag, sizeof(bool)));
gpuErrchk(hipMemcpy(devRunFlag, &tmpTest, sizeof(bool), hipMemcpyHostToDevice));
// create an asynchronous cuda stream
// we only have two streams, the main (CPU) stream, and the GPU one
// CPU stream only copies back every so often (or writes to the GPU)
// GPU is executing
hipStream_t streamCopy, streamExec;
gpuErrchk(hipStreamCreate(&streamCopy));
gpuErrchk(hipStreamCreate(&streamExec));
// create a ThreadSync object (not necessary at all, but hey, I wanna
// make sure this actually works)
ThreadSync<T, S> sy(numThreads, lambdaGlob, epsilon, &g);
// grid/block dimensions
dim3 DimGrid(ceil(numThreads * 1.0 / BLOCK_SIZE),1,1);
dim3 DimBlock(BLOCK_SIZE,1,1);
bool stopFlag = false;
std::cout << "Executing kernel..." << std::endl;
// start the kernel
// EdgeUpdateKernel<<<DimGrid, DimBlock, 0, streamExec>>>(gPtr, epsilon, numThreadUpdates, devLambdaGlobal, devRunFlag, numThreads);
hipLaunchKernelGGL(( RegionUpdateKernel), dim3(DimGrid), dim3(DimBlock), 0, streamExec, gPtr, epsilon, numThreadUpdates, devLambdaGlobal, indivLambda, devRunFlag, numThreads);
for (int k = 0; k < numIterations; ++k)
{
std::cout << "Iteration " << k << std::endl;
std::this_thread::sleep_for(std::chrono::milliseconds(WaitTimeInMS));
hipMemcpyAsync(lambdaGlob, devLambdaGlobal, sizeof(T)*msgSize, hipMemcpyDeviceToHost, streamCopy);
hipStreamSynchronize(streamCopy);
sy.ComputeDualNoSync();
}
gpuErrchk(hipMemcpyAsync(devRunFlag, &stopFlag, sizeof(bool), hipMemcpyHostToDevice, streamCopy));
hipStreamSynchronize(streamCopy);
// now, we can block
gpuErrchk(hipMemcpy(hostThreadUpdates, numThreadUpdates, sizeof(size_t)*numThreads, hipMemcpyDeviceToHost));
g.ResetMessageMemory();
hipMemcpy(lambdaGlob, devLambdaGlobal, sizeof(T)*msgSize, hipMemcpyDeviceToHost);
sy.ComputeDualNoSync();
std::cout << "Kernel Terminated" << std::endl;
size_t regionUpdates = 0;
for(int k=0;k<numThreads;++k) {
size_t tmp = hostThreadUpdates[k];
// std::cout << "Thread " << k << ": " << tmp << std::endl;
regionUpdates += tmp;
}
//hipFree(gPtr);
//hipHostFree(lambdaGlob);
//hipFree(devRunFlag);
//hipFree(indivLambda);
//hipFree(devLambdaGlobal);
//hipHostFree(lambdaGlob);
//delete [] hostThreadUpdates;
//hipStreamDestroy(streamCopy);
//hipStreamDestroy(streamExec);
hipDeviceReset();
std::cout << "Region updates: " << regionUpdates << std::endl;
std::cout << "Total regions: " << g.HostNumberOfRegionsWithParents() << std::endl;
// std::this_thread::sleep_for(std::chrono::milliseconds(1000));
std::cout << "Terminating program." << std::endl;
return 0;
}
| d515b366dc1d1b17a8140aeaad531c4630b8eb34.cu | #include "Region.h"
#include <cuda.h>
// our kernel for edge updates
// parameters:
// g: graph
// epsilon: epsilon
// numThreadUpdates: number of updates in each thread
// lambdaGlobal: global lambda array
// runFlag: a flag the host flips to tell the kernel loop to terminate
// numThreads: number of worker threads (the random-number range is taken from the graph inside the kernel)
template<typename T, typename S>
__global__ void EdgeUpdateKernel(MPGraph<T, S>* g, T epsilon, size_t* numThreadUpdates, T* lambdaGlobal, volatile int* runFlag, int numThreads)
{
int tx = threadIdx.x + blockIdx.x * blockDim.x;
if(tx < numThreads)
{
int uid;
curandState_t state;
curand_init(clock64(),tx,0,&state);
// allocate space for edge workspace
typename MPGraph<T, S>::REdgeWorkspaceID rew;
rew = g->AllocateReparameterizeEdgeWorkspaceMem(epsilon);
// allocate an array that will act as our base
size_t msgSize = g->GetLambdaSize();
T* devLambdaBase = (T*)malloc(msgSize * sizeof(T));
//memset(devLambdaBase, T(0), sizeof(T) * msgSize);
int rangeRandNums = g->NumberOfEdges();
while(true)
{
if(!*runFlag)
{
break;
}
uid = floorf(curand_uniform(&state) * rangeRandNums);
g->CopyMessagesForEdge(lambdaGlobal, devLambdaBase, uid);
g->ReparameterizeEdge(devLambdaBase, uid, epsilon, false, rew);
g->UpdateEdge(devLambdaBase, lambdaGlobal, uid, false);
//`
numThreadUpdates[tx]++;
// __syncthreads();
}
g->DeAllocateReparameterizeEdgeWorkspaceMem(rew);
free(devLambdaBase);
}
}
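// Both update kernels poll runFlag, which the host overwrites in device memory while they are
// still running; reading it through a volatile pointer (the kernel parameter above, and
// checkFlag below) keeps the compiler from caching the flag in a register, so every loop
// iteration re-reads the current value.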
__device__ bool checkFlag(volatile bool* flag)
{
return *flag;
}
template<typename T, typename S>
__global__ void RegionUpdateKernel(MPGraph<T, S>* g, T epsilon, size_t* numThreadUpdates, T* lambdaGlobal, T* lambdaBase, volatile bool* runFlag, int numThreads)
{
int tx = threadIdx.x + blockIdx.x * blockDim.x;
if(tx < numThreads)
{
int uid;
curandState_t state;
curand_init(clock64(),tx,0,&state);
// allocate space for edge workspace
typename MPGraph<T, S>::RRegionWorkspaceID rew;
rew = g->AllocateReparameterizeRegionWorkspaceMem(epsilon);
// allocate an array that will act as our base
size_t msgSize = g->GetLambdaSize();
T* devLambdaBase = &(lambdaBase[tx*msgSize]);
memset(devLambdaBase, T(0), sizeof(T) * msgSize);
int rangeRandNums = g->NumberOfRegionsWithParents();
// for(int i = 0; i < rangeRandNums; i++)
// {
while(true)
{
if(!checkFlag(runFlag))
{
break;
}
uid = floorf(curand_uniform(&state) * rangeRandNums);
g->CopyMessagesForStar(lambdaGlobal, devLambdaBase, uid);
g->ReparameterizeRegion(devLambdaBase, uid, epsilon, false, rew);
g->UpdateRegion(devLambdaBase, lambdaGlobal, uid, false);
//`
numThreadUpdates[tx]++;
}
//
// // free device pointers
g->DeAllocateReparameterizeRegionWorkspaceMem(rew);
free(devLambdaBase);
//
//atomicAdd(runFlag, numThreads);
//
}
//
}
template<typename T, typename S>
int CudaAsyncRMPThread<T,S>::CudaRunMP(MPGraph<T, S>& g, T epsilon, int numIterations, int numThreads, int WaitTimeInMS) {
size_t msgSize = g.HostGetLambdaSize();
std::cout << "Num threads " << numThreads << std::endl;
	// handle this case later.
if (msgSize == 0) {
typename MPGraph<T, S>::DualWorkspaceID dw = g.HostAllocateDualWorkspaceMem(epsilon);
std::cout << "0: " << g.HostComputeDual(NULL, epsilon, dw) << std::endl;
g.HostDeAllocateDualWorkspaceMem(dw);
return 0;
}
std::cout << std::setprecision(15);
// allocate device pointers for lambda global
T* devLambdaGlobal = NULL;
gpuErrchk(cudaMalloc((void**)&devLambdaGlobal, sizeof(T) * msgSize));
gpuErrchk(cudaMemset((void*)devLambdaGlobal, T(0), sizeof(T)*msgSize));
// allocate on host memory for cuda streaming
T* lambdaGlob = NULL;
gpuErrchk(cudaMallocHost((void**)&lambdaGlob, sizeof(T)*msgSize));
gpuErrchk(cudaMemset((void*)lambdaGlob, T(0), sizeof(T)*msgSize));
// allocate space and copy graph to GPU
MPGraph<T,S>* gPtr = NULL;
gpuErrchk(cudaMalloc((void**)&gPtr, sizeof(g)));
gpuErrchk(cudaMemcpy(gPtr, &g, sizeof(g), cudaMemcpyHostToDevice));
// initialize the number of region updates
size_t* numThreadUpdates = NULL;
size_t* hostThreadUpdates = new size_t[numThreads];
gpuErrchk(cudaMalloc((void**)&numThreadUpdates, numThreads * sizeof(size_t)));
gpuErrchk(cudaMemset((void*)numThreadUpdates, 0, numThreads * sizeof(size_t)));
// allocate all the base lambdas
T* indivLambda;
gpuErrchk(cudaMalloc((void**)&indivLambda, sizeof(T)*msgSize*numThreads));
gpuErrchk(cudaMemset((void*)indivLambda, 0, sizeof(T)*msgSize*numThreads));
// allocate run flag
bool* devRunFlag = NULL;
bool tmpTest = true;
gpuErrchk(cudaMalloc((void**)&devRunFlag, sizeof(bool)));
gpuErrchk(cudaMemcpy(devRunFlag, &tmpTest, sizeof(bool), cudaMemcpyHostToDevice));
// create an asynchronous cuda stream
// we only have two streams, the main (CPU) stream, and the GPU one
// CPU stream only copies back every so often (or writes to the GPU)
// GPU is executing
cudaStream_t streamCopy, streamExec;
gpuErrchk(cudaStreamCreate(&streamCopy));
gpuErrchk(cudaStreamCreate(&streamExec));
// create a ThreadSync object (not necessary at all, but hey, I wanna
// make sure this actually works)
ThreadSync<T, S> sy(numThreads, lambdaGlob, epsilon, &g);
// grid/block dimensions
dim3 DimGrid(ceil(numThreads * 1.0 / BLOCK_SIZE),1,1);
dim3 DimBlock(BLOCK_SIZE,1,1);
bool stopFlag = false;
std::cout << "Executing kernel..." << std::endl;
// start the kernel
// EdgeUpdateKernel<<<DimGrid, DimBlock, 0, streamExec>>>(gPtr, epsilon, numThreadUpdates, devLambdaGlobal, devRunFlag, numThreads);
RegionUpdateKernel<<<DimGrid, DimBlock, 0, streamExec>>>(gPtr, epsilon, numThreadUpdates, devLambdaGlobal, indivLambda, devRunFlag, numThreads);
for (int k = 0; k < numIterations; ++k)
{
std::cout << "Iteration " << k << std::endl;
std::this_thread::sleep_for(std::chrono::milliseconds(WaitTimeInMS));
cudaMemcpyAsync(lambdaGlob, devLambdaGlobal, sizeof(T)*msgSize, cudaMemcpyDeviceToHost, streamCopy);
cudaStreamSynchronize(streamCopy);
sy.ComputeDualNoSync();
}
gpuErrchk(cudaMemcpyAsync(devRunFlag, &stopFlag, sizeof(bool), cudaMemcpyHostToDevice, streamCopy));
cudaStreamSynchronize(streamCopy);
// now, we can block
gpuErrchk(cudaMemcpy(hostThreadUpdates, numThreadUpdates, sizeof(size_t)*numThreads, cudaMemcpyDeviceToHost));
g.ResetMessageMemory();
cudaMemcpy(lambdaGlob, devLambdaGlobal, sizeof(T)*msgSize, cudaMemcpyDeviceToHost);
sy.ComputeDualNoSync();
std::cout << "Kernel Terminated" << std::endl;
size_t regionUpdates = 0;
for(int k=0;k<numThreads;++k) {
size_t tmp = hostThreadUpdates[k];
// std::cout << "Thread " << k << ": " << tmp << std::endl;
regionUpdates += tmp;
}
//cudaFree(gPtr);
//cudaFreeHost(lambdaGlob);
//cudaFree(devRunFlag);
//cudaFree(indivLambda);
//cudaFree(devLambdaGlobal);
//cudaFreeHost(lambdaGlob);
//delete [] hostThreadUpdates;
//cudaStreamDestroy(streamCopy);
//cudaStreamDestroy(streamExec);
cudaDeviceReset();
std::cout << "Region updates: " << regionUpdates << std::endl;
std::cout << "Total regions: " << g.HostNumberOfRegionsWithParents() << std::endl;
// std::this_thread::sleep_for(std::chrono::milliseconds(1000));
std::cout << "Terminating program." << std::endl;
return 0;
}
|
8a7bf3026fc0c90764385ee6d2d44ad8501403f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2022 Institute of Parallel and Distributed Systems, Shanghai Jiao Tong University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <hipcub/hipcub.hpp>
#include "cuda_function.h"
#include "cuda_utils.h"
#include "../device.h"
#include "../common.h"
#include "../constant.h"
#include "../logging.h"
namespace samgraph {
namespace common {
namespace cuda {
namespace {
template <size_t BLOCK_SIZE, size_t TILE_SIZE>
__global__ void count_miss_cache(const IdType *hashtable, const IdType *nodes,
const size_t num_nodes, IdType *miss_counts,
IdType *cache_counts) {
const size_t block_start = TILE_SIZE * blockIdx.x;
const size_t block_end = TILE_SIZE * (blockIdx.x + 1);
using BlockReduce = typename hipcub::BlockReduce<IdType, BLOCK_SIZE>;
IdType miss_count = 0;
IdType cache_count = 0;
#pragma unroll
for (size_t index = threadIdx.x + block_start; index < block_end;
index += BLOCK_SIZE) {
if (index < num_nodes) {
if (hashtable[nodes[index]] == Constant::kEmptyKey) {
miss_count++;
} else {
cache_count++;
}
}
}
__shared__ typename BlockReduce::TempStorage temp_miss_space;
__shared__ typename BlockReduce::TempStorage temp_cache_space;
miss_count = BlockReduce(temp_miss_space).Sum(miss_count);
cache_count = BlockReduce(temp_cache_space).Sum(cache_count);
if (threadIdx.x == 0) {
miss_counts[blockIdx.x] = miss_count;
cache_counts[blockIdx.x] = cache_count;
if (blockIdx.x == 0) {
miss_counts[gridDim.x] = 0;
cache_counts[gridDim.x] = 0;
}
}
}
template <size_t BLOCK_SIZE, size_t TILE_SIZE>
__global__ void get_miss_index(const IdType *hashtable, const IdType *nodes,
const size_t num_nodes,
IdType *output_miss_dst_index,
IdType *output_miss_src_index,
const IdType *miss_counts_prefix) {
using FlagType = IdType;
using BlockScan = typename hipcub::BlockScan<FlagType, BLOCK_SIZE>;
constexpr const IdType VALS_PER_THREAD = TILE_SIZE / BLOCK_SIZE;
__shared__ typename BlockScan::TempStorage temp_space;
BlockPrefixCallbackOp<FlagType> prefix_op(0);
const IdType offset = miss_counts_prefix[blockIdx.x];
for (IdType i = 0; i < VALS_PER_THREAD; ++i) {
const IdType index = threadIdx.x + i * BLOCK_SIZE + blockIdx.x * TILE_SIZE;
FlagType flag;
if (index < num_nodes && hashtable[nodes[index]] == Constant::kEmptyKey) {
flag = 1;
} else {
flag = 0;
}
BlockScan(temp_space).ExclusiveSum(flag, flag, prefix_op);
__syncthreads();
if (index < num_nodes && hashtable[nodes[index]] == Constant::kEmptyKey) {
const IdType pos = offset + flag;
assert(pos < num_nodes);
// new node ID in subgraph
output_miss_dst_index[pos] = index;
// old node ID in original graph
output_miss_src_index[pos] = nodes[index];
}
}
// if (threadIdx.x == 0 && blockIdx.x == 0) {
// printf("miss count %u, %u\n", miss_counts_prefix[gridDim.x],
// miss_counts_prefix[gridDim.x - 1]);
// }
}
template <size_t BLOCK_SIZE, size_t TILE_SIZE>
__global__ void get_cache_index(const IdType *hashtable, const IdType *nodes,
const size_t num_nodes,
IdType *output_cache_dst_index,
IdType *output_cache_src_index,
const IdType *cache_counts_prefix) {
using FlagType = IdType;
using BlockScan = typename hipcub::BlockScan<FlagType, BLOCK_SIZE>;
constexpr const IdType VALS_PER_THREAD = TILE_SIZE / BLOCK_SIZE;
__shared__ typename BlockScan::TempStorage temp_space;
BlockPrefixCallbackOp<FlagType> prefix_op(0);
const IdType offset = cache_counts_prefix[blockIdx.x];
for (IdType i = 0; i < VALS_PER_THREAD; ++i) {
const IdType index = threadIdx.x + i * BLOCK_SIZE + blockIdx.x * TILE_SIZE;
FlagType flag;
if (index < num_nodes && hashtable[nodes[index]] != Constant::kEmptyKey) {
flag = 1;
} else {
flag = 0;
}
BlockScan(temp_space).ExclusiveSum(flag, flag, prefix_op);
__syncthreads();
if (index < num_nodes && hashtable[nodes[index]] != Constant::kEmptyKey) {
const IdType pos = offset + flag;
// new node ID in subgraph
output_cache_dst_index[pos] = index;
// old node ID in original graph
output_cache_src_index[pos] = hashtable[nodes[index]];
}
}
// if (threadIdx.x == 0 && blockIdx.x == 0) {
// printf("cache count %u, %u\n", cache_counts_prefix[gridDim.x],
// cache_counts_prefix[gridDim.x - 1]);
// }
}
} // namespace
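// GetMissCacheIndex splits the requested node IDs into cache misses and cache hits in three passes:
//  1) count_miss_cache produces per-block miss/hit totals with a block-wide reduction
//     (plus one trailing zero slot),
//  2) an exclusive prefix sum over those totals yields each block's write offset,
//  3) get_miss_index / get_cache_index rescan every tile with a block-wide scan and scatter
//     (dst = position in the request batch, src = original node ID or cached location)
//     at offset + local rank.
// The last entry of each prefix array is the total miss/hit count copied back to the host.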
void GetMissCacheIndex(
IdType *sampler_gpu_hashtable, Context sampler_ctx,
IdType *output_miss_src_index, IdType *output_miss_dst_index,
size_t *num_output_miss, IdType *output_cache_src_index,
IdType *output_cache_dst_index, size_t *num_output_cache,
const IdType *nodes, const size_t num_nodes, StreamHandle stream) {
const size_t num_tiles = RoundUpDiv(num_nodes, Constant::kCudaTileSize);
const dim3 grid(num_tiles);
const dim3 block(Constant::kCudaBlockSize);
auto sampler_device = Device::Get(sampler_ctx);
auto cu_stream = static_cast<hipStream_t>(stream);
sampler_device->SetDevice(sampler_ctx);
IdType *miss_prefix_counts =
static_cast<IdType *>(sampler_device->AllocWorkspace(
sampler_ctx, sizeof(IdType) * (grid.x + 1)));
IdType *cache_prefix_counts =
static_cast<IdType *>(sampler_device->AllocWorkspace(
sampler_ctx, sizeof(IdType) * (grid.x + 1)));
// LOG(DEBUG) << "GetMissCacheIndex num nodes " << num_nodes;
CUDA_CALL(hipSetDevice(sampler_ctx.device_id));
hipLaunchKernelGGL(( count_miss_cache<Constant::kCudaBlockSize, Constant::kCudaTileSize>)
, dim3(grid), dim3(block), 0, cu_stream, sampler_gpu_hashtable, nodes, num_nodes,
miss_prefix_counts, cache_prefix_counts);
sampler_device->StreamSync(sampler_ctx, stream);
size_t workspace_bytes;
CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(
nullptr, workspace_bytes, static_cast<IdType *>(nullptr),
static_cast<IdType *>(nullptr), grid.x + 1, cu_stream));
sampler_device->StreamSync(sampler_ctx, stream);
void *workspace =
sampler_device->AllocWorkspace(sampler_ctx, workspace_bytes);
CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(
workspace, workspace_bytes, miss_prefix_counts, miss_prefix_counts,
grid.x + 1, cu_stream));
CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(
workspace, workspace_bytes, cache_prefix_counts, cache_prefix_counts,
grid.x + 1, cu_stream));
sampler_device->StreamSync(sampler_ctx, stream);
hipLaunchKernelGGL(( get_miss_index<Constant::kCudaBlockSize, Constant::kCudaTileSize>)
, dim3(grid), dim3(block), 0, cu_stream,
sampler_gpu_hashtable, nodes, num_nodes, output_miss_dst_index,
output_miss_src_index, miss_prefix_counts);
sampler_device->StreamSync(sampler_ctx, stream);
hipLaunchKernelGGL(( get_cache_index<Constant::kCudaBlockSize, Constant::kCudaTileSize>)
, dim3(grid), dim3(block), 0, cu_stream,
sampler_gpu_hashtable, nodes, num_nodes, output_cache_dst_index,
output_cache_src_index, cache_prefix_counts);
sampler_device->StreamSync(sampler_ctx, stream);
IdType num_miss;
IdType num_cache;
sampler_device->CopyDataFromTo(miss_prefix_counts + grid.x, 0, &num_miss, 0,
sizeof(IdType), sampler_ctx, CPU(), stream);
sampler_device->CopyDataFromTo(cache_prefix_counts + grid.x, 0, &num_cache, 0,
sizeof(IdType), sampler_ctx, CPU(), stream);
sampler_device->StreamSync(sampler_ctx, stream);
*num_output_miss = num_miss;
*num_output_cache = num_cache;
sampler_device->FreeWorkspace(sampler_ctx, workspace);
sampler_device->FreeWorkspace(sampler_ctx, cache_prefix_counts);
sampler_device->FreeWorkspace(sampler_ctx, miss_prefix_counts);
}
} // namespace cuda
} // namespace common
} // namespace samgraph
| 8a7bf3026fc0c90764385ee6d2d44ad8501403f3.cu | /*
* Copyright 2022 Institute of Parallel and Distributed Systems, Shanghai Jiao Tong University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <cub/cub.cuh>
#include "cuda_function.h"
#include "cuda_utils.h"
#include "../device.h"
#include "../common.h"
#include "../constant.h"
#include "../logging.h"
namespace samgraph {
namespace common {
namespace cuda {
namespace {
template <size_t BLOCK_SIZE, size_t TILE_SIZE>
__global__ void count_miss_cache(const IdType *hashtable, const IdType *nodes,
const size_t num_nodes, IdType *miss_counts,
IdType *cache_counts) {
const size_t block_start = TILE_SIZE * blockIdx.x;
const size_t block_end = TILE_SIZE * (blockIdx.x + 1);
using BlockReduce = typename cub::BlockReduce<IdType, BLOCK_SIZE>;
IdType miss_count = 0;
IdType cache_count = 0;
#pragma unroll
for (size_t index = threadIdx.x + block_start; index < block_end;
index += BLOCK_SIZE) {
if (index < num_nodes) {
if (hashtable[nodes[index]] == Constant::kEmptyKey) {
miss_count++;
} else {
cache_count++;
}
}
}
__shared__ typename BlockReduce::TempStorage temp_miss_space;
__shared__ typename BlockReduce::TempStorage temp_cache_space;
miss_count = BlockReduce(temp_miss_space).Sum(miss_count);
cache_count = BlockReduce(temp_cache_space).Sum(cache_count);
if (threadIdx.x == 0) {
miss_counts[blockIdx.x] = miss_count;
cache_counts[blockIdx.x] = cache_count;
if (blockIdx.x == 0) {
miss_counts[gridDim.x] = 0;
cache_counts[gridDim.x] = 0;
}
}
}
template <size_t BLOCK_SIZE, size_t TILE_SIZE>
__global__ void get_miss_index(const IdType *hashtable, const IdType *nodes,
const size_t num_nodes,
IdType *output_miss_dst_index,
IdType *output_miss_src_index,
const IdType *miss_counts_prefix) {
using FlagType = IdType;
using BlockScan = typename cub::BlockScan<FlagType, BLOCK_SIZE>;
constexpr const IdType VALS_PER_THREAD = TILE_SIZE / BLOCK_SIZE;
__shared__ typename BlockScan::TempStorage temp_space;
BlockPrefixCallbackOp<FlagType> prefix_op(0);
const IdType offset = miss_counts_prefix[blockIdx.x];
for (IdType i = 0; i < VALS_PER_THREAD; ++i) {
const IdType index = threadIdx.x + i * BLOCK_SIZE + blockIdx.x * TILE_SIZE;
FlagType flag;
if (index < num_nodes && hashtable[nodes[index]] == Constant::kEmptyKey) {
flag = 1;
} else {
flag = 0;
}
BlockScan(temp_space).ExclusiveSum(flag, flag, prefix_op);
__syncthreads();
if (index < num_nodes && hashtable[nodes[index]] == Constant::kEmptyKey) {
const IdType pos = offset + flag;
assert(pos < num_nodes);
// new node ID in subgraph
output_miss_dst_index[pos] = index;
// old node ID in original graph
output_miss_src_index[pos] = nodes[index];
}
}
// if (threadIdx.x == 0 && blockIdx.x == 0) {
// printf("miss count %u, %u\n", miss_counts_prefix[gridDim.x],
// miss_counts_prefix[gridDim.x - 1]);
// }
}
template <size_t BLOCK_SIZE, size_t TILE_SIZE>
__global__ void get_cache_index(const IdType *hashtable, const IdType *nodes,
const size_t num_nodes,
IdType *output_cache_dst_index,
IdType *output_cache_src_index,
const IdType *cache_counts_prefix) {
using FlagType = IdType;
using BlockScan = typename cub::BlockScan<FlagType, BLOCK_SIZE>;
constexpr const IdType VALS_PER_THREAD = TILE_SIZE / BLOCK_SIZE;
__shared__ typename BlockScan::TempStorage temp_space;
BlockPrefixCallbackOp<FlagType> prefix_op(0);
const IdType offset = cache_counts_prefix[blockIdx.x];
for (IdType i = 0; i < VALS_PER_THREAD; ++i) {
const IdType index = threadIdx.x + i * BLOCK_SIZE + blockIdx.x * TILE_SIZE;
FlagType flag;
if (index < num_nodes && hashtable[nodes[index]] != Constant::kEmptyKey) {
flag = 1;
} else {
flag = 0;
}
BlockScan(temp_space).ExclusiveSum(flag, flag, prefix_op);
__syncthreads();
if (index < num_nodes && hashtable[nodes[index]] != Constant::kEmptyKey) {
const IdType pos = offset + flag;
// new node ID in subgraph
output_cache_dst_index[pos] = index;
// old node ID in original graph
output_cache_src_index[pos] = hashtable[nodes[index]];
}
}
// if (threadIdx.x == 0 && blockIdx.x == 0) {
// printf("cache count %u, %u\n", cache_counts_prefix[gridDim.x],
// cache_counts_prefix[gridDim.x - 1]);
// }
}
} // namespace
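// GetMissCacheIndex splits the requested node IDs into cache misses and cache hits in three passes:
//  1) count_miss_cache produces per-block miss/hit totals with a block-wide reduction
//     (plus one trailing zero slot),
//  2) an exclusive prefix sum over those totals yields each block's write offset,
//  3) get_miss_index / get_cache_index rescan every tile with a block-wide scan and scatter
//     (dst = position in the request batch, src = original node ID or cached location)
//     at offset + local rank.
// The last entry of each prefix array is the total miss/hit count copied back to the host.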
void GetMissCacheIndex(
IdType *sampler_gpu_hashtable, Context sampler_ctx,
IdType *output_miss_src_index, IdType *output_miss_dst_index,
size_t *num_output_miss, IdType *output_cache_src_index,
IdType *output_cache_dst_index, size_t *num_output_cache,
const IdType *nodes, const size_t num_nodes, StreamHandle stream) {
const size_t num_tiles = RoundUpDiv(num_nodes, Constant::kCudaTileSize);
const dim3 grid(num_tiles);
const dim3 block(Constant::kCudaBlockSize);
auto sampler_device = Device::Get(sampler_ctx);
auto cu_stream = static_cast<cudaStream_t>(stream);
sampler_device->SetDevice(sampler_ctx);
IdType *miss_prefix_counts =
static_cast<IdType *>(sampler_device->AllocWorkspace(
sampler_ctx, sizeof(IdType) * (grid.x + 1)));
IdType *cache_prefix_counts =
static_cast<IdType *>(sampler_device->AllocWorkspace(
sampler_ctx, sizeof(IdType) * (grid.x + 1)));
// LOG(DEBUG) << "GetMissCacheIndex num nodes " << num_nodes;
CUDA_CALL(cudaSetDevice(sampler_ctx.device_id));
count_miss_cache<Constant::kCudaBlockSize, Constant::kCudaTileSize>
<<<grid, block, 0, cu_stream>>>(sampler_gpu_hashtable, nodes, num_nodes,
miss_prefix_counts, cache_prefix_counts);
sampler_device->StreamSync(sampler_ctx, stream);
size_t workspace_bytes;
CUDA_CALL(cub::DeviceScan::ExclusiveSum(
nullptr, workspace_bytes, static_cast<IdType *>(nullptr),
static_cast<IdType *>(nullptr), grid.x + 1, cu_stream));
sampler_device->StreamSync(sampler_ctx, stream);
void *workspace =
sampler_device->AllocWorkspace(sampler_ctx, workspace_bytes);
CUDA_CALL(cub::DeviceScan::ExclusiveSum(
workspace, workspace_bytes, miss_prefix_counts, miss_prefix_counts,
grid.x + 1, cu_stream));
CUDA_CALL(cub::DeviceScan::ExclusiveSum(
workspace, workspace_bytes, cache_prefix_counts, cache_prefix_counts,
grid.x + 1, cu_stream));
sampler_device->StreamSync(sampler_ctx, stream);
get_miss_index<Constant::kCudaBlockSize, Constant::kCudaTileSize>
<<<grid, block, 0, cu_stream>>>(
sampler_gpu_hashtable, nodes, num_nodes, output_miss_dst_index,
output_miss_src_index, miss_prefix_counts);
sampler_device->StreamSync(sampler_ctx, stream);
get_cache_index<Constant::kCudaBlockSize, Constant::kCudaTileSize>
<<<grid, block, 0, cu_stream>>>(
sampler_gpu_hashtable, nodes, num_nodes, output_cache_dst_index,
output_cache_src_index, cache_prefix_counts);
sampler_device->StreamSync(sampler_ctx, stream);
IdType num_miss;
IdType num_cache;
sampler_device->CopyDataFromTo(miss_prefix_counts + grid.x, 0, &num_miss, 0,
sizeof(IdType), sampler_ctx, CPU(), stream);
sampler_device->CopyDataFromTo(cache_prefix_counts + grid.x, 0, &num_cache, 0,
sizeof(IdType), sampler_ctx, CPU(), stream);
sampler_device->StreamSync(sampler_ctx, stream);
*num_output_miss = num_miss;
*num_output_cache = num_cache;
sampler_device->FreeWorkspace(sampler_ctx, workspace);
sampler_device->FreeWorkspace(sampler_ctx, cache_prefix_counts);
sampler_device->FreeWorkspace(sampler_ctx, miss_prefix_counts);
}
} // namespace cuda
} // namespace common
} // namespace samgraph
|
4a0bdb05f008df464fc8f88b15f3640a9ed95a41.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <rocblas.h>
#include "prod_cuda.cuh"
#include "prod_kernel.cuh"
#define CUDA_CHECKED_RUN(E) \
{ \
auto _status = E; \
if (_status != hipSuccess) \
{ \
fprintf(stderr, "Error: %s at %s(%i): %s\n", \
#E, __FILE__, __LINE__, \
hipGetErrorString(_status)); \
exit(EXIT_FAILURE); \
} \
}
Matrix product_cuda(const Matrix &L, const Matrix &R, size_t bs_x, size_t bs_y)
{
Matrix O(L.m(), R.n());
// set device
CUDA_CHECKED_RUN(hipSetDevice(3));
// allocate device memory
Matrix::data_t *dev_l, *dev_r, *dev_o;
CUDA_CHECKED_RUN(hipMalloc(&dev_l, L.data_size()));
CUDA_CHECKED_RUN(hipMalloc(&dev_r, R.data_size()));
CUDA_CHECKED_RUN(hipMalloc(&dev_o, O.data_size()));
// copy matrix to device
hipMemcpy(dev_l, L._data, L.data_size(), hipMemcpyHostToDevice);
hipMemcpy(dev_r, R._data, R.data_size(), hipMemcpyHostToDevice);
// perform product
std::cout << "Computing: GRID(" << (O.m() / bs_x) << "," << (O.n() / bs_y) << ")"
<< ", BLOCK(" << bs_x << "," << bs_y << ")" << std::endl;
dim3 grid(O.m() / bs_x, O.n() / bs_y);
dim3 block(bs_x, bs_y);
hipLaunchKernelGGL(( matrix_mult), dim3(grid), dim3(block), 0, 0, dev_l, dev_r, dev_o, L.m(), L.n(), R.n());
// copy result back
CUDA_CHECKED_RUN(hipMemcpy(O._data, dev_o, O.data_size(), hipMemcpyDeviceToHost));
// free memory
CUDA_CHECKED_RUN(hipFree(dev_l));
CUDA_CHECKED_RUN(hipFree(dev_r));
CUDA_CHECKED_RUN(hipFree(dev_o));
return O;
}
Matrix product_cuda_omp(const Matrix &L, const Matrix &R, size_t bs_x, size_t bs_y, size_t p)
{
Matrix O(L.m(), R.n());
// set device
CUDA_CHECKED_RUN(hipSetDevice(3));
// allocate device memory
Matrix::data_t *dev_l, *dev_r, *dev_o;
CUDA_CHECKED_RUN(hipMalloc(&dev_l, L.data_size()));
CUDA_CHECKED_RUN(hipMalloc(&dev_r, R.data_size()));
CUDA_CHECKED_RUN(hipMalloc(&dev_o, O.data_size()));
// copy matrix to device
hipMemcpy(dev_l, L._data, L.data_size(), hipMemcpyHostToDevice);
hipMemcpy(dev_r, R._data, R.data_size(), hipMemcpyHostToDevice);
// perform product
std::cout << "Computing: GRID(" << (O.m() / bs_x / p) << "," << (O.n() / bs_y) << ")"
<< ", BLOCK(" << bs_x << "," << bs_y << ")"
<< " with OpenMP = " << p << std::endl;
dim3 grid(O.m() / bs_x / p, O.n() / bs_y);
dim3 block(bs_x, bs_y);
#pragma omp parallel for num_threads(p)
for(size_t tid = 0; tid < p; tid++)
{
size_t offset = grid.x * bs_x * tid * L.n();
hipLaunchKernelGGL(( matrix_mult), dim3(grid), dim3(block), 0, 0, dev_l + offset, dev_r, dev_o + grid.x * bs_x * tid * L.n(), L.m() / p, L.n(), R.n());
}
// copy result back
CUDA_CHECKED_RUN(hipMemcpy(O._data, dev_o, O.data_size(), hipMemcpyDeviceToHost));
// free memory
CUDA_CHECKED_RUN(hipFree(dev_l));
CUDA_CHECKED_RUN(hipFree(dev_r));
CUDA_CHECKED_RUN(hipFree(dev_o));
return O;
}
Matrix product_cublas(const Matrix &L, const Matrix &R)
{
Matrix O(L.m(), R.n());
// set device
CUDA_CHECKED_RUN(hipSetDevice(3));
// allocate device memory
Matrix::data_t *dev_l, *dev_r, *dev_o;
CUDA_CHECKED_RUN(hipMalloc(&dev_l, L.data_size()));
CUDA_CHECKED_RUN(hipMalloc(&dev_r, R.data_size()));
CUDA_CHECKED_RUN(hipMalloc(&dev_o, O.data_size()));
// perform product
hipblasHandle_t handle;
double alpha = 1.0;
double beta = 0.0;
hipblasCreate(&handle);
hipblasSetMatrix(L.m(), L.n(), sizeof(Matrix::data_t), L._data, L.n(), dev_l, L.n());
hipblasSetMatrix(R.n(), R.m(), sizeof(Matrix::data_t), R._data, R.m(), dev_r, R.m());
hipblasDgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
O.m(), O.n(), L.n(),
&alpha, dev_r, R.n(),
dev_l, L.n(), &beta,
dev_o, O.m());
// copy result back
hipblasGetMatrix(O.m(), O.n(), sizeof(Matrix::data_t), dev_o, O.n(), O._data, O.n());
// free memory
CUDA_CHECKED_RUN(hipFree(dev_l));
CUDA_CHECKED_RUN(hipFree(dev_r));
CUDA_CHECKED_RUN(hipFree(dev_o));
return O;
}
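// Minimal usage sketch (commented out): it assumes the Matrix interface visible above
// (Matrix(m, n), m(), n(), _data, data_size()) and sizes divisible by the block dimensions.
//   Matrix A(1024, 1024), B(1024, 1024);            // fill A and B before multiplying
//   Matrix C1 = product_cuda(A, B, 16, 16);         // hand-written kernel, 16x16 blocks
//   Matrix C2 = product_cuda_omp(A, B, 16, 16, 4);  // same kernel, rows split over 4 host threads
//   Matrix C3 = product_cublas(A, B);               // library reference result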
| 4a0bdb05f008df464fc8f88b15f3640a9ed95a41.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <cublas_v2.h>
#include "prod_cuda.cuh"
#include "prod_kernel.cuh"
#define CUDA_CHECKED_RUN(E) \
{ \
auto _status = E; \
if (_status != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s at %s(%i): %s\n", \
#E, __FILE__, __LINE__, \
cudaGetErrorString(_status)); \
exit(EXIT_FAILURE); \
} \
}
Matrix product_cuda(const Matrix &L, const Matrix &R, size_t bs_x, size_t bs_y)
{
Matrix O(L.m(), R.n());
// set device
CUDA_CHECKED_RUN(cudaSetDevice(3));
// allocate device memory
Matrix::data_t *dev_l, *dev_r, *dev_o;
CUDA_CHECKED_RUN(cudaMalloc(&dev_l, L.data_size()));
CUDA_CHECKED_RUN(cudaMalloc(&dev_r, R.data_size()));
CUDA_CHECKED_RUN(cudaMalloc(&dev_o, O.data_size()));
// copy matrix to device
cudaMemcpy(dev_l, L._data, L.data_size(), cudaMemcpyHostToDevice);
cudaMemcpy(dev_r, R._data, R.data_size(), cudaMemcpyHostToDevice);
// perform product
std::cout << "Computing: GRID(" << (O.m() / bs_x) << "," << (O.n() / bs_y) << ")"
<< ", BLOCK(" << bs_x << "," << bs_y << ")" << std::endl;
dim3 grid(O.m() / bs_x, O.n() / bs_y);
dim3 block(bs_x, bs_y);
matrix_mult<<<grid, block>>>(dev_l, dev_r, dev_o, L.m(), L.n(), R.n());
// copy result back
CUDA_CHECKED_RUN(cudaMemcpy(O._data, dev_o, O.data_size(), cudaMemcpyDeviceToHost));
// free memory
CUDA_CHECKED_RUN(cudaFree(dev_l));
CUDA_CHECKED_RUN(cudaFree(dev_r));
CUDA_CHECKED_RUN(cudaFree(dev_o));
return O;
}
Matrix product_cuda_omp(const Matrix &L, const Matrix &R, size_t bs_x, size_t bs_y, size_t p)
{
Matrix O(L.m(), R.n());
// set device
CUDA_CHECKED_RUN(cudaSetDevice(3));
// allocate device memory
Matrix::data_t *dev_l, *dev_r, *dev_o;
CUDA_CHECKED_RUN(cudaMalloc(&dev_l, L.data_size()));
CUDA_CHECKED_RUN(cudaMalloc(&dev_r, R.data_size()));
CUDA_CHECKED_RUN(cudaMalloc(&dev_o, O.data_size()));
// copy matrix to device
cudaMemcpy(dev_l, L._data, L.data_size(), cudaMemcpyHostToDevice);
cudaMemcpy(dev_r, R._data, R.data_size(), cudaMemcpyHostToDevice);
// perform product
std::cout << "Computing: GRID(" << (O.m() / bs_x / p) << "," << (O.n() / bs_y) << ")"
<< ", BLOCK(" << bs_x << "," << bs_y << ")"
<< " with OpenMP = " << p << std::endl;
dim3 grid(O.m() / bs_x / p, O.n() / bs_y);
dim3 block(bs_x, bs_y);
#pragma omp parallel for num_threads(p)
for(size_t tid = 0; tid < p; tid++)
{
size_t offset = grid.x * bs_x * tid * L.n();
matrix_mult<<<grid, block>>>(dev_l + offset, dev_r, dev_o + grid.x * bs_x * tid * L.n(), L.m() / p, L.n(), R.n());
}
// copy result back
CUDA_CHECKED_RUN(cudaMemcpy(O._data, dev_o, O.data_size(), cudaMemcpyDeviceToHost));
// free memory
CUDA_CHECKED_RUN(cudaFree(dev_l));
CUDA_CHECKED_RUN(cudaFree(dev_r));
CUDA_CHECKED_RUN(cudaFree(dev_o));
return O;
}
Matrix product_cublas(const Matrix &L, const Matrix &R)
{
Matrix O(L.m(), R.n());
// set device
CUDA_CHECKED_RUN(cudaSetDevice(3));
// allocate device memory
Matrix::data_t *dev_l, *dev_r, *dev_o;
CUDA_CHECKED_RUN(cudaMalloc(&dev_l, L.data_size()));
CUDA_CHECKED_RUN(cudaMalloc(&dev_r, R.data_size()));
CUDA_CHECKED_RUN(cudaMalloc(&dev_o, O.data_size()));
// perform product
cublasHandle_t handle;
double alpha = 1.0;
double beta = 0.0;
cublasCreate(&handle);
cublasSetMatrix(L.m(), L.n(), sizeof(Matrix::data_t), L._data, L.n(), dev_l, L.n());
cublasSetMatrix(R.n(), R.m(), sizeof(Matrix::data_t), R._data, R.m(), dev_r, R.m());
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N,
O.m(), O.n(), L.n(),
&alpha, dev_r, R.n(),
dev_l, L.n(), &beta,
dev_o, O.m());
// copy result back
cublasGetMatrix(O.m(), O.n(), sizeof(Matrix::data_t), dev_o, O.n(), O._data, O.n());
// free memory
CUDA_CHECKED_RUN(cudaFree(dev_l));
CUDA_CHECKED_RUN(cudaFree(dev_r));
CUDA_CHECKED_RUN(cudaFree(dev_o));
return O;
}
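// Minimal usage sketch (commented out): it assumes the Matrix interface visible above
// (Matrix(m, n), m(), n(), _data, data_size()) and sizes divisible by the block dimensions.
//   Matrix A(1024, 1024), B(1024, 1024);            // fill A and B before multiplying
//   Matrix C1 = product_cuda(A, B, 16, 16);         // hand-written kernel, 16x16 blocks
//   Matrix C2 = product_cuda_omp(A, B, 16, 16, 4);  // same kernel, rows split over 4 host threads
//   Matrix C3 = product_cublas(A, B);               // library reference result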
|
dab7a838fbd44da398a32874e1189e492782700a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************** HEADERS *********************/
#include "./header/lbm_config.h"
#include "./header/lbm_phys.h"
#include <assert.h>
#include <stdlib.h>
__device__ static float get_cell_density(const float *cell) {
// vars
float res = 0.0;
// loop on directions
for (int k = 0; k < DIRECTIONS; k++)
res += cell[k];
return res;
}
__device__ static void get_cell_velocity(float v[DIMENSIONS], const float *cell,
const float cell_density) {
// vars
float temp;
float div = 1.0 / cell_density;
const float direction_matrix[DIRECTIONS][DIMENSIONS] = {
{+0.0f, +0.0f}, {+1.0f, +0.0f}, {+0.0f, +1.0f},
{-1.0f, +0.0f}, {+0.0f, -1.0f}, {+1.0f, +1.0f},
{-1.0f, +1.0f}, {-1.0f, -1.0f}, {+1.0f, -1.0f}};
// loop on all dimensions
for (int d = 0; d < DIMENSIONS; d++) {
// reset value
temp = 0.0;
// sum all directions
for (int k = 0; k < DIRECTIONS; k++) {
temp += cell[k] * direction_matrix[k][d];
}
// normalize
v[d] = temp * div;
}
}
__device__ static float
helper_compute_poiseuille(const int j, const int height,
const float inflow_max_velocity) {
return 4.0f * inflow_max_velocity / (height * height) * (height * j - j * j);
}
__device__ static float get_vect_norme_2(const float v1[DIMENSIONS],
const float v2[DIMENSIONS]) {
float res = 0.0;
for (int k = 0; k < DIMENSIONS; k++)
res += v1[k] * v2[k];
return res;
}
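// Zou/He-style open boundaries: the inlet imposes a Poiseuille velocity profile and rebuilds the
// three unknown populations entering the domain (1, 5, 8) from the ones already known at the wall;
// the outlet holds the density constant and rebuilds populations (3, 6, 7) instead.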
__device__ static void
compute_inflow_zou_he_poiseuille_distr(float *cell, const int j,
const int height,
const float inflow_max_velocity) {
float v = helper_compute_poiseuille(j, height, inflow_max_velocity);
// compute rho from u and inner flow on surface
float density =
(cell[0] + cell[2] + cell[4] + 2 * (cell[3] + cell[6] + cell[7])) *
(1.0 - v);
// now compute unknown microscopic values
float a = 0.166667 * (density * v);
cell[1] = cell[3];
cell[5] = cell[7] - 0.5 * (cell[2] - cell[4]) + a;
cell[8] = cell[6] + 0.5 * (cell[2] - cell[4]) + a;
}
__device__ static void compute_outflow_zou_he_const_density(float *cell) {
  // compute macroscopic v depending on inner flow going onto the wall
float v =
(cell[0] + cell[2] + cell[4] + 2 * (cell[1] + cell[5] + cell[8])) - 1.0;
// now can compute unknown microscopic values
float a = 0.166667 * v;
cell[3] = cell[1] - 0.66667 * v;
cell[7] = cell[5] + 0.5 * (cell[2] - cell[4]) - a;
cell[6] = cell[8] + 0.5 * (cell[4] - cell[2]) - a;
}
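// D2Q9 BGK equilibrium: feq_k = w_k * rho * (1 + 3*(e_k.u) + 4.5*(e_k.u)^2 - 1.5*(u.u)),
// evaluated below with the weights w_k from equil_weight and directions e_k from direction_matrix.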
__device__ static float compute_equilibrium_profile(float velocity[DIMENSIONS],
float density,
int direction) {
const float equil_weight[DIRECTIONS] = {
4.0f / 9.0f, 1.0f / 9.0f, 1.0f / 9.0f, 1.0f / 9.0f, 1.0f / 9.0f,
1.0f / 36.0f, 1.0f / 36.0f, 1.0f / 36.0f, 1.0f / 36.0f};
const float direction_matrix[DIRECTIONS][DIMENSIONS] = {
{+0.0f, +0.0f}, {+1.0f, +0.0f}, {+0.0f, +1.0f},
{-1.0f, +0.0f}, {+0.0f, -1.0f}, {+1.0f, +1.0f},
{-1.0f, +1.0f}, {-1.0f, -1.0f}, {+1.0f, -1.0f}};
// vars
float p, p2, feq, v2;
v2 = get_vect_norme_2(velocity, velocity);
// calc e_i * v_i / c
p = get_vect_norme_2(direction_matrix[direction], velocity);
p2 = p * p;
// terms without density and direction weight
feq = 1.0f + (3.0f * p) + (4.5f * p2) - (1.5f * v2);
// mult all by density and direction weight
feq *= equil_weight[direction] * density;
return feq;
}
__global__ void kernel_macroscopic_mesh(lbm_file_entry_t *mesh_out,
float *mesh_in, int width, int height,
lbm_config_t config) {
// get thread column
const int column = blockIdx.x * blockDim.x + threadIdx.x;
// get thread row
const int row = blockIdx.y * blockDim.y + threadIdx.y;
// get index of the thread
const int i = row * width + column;
// get the index where to write in the mesh_out array
const int j = column * height + row;
// test if the thread is in the mesh
if ((column >= width) || (row >= height))
return;
// if is obstacle set to -1
int obstacle_row = row - config.obstacle_y;
int obstacle_column = column - config.obstacle_x;
int obstacle_index = obstacle_row * (config.obstacle_width) + obstacle_column;
if ((0 <= obstacle_row && obstacle_row < config.obstacle_height) &&
(0 <= obstacle_column && obstacle_column < config.obstacle_width) &&
config.obstacle_mesh[obstacle_index]) {
mesh_out[j].density = -0.001;
mesh_out[j].v = -0.001;
} else {
float density = get_cell_density(&(mesh_in[i * (DIRECTIONS)]));
float v[DIMENSIONS] = {0.0f, 0.0f};
get_cell_velocity(v, &(mesh_in[i * (DIRECTIONS)]), density);
float norm = sqrt(get_vect_norme_2(v, v));
mesh_out[j].density = density;
mesh_out[j].v = norm;
}
}
__global__ void kernel_special_cells(float *mesh, int width, int height,
lbm_config_t config) {
// get thread column
const int column = blockIdx.x * blockDim.x + threadIdx.x;
// get thread row
const int row = blockIdx.y * blockDim.y + threadIdx.y;
// get index of the thread
const int i = row * width + column;
// test if the thread is in the mesh
if ((column >= width) || (row >= height))
return;
const int opposite_of[DIRECTIONS] = {0, 3, 4, 1, 2, 7, 8, 5, 6};
// compute_inflow_zou_he_poiseuille_distr
if (column == 0 && row != 0 && row != height - 1) {
compute_inflow_zou_he_poiseuille_distr(&(mesh[(i * DIRECTIONS)]), row,
height, config.inflow_max_velocity);
}
// compute_outflow_zou_he_const_density
if (column == (width - 1) && row != 0 && row != height - 1) {
compute_outflow_zou_he_const_density(&(mesh[(i * DIRECTIONS)]));
}
// compute_bounce_back
int obstacle_row = row - config.obstacle_y;
int obstacle_column = column - config.obstacle_x;
int obstacle_index = obstacle_row * (config.obstacle_width) + obstacle_column;
if ((0 <= obstacle_row && obstacle_row < config.obstacle_height) &&
(0 <= obstacle_column && obstacle_column < config.obstacle_width) &&
config.obstacle_mesh[obstacle_index]) {
for (int k = 0; k < DIRECTIONS; k++) {
mesh[(i * DIRECTIONS) + k] = mesh[(i * DIRECTIONS) + opposite_of[k]];
}
}
// walls
if (row == 0 || row == height - 1) {
for (int k = 0; k < DIRECTIONS; k++)
mesh[(i * DIRECTIONS) + k] = mesh[(i * DIRECTIONS) + opposite_of[k]];
}
}
__global__ void kernel_collision(float *mesh_out, float *mesh_in, int width,
int height, lbm_config_t config) {
// get thread column
const int column = blockIdx.x * blockDim.x + threadIdx.x;
// get thread row
const int row = blockIdx.y * blockDim.y + threadIdx.y;
// get index of the thread
const int i = row * width + column;
// test if the thread is in the mesh
if (!(column > 0 && column < width - 1) || !(row > 0 && row < height - 1))
return;
float v[2];
// compute macroscopic values
float density = get_cell_density(&(mesh_in[(i * DIRECTIONS)]));
get_cell_velocity(v, &(mesh_in[(i * DIRECTIONS)]), density);
// loop on microscopic directions
for (int k = 0; k < DIRECTIONS; k++) {
// compute f at equilibr.
float feq = compute_equilibrium_profile(v, density, k);
// compute f out
mesh_out[(i * DIRECTIONS) + k] =
mesh_in[(i * DIRECTIONS) + k] -
config.relax_parameter * (mesh_in[(i * DIRECTIONS) + k] - feq);
}
}
__global__ void kernel_propagation(float *mesh_out, float *mesh_in, int width,
int height, lbm_config_t config) {
// get thread column
const int column = blockIdx.x * blockDim.x + threadIdx.x;
// get thread row
const int row = blockIdx.y * blockDim.y + threadIdx.y;
// get index of the thread
const int i = row * width + column;
// test if the thread is in the mesh
if ((column >= width) || (row >= height))
return;
const int int_direction_matrix[DIRECTIONS][DIMENSIONS] = {
{+0, +0}, {+1, +0}, {+0, +1}, {-1, +0}, {+0, -1},
{+1, +1}, {-1, +1}, {-1, -1}, {+1, -1}};
for (int k = 0; k < DIRECTIONS; k++) {
int cc = column + int_direction_matrix[k][0];
int rr = row + int_direction_matrix[k][1];
if ((cc >= 0 && cc < width) && (rr >= 0 && rr < height)) {
int j = rr * width + cc;
mesh_out[(j * DIRECTIONS) + k] = mesh_in[(i * DIRECTIONS) + k];
}
}
} | dab7a838fbd44da398a32874e1189e492782700a.cu | /******************** HEADERS *********************/
#include "./header/lbm_config.h"
#include "./header/lbm_phys.h"
#include <assert.h>
#include <stdlib.h>
__device__ static float get_cell_density(const float *cell) {
// vars
float res = 0.0;
// loop on directions
for (int k = 0; k < DIRECTIONS; k++)
res += cell[k];
return res;
}
__device__ static void get_cell_velocity(float v[DIMENSIONS], const float *cell,
const float cell_density) {
// vars
float temp;
float div = 1.0 / cell_density;
const float direction_matrix[DIRECTIONS][DIMENSIONS] = {
{+0.0f, +0.0f}, {+1.0f, +0.0f}, {+0.0f, +1.0f},
{-1.0f, +0.0f}, {+0.0f, -1.0f}, {+1.0f, +1.0f},
{-1.0f, +1.0f}, {-1.0f, -1.0f}, {+1.0f, -1.0f}};
// loop on all dimensions
for (int d = 0; d < DIMENSIONS; d++) {
// reset value
temp = 0.0;
// sum all directions
for (int k = 0; k < DIRECTIONS; k++) {
temp += cell[k] * direction_matrix[k][d];
}
// normalize
v[d] = temp * div;
}
}
__device__ static float
helper_compute_poiseuille(const int j, const int height,
const float inflow_max_velocity) {
return 4.0f * inflow_max_velocity / (height * height) * (height * j - j * j);
}
__device__ static float get_vect_norme_2(const float v1[DIMENSIONS],
const float v2[DIMENSIONS]) {
float res = 0.0;
for (int k = 0; k < DIMENSIONS; k++)
res += v1[k] * v2[k];
return res;
}
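// Zou/He-style open boundaries: the inlet imposes a Poiseuille velocity profile and rebuilds the
// three unknown populations entering the domain (1, 5, 8) from the ones already known at the wall;
// the outlet holds the density constant and rebuilds populations (3, 6, 7) instead.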
__device__ static void
compute_inflow_zou_he_poiseuille_distr(float *cell, const int j,
const int height,
const float inflow_max_velocity) {
float v = helper_compute_poiseuille(j, height, inflow_max_velocity);
// compute rho from u and inner flow on surface
float density =
(cell[0] + cell[2] + cell[4] + 2 * (cell[3] + cell[6] + cell[7])) *
(1.0 - v);
// now compute unknown microscopic values
float a = 0.166667 * (density * v);
cell[1] = cell[3];
cell[5] = cell[7] - 0.5 * (cell[2] - cell[4]) + a;
cell[8] = cell[6] + 0.5 * (cell[2] - cell[4]) + a;
}
__device__ static void compute_outflow_zou_he_const_density(float *cell) {
// compute macroscopic v depeding on inner flow going onto the wall
float v =
(cell[0] + cell[2] + cell[4] + 2 * (cell[1] + cell[5] + cell[8])) - 1.0;
// now can compute unknown microscopic values
float a = 0.166667 * v;
cell[3] = cell[1] - 0.66667 * v;
cell[7] = cell[5] + 0.5 * (cell[2] - cell[4]) - a;
cell[6] = cell[8] + 0.5 * (cell[4] - cell[2]) - a;
}
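// D2Q9 BGK equilibrium: feq_k = w_k * rho * (1 + 3*(e_k.u) + 4.5*(e_k.u)^2 - 1.5*(u.u)),
// evaluated below with the weights w_k from equil_weight and directions e_k from direction_matrix.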
__device__ static float compute_equilibrium_profile(float velocity[DIMENSIONS],
float density,
int direction) {
const float equil_weight[DIRECTIONS] = {
4.0f / 9.0f, 1.0f / 9.0f, 1.0f / 9.0f, 1.0f / 9.0f, 1.0f / 9.0f,
1.0f / 36.0f, 1.0f / 36.0f, 1.0f / 36.0f, 1.0f / 36.0f};
const float direction_matrix[DIRECTIONS][DIMENSIONS] = {
{+0.0f, +0.0f}, {+1.0f, +0.0f}, {+0.0f, +1.0f},
{-1.0f, +0.0f}, {+0.0f, -1.0f}, {+1.0f, +1.0f},
{-1.0f, +1.0f}, {-1.0f, -1.0f}, {+1.0f, -1.0f}};
// vars
float p, p2, feq, v2;
v2 = get_vect_norme_2(velocity, velocity);
// calc e_i * v_i / c
p = get_vect_norme_2(direction_matrix[direction], velocity);
p2 = p * p;
// terms without density and direction weight
feq = 1.0f + (3.0f * p) + (4.5f * p2) - (1.5f * v2);
// mult all by density and direction weight
feq *= equil_weight[direction] * density;
return feq;
}
__global__ void kernel_macroscopic_mesh(lbm_file_entry_t *mesh_out,
float *mesh_in, int width, int height,
lbm_config_t config) {
// get thread column
const int column = blockIdx.x * blockDim.x + threadIdx.x;
// get thread row
const int row = blockIdx.y * blockDim.y + threadIdx.y;
// get index of the thread
const int i = row * width + column;
// get the index where to write in the mesh_out array
const int j = column * height + row;
// test if the thread is in the mesh
if ((column >= width) || (row >= height))
return;
// if is obstacle set to -1
int obstacle_row = row - config.obstacle_y;
int obstacle_column = column - config.obstacle_x;
int obstacle_index = obstacle_row * (config.obstacle_width) + obstacle_column;
if ((0 <= obstacle_row && obstacle_row < config.obstacle_height) &&
(0 <= obstacle_column && obstacle_column < config.obstacle_width) &&
config.obstacle_mesh[obstacle_index]) {
mesh_out[j].density = -0.001;
mesh_out[j].v = -0.001;
} else {
float density = get_cell_density(&(mesh_in[i * (DIRECTIONS)]));
float v[DIMENSIONS] = {0.0f, 0.0f};
get_cell_velocity(v, &(mesh_in[i * (DIRECTIONS)]), density);
float norm = sqrt(get_vect_norme_2(v, v));
mesh_out[j].density = density;
mesh_out[j].v = norm;
}
}
__global__ void kernel_special_cells(float *mesh, int width, int height,
lbm_config_t config) {
// get thread column
const int column = blockIdx.x * blockDim.x + threadIdx.x;
// get thread row
const int row = blockIdx.y * blockDim.y + threadIdx.y;
// get index of the thread
const int i = row * width + column;
// test if the thread is in the mesh
if ((column >= width) || (row >= height))
return;
const int opposite_of[DIRECTIONS] = {0, 3, 4, 1, 2, 7, 8, 5, 6};
// compute_inflow_zou_he_poiseuille_distr
if (column == 0 && row != 0 && row != height - 1) {
compute_inflow_zou_he_poiseuille_distr(&(mesh[(i * DIRECTIONS)]), row,
height, config.inflow_max_velocity);
}
// compute_outflow_zou_he_const_density
if (column == (width - 1) && row != 0 && row != height - 1) {
compute_outflow_zou_he_const_density(&(mesh[(i * DIRECTIONS)]));
}
// compute_bounce_back
int obstacle_row = row - config.obstacle_y;
int obstacle_column = column - config.obstacle_x;
int obstacle_index = obstacle_row * (config.obstacle_width) + obstacle_column;
if ((0 <= obstacle_row && obstacle_row < config.obstacle_height) &&
(0 <= obstacle_column && obstacle_column < config.obstacle_width) &&
config.obstacle_mesh[obstacle_index]) {
for (int k = 0; k < DIRECTIONS; k++) {
mesh[(i * DIRECTIONS) + k] = mesh[(i * DIRECTIONS) + opposite_of[k]];
}
}
// walls
if (row == 0 || row == height - 1) {
for (int k = 0; k < DIRECTIONS; k++)
mesh[(i * DIRECTIONS) + k] = mesh[(i * DIRECTIONS) + opposite_of[k]];
}
}
__global__ void kernel_collision(float *mesh_out, float *mesh_in, int width,
int height, lbm_config_t config) {
// get thread column
const int column = blockIdx.x * blockDim.x + threadIdx.x;
// get thread row
const int row = blockIdx.y * blockDim.y + threadIdx.y;
// get index of the thread
const int i = row * width + column;
// test if the thread is in the mesh
if (!(column > 0 && column < width - 1) || !(row > 0 && row < height - 1))
return;
float v[2];
// compute macroscopic values
float density = get_cell_density(&(mesh_in[(i * DIRECTIONS)]));
get_cell_velocity(v, &(mesh_in[(i * DIRECTIONS)]), density);
// loop on microscopic directions
for (int k = 0; k < DIRECTIONS; k++) {
// compute f at equilibr.
float feq = compute_equilibrium_profile(v, density, k);
// compute f out
mesh_out[(i * DIRECTIONS) + k] =
mesh_in[(i * DIRECTIONS) + k] -
config.relax_parameter * (mesh_in[(i * DIRECTIONS) + k] - feq);
}
}
__global__ void kernel_propagation(float *mesh_out, float *mesh_in, int width,
int height, lbm_config_t config) {
// get thread column
const int column = blockIdx.x * blockDim.x + threadIdx.x;
// get thread row
const int row = blockIdx.y * blockDim.y + threadIdx.y;
// get index of the thread
const int i = row * width + column;
// test if the thread is in the mesh
if ((column >= width) || (row >= height))
return;
const int int_direction_matrix[DIRECTIONS][DIMENSIONS] = {
{+0, +0}, {+1, +0}, {+0, +1}, {-1, +0}, {+0, -1},
{+1, +1}, {-1, +1}, {-1, -1}, {+1, -1}};
for (int k = 0; k < DIRECTIONS; k++) {
int cc = column + int_direction_matrix[k][0];
int rr = row + int_direction_matrix[k][1];
if ((cc >= 0 && cc < width) && (rr >= 0 && rr < height)) {
int j = rr * width + cc;
mesh_out[(j * DIRECTIONS) + k] = mesh_in[(i * DIRECTIONS) + k];
}
}
} |
26e754d040a1066107be7437b0c8a335d9f97c38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#include <assert.h>
#include <cusparse_v2.h>
#include "interpolate.hh"
#define gpuErrchk_here(ans) { gpuAssert_here((ans), __FILE__, __LINE__); }
inline void gpuAssert_here(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define ERR_NE(X,Y) do { if ((X) != (Y)) { \
fprintf(stderr,"Error in %s at %s:%d\n",__func__,__FILE__,__LINE__); \
exit(-1);}} while(0)
#define CUDA_CALL(X) ERR_NE((X),hipSuccess)
#define CUSPARSE_CALL(X) ERR_NE((X),HIPSPARSE_STATUS_SUCCESS)
using namespace std;
InterpArrayContainer * createInterpArrayContainer(size_t *numBytes, int num_arr, int num_points){
InterpArrayContainer *cpu_array_container;
size_t InterpArrayContainer_size = sizeof(InterpArrayContainer);
*numBytes = num_arr*InterpArrayContainer_size;
cpu_array_container = (InterpArrayContainer*)malloc(*numBytes);
for (int i=0; i<num_arr; i++){
gpuErrchk_here(hipMalloc( (void**)&(cpu_array_container[i].array), num_points*sizeof(double) ));
gpuErrchk_here(hipMalloc( (void**)&(cpu_array_container[i].coeff_1), (num_points-1)*sizeof(double) ));
gpuErrchk_here(hipMalloc( (void**)&(cpu_array_container[i].coeff_2), (num_points-1)*sizeof(double) ));
gpuErrchk_here(hipMalloc( (void**)&(cpu_array_container[i].coeff_3), (num_points-1)*sizeof(double) ));
}
return cpu_array_container;
//hipMalloc((void**)&gpu_array_container, *numBytes);
}
InterpArrayContainer * createInterpArrayContainer_gpu(size_t numBytes){
InterpArrayContainer *gpu_array_container;
hipMalloc((void**)&gpu_array_container, numBytes);
return gpu_array_container;
}
void destroyInterpArrayContainer(InterpArrayContainer * gpu_array_container, InterpArrayContainer *cpu_array_container, int num_arr){
for (int i=0; i<num_arr; i++){
gpuErrchk_here(hipFree(cpu_array_container[i].array));
gpuErrchk_here(hipFree(cpu_array_container[i].coeff_1));
gpuErrchk_here(hipFree(cpu_array_container[i].coeff_2));
gpuErrchk_here(hipFree(cpu_array_container[i].coeff_3));
}
gpuErrchk_here(hipFree(gpu_array_container));
free(cpu_array_container);
}
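// GPU cubic-spline setup (unit spacing in the independent variable):
//  - fill_B builds the right-hand side B_i = 3*(y_{i+1} - y_{i-1}) (one-sided at both ends),
//  - Interpolate::setup solves the tridiagonal system assembled by setup_d_vals ([1 4 1] rows,
//    [2 1] / [1 2] at the boundaries) with cusparseDgtsv_nopivot to get the derivatives D_i,
//  - set_spline_constants turns (y_i, D_i, D_{i+1}) into the per-interval coefficients of
//    p_i(t) = y_i + c1*t + c2*t^2 + c3*t^3 for t in [0, 1].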
__global__
void fill_B(InterpArrayContainer *arr_container, double *B, int length_per_arr, int num_arr){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y;
if (j >= num_arr) return;
if (i >= length_per_arr) return;
if (i == length_per_arr - 1){
B[j*length_per_arr + i] = 3.0* (arr_container[j].array[i] - arr_container[j].array[(i-1)]);
} else if (i == 0){
B[j*length_per_arr + i] = 3.0* (arr_container[j].array[1] - arr_container[j].array[0]);
} else{
B[j*length_per_arr + i] = 3.0* (arr_container[j].array[(i+1)] - arr_container[j].array[(i-1)]);
}
/*# if __CUDA_ARCH__>=200
if ((i < 100) && (j ==8))
printf("%d %d, %.18e, %.18e, %.18e, %.18e\n", i, j, B[j*length_per_arr + i], arr_container[j].array[i+1], arr_container[j].array[i], arr_container[j].array[i-1]);
#endif //*/
}
__global__
void set_spline_constants(InterpArrayContainer *arr_container, double *B, int length_per_arr, int num_arr){
double D_i, D_ip1, y_i, y_ip1;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y;
if (j >= num_arr) return;
if (i >= length_per_arr) return;
D_i = B[(j*length_per_arr) + i];
D_ip1 = B[(j*length_per_arr) + i + 1];
y_i = arr_container[j].array[i];
y_ip1 = arr_container[j].array[i+1];
arr_container[j].coeff_1[i] = D_i;
arr_container[j].coeff_2[i] = 3.0 * (y_ip1 - y_i) - 2.0*D_i - D_ip1;
arr_container[j].coeff_3[i] = 2.0 * (y_i - y_ip1) + D_i + D_ip1;
/*# if __CUDA_ARCH__>=200
if ((i % 2000 == 0) && (j == 3))
printf("%d, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e \n", i, B[(j*length_per_arr) + i], B[(j*length_per_arr) + i+1], arr_container[j].array[i], arr_container[j].array[i], arr_container[j].array[i+1], arr_container[j].coeff_1[i], arr_container[j].coeff_2[i], arr_container[j].coeff_3[i]);
#endif //*/
}
Interpolate::Interpolate(){
int pass = 0;
}
__host__
void Interpolate::alloc_arrays(int max_length_init, int num_arr){
gpuErrchk_here(hipMalloc(&d_B, max_length_init*num_arr*sizeof(double)));
gpuErrchk_here(hipMalloc(&d_dl, max_length_init*sizeof(double)));
gpuErrchk_here(hipMalloc(&d_d, max_length_init*sizeof(double)));
gpuErrchk_here(hipMalloc(&d_du, max_length_init*sizeof(double)));
}
__global__
void setup_d_vals(double *dl, double *d, double *du, int current_length){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= current_length) return;
if (i == 0){
dl[0] = 0.0;
d[0] = 2.0;
du[0] = 1.0;
} else if (i == current_length - 1){
dl[current_length-1] = 1.0;
d[current_length-1] = 2.0;
du[current_length-1] = 0.0;
} else{
dl[i] = 1.0;
d[i] = 4.0;
du[i] = 1.0;
}
# /*if __CUDA_ARCH__>=200
if ((i == 0) || (i == current_length-1) || (i == 10))
printf("%d, %e, %e, %e \n", i, dl[i], d[i], du[i]);
#endif //*/
}
/*__device__
void find_index_and_xout(int *index, double *x_out, double dx, double x_new, double *x_old, int length){
*index = floor((x_new - x_old[0])/dx);
if (*index >= length - 1) *index = length - 2;
*x_out = (x_new - x_old[*index])/(x_old[*index+1] - x_old[*index]);
}
__device__
double interpolate_array(InterpArrayContainer array_container, double x, int index){
double coeff_0 = array_container.array[index];
double coeff_1 = array_container.coeff_1[index];
double coeff_2 = array_container.coeff_2[index];
double coeff_3 = array_container.coeff_3[index];
double x2 = x*x;
double x3 = x*x2;
double return_val = coeff_0 + coeff_1*x + coeff_2*x2 + coeff_3*x3;
return return_val;
}//*/
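// Added commentary: setup() runs the whole fit in one pass -- setup_d_vals builds the
// (2,1 / 1,4,1 / 1,2) tridiagonal bands, fill_B builds the right-hand side from the sampled
// values, the gtsv solve overwrites d_B with the per-point derivatives, and
// set_spline_constants turns those derivatives into per-segment cubic coefficients.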
void Interpolate::setup(InterpArrayContainer *array_container, int m_, int n_){
m = m_;
n = n_;
int NUM_THREADS = 256;
int num_blocks = ::ceil((m + NUM_THREADS -1)/NUM_THREADS);
dim3 interpGrid(num_blocks, n);
hipLaunchKernelGGL(( setup_d_vals), dim3(num_blocks), dim3(NUM_THREADS), 0, 0, d_dl, d_d, d_du, m);
hipDeviceSynchronize();
gpuErrchk_here(hipGetLastError());
hipLaunchKernelGGL(( fill_B), dim3(interpGrid), dim3(NUM_THREADS), 0, 0, array_container, d_B, m, n);
hipDeviceSynchronize();
gpuErrchk_here(hipGetLastError());
/*hipMemcpy(checker, d_B, m*n*sizeof(double), hipMemcpyDeviceToHost);
for (int i=0; i<n; i++){
for (int j=0; j<m; j+=100){
printf("%d %d, %e\n", i, j, checker[i*m + j]);
}
}//*/
double *checker = new double[m*n];
CUSPARSE_CALL( hipsparseCreate(&handle) );
hipsparseStatus_t status = cusparseDgtsv_nopivot(handle, m, n, d_dl, d_d, d_du, d_B, m);
if (status != HIPSPARSE_STATUS_SUCCESS) assert(0);
hipsparseDestroy(handle);
hipLaunchKernelGGL(( set_spline_constants), dim3(interpGrid), dim3(NUM_THREADS), 0, 0, array_container, d_B, m, n);
hipDeviceSynchronize();
gpuErrchk_here(hipGetLastError());
delete[] checker;
}
__host__ Interpolate::~Interpolate(){
hipFree(d_dl);
hipFree(d_du);
hipFree(d_d);
hipFree(d_B);
//delete[] d;
//delete[] dl;
//delete[] du;
}
| 26e754d040a1066107be7437b0c8a335d9f97c38.cu | #include "stdio.h"
#include <assert.h>
#include <cusparse_v2.h>
#include "interpolate.hh"
#define gpuErrchk_here(ans) { gpuAssert_here((ans), __FILE__, __LINE__); }
inline void gpuAssert_here(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define ERR_NE(X,Y) do { if ((X) != (Y)) { \
fprintf(stderr,"Error in %s at %s:%d\n",__func__,__FILE__,__LINE__); \
exit(-1);}} while(0)
#define CUDA_CALL(X) ERR_NE((X),cudaSuccess)
#define CUSPARSE_CALL(X) ERR_NE((X),CUSPARSE_STATUS_SUCCESS)
using namespace std;
InterpArrayContainer * createInterpArrayContainer(size_t *numBytes, int num_arr, int num_points){
InterpArrayContainer *cpu_array_container;
size_t InterpArrayContainer_size = sizeof(InterpArrayContainer);
*numBytes = num_arr*InterpArrayContainer_size;
cpu_array_container = (InterpArrayContainer*)malloc(*numBytes);
for (int i=0; i<num_arr; i++){
gpuErrchk_here(cudaMalloc( (void**)&(cpu_array_container[i].array), num_points*sizeof(double) ));
gpuErrchk_here(cudaMalloc( (void**)&(cpu_array_container[i].coeff_1), (num_points-1)*sizeof(double) ));
gpuErrchk_here(cudaMalloc( (void**)&(cpu_array_container[i].coeff_2), (num_points-1)*sizeof(double) ));
gpuErrchk_here(cudaMalloc( (void**)&(cpu_array_container[i].coeff_3), (num_points-1)*sizeof(double) ));
}
return cpu_array_container;
//cudaMalloc((void**)&gpu_array_container, *numBytes);
}
InterpArrayContainer * createInterpArrayContainer_gpu(size_t numBytes){
InterpArrayContainer *gpu_array_container;
cudaMalloc((void**)&gpu_array_container, numBytes);
return gpu_array_container;
}
void destroyInterpArrayContainer(InterpArrayContainer * gpu_array_container, InterpArrayContainer *cpu_array_container, int num_arr){
for (int i=0; i<num_arr; i++){
gpuErrchk_here(cudaFree(cpu_array_container[i].array));
gpuErrchk_here(cudaFree(cpu_array_container[i].coeff_1));
gpuErrchk_here(cudaFree(cpu_array_container[i].coeff_2));
gpuErrchk_here(cudaFree(cpu_array_container[i].coeff_3));
}
gpuErrchk_here(cudaFree(gpu_array_container));
free(cpu_array_container);
}
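// Editorial sketch (not in the original source): the create/destroy helpers above are
// presumably used together with the Interpolate class below in a lifecycle like the one
// sketched here. Function and variable names inside the block are illustrative assumptions;
// it is left commented out, following this file's existing /* ... //*/ convention.
/*
void example_spline_fit(int num_arr, int num_points)
{
size_t numBytes;
InterpArrayContainer *h_containers = createInterpArrayContainer(&numBytes, num_arr, num_points);
InterpArrayContainer *d_containers = createInterpArrayContainer_gpu(numBytes);
// ... copy the sampled y-values into each h_containers[i].array (device pointers) ...
cudaMemcpy(d_containers, h_containers, numBytes, cudaMemcpyHostToDevice);
Interpolate interp;
interp.alloc_arrays(num_points, num_arr);
interp.setup(d_containers, num_points, num_arr);
destroyInterpArrayContainer(d_containers, h_containers, num_arr);
}
//*/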
__global__
void fill_B(InterpArrayContainer *arr_container, double *B, int length_per_arr, int num_arr){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y;
if (j >= num_arr) return;
if (i >= length_per_arr) return;
if (i == length_per_arr - 1){
B[j*length_per_arr + i] = 3.0* (arr_container[j].array[i] - arr_container[j].array[(i-1)]);
} else if (i == 0){
B[j*length_per_arr + i] = 3.0* (arr_container[j].array[1] - arr_container[j].array[0]);
} else{
B[j*length_per_arr + i] = 3.0* (arr_container[j].array[(i+1)] - arr_container[j].array[(i-1)]);
}
/*# if __CUDA_ARCH__>=200
if ((i < 100) && (j ==8))
printf("%d %d, %.18e, %.18e, %.18e, %.18e\n", i, j, B[j*length_per_arr + i], arr_container[j].array[i+1], arr_container[j].array[i], arr_container[j].array[i-1]);
#endif //*/
}
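// Added commentary: B holds the right-hand side of the standard tridiagonal system for the
// first derivatives D_i of a natural cubic spline on unit-spaced knots. Once that system is
// solved (see Interpolate::setup below), set_spline_constants stores, per segment [i, i+1],
// y(t) = y_i + D_i*t + (3*(y_{i+1}-y_i) - 2*D_i - D_{i+1})*t^2 + (2*(y_i-y_{i+1}) + D_i + D_{i+1})*t^3
// with t in [0,1]; coeff_1..coeff_3 below are exactly those three higher-order coefficients.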
__global__
void set_spline_constants(InterpArrayContainer *arr_container, double *B, int length_per_arr, int num_arr){
double D_i, D_ip1, y_i, y_ip1;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y;
if (j >= num_arr) return;
if (i >= length_per_arr - 1) return; // only length_per_arr-1 segments: coeff_* arrays hold num_points-1 entries and B[i+1] is read below
D_i = B[(j*length_per_arr) + i];
D_ip1 = B[(j*length_per_arr) + i + 1];
y_i = arr_container[j].array[i];
y_ip1 = arr_container[j].array[i+1];
arr_container[j].coeff_1[i] = D_i;
arr_container[j].coeff_2[i] = 3.0 * (y_ip1 - y_i) - 2.0*D_i - D_ip1;
arr_container[j].coeff_3[i] = 2.0 * (y_i - y_ip1) + D_i + D_ip1;
/*# if __CUDA_ARCH__>=200
if ((i % 2000 == 0) && (j == 3))
printf("%d, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e, %.18e \n", i, B[(j*length_per_arr) + i], B[(j*length_per_arr) + i+1], arr_container[j].array[i], arr_container[j].array[i], arr_container[j].array[i+1], arr_container[j].coeff_1[i], arr_container[j].coeff_2[i], arr_container[j].coeff_3[i]);
#endif //*/
}
Interpolate::Interpolate(){
int pass = 0;
}
__host__
void Interpolate::alloc_arrays(int max_length_init, int num_arr){
gpuErrchk_here(cudaMalloc(&d_B, max_length_init*num_arr*sizeof(double)));
gpuErrchk_here(cudaMalloc(&d_dl, max_length_init*sizeof(double)));
gpuErrchk_here(cudaMalloc(&d_d, max_length_init*sizeof(double)));
gpuErrchk_here(cudaMalloc(&d_du, max_length_init*sizeof(double)));
}
__global__
void setup_d_vals(double *dl, double *d, double *du, int current_length){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= current_length) return;
if (i == 0){
dl[0] = 0.0;
d[0] = 2.0;
du[0] = 1.0;
} else if (i == current_length - 1){
dl[current_length-1] = 1.0;
d[current_length-1] = 2.0;
du[current_length-1] = 0.0;
} else{
dl[i] = 1.0;
d[i] = 4.0;
du[i] = 1.0;
}
/*# if __CUDA_ARCH__>=200
if ((i == 0) || (i == current_length-1) || (i == 10))
printf("%d, %e, %e, %e \n", i, dl[i], d[i], du[i]);
#endif //*/
}
/*__device__
void find_index_and_xout(int *index, double *x_out, double dx, double x_new, double *x_old, int length){
*index = floor((x_new - x_old[0])/dx);
if (*index >= length - 1) *index = length - 2;
*x_out = (x_new - x_old[*index])/(x_old[*index+1] - x_old[*index]);
}
__device__
double interpolate_array(InterpArrayContainer array_container, double x, int index){
double coeff_0 = array_container.array[index];
double coeff_1 = array_container.coeff_1[index];
double coeff_2 = array_container.coeff_2[index];
double coeff_3 = array_container.coeff_3[index];
double x2 = x*x;
double x3 = x*x2;
double return_val = coeff_0 + coeff_1*x + coeff_2*x2 + coeff_3*x3;
return return_val;
}//*/
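// Added commentary: setup() runs the whole fit in one pass -- setup_d_vals builds the
// (2,1 / 1,4,1 / 1,2) tridiagonal bands, fill_B builds the right-hand side from the sampled
// values, the gtsv solve overwrites d_B with the per-point derivatives, and
// set_spline_constants turns those derivatives into per-segment cubic coefficients.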
void Interpolate::setup(InterpArrayContainer *array_container, int m_, int n_){
m = m_;
n = n_;
int NUM_THREADS = 256;
int num_blocks = std::ceil((m + NUM_THREADS -1)/NUM_THREADS);
dim3 interpGrid(num_blocks, n);
setup_d_vals<<<num_blocks, NUM_THREADS>>>(d_dl, d_d, d_du, m);
cudaDeviceSynchronize();
gpuErrchk_here(cudaGetLastError());
fill_B<<<interpGrid, NUM_THREADS>>>(array_container, d_B, m, n);
cudaDeviceSynchronize();
gpuErrchk_here(cudaGetLastError());
/*cudaMemcpy(checker, d_B, m*n*sizeof(double), cudaMemcpyDeviceToHost);
for (int i=0; i<n; i++){
for (int j=0; j<m; j+=100){
printf("%d %d, %e\n", i, j, checker[i*m + j]);
}
}//*/
double *checker = new double[m*n];
CUSPARSE_CALL( cusparseCreate(&handle) );
cusparseStatus_t status = cusparseDgtsv_nopivot(handle, m, n, d_dl, d_d, d_du, d_B, m);
if (status != CUSPARSE_STATUS_SUCCESS) assert(0);
cusparseDestroy(handle);
set_spline_constants<<<interpGrid, NUM_THREADS>>>(array_container, d_B, m, n);
cudaDeviceSynchronize();
gpuErrchk_here(cudaGetLastError());
delete[] checker;
}
__host__ Interpolate::~Interpolate(){
cudaFree(d_dl);
cudaFree(d_du);
cudaFree(d_d);
cudaFree(d_B);
//delete[] d;
//delete[] dl;
//delete[] du;
}
|
4a02e1a84a98b9bb9e44bbf6d02ef37b3228cd8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* cuPrintf.cu
*
* This is a printf command callable from within a kernel. It is set
* up so that output is sent to a memory buffer, which is emptied from
* the host side - but only after a hipDeviceSynchronize() on the host.
*
* Currently, there is a limitation of around 200 characters of output
* and no more than 10 arguments to a single cuPrintf() call. Issue
* multiple calls if longer format strings are required.
*
* It requires minimal setup, and is *NOT* optimised for performance.
* For example, writes are not coalesced - this is because there is an
* assumption that people will not want to printf from every single one
* of thousands of threads, but only from individual threads at a time.
*
* Using this is simple - it requires one host-side call to initialise
* everything, and then kernels can call cuPrintf at will. Sample code
* is the easiest way to demonstrate:
*
#include "cuPrintf.hip"
__global__ void testKernel(int val)
{
cuPrintf("Value is: %d\n", val);
}
int main()
{
cudaPrintfInit();
testKernel<<< 2, 3 >>>(10);
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
return 0;
}
*
* See the header file, "cuPrintf.cuh" for more info, especially
* arguments to cudaPrintfInit() and cudaPrintfDisplay();
*/
#ifndef CUPRINTF_CU
#define CUPRINTF_CU
#include "cuPrintf_hip.cuh"
#if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture
#include <sm_11_atomic_functions.h>
#endif
// This is the smallest amount of memory, per-thread, which is allowed.
// It is also the largest amount of space a single printf() can take up
const static int CUPRINTF_MAX_LEN = 256;
// This structure is used internally to track block/thread output restrictions.
typedef struct __align__(8) {
int threadid; // CUPRINTF_UNRESTRICTED for unrestricted
int blockid; // CUPRINTF_UNRESTRICTED for unrestricted
} cuPrintfRestriction;
// The main storage is in a global print buffer, which has a known
// start/end/length. These are atomically updated so it works as a
// circular buffer.
// Since the only control primitive that can be used is atomicAdd(),
// we cannot wrap the pointer as such. The actual address must be
// calculated from printfBufferPtr by mod-ing with printfBufferLength.
// For sm_10 architecture, we must subdivide the buffer per-thread
// since we do not even have an atomic primitive.
__constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host)
__constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host)
__device__ static cuPrintfRestriction restrictRules; // Output restrictions
__device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset
// This is the header preceding all printf entries.
// NOTE: It *must* be size-aligned to the maximum entity size (size_t)
typedef struct __align__(8) {
unsigned short magic; // Magic number says we're valid
unsigned short fmtoffset; // Offset of fmt string into buffer
unsigned short blockid; // Block ID of author
unsigned short threadid; // Thread ID of author
} cuPrintfHeader;
// Special header for sm_10 architecture
#define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character
typedef struct __align__(16) {
unsigned short magic; // sm_10 specific magic number
unsigned short unused;
unsigned int thread_index; // thread ID for this buffer
unsigned int thread_buf_len; // per-thread buffer length
unsigned int offset; // most recent printf's offset
} cuPrintfHeaderSM10;
// Because we can't write an element which is not aligned to its bit-size,
// we have to align all sizes and variables on maximum-size boundaries.
// That means sizeof(double) in this case, but we'll use (long long) for
// better arch<1.3 support
#define CUPRINTF_ALIGN_SIZE sizeof(long long)
// All our headers are prefixed with a magic number so we know they're ready
#define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character
//
// getNextPrintfBufPtr
//
// Grabs a block of space in the general circular buffer, using an
// atomic function to ensure that it's ours. We handle wrapping
// around the circular buffer and return a pointer to a place which
// can be written to.
//
// Important notes:
// 1. We always grab CUPRINTF_MAX_LEN bytes
// 2. Because of 1, we never worry about wrapping around the end
// 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN
//
// This returns a pointer to the place where we own.
//
__device__ static char *getNextPrintfBufPtr()
{
// Initialisation check
if(!printfBufferPtr)
return NULL;
// Thread/block restriction check
if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y)))
return NULL;
if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z)))
return NULL;
// Conditional section, dependent on architecture
#if __CUDA_ARCH__ == 100
// For sm_10 architectures, we have no atomic add - this means we must split the
// entire available buffer into per-thread blocks. Inefficient, but what can you do.
int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z);
int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z +
(blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z);
// Find our own block of data and go to it. Make sure the per-thread length
// is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and
// alignment issues! We must round down, of course.
unsigned int thread_buf_len = printfBufferLength / thread_count;
thread_buf_len &= ~(CUPRINTF_MAX_LEN-1);
// We *must* have a thread buffer length able to fit at least two printfs (one header, one real)
if(thread_buf_len < (CUPRINTF_MAX_LEN * 2))
return NULL;
// Now address our section of the buffer. The first item is a header.
char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index);
cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer;
if(hdr.magic != CUPRINTF_SM10_MAGIC)
{
// If our header is not set up, initialise it
hdr.magic = CUPRINTF_SM10_MAGIC;
hdr.thread_index = thread_index;
hdr.thread_buf_len = thread_buf_len;
hdr.offset = 0; // Note we start at 0! We pre-increment below.
*(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header
// For initial setup purposes, we might need to init thread0's header too
// (so that cudaPrintfDisplay() below will work). This is only run once.
cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer;
tophdr->thread_buf_len = thread_buf_len;
}
// Adjust the offset by the right amount, and wrap it if need be
unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN;
if(offset >= hdr.thread_buf_len)
offset = CUPRINTF_MAX_LEN;
// Write back the new offset for next time and return a pointer to it
((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset;
return myPrintfBuffer + offset;
#else
// Much easier with an atomic operation!
size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer;
offset %= printfBufferLength;
return globalPrintfBuffer + offset;
#endif
}
//
// writePrintfHeader
//
// Inserts the header for containing our UID, fmt position and
// block/thread number. We generate it dynamically to avoid
// issues arising from requiring pre-initialisation.
//
__device__ static void writePrintfHeader(char *ptr, char *fmtptr)
{
if(ptr)
{
cuPrintfHeader header;
header.magic = CUPRINTF_SM11_MAGIC;
header.fmtoffset = (unsigned short)(fmtptr - ptr);
header.blockid = blockIdx.x + gridDim.x*blockIdx.y;
header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
*(cuPrintfHeader *)(void *)ptr = header;
}
}
//
// cuPrintfStrncpy
//
// This special strncpy outputs an aligned length value, followed by the
// string. It then zero-pads the rest of the string until a 64-aligned
// boundary. The length *includes* the padding. A pointer to the byte
// just after the \0 is returned.
//
// This function could overflow CUPRINTF_MAX_LEN characters in our buffer.
// To avoid it, we must count as we output and truncate where necessary.
//
__device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end)
{
// Initialisation and overflow check
if(!dest || !src || (dest >= end))
return NULL;
// Prepare to write the length specifier. We're guaranteed to have
// at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in
// chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE.
int *lenptr = (int *)(void *)dest;
int len = 0;
dest += CUPRINTF_ALIGN_SIZE;
// Now copy the string
while(n--)
{
if(dest >= end) // Overflow check
break;
len++;
*dest++ = *src;
if(*src++ == '\0')
break;
}
// Now write out the padding bytes, and we have our length.
while((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0))
{
len++;
*dest++ = 0;
}
*lenptr = len;
return (dest < end) ? dest : NULL; // Overflow means return NULL
}
//
// copyArg
//
// This copies a length specifier and then the argument out to the
// data buffer. Templates let the compiler figure all this out at
// compile-time, making life much simpler from the programming
// point of view. I'm assuming all (const char *) is a string, and
// everything else is the variable it points at. I'd love to see
// a better way of doing it, but aside from parsing the format
// string I can't think of one.
//
// The length of the data type is inserted at the beginning (so that
// the display can distinguish between float and double), and the
// pointer to the end of the entry is returned.
//
__device__ static char *copyArg(char *ptr, const char *arg, char *end)
{
// Initialisation check
if(!ptr || !arg)
return NULL;
// strncpy does all our work. We just terminate.
if((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL)
*ptr = 0;
return ptr;
}
template <typename T>
__device__ static char *copyArg(char *ptr, T &arg, char *end)
{
// Initialisation and overflow check. Alignment rules mean that
// we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need
// to check that one offset.
if(!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end))
return NULL;
// Write the length and argument
*(int *)(void *)ptr = sizeof(arg);
ptr += CUPRINTF_ALIGN_SIZE;
*(T *)(void *)ptr = arg;
ptr += CUPRINTF_ALIGN_SIZE;
*ptr = 0;
return ptr;
}
//
// cuPrintf
//
// Templated printf functions to handle multiple arguments.
// Note we return the total amount of data copied, not the number
// of characters output. But then again, who ever looks at the
// return from printf() anyway?
//
// The format is to grab a block of circular buffer space, the
// start of which will hold a header and a pointer to the format
// string. We then write in all the arguments, and finally the
// format string itself. This is to make it easy to prevent
// overflow of our buffer (we support up to 10 arguments, each of
// which can be 12 bytes in length - that means that only the
// format string (or a %s) can actually overflow; so the overflow
// check need only be in the strcpy function).
//
// The header is written at the very last because that's what
// makes it look like we're done.
//
// Errors, which are basically lack-of-initialisation, are ignored
// in the called functions because NULL pointers are passed around
//
// All printf variants basically do the same thing, setting up the
// buffer, writing all arguments, then finalising the header. For
// clarity, we'll pack the code into some big macros.
#define CUPRINTF_PREAMBLE \
char *start, *end, *bufptr, *fmtstart; \
if((start = getNextPrintfBufPtr()) == NULL) return 0; \
end = start + CUPRINTF_MAX_LEN; \
bufptr = start + sizeof(cuPrintfHeader);
// Posting an argument is easy
#define CUPRINTF_ARG(argname) \
bufptr = copyArg(bufptr, argname, end);
// After args are done, record start-of-fmt and write the fmt and header
#define CUPRINTF_POSTAMBLE \
fmtstart = bufptr; \
end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \
writePrintfHeader(start, end ? fmtstart : NULL); \
return end ? (int)(end - start) : 0;
__device__ int cuPrintf(const char *fmt)
{
CUPRINTF_PREAMBLE;
CUPRINTF_POSTAMBLE;
}
template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_ARG(arg10);
CUPRINTF_POSTAMBLE;
}
#undef CUPRINTF_PREAMBLE
#undef CUPRINTF_ARG
#undef CUPRINTF_POSTAMBLE
//
// cuPrintfRestrict
//
// Called to restrict output to a given thread/block.
// We store the info in "restrictRules", which is set up at
// init time by the host. It's not the cleanest way to do this
// because it means restrictions will last between
// invocations, but given the output-pointer continuity,
// I feel this is reasonable.
//
__device__ void cuPrintfRestrict(int threadid, int blockid)
{
int thread_count = blockDim.x * blockDim.y * blockDim.z;
if(((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED))
restrictRules.threadid = threadid;
int block_count = gridDim.x * gridDim.y;
if(((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED))
restrictRules.blockid = blockid;
}
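// Editorial sketch (not in the original source): a minimal kernel showing cuPrintfRestrict
// in use; the kernel name and argument are illustrative. Left commented out so it does not
// change what this translation unit defines.
/*
__global__ void restrictedPrintKernel(int val)
{
cuPrintfRestrict(0, 0); // only thread 0 of block 0 passes the output check
cuPrintf("Value is: %d\n", val);
}
*/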
///////////////////////////////////////////////////////////////////////////////
// HOST SIDE
#include <stdio.h>
static FILE *printf_fp;
static char *printfbuf_start=NULL;
static char *printfbuf_device=NULL;
static int printfbuf_len=0;
//
// outputPrintfData
//
// Our own internal function, which takes a pointer to a data buffer
// and passes it through libc's printf for output.
//
// We receive the format string and a pointer to where the data is
// held. We then run through and print it out.
//
// Returns 0 on failure, 1 on success
//
static int outputPrintfData(char *fmt, char *data)
{
// Format string is prefixed by a length that we don't need
fmt += CUPRINTF_ALIGN_SIZE;
// Now run through it, printing everything we can. We must
// run to every % character, extract only that, and use printf
// to format it.
char *p = strchr(fmt, '%');
while(p != NULL)
{
// Print up to the % character
*p = '\0';
fputs(fmt, printf_fp);
*p = '%'; // Put back the %
// Now handle the format specifier
char *format = p++; // Points to the '%'
p += strcspn(p, "%cdiouxXeEfgGaAnps");
if(*p == '\0') // If no format specifier, print the whole thing
{
fmt = format;
break;
}
// Cut out the format bit and use printf to print it. It's prefixed
// by its length.
int arglen = *(int *)data;
if(arglen > CUPRINTF_MAX_LEN)
{
fputs("Corrupt printf buffer data - aborting\n", printf_fp);
return 0;
}
data += CUPRINTF_ALIGN_SIZE;
char specifier = *p++;
char c = *p; // Store for later
*p = '\0';
switch(specifier)
{
// These all take integer arguments
case 'c':
case 'd':
case 'i':
case 'o':
case 'u':
case 'x':
case 'X':
case 'p':
fprintf(printf_fp, format, *((int *)data));
break;
// These all take double arguments
case 'e':
case 'E':
case 'f':
case 'g':
case 'G':
case 'a':
case 'A':
if(arglen == 4) // Float vs. Double thing
fprintf(printf_fp, format, *((float *)data));
else
fprintf(printf_fp, format, *((double *)data));
break;
// Strings are handled in a special way
case 's':
fprintf(printf_fp, format, (char *)data);
break;
// % is special
case '%':
fprintf(printf_fp, "%%");
break;
// Everything else is just printed out as-is
default:
fprintf(printf_fp, "%s", format);
break;
}
data += CUPRINTF_ALIGN_SIZE; // Move on to next argument
*p = c; // Restore what we removed
fmt = p; // Adjust fmt string to be past the specifier
p = strchr(fmt, '%'); // and get the next specifier
}
// Print out the last of the string
fputs(fmt, printf_fp);
return 1;
}
//
// doPrintfDisplay
//
// This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the
// print function above to display them. We've got this separate from
// cudaPrintfDisplay() below so we can handle the SM_10 architecture
// partitioning.
//
static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr)
{
// Grab, piece-by-piece, each output element until we catch
// up with the circular buffer end pointer
int printf_count=0;
char printfbuf_local[CUPRINTF_MAX_LEN+1];
printfbuf_local[CUPRINTF_MAX_LEN] = '\0';
while(bufptr != endptr)
{
// Wrap ourselves at the end-of-buffer
if(bufptr == bufend)
bufptr = bufstart;
// Adjust our start pointer to within the circular buffer and copy a block.
hipMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, hipMemcpyDeviceToHost);
// If the magic number isn't valid, then this write hasn't gone through
// yet and we'll wait until it does (or we're past the end for non-async printfs).
cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local;
if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN))
{
//fprintf(printf_fp, "Bad magic number in printf header\n");
break;
}
// Extract all the info and get this printf done
if(headings)
fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid);
if(hdr->fmtoffset == 0)
fprintf(printf_fp, "printf buffer overflow\n");
else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader)))
break;
printf_count++;
// Clear if asked
if(clear)
hipMemset(bufptr, 0, CUPRINTF_MAX_LEN);
// Now advance our start location, because we're done, and keep copying
bufptr += CUPRINTF_MAX_LEN;
}
return printf_count;
}
//
// cudaPrintfInit
//
// Takes a buffer length to allocate, creates the memory on the device and
// returns a pointer to it for when a kernel is called. It's up to the caller
// to free it.
//
extern "C" hipError_t cudaPrintfInit(size_t bufferLen)
{
// Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN
bufferLen = (bufferLen < (size_t)CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen;
if((bufferLen % CUPRINTF_MAX_LEN) > 0)
bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN));
printfbuf_len = (int)bufferLen;
// Allocate a print buffer on the device and zero it
if(hipMalloc((void **)&printfbuf_device, printfbuf_len) != hipSuccess)
return hipErrorInitializationError;
hipMemset(printfbuf_device, 0, printfbuf_len);
printfbuf_start = printfbuf_device; // Where we start reading from
// No restrictions to begin with
cuPrintfRestriction restrict;
restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED;
hipMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict));
// Initialise the buffer and the respective lengths/pointers.
hipMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *));
hipMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *));
hipMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len));
return hipSuccess;
}
//
// cudaPrintfEnd
//
// Frees up the memory which we allocated
//
extern "C" void cudaPrintfEnd()
{
if(!printfbuf_start || !printfbuf_device)
return;
hipFree(printfbuf_device);
printfbuf_start = printfbuf_device = NULL;
}
//
// cudaPrintfDisplay
//
// Each call to this function dumps the entire current contents
// of the printf buffer to the pre-specified FILE pointer. The
// circular "start" pointer is advanced so that subsequent calls
// dumps only new stuff.
//
// In the case of async memory access (via streams), call this
// repeatedly to keep trying to empty the buffer. If it's a sync
// access, then the whole buffer should empty in one go.
//
// Arguments:
// outputFP - File descriptor to output to (NULL => stdout)
// showThreadID - If true, prints [block,thread] before each line
//
extern "C" hipError_t cudaPrintfDisplay(void *outputFP, bool showThreadID)
{
printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP);
// For now, we force "synchronous" mode which means we're not concurrent
// with kernel execution. This also means we don't need clearOnPrint.
// If you're patching it for async operation, here's where you want it.
bool sync_printfs = true;
bool clearOnPrint = false;
// Initialisation check
if(!printfbuf_start || !printfbuf_device || !printf_fp)
return hipErrorMissingConfiguration;
// To determine which architecture we're using, we read the
// first short from the buffer - it'll be the magic number
// relating to the version.
unsigned short magic;
hipMemcpy(&magic, printfbuf_device, sizeof(unsigned short), hipMemcpyDeviceToHost);
// For SM_10 architecture, we've split our buffer into one-per-thread.
// That means we must do each thread block separately. It'll require
// extra reading. We also, for now, don't support async printfs because
// that requires tracking one start pointer per thread.
if(magic == CUPRINTF_SM10_MAGIC)
{
sync_printfs = true;
clearOnPrint = false;
int blocklen = 0;
char *blockptr = printfbuf_device;
while(blockptr < (printfbuf_device + printfbuf_len))
{
cuPrintfHeaderSM10 hdr;
hipMemcpy(&hdr, blockptr, sizeof(hdr), hipMemcpyDeviceToHost);
// We get our block-size-step from the very first header
if(hdr.thread_buf_len != 0)
blocklen = hdr.thread_buf_len;
// No magic number means no printfs from this thread
if(hdr.magic != CUPRINTF_SM10_MAGIC)
{
if(blocklen == 0)
{
fprintf(printf_fp, "No printf headers found at all!\n");
break; // No valid headers!
}
blockptr += blocklen;
continue;
}
// "offset" is non-zero then we can print the block contents
if(hdr.offset > 0)
{
// For synchronous printfs, we must print from endptr->bufend, then from start->end
if(sync_printfs)
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len);
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN);
}
// Move on to the next block and loop again
blockptr += hdr.thread_buf_len;
}
}
// For SM_11 and up, everything is a single buffer and it's simple
else if(magic == CUPRINTF_SM11_MAGIC)
{
// Grab the current "end of circular buffer" pointer.
char *printfbuf_end = NULL;
hipMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *));
// Adjust our starting and ending pointers to within the block
char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device;
char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device;
// For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular
// buffer wrap carefully because we could miss those past "end".
if(sync_printfs)
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len);
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr);
printfbuf_start = printfbuf_end;
}
else
;//printf("Bad magic number in cuPrintf buffer header\n");
// If we were synchronous, then we must ensure that the memory is cleared on exit
// otherwise another kernel launch with a different grid size could conflict.
if(sync_printfs)
hipMemset(printfbuf_device, 0, printfbuf_len);
return hipSuccess;
}
// Cleanup
#undef CUPRINTF_MAX_LEN
#undef CUPRINTF_ALIGN_SIZE
#undef CUPRINTF_SM10_MAGIC
#undef CUPRINTF_SM11_MAGIC
#endif
| 4a02e1a84a98b9bb9e44bbf6d02ef37b3228cd8f.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* cuPrintf.cu
*
* This is a printf command callable from within a kernel. It is set
* up so that output is sent to a memory buffer, which is emptied from
* the host side - but only after a cudaThreadSynchronize() on the host.
*
* Currently, there is a limitation of around 200 characters of output
* and no more than 10 arguments to a single cuPrintf() call. Issue
* multiple calls if longer format strings are required.
*
* It requires minimal setup, and is *NOT* optimised for performance.
* For example, writes are not coalesced - this is because there is an
* assumption that people will not want to printf from every single one
* of thousands of threads, but only from individual threads at a time.
*
* Using this is simple - it requires one host-side call to initialise
* everything, and then kernels can call cuPrintf at will. Sample code
* is the easiest way to demonstrate:
*
#include "cuPrintf.cu"
__global__ void testKernel(int val)
{
cuPrintf("Value is: %d\n", val);
}
int main()
{
cudaPrintfInit();
testKernel<<< 2, 3 >>>(10);
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
return 0;
}
*
* See the header file, "cuPrintf.cuh" for more info, especially
* arguments to cudaPrintfInit() and cudaPrintfDisplay();
*/
#ifndef CUPRINTF_CU
#define CUPRINTF_CU
#include "cuPrintf.cuh"
#if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture
#include <sm_11_atomic_functions.h>
#endif
// This is the smallest amount of memory, per-thread, which is allowed.
// It is also the largest amount of space a single printf() can take up
const static int CUPRINTF_MAX_LEN = 256;
// This structure is used internally to track block/thread output restrictions.
typedef struct __align__(8) {
int threadid; // CUPRINTF_UNRESTRICTED for unrestricted
int blockid; // CUPRINTF_UNRESTRICTED for unrestricted
} cuPrintfRestriction;
// The main storage is in a global print buffer, which has a known
// start/end/length. These are atomically updated so it works as a
// circular buffer.
// Since the only control primitive that can be used is atomicAdd(),
// we cannot wrap the pointer as such. The actual address must be
// calculated from printfBufferPtr by mod-ing with printfBufferLength.
// For sm_10 architecture, we must subdivide the buffer per-thread
// since we do not even have an atomic primitive.
__constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host)
__constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host)
__device__ static cuPrintfRestriction restrictRules; // Output restrictions
__device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset
// This is the header preceding all printf entries.
// NOTE: It *must* be size-aligned to the maximum entity size (size_t)
typedef struct __align__(8) {
unsigned short magic; // Magic number says we're valid
unsigned short fmtoffset; // Offset of fmt string into buffer
unsigned short blockid; // Block ID of author
unsigned short threadid; // Thread ID of author
} cuPrintfHeader;
// Special header for sm_10 architecture
#define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character
typedef struct __align__(16) {
unsigned short magic; // sm_10 specific magic number
unsigned short unused;
unsigned int thread_index; // thread ID for this buffer
unsigned int thread_buf_len; // per-thread buffer length
unsigned int offset; // most recent printf's offset
} cuPrintfHeaderSM10;
// Because we can't write an element which is not aligned to its bit-size,
// we have to align all sizes and variables on maximum-size boundaries.
// That means sizeof(double) in this case, but we'll use (long long) for
// better arch<1.3 support
#define CUPRINTF_ALIGN_SIZE sizeof(long long)
// All our headers are prefixed with a magic number so we know they're ready
#define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character
//
// getNextPrintfBufPtr
//
// Grabs a block of space in the general circular buffer, using an
// atomic function to ensure that it's ours. We handle wrapping
// around the circular buffer and return a pointer to a place which
// can be written to.
//
// Important notes:
// 1. We always grab CUPRINTF_MAX_LEN bytes
// 2. Because of 1, we never worry about wrapping around the end
// 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN
//
// This returns a pointer to the place where we own.
//
__device__ static char *getNextPrintfBufPtr()
{
// Initialisation check
if(!printfBufferPtr)
return NULL;
// Thread/block restriction check
if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y)))
return NULL;
if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z)))
return NULL;
// Conditional section, dependent on architecture
#if __CUDA_ARCH__ == 100
// For sm_10 architectures, we have no atomic add - this means we must split the
// entire available buffer into per-thread blocks. Inefficient, but what can you do.
int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z);
int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z +
(blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z);
// Find our own block of data and go to it. Make sure the per-thread length
// is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and
// alignment issues! We must round down, of course.
unsigned int thread_buf_len = printfBufferLength / thread_count;
thread_buf_len &= ~(CUPRINTF_MAX_LEN-1);
// We *must* have a thread buffer length able to fit at least two printfs (one header, one real)
if(thread_buf_len < (CUPRINTF_MAX_LEN * 2))
return NULL;
// Now address our section of the buffer. The first item is a header.
char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index);
cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer;
if(hdr.magic != CUPRINTF_SM10_MAGIC)
{
// If our header is not set up, initialise it
hdr.magic = CUPRINTF_SM10_MAGIC;
hdr.thread_index = thread_index;
hdr.thread_buf_len = thread_buf_len;
hdr.offset = 0; // Note we start at 0! We pre-increment below.
*(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header
// For initial setup purposes, we might need to init thread0's header too
// (so that cudaPrintfDisplay() below will work). This is only run once.
cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer;
tophdr->thread_buf_len = thread_buf_len;
}
// Adjust the offset by the right amount, and wrap it if need be
unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN;
if(offset >= hdr.thread_buf_len)
offset = CUPRINTF_MAX_LEN;
// Write back the new offset for next time and return a pointer to it
((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset;
return myPrintfBuffer + offset;
#else
// Much easier with an atomic operation!
size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer;
offset %= printfBufferLength;
return globalPrintfBuffer + offset;
#endif
}
//
// writePrintfHeader
//
// Inserts the header for containing our UID, fmt position and
// block/thread number. We generate it dynamically to avoid
// issues arising from requiring pre-initialisation.
//
__device__ static void writePrintfHeader(char *ptr, char *fmtptr)
{
if(ptr)
{
cuPrintfHeader header;
header.magic = CUPRINTF_SM11_MAGIC;
header.fmtoffset = (unsigned short)(fmtptr - ptr);
header.blockid = blockIdx.x + gridDim.x*blockIdx.y;
header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
*(cuPrintfHeader *)(void *)ptr = header;
}
}
//
// cuPrintfStrncpy
//
// This special strncpy outputs an aligned length value, followed by the
// string. It then zero-pads the rest of the string until a 64-aligned
// boundary. The length *includes* the padding. A pointer to the byte
// just after the \0 is returned.
//
// This function could overflow CUPRINTF_MAX_LEN characters in our buffer.
// To avoid it, we must count as we output and truncate where necessary.
//
__device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end)
{
// Initialisation and overflow check
if(!dest || !src || (dest >= end))
return NULL;
// Prepare to write the length specifier. We're guaranteed to have
// at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in
// chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE.
int *lenptr = (int *)(void *)dest;
int len = 0;
dest += CUPRINTF_ALIGN_SIZE;
// Now copy the string
while(n--)
{
if(dest >= end) // Overflow check
break;
len++;
*dest++ = *src;
if(*src++ == '\0')
break;
}
// Now write out the padding bytes, and we have our length.
while((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0))
{
len++;
*dest++ = 0;
}
*lenptr = len;
return (dest < end) ? dest : NULL; // Overflow means return NULL
}
//
// copyArg
//
// This copies a length specifier and then the argument out to the
// data buffer. Templates let the compiler figure all this out at
// compile-time, making life much simpler from the programming
// point of view. I'm assuming all (const char *) is a string, and
// everything else is the variable it points at. I'd love to see
// a better way of doing it, but aside from parsing the format
// string I can't think of one.
//
// The length of the data type is inserted at the beginning (so that
// the display can distinguish between float and double), and the
// pointer to the end of the entry is returned.
//
__device__ static char *copyArg(char *ptr, const char *arg, char *end)
{
// Initialisation check
if(!ptr || !arg)
return NULL;
// strncpy does all our work. We just terminate.
if((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL)
*ptr = 0;
return ptr;
}
template <typename T>
__device__ static char *copyArg(char *ptr, T &arg, char *end)
{
// Initialisation and overflow check. Alignment rules mean that
// we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need
// to check that one offset.
if(!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end))
return NULL;
// Write the length and argument
*(int *)(void *)ptr = sizeof(arg);
ptr += CUPRINTF_ALIGN_SIZE;
*(T *)(void *)ptr = arg;
ptr += CUPRINTF_ALIGN_SIZE;
*ptr = 0;
return ptr;
}
//
// cuPrintf
//
// Templated printf functions to handle multiple arguments.
// Note we return the total amount of data copied, not the number
// of characters output. But then again, who ever looks at the
// return from printf() anyway?
//
// The format is to grab a block of circular buffer space, the
// start of which will hold a header and a pointer to the format
// string. We then write in all the arguments, and finally the
// format string itself. This is to make it easy to prevent
// overflow of our buffer (we support up to 10 arguments, each of
// which can be 12 bytes in length - that means that only the
// format string (or a %s) can actually overflow; so the overflow
// check need only be in the strcpy function).
//
// The header is written at the very last because that's what
// makes it look like we're done.
//
// Errors, which are basically lack-of-initialisation, are ignored
// in the called functions because NULL pointers are passed around
//
// All printf variants basically do the same thing, setting up the
// buffer, writing all arguments, then finalising the header. For
// clarity, we'll pack the code into some big macros.
#define CUPRINTF_PREAMBLE \
char *start, *end, *bufptr, *fmtstart; \
if((start = getNextPrintfBufPtr()) == NULL) return 0; \
end = start + CUPRINTF_MAX_LEN; \
bufptr = start + sizeof(cuPrintfHeader);
// Posting an argument is easy
#define CUPRINTF_ARG(argname) \
bufptr = copyArg(bufptr, argname, end);
// After args are done, record start-of-fmt and write the fmt and header
#define CUPRINTF_POSTAMBLE \
fmtstart = bufptr; \
end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \
writePrintfHeader(start, end ? fmtstart : NULL); \
return end ? (int)(end - start) : 0;
__device__ int cuPrintf(const char *fmt)
{
CUPRINTF_PREAMBLE;
CUPRINTF_POSTAMBLE;
}
template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_ARG(arg10);
CUPRINTF_POSTAMBLE;
}
#undef CUPRINTF_PREAMBLE
#undef CUPRINTF_ARG
#undef CUPRINTF_POSTAMBLE
//
// cuPrintfRestrict
//
// Called to restrict output to a given thread/block.
// We store the info in "restrictRules", which is set up at
// init time by the host. It's not the cleanest way to do this
// because it means restrictions will last between
// invocations, but given the output-pointer continuity,
// I feel this is reasonable.
//
__device__ void cuPrintfRestrict(int threadid, int blockid)
{
int thread_count = blockDim.x * blockDim.y * blockDim.z;
if(((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED))
restrictRules.threadid = threadid;
int block_count = gridDim.x * gridDim.y;
if(((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED))
restrictRules.blockid = blockid;
}
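// Editorial sketch (not in the original source): a minimal kernel showing cuPrintfRestrict
// in use; the kernel name and argument are illustrative. Left commented out so it does not
// change what this translation unit defines.
/*
__global__ void restrictedPrintKernel(int val)
{
cuPrintfRestrict(0, 0); // only thread 0 of block 0 passes the output check
cuPrintf("Value is: %d\n", val);
}
*/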
///////////////////////////////////////////////////////////////////////////////
// HOST SIDE
#include <stdio.h>
static FILE *printf_fp;
static char *printfbuf_start=NULL;
static char *printfbuf_device=NULL;
static int printfbuf_len=0;
//
// outputPrintfData
//
// Our own internal function, which takes a pointer to a data buffer
// and passes it through libc's printf for output.
//
// We receive the format string and a pointer to where the data is
// held. We then run through and print it out.
//
// Returns 0 on failure, 1 on success
//
static int outputPrintfData(char *fmt, char *data)
{
// Format string is prefixed by a length that we don't need
fmt += CUPRINTF_ALIGN_SIZE;
// Now run through it, printing everything we can. We must
// run to every % character, extract only that, and use printf
// to format it.
char *p = strchr(fmt, '%');
while(p != NULL)
{
// Print up to the % character
*p = '\0';
fputs(fmt, printf_fp);
*p = '%'; // Put back the %
// Now handle the format specifier
char *format = p++; // Points to the '%'
p += strcspn(p, "%cdiouxXeEfgGaAnps");
if(*p == '\0') // If no format specifier, print the whole thing
{
fmt = format;
break;
}
// Cut out the format bit and use printf to print it. It's prefixed
// by its length.
int arglen = *(int *)data;
if(arglen > CUPRINTF_MAX_LEN)
{
fputs("Corrupt printf buffer data - aborting\n", printf_fp);
return 0;
}
data += CUPRINTF_ALIGN_SIZE;
char specifier = *p++;
char c = *p; // Store for later
*p = '\0';
switch(specifier)
{
// These all take integer arguments
case 'c':
case 'd':
case 'i':
case 'o':
case 'u':
case 'x':
case 'X':
case 'p':
fprintf(printf_fp, format, *((int *)data));
break;
// These all take double arguments
case 'e':
case 'E':
case 'f':
case 'g':
case 'G':
case 'a':
case 'A':
if(arglen == 4) // Float vs. Double thing
fprintf(printf_fp, format, *((float *)data));
else
fprintf(printf_fp, format, *((double *)data));
break;
// Strings are handled in a special way
case 's':
fprintf(printf_fp, format, (char *)data);
break;
// % is special
case '%':
fprintf(printf_fp, "%%");
break;
// Everything else is just printed out as-is
default:
fprintf(printf_fp, "%s", format);
break;
}
data += CUPRINTF_ALIGN_SIZE; // Move on to next argument
*p = c; // Restore what we removed
fmt = p; // Adjust fmt string to be past the specifier
p = strchr(fmt, '%'); // and get the next specifier
}
// Print out the last of the string
fputs(fmt, printf_fp);
return 1;
}
//
// doPrintfDisplay
//
// This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the
// print function above to display them. We've got this separate from
// cudaPrintfDisplay() below so we can handle the SM_10 architecture
// partitioning.
//
static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr)
{
// Grab, piece-by-piece, each output element until we catch
// up with the circular buffer end pointer
int printf_count=0;
char printfbuf_local[CUPRINTF_MAX_LEN+1];
printfbuf_local[CUPRINTF_MAX_LEN] = '\0';
while(bufptr != endptr)
{
// Wrap ourselves at the end-of-buffer
if(bufptr == bufend)
bufptr = bufstart;
// Adjust our start pointer to within the circular buffer and copy a block.
cudaMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, cudaMemcpyDeviceToHost);
// If the magic number isn't valid, then this write hasn't gone through
// yet and we'll wait until it does (or we're past the end for non-async printfs).
cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local;
if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN))
{
//fprintf(printf_fp, "Bad magic number in printf header\n");
break;
}
// Extract all the info and get this printf done
if(headings)
fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid);
if(hdr->fmtoffset == 0)
fprintf(printf_fp, "printf buffer overflow\n");
else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader)))
break;
printf_count++;
// Clear if asked
if(clear)
cudaMemset(bufptr, 0, CUPRINTF_MAX_LEN);
// Now advance our start location, because we're done, and keep copying
bufptr += CUPRINTF_MAX_LEN;
}
return printf_count;
}
//
// cudaPrintfInit
//
// Takes a buffer length to allocate, creates the memory on the device and
// returns a pointer to it for when a kernel is called. It's up to the caller
// to free it.
//
extern "C" cudaError_t cudaPrintfInit(size_t bufferLen)
{
// Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN
bufferLen = (bufferLen < (size_t)CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen;
if((bufferLen % CUPRINTF_MAX_LEN) > 0)
bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN));
printfbuf_len = (int)bufferLen;
// Allocate a print buffer on the device and zero it
if(cudaMalloc((void **)&printfbuf_device, printfbuf_len) != cudaSuccess)
return cudaErrorInitializationError;
cudaMemset(printfbuf_device, 0, printfbuf_len);
printfbuf_start = printfbuf_device; // Where we start reading from
// No restrictions to begin with
cuPrintfRestriction restrict;
restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED;
cudaMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict));
// Initialise the buffer and the respective lengths/pointers.
cudaMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *));
cudaMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *));
cudaMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len));
return cudaSuccess;
}
//
// cudaPrintfEnd
//
// Frees up the memory which we allocated
//
extern "C" void cudaPrintfEnd()
{
if(!printfbuf_start || !printfbuf_device)
return;
cudaFree(printfbuf_device);
printfbuf_start = printfbuf_device = NULL;
}
//
// cudaPrintfDisplay
//
// Each call to this function dumps the entire current contents
// of the printf buffer to the pre-specified FILE pointer. The
// circular "start" pointer is advanced so that subsequent calls
// dump only new stuff.
//
// In the case of async memory access (via streams), call this
// repeatedly to keep trying to empty the buffer. If it's a sync
// access, then the whole buffer should empty in one go.
//
// Arguments:
// outputFP - File descriptor to output to (NULL => stdout)
// showThreadID - If true, prints [block,thread] before each line
//
extern "C" cudaError_t cudaPrintfDisplay(void *outputFP, bool showThreadID)
{
printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP);
// For now, we force "synchronous" mode which means we're not concurrent
// with kernel execution. This also means we don't need clearOnPrint.
// If you're patching it for async operation, here's where you want it.
bool sync_printfs = true;
bool clearOnPrint = false;
// Initialisation check
if(!printfbuf_start || !printfbuf_device || !printf_fp)
return cudaErrorMissingConfiguration;
// To determine which architecture we're using, we read the
// first short from the buffer - it'll be the magic number
// relating to the version.
unsigned short magic;
cudaMemcpy(&magic, printfbuf_device, sizeof(unsigned short), cudaMemcpyDeviceToHost);
// For SM_10 architecture, we've split our buffer into one-per-thread.
// That means we must do each thread block separately. It'll require
// extra reading. We also, for now, don't support async printfs because
// that requires tracking one start pointer per thread.
if(magic == CUPRINTF_SM10_MAGIC)
{
sync_printfs = true;
clearOnPrint = false;
int blocklen = 0;
char *blockptr = printfbuf_device;
while(blockptr < (printfbuf_device + printfbuf_len))
{
cuPrintfHeaderSM10 hdr;
cudaMemcpy(&hdr, blockptr, sizeof(hdr), cudaMemcpyDeviceToHost);
// We get our block-size-step from the very first header
if(hdr.thread_buf_len != 0)
blocklen = hdr.thread_buf_len;
// No magic number means no printfs from this thread
if(hdr.magic != CUPRINTF_SM10_MAGIC)
{
if(blocklen == 0)
{
fprintf(printf_fp, "No printf headers found at all!\n");
break; // No valid headers!
}
blockptr += blocklen;
continue;
}
// If "offset" is non-zero then we can print the block contents
if(hdr.offset > 0)
{
// For synchronous printfs, we must print from endptr->bufend, then from start->end
if(sync_printfs)
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len);
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN);
}
// Move on to the next block and loop again
blockptr += hdr.thread_buf_len;
}
}
// For SM_11 and up, everything is a single buffer and it's simple
else if(magic == CUPRINTF_SM11_MAGIC)
{
// Grab the current "end of circular buffer" pointer.
char *printfbuf_end = NULL;
cudaMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *));
// Adjust our starting and ending pointers to within the block
char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device;
char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device;
// For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular
// buffer wrap carefully because we could miss those past "end".
if(sync_printfs)
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len);
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr);
printfbuf_start = printfbuf_end;
}
else
;//printf("Bad magic number in cuPrintf buffer header\n");
// If we were synchronous, then we must ensure that the memory is cleared on exit
// otherwise another kernel launch with a different grid size could conflict.
if(sync_printfs)
cudaMemset(printfbuf_device, 0, printfbuf_len);
return cudaSuccess;
}
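//
// Editor's hedged usage sketch (not part of the original cuPrintf source): the typical
// host-side call sequence around a kernel that writes records with the device-side cuPrintf().
// The buffer size and the synchronization point are illustrative assumptions.
//
static void cuPrintfUsageSketch()
{
    cudaPrintfInit(1 << 20);          // allocate a 1 MB circular record buffer on the device
    // ... launch a kernel that calls cuPrintf() here ...
    cudaDeviceSynchronize();          // make sure every record has been written
    cudaPrintfDisplay(stdout, true);  // dump the records, prefixing each line with [block, thread]
    cudaPrintfEnd();                  // free the device buffer
}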
// Cleanup
#undef CUPRINTF_MAX_LEN
#undef CUPRINTF_ALIGN_SIZE
#undef CUPRINTF_SM10_MAGIC
#undef CUPRINTF_SM11_MAGIC
#endif
|
aba68f2b6cef0eccaa4c5e08b49ab7a6bff197e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hpc/rll/cuda/rl_utils/entry.h"
#include "hpc/rll/cuda/rl_utils/gae_kernel.h"
namespace hpc {
namespace rll {
namespace cuda {
void GaeForward(
const std::vector<torch::Tensor>& inputs,
std::vector<torch::Tensor>& outputs,
float gamma,
float lambda) {
unsigned int index = 0;
const torch::Tensor& value = inputs[index++];
const torch::Tensor& reward = inputs[index++];
index = 0;
torch::Tensor& adv = outputs[index++];
const unsigned int time_step = reward.size(0);
const unsigned int batch_size = reward.size(1);
unsigned int block_size = 1 * WARP_SIZE; // single warp to utilize more blocks
unsigned int grid_size = (batch_size + block_size - 1) / block_size;
hipLaunchKernelGGL(( gaeForwardKernel), dim3(grid_size), dim3(block_size), 0, 0,
time_step, batch_size, gamma, lambda,
value.data_ptr<float>(), reward.data_ptr<float>(), adv.data_ptr<float>());
}
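// Editor's note (hedged, not part of the original source): gaeForwardKernel presumably evaluates
// the standard generalized advantage estimation recurrence for each batch column,
//   delta_t = r_t + gamma * V_{t+1} - V_t,   A_t = delta_t + gamma * lambda * A_{t+1},
// scanning backward over the time_step dimension with one thread per batch element, which is
// why a single warp per block is enough to keep many blocks in flight.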
} // namespace cuda
} // namespace rll
} // namespace hpc
| aba68f2b6cef0eccaa4c5e08b49ab7a6bff197e7.cu | #include "hpc/rll/cuda/rl_utils/entry.h"
#include "hpc/rll/cuda/rl_utils/gae_kernel.h"
namespace hpc {
namespace rll {
namespace cuda {
void GaeForward(
const std::vector<torch::Tensor>& inputs,
std::vector<torch::Tensor>& outputs,
float gamma,
float lambda) {
unsigned int index = 0;
const torch::Tensor& value = inputs[index++];
const torch::Tensor& reward = inputs[index++];
index = 0;
torch::Tensor& adv = outputs[index++];
const unsigned int time_step = reward.size(0);
const unsigned int batch_size = reward.size(1);
unsigned int block_size = 1 * WARP_SIZE; // single warp to utilize more blocks
unsigned int grid_size = (batch_size + block_size - 1) / block_size;
gaeForwardKernel<<<grid_size, block_size>>>(
time_step, batch_size, gamma, lambda,
value.data_ptr<float>(), reward.data_ptr<float>(), adv.data_ptr<float>());
}
} // namespace cuda
} // namespace rll
} // namespace hpc
|
3f87c862601c79f7e1cd898863123d6895eb0ab9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
const int image_size = 4096;
const int filter_size = 3;
__global__ void conv2d(int* A, int* B, int* C, int N, int n)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
const int offset = n / 2;
int row_i = threadIdx.y - offset;
int col_i = threadIdx.x - offset;
__shared__ int shm[16][16];
shm[threadIdx.y][threadIdx.x] = A[row * N + col];
__syncthreads();
int val = 0;
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
if ((0 <= (i + col_i) && (i + col_i) < 16))
if ((0 <= (j + row_i) && (j + row_i) < 16)) val += shm[j + row_i][i + col_i] * C[j * n + i];
B[row * N + col] = val;
}
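// Editor's note (hedged, not part of the original source): the kernel stages one 16 x 16 tile in
// shared memory without a halo, so filter taps that fall outside the current tile are skipped by
// the bounds checks instead of being fetched from neighboring tiles. With every input pixel set
// to 1 and a 3 x 3 filter of 2s, interior outputs are therefore 18 while tile-border outputs are
// smaller (12 on edges, 8 in corners).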
int main()
{
int *A, *A_d, *B, *B_d, *C, *C_d;
const int data_size = image_size * image_size * sizeof(int);
const int kernel_size = filter_size * filter_size * sizeof(int);
hipHostMalloc(&A, data_size);
hipHostMalloc(&B, data_size);
hipHostMalloc(&C, kernel_size);
for (int i = 0; i < image_size * image_size; i++)
A[i] = 1;
memset(B, 0, data_size);
for (int i = 0; i < filter_size * filter_size; i++)
C[i] = 2;
hipMalloc(&A_d, data_size);
hipMalloc(&B_d, data_size);
hipMalloc(&C_d, kernel_size);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipMemcpy(A_d, A, data_size, hipMemcpyHostToDevice);
hipMemcpy(B_d, B, data_size, hipMemcpyHostToDevice);
hipMemcpy(C_d, C, kernel_size, hipMemcpyHostToDevice);
const int block_size = 16;
const int grid_size = (image_size + block_size - 1) / block_size;
dim3 grid(grid_size, grid_size);
dim3 block(block_size, block_size);
hipEventRecord(start);
hipLaunchKernelGGL(( conv2d), dim3(grid), dim3(block), 0, 0, A_d, B_d, C_d, image_size, filter_size);
hipEventRecord(stop);
hipMemcpy(B, B_d, data_size, hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 10; j++)
std::cout << B[i * image_size + j] << " ";
std::cout << "\n";
}
std::cout << "Kernel run time: " << milliseconds << " ms\n";
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
hipHostFree(A);
hipHostFree(B);
hipHostFree(C);
}
| 3f87c862601c79f7e1cd898863123d6895eb0ab9.cu | #include <iostream>
const int image_size = 4096;
const int filter_size = 3;
__global__ void conv2d(int* A, int* B, int* C, int N, int n)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
const int offset = n / 2;
int row_i = threadIdx.y - offset;
int col_i = threadIdx.x - offset;
__shared__ int shm[16][16];
shm[threadIdx.y][threadIdx.x] = A[row * N + col];
__syncthreads();
int val = 0;
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
if ((0 <= (i + col_i) && (i + col_i) < 16))
if ((0 <= (j + row_i) && (j + row_i) < 16)) val += shm[j + row_i][i + col_i] * C[j * n + i];
B[row * N + col] = val;
}
int main()
{
int *A, *A_d, *B, *B_d, *C, *C_d;
const int data_size = image_size * image_size * sizeof(int);
const int kernel_size = filter_size * filter_size * sizeof(int);
cudaMallocHost(&A, data_size);
cudaMallocHost(&B, data_size);
cudaMallocHost(&C, kernel_size);
for (int i = 0; i < image_size * image_size; i++)
A[i] = 1;
memset(B, 0, data_size);
for (int i = 0; i < filter_size * filter_size; i++)
C[i] = 2;
cudaMalloc(&A_d, data_size);
cudaMalloc(&B_d, data_size);
cudaMalloc(&C_d, kernel_size);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMemcpy(A_d, A, data_size, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B, data_size, cudaMemcpyHostToDevice);
cudaMemcpy(C_d, C, kernel_size, cudaMemcpyHostToDevice);
const int block_size = 16;
const int grid_size = (image_size + block_size - 1) / block_size;
dim3 grid(grid_size, grid_size);
dim3 block(block_size, block_size);
cudaEventRecord(start);
conv2d<<<grid, block>>>(A_d, B_d, C_d, image_size, filter_size);
cudaEventRecord(stop);
cudaMemcpy(B, B_d, data_size, cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 10; j++)
std::cout << B[i * image_size + j] << " ";
std::cout << "\n";
}
std::cout << "Kernel run time: " << milliseconds << " ms\n";
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
cudaFreeHost(A);
cudaFreeHost(B);
cudaFreeHost(C);
}
|
d762dd68fca1ae956fa4a4c581869dd86f3e29f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <torch/torch.h>
#include <cmath>
#include <vector>
// counting
// input N*3 int32 tensor output N*1 int64 tensor
__global__ void count_kernel(int N, const int *__restrict__ data,
int *__restrict__ out) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N && data[i] >= 0) {
atomicAdd(&out[data[i]], 1);
}
}
void count_wrapper(int N, const int *data, int *out) {
hipLaunchKernelGGL(( count_kernel), dim3(ceil((double)N / 512)), dim3(512), 0, 0, N, data, out);
}
// make sure indices is int type
// feat: (b,c,n) indices: (b,n) -> out: (b,c,s), out_indices: (b,n)
// (preprocessed indices)
at::Tensor count_cuda(const at::Tensor idx, const int s) {
int N = idx.size(0);
at::Tensor out =
torch::zeros({s}, at::device(idx.device()).dtype(at::ScalarType::Int));
count_wrapper(N, idx.data_ptr<int>(), out.data_ptr<int>());
return out;
}
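// Editor's hedged usage sketch (not part of the original source): count_cuda is effectively a
// bincount of the non-negative entries of idx over s bins; the tensor values below are
// illustrative only.
static void count_cuda_sketch() {
  at::Tensor idx = torch::tensor({0, 2, 2, -1, 1},
                                 at::device(at::kCUDA).dtype(at::ScalarType::Int));
  at::Tensor hist = count_cuda(idx, 3);  // expected contents: {1, 1, 2}; the -1 entry is skipped
  (void)hist;
}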
| d762dd68fca1ae956fa4a4c581869dd86f3e29f1.cu | #include <stdio.h>
#include <stdlib.h>
#include <torch/torch.h>
#include <cmath>
#include <vector>
// counting
// input N*3 int32 tensor output N*1 int64 tensor
__global__ void count_kernel(int N, const int *__restrict__ data,
int *__restrict__ out) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N && data[i] >= 0) {
atomicAdd(&out[data[i]], 1);
}
}
void count_wrapper(int N, const int *data, int *out) {
count_kernel<<<ceil((double)N / 512), 512>>>(N, data, out);
}
// make sure indices is int type
// feat: (b,c,n) indices: (b,n) -> out: (b,c,s), out_indices: (b,n)
// (preprocessed indices)
at::Tensor count_cuda(const at::Tensor idx, const int s) {
int N = idx.size(0);
at::Tensor out =
torch::zeros({s}, at::device(idx.device()).dtype(at::ScalarType::Int));
count_wrapper(N, idx.data_ptr<int>(), out.data_ptr<int>());
return out;
}
|
7e2bb2d0eee8c244f10380540a41fbe6a4cd64ec.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hash/concurrent_unordered_map.cuh>
#include <cudf/types.hpp>
#include <cudf_test/base_fixture.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/device_vector.h>
#include <thrust/logical.h>
#include <gtest/gtest.h>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <random>
#include <unordered_map>
#include <vector>
template <typename K, typename V>
struct key_value_types {
using key_type = K;
using value_type = V;
using pair_type = thrust::pair<K, V>;
using map_type = concurrent_unordered_map<key_type, value_type>;
};
template <typename T>
struct InsertTest : public cudf::test::BaseFixture {
using key_type = typename T::key_type;
using value_type = typename T::value_type;
using pair_type = typename T::pair_type;
using map_type = typename T::map_type;
InsertTest()
{
// prevent overflow of small types
const size_t input_size =
std::min(static_cast<key_type>(size), std::numeric_limits<key_type>::max());
pairs.resize(input_size);
map = std::move(map_type::create(compute_hash_table_size(size)));
rmm::cuda_stream_default.synchronize();
}
const cudf::size_type size{10000};
rmm::device_vector<pair_type> pairs;
std::unique_ptr<map_type, std::function<void(map_type*)>> map;
};
using TestTypes = ::testing::Types<key_value_types<int32_t, int32_t>,
key_value_types<int64_t, int64_t>,
key_value_types<int8_t, int8_t>,
key_value_types<int16_t, int16_t>,
key_value_types<int8_t, float>,
key_value_types<int16_t, double>,
key_value_types<int32_t, float>,
key_value_types<int64_t, double>>;
TYPED_TEST_CASE(InsertTest, TestTypes);
template <typename map_type, typename pair_type>
struct insert_pair {
insert_pair(map_type _map) : map{_map} {}
__device__ bool operator()(pair_type const& pair)
{
auto result = map.insert(pair);
if (result.first == map.end()) { return false; }
return result.second;
}
map_type map;
};
template <typename map_type, typename pair_type>
struct find_pair {
find_pair(map_type _map) : map{_map} {}
__device__ bool operator()(pair_type const& pair)
{
auto result = map.find(pair.first);
if (result == map.end()) { return false; }
return *result == pair;
}
map_type map;
};
template <typename pair_type,
typename key_type = typename pair_type::first_type,
typename value_type = typename pair_type::second_type>
struct unique_pair_generator {
__device__ pair_type operator()(cudf::size_type i)
{
return thrust::make_pair(key_type(i), value_type(i));
}
};
template <typename pair_type,
typename key_type = typename pair_type::first_type,
typename value_type = typename pair_type::second_type>
struct identical_pair_generator {
identical_pair_generator(key_type k = 42, value_type v = 42) : key{k}, value{v} {}
__device__ pair_type operator()(cudf::size_type i) { return thrust::make_pair(key, value); }
key_type key;
value_type value;
};
template <typename pair_type,
typename key_type = typename pair_type::first_type,
typename value_type = typename pair_type::second_type>
struct identical_key_generator {
identical_key_generator(key_type k = 42) : key{k} {}
__device__ pair_type operator()(cudf::size_type i)
{
return thrust::make_pair(key, value_type(i));
}
key_type key;
};
TYPED_TEST(InsertTest, UniqueKeysUniqueValues)
{
using map_type = typename TypeParam::map_type;
using pair_type = typename TypeParam::pair_type;
thrust::tabulate(this->pairs.begin(), this->pairs.end(), unique_pair_generator<pair_type>{});
// All pairs should be new inserts
EXPECT_TRUE(thrust::all_of(
this->pairs.begin(), this->pairs.end(), insert_pair<map_type, pair_type>{*this->map}));
// All pairs should be present in the map
EXPECT_TRUE(thrust::all_of(
this->pairs.begin(), this->pairs.end(), find_pair<map_type, pair_type>{*this->map}));
}
TYPED_TEST(InsertTest, IdenticalKeysIdenticalValues)
{
using map_type = typename TypeParam::map_type;
using pair_type = typename TypeParam::pair_type;
thrust::tabulate(this->pairs.begin(), this->pairs.end(), identical_pair_generator<pair_type>{});
// Insert a single pair
EXPECT_TRUE(thrust::all_of(
this->pairs.begin(), this->pairs.begin() + 1, insert_pair<map_type, pair_type>{*this->map}));
// Identical inserts should all return false (no new insert)
EXPECT_FALSE(thrust::all_of(
this->pairs.begin(), this->pairs.end(), insert_pair<map_type, pair_type>{*this->map}));
// All pairs should be present in the map
EXPECT_TRUE(thrust::all_of(
this->pairs.begin(), this->pairs.end(), find_pair<map_type, pair_type>{*this->map}));
}
TYPED_TEST(InsertTest, IdenticalKeysUniqueValues)
{
using map_type = typename TypeParam::map_type;
using pair_type = typename TypeParam::pair_type;
thrust::tabulate(this->pairs.begin(), this->pairs.end(), identical_key_generator<pair_type>{});
// Insert a single pair
EXPECT_TRUE(thrust::all_of(
this->pairs.begin(), this->pairs.begin() + 1, insert_pair<map_type, pair_type>{*this->map}));
// Identical key inserts should all return false (no new insert)
EXPECT_FALSE(thrust::all_of(
this->pairs.begin() + 1, this->pairs.end(), insert_pair<map_type, pair_type>{*this->map}));
// Only first pair is present in map
EXPECT_TRUE(thrust::all_of(
this->pairs.begin(), this->pairs.begin() + 1, find_pair<map_type, pair_type>{*this->map}));
EXPECT_FALSE(thrust::all_of(
this->pairs.begin() + 1, this->pairs.end(), find_pair<map_type, pair_type>{*this->map}));
}
CUDF_TEST_PROGRAM_MAIN()
| 7e2bb2d0eee8c244f10380540a41fbe6a4cd64ec.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hash/concurrent_unordered_map.cuh>
#include <cudf/types.hpp>
#include <cudf_test/base_fixture.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/device_vector.h>
#include <thrust/logical.h>
#include <gtest/gtest.h>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <random>
#include <unordered_map>
#include <vector>
template <typename K, typename V>
struct key_value_types {
using key_type = K;
using value_type = V;
using pair_type = thrust::pair<K, V>;
using map_type = concurrent_unordered_map<key_type, value_type>;
};
template <typename T>
struct InsertTest : public cudf::test::BaseFixture {
using key_type = typename T::key_type;
using value_type = typename T::value_type;
using pair_type = typename T::pair_type;
using map_type = typename T::map_type;
InsertTest()
{
// prevent overflow of small types
const size_t input_size =
std::min(static_cast<key_type>(size), std::numeric_limits<key_type>::max());
pairs.resize(input_size);
map = std::move(map_type::create(compute_hash_table_size(size)));
rmm::cuda_stream_default.synchronize();
}
const cudf::size_type size{10000};
rmm::device_vector<pair_type> pairs;
std::unique_ptr<map_type, std::function<void(map_type*)>> map;
};
using TestTypes = ::testing::Types<key_value_types<int32_t, int32_t>,
key_value_types<int64_t, int64_t>,
key_value_types<int8_t, int8_t>,
key_value_types<int16_t, int16_t>,
key_value_types<int8_t, float>,
key_value_types<int16_t, double>,
key_value_types<int32_t, float>,
key_value_types<int64_t, double>>;
TYPED_TEST_CASE(InsertTest, TestTypes);
template <typename map_type, typename pair_type>
struct insert_pair {
insert_pair(map_type _map) : map{_map} {}
__device__ bool operator()(pair_type const& pair)
{
auto result = map.insert(pair);
if (result.first == map.end()) { return false; }
return result.second;
}
map_type map;
};
template <typename map_type, typename pair_type>
struct find_pair {
find_pair(map_type _map) : map{_map} {}
__device__ bool operator()(pair_type const& pair)
{
auto result = map.find(pair.first);
if (result == map.end()) { return false; }
return *result == pair;
}
map_type map;
};
template <typename pair_type,
typename key_type = typename pair_type::first_type,
typename value_type = typename pair_type::second_type>
struct unique_pair_generator {
__device__ pair_type operator()(cudf::size_type i)
{
return thrust::make_pair(key_type(i), value_type(i));
}
};
template <typename pair_type,
typename key_type = typename pair_type::first_type,
typename value_type = typename pair_type::second_type>
struct identical_pair_generator {
identical_pair_generator(key_type k = 42, value_type v = 42) : key{k}, value{v} {}
__device__ pair_type operator()(cudf::size_type i) { return thrust::make_pair(key, value); }
key_type key;
value_type value;
};
template <typename pair_type,
typename key_type = typename pair_type::first_type,
typename value_type = typename pair_type::second_type>
struct identical_key_generator {
identical_key_generator(key_type k = 42) : key{k} {}
__device__ pair_type operator()(cudf::size_type i)
{
return thrust::make_pair(key, value_type(i));
}
key_type key;
};
TYPED_TEST(InsertTest, UniqueKeysUniqueValues)
{
using map_type = typename TypeParam::map_type;
using pair_type = typename TypeParam::pair_type;
thrust::tabulate(this->pairs.begin(), this->pairs.end(), unique_pair_generator<pair_type>{});
// All pairs should be new inserts
EXPECT_TRUE(thrust::all_of(
this->pairs.begin(), this->pairs.end(), insert_pair<map_type, pair_type>{*this->map}));
// All pairs should be present in the map
EXPECT_TRUE(thrust::all_of(
this->pairs.begin(), this->pairs.end(), find_pair<map_type, pair_type>{*this->map}));
}
TYPED_TEST(InsertTest, IdenticalKeysIdenticalValues)
{
using map_type = typename TypeParam::map_type;
using pair_type = typename TypeParam::pair_type;
thrust::tabulate(this->pairs.begin(), this->pairs.end(), identical_pair_generator<pair_type>{});
// Insert a single pair
EXPECT_TRUE(thrust::all_of(
this->pairs.begin(), this->pairs.begin() + 1, insert_pair<map_type, pair_type>{*this->map}));
// Identical inserts should all return false (no new insert)
EXPECT_FALSE(thrust::all_of(
this->pairs.begin(), this->pairs.end(), insert_pair<map_type, pair_type>{*this->map}));
// All pairs should be present in the map
EXPECT_TRUE(thrust::all_of(
this->pairs.begin(), this->pairs.end(), find_pair<map_type, pair_type>{*this->map}));
}
TYPED_TEST(InsertTest, IdenticalKeysUniqueValues)
{
using map_type = typename TypeParam::map_type;
using pair_type = typename TypeParam::pair_type;
thrust::tabulate(this->pairs.begin(), this->pairs.end(), identical_key_generator<pair_type>{});
// Insert a single pair
EXPECT_TRUE(thrust::all_of(
this->pairs.begin(), this->pairs.begin() + 1, insert_pair<map_type, pair_type>{*this->map}));
// Identical key inserts should all return false (no new insert)
EXPECT_FALSE(thrust::all_of(
this->pairs.begin() + 1, this->pairs.end(), insert_pair<map_type, pair_type>{*this->map}));
// Only first pair is present in map
EXPECT_TRUE(thrust::all_of(
this->pairs.begin(), this->pairs.begin() + 1, find_pair<map_type, pair_type>{*this->map}));
EXPECT_FALSE(thrust::all_of(
this->pairs.begin() + 1, this->pairs.end(), find_pair<map_type, pair_type>{*this->map}));
}
CUDF_TEST_PROGRAM_MAIN()
|
89f929b2226f5208a499380fb2b456533f83f4b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "CImg.h"
using namespace cimg_library;
#define K 10
#define W 3
#define DIM_BLOQUE 32
#define SIGMA 0.9
char path[20] = "img\\fing_xl.pgm";
__int64 ctr1 = 0, ctr2 = 0, freq = 0;
void clockStart(){
QueryPerformanceCounter((LARGE_INTEGER *)&ctr1);
}
void clockStop(const char * str){
QueryPerformanceCounter((LARGE_INTEGER *)&ctr2);
QueryPerformanceFrequency((LARGE_INTEGER *)&freq);
printf("%s --> %fs\n",str,(ctr2 - ctr1) * 1.0 / freq);
}
void cudaCheck()
{
hipError_t err;
err = hipGetLastError();
if(err != hipSuccess)
{
printf(" hipGetLastError() returned %d: %s\n", err, hipGetErrorString(err));
}else{
//printf(" todo ok\n" );
}
}
__global__ void nlm_kernel(float* inputArray_GPU,float* outputArray_GPU, int width, int height)
{
__shared__ float arrayincom[((K/2+W/2)*2+DIM_BLOQUE)*((K/2+W/2)*2+DIM_BLOQUE)];
int marco_medio=(int)K/2+(int)W/2;
int marco=marco_medio*2;
int dim_arraycom=marco_medio*2+DIM_BLOQUE;
int dim_in_out_array=marco_medio*2+width;
// Copy the pixels that belong to this thread
for(int i=threadIdx.x; i<blockDim.x+marco ; i+=blockDim.x){
for(int j=threadIdx.y; j<blockDim.y+marco ; j+=blockDim.y){
arrayincom[i+j*dim_arraycom]=inputArray_GPU[(i+blockIdx.x * blockDim.x)+(j+blockIdx.y * blockDim.y)*dim_in_out_array]; // Double-check the indices
}
}
// Synchronize the threads to make sure everything is already loaded into shared memory
__syncthreads();
// Run NLM
float suma=0;
float consta=0;
float dist=0;
float peso=0;
for (int sx = (threadIdx.x+marco_medio) - K/2 ; sx < (threadIdx.x+marco_medio) + K/2 ; sx++){
for (int sy = (threadIdx.y+marco_medio) - K/2 ; sy < (threadIdx.y+marco_medio) + K/2 ; sy++){
for (int wx = - W/2 ; wx < W/2 ; wx++){
for (int wy = - W/2 ; wy < W/2 ; wy++){
dist +=powf( (arrayincom[((threadIdx.x+marco_medio) + wx) + ((threadIdx.y+marco_medio) + wy)*dim_arraycom] - arrayincom[(sx + wx) +(sy + wy)*dim_arraycom]) ,2 );
}
}
peso = expf(-dist/powf(SIGMA,2));
suma += arrayincom[sx+sy*dim_arraycom] * peso;
consta += peso;
dist=0;
}
}
outputArray_GPU[(blockIdx.x * blockDim.x +threadIdx.x)+(blockIdx.y * blockDim.y +threadIdx.y)*width] =suma/consta;
}
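// Editor's note (hedged, not part of the original source): nlm_kernel above implements the
// non-local means weight
//   w(p, s) = exp( -sum_o (I(p + o) - I(s + o))^2 / SIGMA^2 ),
// where o ranges over the W x W comparison window and s over the K x K search window, and each
// output pixel is sum_s w(p, s) * I(s) / sum_s w(p, s), with all reads served from shared memory.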
void nlm_Normal(float *input,float norma,int size)
{
for(int i=0;i<size;i++){
input[i] = input[i]/norma;
}
}
void nlm_DesNormal(float *input,float norma,int size)
{
for(int i=0;i<size;i++){
input[i] = input[i]*norma;
}
}
void nlm_ImprimirValores(float *input,int size)
{
for(int i=0;i<size;i++){
printf("%f ",input[i]);
if (i%1024==0)
printf("\n");
}
}
void nlm_CPU(float * inputImage, float* outputImage, int width, int height)
{
float suma=0;
float consta=0;
float dist=0;
float peso=0;
for (int px = 0 ; px < width ; px++){
for (int py = 0 ; py < height ; py++){
for (int sx = px - K/2 ; sx < px + K/2 ; sx++){
for (int sy = py - K/2 ; sy < py + K/2 ; sy++){
if ((sx>0)&&(sx<width)&&(sy>0)&&(sy<height)){
for (int wx = - W/2 ; wx < W/2 ; wx++){
for (int wy = - W/2 ; wy < W/2 ; wy++){
if (((px+wx>0)&&(px+wx<width)&&(py+wy>0)&&(py+wy<height))&&((sx + wx>0)&&(sx + wx<width)&&(sy + wy>0)&&(sy + wy<height)))
dist += pow( (inputImage[(px + wx) + (py + wy)*width] - inputImage[(sx + wx) +(sy + wy)*width]) ,2 );
}
}
peso = exp(-dist/pow(SIGMA,2));
suma += inputImage[sx+sy*width] * peso;
consta += peso;
dist=0;
}
}
}
outputImage[py*width + px] = suma / consta;
suma=0;
consta=0;
}
}
}
int main()
{
/* LOAD IMAGE */
//CImg<float> image("img\\fing.pgm");
CImg<float> image(path);
float * img_matrix = image.data();
CImg<float> imageOutGPU(path);
float * img_matrixOutGPU = imageOutGPU.data();
CImg<float> imageOutCPU(path);
float * img_matrixOutCPU = imageOutCPU.data();
/* END LOAD */
int width=image.width();
int height=image.height();
int k=K/2;
int w=W/2;
size_t size = width*height*sizeof(float);
size_t size2=((k+w)*2+height)*((k+w)*2+width)*sizeof(float);
// We have to normalize; see the note at the end of the assignment handout
nlm_Normal(img_matrix,255.0,width*height);
// GPU BEGIN
float* inputArray_GPU;
float* outputArray_GPU;
hipMalloc(&(inputArray_GPU),size2);
hipMalloc(&(outputArray_GPU),size);
/////////////////////////////////////
//copy input data to the GPU
///////////////////////////////////////
hipMemset(outputArray_GPU, 0, size);
hipMemset(inputArray_GPU, 0, size2);
// Copy to GPU memory, taking into account that it is 2*(k+w) larger
int offset=(k+w)*(width+(k+w)*2);
for (int i=0; i<height; i++){
offset+=(k+w);
hipMemcpy(&inputArray_GPU[offset],&img_matrix[i*width],width*sizeof(float),hipMemcpyHostToDevice);
offset+=width+(k+w);
}
//Configure the grid
dim3 tamGrid (width/DIM_BLOQUE, height/DIM_BLOQUE); //Grid dimension
dim3 tamBlock(DIM_BLOQUE, DIM_BLOQUE); //Block dimension
clockStart();
hipLaunchKernelGGL(( nlm_kernel), dim3(tamGrid), dim3(tamBlock), 0, 0, inputArray_GPU, outputArray_GPU,width,height);
hipDeviceSynchronize();
clockStop("GPU");
cudaCheck();
hipMemcpy(img_matrixOutGPU,outputArray_GPU,size,hipMemcpyDeviceToHost);
nlm_DesNormal(img_matrixOutGPU,255.0,imageOutGPU.width()*imageOutGPU.height());
hipFree(outputArray_GPU);
// GPU END
// CPU BEGIN
clockStart();
nlm_CPU(img_matrix, img_matrixOutCPU, image.width(), image.height());
clockStop("CPU");
nlm_DesNormal(img_matrixOutCPU,255.0,imageOutCPU.width()*imageOutCPU.height());
// CPU END
CImgDisplay main_disp(image,"Fing");
CImgDisplay main_disp1(imageOutGPU,"Fing - GPU");
CImgDisplay main_disp2(imageOutCPU,"Fing - CPU");
while (!main_disp.is_closed()) {
main_disp.wait();
}
return 0;
}
| 89f929b2226f5208a499380fb2b456533f83f4b0.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "CImg.h"
using namespace cimg_library;
#define K 10
#define W 3
#define DIM_BLOQUE 32
#define SIGMA 0.9
char path[20] = "img\\fing_xl.pgm";
__int64 ctr1 = 0, ctr2 = 0, freq = 0;
void clockStart(){
QueryPerformanceCounter((LARGE_INTEGER *)&ctr1);
}
void clockStop(const char * str){
QueryPerformanceCounter((LARGE_INTEGER *)&ctr2);
QueryPerformanceFrequency((LARGE_INTEGER *)&freq);
printf("%s --> %fs\n",str,(ctr2 - ctr1) * 1.0 / freq);
}
void cudaCheck()
{
cudaError_t cudaError;
cudaError = cudaGetLastError();
if(cudaError != cudaSuccess)
{
printf(" cudaGetLastError() returned %d: %s\n", cudaError, cudaGetErrorString(cudaError));
}else{
//printf(" todo ok\n" );
}
}
__global__ void nlm_kernel(float* inputArray_GPU,float* outputArray_GPU, int width, int height)
{
__shared__ float arrayincom[((K/2+W/2)*2+DIM_BLOQUE)*((K/2+W/2)*2+DIM_BLOQUE)];
int marco_medio=(int)K/2+(int)W/2;
int marco=marco_medio*2;
int dim_arraycom=marco_medio*2+DIM_BLOQUE;
int dim_in_out_array=marco_medio*2+width;
// Copy the pixels that belong to this thread
for(int i=threadIdx.x; i<blockDim.x+marco ; i+=blockDim.x){
for(int j=threadIdx.y; j<blockDim.y+marco ; j+=blockDim.y){
arrayincom[i+j*dim_arraycom]=inputArray_GPU[(i+blockIdx.x * blockDim.x)+(j+blockIdx.y * blockDim.y)*dim_in_out_array]; // Double-check the indices
}
}
// Synchronize the threads to make sure everything is already loaded into shared memory
__syncthreads();
// Run NLM
float suma=0;
float consta=0;
float dist=0;
float peso=0;
for (int sx = (threadIdx.x+marco_medio) - K/2 ; sx < (threadIdx.x+marco_medio) + K/2 ; sx++){
for (int sy = (threadIdx.y+marco_medio) - K/2 ; sy < (threadIdx.y+marco_medio) + K/2 ; sy++){
for (int wx = - W/2 ; wx < W/2 ; wx++){
for (int wy = - W/2 ; wy < W/2 ; wy++){
dist +=powf( (arrayincom[((threadIdx.x+marco_medio) + wx) + ((threadIdx.y+marco_medio) + wy)*dim_arraycom] - arrayincom[(sx + wx) +(sy + wy)*dim_arraycom]) ,2 );
}
}
peso = expf(-dist/powf(SIGMA,2));
suma += arrayincom[sx+sy*dim_arraycom] * peso;
consta += peso;
dist=0;
}
}
outputArray_GPU[(blockIdx.x * blockDim.x +threadIdx.x)+(blockIdx.y * blockDim.y +threadIdx.y)*width] =suma/consta;
}
void nlm_Normal(float *input,float norma,int size)
{
for(int i=0;i<size;i++){
input[i] = input[i]/norma;
}
}
void nlm_DesNormal(float *input,float norma,int size)
{
for(int i=0;i<size;i++){
input[i] = input[i]*norma;
}
}
void nlm_ImprimirValores(float *input,int size)
{
for(int i=0;i<size;i++){
printf("%f ",input[i]);
if (i%1024==0)
printf("\n");
}
}
void nlm_CPU(float * inputImage, float* outputImage, int width, int height)
{
float suma=0;
float consta=0;
float dist=0;
float peso=0;
for (int px = 0 ; px < width ; px++){
for (int py = 0 ; py < height ; py++){
for (int sx = px - K/2 ; sx < px + K/2 ; sx++){
for (int sy = py - K/2 ; sy < py + K/2 ; sy++){
if ((sx>0)&&(sx<width)&&(sy>0)&&(sy<height)){
for (int wx = - W/2 ; wx < W/2 ; wx++){
for (int wy = - W/2 ; wy < W/2 ; wy++){
if (((px+wx>0)&&(px+wx<width)&&(py+wy>0)&&(py+wy<height))&&((sx + wx>0)&&(sx + wx<width)&&(sy + wy>0)&&(sy + wy<height)))
dist += pow( (inputImage[(px + wx) + (py + wy)*width] - inputImage[(sx + wx) +(sy + wy)*width]) ,2 );
}
}
peso = exp(-dist/pow(SIGMA,2));
suma += inputImage[sx+sy*width] * peso;
consta += peso;
dist=0;
}
}
}
outputImage[py*width + px] = suma / consta;
suma=0;
consta=0;
}
}
}
int main()
{
/* LOAD IMAGE */
//CImg<float> image("img\\fing.pgm");
CImg<float> image(path);
float * img_matrix = image.data();
CImg<float> imageOutGPU(path);
float * img_matrixOutGPU = imageOutGPU.data();
CImg<float> imageOutCPU(path);
float * img_matrixOutCPU = imageOutCPU.data();
/* END LOAD */
int width=image.width();
int height=image.height();
int k=K/2;
int w=W/2;
size_t size = width*height*sizeof(float);
size_t size2=((k+w)*2+height)*((k+w)*2+width)*sizeof(float);
// We have to normalize; see the note at the end of the assignment handout
nlm_Normal(img_matrix,255.0,width*height);
// GPU BEGIN
float* inputArray_GPU;
float* outputArray_GPU;
cudaMalloc(&(inputArray_GPU),size2);
cudaMalloc(&(outputArray_GPU),size);
/////////////////////////////////////
//copy input data to the GPU
///////////////////////////////////////
cudaMemset(outputArray_GPU, 0, size);
cudaMemset(inputArray_GPU, 0, size2);
// Copy to GPU memory, taking into account that it is 2*(k+w) larger
int offset=(k+w)*(width+(k+w)*2);
for (int i=0; i<height; i++){
offset+=(k+w);
cudaMemcpy(&inputArray_GPU[offset],&img_matrix[i*width],width*sizeof(float),cudaMemcpyHostToDevice);
offset+=width+(k+w);
}
//Configure the grid
dim3 tamGrid (width/DIM_BLOQUE, height/DIM_BLOQUE); //Grid dimension
dim3 tamBlock(DIM_BLOQUE, DIM_BLOQUE); //Block dimension
clockStart();
nlm_kernel<<<tamGrid, tamBlock>>>(inputArray_GPU, outputArray_GPU,width,height);
cudaDeviceSynchronize();
clockStop("GPU");
cudaCheck();
cudaMemcpy(img_matrixOutGPU,outputArray_GPU,size,cudaMemcpyDeviceToHost);
nlm_DesNormal(img_matrixOutGPU,255.0,imageOutGPU.width()*imageOutGPU.height());
cudaFree(outputArray_GPU);
// GPU END
// CPU BEGIN
clockStart();
nlm_CPU(img_matrix, img_matrixOutCPU, image.width(), image.height());
clockStop("CPU");
nlm_DesNormal(img_matrixOutCPU,255.0,imageOutCPU.width()*imageOutCPU.height());
// CPU END
CImgDisplay main_disp(image,"Fing");
CImgDisplay main_disp1(imageOutGPU,"Fing - GPU");
CImgDisplay main_disp2(imageOutCPU,"Fing - CPU");
while (!main_disp.is_closed()) {
main_disp.wait();
}
return 0;
}
|
28ea5b37bd27f571abc45e437c65090baf949e05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reduce.h"
__device__ float merge(float old,float opOutput,float *extraParams) {
return fmaxf(old,opOutput);
}
__device__ float update(float old,float opOutput,float *extraParams) {
return fmaxf(old,opOutput);
}
__device__ float op(float d1,float *extraParams) {
return d1;
}
__device__ float postProcess(float reduction,int n,int xOffset,float *dx,int incx,float *extraParams,float *result) {
return reduction;
}
extern "C"
__global__ void max_strided_float(int n, int xOffset,float *dx,int incx,float *extraParams,float *result) {
transform(n,xOffset,dx,incx,extraParams,result);
}
| 28ea5b37bd27f571abc45e437c65090baf949e05.cu | #include "reduce.h"
__device__ float merge(float old,float opOutput,float *extraParams) {
return fmaxf(old,opOutput);
}
__device__ float update(float old,float opOutput,float *extraParams) {
return fmaxf(old,opOutput);
}
__device__ float op(float d1,float *extraParams) {
return d1;
}
__device__ float postProcess(float reduction,int n,int xOffset,float *dx,int incx,float *extraParams,float *result) {
return reduction;
}
extern "C"
__global__ void max_strided_float(int n, int xOffset,float *dx,int incx,float *extraParams,float *result) {
transform(n,xOffset,dx,incx,extraParams,result);
}
|
e042f1b64f00244476a4145b84e7d389d05e7386.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved
#include <stdio.h>
#include <vector>
#include "common_cuda_helper.hpp"
#include "trt_cuda_helper.cuh"
#include "trt_plugin_helper.hpp"
static int const threadsPerBlock = sizeof(unsigned long long int) * 8;
using mmcv::TensorDesc;
template <typename T>
__global__ void onnx_scatternd_kernel(const int n, const int* indices,
const T* update, T* output,
TensorDesc tensor_desc,
TensorDesc indice_desc) {
const int indice_cols = indice_desc.shape[indice_desc.dim - 1];
const int copy_stride = tensor_desc.stride[indice_cols - 1];
const int* stride = &(tensor_desc.stride[0]);
CUDA_1D_KERNEL_LOOP(index, n) {
int output_offset = 0;
const int* indices_current = indices + index * indice_cols;
for (int i = 0; i < indice_cols; ++i) {
output_offset += stride[i] * indices_current[i];
}
memcpy(output + output_offset, update + index * copy_stride,
copy_stride * sizeof(T));
}
}
template <typename T>
void TRTONNXScatterNDKernelLauncher(const T* data, const int* indices,
const T* update, const int* dims,
int nbDims, const int* indices_dims,
int indice_nbDims, T* output,
hipStream_t stream) {
// fill tensordesc and initial
TensorDesc tensor_desc;
memset((void*)&tensor_desc, 0, sizeof(TensorDesc));
tensor_desc.dim = nbDims;
tensor_desc.shape[nbDims - 1] = dims[nbDims - 1];
tensor_desc.stride[nbDims - 1] = 1;
for (int i = nbDims - 2; i >= 0; --i) {
tensor_desc.shape[i] = dims[i];
tensor_desc.stride[i] = dims[i + 1] * tensor_desc.stride[i + 1];
}
const int data_size = tensor_desc.stride[0] * tensor_desc.shape[0];
TensorDesc indice_desc;
memset((void*)&indice_desc, 0, sizeof(TensorDesc));
indice_desc.dim = indice_nbDims;
indice_desc.shape[indice_nbDims - 1] = indices_dims[indice_nbDims - 1];
indice_desc.stride[indice_nbDims - 1] = 1;
for (int i = indice_nbDims - 2; i >= 0; --i) {
indice_desc.shape[i] = indices_dims[i];
indice_desc.stride[i] = indices_dims[i + 1] * indice_desc.stride[i + 1];
}
// output = np.copy(data)
hipMemcpyAsync(output, data, data_size * sizeof(T),
hipMemcpyDeviceToDevice);
int num_update_indice = 1;
for (int i = 0; i < indice_nbDims - 1; ++i) {
num_update_indice *= indice_desc.shape[i];
}
// scatter
const int col_block = DIVUP(num_update_indice, threadsPerBlock);
hipLaunchKernelGGL(( onnx_scatternd_kernel), dim3(col_block), dim3(threadsPerBlock), 0, stream,
num_update_indice, indices, update, output, tensor_desc, indice_desc);
}
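// Editor's hedged worked example (not part of the original source): with data of shape (4, 4),
// indices of shape (2, 1) holding {1, 3} and update of shape (2, 4), indice_cols is 1 and
// copy_stride is 4, so the launcher first copies data into output and then overwrites rows 1
// and 3 with the two update rows, matching the ONNX ScatterND definition for these shapes.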
void TRTONNXScatterNDKernelLauncher_float(const float* data, const int* indices,
const float* update, const int* dims,
int nbDims, const int* indices_dims,
int indice_nbDims, float* output,
hipStream_t stream) {
TRTONNXScatterNDKernelLauncher<float>(data, indices, update, dims, nbDims,
indices_dims, indice_nbDims, output,
stream);
}
void TRTONNXScatterNDKernelLauncher_int32(const int* data, const int* indices,
const int* update, const int* dims,
int nbDims, const int* indices_dims,
int indice_nbDims, int* output,
hipStream_t stream) {
TRTONNXScatterNDKernelLauncher<int>(data, indices, update, dims, nbDims,
indices_dims, indice_nbDims, output,
stream);
}
| e042f1b64f00244476a4145b84e7d389d05e7386.cu | // Copyright (c) OpenMMLab. All rights reserved
#include <stdio.h>
#include <vector>
#include "common_cuda_helper.hpp"
#include "trt_cuda_helper.cuh"
#include "trt_plugin_helper.hpp"
static int const threadsPerBlock = sizeof(unsigned long long int) * 8;
using mmcv::TensorDesc;
template <typename T>
__global__ void onnx_scatternd_kernel(const int n, const int* indices,
const T* update, T* output,
TensorDesc tensor_desc,
TensorDesc indice_desc) {
const int indice_cols = indice_desc.shape[indice_desc.dim - 1];
const int copy_stride = tensor_desc.stride[indice_cols - 1];
const int* stride = &(tensor_desc.stride[0]);
CUDA_1D_KERNEL_LOOP(index, n) {
int output_offset = 0;
const int* indices_current = indices + index * indice_cols;
for (int i = 0; i < indice_cols; ++i) {
output_offset += stride[i] * indices_current[i];
}
memcpy(output + output_offset, update + index * copy_stride,
copy_stride * sizeof(T));
}
}
template <typename T>
void TRTONNXScatterNDKernelLauncher(const T* data, const int* indices,
const T* update, const int* dims,
int nbDims, const int* indices_dims,
int indice_nbDims, T* output,
cudaStream_t stream) {
// fill tensordesc and initial
TensorDesc tensor_desc;
memset((void*)&tensor_desc, 0, sizeof(TensorDesc));
tensor_desc.dim = nbDims;
tensor_desc.shape[nbDims - 1] = dims[nbDims - 1];
tensor_desc.stride[nbDims - 1] = 1;
for (int i = nbDims - 2; i >= 0; --i) {
tensor_desc.shape[i] = dims[i];
tensor_desc.stride[i] = dims[i + 1] * tensor_desc.stride[i + 1];
}
const int data_size = tensor_desc.stride[0] * tensor_desc.shape[0];
TensorDesc indice_desc;
memset((void*)&indice_desc, 0, sizeof(TensorDesc));
indice_desc.dim = indice_nbDims;
indice_desc.shape[indice_nbDims - 1] = indices_dims[indice_nbDims - 1];
indice_desc.stride[indice_nbDims - 1] = 1;
for (int i = indice_nbDims - 2; i >= 0; --i) {
indice_desc.shape[i] = indices_dims[i];
indice_desc.stride[i] = indices_dims[i + 1] * indice_desc.stride[i + 1];
}
// output = np.copy(data)
cudaMemcpyAsync(output, data, data_size * sizeof(T),
cudaMemcpyDeviceToDevice);
int num_update_indice = 1;
for (int i = 0; i < indice_nbDims - 1; ++i) {
num_update_indice *= indice_desc.shape[i];
}
// scatter
const int col_block = DIVUP(num_update_indice, threadsPerBlock);
onnx_scatternd_kernel<<<col_block, threadsPerBlock, 0, stream>>>(
num_update_indice, indices, update, output, tensor_desc, indice_desc);
}
void TRTONNXScatterNDKernelLauncher_float(const float* data, const int* indices,
const float* update, const int* dims,
int nbDims, const int* indices_dims,
int indice_nbDims, float* output,
cudaStream_t stream) {
TRTONNXScatterNDKernelLauncher<float>(data, indices, update, dims, nbDims,
indices_dims, indice_nbDims, output,
stream);
}
void TRTONNXScatterNDKernelLauncher_int32(const int* data, const int* indices,
const int* update, const int* dims,
int nbDims, const int* indices_dims,
int indice_nbDims, int* output,
cudaStream_t stream) {
TRTONNXScatterNDKernelLauncher<int>(data, indices, update, dims, nbDims,
indices_dims, indice_nbDims, output,
stream);
}
|
02390da6f9a6107973536a2faaa379f9a9dac574.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include "cpu.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <ctime>
namespace StreamCompaction {
namespace CPU {
/**
* CPU scan (prefix sum).
*/
float scan(int n, int *odata, const int *idata) {
hipEvent_t start, stop;
float ms_time = 0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
odata[0] = 0;
for (int i = 1; i < n; i++) {
odata[i] = odata[i - 1] + idata[i - 1];
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&ms_time, start, stop);
return ms_time;
}
/**
* CPU stream compaction without using the scan function.
*
* @returns the number of elements remaining after compaction.
*/
int compactWithoutScan(int n, int *odata, const int *idata) {
hipEvent_t start, stop;
float ms_time = 0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
int j = 0;
for (int i = 0; i < n; i++) {
if (idata[i] != 0) {
odata[j] = idata[i];
j++;
}
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&ms_time, start, stop);
printf("CPU execution time for compact without scan: %.5fms\n", ms_time);
return j;
}
void zeroArray(int n, int *a) {
for (int i = 0; i < n; i++) {
a[i] = 0;
}
}
/**
* CPU stream compaction using scan and scatter, like the parallel version.
*
* @returns the number of elements remaining after compaction.
*/
int compactWithScan(int n, int *odata, const int *idata) {
int *temp = (int*)malloc(n * sizeof(int));
zeroArray(n, temp);
int *scan_output = (int*)malloc(n * sizeof(int));
zeroArray(n, scan_output);
hipEvent_t start, stop;
float ms_time = 0.0f;
float ms_total_time = 0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
// Compute temporary array
for (int i = 0; i < n; i++) {
if (idata[i] != 0) {
temp[i] = 1;
}
}
// Run exclusive scan on the temporary array
ms_time = scan(n, scan_output, temp);
ms_total_time += ms_time;
ms_time = 0.0f;
// Scatter
hipEventRecord(start);
for (int i = 0; i < n; i++) {
if (temp[i] == 1) {
odata[scan_output[i]] = idata[i];
}
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&ms_time, start, stop);
ms_total_time += ms_time;
printf("CPU execution time for compact with scan: %.5fms\n", ms_total_time);
return scan_output[n - 1] + temp[n - 1];
}
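// Editor's hedged usage sketch (not part of the original source): for idata = {3, 0, 1, 0, 2}
// the temporary array is {1, 0, 1, 0, 1}, its exclusive scan is {0, 1, 1, 2, 2}, and the
// scatter step writes odata = {3, 1, 2}, so the function returns 3.
static void compactWithScanSketch() {
    int idata[5] = {3, 0, 1, 0, 2};
    int odata[5] = {0, 0, 0, 0, 0};
    int remaining = compactWithScan(5, odata, idata);
    printf("compactWithScanSketch: remaining = %d\n", remaining);  // expected: 3
}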
}
}
| 02390da6f9a6107973536a2faaa379f9a9dac574.cu | #include <cstdio>
#include <cstdlib>
#include "cpu.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <ctime>
namespace StreamCompaction {
namespace CPU {
/**
* CPU scan (prefix sum).
*/
float scan(int n, int *odata, const int *idata) {
cudaEvent_t start, stop;
float ms_time = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
odata[0] = 0;
for (int i = 1; i < n; i++) {
odata[i] = odata[i - 1] + idata[i - 1];
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms_time, start, stop);
return ms_time;
}
/**
* CPU stream compaction without using the scan function.
*
* @returns the number of elements remaining after compaction.
*/
int compactWithoutScan(int n, int *odata, const int *idata) {
cudaEvent_t start, stop;
float ms_time = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
int j = 0;
for (int i = 0; i < n; i++) {
if (idata[i] != 0) {
odata[j] = idata[i];
j++;
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms_time, start, stop);
printf("CPU execution time for compact without scan: %.5fms\n", ms_time);
return j;
}
void zeroArray(int n, int *a) {
for (int i = 0; i < n; i++) {
a[i] = 0;
}
}
/**
* CPU stream compaction using scan and scatter, like the parallel version.
*
* @returns the number of elements remaining after compaction.
*/
int compactWithScan(int n, int *odata, const int *idata) {
int *temp = (int*)malloc(n * sizeof(int));
zeroArray(n, temp);
int *scan_output = (int*)malloc(n * sizeof(int));
zeroArray(n, scan_output);
cudaEvent_t start, stop;
float ms_time = 0.0f;
float ms_total_time = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Compute temporary array
for (int i = 0; i < n; i++) {
if (idata[i] != 0) {
temp[i] = 1;
}
}
// Run exclusive scan on the temporary array
ms_time = scan(n, scan_output, temp);
ms_total_time += ms_time;
ms_time = 0.0f;
// Scatter
cudaEventRecord(start);
for (int i = 0; i < n; i++) {
if (temp[i] == 1) {
odata[scan_output[i]] = idata[i];
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms_time, start, stop);
ms_total_time += ms_time;
printf("CPU execution time for compact with scan: %.5fms\n", ms_total_time);
return scan_output[n - 1] + temp[n - 1];
}
}
}
|
caaedd81f3714908d47113f21dc6df90accfec9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <cstdio>
#include <cstdlib>
#include "mmul.h"
void mmul(hipblasHandle_t handle, const float* A, const float* B, float* C, int n) {
int lda = n;
int ldb = n;
int ldc = n;
const float bet = 1;
const float alf = 1;
const float *alpha = &alf;
const float *beta = &bet;
// cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH);
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, n, alpha, A, lda, B, ldb, beta, C, ldc);
hipDeviceSynchronize();
} | caaedd81f3714908d47113f21dc6df90accfec9f.cu | #include "cuda_runtime.h"
#include <cuda.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <cstdio>
#include <cstdlib>
#include "mmul.h"
void mmul(cublasHandle_t handle, const float* A, const float* B, float* C, int n) {
int lda = n;
int ldb = n;
int ldc = n;
const float bet = 1;
const float alf = 1;
const float *alpha = &alf;
const float *beta = &bet;
// cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH);
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, alpha, A, lda, B, ldb, beta, C, ldc);
cudaDeviceSynchronize();
} |
bdbef625a9fdd89937af6de2670ec7ad7f1c63f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0
// DeepSpeed Team
#include "general_kernels.h"
namespace cg = cooperative_groups;
template <typename T>
__global__ void column_sum_reduce(const T* __restrict__ inp,
T* __restrict__ out,
int rows,
int width)
{
__shared__ float tile[TILE_DIM][TILE_DIM + 1];
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b);
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int y_stride = width * TILE_DIM;
float localSum = 0;
// Loop across matrix height
if (idx < width) {
int offset = threadIdx.y * width + idx;
for (int r = threadIdx.y; r < rows; r += TILE_DIM) {
localSum += (float)inp[offset];
offset += y_stride;
}
}
tile[threadIdx.x][threadIdx.y] = localSum;
__syncthreads();
// Sum the shared buffer.
float sum = tile[threadIdx.y][threadIdx.x];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < TILE_DIM; i <<= 1) sum += g.shfl_down(sum, i);
if (threadIdx.x == 0) {
int pos = blockIdx.x * TILE_DIM + threadIdx.y;
if (pos < width) out[pos] = sum;
}
}
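// Editor's note (hedged, not part of the original source): each thread first accumulates a
// strided partial sum of one column, the shared tile is then read back transposed so that the
// cooperative group g holds every partial of a single column, and g.shfl_down folds those
// partials into the per-column total, i.e. the column sums of a rows x width matrix as used
// for bias gradients.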
template <typename T>
void launch_fuse_transpose_bias_kernel(const T* inp,
T* out,
int rows,
int cols,
hipStream_t stream);
template <>
void launch_fuse_transpose_bias_kernel<float>(const float* inp,
float* out,
int rows,
int cols,
hipStream_t stream)
{
// assert(rows % TILE_DIM == 0);
// assert(cols % TILE_DIM == 0);
dim3 grid_dim((cols - 1) / TILE_DIM + 1);
dim3 block_dim(TILE_DIM, TILE_DIM);
hipLaunchKernelGGL(( column_sum_reduce<float>), dim3(grid_dim), dim3(block_dim), 0, stream, inp, out, rows, cols);
}
template <>
void launch_fuse_transpose_bias_kernel<__half>(const __half* inp,
__half* out,
int rows,
int cols,
hipStream_t stream)
{
// assert(rows % TILE_DIM == 0);
// assert(cols % TILE_DIM == 0);
dim3 grid_dim((cols - 1) / TILE_DIM + 1);
dim3 block_dim(TILE_DIM, TILE_DIM);
hipLaunchKernelGGL(( column_sum_reduce<__half>), dim3(grid_dim), dim3(block_dim), 0, stream, inp, out, rows, cols);
}
__global__ void fused_add2_kernel(const int N, float* out, const float* inp1, const float* inp2)
{
const float4* inp1_4 = reinterpret_cast<const float4*>(inp1);
const float4* inp2_4 = reinterpret_cast<const float4*>(inp2);
float4* out_4 = reinterpret_cast<float4*>(out);
CUDA_1D_KERNEL_LOOP(j, N)
{
float4 val;
float4 inp1_reg = inp1_4[j];
float4 inp2_reg = inp2_4[j];
val.x = inp1_reg.x + inp2_reg.x;
val.y = inp1_reg.y + inp2_reg.y;
val.z = inp1_reg.z + inp2_reg.z;
val.w = inp1_reg.w + inp2_reg.w;
out_4[j] = val;
}
}
__global__ void fused_add2_kernel(const int N, __half* out, const __half* inp1, const __half* inp2)
{
float2 inp1_4;
float2 inp2_4;
__half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4);
__half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4);
const float2* inp1_arr = reinterpret_cast<const float2*>(inp1);
const float2* inp2_arr = reinterpret_cast<const float2*>(inp2);
CUDA_1D_KERNEL_LOOP(j, N)
{
inp1_4 = inp1_arr[j];
inp2_4 = inp2_arr[j];
float2 inp1_h_f_0 = __half22float2(inp1_h[0]);
float2 inp1_h_f_1 = __half22float2(inp1_h[1]);
float2 inp2_h_f_0 = __half22float2(inp2_h[0]);
float2 inp2_h_f_1 = __half22float2(inp2_h[1]);
inp1_h_f_0.x += inp2_h_f_0.x;
inp1_h_f_0.y += inp2_h_f_0.y;
inp1_h_f_1.x += inp2_h_f_1.x;
inp1_h_f_1.y += inp2_h_f_1.y;
float2 val_f;
__half2* val_h = reinterpret_cast<__half2*>(&val_f);
val_h[0] = __float22half2_rn(inp1_h_f_0);
val_h[1] = __float22half2_rn(inp1_h_f_1);
float2* out_4 = reinterpret_cast<float2*>(out);
out_4[j] = val_f;
}
}
template <>
void launch_fused_add2<float>(float* out,
const float* inp1,
const float* inp2,
int batch_size,
int seq_length,
int hidden_dim,
hipStream_t& stream)
{
int total_count = batch_size * seq_length * hidden_dim / 4;
dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length);
dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4);
hipLaunchKernelGGL(( fused_add2_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, total_count, out, inp1, inp2);
}
template <>
void launch_fused_add2<__half>(__half* out,
const __half* inp1,
const __half* inp2,
int batch_size,
int seq_length,
int hidden_dim,
hipStream_t& stream)
{
int total_count = batch_size * seq_length * hidden_dim / 4;
dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length);
dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4);
hipLaunchKernelGGL(( fused_add2_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, total_count, out, inp1, inp2);
}
__global__ void fused_add3_kernel(float* out,
const float* inp1,
const float* inp2,
const float* inp3,
int size,
int row_stride)
{
int row = blockIdx.x;
int id = threadIdx.x;
const float4* inp1_4 = reinterpret_cast<const float4*>(inp1);
const float4* inp2_4 = reinterpret_cast<const float4*>(inp2);
const float4* inp3_4 = reinterpret_cast<const float4*>(inp3);
float4* out_4 = reinterpret_cast<float4*>(out);
float4 val;
float4 inp1_reg = inp1_4[row * row_stride + id];
float4 inp2_reg = inp2_4[row * row_stride + id];
float4 inp3_reg = inp3_4[row * row_stride + id];
val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x;
val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y;
val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z;
val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w;
out_4[row * row_stride + id] = val;
}
__global__ void fused_add3_kernel(__half* out,
const __half* inp1,
const __half* inp2,
const __half* inp3,
int size,
int row_stride)
{
int row = blockIdx.x;
int id = threadIdx.x;
const float2* inp1_arr = reinterpret_cast<const float2*>(inp1);
const float2* inp2_arr = reinterpret_cast<const float2*>(inp2);
const float2* inp3_arr = reinterpret_cast<const float2*>(inp3);
float2 inp1_4 = inp1_arr[row * row_stride + id];
float2 inp2_4 = inp2_arr[row * row_stride + id];
float2 inp3_4 = inp3_arr[row * row_stride + id];
__half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4);
__half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4);
__half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4);
float2 inp1_h_f_0 = __half22float2(inp1_h[0]);
float2 inp1_h_f_1 = __half22float2(inp1_h[1]);
float2 inp2_h_f_0 = __half22float2(inp2_h[0]);
float2 inp2_h_f_1 = __half22float2(inp2_h[1]);
float2 inp3_h_f_0 = __half22float2(inp3_h[0]);
float2 inp3_h_f_1 = __half22float2(inp3_h[1]);
inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x);
inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y);
inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x);
inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y);
float2 val_f;
__half2* val_h = reinterpret_cast<__half2*>(&val_f);
val_h[0] = __float22half2_rn(inp1_h_f_0);
val_h[1] = __float22half2_rn(inp1_h_f_1);
float2* out_4 = reinterpret_cast<float2*>(out);
out_4[row * row_stride + id] = val_f;
}
template <>
void launch_fused_add3<float>(float* out,
const float* inp1,
const float* inp2,
const float* inp3,
int batch_size,
int seq_length,
int hidden_size,
hipStream_t& stream)
{
dim3 grid_dim(batch_size * seq_length);
dim3 block_dim(hidden_size / 4);
hipLaunchKernelGGL(( fused_add3_kernel), dim3(grid_dim), dim3(block_dim), 0, stream,
out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4);
}
template <>
void launch_fused_add3<__half>(__half* out,
const __half* inp1,
const __half* inp2,
const __half* inp3,
int batch_size,
int seq_length,
int hidden_size,
hipStream_t& stream)
{
dim3 grid_dim(batch_size * seq_length);
dim3 block_dim(hidden_size / 4);
hipLaunchKernelGGL(( fused_add3_kernel), dim3(grid_dim), dim3(block_dim), 0, stream,
out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4);
}
__global__ void fused_add4_kernel(float* out,
const float* inp1,
const float* inp2,
const float* inp3,
const float* inp4,
int size,
int row_stride)
{
int row = blockIdx.x;
int id = threadIdx.x;
const float4* inp1_4 = reinterpret_cast<const float4*>(inp1);
const float4* inp2_4 = reinterpret_cast<const float4*>(inp2);
const float4* inp3_4 = reinterpret_cast<const float4*>(inp3);
const float4* inp4_4 = reinterpret_cast<const float4*>(inp4);
float4* out_4 = reinterpret_cast<float4*>(out);
float4 val;
float4 inp1_reg = inp1_4[row * row_stride + id];
float4 inp2_reg = inp2_4[row * row_stride + id];
float4 inp3_reg = inp3_4[row * row_stride + id];
float4 inp4_reg = inp4_4[row * row_stride + id];
val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x + inp4_reg.x;
val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y + inp4_reg.y;
val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z + inp4_reg.z;
val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w + inp4_reg.w;
out_4[row * row_stride + id] = val;
}
__global__ void fused_add4_kernel(__half* out,
const __half* inp1,
const __half* inp2,
const __half* inp3,
const __half* inp4,
int size,
int row_stride)
{
int row = blockIdx.x;
int id = threadIdx.x;
const float2* inp1_arr = reinterpret_cast<const float2*>(inp1);
const float2* inp2_arr = reinterpret_cast<const float2*>(inp2);
const float2* inp3_arr = reinterpret_cast<const float2*>(inp3);
const float2* inp4_arr = reinterpret_cast<const float2*>(inp4);
float2 inp1_4 = inp1_arr[row * row_stride + id];
float2 inp2_4 = inp2_arr[row * row_stride + id];
float2 inp3_4 = inp3_arr[row * row_stride + id];
float2 inp4_4 = inp4_arr[row * row_stride + id];
__half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4);
__half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4);
__half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4);
__half2* inp4_h = reinterpret_cast<__half2*>(&inp4_4);
float2 inp1_h_f_0 = __half22float2(inp1_h[0]);
float2 inp1_h_f_1 = __half22float2(inp1_h[1]);
float2 inp2_h_f_0 = __half22float2(inp2_h[0]);
float2 inp2_h_f_1 = __half22float2(inp2_h[1]);
float2 inp3_h_f_0 = __half22float2(inp3_h[0]);
float2 inp3_h_f_1 = __half22float2(inp3_h[1]);
float2 inp4_h_f_0 = __half22float2(inp4_h[0]);
float2 inp4_h_f_1 = __half22float2(inp4_h[1]);
inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x + inp4_h_f_0.x);
inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y + inp4_h_f_0.y);
inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x + inp4_h_f_1.x);
inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y + inp4_h_f_1.y);
float2 val_f;
__half2* val_h = reinterpret_cast<__half2*>(&val_f);
val_h[0] = __float22half2_rn(inp1_h_f_0);
val_h[1] = __float22half2_rn(inp1_h_f_1);
float2* out_4 = reinterpret_cast<float2*>(out);
out_4[row * row_stride + id] = val_f;
}
template <>
void launch_fused_add4<float>(float* out,
const float* inp1,
const float* inp2,
const float* inp3,
const float* inp4,
int batch_size,
int seq_length,
int hidden_size,
hipStream_t& stream)
{
dim3 grid_dim(batch_size * seq_length);
dim3 block_dim(hidden_size / 4);
hipLaunchKernelGGL(( fused_add4_kernel), dim3(grid_dim), dim3(block_dim), 0, stream,
out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4);
}
template <>
void launch_fused_add4<__half>(__half* out,
const __half* inp1,
const __half* inp2,
const __half* inp3,
const __half* inp4,
int batch_size,
int seq_length,
int hidden_size,
hipStream_t& stream)
{
dim3 grid_dim(batch_size * seq_length);
dim3 block_dim(hidden_size / 4);
hipLaunchKernelGGL(( fused_add4_kernel), dim3(grid_dim), dim3(block_dim), 0, stream,
out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4);
}
| bdbef625a9fdd89937af6de2670ec7ad7f1c63f5.cu | // Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0
// DeepSpeed Team
#include "general_kernels.h"
namespace cg = cooperative_groups;
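// column_sum_reduce: each thread block loads a TILE_DIM x TILE_DIM tile, accumulates
// partial sums down the rows into shared memory, then reduces across the transposed
// tile with warp shuffles so that each output element is the sum of one input column.
// Given the launcher name below, this appears to back the fused transpose + bias
// reduction path.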
template <typename T>
__global__ void column_sum_reduce(const T* __restrict__ inp,
T* __restrict__ out,
int rows,
int width)
{
__shared__ float tile[TILE_DIM][TILE_DIM + 1];
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b);
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int y_stride = width * TILE_DIM;
float localSum = 0;
// Loop across matrix height
if (idx < width) {
int offset = threadIdx.y * width + idx;
for (int r = threadIdx.y; r < rows; r += TILE_DIM) {
localSum += (float)inp[offset];
offset += y_stride;
}
}
tile[threadIdx.x][threadIdx.y] = localSum;
__syncthreads();
// Sum the shared buffer.
float sum = tile[threadIdx.y][threadIdx.x];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < TILE_DIM; i <<= 1) sum += g.shfl_down(sum, i);
if (threadIdx.x == 0) {
int pos = blockIdx.x * TILE_DIM + threadIdx.y;
if (pos < width) out[pos] = sum;
}
}
template <typename T>
void launch_fuse_transpose_bias_kernel(const T* inp,
T* out,
int rows,
int cols,
cudaStream_t stream);
template <>
void launch_fuse_transpose_bias_kernel<float>(const float* inp,
float* out,
int rows,
int cols,
cudaStream_t stream)
{
// assert(rows % TILE_DIM == 0);
// assert(cols % TILE_DIM == 0);
dim3 grid_dim((cols - 1) / TILE_DIM + 1);
dim3 block_dim(TILE_DIM, TILE_DIM);
column_sum_reduce<float><<<grid_dim, block_dim, 0, stream>>>(inp, out, rows, cols);
}
template <>
void launch_fuse_transpose_bias_kernel<__half>(const __half* inp,
__half* out,
int rows,
int cols,
cudaStream_t stream)
{
// assert(rows % TILE_DIM == 0);
// assert(cols % TILE_DIM == 0);
dim3 grid_dim((cols - 1) / TILE_DIM + 1);
dim3 block_dim(TILE_DIM, TILE_DIM);
column_sum_reduce<__half><<<grid_dim, block_dim, 0, stream>>>(inp, out, rows, cols);
}
__global__ void fused_add2_kernel(const int N, float* out, const float* inp1, const float* inp2)
{
const float4* inp1_4 = reinterpret_cast<const float4*>(inp1);
const float4* inp2_4 = reinterpret_cast<const float4*>(inp2);
float4* out_4 = reinterpret_cast<float4*>(out);
CUDA_1D_KERNEL_LOOP(j, N)
{
float4 val;
float4 inp1_reg = inp1_4[j];
float4 inp2_reg = inp2_4[j];
val.x = inp1_reg.x + inp2_reg.x;
val.y = inp1_reg.y + inp2_reg.y;
val.z = inp1_reg.z + inp2_reg.z;
val.w = inp1_reg.w + inp2_reg.w;
out_4[j] = val;
}
}
__global__ void fused_add2_kernel(const int N, __half* out, const __half* inp1, const __half* inp2)
{
float2 inp1_4;
float2 inp2_4;
__half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4);
__half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4);
const float2* inp1_arr = reinterpret_cast<const float2*>(inp1);
const float2* inp2_arr = reinterpret_cast<const float2*>(inp2);
CUDA_1D_KERNEL_LOOP(j, N)
{
inp1_4 = inp1_arr[j];
inp2_4 = inp2_arr[j];
float2 inp1_h_f_0 = __half22float2(inp1_h[0]);
float2 inp1_h_f_1 = __half22float2(inp1_h[1]);
float2 inp2_h_f_0 = __half22float2(inp2_h[0]);
float2 inp2_h_f_1 = __half22float2(inp2_h[1]);
inp1_h_f_0.x += inp2_h_f_0.x;
inp1_h_f_0.y += inp2_h_f_0.y;
inp1_h_f_1.x += inp2_h_f_1.x;
inp1_h_f_1.y += inp2_h_f_1.y;
float2 val_f;
__half2* val_h = reinterpret_cast<__half2*>(&val_f);
val_h[0] = __float22half2_rn(inp1_h_f_0);
val_h[1] = __float22half2_rn(inp1_h_f_1);
float2* out_4 = reinterpret_cast<float2*>(out);
out_4[j] = val_f;
}
}
template <>
void launch_fused_add2<float>(float* out,
const float* inp1,
const float* inp2,
int batch_size,
int seq_length,
int hidden_dim,
cudaStream_t& stream)
{
int total_count = batch_size * seq_length * hidden_dim / 4;
dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length);
dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4);
fused_add2_kernel<<<grid_dim, block_dim, 0, stream>>>(total_count, out, inp1, inp2);
}
template <>
void launch_fused_add2<__half>(__half* out,
const __half* inp1,
const __half* inp2,
int batch_size,
int seq_length,
int hidden_dim,
cudaStream_t& stream)
{
int total_count = batch_size * seq_length * hidden_dim / 4;
dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length);
dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4);
fused_add2_kernel<<<grid_dim, block_dim, 0, stream>>>(total_count, out, inp1, inp2);
}
__global__ void fused_add3_kernel(float* out,
const float* inp1,
const float* inp2,
const float* inp3,
int size,
int row_stride)
{
int row = blockIdx.x;
int id = threadIdx.x;
const float4* inp1_4 = reinterpret_cast<const float4*>(inp1);
const float4* inp2_4 = reinterpret_cast<const float4*>(inp2);
const float4* inp3_4 = reinterpret_cast<const float4*>(inp3);
float4* out_4 = reinterpret_cast<float4*>(out);
float4 val;
float4 inp1_reg = inp1_4[row * row_stride + id];
float4 inp2_reg = inp2_4[row * row_stride + id];
float4 inp3_reg = inp3_4[row * row_stride + id];
val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x;
val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y;
val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z;
val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w;
out_4[row * row_stride + id] = val;
}
__global__ void fused_add3_kernel(__half* out,
const __half* inp1,
const __half* inp2,
const __half* inp3,
int size,
int row_stride)
{
int row = blockIdx.x;
int id = threadIdx.x;
const float2* inp1_arr = reinterpret_cast<const float2*>(inp1);
const float2* inp2_arr = reinterpret_cast<const float2*>(inp2);
const float2* inp3_arr = reinterpret_cast<const float2*>(inp3);
float2 inp1_4 = inp1_arr[row * row_stride + id];
float2 inp2_4 = inp2_arr[row * row_stride + id];
float2 inp3_4 = inp3_arr[row * row_stride + id];
__half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4);
__half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4);
__half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4);
float2 inp1_h_f_0 = __half22float2(inp1_h[0]);
float2 inp1_h_f_1 = __half22float2(inp1_h[1]);
float2 inp2_h_f_0 = __half22float2(inp2_h[0]);
float2 inp2_h_f_1 = __half22float2(inp2_h[1]);
float2 inp3_h_f_0 = __half22float2(inp3_h[0]);
float2 inp3_h_f_1 = __half22float2(inp3_h[1]);
inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x);
inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y);
inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x);
inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y);
float2 val_f;
__half2* val_h = reinterpret_cast<__half2*>(&val_f);
val_h[0] = __float22half2_rn(inp1_h_f_0);
val_h[1] = __float22half2_rn(inp1_h_f_1);
float2* out_4 = reinterpret_cast<float2*>(out);
out_4[row * row_stride + id] = val_f;
}
template <>
void launch_fused_add3<float>(float* out,
const float* inp1,
const float* inp2,
const float* inp3,
int batch_size,
int seq_length,
int hidden_size,
cudaStream_t& stream)
{
dim3 grid_dim(batch_size * seq_length);
dim3 block_dim(hidden_size / 4);
fused_add3_kernel<<<grid_dim, block_dim, 0, stream>>>(
out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4);
}
template <>
void launch_fused_add3<__half>(__half* out,
const __half* inp1,
const __half* inp2,
const __half* inp3,
int batch_size,
int seq_length,
int hidden_size,
cudaStream_t& stream)
{
dim3 grid_dim(batch_size * seq_length);
dim3 block_dim(hidden_size / 4);
fused_add3_kernel<<<grid_dim, block_dim, 0, stream>>>(
out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4);
}
__global__ void fused_add4_kernel(float* out,
const float* inp1,
const float* inp2,
const float* inp3,
const float* inp4,
int size,
int row_stride)
{
int row = blockIdx.x;
int id = threadIdx.x;
const float4* inp1_4 = reinterpret_cast<const float4*>(inp1);
const float4* inp2_4 = reinterpret_cast<const float4*>(inp2);
const float4* inp3_4 = reinterpret_cast<const float4*>(inp3);
const float4* inp4_4 = reinterpret_cast<const float4*>(inp4);
float4* out_4 = reinterpret_cast<float4*>(out);
float4 val;
float4 inp1_reg = inp1_4[row * row_stride + id];
float4 inp2_reg = inp2_4[row * row_stride + id];
float4 inp3_reg = inp3_4[row * row_stride + id];
float4 inp4_reg = inp4_4[row * row_stride + id];
val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x + inp4_reg.x;
val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y + inp4_reg.y;
val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z + inp4_reg.z;
val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w + inp4_reg.w;
out_4[row * row_stride + id] = val;
}
__global__ void fused_add4_kernel(__half* out,
const __half* inp1,
const __half* inp2,
const __half* inp3,
const __half* inp4,
int size,
int row_stride)
{
int row = blockIdx.x;
int id = threadIdx.x;
const float2* inp1_arr = reinterpret_cast<const float2*>(inp1);
const float2* inp2_arr = reinterpret_cast<const float2*>(inp2);
const float2* inp3_arr = reinterpret_cast<const float2*>(inp3);
const float2* inp4_arr = reinterpret_cast<const float2*>(inp4);
float2 inp1_4 = inp1_arr[row * row_stride + id];
float2 inp2_4 = inp2_arr[row * row_stride + id];
float2 inp3_4 = inp3_arr[row * row_stride + id];
float2 inp4_4 = inp4_arr[row * row_stride + id];
__half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4);
__half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4);
__half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4);
__half2* inp4_h = reinterpret_cast<__half2*>(&inp4_4);
float2 inp1_h_f_0 = __half22float2(inp1_h[0]);
float2 inp1_h_f_1 = __half22float2(inp1_h[1]);
float2 inp2_h_f_0 = __half22float2(inp2_h[0]);
float2 inp2_h_f_1 = __half22float2(inp2_h[1]);
float2 inp3_h_f_0 = __half22float2(inp3_h[0]);
float2 inp3_h_f_1 = __half22float2(inp3_h[1]);
float2 inp4_h_f_0 = __half22float2(inp4_h[0]);
float2 inp4_h_f_1 = __half22float2(inp4_h[1]);
inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x + inp4_h_f_0.x);
inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y + inp4_h_f_0.y);
inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x + inp4_h_f_1.x);
inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y + inp4_h_f_1.y);
float2 val_f;
__half2* val_h = reinterpret_cast<__half2*>(&val_f);
val_h[0] = __float22half2_rn(inp1_h_f_0);
val_h[1] = __float22half2_rn(inp1_h_f_1);
float2* out_4 = reinterpret_cast<float2*>(out);
out_4[row * row_stride + id] = val_f;
}
template <>
void launch_fused_add4<float>(float* out,
const float* inp1,
const float* inp2,
const float* inp3,
const float* inp4,
int batch_size,
int seq_length,
int hidden_size,
cudaStream_t& stream)
{
dim3 grid_dim(batch_size * seq_length);
dim3 block_dim(hidden_size / 4);
fused_add4_kernel<<<grid_dim, block_dim, 0, stream>>>(
out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4);
}
template <>
void launch_fused_add4<__half>(__half* out,
const __half* inp1,
const __half* inp2,
const __half* inp3,
const __half* inp4,
int batch_size,
int seq_length,
int hidden_size,
cudaStream_t& stream)
{
dim3 grid_dim(batch_size * seq_length);
dim3 block_dim(hidden_size / 4);
fused_add4_kernel<<<grid_dim, block_dim, 0, stream>>>(
out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4);
}
|
eebac494b47e5c518165091273b366630b0bac7e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string>
#include <hip/hip_fp16.h>
#include "caffe/util/gpu_math_functions.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
#pragma clang diagnostic push
#pragma ide diagnostic ignored "CannotResolve"
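// AdaDelta update with optional L1/L2 regularization, fused with gradient clearing.
// Per element: g += reg * local_decay; h = momentum * h + (1 - momentum) * g^2;
// g *= sqrt((h2 + delta) / (h + delta)); h2 = momentum * h2 + (1 - momentum) * g^2;
// w -= local_rate * g. Here h appears to track the running average of squared
// gradients and h2 the running average of squared updates. The __half specialization
// below performs the same math in fp32 and clips the results back to half.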
template<typename Gtype, typename Wtype>
__global__ void AdaDeltaRegUpdateAllAndClear(int N,
Gtype* g, Wtype *w, Wtype* h, Wtype* h2,
float momentum, float delta, float local_rate, float local_decay, bool reg_L2,
bool clear_grads) {
CUDA_KERNEL_LOOP(i, N) {
Wtype reg = reg_L2 ? w[i] : Wtype((Wtype(0) < w[i]) - (w[i] < Wtype(0)));
Wtype gr = Wtype(g[i]) + reg * local_decay;
Wtype hi = h[i] = momentum * h[i] + Wtype(1.F - momentum) * gr * gr;
gr *= sqrt((h2[i] + delta) / (hi + delta));
h2[i] = momentum * h2[i] + Wtype(1.F - momentum) * gr * gr;
gr *= local_rate;
w[i] -= gr;
g[i] = clear_grads ? Gtype(0) : Gtype(gr);
}
}
#pragma clang diagnostic pop
template<>
__global__ void AdaDeltaRegUpdateAllAndClear<__half, __half>(int N,
__half* g, __half *w, __half* h, __half* h2,
float momentum, float delta, float local_rate, float local_decay, bool reg_L2,
bool clear_grads) {
__half hz;
hz.x = 0;
CUDA_KERNEL_LOOP(i, N) {
float wf = __half2float(w[i]);
float hf = __half2float(h[i]);
float h2f = __half2float(h2[i]);
float gr = __half2float(g[i]);
// float reg = reg_L2 ? wf : ((hlt(hz, w[i]) ? 1.F : 0.F) - (hlt(w[i], hz) ? 1.F : 0.F));
float reg = reg_L2 ? wf : float((0.F < wf)-(wf < 0.F));
gr += reg * local_decay;
hf = momentum * hf + (1.F - momentum) * gr * gr;
gr *= sqrt((h2f + delta) / (hf + delta));
h2f = momentum * h2f + (1.F - momentum) * gr * gr;
gr *= local_rate;
wf -= gr;
h[i] = float2half_clip(hf);
h2[i] = float2half_clip(h2f);
w[i] = float2half_clip(wf);
g[i] = clear_grads ? hz : float2half_clip(gr);
}
}
template<typename Gtype, typename Wtype>
void adadelta_reg_update_and_clear_gpu(int N,
Gtype* g, Wtype* w, Wtype* h, Wtype* h2,
float momentum, float delta, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
hipblasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<hipblasHandle_t>(handle);
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(AdaDeltaRegUpdateAllAndClear, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, N,
g, w, h, h2,
momentum, delta, local_rate, local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template<>
void adadelta_reg_update_and_clear_gpu<float16, float16>(int N,
float16 *g, float16 *w, float16 *h, float16 *h2,
float momentum, float delta, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
hipblasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<hipblasHandle_t>(handle);
hipStream_t stream;
CUBLAS_CHECK(hipblasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AdaDeltaRegUpdateAllAndClear), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
N, reinterpret_cast<__half*>(g), reinterpret_cast<__half*>(w),
reinterpret_cast<__half*>(h), reinterpret_cast<__half*>(h2),
momentum, delta, local_rate, local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template void adadelta_reg_update_and_clear_gpu<float16, float>(int, float16*, float*, float*,
float*, float, float, float, const std::string&, float, void*, bool);
template void adadelta_reg_update_and_clear_gpu<float16, double>(int, float16*, double*, double*,
double*, float, float, float, const std::string&, float, void*, bool);
template void adadelta_reg_update_and_clear_gpu<float, float>(int, float*, float*, float*,
float*, float, float, float, const std::string&, float, void*, bool);
template void adadelta_reg_update_and_clear_gpu<float, double>(int, float*, double*, double*,
double*, float, float, float, const std::string&, float, void*, bool);
template void adadelta_reg_update_and_clear_gpu<float, float16>(int, float*, float16*, float16*,
float16*, float, float, float, const std::string&, float, void*, bool);
template void adadelta_reg_update_and_clear_gpu<double, float>(int, double*, float*, float*,
float*, float, float, float, const std::string&, float, void*, bool);
template void adadelta_reg_update_and_clear_gpu<double, double>(int, double*, double*, double*,
double*, float, float, float, const std::string&, float, void*, bool);
template void adadelta_reg_update_and_clear_gpu<double, float16>(int, double*, float16*, float16*,
float16*, float, float, float, const std::string&, float, void*, bool);
} // namespace caffe
| eebac494b47e5c518165091273b366630b0bac7e.cu | #include <string>
#include <cuda_fp16.h>
#include "caffe/util/gpu_math_functions.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
#pragma clang diagnostic push
#pragma ide diagnostic ignored "CannotResolve"
template<typename Gtype, typename Wtype>
__global__ void AdaDeltaRegUpdateAllAndClear(int N,
Gtype* g, Wtype *w, Wtype* h, Wtype* h2,
float momentum, float delta, float local_rate, float local_decay, bool reg_L2,
bool clear_grads) {
CUDA_KERNEL_LOOP(i, N) {
Wtype reg = reg_L2 ? w[i] : Wtype((Wtype(0) < w[i]) - (w[i] < Wtype(0)));
Wtype gr = Wtype(g[i]) + reg * local_decay;
Wtype hi = h[i] = momentum * h[i] + Wtype(1.F - momentum) * gr * gr;
gr *= sqrt((h2[i] + delta) / (hi + delta));
h2[i] = momentum * h2[i] + Wtype(1.F - momentum) * gr * gr;
gr *= local_rate;
w[i] -= gr;
g[i] = clear_grads ? Gtype(0) : Gtype(gr);
}
}
#pragma clang diagnostic pop
template<>
__global__ void AdaDeltaRegUpdateAllAndClear<__half, __half>(int N,
__half* g, __half *w, __half* h, __half* h2,
float momentum, float delta, float local_rate, float local_decay, bool reg_L2,
bool clear_grads) {
__half hz;
hz.x = 0;
CUDA_KERNEL_LOOP(i, N) {
float wf = __half2float(w[i]);
float hf = __half2float(h[i]);
float h2f = __half2float(h2[i]);
float gr = __half2float(g[i]);
// float reg = reg_L2 ? wf : ((hlt(hz, w[i]) ? 1.F : 0.F) - (hlt(w[i], hz) ? 1.F : 0.F));
float reg = reg_L2 ? wf : float((0.F < wf)-(wf < 0.F));
gr += reg * local_decay;
hf = momentum * hf + (1.F - momentum) * gr * gr;
gr *= sqrt((h2f + delta) / (hf + delta));
h2f = momentum * h2f + (1.F - momentum) * gr * gr;
gr *= local_rate;
wf -= gr;
h[i] = float2half_clip(hf);
h2[i] = float2half_clip(h2f);
w[i] = float2half_clip(wf);
g[i] = clear_grads ? hz : float2half_clip(gr);
}
}
template<typename Gtype, typename Wtype>
void adadelta_reg_update_and_clear_gpu(int N,
Gtype* g, Wtype* w, Wtype* h, Wtype* h2,
float momentum, float delta, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
cublasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<cublasHandle_t>(handle);
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
AdaDeltaRegUpdateAllAndClear // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>> (N,
g, w, h, h2,
momentum, delta, local_rate, local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template<>
void adadelta_reg_update_and_clear_gpu<float16, float16>(int N,
float16 *g, float16 *w, float16 *h, float16 *h2,
float momentum, float delta, float local_rate, const std::string& reg_type, float local_decay,
void* handle, bool clear_grads) {
cublasHandle_t cublas_handle =
handle == nullptr ? Caffe::cublas_handle() : reinterpret_cast<cublasHandle_t>(handle);
cudaStream_t stream;
CUBLAS_CHECK(cublasGetStream(cublas_handle, &stream));
// NOLINT_NEXT_LINE(whitespace/operators)
AdaDeltaRegUpdateAllAndClear<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, stream>>>
(N, reinterpret_cast<__half*>(g), reinterpret_cast<__half*>(w),
reinterpret_cast<__half*>(h), reinterpret_cast<__half*>(h2),
momentum, delta, local_rate, local_decay, reg_type == "L2", clear_grads);
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template void adadelta_reg_update_and_clear_gpu<float16, float>(int, float16*, float*, float*,
float*, float, float, float, const std::string&, float, void*, bool);
template void adadelta_reg_update_and_clear_gpu<float16, double>(int, float16*, double*, double*,
double*, float, float, float, const std::string&, float, void*, bool);
template void adadelta_reg_update_and_clear_gpu<float, float>(int, float*, float*, float*,
float*, float, float, float, const std::string&, float, void*, bool);
template void adadelta_reg_update_and_clear_gpu<float, double>(int, float*, double*, double*,
double*, float, float, float, const std::string&, float, void*, bool);
template void adadelta_reg_update_and_clear_gpu<float, float16>(int, float*, float16*, float16*,
float16*, float, float, float, const std::string&, float, void*, bool);
template void adadelta_reg_update_and_clear_gpu<double, float>(int, double*, float*, float*,
float*, float, float, float, const std::string&, float, void*, bool);
template void adadelta_reg_update_and_clear_gpu<double, double>(int, double*, double*, double*,
double*, float, float, float, const std::string&, float, void*, bool);
template void adadelta_reg_update_and_clear_gpu<double, float16>(int, double*, float16*, float16*,
float16*, float, float, float, const std::string&, float, void*, bool);
} // namespace caffe
|
97a4237dd53696b3833dac6b89cc676f5906006f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <pcl/gpu/utils/device/funcattrib.hpp>
//#include <pcl/gpu/utils/device/block.hpp>
//#include <pcl/gpu/utils/device/warp.hpp>
namespace pcl
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Full Volume Scan6
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
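// FullScan6 walks the whole TSDF volume and extracts the zero-level surface as a
// point cloud: for every observed voxel (weight != 0, tsdf != 1) it checks the +x,
// +y and +z neighbours for a sign change of the TSDF and, where one is found, emits
// a point interpolated between the two voxel centres. Points are staged per warp in
// shared memory and compacted into the global output buffer.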
struct FullScan6
{
PtrStep<short2> volume;
float3 cell_size;
mutable PtrSz<PointType> output;
__device__ __forceinline__ float
fetch (int x, int y, int z, int& weight) const
{
float tsdf;
unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x], tsdf, weight);
return tsdf;
}
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if CUDART_VERSION >= 9000
if (__all_sync (__activemask (), x >= VOLUME_X)
|| __all_sync (__activemask (), y >= VOLUME_Y))
return;
#else
if (__all (x >= VOLUME_X) || __all (y >= VOLUME_Y))
return;
#endif
float3 V;
V.x = (x + 0.5f) * cell_size.x;
V.y = (y + 0.5f) * cell_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < VOLUME_Z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < VOLUME_X && y < VOLUME_Y)
{
int W;
float F = fetch (x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * cell_size.z;
//process dx
if (x + 1 < VOLUME_X)
{
int Wn;
float Fn = fetch (x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + cell_size.x;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.x = (V.x * fabs (Fn) + Vnx * fabs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (x + 1 < VOLUME_X) */
//process dy
if (y + 1 < VOLUME_Y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + cell_size.y;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.y = (V.y * fabs (Fn) + Vny * fabs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (y + 1 < VOLUME_Y) */
//process dz
//if (z + 1 < VOLUME_Z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + cell_size.z;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.z = (V.z * fabs (Fn) + Vnz * fabs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (z + 1 < VOLUME_Z) */
} /* if (W != 0 && F != 1.f) */
} /* if (x < VOLUME_X && y < VOLUME_Y) */
#if CUDART_VERSION >= 9000
int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
+ __popc (__ballot_sync (__activemask (), local_count > 1))
+ __popc (__ballot_sync (__activemask (), local_count > 2));
#else
/// note: we have filled the points array at the current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#endif
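// Warp-level compaction: the ballots above count how many points this warp produced,
// an exclusive scan over the per-lane counts yields each lane's offset, lane 0
// atomically reserves a slot range in the global output, and the warp then copies
// its staged points from shared memory into that range.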
if (total_warp > 0)
{
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
PointType *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
store_point_type (x, y, z, pos);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < VOLUME_Z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min ((int)output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
} /* operator() */
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float4* ptr) const {
*ptr = make_float4 (x, y, z, 0);
}
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float3* ptr) const {
*ptr = make_float3 (x, y, z);
}
};
__global__ void
extractKernel (const FullScan6 fs) {
fs ();
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
size_t
pcl::device::extractCloud (const PtrStep<short2>& volume, const float3& volume_size,
PtrSz<PointType> output)
{
FullScan6 fs;
fs.volume = volume;
fs.cell_size.x = volume_size.x / VOLUME_X;
fs.cell_size.y = volume_size.y / VOLUME_Y;
fs.cell_size.z = volume_size.z / VOLUME_Z;
fs.output = output;
dim3 block (CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
//hipFuncSetCacheConfig(extractKernel, hipFuncCachePreferL1);
//printFuncAttrib(extractKernel);
hipLaunchKernelGGL(( extractKernel), dim3(grid), dim3(block), 0, 0, fs);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
int size;
cudaSafeCall ( hipMemcpyFromSymbol (&size, output_count, sizeof(size)) );
return (size_t)size;
}
namespace pcl
{
namespace device
{
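// ExtractNormals estimates a surface normal for every extracted point as the central
// difference of the trilinearly interpolated TSDF along x, y and z (normalized
// afterwards); points whose voxel lies too close to the volume border keep NaN
// normals.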
template<typename NormalType>
struct ExtractNormals
{
float3 cell_size;
PtrStep<short2> volume;
PtrSz<PointType> points;
mutable NormalType* output;
__device__ __forceinline__ float
readTsdf (int x, int y, int z) const
{
return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]);
}
__device__ __forceinline__ float3
fetchPoint (int idx) const
{
PointType p = points.data[idx];
return make_float3 (p.x, p.y, p.z);
}
__device__ __forceinline__ void
storeNormal (int idx, float3 normal) const
{
NormalType n;
n.x = normal.x; n.y = normal.y; n.z = normal.z;
output[idx] = n;
}
__device__ __forceinline__ int3
getVoxel (const float3& point) const
{
int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity
int vy = __float2int_rd (point.y / cell_size.y);
int vz = __float2int_rd (point.z / cell_size.z);
return make_int3 (vx, vy, vz);
}
__device__ __forceinline__ void
operator () () const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = fetchPoint (idx);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2)
{
float3 t;
t = point;
t.x += cell_size.x;
float Fx1 = interpolateTrilineary (t);
t = point;
t.x -= cell_size.x;
float Fx2 = interpolateTrilineary (t);
n.x = (Fx1 - Fx2);
t = point;
t.y += cell_size.y;
float Fy1 = interpolateTrilineary (t);
t = point;
t.y -= cell_size.y;
float Fy2 = interpolateTrilineary (t);
n.y = (Fy1 - Fy2);
t = point;
t.z += cell_size.z;
float Fz1 = interpolateTrilineary (t);
t = point;
t.z -= cell_size.z;
float Fz2 = interpolateTrilineary (t);
n.z = (Fz1 - Fz2);
n = normalized (n);
}
storeNormal (idx, n);
}
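// Trilinear interpolation of the TSDF at an arbitrary point: snap to the lower of
// the eight surrounding voxel centres, compute the fractional offsets a, b, c and
// blend the eight readTsdf samples accordingly.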
__device__ __forceinline__ float
interpolateTrilineary (const float3& point) const
{
int3 g = getVoxel (point);
float vx = (g.x + 0.5f) * cell_size.x;
float vy = (g.y + 0.5f) * cell_size.y;
float vz = (g.z + 0.5f) * cell_size.z;
g.x = (point.x < vx) ? (g.x - 1) : g.x;
g.y = (point.y < vy) ? (g.y - 1) : g.y;
g.z = (point.z < vz) ? (g.z - 1) : g.z;
float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x;
float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y;
float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z;
float res = readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - a) * (1 - b) * (1 - c) +
readTsdf (g.x + 0, g.y + 0, g.z + 1) * (1 - a) * (1 - b) * c +
readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - a) * b * (1 - c) +
readTsdf (g.x + 0, g.y + 1, g.z + 1) * (1 - a) * b * c +
readTsdf (g.x + 1, g.y + 0, g.z + 0) * a * (1 - b) * (1 - c) +
readTsdf (g.x + 1, g.y + 0, g.z + 1) * a * (1 - b) * c +
readTsdf (g.x + 1, g.y + 1, g.z + 0) * a * b * (1 - c) +
readTsdf (g.x + 1, g.y + 1, g.z + 1) * a * b * c;
return res;
}
};
template<typename NormalType>
__global__ void
extractNormalsKernel (const ExtractNormals<NormalType> en) {
en ();
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename NormalType> void
pcl::device::extractNormals (const PtrStep<short2>& volume, const float3& volume_size,
const PtrSz<PointType>& points, NormalType* output)
{
ExtractNormals<NormalType> en;
en.volume = volume;
en.cell_size.x = volume_size.x / VOLUME_X;
en.cell_size.y = volume_size.y / VOLUME_Y;
en.cell_size.z = volume_size.z / VOLUME_Z;
en.points = points;
en.output = output;
dim3 block (256);
dim3 grid (divUp (points.size, block.x));
hipLaunchKernelGGL(( extractNormalsKernel), dim3(grid), dim3(block), 0, 0, en);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
using namespace pcl::device;
template void pcl::device::extractNormals<PointType>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, PointType * output);
template void pcl::device::extractNormals<float8>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, float8 * output);
| 97a4237dd53696b3833dac6b89cc676f5906006f.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <pcl/gpu/utils/device/funcattrib.hpp>
//#include <pcl/gpu/utils/device/block.hpp>
//#include <pcl/gpu/utils/device/warp.hpp>
namespace pcl
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Full Volume Scan6
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
struct FullScan6
{
PtrStep<short2> volume;
float3 cell_size;
mutable PtrSz<PointType> output;
__device__ __forceinline__ float
fetch (int x, int y, int z, int& weight) const
{
float tsdf;
unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x], tsdf, weight);
return tsdf;
}
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if CUDART_VERSION >= 9000
if (__all_sync (__activemask (), x >= VOLUME_X)
|| __all_sync (__activemask (), y >= VOLUME_Y))
return;
#else
if (__all (x >= VOLUME_X) || __all (y >= VOLUME_Y))
return;
#endif
float3 V;
V.x = (x + 0.5f) * cell_size.x;
V.y = (y + 0.5f) * cell_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < VOLUME_Z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < VOLUME_X && y < VOLUME_Y)
{
int W;
float F = fetch (x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * cell_size.z;
//process dx
if (x + 1 < VOLUME_X)
{
int Wn;
float Fn = fetch (x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + cell_size.x;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.x = (V.x * fabs (Fn) + Vnx * fabs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (x + 1 < VOLUME_X) */
//process dy
if (y + 1 < VOLUME_Y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + cell_size.y;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.y = (V.y * fabs (Fn) + Vny * fabs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (y + 1 < VOLUME_Y) */
//process dz
//if (z + 1 < VOLUME_Z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + cell_size.z;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.z = (V.z * fabs (Fn) + Vnz * fabs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (z + 1 < VOLUME_Z) */
} /* if (W != 0 && F != 1.f) */
} /* if (x < VOLUME_X && y < VOLUME_Y) */
#if CUDART_VERSION >= 9000
int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
+ __popc (__ballot_sync (__activemask (), local_count > 1))
+ __popc (__ballot_sync (__activemask (), local_count > 2));
#else
/// note: we have filled the points array at the current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#endif
if (total_warp > 0)
{
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
PointType *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
store_point_type (x, y, z, pos);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < VOLUME_Z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min ((int)output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
} /* operator() */
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float4* ptr) const {
*ptr = make_float4 (x, y, z, 0);
}
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float3* ptr) const {
*ptr = make_float3 (x, y, z);
}
};
__global__ void
extractKernel (const FullScan6 fs) {
fs ();
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
size_t
pcl::device::extractCloud (const PtrStep<short2>& volume, const float3& volume_size,
PtrSz<PointType> output)
{
FullScan6 fs;
fs.volume = volume;
fs.cell_size.x = volume_size.x / VOLUME_X;
fs.cell_size.y = volume_size.y / VOLUME_Y;
fs.cell_size.z = volume_size.z / VOLUME_Z;
fs.output = output;
dim3 block (CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
//cudaFuncSetCacheConfig(extractKernel, cudaFuncCachePreferL1);
//printFuncAttrib(extractKernel);
extractKernel<<<grid, block>>>(fs);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
int size;
cudaSafeCall ( cudaMemcpyFromSymbol (&size, output_count, sizeof(size)) );
return (size_t)size;
}
namespace pcl
{
namespace device
{
template<typename NormalType>
struct ExtractNormals
{
float3 cell_size;
PtrStep<short2> volume;
PtrSz<PointType> points;
mutable NormalType* output;
__device__ __forceinline__ float
readTsdf (int x, int y, int z) const
{
return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]);
}
__device__ __forceinline__ float3
fetchPoint (int idx) const
{
PointType p = points.data[idx];
return make_float3 (p.x, p.y, p.z);
}
__device__ __forceinline__ void
storeNormal (int idx, float3 normal) const
{
NormalType n;
n.x = normal.x; n.y = normal.y; n.z = normal.z;
output[idx] = n;
}
__device__ __forceinline__ int3
getVoxel (const float3& point) const
{
int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity
int vy = __float2int_rd (point.y / cell_size.y);
int vz = __float2int_rd (point.z / cell_size.z);
return make_int3 (vx, vy, vz);
}
__device__ __forceinline__ void
operator () () const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = fetchPoint (idx);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2)
{
float3 t;
t = point;
t.x += cell_size.x;
float Fx1 = interpolateTrilineary (t);
t = point;
t.x -= cell_size.x;
float Fx2 = interpolateTrilineary (t);
n.x = (Fx1 - Fx2);
t = point;
t.y += cell_size.y;
float Fy1 = interpolateTrilineary (t);
t = point;
t.y -= cell_size.y;
float Fy2 = interpolateTrilineary (t);
n.y = (Fy1 - Fy2);
t = point;
t.z += cell_size.z;
float Fz1 = interpolateTrilineary (t);
t = point;
t.z -= cell_size.z;
float Fz2 = interpolateTrilineary (t);
n.z = (Fz1 - Fz2);
n = normalized (n);
}
storeNormal (idx, n);
}
__device__ __forceinline__ float
interpolateTrilineary (const float3& point) const
{
int3 g = getVoxel (point);
float vx = (g.x + 0.5f) * cell_size.x;
float vy = (g.y + 0.5f) * cell_size.y;
float vz = (g.z + 0.5f) * cell_size.z;
g.x = (point.x < vx) ? (g.x - 1) : g.x;
g.y = (point.y < vy) ? (g.y - 1) : g.y;
g.z = (point.z < vz) ? (g.z - 1) : g.z;
float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x;
float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y;
float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z;
float res = readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - a) * (1 - b) * (1 - c) +
readTsdf (g.x + 0, g.y + 0, g.z + 1) * (1 - a) * (1 - b) * c +
readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - a) * b * (1 - c) +
readTsdf (g.x + 0, g.y + 1, g.z + 1) * (1 - a) * b * c +
readTsdf (g.x + 1, g.y + 0, g.z + 0) * a * (1 - b) * (1 - c) +
readTsdf (g.x + 1, g.y + 0, g.z + 1) * a * (1 - b) * c +
readTsdf (g.x + 1, g.y + 1, g.z + 0) * a * b * (1 - c) +
readTsdf (g.x + 1, g.y + 1, g.z + 1) * a * b * c;
return res;
}
};
template<typename NormalType>
__global__ void
extractNormalsKernel (const ExtractNormals<NormalType> en) {
en ();
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename NormalType> void
pcl::device::extractNormals (const PtrStep<short2>& volume, const float3& volume_size,
const PtrSz<PointType>& points, NormalType* output)
{
ExtractNormals<NormalType> en;
en.volume = volume;
en.cell_size.x = volume_size.x / VOLUME_X;
en.cell_size.y = volume_size.y / VOLUME_Y;
en.cell_size.z = volume_size.z / VOLUME_Z;
en.points = points;
en.output = output;
dim3 block (256);
dim3 grid (divUp (points.size, block.x));
extractNormalsKernel<<<grid, block>>>(en);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
using namespace pcl::device;
template void pcl::device::extractNormals<PointType>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, PointType * output);
template void pcl::device::extractNormals<float8>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, float8 * output);
|
055d1c2736a4c8b5429829f088171dedd60b2128.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_2_back;
int xdim0_update_halo_kernel2_xvel_plus_2_back_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_2_back;
int ydim0_update_halo_kernel2_xvel_plus_2_back_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_2_back;
int xdim1_update_halo_kernel2_xvel_plus_2_back_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_2_back;
int ydim1_update_halo_kernel2_xvel_plus_2_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_2_back * (y) + \
xdim0_update_halo_kernel2_xvel_plus_2_back * \
ydim0_update_halo_kernel2_xvel_plus_2_back * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_2_back * (y) + \
xdim1_update_halo_kernel2_xvel_plus_2_back * \
ydim1_update_halo_kernel2_xvel_plus_2_back * (z))
// user function
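// For each grid point in the iteration range, copies xvel0/xvel1 from the point two
// cells further along z (relative offset (0, 0, +2)) onto the current point, for
// whichever of the two fields is flagged in `fields` - presumably filling the back
// (low-z) halo layer, as the kernel name suggests.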
__device__
inline void
update_halo_kernel2_xvel_plus_2_back_gpu(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 0, 2)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 0, 2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_plus_2_back(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_back +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_back *
ydim0_update_halo_kernel2_xvel_plus_2_back;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_back +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_back *
ydim1_update_halo_kernel2_xvel_plus_2_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_2_back_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_xvel_plus_2_back(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 77))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(77, "update_halo_kernel2_xvel_plus_2_back");
OPS_kernels[77].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_2_back_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_2_back_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_2_back_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_2_back_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_2_back, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_2_back_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_2_back, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_2_back_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_2_back, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_2_back_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_2_back, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_2_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[77].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_plus_2_back), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[77].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[77].mpi_time += t2 - t1;
OPS_kernels[77].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[77].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| 055d1c2736a4c8b5429829f088171dedd60b2128.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_2_back;
int xdim0_update_halo_kernel2_xvel_plus_2_back_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_2_back;
int ydim0_update_halo_kernel2_xvel_plus_2_back_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_2_back;
int xdim1_update_halo_kernel2_xvel_plus_2_back_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_2_back;
int ydim1_update_halo_kernel2_xvel_plus_2_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_2_back * (y) + \
xdim0_update_halo_kernel2_xvel_plus_2_back * \
ydim0_update_halo_kernel2_xvel_plus_2_back * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_2_back * (y) + \
xdim1_update_halo_kernel2_xvel_plus_2_back * \
ydim1_update_halo_kernel2_xvel_plus_2_back * (z))
// user function
__device__
inline void
update_halo_kernel2_xvel_plus_2_back_gpu(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 0, 2)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 0, 2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_xvel_plus_2_back(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_back +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_back *
ydim0_update_halo_kernel2_xvel_plus_2_back;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_back +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_back *
ydim1_update_halo_kernel2_xvel_plus_2_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_2_back_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_xvel_plus_2_back(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 77))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(77, "update_halo_kernel2_xvel_plus_2_back");
OPS_kernels[77].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_2_back_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_2_back_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_2_back_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_2_back_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_2_back, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_2_back_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_2_back, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_2_back_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_2_back, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_2_back_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_2_back, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_2_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[77].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_xvel_plus_2_back<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[77].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[77].mpi_time += t2 - t1;
OPS_kernels[77].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[77].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
7dd859d2187ad2510ce2cd89b4e96566c5f1792d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::embedding(const std::string& pcname,
const Tensor& input,
int num_entries,
int out_dim,
AggrMode aggr,
Initializer* kernel_initializer)
{
//assert(config.strategies.find(name) != config.strategies.end());
//ParallelConfig pc = config.strategies[name];
//IndexSpaceT<2> task_is = IndexSpaceT<2>(get_or_create_task_is(pc));
Embedding* embed = new Embedding(*this, pcname, input, num_entries,
out_dim, aggr, kernel_initializer);
layers.push_back(embed);
Parameter kernel;
kernel.tensor = embed->kernel;
kernel.op = embed;
parameters.push_back(kernel);
return embed->output;
}
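// A minimal construction sketch for the API above; the "embed1" name, the
// 1000000 x 64 sizes and this helper itself are illustrative and not part of
// the layer's interface.
static Tensor build_example_embedding(FFModel& ff,
                                      const Tensor& token_ids,
                                      Initializer* init)
{
  // one embedding row per vocabulary entry, 64 features per token,
  // aggregated by summing over the in_dim token slots of each sample
  return ff.embedding("embed1", token_ids, 1000000 /*num_entries*/,
                      64 /*out_dim*/, AGGR_MODE_SUM, init);
}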
Embedding::Embedding(FFModel& model,
const std::string& pcname,
const Tensor& _input,
                     //std::string name,
int num_entries, int outDim,
AggrMode _aggr,
Initializer* kernel_initializer)
: Op(pcname, _input), aggr(_aggr), profiling(model.config.profiling)
{
assert(_input.numDim == 2);
  // Retrieve the task index space for the op
task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is);
// Currently assume we can only partition over the sample dim
assert(part_rect.hi[0] == part_rect.lo[0]);
{
const int dims[2] = {inputs[0].adim[1], outDim};
output = model.create_tensor<2>(dims, task_is, DT_FLOAT);
}
{
const int dims[2] = {outDim, num_entries};
    // Embedding weights and linear weights can be partitioned in the same way
kernel = model.create_linear_weight<2>(dims, task_is, DT_FLOAT, kernel_initializer);
}
#ifdef DEADCODE
// Create kernel tensor
Rect<2> kernel_rect(Point<2>(0, 0), Point<2>(outDim-1, inDim-1));
FieldSpace fs = runtime->create_field_space(ctx);
FieldAllocator allocator = runtime->create_field_allocator(ctx, fs);
allocator.allocate_field(sizeof(float), FID_DATA);
IndexSpaceT<2> kernel_is = runtime->create_index_space(ctx, kernel_rect);
kernel.region = runtime->create_logical_region(ctx, kernel_is, fs);
{
int num_part_c = part_rect.hi[0] - part_rect.lo[0] + 1;
int extent_c = (outDim + num_part_c - 1) / num_part_c;
Rect<2> extent(Point<2>(0, 0), Point<2>(extent_c, inDim-1));
Transform<2, 2> transform;
transform[0][0] = extent_c; transform[0][1] = 0;
transform[1][0] = 0; transform[1][1] = 0;
IndexPartition ip = runtime->create_partition_by_restriction(
ctx, kernel_is, task_is, transform, extent);
kernel.part = runtime->get_logical_partition(
ctx, kernel.region, ip);
}
// Create kernel tensor gradients
Rect<3> kernel_grad_rect(Point<3>(0, 0, 0),
Point<3>(outDim-1, inDim-1, part_rect.hi[1] - part_rect.lo[1]));
IndexSpaceT<3> kernel_grad_is = runtime->create_index_space(
ctx, kernel_grad_rect);
kernel.region_grad = runtime->create_logical_region(
ctx, kernel_grad_is, fs);
{
int num_part_c = part_rect.hi[0] - part_rect.lo[0] + 1;
int extent_c = (outDim + num_part_c - 1) / num_part_c;
Rect<3> extent(Point<3>(0, 0, 0), Point<3>(extent_c, inDim-1, 0));
Transform<3, 2> transform;
transform[0][0] = extent_c; transform[0][1] = 0;
transform[1][0] = 0; transform[1][1] = 0;
transform[2][0] = 0; transform[2][1] = 1;
IndexPartition ip = runtime->create_partition_by_restriction(
ctx, kernel_grad_is, task_is, transform, extent);
kernel.part_grad = runtime->get_logical_partition(
ctx, kernel.region_grad, ip);
assert(runtime->is_index_partition_disjoint(ctx, ip));
assert(runtime->is_index_partition_complete(ctx, ip));
}
#endif
// Compute partition bound for input
Rect<2> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
// Currently assert input must have the same partition
// to avoid data movement
assert(false);
}
}
//__host__
//OpMeta* Embedding::init_task(const Task *task,
// const std::vector<PhysicalRegion> ®ions,
// Context ctx, Runtime* runtime)
//{}
void Embedding::init(const FFModel& ff)
{}
__global__
void embed_forward(const int64_t* input,
float* output,
const float* embed,
int out_dim,
int in_dim,
int batch_size,
AggrMode aggr)
{
CUDA_KERNEL_LOOP(i, batch_size * out_dim)
{
output[i] = 0;
int idx = i / out_dim;
int off = i % out_dim;
for (int j = 0; j < in_dim; j++) {
int64_t wordIdx = input[idx * in_dim + j];
output[i] += embed[wordIdx * out_dim + off];
if (aggr == AGGR_MODE_SUM) {
} else {
assert(aggr == AGGR_MODE_AVG);
output[i] /= in_dim;
}
}
}
}
__global__
void embed_backward(const int64_t* input,
const float* output,
float* embed,
int out_dim,
int in_dim,
int batch_size,
AggrMode aggr)
{
CUDA_KERNEL_LOOP(i, batch_size * out_dim)
{
int idx = i / out_dim;
int off = i % out_dim;
float gradient;
if (aggr == AGGR_MODE_SUM) {
gradient = output[i];
} else {
assert(aggr == AGGR_MODE_AVG);
gradient = output[i] / in_dim;
}
for (int j = 0; j < in_dim; j++) {
int64_t wordIdx = input[idx * in_dim + j];
atomicAdd(embed + wordIdx * out_dim + off, gradient);
}
}
}
/*
regions[0](I): input
regions[1](O): output
regions[2](I): kernel
*/
__host__
void Embedding::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
assert(regions.size() == 3);
assert(task->regions.size() == 3);
const Embedding* embed = (Embedding*) task->args;
TensorAccessorR<int64_t, 2> accInput(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> accOutput(
regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/);
TensorAccessorR<float, 2> accWeight(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
// Input matches Output
assert(accInput.rect.hi[1] == accOutput.rect.hi[1]);
assert(accInput.rect.lo[1] == accOutput.rect.lo[1]);
// Weight matches Output
assert(accWeight.rect.hi[1] == accOutput.rect.hi[0]);
assert(accWeight.rect.lo[1] == accOutput.rect.lo[0]);
int in_dim = accInput.rect.hi[0] - accInput.rect.lo[0] + 1;
int out_dim = accOutput.rect.hi[0] - accOutput.rect.lo[0] + 1;
int batch_size = accOutput.rect.hi[1] - accOutput.rect.lo[1] + 1;
hipLaunchKernelGGL(( embed_forward), dim3(GET_BLOCKS(accOutput.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
accInput.ptr, accOutput.ptr, accWeight.ptr, out_dim, in_dim, batch_size, embed->aggr);
checkCUDA(hipDeviceSynchronize());
if (embed->profiling) {
print_tensor<2, int64_t>(accInput.ptr, accInput.rect, "[Embedding:forward:input]");
print_tensor<2, float>(accWeight.ptr, accWeight.rect, "[Embedding:forward:weight]");
print_tensor<2, float>(accOutput.ptr, accOutput.rect, "[Embedding:forward:output]");
checkCUDA(hipDeviceSynchronize());
}
}
void Embedding::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
IndexLauncher launcher(EMBED_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Embedding)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0]: input
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// regions[1]: output
launcher.add_region_requirement(
RegionRequirement(output.part, 0/*projection*/,
WRITE_ONLY, EXCLUSIVE, output.region,
MAP_TO_ZC_MEMORY));
launcher.add_field(1, FID_DATA);
// regions[2]: weight
launcher.add_region_requirement(
RegionRequirement(kernel.part, 0/*projection*/,
READ_ONLY, EXCLUSIVE, kernel.region));
launcher.add_field(2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
void Embedding::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 3);
assert(task->regions.size() == 3);
const Embedding* embed = (Embedding*) task->args;
TensorAccessorR<int64_t, 2> accInput(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorR<float, 2> accOutput(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> accWeightGrad(
regions[2], task->regions[2], FID_DATA, ctx, runtime, true/*readOutput*/);
// Input matches Output
assert(accInput.rect.hi[1] == accOutput.rect.hi[1]);
assert(accInput.rect.lo[1] == accOutput.rect.lo[1]);
// WeightGrad matches Output
assert(accWeightGrad.rect.hi[1] - accWeightGrad.rect.lo[1] == accOutput.rect.hi[0] - accOutput.rect.lo[0]);
int in_dim = accInput.rect.hi[0] - accInput.rect.lo[0] + 1;
int out_dim = accOutput.rect.hi[0] - accOutput.rect.lo[0] + 1;
int batch_size = accOutput.rect.hi[1] - accOutput.rect.lo[1] + 1;
  // Explicitly initialize accWeightGrad to zero to avoid calling zero_gradients() before backward()
// as an optimization for DLRM
//assign_kernel<<<GET_BLOCKS(accWeightGrad.rect.volume()), CUDA_NUM_THREADS>>>(
// accWeightGrad.ptr, accWeightGrad.rect.volume(), 0.0f);
hipLaunchKernelGGL(( embed_backward), dim3(GET_BLOCKS(accOutput.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
accInput.ptr, accOutput.ptr, accWeightGrad.ptr, out_dim, in_dim, batch_size, embed->aggr);
checkCUDA(hipDeviceSynchronize());
if (embed->profiling) {
print_tensor<2, float>(accOutput.ptr, accOutput.rect, "[Embedding:backward:output_grad]");
print_tensor<2, float>(accWeightGrad.ptr, accWeightGrad.rect, "[Embedding:backward:weight_grad]");
print_tensor<2, int64_t>(accInput.ptr, accInput.rect, "[Embedding:backward:input]");
checkCUDA(hipDeviceSynchronize());
}
}
void Embedding::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
IndexLauncher launcher(EMBED_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Embedding)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0]: input
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// regions[1]: output_grad
launcher.add_region_requirement(
RegionRequirement(output.part_grad, 0/*projection*/,
READ_ONLY, EXCLUSIVE, output.region_grad,
MAP_TO_ZC_MEMORY));
launcher.add_field(1, FID_DATA);
// regions[2]: weight_grad
launcher.add_region_requirement(
RegionRequirement(kernel.part_grad, 0/*projection*/,
READ_WRITE, EXCLUSIVE, kernel.region_grad));
launcher.add_field(2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
| 7dd859d2187ad2510ce2cd89b4e96566c5f1792d.cu | /* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::embedding(const std::string& pcname,
const Tensor& input,
int num_entries,
int out_dim,
AggrMode aggr,
Initializer* kernel_initializer)
{
//assert(config.strategies.find(name) != config.strategies.end());
//ParallelConfig pc = config.strategies[name];
//IndexSpaceT<2> task_is = IndexSpaceT<2>(get_or_create_task_is(pc));
Embedding* embed = new Embedding(*this, pcname, input, num_entries,
out_dim, aggr, kernel_initializer);
layers.push_back(embed);
Parameter kernel;
kernel.tensor = embed->kernel;
kernel.op = embed;
parameters.push_back(kernel);
return embed->output;
}
Embedding::Embedding(FFModel& model,
const std::string& pcname,
const Tensor& _input,
                     //std::string name,
int num_entries, int outDim,
AggrMode _aggr,
Initializer* kernel_initializer)
: Op(pcname, _input), aggr(_aggr), profiling(model.config.profiling)
{
assert(_input.numDim == 2);
  // Retrieve the task index space for the op
task_is = IndexSpaceT<2>(model.get_or_create_task_is(2, pcname));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is);
// Currently assume we can only partition over the sample dim
assert(part_rect.hi[0] == part_rect.lo[0]);
{
const int dims[2] = {inputs[0].adim[1], outDim};
output = model.create_tensor<2>(dims, task_is, DT_FLOAT);
}
{
const int dims[2] = {outDim, num_entries};
    // Embedding weights and linear weights can be partitioned in the same way
kernel = model.create_linear_weight<2>(dims, task_is, DT_FLOAT, kernel_initializer);
}
#ifdef DEADCODE
// Create kernel tensor
Rect<2> kernel_rect(Point<2>(0, 0), Point<2>(outDim-1, inDim-1));
FieldSpace fs = runtime->create_field_space(ctx);
FieldAllocator allocator = runtime->create_field_allocator(ctx, fs);
allocator.allocate_field(sizeof(float), FID_DATA);
IndexSpaceT<2> kernel_is = runtime->create_index_space(ctx, kernel_rect);
kernel.region = runtime->create_logical_region(ctx, kernel_is, fs);
{
int num_part_c = part_rect.hi[0] - part_rect.lo[0] + 1;
int extent_c = (outDim + num_part_c - 1) / num_part_c;
Rect<2> extent(Point<2>(0, 0), Point<2>(extent_c, inDim-1));
Transform<2, 2> transform;
transform[0][0] = extent_c; transform[0][1] = 0;
transform[1][0] = 0; transform[1][1] = 0;
IndexPartition ip = runtime->create_partition_by_restriction(
ctx, kernel_is, task_is, transform, extent);
kernel.part = runtime->get_logical_partition(
ctx, kernel.region, ip);
}
// Create kernel tensor gradients
Rect<3> kernel_grad_rect(Point<3>(0, 0, 0),
Point<3>(outDim-1, inDim-1, part_rect.hi[1] - part_rect.lo[1]));
IndexSpaceT<3> kernel_grad_is = runtime->create_index_space(
ctx, kernel_grad_rect);
kernel.region_grad = runtime->create_logical_region(
ctx, kernel_grad_is, fs);
{
int num_part_c = part_rect.hi[0] - part_rect.lo[0] + 1;
int extent_c = (outDim + num_part_c - 1) / num_part_c;
Rect<3> extent(Point<3>(0, 0, 0), Point<3>(extent_c, inDim-1, 0));
Transform<3, 2> transform;
transform[0][0] = extent_c; transform[0][1] = 0;
transform[1][0] = 0; transform[1][1] = 0;
transform[2][0] = 0; transform[2][1] = 1;
IndexPartition ip = runtime->create_partition_by_restriction(
ctx, kernel_grad_is, task_is, transform, extent);
kernel.part_grad = runtime->get_logical_partition(
ctx, kernel.region_grad, ip);
assert(runtime->is_index_partition_disjoint(ctx, ip));
assert(runtime->is_index_partition_complete(ctx, ip));
}
#endif
// Compute partition bound for input
Rect<2> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
// Currently assert input must have the same partition
// to avoid data movement
assert(false);
}
}
//__host__
//OpMeta* Embedding::init_task(const Task *task,
// const std::vector<PhysicalRegion> ®ions,
// Context ctx, Runtime* runtime)
//{}
void Embedding::init(const FFModel& ff)
{}
__global__
void embed_forward(const int64_t* input,
float* output,
const float* embed,
int out_dim,
int in_dim,
int batch_size,
AggrMode aggr)
{
CUDA_KERNEL_LOOP(i, batch_size * out_dim)
{
output[i] = 0;
int idx = i / out_dim;
int off = i % out_dim;
for (int j = 0; j < in_dim; j++) {
int64_t wordIdx = input[idx * in_dim + j];
output[i] += embed[wordIdx * out_dim + off];
if (aggr == AGGR_MODE_SUM) {
} else {
assert(aggr == AGGR_MODE_AVG);
output[i] /= in_dim;
}
}
}
}
__global__
void embed_backward(const int64_t* input,
const float* output,
float* embed,
int out_dim,
int in_dim,
int batch_size,
AggrMode aggr)
{
CUDA_KERNEL_LOOP(i, batch_size * out_dim)
{
int idx = i / out_dim;
int off = i % out_dim;
float gradient;
if (aggr == AGGR_MODE_SUM) {
gradient = output[i];
} else {
assert(aggr == AGGR_MODE_AVG);
gradient = output[i] / in_dim;
}
for (int j = 0; j < in_dim; j++) {
int64_t wordIdx = input[idx * in_dim + j];
atomicAdd(embed + wordIdx * out_dim + off, gradient);
}
}
}
/*
regions[0](I): input
regions[1](O): output
regions[2](I): kernel
*/
__host__
void Embedding::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
assert(regions.size() == 3);
assert(task->regions.size() == 3);
const Embedding* embed = (Embedding*) task->args;
TensorAccessorR<int64_t, 2> accInput(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> accOutput(
regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/);
TensorAccessorR<float, 2> accWeight(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
// Input matches Output
assert(accInput.rect.hi[1] == accOutput.rect.hi[1]);
assert(accInput.rect.lo[1] == accOutput.rect.lo[1]);
// Weight matches Output
assert(accWeight.rect.hi[1] == accOutput.rect.hi[0]);
assert(accWeight.rect.lo[1] == accOutput.rect.lo[0]);
int in_dim = accInput.rect.hi[0] - accInput.rect.lo[0] + 1;
int out_dim = accOutput.rect.hi[0] - accOutput.rect.lo[0] + 1;
int batch_size = accOutput.rect.hi[1] - accOutput.rect.lo[1] + 1;
embed_forward<<<GET_BLOCKS(accOutput.rect.volume()), CUDA_NUM_THREADS>>>(
accInput.ptr, accOutput.ptr, accWeight.ptr, out_dim, in_dim, batch_size, embed->aggr);
checkCUDA(cudaDeviceSynchronize());
if (embed->profiling) {
print_tensor<2, int64_t>(accInput.ptr, accInput.rect, "[Embedding:forward:input]");
print_tensor<2, float>(accWeight.ptr, accWeight.rect, "[Embedding:forward:weight]");
print_tensor<2, float>(accOutput.ptr, accOutput.rect, "[Embedding:forward:output]");
checkCUDA(cudaDeviceSynchronize());
}
}
void Embedding::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
IndexLauncher launcher(EMBED_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Embedding)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0]: input
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// regions[1]: output
launcher.add_region_requirement(
RegionRequirement(output.part, 0/*projection*/,
WRITE_ONLY, EXCLUSIVE, output.region,
MAP_TO_ZC_MEMORY));
launcher.add_field(1, FID_DATA);
// regions[2]: weight
launcher.add_region_requirement(
RegionRequirement(kernel.part, 0/*projection*/,
READ_ONLY, EXCLUSIVE, kernel.region));
launcher.add_field(2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
void Embedding::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 3);
assert(task->regions.size() == 3);
const Embedding* embed = (Embedding*) task->args;
TensorAccessorR<int64_t, 2> accInput(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorR<float, 2> accOutput(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
TensorAccessorW<float, 2> accWeightGrad(
regions[2], task->regions[2], FID_DATA, ctx, runtime, true/*readOutput*/);
// Input matches Output
assert(accInput.rect.hi[1] == accOutput.rect.hi[1]);
assert(accInput.rect.lo[1] == accOutput.rect.lo[1]);
// WeightGrad matches Output
assert(accWeightGrad.rect.hi[1] - accWeightGrad.rect.lo[1] == accOutput.rect.hi[0] - accOutput.rect.lo[0]);
int in_dim = accInput.rect.hi[0] - accInput.rect.lo[0] + 1;
int out_dim = accOutput.rect.hi[0] - accOutput.rect.lo[0] + 1;
int batch_size = accOutput.rect.hi[1] - accOutput.rect.lo[1] + 1;
  // Explicitly initialize accWeightGrad to zero to avoid calling zero_gradients() before backward()
// as an optimization for DLRM
//assign_kernel<<<GET_BLOCKS(accWeightGrad.rect.volume()), CUDA_NUM_THREADS>>>(
// accWeightGrad.ptr, accWeightGrad.rect.volume(), 0.0f);
embed_backward<<<GET_BLOCKS(accOutput.rect.volume()), CUDA_NUM_THREADS>>>(
accInput.ptr, accOutput.ptr, accWeightGrad.ptr, out_dim, in_dim, batch_size, embed->aggr);
checkCUDA(cudaDeviceSynchronize());
if (embed->profiling) {
print_tensor<2, float>(accOutput.ptr, accOutput.rect, "[Embedding:backward:output_grad]");
print_tensor<2, float>(accWeightGrad.ptr, accWeightGrad.rect, "[Embedding:backward:weight_grad]");
print_tensor<2, int64_t>(accInput.ptr, accInput.rect, "[Embedding:backward:input]");
checkCUDA(cudaDeviceSynchronize());
}
}
void Embedding::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
IndexLauncher launcher(EMBED_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Embedding)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0]: input
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
// regions[1]: output_grad
launcher.add_region_requirement(
RegionRequirement(output.part_grad, 0/*projection*/,
READ_ONLY, EXCLUSIVE, output.region_grad,
MAP_TO_ZC_MEMORY));
launcher.add_field(1, FID_DATA);
// regions[2]: weight_grad
launcher.add_region_requirement(
RegionRequirement(kernel.part_grad, 0/*projection*/,
READ_WRITE, EXCLUSIVE, kernel.region_grad));
launcher.add_field(2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
|
0841ea396be9b9aae89e1f1809df740bef0aed8a.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2010
@generated d Wed Nov 14 22:53:53 2012
@author Ichitaro Yamazaki
*/
#include "common_magma.h"
#define PRECISION_d
#include "commonblas.h"
//
// m, n - dimensions in the output (ha) matrix.
// This routine copies the dat matrix from the GPU
// to ha on the CPU. In addition, the output matrix
// is transposed. The routine uses a buffer of size
// 2*lddb*nb pointed to by dB (lddb > m) on the GPU.
//  Note that lda >= m and ldda >= n.
//
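//
//  The dB workspace described above holds two nb-column panels of leading
//  dimension lddb, one per stream. A sizing sketch in double-precision
//  elements; this helper is illustrative only and is not a MAGMA routine.
//
static inline magma_int_t dgetmatrix_transpose_work_elems(magma_int_t lddb, magma_int_t nb)
{
    return 2 * lddb * nb;
}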
extern "C" void
magmablas_dgetmatrix_transpose_mgpu(
magma_int_t ngpus, hipStream_t stream[][2],
double **dat, magma_int_t ldda,
double *ha, magma_int_t lda,
double **db, magma_int_t lddb,
magma_int_t m, magma_int_t n, magma_int_t nb)
{
#define A(j) (ha + (j)*lda)
#define dB(d, j) (db[(d)] + (j)*nb*lddb)
#define dAT(d, j) (dat[(d)] + (j)*nb)
int nstreams = 2, j, j_local, d, id, ib;
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
if (lda < m || ngpus*ldda < n || lddb < m){
printf( "Wrong arguments in magmablas_dgetmatrix_transpose_mgpu (%d<%d), (%d*%d<%d), or (%d<%d).\n",
lda, m, ngpus, ldda, n, lddb, m );
return;
}
/* Move data from GPU to CPU using two buffers; first transpose the data on the GPU */
for(j=0; j<n; j+=nb){
d = (j/nb)%ngpus;
j_local = (j/nb)/ngpus;
id = j_local%nstreams;
magma_setdevice(d);
ib = min(n-j, nb);
magmablasSetKernelStream(stream[d][id]);
magmablas_dtranspose2( dB(d, id), lddb,
dAT(d, j_local), ldda,
ib, m);
magma_dgetmatrix_async( m, ib,
dB(d, id), lddb,
A(j), lda,
stream[d][id] );
}
}
| 0841ea396be9b9aae89e1f1809df740bef0aed8a.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2010
@generated d Wed Nov 14 22:53:53 2012
@author Ichitaro Yamazaki
*/
#include "common_magma.h"
#define PRECISION_d
#include "commonblas.h"
//
// m, n - dimensions in the output (ha) matrix.
// This routine copies the dat matrix from the GPU
// to ha on the CPU. In addition, the output matrix
// is transposed. The routine uses a buffer of size
// 2*lddb*nb pointed to by dB (lddb > m) on the GPU.
//  Note that lda >= m and ldda >= n.
//
extern "C" void
magmablas_dgetmatrix_transpose_mgpu(
magma_int_t ngpus, cudaStream_t stream[][2],
double **dat, magma_int_t ldda,
double *ha, magma_int_t lda,
double **db, magma_int_t lddb,
magma_int_t m, magma_int_t n, magma_int_t nb)
{
#define A(j) (ha + (j)*lda)
#define dB(d, j) (db[(d)] + (j)*nb*lddb)
#define dAT(d, j) (dat[(d)] + (j)*nb)
int nstreams = 2, j, j_local, d, id, ib;
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
if (lda < m || ngpus*ldda < n || lddb < m){
printf( "Wrong arguments in magmablas_dgetmatrix_transpose_mgpu (%d<%d), (%d*%d<%d), or (%d<%d).\n",
lda, m, ngpus, ldda, n, lddb, m );
return;
}
/* Move data from GPU to CPU using two buffers; first transpose the data on the GPU */
for(j=0; j<n; j+=nb){
d = (j/nb)%ngpus;
j_local = (j/nb)/ngpus;
id = j_local%nstreams;
magma_setdevice(d);
ib = min(n-j, nb);
magmablasSetKernelStream(stream[d][id]);
magmablas_dtranspose2( dB(d, id), lddb,
dAT(d, j_local), ldda,
ib, m);
magma_dgetmatrix_async( m, ib,
dB(d, id), lddb,
A(j), lda,
stream[d][id] );
}
}
|
4584e3a7a84f2126a3c51f86c9c6edabcebd94e6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include "glm/glm.hpp"
#include "utilities.h"
#include "kernel.h"
#include "gridStruct.h"
#include "intersections.h"
#if PRESSURE == 1
#define DELTA_Q (float)(0.1*H)
#define PRESSURE_K 0.1
#define PRESSURE_N 6
#endif
//GLOBALS
dim3 threadsPerBlock(blockSize);
int totalGridSize = (2 * (BOX_X + 2)) * (2 * (BOX_Y + 2)) * (BOX_Z + 2);
int numParticles;
int numGenerated;
const __device__ float starMass = 5e10;
const float scene_scale = 1; //size of the height map in simulation space
staticGeom* cudageoms;
int numGeoms;
particle* particles;
int* neighbors;
int* num_neighbors;
int* grid_idx;
int* grid;
bool hitonce = false;
float wallMove = 0.0f;
using namespace glm;
void checkCUDAError(const char *msg, int line = -1)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
if( line >= 0 )
{
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
__host__ __device__
unsigned int hash(unsigned int a){
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
//Function that generates pseudo-random static (noise) per thread.
__host__ __device__
glm::vec3 generateRandomNumberFromThread(float time, int index)
{
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//Update the vertex buffer object
//(The VBO is where OpenGL looks for the particle positions)
__global__
void sendToVBO(int N, particle* particles, float * vbo, int width, int height, float s_scale)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale_w = 1.0f;
float c_scale_h = 1.0f;
float c_scale_z = 1.0f;
if(index<N)
{
vbo[4*index+0] = particles[index].position.x*c_scale_w;
vbo[4*index+1] = particles[index].position.y*c_scale_h;
vbo[4*index+2] = particles[index].position.z*c_scale_z;
vbo[4*index+3] = 1;
}
}
/*************************************
* Device Methods for Solver
*************************************/
__device__ float wPoly6Kernel(glm::vec3 p_i, glm::vec3 p_j){
float r = glm::length(p_i - p_j);
float hr_term = (H * H - r * r);
float div = 64.0 * PI * POW_H_9;
return 315.0f / div * hr_term * hr_term * hr_term;
}
__device__ glm::vec3 wGradientSpikyKernel(glm::vec3 p_i, glm::vec3 p_j){
glm::vec3 r = p_i - p_j;
float hr_term = H - glm::length(r);
float gradient_magnitude = 45.0f / (PI * POW_H_6) * hr_term * hr_term;
float div = (glm::length(r) + 0.001f);
return gradient_magnitude * 1.0f / div * r;
}
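// A quick decay sketch for the W_poly6 smoothing kernel above (assumes only
// the H support radius from kernel.h): W_poly6 peaks at r = 0 and falls
// toward zero as r approaches H. Illustrative only; the solver never calls it.
__device__ inline bool wPoly6KernelDecays(){
  glm::vec3 o(0.0f);
  float w0 = wPoly6Kernel(o, o);                                   // peak, 315/(64*pi*H^9) * H^6
  float wMid = wPoly6Kernel(o, glm::vec3(0.5f * H, 0.0f, 0.0f));   // ~0.42 of the peak
  float wEdge = wPoly6Kernel(o, glm::vec3(0.99f * H, 0.0f, 0.0f)); // nearly zero
  return (w0 > wMid) && (wMid > wEdge) && (wEdge > 0.0f);
}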
__device__ float calculateRo(particle* particles, glm::vec3 p, int* p_neighbors, int p_num_neighbors, int index){
glm::vec3 p_j;
float ro = 0.0f;
for(int i = 0; i < p_num_neighbors; i++){
p_j = glm::vec3(particles[p_neighbors[i + index * MAX_NEIGHBORS]].pred_position);
ro += wPoly6Kernel(p, p_j);
}
return ro;
}
__device__ glm::vec3 calculateCiGradient(glm::vec3 p_i, glm::vec3 p_j){
glm::vec3 Ci = -1.0f / float(REST_DENSITY) * wGradientSpikyKernel(p_i, p_j);
return Ci;
}
__device__ glm::vec3 calculateCiGradientAti(particle* particles, glm::vec3 p_i, int* neighbors, int p_num_neighbors, int index){
glm::vec3 accum = glm::vec3(0.0f);
for(int i = 0; i < p_num_neighbors; i++){
accum += wGradientSpikyKernel(p_i, glm::vec3(particles[neighbors[i + index * MAX_NEIGHBORS]].pred_position));
}
glm::vec3 Ci = 1.0f / float(REST_DENSITY) * accum;
return Ci;
}
/*************************************
* Finding Neighboring Particles
*************************************/
// Clears grid from previous neighbors
__global__ void clearGrid(int* grid, int totalGridSize){
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < totalGridSize){
grid[index] = -1;
}
}
// Matches each particle to the grid index of the cell in which it resides
__global__ void findParticleGridIndex(particle* particles, int* grid_idx, int num_particles){
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < num_particles){
int x, y, z;
glm::vec4 p = particles[index].pred_position;
x = int(p.x) + BOX_X + 2;
y = int(p.y) + BOX_Y + 2;
z = int(p.z) + 2;
grid_idx[index] = x + (2 * (BOX_X + 2) * y) + (4 * (BOX_X + 2) * (BOX_Y + 2) * z);
}
}
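// The flattening above (and the one re-derived inside findKNearestNeighbors)
// is plain row-major packing over a (2*(BOX_X+2)) x (2*(BOX_Y+2)) x (BOX_Z+2)
// cell grid; spelled out as a helper purely for illustration, it is not
// called by the kernels in this file.
__device__ inline int flattenCellIndex(int x, int y, int z){
  return x + (2 * (BOX_X + 2)) * y + (2 * (BOX_X + 2)) * (2 * (BOX_Y + 2)) * z;
}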
// Records, for each cell, the first index in the sorted particle array
__global__ void matchParticleToCell(int* gridIdx, int* grid, int num_particles, int totalGridSize){
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < num_particles){
if(index == 0){
grid[gridIdx[index]] = index;
}else if(gridIdx[index] != gridIdx[index - 1]){
if(gridIdx[index] >= 0 && gridIdx[index] < totalGridSize) grid[gridIdx[index]] = index;
}
}
}
// Finds the nearest K neighbors within the smoothing kernel radius
__global__ void findKNearestNeighbors(particle* particles, int* gridIdx, int* grid, int* neighbors, int* num_neighbors, int num_particles, int totalGridSize){
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < num_particles){
int heap_size = 0;
int x,y,z,idx;
float r;
glm::vec4 p_j, p = particles[index].pred_position;
// Find particle index
x = int(p.x) + BOX_X + 2;
y = int(p.y) + BOX_Y + 2;
z = int(p.z) + 2;
float max;
int m, max_index, begin, cell_position;
// Examine all cells within radius
// NOTE: checks the cube that circumscribes the spherical smoothing kernel
for(int i = int(-H + z); i <= int(H + z); i++){
for(int j = int(-H + y); j <= int(H + y); j++){
for(int k = int(-H + x); k <= int(H + x); k++){
idx = k + (2 * (BOX_X + 2) * j) + (4 * (BOX_X + 2) * (BOX_Y + 2) * i);
if(idx >= totalGridSize || idx < 0){
continue;
}
begin = grid[idx];
if(begin < 0) continue;
cell_position = begin;
while(cell_position < num_particles && gridIdx[begin] == gridIdx[cell_position]){
if(cell_position == index){
++cell_position;
continue;
}
p_j = particles[cell_position].pred_position;
r = glm::length(p - p_j);
if(heap_size < MAX_NEIGHBORS){
if(r < H){
neighbors[index * MAX_NEIGHBORS + heap_size] = cell_position;
++heap_size;
}
}else{
max = glm::length(p - particles[neighbors[index * MAX_NEIGHBORS]].pred_position);
max_index = 0;
for(m = 1; m < heap_size; m++){
float d = glm::length(p - particles[neighbors[index * MAX_NEIGHBORS + m]].pred_position);
if(d > max){
max = d;
max_index = m;
}
}
if(r < max && r < H){
neighbors[index * MAX_NEIGHBORS + max_index] = cell_position;
}
}
++cell_position;
}
}
}
}
num_neighbors[index] = heap_size;
}
}
// Wrapper to find neighbors using hash grid
void findNeighbors(particle* particles, int* grid_idx, int* grid, int* neighbors, int num_particles){
dim3 fullBlocksPerGrid((int)ceil(float(totalGridSize) / float(blockSize)));
dim3 fullBlocksPerGridParticles((int)ceil(float(numParticles)/float(blockSize)));
// Clear Grid
hipLaunchKernelGGL(( clearGrid), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, grid, totalGridSize);
checkCUDAErrorWithLine("clearGrid failed!");
// Match particle to index
hipLaunchKernelGGL(( findParticleGridIndex), dim3(fullBlocksPerGridParticles), dim3(blockSize), 0, 0, particles, grid_idx, num_particles);
checkCUDAErrorWithLine("findParticleGridIndex failed!");
// Cast to device pointers
thrust::device_ptr<int> t_grid_idx = thrust::device_pointer_cast(grid_idx);
thrust::device_ptr<particle> t_particles = thrust::device_pointer_cast(particles);
// Sort by key
thrust::sort_by_key(t_grid_idx, t_grid_idx + numParticles, t_particles);
checkCUDAErrorWithLine("thrust failed!");
// Match sorted particle index
hipLaunchKernelGGL(( matchParticleToCell), dim3(fullBlocksPerGridParticles), dim3(blockSize), 0, 0, grid_idx, grid, numParticles, totalGridSize);
checkCUDAErrorWithLine("matchParticletoCell failed!");
// Find K nearest neighbors
hipLaunchKernelGGL(( findKNearestNeighbors), dim3(fullBlocksPerGridParticles), dim3(blockSize), 0, 0, particles, grid_idx, grid, neighbors, num_neighbors, num_particles, totalGridSize);
checkCUDAErrorWithLine("findKNearestNeighbors failed!");
}
__global__ void findNeighbors(particle* particles, int* neighbors, int* num_neighbors, int num_particles){
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < num_particles){
glm::vec3 p = glm::vec3(particles[index].pred_position);
int num_p_neighbors = 0;
glm::vec3 p_j, r;
for(int i = 0; i < num_particles && num_p_neighbors < MAX_NEIGHBORS; i++){
if(i != index){
p_j = glm::vec3(particles[i].pred_position);
r = p_j - p;
if(glm::length(r) <= H){
neighbors[num_p_neighbors + index * MAX_NEIGHBORS] = i;
++num_p_neighbors;
}
}
}
num_neighbors[index] = num_p_neighbors;
}
}
/*************************************
* Kernels for Jacobi Solver
*************************************/
__global__ void calculateLambda(particle* particles, int* neighbors, int* num_neighbors, int num_particles){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < num_particles){
int k = num_neighbors[index];
glm::vec3 p = glm::vec3(particles[index].pred_position);
float p_i = calculateRo(particles, p, neighbors, k, index);
float C_i = (p_i / REST_DENSITY) - 1.0f;
float C_i_gradient, sum_gradients = 0.0f;
for(int i = 0; i < k; i++){
// Calculate gradient when k = j
C_i_gradient = glm::length(calculateCiGradient(p, glm::vec3(particles[neighbors[i + index * MAX_NEIGHBORS]].pred_position)));
sum_gradients += (C_i_gradient * C_i_gradient);
}
// Add gradient when k = i
C_i_gradient = glm::length(calculateCiGradientAti(particles, p, neighbors, k, index));
sum_gradients += (C_i_gradient * C_i_gradient);
float sumCi = sum_gradients + RELAXATION;
particles[index].lambda = -1.0f * (C_i / sumCi);
}
}
__global__ void calculateDeltaPi(particle* particles, int* neighbors, int* num_neighbors, int num_particles){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < num_particles){
int k = num_neighbors[index];
glm::vec3 p = glm::vec3(particles[index].pred_position);
float l = particles[index].lambda;
glm::vec3 delta = glm::vec3(0.0f);
int p_j_idx;
#if PRESSURE == 1
float k_term;
glm::vec3 d_q = DELTA_Q * glm::vec3(1.0f) + p;
#endif
float s_corr = 0.0f;
for(int i = 0; i < k; i++){
p_j_idx = neighbors[i + index * MAX_NEIGHBORS];
#if PRESSURE == 1
float poly6pd_q = wPoly6Kernel(p, d_q);
if(poly6pd_q < EPSILON) k_term = 0.0f;
else k_term = wPoly6Kernel(p, glm::vec3(particles[p_j_idx].pred_position)) / poly6pd_q;
s_corr = -1.0f * PRESSURE_K * pow(k_term, PRESSURE_N);
#endif
delta += (l + particles[p_j_idx].lambda + s_corr) * wGradientSpikyKernel(p, glm::vec3(particles[p_j_idx].pred_position));
}
particles[index].delta_pos = 1.0f / REST_DENSITY * delta;
}
}
__global__ void calculateCurl(particle* particles, int* neighbors, int* num_neighbors, int num_particles){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < num_particles){
int k = num_neighbors[index];
glm::vec3 p = glm::vec3(particles[index].pred_position);
glm::vec3 v = particles[index].velocity;
int j_idx;
glm::vec3 v_ij, gradient, accum = glm::vec3(0.0f);
for(int i = 0; i < k; i++){
j_idx = neighbors[i + index * MAX_NEIGHBORS];
v_ij = particles[j_idx].velocity - v;
gradient = wGradientSpikyKernel(p, glm::vec3(particles[j_idx].pred_position));
accum += glm::cross(v_ij, gradient);
}
particles[index].curl = accum;
}
}
__global__ void applyVorticity(particle* particles, int* neighbors, int* num_neighbors, int num_particles){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < num_particles){
int k = num_neighbors[index];
glm::vec3 p = glm::vec3(particles[index].pred_position);
glm::vec3 w = particles[index].curl;
int j_idx;
float mag_w;
glm::vec3 r, grad = glm::vec3(0.0f);
for(int i = 0; i < k; i++){
j_idx = neighbors[i + index * MAX_NEIGHBORS];
r = glm::vec3(particles[j_idx].pred_position) - p;
mag_w = glm::length(particles[j_idx].curl - w);
grad.x += mag_w / r.x;
grad.y += mag_w / r.y;
grad.z += mag_w / r.z;
}
glm::vec3 vorticity, N;
N = 1.0f/(glm::length(grad) + .001f) * grad;
vorticity = float(RELAXATION) * (glm::cross(N, w));
particles[index].external_forces += vorticity;
}
}
__global__
void initializeParticles(int N, particle* particles)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
float gravity = -9.8f;
if(index < N)
{
particle p = particles[index];
glm::vec3 rand = (generateRandomNumberFromThread(1.0f, index)-0.5f);
p.position.x = 9*rand.x;
p.position.y = 20*rand.y;
p.position.z = 5.0 + 5.0*rand.z;
p.position.w = 1.0f;
p.velocity = glm::vec3(0.0f);
p.external_forces = glm::vec3(0.0f,0.0f,gravity);
particles[index] = p;
}
}
__global__
void generateParticles(int N, particle* particles)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
float gravity = -9.8f;
if(index > N && index <= N+10)
{
particle p = particles[index];
glm::vec3 rand = 20.0f * (generateRandomNumberFromThread(1.0f, index)-0.5f);
p.position.x = rand.x;
p.position.y = rand.y;
p.position.z = 20.0 + rand.z;
p.position.w = 1.0f;
p.velocity = glm::vec3(0.0f);
p.external_forces = glm::vec3(0.0f,0.0f,gravity);
particles[index] = p;
}
}
//Simple Euler integration scheme
__global__
void applyExternalForces(int N, float dt, particle* particles)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < N){
particle p = particles[index];
p.velocity += dt * p.external_forces;
p.pred_position = p.position + dt * glm::vec4(p.velocity,0.0f);
particles[index] = p;
}
}
__global__
void updatePosition(int N, particle* particles)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < N){
particles[index].position = particles[index].pred_position;
}
}
__global__
void updatePredictedPosition(int N, particle* particles)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < N){
particles[index].pred_position += glm::vec4(particles[index].delta_pos,0.0f);
}
}
__global__
void updateVelocity(int N, particle* particles, float dt)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < N){
particles[index].velocity = glm::vec3((1.0f/dt)*(particles[index].pred_position - particles[index].position));
}
}
__global__
void boxCollisionResponse(int N, particle* particles, float move){
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < N){
if( particles[index].pred_position.z < 0.0f){
particles[index].pred_position.z = 0.0001f;
glm::vec3 normal = glm::vec3(0,0,1);
glm::vec3 reflectedDir = particles[index].velocity - glm::vec3(2.0f*(normal*(glm::dot(particles[index].velocity,normal))));
particles[index].velocity.z = reflectedDir.z;
}
if( particles[index].pred_position.z > BOX_Z){
particles[index].pred_position.z = BOX_Z-0.0001f;
glm::vec3 normal = glm::vec3(0,0,-1);
glm::vec3 reflectedDir = particles[index].velocity - glm::vec3(2.0f*(normal*(glm::dot(particles[index].velocity,normal))));
particles[index].velocity.z = reflectedDir.z;
}
if( particles[index].pred_position.y < -BOX_Y+move){
particles[index].pred_position.y = -BOX_Y+move+0.01f;
glm::vec3 normal = glm::vec3(0,1,0);
glm::vec3 reflectedDir = particles[index].velocity - glm::vec3(2.0f*(normal*(glm::dot(particles[index].velocity,normal))));
particles[index].velocity.y = reflectedDir.y;
}
if( particles[index].pred_position.y > BOX_Y){
particles[index].pred_position.y = BOX_Y-0.01f;
glm::vec3 normal = glm::vec3(0,-1,0);
glm::vec3 reflectedDir = particles[index].velocity - glm::vec3(2.0f*(normal*(glm::dot(particles[index].velocity,normal))));
particles[index].velocity.y = reflectedDir.y;
}
if( particles[index].pred_position.x < -BOX_X){
particles[index].pred_position.x = -BOX_X+0.01f;
glm::vec3 normal = glm::vec3(1,0,0);
glm::vec3 reflectedDir = particles[index].velocity - glm::vec3(2.0f*(normal*(glm::dot(particles[index].velocity,normal))));
particles[index].velocity.x = reflectedDir.x;
}
if( particles[index].pred_position.x > BOX_X){
particles[index].pred_position.x = BOX_X-0.01f;
glm::vec3 normal = glm::vec3(-1,0,0);
glm::vec3 reflectedDir = particles[index].velocity - glm::vec3(2.0f*(normal*(glm::dot(particles[index].velocity,normal))));
particles[index].velocity.x = reflectedDir.x;
}
}
}
__global__
void geomCollisionResponse(int N, particle* particles, staticGeom* geoms, int numGeoms){
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < N){
for (int i = 0; i < numGeoms; i++){
vec3 normal;
vec3 intersectionPoint;
if (geoms[i].type == SPHERE){
if (sphereIntersectionTest(geoms[i], vec3(particles[index].pred_position), intersectionPoint, normal)){
particles[index].pred_position = vec4(intersectionPoint,1.0);
vec3 reflectedDir = particles[index].velocity - glm::vec3(2.0f*(normal*(glm::dot(particles[index].velocity,normal))));
particles[index].velocity = reflectedDir;
}
}
}
}
}
/*************************************
* Wrappers for the __global__ calls *
*************************************/
//Initialize memory, update some globals
void initCuda(int N, staticGeom* geoms, int numg)
{
numGeoms = numg;
numParticles = N;
numGenerated = 0;
dim3 fullBlocksPerGrid((int)ceil(float(N)/float(blockSize)));
hipMalloc((void**)&particles, N * sizeof(particle));
checkCUDAErrorWithLine("particles cudamalloc failed");
hipMalloc((void**)&neighbors, MAX_NEIGHBORS*N*sizeof(int));
hipMalloc((void**)&num_neighbors, N*sizeof(int));
hipMalloc((void**)&grid_idx, N*sizeof(int));
checkCUDAErrorWithLine("grid idx cudamalloc failed!");
hipMalloc((void**)&grid, totalGridSize*sizeof(int));
checkCUDAErrorWithLine("grid cudamalloc failed!");
//malloc geometry
cudageoms = NULL;
hipMalloc((void**)&cudageoms, numGeoms*sizeof(staticGeom));
hipMemcpy( cudageoms, geoms, numGeoms*sizeof(staticGeom), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( initializeParticles), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, N, particles);
checkCUDAErrorWithLine("Kernel failed!");
hipDeviceSynchronize();
}
void cudaPBFUpdateWrapper(float dt)
{
dim3 fullBlocksPerGrid((int)ceil(float(numParticles)/float(blockSize)));
//generateParticles<<<fullBlocksPerGrid, blockSize>>>(numGenerated, particles);
//if (numGenerated <= numParticles)
//numGenerated+=10;
hipLaunchKernelGGL(( applyExternalForces), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numParticles, dt, particles);
checkCUDAErrorWithLine("applyExternalForces failed!");
findNeighbors(particles, grid_idx, grid, neighbors, numParticles);
//findNeighbors<<<fullBlocksPerGrid, blockSize>>>(particles, neighbors, num_neighbors, numParticles);
checkCUDAErrorWithLine("findNeighbors failed!");
for(int i = 0; i < SOLVER_ITERATIONS; i++){
hipLaunchKernelGGL(( calculateLambda), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, particles, neighbors, num_neighbors, numParticles);
hipLaunchKernelGGL(( calculateDeltaPi), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, particles, neighbors, num_neighbors, numParticles);
    //PERFORM COLLISION DETECTION AND RESPONSE
hipLaunchKernelGGL(( boxCollisionResponse), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numParticles, particles, wallMove);
//geomCollisionResponse<<<fullBlocksPerGrid, blockSize>>>(numParticles, particles, cudageoms, numGeoms);
hipLaunchKernelGGL(( updatePredictedPosition), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numParticles, particles);
}
if (!hitonce)
wallMove += 0.1;
if (wallMove > BOX_Y){
wallMove = 0;
hitonce = true;
}
hipLaunchKernelGGL(( updateVelocity), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numParticles, particles, dt);
//calculateCurl<<<fullBlocksPerGrid, blockSize>>>(particles, neighbors, num_neighbors, numParticles);
//applyVorticity<<<fullBlocksPerGrid, blockSize>>>(particles, neighbors, num_neighbors, numParticles);
hipLaunchKernelGGL(( updatePosition), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numParticles, particles);
checkCUDAErrorWithLine("updatePosition failed!");
hipDeviceSynchronize();
}
void cudaUpdateVBO(float * vbodptr, int width, int height)
{
dim3 fullBlocksPerGrid((int)ceil(float(numParticles)/float(blockSize)));
hipLaunchKernelGGL(( sendToVBO), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numParticles, particles, vbodptr, width, height, scene_scale);
hipDeviceSynchronize();
}
void freeCuda(){
hipFree(particles);
hipFree(neighbors);
hipFree(num_neighbors);
hipFree(grid_idx);
hipFree(grid);
}
| 4584e3a7a84f2126a3c51f86c9c6edabcebd94e6.cu | #include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include "glm/glm.hpp"
#include "utilities.h"
#include "kernel.h"
#include "gridStruct.h"
#include "intersections.h"
#if PRESSURE == 1
#define DELTA_Q (float)(0.1*H)
#define PRESSURE_K 0.1
#define PRESSURE_N 6
#endif
//GLOBALS
dim3 threadsPerBlock(blockSize);
int totalGridSize = (2 * (BOX_X + 2)) * (2 * (BOX_Y + 2)) * (BOX_Z + 2);
int numParticles;
int numGenerated;
const __device__ float starMass = 5e10;
const float scene_scale = 1; //size of the height map in simulation space
staticGeom* cudageoms;
int numGeoms;
particle* particles;
int* neighbors;
int* num_neighbors;
int* grid_idx;
int* grid;
bool hitonce = false;
float wallMove = 0.0f;
using namespace glm;
void checkCUDAError(const char *msg, int line = -1)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
if( line >= 0 )
{
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
__host__ __device__
unsigned int hash(unsigned int a){
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
//Function that generates pseudo-random static (noise) per thread.
__host__ __device__
glm::vec3 generateRandomNumberFromThread(float time, int index)
{
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//Update the vertex buffer object
//(The VBO is where OpenGL looks for the particle positions)
__global__
void sendToVBO(int N, particle* particles, float * vbo, int width, int height, float s_scale)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale_w = 1.0f;
float c_scale_h = 1.0f;
float c_scale_z = 1.0f;
if(index<N)
{
vbo[4*index+0] = particles[index].position.x*c_scale_w;
vbo[4*index+1] = particles[index].position.y*c_scale_h;
vbo[4*index+2] = particles[index].position.z*c_scale_z;
vbo[4*index+3] = 1;
}
}
/*************************************
* Device Methods for Solver
*************************************/
__device__ float wPoly6Kernel(glm::vec3 p_i, glm::vec3 p_j){
float r = glm::length(p_i - p_j);
float hr_term = (H * H - r * r);
float div = 64.0 * PI * POW_H_9;
return 315.0f / div * hr_term * hr_term * hr_term;
}
__device__ glm::vec3 wGradientSpikyKernel(glm::vec3 p_i, glm::vec3 p_j){
glm::vec3 r = p_i - p_j;
float hr_term = H - glm::length(r);
float gradient_magnitude = 45.0f / (PI * POW_H_6) * hr_term * hr_term;
float div = (glm::length(r) + 0.001f);
return gradient_magnitude * 1.0f / div * r;
}
__device__ float calculateRo(particle* particles, glm::vec3 p, int* p_neighbors, int p_num_neighbors, int index){
glm::vec3 p_j;
float ro = 0.0f;
for(int i = 0; i < p_num_neighbors; i++){
p_j = glm::vec3(particles[p_neighbors[i + index * MAX_NEIGHBORS]].pred_position);
ro += wPoly6Kernel(p, p_j);
}
return ro;
}
__device__ glm::vec3 calculateCiGradient(glm::vec3 p_i, glm::vec3 p_j){
glm::vec3 Ci = -1.0f / float(REST_DENSITY) * wGradientSpikyKernel(p_i, p_j);
return Ci;
}
__device__ glm::vec3 calculateCiGradientAti(particle* particles, glm::vec3 p_i, int* neighbors, int p_num_neighbors, int index){
glm::vec3 accum = glm::vec3(0.0f);
for(int i = 0; i < p_num_neighbors; i++){
accum += wGradientSpikyKernel(p_i, glm::vec3(particles[neighbors[i + index * MAX_NEIGHBORS]].pred_position));
}
glm::vec3 Ci = 1.0f / float(REST_DENSITY) * accum;
return Ci;
}
/*************************************
* Finding Neighboring Particles
*************************************/
// Clears grid from previous neighbors
__global__ void clearGrid(int* grid, int totalGridSize){
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < totalGridSize){
grid[index] = -1;
}
}
// Matches each particle to the grid index of the cell in which it resides
__global__ void findParticleGridIndex(particle* particles, int* grid_idx, int num_particles){
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < num_particles){
int x, y, z;
glm::vec4 p = particles[index].pred_position;
x = int(p.x) + BOX_X + 2;
y = int(p.y) + BOX_Y + 2;
z = int(p.z) + 2;
grid_idx[index] = x + (2 * (BOX_X + 2) * y) + (4 * (BOX_X + 2) * (BOX_Y + 2) * z);
}
}
// Records, for each cell, the first index in the sorted particle array
__global__ void matchParticleToCell(int* gridIdx, int* grid, int num_particles, int totalGridSize){
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < num_particles){
if(index == 0){
grid[gridIdx[index]] = index;
}else if(gridIdx[index] != gridIdx[index - 1]){
if(gridIdx[index] >= 0 && gridIdx[index] < totalGridSize) grid[gridIdx[index]] = index;
}
}
}
// Finds the nearest K neighbors within the smoothing kernel radius
__global__ void findKNearestNeighbors(particle* particles, int* gridIdx, int* grid, int* neighbors, int* num_neighbors, int num_particles, int totalGridSize){
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < num_particles){
int heap_size = 0;
int x,y,z,idx;
float r;
glm::vec4 p_j, p = particles[index].pred_position;
// Compute the particle's grid cell coordinates
x = int(p.x) + BOX_X + 2;
y = int(p.y) + BOX_Y + 2;
z = int(p.z) + 2;
float max;
int m, max_index, begin, cell_position;
// Examine all cells within radius
// NOTE: checks the cube that circumscribes the spherical smoothing kernel
for(int i = int(-H + z); i <= int(H + z); i++){
for(int j = int(-H + y); j <= int(H + y); j++){
for(int k = int(-H + x); k <= int(H + x); k++){
idx = k + (2 * (BOX_X + 2) * j) + (4 * (BOX_X + 2) * (BOX_Y + 2) * i);
if(idx >= totalGridSize || idx < 0){
continue;
}
begin = grid[idx];
if(begin < 0) continue;
cell_position = begin;
while(cell_position < num_particles && gridIdx[begin] == gridIdx[cell_position]){
if(cell_position == index){
++cell_position;
continue;
}
p_j = particles[cell_position].pred_position;
r = glm::length(p - p_j);
if(heap_size < MAX_NEIGHBORS){
if(r < H){
neighbors[index * MAX_NEIGHBORS + heap_size] = cell_position;
++heap_size;
}
}else{
max = glm::length(p - particles[neighbors[index * MAX_NEIGHBORS]].pred_position);
max_index = 0;
for(m = 1; m < heap_size; m++){
float d = glm::length(p - particles[neighbors[index * MAX_NEIGHBORS + m]].pred_position);
if(d > max){
max = d;
max_index = m;
}
}
if(r < max && r < H){
neighbors[index * MAX_NEIGHBORS + max_index] = cell_position;
}
}
++cell_position;
}
}
}
}
num_neighbors[index] = heap_size;
}
}
// Wrapper to find neighbors using hash grid
void findNeighbors(particle* particles, int* grid_idx, int* grid, int* neighbors, int num_particles){
dim3 fullBlocksPerGrid((int)ceil(float(totalGridSize) / float(blockSize)));
dim3 fullBlocksPerGridParticles((int)ceil(float(numParticles)/float(blockSize)));
// Clear Grid
clearGrid<<<fullBlocksPerGrid, blockSize>>>(grid, totalGridSize);
checkCUDAErrorWithLine("clearGrid failed!");
// Match particle to index
findParticleGridIndex<<<fullBlocksPerGridParticles, blockSize>>>(particles, grid_idx, num_particles);
checkCUDAErrorWithLine("findParticleGridIndex failed!");
// Cast to device pointers
thrust::device_ptr<int> t_grid_idx = thrust::device_pointer_cast(grid_idx);
thrust::device_ptr<particle> t_particles = thrust::device_pointer_cast(particles);
// Sort by key
thrust::sort_by_key(t_grid_idx, t_grid_idx + numParticles, t_particles);
checkCUDAErrorWithLine("thrust failed!");
// Match sorted particle index
matchParticleToCell<<<fullBlocksPerGridParticles, blockSize>>>(grid_idx, grid, numParticles, totalGridSize);
checkCUDAErrorWithLine("matchParticletoCell failed!");
// Find K nearest neighbors
findKNearestNeighbors<<<fullBlocksPerGridParticles, blockSize>>>(particles, grid_idx, grid, neighbors, num_neighbors, num_particles, totalGridSize);
checkCUDAErrorWithLine("findKNearestNeighbors failed!");
}
__global__ void findNeighbors(particle* particles, int* neighbors, int* num_neighbors, int num_particles){
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < num_particles){
glm::vec3 p = glm::vec3(particles[index].pred_position);
int num_p_neighbors = 0;
glm::vec3 p_j, r;
for(int i = 0; i < num_particles && num_p_neighbors < MAX_NEIGHBORS; i++){
if(i != index){
p_j = glm::vec3(particles[i].pred_position);
r = p_j - p;
if(glm::length(r) <= H){
neighbors[num_p_neighbors + index * MAX_NEIGHBORS] = i;
++num_p_neighbors;
}
}
}
num_neighbors[index] = num_p_neighbors;
}
}
/*************************************
* Kernels for Jacobi Solver
*************************************/
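// calculateLambda: per-particle Lagrange multiplier from Position Based Fluids
// (Macklin and Mueller 2013): with C_i = rho_i / rho_0 - 1, it computes
// lambda_i = -C_i / (sum_k |grad_k C_i|^2 + epsilon), where RELAXATION plays
// the role of the CFM relaxation term epsilon.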
__global__ void calculateLambda(particle* particles, int* neighbors, int* num_neighbors, int num_particles){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < num_particles){
int k = num_neighbors[index];
glm::vec3 p = glm::vec3(particles[index].pred_position);
float p_i = calculateRo(particles, p, neighbors, k, index);
float C_i = (p_i / REST_DENSITY) - 1.0f;
float C_i_gradient, sum_gradients = 0.0f;
for(int i = 0; i < k; i++){
// Calculate gradient when k = j
C_i_gradient = glm::length(calculateCiGradient(p, glm::vec3(particles[neighbors[i + index * MAX_NEIGHBORS]].pred_position)));
sum_gradients += (C_i_gradient * C_i_gradient);
}
// Add gradient when k = i
C_i_gradient = glm::length(calculateCiGradientAti(particles, p, neighbors, k, index));
sum_gradients += (C_i_gradient * C_i_gradient);
float sumCi = sum_gradients + RELAXATION;
particles[index].lambda = -1.0f * (C_i / sumCi);
}
}
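// calculateDeltaPi: position correction
// delta_p_i = (1 / rho_0) * sum_j (lambda_i + lambda_j + s_corr) * gradW_spiky(p_i, p_j).
// With PRESSURE == 1, s_corr = -PRESSURE_K * (W(p_i, p_j) / W(p_i, d_q))^PRESSURE_N
// adds the artificial pressure correction term used to keep particles from clumping.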
__global__ void calculateDeltaPi(particle* particles, int* neighbors, int* num_neighbors, int num_particles){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < num_particles){
int k = num_neighbors[index];
glm::vec3 p = glm::vec3(particles[index].pred_position);
float l = particles[index].lambda;
glm::vec3 delta = glm::vec3(0.0f);
int p_j_idx;
#if PRESSURE == 1
float k_term;
glm::vec3 d_q = DELTA_Q * glm::vec3(1.0f) + p;
#endif
float s_corr = 0.0f;
for(int i = 0; i < k; i++){
p_j_idx = neighbors[i + index * MAX_NEIGHBORS];
#if PRESSURE == 1
float poly6pd_q = wPoly6Kernel(p, d_q);
if(poly6pd_q < EPSILON) k_term = 0.0f;
else k_term = wPoly6Kernel(p, glm::vec3(particles[p_j_idx].pred_position)) / poly6pd_q;
s_corr = -1.0f * PRESSURE_K * pow(k_term, PRESSURE_N);
#endif
delta += (l + particles[p_j_idx].lambda + s_corr) * wGradientSpikyKernel(p, glm::vec3(particles[p_j_idx].pred_position));
}
particles[index].delta_pos = 1.0f / REST_DENSITY * delta;
}
}
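// calculateCurl / applyVorticity: vorticity confinement. The curl
// omega_i = sum_j cross(v_j - v_i, gradW_spiky(p_i, p_j)) is computed first; the
// confinement force f = RELAXATION * cross(N, omega_i) is then added to the
// external forces, with N the normalized gradient of |omega| estimated from the
// per-neighbor differences |omega_j - omega_i| divided component-wise by r = p_j - p_i.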
__global__ void calculateCurl(particle* particles, int* neighbors, int* num_neighbors, int num_particles){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < num_particles){
int k = num_neighbors[index];
glm::vec3 p = glm::vec3(particles[index].pred_position);
glm::vec3 v = particles[index].velocity;
int j_idx;
glm::vec3 v_ij, gradient, accum = glm::vec3(0.0f);
for(int i = 0; i < k; i++){
j_idx = neighbors[i + index * MAX_NEIGHBORS];
v_ij = particles[j_idx].velocity - v;
gradient = wGradientSpikyKernel(p, glm::vec3(particles[j_idx].pred_position));
accum += glm::cross(v_ij, gradient);
}
particles[index].curl = accum;
}
}
__global__ void applyVorticity(particle* particles, int* neighbors, int* num_neighbors, int num_particles){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < num_particles){
int k = num_neighbors[index];
glm::vec3 p = glm::vec3(particles[index].pred_position);
glm::vec3 w = particles[index].curl;
int j_idx;
float mag_w;
glm::vec3 r, grad = glm::vec3(0.0f);
for(int i = 0; i < k; i++){
j_idx = neighbors[i + index * MAX_NEIGHBORS];
r = glm::vec3(particles[j_idx].pred_position) - p;
mag_w = glm::length(particles[j_idx].curl - w);
grad.x += mag_w / r.x;
grad.y += mag_w / r.y;
grad.z += mag_w / r.z;
}
glm::vec3 vorticity, N;
N = 1.0f/(glm::length(grad) + .001f) * grad;
vorticity = float(RELAXATION) * (glm::cross(N, w));
particles[index].external_forces += vorticity;
}
}
__global__
void initializeParticles(int N, particle* particles)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
float gravity = -9.8f;
if(index < N)
{
particle p = particles[index];
glm::vec3 rand = (generateRandomNumberFromThread(1.0f, index)-0.5f);
p.position.x = 9*rand.x;
p.position.y = 20*rand.y;
p.position.z = 5.0 + 5.0*rand.z;
p.position.w = 1.0f;
p.velocity = glm::vec3(0.0f);
p.external_forces = glm::vec3(0.0f,0.0f,gravity);
particles[index] = p;
}
}
__global__
void generateParticles(int N, particle* particles)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
float gravity = -9.8f;
if(index > N && index <= N+10)
{
particle p = particles[index];
glm::vec3 rand = 20.0f * (generateRandomNumberFromThread(1.0f, index)-0.5f);
p.position.x = rand.x;
p.position.y = rand.y;
p.position.z = 20.0 + rand.z;
p.position.w = 1.0f;
p.velocity = glm::vec3(0.0f);
p.external_forces = glm::vec3(0.0f,0.0f,gravity);
particles[index] = p;
}
}
//Simple Euler integration scheme
__global__
void applyExternalForces(int N, float dt, particle* particles)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < N){
particle p = particles[index];
p.velocity += dt * p.external_forces;
p.pred_position = p.position + dt * glm::vec4(p.velocity,0.0f);
particles[index] = p;
}
}
__global__
void updatePosition(int N, particle* particles)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < N){
particles[index].position = particles[index].pred_position;
}
}
__global__
void updatePredictedPosition(int N, particle* particles)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < N){
particles[index].pred_position += glm::vec4(particles[index].delta_pos,0.0f);
}
}
__global__
void updateVelocity(int N, particle* particles, float dt)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < N){
particles[index].velocity = glm::vec3((1.0f/dt)*(particles[index].pred_position - particles[index].position));
}
}
__global__
void boxCollisionResponse(int N, particle* particles, float move){
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < N){
if( particles[index].pred_position.z < 0.0f){
particles[index].pred_position.z = 0.0001f;
glm::vec3 normal = glm::vec3(0,0,1);
glm::vec3 reflectedDir = particles[index].velocity - glm::vec3(2.0f*(normal*(glm::dot(particles[index].velocity,normal))));
particles[index].velocity.z = reflectedDir.z;
}
if( particles[index].pred_position.z > BOX_Z){
particles[index].pred_position.z = BOX_Z-0.0001f;
glm::vec3 normal = glm::vec3(0,0,-1);
glm::vec3 reflectedDir = particles[index].velocity - glm::vec3(2.0f*(normal*(glm::dot(particles[index].velocity,normal))));
particles[index].velocity.z = reflectedDir.z;
}
if( particles[index].pred_position.y < -BOX_Y+move){
particles[index].pred_position.y = -BOX_Y+move+0.01f;
glm::vec3 normal = glm::vec3(0,1,0);
glm::vec3 reflectedDir = particles[index].velocity - glm::vec3(2.0f*(normal*(glm::dot(particles[index].velocity,normal))));
particles[index].velocity.y = reflectedDir.y;
}
if( particles[index].pred_position.y > BOX_Y){
particles[index].pred_position.y = BOX_Y-0.01f;
glm::vec3 normal = glm::vec3(0,-1,0);
glm::vec3 reflectedDir = particles[index].velocity - glm::vec3(2.0f*(normal*(glm::dot(particles[index].velocity,normal))));
particles[index].velocity.y = reflectedDir.y;
}
if( particles[index].pred_position.x < -BOX_X){
particles[index].pred_position.x = -BOX_X+0.01f;
glm::vec3 normal = glm::vec3(1,0,0);
glm::vec3 reflectedDir = particles[index].velocity - glm::vec3(2.0f*(normal*(glm::dot(particles[index].velocity,normal))));
particles[index].velocity.x = reflectedDir.x;
}
if( particles[index].pred_position.x > BOX_X){
particles[index].pred_position.x = BOX_X-0.01f;
glm::vec3 normal = glm::vec3(-1,0,0);
glm::vec3 reflectedDir = particles[index].velocity - glm::vec3(2.0f*(normal*(glm::dot(particles[index].velocity,normal))));
particles[index].velocity.x = reflectedDir.x;
}
}
}
__global__
void geomCollisionResponse(int N, particle* particles, staticGeom* geoms, int numGeoms){
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index < N){
for (int i = 0; i < numGeoms; i++){
vec3 normal;
vec3 intersectionPoint;
if (geoms[i].type == SPHERE){
if (sphereIntersectionTest(geoms[i], vec3(particles[index].pred_position), intersectionPoint, normal)){
particles[index].pred_position = vec4(intersectionPoint,1.0);
vec3 reflectedDir = particles[index].velocity - glm::vec3(2.0f*(normal*(glm::dot(particles[index].velocity,normal))));
particles[index].velocity = reflectedDir;
}
}
}
}
}
/*************************************
* Wrappers for the __global__ calls *
*************************************/
//Initialize memory, update some globals
void initCuda(int N, staticGeom* geoms, int numg)
{
numGeoms = numg;
numParticles = N;
numGenerated = 0;
dim3 fullBlocksPerGrid((int)ceil(float(N)/float(blockSize)));
cudaMalloc((void**)&particles, N * sizeof(particle));
checkCUDAErrorWithLine("particles cudamalloc failed");
cudaMalloc((void**)&neighbors, MAX_NEIGHBORS*N*sizeof(int));
cudaMalloc((void**)&num_neighbors, N*sizeof(int));
cudaMalloc((void**)&grid_idx, N*sizeof(int));
checkCUDAErrorWithLine("grid idx cudamalloc failed!");
cudaMalloc((void**)&grid, totalGridSize*sizeof(int));
checkCUDAErrorWithLine("grid cudamalloc failed!");
//malloc geometry
cudageoms = NULL;
cudaMalloc((void**)&cudageoms, numGeoms*sizeof(staticGeom));
cudaMemcpy( cudageoms, geoms, numGeoms*sizeof(staticGeom), cudaMemcpyHostToDevice);
initializeParticles<<<fullBlocksPerGrid, blockSize>>>(N, particles);
checkCUDAErrorWithLine("Kernel failed!");
cudaThreadSynchronize();
}
void cudaPBFUpdateWrapper(float dt)
{
dim3 fullBlocksPerGrid((int)ceil(float(numParticles)/float(blockSize)));
//generateParticles<<<fullBlocksPerGrid, blockSize>>>(numGenerated, particles);
//if (numGenerated <= numParticles)
//numGenerated+=10;
applyExternalForces<<<fullBlocksPerGrid, blockSize>>>(numParticles, dt, particles);
checkCUDAErrorWithLine("applyExternalForces failed!");
findNeighbors(particles, grid_idx, grid, neighbors, numParticles);
//findNeighbors<<<fullBlocksPerGrid, blockSize>>>(particles, neighbors, num_neighbors, numParticles);
checkCUDAErrorWithLine("findNeighbors failed!");
for(int i = 0; i < SOLVER_ITERATIONS; i++){
calculateLambda<<<fullBlocksPerGrid, blockSize>>>(particles, neighbors, num_neighbors, numParticles);
calculateDeltaPi<<<fullBlocksPerGrid, blockSize>>>(particles, neighbors, num_neighbors, numParticles);
//PERFORM COLLISION DETECTION AND RESPONSE
boxCollisionResponse<<<fullBlocksPerGrid, blockSize>>>(numParticles, particles, wallMove);
//geomCollisionResponse<<<fullBlocksPerGrid, blockSize>>>(numParticles, particles, cudageoms, numGeoms);
updatePredictedPosition<<<fullBlocksPerGrid, blockSize>>>(numParticles, particles);
}
if (!hitonce)
wallMove += 0.1;
if (wallMove > BOX_Y){
wallMove = 0;
hitonce = true;
}
updateVelocity<<<fullBlocksPerGrid, blockSize>>>(numParticles, particles, dt);
//calculateCurl<<<fullBlocksPerGrid, blockSize>>>(particles, neighbors, num_neighbors, numParticles);
//applyVorticity<<<fullBlocksPerGrid, blockSize>>>(particles, neighbors, num_neighbors, numParticles);
updatePosition<<<fullBlocksPerGrid, blockSize>>>(numParticles, particles);
checkCUDAErrorWithLine("updatePosition failed!");
cudaThreadSynchronize();
}
void cudaUpdateVBO(float * vbodptr, int width, int height)
{
dim3 fullBlocksPerGrid((int)ceil(float(numParticles)/float(blockSize)));
sendToVBO<<<fullBlocksPerGrid, blockSize>>>(numParticles, particles, vbodptr, width, height, scene_scale);
cudaThreadSynchronize();
}
void freeCuda(){
cudaFree(particles);
cudaFree(neighbors);
cudaFree(num_neighbors);
cudaFree(grid_idx);
cudaFree(grid);
}
|
e2f264190ff03033e8c89c97cfb65e3afe80d57d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "BetweennessCentrality.h"
// Number of thread blocks.
#define BLOCKS_COUNT 2
// Maximal number of threads per block.
#define MAX_THREADS_PER_BLOCK 768
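// vertexParallelBC: Brandes-style betweenness centrality. Each thread block
// processes its own slice of the source set; for every source a
// level-synchronous BFS counts the number of shortest paths (sigma) and
// records predecessors, and the dependencies are then accumulated backwards
// via delta[v] += sigma[v] / sigma[w] * (1 + delta[w]) and added to the
// global bc scores.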
__global__ void vertexParallelBC(
int *sources,
int sourcesPerBlock,
int sourceCount,
Graph *g,
int *ds,
float *sigmas,
float *deltas,
plist *ps,
float *bc)
{
// Number of nodes in the graph.
__shared__ int n;
// Used to determine the set of sources to explore.
__shared__ int sourceIdxEnd;
__shared__ int sourceIdx;
__shared__ int source;
// Used to access the portions of the global storage.
__shared__ int *d;
__shared__ float *sigma;
__shared__ float *delta;
__shared__ plist *p;
// Used to represent the standard bfs queue, implicitly.
__shared__ bool empty;
__shared__ int level;
if (threadIdx.x == 0)
{
// Set the number of nodes.
n = g->n;
// Compute the set of sources to explore.
sourceIdx = blockIdx.x * sourcesPerBlock;
int nextEndIdx = sourceIdx + sourcesPerBlock;
sourceIdxEnd = (sourceCount < nextEndIdx) ? sourceCount : nextEndIdx;
// Compute the global structure offsets.
d = ds + blockIdx.x * n;
sigma = sigmas + blockIdx.x * n;
delta = deltas + blockIdx.x * n;
p = ps + blockIdx.x * n;
}
__syncthreads();
while (sourceIdx < sourceIdxEnd)
{
__syncthreads();
if (threadIdx.x == 0)
{
source = sources[sourceIdx];
}
__syncthreads();
// Initialize the required data structures.
for (int v = threadIdx.x; v < n; v += blockDim.x)
{
if (v == source)
{
d[v] = 0;
sigma[v] = 1.0;
}
else
{
d[v] = -1;
sigma[v] = 0.0;
}
delta[v] = 0.0;
p[v].count = 0;
}
if (threadIdx.x == 0)
{
empty = false;
level = 0;
}
__syncthreads();
// Perform BFS.
while (!empty)
{
__syncthreads();
empty = true;
__syncthreads();
// TODO: unroll the first iteration.
for (int v = threadIdx.x; v < n; v += blockDim.x)
{
// Check if v's neighbours are to be visited, i.e. if v is in the queue.
if (d[v] == level)
{
// Go through the successors of v.
for (int j = g->rowOffset[v]; j < g->rowOffset[v + 1]; ++j)
{
// Skip edges whose weight is divisible by 8.
if ((g->weight[j] & 7) == 0)
{
continue;
}
int w = g->column[j];
// Not visited.
if (d[w] == -1)
{
// Mark that there are more nodes to be explored.
empty = false;
// No need for an atomic update.
d[w] = d[v] + 1;
}
if (d[w] == (d[v] + 1))
{
atomicAdd(&sigma[w], sigma[v]);
// Save the predecessor.
atomicExch(&(p[w].list[p[w].count]), v);
atomicAdd(&(p[w].count), 1);
}
}
}
}
__syncthreads();
if (threadIdx.x == 0)
{
level++;
}
__syncthreads();
}
__syncthreads();
// Accumulate bc scores.
while (level > 0)
{
__syncthreads();
for (int w = threadIdx.x; w < n; w += blockDim.x)
{
if (d[w] == level)
{
for (int k = 0; k < p[w].count; ++k)
{
int v = p[w].list[k];
float d = (sigma[v] / sigma[w]) * (1.0 + delta[w]);
atomicAdd(&delta[v], d);
}
}
}
__syncthreads();
if (threadIdx.x == 0)
{
level--;
}
__syncthreads();
}
__syncthreads();
// Update bc scores.
for (int v = threadIdx.x; v < n; v += blockDim.x)
{
if (v != source)
{
// Need to perform an atomic update as we may have conflicting reads
// and writes by threads from different blocks.
atomicAdd(&bc[v], delta[v]);
}
}
__syncthreads();
if (threadIdx.x == 0)
{
sourceIdx++;
}
__syncthreads();
}
}
void computeBCGPU(Configuration *config, Graph *g, int *perm, float *bc)
{
// Declare the auxiliary structures.
int *d;
float *sigma;
float *delta;
plist *p;
int *pListMem;
int *sources;
// Compute the number of sources.
int maxSourceCount = 1 << config->k4Approx;
hipMallocManaged(&sources, maxSourceCount * sizeof(int));
// Construct the list of sources.
int sourceCount = 0;
for (int i = 0; i < g->n; ++i)
{
if (sourceCount == maxSourceCount)
{
break;
}
// Apply the permutation.
int source = perm[i];
// Skip vertices with no outgoing edges.
if (g->rowOffset[source + 1] - g->rowOffset[source] == 0)
{
continue;
}
else
{
sources[sourceCount] = source;
sourceCount++;
}
}
// Determine the number of sources to explore per block.
int sourcesPerBlock = maxSourceCount / BLOCKS_COUNT;
// Allocate temporary structures in global memory.
hipMallocManaged(&d, g->n * sizeof(int) * BLOCKS_COUNT);
hipMallocManaged(&sigma, g->n * sizeof(float) * BLOCKS_COUNT);
hipMallocManaged(&delta, g->n * sizeof(float) * BLOCKS_COUNT);
hipMallocManaged(&p, g->n * sizeof(plist) * BLOCKS_COUNT);
hipMallocManaged(&pListMem, g->m * sizeof(int) * BLOCKS_COUNT);
// Compute the predecessor list sizes.
int *inDegree;
int *numEdges;
hipMallocManaged(&inDegree, (g->n + 1) * sizeof(int));
hipMemset(inDegree, 0, (g->n + 1) * sizeof(int));
hipMallocManaged(&numEdges, (g->n + 1) * sizeof(int));
for (int i = 0; i < g->m; ++i)
{
inDegree[g->column[i]]++;
}
numEdges[0] = 0;
for (int i = 1; i < (g->n + 1); ++i)
{
numEdges[i] = numEdges[i - 1] + inDegree[i - 1];
}
// Set predecessor list pointers.
for (int j = 0; j < BLOCKS_COUNT; ++j)
{
int pOffset = j * g->n;
int pListMemOffset = j * g->m;
for (int i = 0; i < g->n; ++i)
{
p[pOffset + i].list = pListMem + pListMemOffset + numEdges[i];
p[pOffset + i].count = 0;
}
}
hipFree(inDegree);
hipFree(numEdges);
// Run BC.
hipLaunchKernelGGL(( vertexParallelBC), dim3(BLOCKS_COUNT), dim3(MAX_THREADS_PER_BLOCK), 0, 0,
sources,
sourcesPerBlock,
sourceCount,
g,
d,
sigma,
delta,
p,
bc);
hipDeviceSynchronize();
// Clean up.
hipFree(pListMem);
hipFree(p);
hipFree(delta);
hipFree(sigma);
hipFree(d);
hipFree(sources);
}
void computeBCCPU(Configuration *config, Graph *g, int *perm, float *bc)
{
assert(config != NULL);
assert(g != NULL);
assert(perm != NULL);
assert(bc != NULL);
// Number of vertices to run BFS from.
int rootsCount = 1 << config->k4Approx;
// Predecessors of a vertex v on shortest paths from s.
plist *p;
int *pListMem;
// Number of shortest paths.
float *sigma;
// Shortest path length between the pairs.
int *d;
// Dependency of vertices.
float *delta;
int *inDegree;
int *numEdges;
// The size of the predecessor list of each vertex is bounded
// by its in-degree -> Compute the in-degree of every vertex.
p = (plist *) calloc(g->n, sizeof(plist));
inDegree = (int *) calloc(g->n + 1, sizeof(int));
numEdges = (int *) malloc((g->n + 1) * sizeof(int));
// Compute inDegree.
for (int i = 0; i < g->m; ++i)
{
int v = g->column[i];
inDegree[v]++;
}
// Prefix sums.
numEdges[0] = 0;
for (int i = 1; i < (g->n + 1); ++i)
{
numEdges[i] = numEdges[i - 1] + inDegree[i - 1];
}
// Allocate memory for plists.
pListMem = (int *) malloc(g->m * sizeof(int));
// Set the p pointers accordingly.
for (int i = 0; i < g->n; ++i)
{
p[i].list = pListMem + numEdges[i];
p[i].count = 0;
}
// Clean up temporary structures.
free(inDegree);
free(numEdges);
// Allocate memory.
sigma = (float *) malloc(g->n * sizeof(float));
d = (int *) malloc(g->n * sizeof(int));
delta = (float *) calloc(g->n, sizeof(float));
int *stack = (int *) malloc(g->n * sizeof(int));
int *queue = (int *) malloc(g->n * sizeof(int));
for (int r = 0; r < g->n; ++r)
{
// Check if the required number of roots has been explored.
if (rootsCount == 0)
{
break;
}
// Apply the permutation.
int root = perm[r];
// Skip vertices with no outgoing edges.
if (g->rowOffset[root + 1] - g->rowOffset[root] == 0)
{
continue;
}
else
{
rootsCount--;
}
int sPtr = 0;
int qHead = 0;
int qTail = 0;
// Clear the auxiliary structures.
for (int k = 0; k < g->n; ++k)
{
d[k] = -1;
sigma[k] = 0.0;
p[k].count = 0;
delta[k] = 0.0;
}
sigma[root] = 1.0;
d[root] = 0;
queue[qTail] = root;
qTail++;
// While !empty(Q)
while (qTail - qHead > 0)
{
// Dequeue v <- Q
int v = queue[qHead];
qHead++;
// Push v -> Stack
stack[sPtr] = v;
sPtr++;
for (int j = g->rowOffset[v]; j < g->rowOffset[v + 1]; ++j)
{
// Skip edges whose weight is divisible by 8.
if ((g->weight[j] & 7) == 0)
{
continue;
}
int w = g->column[j];
// Not visited.
if (d[w] == -1)
{
// Enqueue w -> Q
queue[qTail] = w;
qTail++;
// Distance to w is distance to v + 1.
d[w] = d[v] + 1;
}
if (d[w] == (d[v] + 1))
{
sigma[w] += sigma[v];
// Save the predecessor.
p[w].list[p[w].count] = v;
p[w].count++;
}
}
}
// While !empty(Stack)
while (sPtr > 0)
{
sPtr--;
int w = stack[sPtr];
for (int k = 0; k < p[w].count; ++k)
{
// v = pred of w
int v = p[w].list[k];
delta[v] = delta[v] + (sigma[v] / sigma[w]) * (1.0 + delta[w]);
}
if (w != root)
{
bc[w] += delta[w];
}
}
}
// Free the memory.
free(queue);
free(stack);
free(delta);
free(d);
free(sigma);
free(pListMem);
free(p);
}
| e2f264190ff03033e8c89c97cfb65e3afe80d57d.cu | #include <assert.h>
#include <stdio.h>
#include "BetweennessCentrality.h"
// Number of thread blocks.
#define BLOCKS_COUNT 2
// Maximal number of threads per block.
#define MAX_THREADS_PER_BLOCK 768
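// vertexParallelBC: Brandes-style betweenness centrality. Each thread block
// processes its own slice of the source set; for every source a
// level-synchronous BFS counts the number of shortest paths (sigma) and
// records predecessors, and the dependencies are then accumulated backwards
// via delta[v] += sigma[v] / sigma[w] * (1 + delta[w]) and added to the
// global bc scores.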
__global__ void vertexParallelBC(
int *sources,
int sourcesPerBlock,
int sourceCount,
Graph *g,
int *ds,
float *sigmas,
float *deltas,
plist *ps,
float *bc)
{
// Number of nodes in the graph.
__shared__ int n;
// Used to determine the set of sources to explore.
__shared__ int sourceIdxEnd;
__shared__ int sourceIdx;
__shared__ int source;
// Used to access the portions of the global storage.
__shared__ int *d;
__shared__ float *sigma;
__shared__ float *delta;
__shared__ plist *p;
// Used to represent the standard bfs queue, implicitly.
__shared__ bool empty;
__shared__ int level;
if (threadIdx.x == 0)
{
// Set the number of nodes.
n = g->n;
// Compute the set of sources to explore.
sourceIdx = blockIdx.x * sourcesPerBlock;
int nextEndIdx = sourceIdx + sourcesPerBlock;
sourceIdxEnd = (sourceCount < nextEndIdx) ? sourceCount : nextEndIdx;
// Compute the global structure offsets.
d = ds + blockIdx.x * n;
sigma = sigmas + blockIdx.x * n;
delta = deltas + blockIdx.x * n;
p = ps + blockIdx.x * n;
}
__syncthreads();
while (sourceIdx < sourceIdxEnd)
{
__syncthreads();
if (threadIdx.x == 0)
{
source = sources[sourceIdx];
}
__syncthreads();
// Initialize the required data structures.
for (int v = threadIdx.x; v < n; v += blockDim.x)
{
if (v == source)
{
d[v] = 0;
sigma[v] = 1.0;
}
else
{
d[v] = -1;
sigma[v] = 0.0;
}
delta[v] = 0.0;
p[v].count = 0;
}
if (threadIdx.x == 0)
{
empty = false;
level = 0;
}
__syncthreads();
// Perform BFS.
while (!empty)
{
__syncthreads();
empty = true;
__syncthreads();
// TODO: unroll the first iteration.
for (int v = threadIdx.x; v < n; v += blockDim.x)
{
// Check if v's neighbours are to be visited, i.e. if v is in the queue.
if (d[v] == level)
{
// Go through the successors of v.
for (int j = g->rowOffset[v]; j < g->rowOffset[v + 1]; ++j)
{
// Skip edges whose weight is divisible by 8.
if ((g->weight[j] & 7) == 0)
{
continue;
}
int w = g->column[j];
// Not visited.
if (d[w] == -1)
{
// Mark that there are more nodes to be explored.
empty = false;
// No need for an atomic update.
d[w] = d[v] + 1;
}
if (d[w] == (d[v] + 1))
{
atomicAdd(&sigma[w], sigma[v]);
// Save the predecessor.
atomicExch(&(p[w].list[p[w].count]), v);
atomicAdd(&(p[w].count), 1);
}
}
}
}
__syncthreads();
if (threadIdx.x == 0)
{
level++;
}
__syncthreads();
}
__syncthreads();
// Accumulate bc scores.
while (level > 0)
{
__syncthreads();
for (int w = threadIdx.x; w < n; w += blockDim.x)
{
if (d[w] == level)
{
for (int k = 0; k < p[w].count; ++k)
{
int v = p[w].list[k];
float d = (sigma[v] / sigma[w]) * (1.0 + delta[w]);
atomicAdd(&delta[v], d);
}
}
}
__syncthreads();
if (threadIdx.x == 0)
{
level--;
}
__syncthreads();
}
__syncthreads();
// Update bc scores.
for (int v = threadIdx.x; v < n; v += blockDim.x)
{
if (v != source)
{
// Need to perform an atomic update as we may have conflicting reads
// and writes by threads from different blocks.
atomicAdd(&bc[v], delta[v]);
}
}
__syncthreads();
if (threadIdx.x == 0)
{
sourceIdx++;
}
__syncthreads();
}
}
void computeBCGPU(Configuration *config, Graph *g, int *perm, float *bc)
{
// Declare the auxiliary structures.
int *d;
float *sigma;
float *delta;
plist *p;
int *pListMem;
int *sources;
// Compute the number of sources.
int maxSourceCount = 1 << config->k4Approx;
cudaMallocManaged(&sources, maxSourceCount * sizeof(int));
// Construct the list of sources.
int sourceCount = 0;
for (int i = 0; i < g->n; ++i)
{
if (sourceCount == maxSourceCount)
{
break;
}
// Apply the permutation.
int source = perm[i];
// Skip vertices with no outgoing edges.
if (g->rowOffset[source + 1] - g->rowOffset[source] == 0)
{
continue;
}
else
{
sources[sourceCount] = source;
sourceCount++;
}
}
// Determine the number of sources to explore per block.
int sourcesPerBlock = maxSourceCount / BLOCKS_COUNT;
// Allocate temporary structures in global memory.
cudaMallocManaged(&d, g->n * sizeof(int) * BLOCKS_COUNT);
cudaMallocManaged(&sigma, g->n * sizeof(float) * BLOCKS_COUNT);
cudaMallocManaged(&delta, g->n * sizeof(float) * BLOCKS_COUNT);
cudaMallocManaged(&p, g->n * sizeof(plist) * BLOCKS_COUNT);
cudaMallocManaged(&pListMem, g->m * sizeof(int) * BLOCKS_COUNT);
// Compute the predecessor list sizes.
int *inDegree;
int *numEdges;
cudaMallocManaged(&inDegree, (g->n + 1) * sizeof(int));
cudaMemset(inDegree, 0, (g->n + 1) * sizeof(int));
cudaMallocManaged(&numEdges, (g->n + 1) * sizeof(int));
for (int i = 0; i < g->m; ++i)
{
inDegree[g->column[i]]++;
}
numEdges[0] = 0;
for (int i = 1; i < (g->n + 1); ++i)
{
numEdges[i] = numEdges[i - 1] + inDegree[i - 1];
}
// Set predecessor list pointers.
for (int j = 0; j < BLOCKS_COUNT; ++j)
{
int pOffset = j * g->n;
int pListMemOffset = j * g->m;
for (int i = 0; i < g->n; ++i)
{
p[pOffset + i].list = pListMem + pListMemOffset + numEdges[i];
p[pOffset + i].count = 0;
}
}
cudaFree(inDegree);
cudaFree(numEdges);
// Run BC.
vertexParallelBC<<<BLOCKS_COUNT, MAX_THREADS_PER_BLOCK>>>(
sources,
sourcesPerBlock,
sourceCount,
g,
d,
sigma,
delta,
p,
bc);
cudaDeviceSynchronize();
// Clean up.
cudaFree(pListMem);
cudaFree(p);
cudaFree(delta);
cudaFree(sigma);
cudaFree(d);
cudaFree(sources);
}
void computeBCCPU(Configuration *config, Graph *g, int *perm, float *bc)
{
assert(config != NULL);
assert(g != NULL);
assert(perm != NULL);
assert(bc != NULL);
// Number of vertices to run BFS from.
int rootsCount = 1 << config->k4Approx;
// Predecessors of a vertex v on shortest paths from s.
plist *p;
int *pListMem;
// Number of shortest paths.
float *sigma;
// Shortest path length between the pairs.
int *d;
// Dependency of vertices.
float *delta;
int *inDegree;
int *numEdges;
// The size of the predecessor list of each vertex is bounded
// by its in-degree -> Compute the in-degree of every vertex.
p = (plist *) calloc(g->n, sizeof(plist));
inDegree = (int *) calloc(g->n + 1, sizeof(int));
numEdges = (int *) malloc((g->n + 1) * sizeof(int));
// Compute inDegree.
for (int i = 0; i < g->m; ++i)
{
int v = g->column[i];
inDegree[v]++;
}
// Prefix sums.
numEdges[0] = 0;
for (int i = 1; i < (g->n + 1); ++i)
{
numEdges[i] = numEdges[i - 1] + inDegree[i - 1];
}
// Allocate memory for plists.
pListMem = (int *) malloc(g->m * sizeof(int));
// Set the p pointers accordingly.
for (int i = 0; i < g->n; ++i)
{
p[i].list = pListMem + numEdges[i];
p[i].count = 0;
}
// Clean up temporary structures.
free(inDegree);
free(numEdges);
// Allocate memory.
sigma = (float *) malloc(g->n * sizeof(float));
d = (int *) malloc(g->n * sizeof(int));
delta = (float *) calloc(g->n, sizeof(float));
int *stack = (int *) malloc(g->n * sizeof(int));
int *queue = (int *) malloc(g->n * sizeof(int));
for (int r = 0; r < g->n; ++r)
{
// Check if the required number of roots has been explored.
if (rootsCount == 0)
{
break;
}
// Apply the permutation.
int root = perm[r];
// Skip vertices with no outgoing edges.
if (g->rowOffset[root + 1] - g->rowOffset[root] == 0)
{
continue;
}
else
{
rootsCount--;
}
int sPtr = 0;
int qHead = 0;
int qTail = 0;
// Clear the auxiliary structures.
for (int k = 0; k < g->n; ++k)
{
d[k] = -1;
sigma[k] = 0.0;
p[k].count = 0;
delta[k] = 0.0;
}
sigma[root] = 1.0;
d[root] = 0;
queue[qTail] = root;
qTail++;
// While !empty(Q)
while (qTail - qHead > 0)
{
// Dequeue v <- Q
int v = queue[qHead];
qHead++;
// Push v -> Stack
stack[sPtr] = v;
sPtr++;
for (int j = g->rowOffset[v]; j < g->rowOffset[v + 1]; ++j)
{
// Skip edges whose weight is divisible by 8.
if ((g->weight[j] & 7) == 0)
{
continue;
}
int w = g->column[j];
// Not visited.
if (d[w] == -1)
{
// Enqueue w -> Q
queue[qTail] = w;
qTail++;
// Distance to w is distance to v + 1.
d[w] = d[v] + 1;
}
if (d[w] == (d[v] + 1))
{
sigma[w] += sigma[v];
// Save the predecessor.
p[w].list[p[w].count] = v;
p[w].count++;
}
}
}
// While !empty(Stack)
while (sPtr > 0)
{
sPtr--;
int w = stack[sPtr];
for (int k = 0; k < p[w].count; ++k)
{
// v = pred of w
int v = p[w].list[k];
delta[v] = delta[v] + (sigma[v] / sigma[w]) * (1.0 + delta[w]);
}
if (w != root)
{
bc[w] += delta[w];
}
}
}
// Free the memory.
free(queue);
free(stack);
free(delta);
free(d);
free(sigma);
free(pListMem);
free(p);
}
|
cebf3ded9d915e1b8e4985686491342cbca214ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef PADDLE_WITH_HIP
#include <hiprand.h>
#include <hiprand_kernel.h>
#include <hipcub/hipcub.hpp>
typedef hiprandState hiprandState_t;
namespace cub = hipcub;
#else
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hipcub/hipcub.hpp>
#endif
#include <iterator>
#include <random>
#include "paddle/fluid/operators/class_center_sample_op.h"
#include "paddle/phi/api/include/tensor.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/distributed/collective/ProcessGroup.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
namespace paddle {
namespace operators {
#define CUDA_KERNEL_LOOP(i, n) \
for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x, \
step = blockDim.x * gridDim.x; \
i < (n); i += step)
using Tensor = framework::Tensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
inline int32_t NumBlocks(const int32_t n) {
return ::min((n + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
template <typename T>
__global__ void RandomSampleClassCenter(const int64_t n, int64_t seed,
int64_t increment,
const int64_t max_val, T* buffer) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
hiprandState_t localState;
size_t local_seed =
(static_cast<size_t>(seed) + 0x9E3779B9U +
(static_cast<size_t>(id) << 6U) + (static_cast<size_t>(id) >> 2U));
#ifdef PADDLE_WITH_HIP
hiprand_init(local_seed, id, increment, &localState);
CUDA_KERNEL_LOOP(i, n) {
buffer[i] = static_cast<T>(hiprand(&localState) % max_val);
}
#else
hiprand_init(local_seed, id, increment, &localState);
CUDA_KERNEL_LOOP(i, n) {
buffer[i] = static_cast<T>(hiprand(&localState) % max_val);
}
#endif
}
template <typename T>
__global__ void Range(const int64_t n, T* out) {
CUDA_KERNEL_LOOP(i, n) { out[i] = static_cast<T>(i); }
}
template <typename T>
__global__ void MarkPositiveClassCenter(const int64_t n, const int64_t rank,
const T* class_interval_ptr,
const int num_classes, const T* labels,
T* out) {
CUDA_KERNEL_LOOP(i, n) {
T label = labels[i] - class_interval_ptr[rank];
if (label >= 0 && label < num_classes) {
out[label] = label - num_classes;
}
}
}
template <typename T>
__device__ void FindIntervalIndex(const T* class_interval_ptr,
const int64_t nranks, const T value,
int64_t* find_index) {
int64_t start = 0;
int64_t end = nranks;
int64_t mid = ((end - start) >> 1) + start + 1;
while (start < end) {
if (class_interval_ptr[mid] == value) break;
if (class_interval_ptr[mid] > value)
end = mid - 1;
else
start = mid;
mid = ((end - start) >> 1) + start + 1;
}
*find_index = min(mid, end);
}
template <typename T>
__global__ void GetClassCenterBound(const int64_t n, const int64_t nranks,
const T* class_interval_ptr,
const T* key_ptr, const T* value_ptr,
T* bound_index, T* bound_value) {
CUDA_KERNEL_LOOP(i, n) {
if (i != 0) {
int64_t cur_index, pre_index;
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[i], &cur_index);
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[i - 1], &pre_index);
if (cur_index > pre_index) {
assert(cur_index < nranks);
#pragma unroll
for (int32_t j = pre_index + 1; j <= cur_index; ++j) {
bound_index[j] = static_cast<T>(i);
bound_value[j] = value_ptr[i];
}
}
}
}
CUDA_KERNEL_LOOP(i, nranks + 1) {
int64_t first_index, last_index;
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[0], &first_index);
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[n - 1], &last_index);
if (i <= first_index) {
bound_index[i] = 0;
bound_value[i] = value_ptr[0];
} else if (i > last_index) {
bound_index[i] = n;
bound_value[i] = value_ptr[n - 1] + 1;
}
}
}
template <typename T>
__global__ void GetRemappedLabel(const int64_t n, const int64_t nranks,
const T* sampled_class_interval_ptr,
const T* bound_index, const T* bound_value,
const T* label_map_key, T* label_map_value,
T* mapped_label) {
CUDA_KERNEL_LOOP(i, n) {
#pragma unroll
for (int64_t j = 0; j < nranks; j++) {
if (i >= bound_index[j] && i < bound_index[j + 1]) {
label_map_value[i] =
label_map_value[i] - bound_value[j] + sampled_class_interval_ptr[j];
}
}
mapped_label[label_map_key[i]] = label_map_value[i];
}
}
// aligned vector generates vectorized load/store on CUDA
template <typename T, int Size>
struct alignas(sizeof(T) * Size) AlignedVector {
T val[Size];
};
template <typename T>
inline int VectorizedSize(const T* pointer) {
uint64_t address = reinterpret_cast<uint64_t>(pointer);
constexpr int vec4 = std::alignment_of<AlignedVector<T, 4>>::value; // NOLINT
if (address % vec4 == 0) {
return 4;
}
return 1;
}
#undef CUDA_KERNEL_LOOP
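// NotEqualToPreviousAdjacentIterator: read-only iterator that yields 1 at
// position i when arr[i] != arr[i - 1] (and 0 at i == 0). For example, for
// the sorted keys [2, 2, 5, 5, 7] it yields [0, 0, 1, 0, 1], so an
// InclusiveSum over it produces the rank of each key among the unique sorted
// keys: [0, 0, 1, 1, 2].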
template <typename T>
class NotEqualToPreviousAdjacentIterator {
public:
using self_type = NotEqualToPreviousAdjacentIterator;
using value_type = T;
using difference_type = std::ptrdiff_t;
using pointer = T*;
using reference = T;
using iterator_category = std::input_iterator_tag;
public:
__host__ __device__ __forceinline__
NotEqualToPreviousAdjacentIterator(const T* arr, int64_t offset)
: arr_(arr), offset_(offset) {}
__host__ __device__ __forceinline__ reference operator*() const {
return offset_ == 0 ? 0 : (arr_[offset_] == arr_[offset_ - 1] ? 0 : 1);
}
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator+(Distance n) const {
self_type ret(arr_, offset_ + n);
return ret;
}
template <typename Distance>
__host__ __device__ __forceinline__ reference operator[](Distance n) const {
return *(*this + n);
}
private:
const T* arr_;
int64_t offset_;
};
template <typename T>
struct ActualNumSampledFunctor {
__host__ __device__ __forceinline__ T operator()(const T& a,
const T& b) const {
return max(num_samples, (b - a));
}
T num_samples;
explicit ActualNumSampledFunctor(const T num) : num_samples(num) {}
};
template <typename T>
class MemoryBuffer {
public:
MemoryBuffer(const int num_buffer_ele, const int num_temp_ele,
const int nranks, const platform::Place& place) {
offset1 = 0;
offset2 = offset1 + num_buffer_ele;
offset3 = offset2 + num_buffer_ele;
offset4 = offset3 + num_buffer_ele;
offset5 = offset4 + num_buffer_ele;
offset6 = offset5 + (nranks + 1);
offset7 = offset6 + (nranks + 1);
offset8 = offset7 + (nranks + 1);
offset9 = offset8 + num_temp_ele;
buffer_ptr = buffer.mutable_data<T>(
{4 * num_buffer_ele + 3 * (nranks + 1) + num_temp_ele}, place);
}
T* cub_sort_keys_ptr() { return buffer_ptr + offset1; }
T* cub_sort_keys_out_ptr() { return buffer_ptr + offset2; }
T* cub_sort_values_ptr() { return buffer_ptr + offset3; }
T* cub_sort_values_out_ptr() { return buffer_ptr + offset4; }
T* bound_index_ptr() { return buffer_ptr + offset5; }
T* bound_value_ptr() { return buffer_ptr + offset6; }
T* class_interval_ptr() { return buffer_ptr + offset7; }
void* cub_temp_storage_ptr() {
return reinterpret_cast<void*>(buffer_ptr + offset8);
}
private:
Tensor buffer;
T* buffer_ptr;
int offset1;
int offset2;
int offset3;
int offset4;
int offset5;
int offset6;
int offset7;
int offset8;
int offset9;
};
template <typename DeviceContext, typename T>
class ClassCenterSampleCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* label = ctx.Input<Tensor>("Label");
auto* remapped_label = ctx.Output<Tensor>("RemappedLabel");
auto* sampled_local_class_center =
ctx.Output<Tensor>("SampledLocalClassCenter");
int num_classes = ctx.Attr<int>("num_classes");
int num_samples = ctx.Attr<int>("num_samples");
int rid = ctx.Attr<int>("ring_id");
int nranks = ctx.Attr<int>("nranks");
int rank = ctx.Attr<int>("rank");
int seed = ctx.Attr<int>("seed");
bool fix_seed = ctx.Attr<bool>("fix_seed");
PADDLE_ENFORCE_GT(num_classes, 0,
platform::errors::InvalidArgument(
"The value 'num_classes' for Op(class_center_sample) "
"must be greater than 0, "
"but the value given is %d.",
num_classes));
PADDLE_ENFORCE_GT(num_samples, 0,
platform::errors::InvalidArgument(
"The value 'num_samples' for Op(class_center_sample) "
"must be greater than 0, "
"but the value given is %d.",
num_samples));
PADDLE_ENFORCE_LE(num_samples, num_classes,
platform::errors::InvalidArgument(
"The value 'num_samples' for Op(class_center_sample) "
"must be less than or equal to %d, "
"but the value given is %d.",
num_classes, num_samples));
auto& dev_ctx = ctx.template device_context<DeviceContext>();
auto place = dev_ctx.GetPlace();
int batch_size = label->numel();
// Algorithm:
// We first randomly generate a value in [0, num_classes) at each position of
// an array of shape [num_classes]. Then we mark the elements selected by the
// input labels as negative values. Sorting the array in ascending order then
// guarantees that the positive class centers always appear at the front of
// the sorted array, so the sampled class center indices can be read off the
// sorted keys. Finally, the remapped label is obtained by remapping the input
// label onto the sampled class centers.
// step 1: Calculate num classes per device using nccl all reduce
std::vector<T> shard_dim_vec(nranks + 1, 0);
shard_dim_vec[rank + 1] = num_classes;
Tensor num_classes_per_device;
framework::TensorFromVector(shard_dim_vec, ctx.cuda_device_context(),
&num_classes_per_device);
T* num_classes_per_device_ptr = num_classes_per_device.data<T>();
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
if (nranks > 1) {
auto map = distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(rid)) {
// Use ProcessGroup
distributed::ProcessGroup* pg = map->get(rid);
std::vector<phi::DenseTensor> in_tensor;
std::vector<phi::DenseTensor> out_tensor;
in_tensor.push_back(num_classes_per_device);
out_tensor.push_back(num_classes_per_device);
distributed::AllreduceOptions opts;
opts.reduce_op = distributed::ReduceOp::SUM;
auto task = pg->AllReduce(in_tensor, out_tensor, opts);
task->Wait();
} else {
const auto& comm =
platform::NCCLCommContext::Instance().Get(rid, ctx.GetPlace());
// use global calculate stream
const auto calcu_stream =
static_cast<platform::CUDADeviceContext*>(
platform::DeviceContextPool::Instance().Get(ctx.GetPlace()))
->stream();
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
num_classes_per_device_ptr, num_classes_per_device_ptr,
num_classes_per_device.numel(),
platform::ToNCCLDataType(
framework::TransToProtoVarType(num_classes_per_device.dtype())),
ncclSum, comm->comm(), calcu_stream));
}
}
#endif
// step 2: Determine temporary device storage requirements
int num_buffer_ele = ::max(batch_size, num_classes);
size_t cub_sort_temp_store_size = 0;
PADDLE_ENFORCE_GPU_SUCCESS((hipcub::DeviceRadixSort::SortPairs<T, T>(
nullptr, cub_sort_temp_store_size, nullptr, nullptr, nullptr, nullptr,
num_buffer_ele, 0, sizeof(T) * 8, ctx.cuda_device_context().stream())));
size_t cub_sum_temp_store_size = 0;
NotEqualToPreviousAdjacentIterator<T> unique_counting_iter_temp(nullptr, 0);
PADDLE_ENFORCE_GPU_SUCCESS(
(hipcub::DeviceScan::InclusiveSum<NotEqualToPreviousAdjacentIterator<T>,
T*>(
nullptr, cub_sum_temp_store_size, unique_counting_iter_temp,
nullptr, batch_size, ctx.cuda_device_context().stream())));
size_t cub_scan_temp_store_size = 0;
ActualNumSampledFunctor<T> actual_num_sampled_op_temp(num_samples);
PADDLE_ENFORCE_GPU_SUCCESS((hipcub::DeviceScan::InclusiveScan(
nullptr, cub_scan_temp_store_size, num_classes_per_device_ptr,
num_classes_per_device_ptr, actual_num_sampled_op_temp, nranks + 1,
ctx.cuda_device_context().stream())));
size_t cub_temp_storage_bytes =
::max(::max(cub_sort_temp_store_size, cub_scan_temp_store_size),
cub_sum_temp_store_size);
int num_temp_ele = cub_temp_storage_bytes / sizeof(T) + 1;
// step 3: Alloc buffer memory so that we can reuse allocated memory
MemoryBuffer<T> memory_buffer =
MemoryBuffer<T>(num_buffer_ele, num_temp_ele, nranks, ctx.GetPlace());
T* cub_sort_keys_ptr = memory_buffer.cub_sort_keys_ptr();
T* cub_sort_keys_out_ptr = memory_buffer.cub_sort_keys_out_ptr();
T* cub_sort_values_ptr = memory_buffer.cub_sort_values_ptr();
T* cub_sort_values_out_ptr = memory_buffer.cub_sort_values_out_ptr();
T* bound_index_ptr = memory_buffer.bound_index_ptr();
T* bound_value_ptr = memory_buffer.bound_value_ptr();
T* class_interval_ptr = memory_buffer.class_interval_ptr();
void* cub_temp_storage_ptr = memory_buffer.cub_temp_storage_ptr();
// step 4: Calculate class interval among nranks
PADDLE_ENFORCE_GPU_SUCCESS((hipcub::DeviceScan::InclusiveSum(
cub_temp_storage_ptr, cub_temp_storage_bytes,
num_classes_per_device_ptr, class_interval_ptr, nranks + 1,
ctx.cuda_device_context().stream())));
// step 5: random sample negative class center
uint64_t seed_data;
uint64_t increment;
int vec_size = VectorizedSize<T>(cub_sort_keys_ptr);
auto offset = ((num_classes - 1) /
(NumBlocks(num_classes) * kNumCUDAThreads * vec_size) +
1) *
vec_size;
int device_id = ctx.GetPlace().GetDeviceId();
auto gen_cuda = framework::DefaultCUDAGenerator(device_id);
if (!fix_seed) {
auto seed_offset = gen_cuda->IncrementOffset(offset);
seed_data = seed_offset.first;
increment = seed_offset.second;
} else {
seed_data = seed + rank;
increment = offset;
}
hipLaunchKernelGGL(( RandomSampleClassCenter<T>), dim3(NumBlocks(num_classes)), dim3(kNumCUDAThreads), 0,
ctx.cuda_device_context().stream(),
num_classes, seed_data, increment, num_classes, cub_sort_keys_ptr);
// step 6: mark positive class center as negative value
// fill the sort values to index 0, 1, ..., batch_size-1
hipLaunchKernelGGL(( MarkPositiveClassCenter), dim3(NumBlocks(batch_size)), dim3(kNumCUDAThreads), 0,
ctx.cuda_device_context().stream(),
batch_size, rank, class_interval_ptr, num_classes, label->data<T>(),
cub_sort_keys_ptr);
hipLaunchKernelGGL(( Range<T>), dim3(NumBlocks(num_buffer_ele)), dim3(kNumCUDAThreads), 0,
ctx.cuda_device_context().stream(), num_buffer_ele,
cub_sort_values_ptr);
// step 7: sort class center by ascending, so that positive class center
// always be sampled.
PADDLE_ENFORCE_GPU_SUCCESS((hipcub::DeviceRadixSort::SortPairs<T, T>(
cub_temp_storage_ptr, cub_temp_storage_bytes, cub_sort_keys_ptr,
cub_sort_keys_out_ptr, cub_sort_values_ptr, cub_sort_values_out_ptr,
num_classes, 0, sizeof(T) * 8, ctx.cuda_device_context().stream())));
// step 8: sort input label ascending
PADDLE_ENFORCE_GPU_SUCCESS((hipcub::DeviceRadixSort::SortPairs<T, T>(
cub_temp_storage_ptr, cub_temp_storage_bytes, label->data<T>(),
cub_sort_keys_out_ptr, cub_sort_values_ptr, cub_sort_keys_ptr,
batch_size, 0, sizeof(T) * 8, ctx.cuda_device_context().stream())));
// step 9: Calculate new index using InclusiveSum on ascending sorted input
// label
NotEqualToPreviousAdjacentIterator<T> unique_counting_iter(
cub_sort_keys_out_ptr, 0);
PADDLE_ENFORCE_GPU_SUCCESS((hipcub::DeviceScan::InclusiveSum<
NotEqualToPreviousAdjacentIterator<T>, T*>(
cub_temp_storage_ptr, cub_temp_storage_bytes, unique_counting_iter,
cub_sort_values_ptr, batch_size, ctx.cuda_device_context().stream())));
// step 10: Calculate new class center bound among ranks
hipLaunchKernelGGL(( GetClassCenterBound<T>), dim3(NumBlocks(batch_size)), dim3(kNumCUDAThreads), 0,
ctx.cuda_device_context().stream(),
batch_size, nranks, class_interval_ptr, cub_sort_keys_out_ptr,
cub_sort_values_ptr, bound_index_ptr, bound_value_ptr);
// step 11: Calculate the actual number of sampled classes per device.
// Since num_positive_class_center may exceed num_samples,
// we need to ensure all positive class centers per device are sampled.
ActualNumSampledFunctor<T> actual_num_sampled_op(num_samples);
PADDLE_ENFORCE_GPU_SUCCESS((hipcub::DeviceScan::InclusiveScan(
cub_temp_storage_ptr, cub_temp_storage_bytes, bound_value_ptr,
num_classes_per_device_ptr, actual_num_sampled_op, nranks + 1,
ctx.cuda_device_context().stream())));
// step 12: Calculate actual sampled class interval among nranks
PADDLE_ENFORCE_GPU_SUCCESS((hipcub::DeviceScan::InclusiveSum(
cub_temp_storage_ptr, cub_temp_storage_bytes,
num_classes_per_device_ptr, class_interval_ptr, nranks + 1,
ctx.cuda_device_context().stream())));
// step 13: Get remapped label for output
hipLaunchKernelGGL(( GetRemappedLabel<T>), dim3(NumBlocks(batch_size)), dim3(kNumCUDAThreads), 0,
ctx.cuda_device_context().stream(),
batch_size, nranks, class_interval_ptr, bound_index_ptr,
bound_value_ptr, cub_sort_keys_ptr, cub_sort_values_ptr,
remapped_label->mutable_data<T>(ctx.GetPlace()));
// step 14: Get sampled class center for output
framework::TensorCopySync(num_classes_per_device, platform::CPUPlace(),
&num_classes_per_device);
T actual_num_samples = num_classes_per_device.data<T>()[rank + 1];
T* sampled_local_class_center_ptr =
sampled_local_class_center->mutable_data<T>({actual_num_samples},
ctx.GetPlace());
memory::Copy(place, sampled_local_class_center_ptr, place,
cub_sort_values_out_ptr, actual_num_samples * sizeof(T),
nullptr);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
class_center_sample,
ops::ClassCenterSampleCUDAKernel<paddle::platform::CUDADeviceContext,
int64_t>,
ops::ClassCenterSampleCUDAKernel<paddle::platform::CUDADeviceContext, int>);
| cebf3ded9d915e1b8e4985686491342cbca214ec.cu | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef PADDLE_WITH_HIP
#include <hiprand.h>
#include <hiprand_kernel.h>
#include <hipcub/hipcub.hpp>
typedef hiprandState curandState;
namespace cub = hipcub;
#else
#include <curand.h>
#include <curand_kernel.h>
#include <cub/cub.cuh>
#endif
#include <iterator>
#include <random>
#include "paddle/fluid/operators/class_center_sample_op.h"
#include "paddle/phi/api/include/tensor.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/distributed/collective/ProcessGroup.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
namespace paddle {
namespace operators {
#define CUDA_KERNEL_LOOP(i, n) \
for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x, \
step = blockDim.x * gridDim.x; \
i < (n); i += step)
using Tensor = framework::Tensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
inline int32_t NumBlocks(const int32_t n) {
return std::min((n + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
template <typename T>
__global__ void RandomSampleClassCenter(const int64_t n, int64_t seed,
int64_t increment,
const int64_t max_val, T* buffer) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
curandState localState;
size_t local_seed =
(static_cast<size_t>(seed) + 0x9E3779B9U +
(static_cast<size_t>(id) << 6U) + (static_cast<size_t>(id) >> 2U));
#ifdef PADDLE_WITH_HIP
hiprand_init(local_seed, id, increment, &localState);
CUDA_KERNEL_LOOP(i, n) {
buffer[i] = static_cast<T>(hiprand(&localState) % max_val);
}
#else
curand_init(local_seed, id, increment, &localState);
CUDA_KERNEL_LOOP(i, n) {
buffer[i] = static_cast<T>(curand(&localState) % max_val);
}
#endif
}
template <typename T>
__global__ void Range(const int64_t n, T* out) {
CUDA_KERNEL_LOOP(i, n) { out[i] = static_cast<T>(i); }
}
template <typename T>
__global__ void MarkPositiveClassCenter(const int64_t n, const int64_t rank,
const T* class_interval_ptr,
const int num_classes, const T* labels,
T* out) {
CUDA_KERNEL_LOOP(i, n) {
T label = labels[i] - class_interval_ptr[rank];
if (label >= 0 && label < num_classes) {
out[label] = label - num_classes;
}
}
}
template <typename T>
__device__ void FindIntervalIndex(const T* class_interval_ptr,
const int64_t nranks, const T value,
int64_t* find_index) {
int64_t start = 0;
int64_t end = nranks;
int64_t mid = ((end - start) >> 1) + start + 1;
while (start < end) {
if (class_interval_ptr[mid] == value) break;
if (class_interval_ptr[mid] > value)
end = mid - 1;
else
start = mid;
mid = ((end - start) >> 1) + start + 1;
}
*find_index = min(mid, end);
}
template <typename T>
__global__ void GetClassCenterBound(const int64_t n, const int64_t nranks,
const T* class_interval_ptr,
const T* key_ptr, const T* value_ptr,
T* bound_index, T* bound_value) {
CUDA_KERNEL_LOOP(i, n) {
if (i != 0) {
int64_t cur_index, pre_index;
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[i], &cur_index);
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[i - 1], &pre_index);
if (cur_index > pre_index) {
assert(cur_index < nranks);
#pragma unroll
for (int32_t j = pre_index + 1; j <= cur_index; ++j) {
bound_index[j] = static_cast<T>(i);
bound_value[j] = value_ptr[i];
}
}
}
}
CUDA_KERNEL_LOOP(i, nranks + 1) {
int64_t first_index, last_index;
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[0], &first_index);
FindIntervalIndex(class_interval_ptr, nranks, key_ptr[n - 1], &last_index);
if (i <= first_index) {
bound_index[i] = 0;
bound_value[i] = value_ptr[0];
} else if (i > last_index) {
bound_index[i] = n;
bound_value[i] = value_ptr[n - 1] + 1;
}
}
}
template <typename T>
__global__ void GetRemappedLabel(const int64_t n, const int64_t nranks,
const T* sampled_class_interval_ptr,
const T* bound_index, const T* bound_value,
const T* label_map_key, T* label_map_value,
T* mapped_label) {
CUDA_KERNEL_LOOP(i, n) {
#pragma unroll
for (int64_t j = 0; j < nranks; j++) {
if (i >= bound_index[j] && i < bound_index[j + 1]) {
label_map_value[i] =
label_map_value[i] - bound_value[j] + sampled_class_interval_ptr[j];
}
}
mapped_label[label_map_key[i]] = label_map_value[i];
}
}
// aligned vector generates vectorized load/store on CUDA
template <typename T, int Size>
struct alignas(sizeof(T) * Size) AlignedVector {
T val[Size];
};
template <typename T>
inline int VectorizedSize(const T* pointer) {
uint64_t address = reinterpret_cast<uint64_t>(pointer);
constexpr int vec4 = std::alignment_of<AlignedVector<T, 4>>::value; // NOLINT
if (address % vec4 == 0) {
return 4;
}
return 1;
}
#undef CUDA_KERNEL_LOOP
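// NotEqualToPreviousAdjacentIterator: read-only iterator that yields 1 at
// position i when arr[i] != arr[i - 1] (and 0 at i == 0). For example, for
// the sorted keys [2, 2, 5, 5, 7] it yields [0, 0, 1, 0, 1], so an
// InclusiveSum over it produces the rank of each key among the unique sorted
// keys: [0, 0, 1, 1, 2].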
template <typename T>
class NotEqualToPreviousAdjacentIterator {
public:
using self_type = NotEqualToPreviousAdjacentIterator;
using value_type = T;
using difference_type = std::ptrdiff_t;
using pointer = T*;
using reference = T;
using iterator_category = std::input_iterator_tag;
public:
__host__ __device__ __forceinline__
NotEqualToPreviousAdjacentIterator(const T* arr, int64_t offset)
: arr_(arr), offset_(offset) {}
__host__ __device__ __forceinline__ reference operator*() const {
return offset_ == 0 ? 0 : (arr_[offset_] == arr_[offset_ - 1] ? 0 : 1);
}
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator+(Distance n) const {
self_type ret(arr_, offset_ + n);
return ret;
}
template <typename Distance>
__host__ __device__ __forceinline__ reference operator[](Distance n) const {
return *(*this + n);
}
private:
const T* arr_;
int64_t offset_;
};
template <typename T>
struct ActualNumSampledFunctor {
__host__ __device__ __forceinline__ T operator()(const T& a,
const T& b) const {
return max(num_samples, (b - a));
}
T num_samples;
explicit ActualNumSampledFunctor(const T num) : num_samples(num) {}
};
template <typename T>
class MemoryBuffer {
public:
MemoryBuffer(const int num_buffer_ele, const int num_temp_ele,
const int nranks, const platform::Place& place) {
offset1 = 0;
offset2 = offset1 + num_buffer_ele;
offset3 = offset2 + num_buffer_ele;
offset4 = offset3 + num_buffer_ele;
offset5 = offset4 + num_buffer_ele;
offset6 = offset5 + (nranks + 1);
offset7 = offset6 + (nranks + 1);
offset8 = offset7 + (nranks + 1);
offset9 = offset8 + num_temp_ele;
buffer_ptr = buffer.mutable_data<T>(
{4 * num_buffer_ele + 3 * (nranks + 1) + num_temp_ele}, place);
}
T* cub_sort_keys_ptr() { return buffer_ptr + offset1; }
T* cub_sort_keys_out_ptr() { return buffer_ptr + offset2; }
T* cub_sort_values_ptr() { return buffer_ptr + offset3; }
T* cub_sort_values_out_ptr() { return buffer_ptr + offset4; }
T* bound_index_ptr() { return buffer_ptr + offset5; }
T* bound_value_ptr() { return buffer_ptr + offset6; }
T* class_interval_ptr() { return buffer_ptr + offset7; }
void* cub_temp_storage_ptr() {
return reinterpret_cast<void*>(buffer_ptr + offset8);
}
private:
Tensor buffer;
T* buffer_ptr;
int offset1;
int offset2;
int offset3;
int offset4;
int offset5;
int offset6;
int offset7;
int offset8;
int offset9;
};
template <typename DeviceContext, typename T>
class ClassCenterSampleCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* label = ctx.Input<Tensor>("Label");
auto* remapped_label = ctx.Output<Tensor>("RemappedLabel");
auto* sampled_local_class_center =
ctx.Output<Tensor>("SampledLocalClassCenter");
int num_classes = ctx.Attr<int>("num_classes");
int num_samples = ctx.Attr<int>("num_samples");
int rid = ctx.Attr<int>("ring_id");
int nranks = ctx.Attr<int>("nranks");
int rank = ctx.Attr<int>("rank");
int seed = ctx.Attr<int>("seed");
bool fix_seed = ctx.Attr<bool>("fix_seed");
PADDLE_ENFORCE_GT(num_classes, 0,
platform::errors::InvalidArgument(
"The value 'num_classes' for Op(class_center_sample) "
"must be greater than 0, "
"but the value given is %d.",
num_classes));
PADDLE_ENFORCE_GT(num_samples, 0,
platform::errors::InvalidArgument(
"The value 'num_samples' for Op(class_center_sample) "
"must be greater than 0, "
"but the value given is %d.",
num_samples));
PADDLE_ENFORCE_LE(num_samples, num_classes,
platform::errors::InvalidArgument(
"The value 'num_samples' for Op(class_center_sample) "
"must be less than or equal to %d, "
"but the value given is %d.",
num_classes, num_samples));
auto& dev_ctx = ctx.template device_context<DeviceContext>();
auto place = dev_ctx.GetPlace();
int batch_size = label->numel();
    // Algorithm:
    // We first randomly generate a value in [0, num_classes) at each position
    // of an array of shape [num_classes]. Then we mark elements as negative
    // values in that array according to the input labels. Sorting the array in
    // ascending order therefore guarantees that the positive class centers
    // always appear at the front of the sorted array, so the sampled class
    // center indices can be read off the sorted keys. Finally, we obtain the
    // remapped label by remapping the input label according to the sampled
    // class centers.
// step 1: Calculate num classes per device using nccl all reduce
std::vector<T> shard_dim_vec(nranks + 1, 0);
shard_dim_vec[rank + 1] = num_classes;
Tensor num_classes_per_device;
framework::TensorFromVector(shard_dim_vec, ctx.cuda_device_context(),
&num_classes_per_device);
T* num_classes_per_device_ptr = num_classes_per_device.data<T>();
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
if (nranks > 1) {
auto map = distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(rid)) {
// Use ProcessGroup
distributed::ProcessGroup* pg = map->get(rid);
std::vector<phi::DenseTensor> in_tensor;
std::vector<phi::DenseTensor> out_tensor;
in_tensor.push_back(num_classes_per_device);
out_tensor.push_back(num_classes_per_device);
distributed::AllreduceOptions opts;
opts.reduce_op = distributed::ReduceOp::SUM;
auto task = pg->AllReduce(in_tensor, out_tensor, opts);
task->Wait();
} else {
const auto& comm =
platform::NCCLCommContext::Instance().Get(rid, ctx.GetPlace());
// use global calculate stream
const auto calcu_stream =
static_cast<platform::CUDADeviceContext*>(
platform::DeviceContextPool::Instance().Get(ctx.GetPlace()))
->stream();
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
num_classes_per_device_ptr, num_classes_per_device_ptr,
num_classes_per_device.numel(),
platform::ToNCCLDataType(
framework::TransToProtoVarType(num_classes_per_device.dtype())),
ncclSum, comm->comm(), calcu_stream));
}
}
#endif
// step 2: Determine temporary device storage requirements
int num_buffer_ele = std::max(batch_size, num_classes);
size_t cub_sort_temp_store_size = 0;
PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceRadixSort::SortPairs<T, T>(
nullptr, cub_sort_temp_store_size, nullptr, nullptr, nullptr, nullptr,
num_buffer_ele, 0, sizeof(T) * 8, ctx.cuda_device_context().stream())));
size_t cub_sum_temp_store_size = 0;
NotEqualToPreviousAdjacentIterator<T> unique_counting_iter_temp(nullptr, 0);
PADDLE_ENFORCE_GPU_SUCCESS(
(cub::DeviceScan::InclusiveSum<NotEqualToPreviousAdjacentIterator<T>,
T*>(
nullptr, cub_sum_temp_store_size, unique_counting_iter_temp,
nullptr, batch_size, ctx.cuda_device_context().stream())));
size_t cub_scan_temp_store_size = 0;
ActualNumSampledFunctor<T> actual_num_sampled_op_temp(num_samples);
PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceScan::InclusiveScan(
nullptr, cub_scan_temp_store_size, num_classes_per_device_ptr,
num_classes_per_device_ptr, actual_num_sampled_op_temp, nranks + 1,
ctx.cuda_device_context().stream())));
size_t cub_temp_storage_bytes =
std::max(std::max(cub_sort_temp_store_size, cub_scan_temp_store_size),
cub_sum_temp_store_size);
int num_temp_ele = cub_temp_storage_bytes / sizeof(T) + 1;
// step 3: Alloc buffer memory so that we can reuse allocated memory
MemoryBuffer<T> memory_buffer =
MemoryBuffer<T>(num_buffer_ele, num_temp_ele, nranks, ctx.GetPlace());
T* cub_sort_keys_ptr = memory_buffer.cub_sort_keys_ptr();
T* cub_sort_keys_out_ptr = memory_buffer.cub_sort_keys_out_ptr();
T* cub_sort_values_ptr = memory_buffer.cub_sort_values_ptr();
T* cub_sort_values_out_ptr = memory_buffer.cub_sort_values_out_ptr();
T* bound_index_ptr = memory_buffer.bound_index_ptr();
T* bound_value_ptr = memory_buffer.bound_value_ptr();
T* class_interval_ptr = memory_buffer.class_interval_ptr();
void* cub_temp_storage_ptr = memory_buffer.cub_temp_storage_ptr();
// step 4: Calculate class interval among nranks
PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceScan::InclusiveSum(
cub_temp_storage_ptr, cub_temp_storage_bytes,
num_classes_per_device_ptr, class_interval_ptr, nranks + 1,
ctx.cuda_device_context().stream())));
    // step 5: randomly sample negative class centers
uint64_t seed_data;
uint64_t increment;
int vec_size = VectorizedSize<T>(cub_sort_keys_ptr);
auto offset = ((num_classes - 1) /
(NumBlocks(num_classes) * kNumCUDAThreads * vec_size) +
1) *
vec_size;
int device_id = ctx.GetPlace().GetDeviceId();
auto gen_cuda = framework::DefaultCUDAGenerator(device_id);
if (!fix_seed) {
auto seed_offset = gen_cuda->IncrementOffset(offset);
seed_data = seed_offset.first;
increment = seed_offset.second;
} else {
seed_data = seed + rank;
increment = offset;
}
RandomSampleClassCenter<T><<<NumBlocks(num_classes), kNumCUDAThreads, 0,
ctx.cuda_device_context().stream()>>>(
num_classes, seed_data, increment, num_classes, cub_sort_keys_ptr);
    // step 6: mark positive class centers as negative values
    // fill the sort values with indices 0, 1, ..., batch_size-1
MarkPositiveClassCenter<<<NumBlocks(batch_size), kNumCUDAThreads, 0,
ctx.cuda_device_context().stream()>>>(
batch_size, rank, class_interval_ptr, num_classes, label->data<T>(),
cub_sort_keys_ptr);
Range<T><<<NumBlocks(num_buffer_ele), kNumCUDAThreads, 0,
ctx.cuda_device_context().stream()>>>(num_buffer_ele,
cub_sort_values_ptr);
    // step 7: sort class centers in ascending order so that positive class
    // centers are always sampled.
PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceRadixSort::SortPairs<T, T>(
cub_temp_storage_ptr, cub_temp_storage_bytes, cub_sort_keys_ptr,
cub_sort_keys_out_ptr, cub_sort_values_ptr, cub_sort_values_out_ptr,
num_classes, 0, sizeof(T) * 8, ctx.cuda_device_context().stream())));
    // step 8: sort input labels in ascending order
PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceRadixSort::SortPairs<T, T>(
cub_temp_storage_ptr, cub_temp_storage_bytes, label->data<T>(),
cub_sort_keys_out_ptr, cub_sort_values_ptr, cub_sort_keys_ptr,
batch_size, 0, sizeof(T) * 8, ctx.cuda_device_context().stream())));
    // step 9: Calculate new indices with an InclusiveSum over the
    // ascending-sorted input labels
NotEqualToPreviousAdjacentIterator<T> unique_counting_iter(
cub_sort_keys_out_ptr, 0);
PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceScan::InclusiveSum<
NotEqualToPreviousAdjacentIterator<T>, T*>(
cub_temp_storage_ptr, cub_temp_storage_bytes, unique_counting_iter,
cub_sort_values_ptr, batch_size, ctx.cuda_device_context().stream())));
// step 10: Calculate new class center bound among ranks
GetClassCenterBound<T><<<NumBlocks(batch_size), kNumCUDAThreads, 0,
ctx.cuda_device_context().stream()>>>(
batch_size, nranks, class_interval_ptr, cub_sort_keys_out_ptr,
cub_sort_values_ptr, bound_index_ptr, bound_value_ptr);
    // step 11: Calculate the actual number of sampled classes per device.
    // Since num_positive_class_center may exceed num_samples,
    // we must ensure that all positive class centers on each device are
    // sampled.
ActualNumSampledFunctor<T> actual_num_sampled_op(num_samples);
PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceScan::InclusiveScan(
cub_temp_storage_ptr, cub_temp_storage_bytes, bound_value_ptr,
num_classes_per_device_ptr, actual_num_sampled_op, nranks + 1,
ctx.cuda_device_context().stream())));
// step 12: Calculate actual sampled class interval among nranks
PADDLE_ENFORCE_GPU_SUCCESS((cub::DeviceScan::InclusiveSum(
cub_temp_storage_ptr, cub_temp_storage_bytes,
num_classes_per_device_ptr, class_interval_ptr, nranks + 1,
ctx.cuda_device_context().stream())));
// step 13: Get remapped label for output
GetRemappedLabel<T><<<NumBlocks(batch_size), kNumCUDAThreads, 0,
ctx.cuda_device_context().stream()>>>(
batch_size, nranks, class_interval_ptr, bound_index_ptr,
bound_value_ptr, cub_sort_keys_ptr, cub_sort_values_ptr,
remapped_label->mutable_data<T>(ctx.GetPlace()));
// step 14: Get sampled class center for output
framework::TensorCopySync(num_classes_per_device, platform::CPUPlace(),
&num_classes_per_device);
T actual_num_samples = num_classes_per_device.data<T>()[rank + 1];
T* sampled_local_class_center_ptr =
sampled_local_class_center->mutable_data<T>({actual_num_samples},
ctx.GetPlace());
memory::Copy(place, sampled_local_class_center_ptr, place,
cub_sort_values_out_ptr, actual_num_samples * sizeof(T),
nullptr);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
class_center_sample,
ops::ClassCenterSampleCUDAKernel<paddle::platform::CUDADeviceContext,
int64_t>,
ops::ClassCenterSampleCUDAKernel<paddle::platform::CUDADeviceContext, int>);
|
419b18d1e16d81b852091413a68ff45a3335d039.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <rocm_smi/rocm_smi.h>
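// Thin C wrappers around GPU management library queries; each helper returns 0
// on success and 1 on failure (see handleReturnValue).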
int init(){
if(nvmlInit() == RSMI_STATUS_SUCCESS){
return 0;
} else {
return 1;
}
}
int shutdown(){
if(nvmlShutdown() == RSMI_STATUS_SUCCESS){
return 0;
} else {
return 1;
}
}
int handleReturnValue(rsmi_status_t returnValue){
if(returnValue == RSMI_STATUS_SUCCESS){
//printf("success.\n");
return 0;
} else if(returnValue == NVML_ERROR_UNINITIALIZED){
printf("error, the library has not been successfully initialized.\n");
return 1;
} else if(returnValue == NVML_ERROR_INVALID_ARGUMENT){
printf("error,device or counter is invalid,or value is NULL.\n");
return 1;
} else if(returnValue == NVML_ERROR_NOT_SUPPORTED){
printf("error,the device does not support this feature.\n");
return 1;
} else if(returnValue == NVML_ERROR_GPU_IS_LOST){
printf("error, the target GPU has fallen off the bus or is otherwise inaccessible.\n");
return 1;
} else if(returnValue == NVML_ERROR_NOT_FOUND){
printf("error, the sample entries are not found.\n");
return 1;
} else {
printf("error unknown.\n");
return 1;
}
}
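// Maps a P-state enum value to the corresponding integer (0-15); unknown
// states map to 32.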
int handlePState(nvmlPstates_t *pState){
int perforState;
switch(*pState){
case NVML_PSTATE_0:
perforState = 0;
break;
case NVML_PSTATE_1:
perforState = 1;
break;
case NVML_PSTATE_2:
perforState = 2;
break;
case NVML_PSTATE_3:
perforState = 3;
break;
case NVML_PSTATE_4:
perforState = 4;
break;
case NVML_PSTATE_5:
perforState = 5;
break;
case NVML_PSTATE_6:
perforState = 6;
break;
case NVML_PSTATE_7:
perforState = 7;
break;
case NVML_PSTATE_8:
perforState = 8;
break;
case NVML_PSTATE_9:
perforState = 9;
break;
case NVML_PSTATE_10:
perforState = 10;
break;
case NVML_PSTATE_11:
perforState = 11;
break;
case NVML_PSTATE_12:
perforState = 12;
break;
case NVML_PSTATE_13:
perforState = 13;
break;
case NVML_PSTATE_14:
perforState = 14;
break;
case NVML_PSTATE_15:
perforState = 15;
break;
default:
// NVML_PSTATE_UNKNOWN
perforState = 32;
}
return perforState;
}
int getDevicehandle(uint32_t *device){
unsigned int index = 0;
if(nvmlDeviceGetHandleByIndex(index,device) == RSMI_STATUS_SUCCESS){
return 0;
} else {
return 1;
}
}
int getMaxPcieLinkGeneration(uint32_t device, unsigned int *maxLinkGen){
if(nvmlDeviceGetMaxPcieLinkGeneration(device, maxLinkGen) == RSMI_STATUS_SUCCESS) {
return 0;
} else {
return 1;
}
}
int getMaxPcieLinkWidth(uint32_t device, unsigned int *maxLinkWidth){
if(nvmlDeviceGetMaxPcieLinkWidth(device, maxLinkWidth) == RSMI_STATUS_SUCCESS) {
return 0;
} else {
return 1;
}
}
int getCurrPcieLinkGeneration(uint32_t device, unsigned int *currLinkGen){
rsmi_status_t returnValue = nvmlDeviceGetCurrPcieLinkGeneration(device, currLinkGen);
return handleReturnValue(returnValue);
}
int getCurrPcieLinkWidth(uint32_t device, unsigned int *currLinkWidth){
rsmi_status_t returnValue = nvmlDeviceGetCurrPcieLinkWidth(device, currLinkWidth);
return handleReturnValue(returnValue);
}
//The device does not support this feature.
/*
int getPcieThroughput(uint32_t device, unsigned int *value){
nvmlPcieUtilCounter_t counter1 = NVML_PCIE_UTIL_TX_BYTES;
// nvmlPcieUtilCounter_t counter2 = NVML_PCIE_UTIL_RX_BYTES;
// nvmlPcieUtilCounter_t counter3 = NVML_PCIE_UTIL_COUNT;
rsmi_status_t returnv = nvmlDeviceGetPcieThroughput(device, counter1, value);
return handleReturnValue(returnv);
}
*/
int getPcieReplayCounter(uint32_t device, unsigned int *value){
rsmi_status_t returnValue = nvmlDeviceGetPcieReplayCounter(device, value);
return handleReturnValue(returnValue);
}
int getCurrSMClockInfo(uint32_t device, unsigned int *currSmClock){
nvmlClockType_t type = NVML_CLOCK_SM;
rsmi_status_t returnValue = nvmlDeviceGetClockInfo(device, type, currSmClock);
return handleReturnValue(returnValue);
}
int getCurrMEMClockInfo(uint32_t device, unsigned int *currMemClock){
nvmlClockType_t type = NVML_CLOCK_MEM;
rsmi_status_t returnValue = nvmlDeviceGetClockInfo(device, type, currMemClock);
return handleReturnValue(returnValue);
}
int getMaxSMClockInfo(uint32_t device, unsigned int *maxSmClock){
nvmlClockType_t type = NVML_CLOCK_SM;
rsmi_status_t returnValue = nvmlDeviceGetMaxClockInfo(device, type, maxSmClock);
return handleReturnValue(returnValue);
}
int getMaxMEMClockInfo(uint32_t device, unsigned int *maxMemClock){
nvmlClockType_t type = NVML_CLOCK_MEM;
rsmi_status_t returnValue = nvmlDeviceGetMaxClockInfo(device, type, maxMemClock);
return handleReturnValue(returnValue);
}
int getSMApplicationsClock(uint32_t device, unsigned int *smclockMHz){
nvmlClockType_t type = NVML_CLOCK_SM;
rsmi_status_t returnValue = nvmlDeviceGetApplicationsClock(device, type, smclockMHz);
return handleReturnValue(returnValue);
}
int GetMEMApplicationsClock(uint32_t device, unsigned int *memclockMHz){
nvmlClockType_t type = NVML_CLOCK_MEM;
rsmi_status_t returnValue = nvmlDeviceGetApplicationsClock(device, type, memclockMHz);
return handleReturnValue(returnValue);
}
int getCurrSMClock(uint32_t device, unsigned int *currsmclo){
nvmlClockType_t type = NVML_CLOCK_SM;
nvmlClockId_t clockId = NVML_CLOCK_ID_CURRENT;
	rsmi_status_t returnValue = nvmlDeviceGetClock(device, type, clockId, currsmclo);
return handleReturnValue(returnValue);
}
int getCurrMEMClock(uint32_t device, unsigned int *currmemclo){
nvmlClockType_t type = NVML_CLOCK_MEM;
nvmlClockId_t clockId = NVML_CLOCK_ID_CURRENT;
rsmi_status_t returnValue = nvmlDeviceGetClock(device, type, clockId, currmemclo);
return handleReturnValue(returnValue);
}
int getAppSMClock(uint32_t device, unsigned int *appsmclo){
nvmlClockType_t type = NVML_CLOCK_SM;
nvmlClockId_t clockId = NVML_CLOCK_ID_APP_CLOCK_TARGET;
	rsmi_status_t returnValue = nvmlDeviceGetClock(device, type, clockId, appsmclo);
	return handleReturnValue(returnValue);
}
int getAppMEMClock(uint32_t device, unsigned int *appmemclo){
nvmlClockType_t type = NVML_CLOCK_MEM;
nvmlClockId_t clockId = NVML_CLOCK_ID_APP_CLOCK_TARGET;
	rsmi_status_t returnValue = nvmlDeviceGetClock(device, type, clockId, appmemclo);
return handleReturnValue(returnValue);
}
int getDefSMClock(uint32_t device, unsigned int *defsmclo){
nvmlClockType_t type = NVML_CLOCK_SM;
nvmlClockId_t clockId = NVML_CLOCK_ID_APP_CLOCK_DEFAULT;
rsmi_status_t returnValue = nvmlDeviceGetClock(device, type, clockId, defsmclo);
return handleReturnValue(returnValue);
}
int getDefMEMClock(uint32_t device, unsigned int *defmemclo){
nvmlClockType_t type = NVML_CLOCK_MEM;
	nvmlClockId_t clockId = NVML_CLOCK_ID_APP_CLOCK_DEFAULT;
rsmi_status_t returnValue = nvmlDeviceGetClock(device, type, clockId, defmemclo);
return handleReturnValue(returnValue);
}
int getTemperature(uint32_t device, unsigned int *temp){
nvmlTemperatureSensors_t sensorType = NVML_TEMPERATURE_GPU;
	rsmi_status_t returnValue = nvmlDeviceGetTemperature(device, sensorType, temp);
return handleReturnValue(returnValue);
}
//not support
/*
int getFanSpeed(uint32_t device, unsigned int *speed){
rsmi_status_t returnv = nvmlDeviceGetFanSpeed(device, speed);
return handleReturnValue(returnv);
}
*/
int getPowerUsage(uint32_t device, unsigned int *power){
rsmi_status_t returnv = nvmlDeviceGetPowerUsage(device, power);
return handleReturnValue(returnv);
}
//For volta or newer
/*
int getTotalEnergyConsumption(uint32_t device, unsigned long long *energy){
rsmi_status_t returnValue = nvmlDeviceGetTotalEnergyConsumption(device, energy);
return handleReturnValue(returnValue);
}
*/
int getPerformanceState(uint32_t device, nvmlPstates_t *perState, int perforState){
rsmi_status_t returnValue = nvmlDeviceGetPerformanceState(device, perState);
if(returnValue == RSMI_STATUS_SUCCESS){
perforState = handlePState(perState);
return 0;
} else {
return handleReturnValue(returnValue);
}
}
int getPowerState(uint32_t device, nvmlPstates_t *powState, int powerState){
rsmi_status_t returnValue = nvmlDeviceGetPowerState(device, powState);
if(returnValue == RSMI_STATUS_SUCCESS){
powerState = handlePState(powState);
return 0;
} else {
return handleReturnValue(returnValue);
}
}
int getMemoryInfo(uint32_t device, nvmlMemory_t *memory){
rsmi_status_t returnValue = nvmlDeviceGetMemoryInfo(device, memory);
return handleReturnValue(returnValue);
}
int getTotalVolatileSingleBitEccErrors(uint32_t device, unsigned long long *eccCounts){
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
rsmi_status_t returnValue = nvmlDeviceGetTotalEccErrors(device, errorType, counterType, eccCounts);
return handleReturnValue(returnValue);
}
int getTotalAggregateSingleBitEccErrors(uint32_t device, unsigned long long *eccCounts){
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
rsmi_status_t returnValue = nvmlDeviceGetTotalEccErrors(device, errorType, counterType, eccCounts);
return handleReturnValue(returnValue);
}
int getTotalVolatileDoubleBitEccErrors(uint32_t device, unsigned long long *eccCounts){
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
rsmi_status_t returnValue = nvmlDeviceGetTotalEccErrors(device, errorType, counterType, eccCounts);
return handleReturnValue(returnValue);
}
int getTotalAggregateDoubleBitEccErrors(uint32_t device, unsigned long long *eccCounts){
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
rsmi_status_t returnValue = nvmlDeviceGetTotalEccErrors(device, errorType, counterType, eccCounts);
return handleReturnValue(returnValue);
}
int getMemoryVolatileSingleBitErrorCounterL1Cache(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_L1_CACHE;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateSingleBitErrorCounterL1Cache(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_L1_CACHE;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileDoubleBitErrorCounterL1Cache(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_L1_CACHE;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateDoubleBitErrorCounterL1Cache(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_L1_CACHE;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileSingleBitErrorCounterL2Cache(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_L2_CACHE;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateSingleBitErrorCounterL2Cache(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_L2_CACHE;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileDoubleBitErrorCounterL2Cache(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_L2_CACHE;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateDoubleBitErrorCounterL2Cache(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_L2_CACHE;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileSingleBitErrorCounterDeviceMemory(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_DEVICE_MEMORY;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateSingleBitErrorCounterDeviceMemory(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_DEVICE_MEMORY;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileDoubleBitErrorCounterDeviceMemory(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_DEVICE_MEMORY;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateDoubleBitErrorCounterDeviceMemory(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_DEVICE_MEMORY;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileSingleBitErrorCounterRegisterFile(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_REGISTER_FILE;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateSingleBitErrorCounterRegisterFile(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_REGISTER_FILE;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileDoubleBitErrorCounterRegisterFile(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_REGISTER_FILE;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateSDoubleBitErrorCounterRegisterFile(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_REGISTER_FILE;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileSingleBitErrorCounterTextureMemory(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_TEXTURE_MEMORY;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateSingleBitErrorCounterTextureMemory(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_TEXTURE_MEMORY;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileDoubleBitErrorCounterTextureMemory(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_TEXTURE_MEMORY;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateDoubleBitErrorCounterTextureMemory(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_TEXTURE_MEMORY;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileSingleBitErrorCounterTextureShm(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_TEXTURE_SHM;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateSingleBitErrorCounterTextureShm(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_TEXTURE_SHM;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileDoubleBitErrorCounterTextureShm(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_TEXTURE_SHM;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateDoubleBitErrorCounterTextureShm(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_TEXTURE_SHM;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileSingleBitErrorCounterCBU(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_CBU;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateSingleBitErrorCounterCBU(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_CBU;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileDoubleBitErrorCounterCBU(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_CBU;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateDoubleBitErrorCounterCBU(uint32_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_CBU;
rsmi_status_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getUtilizationRates(uint32_t device, nvmlUtilization_t *utilization){
rsmi_status_t returnValue = nvmlDeviceGetUtilizationRates(device, utilization);
return handleReturnValue(returnValue);
}
int getEncoderUtilization(uint32_t device, unsigned int *utilization, unsigned int *samplingPeriodUs){
rsmi_status_t returnValue = nvmlDeviceGetEncoderUtilization(device, utilization, samplingPeriodUs);
return handleReturnValue(returnValue);
}
//Maxwell or newer
/*
int GetEncoderCapacity(uint32_t device, unsigned int *encoderCapacity){
//NVML_ENCODER_QUERY_H264 = 0
//NVML_ENCODER_QUERY_HEVC = 1
nvmlEncoderType_t encoderQueryType = NVML_ENCODER_QUERY_H264;
rsmi_status_t returnValue = nvmlDeviceGetEncoderUtilization(device, encoderQueryType, encoderCapacity);
return handleReturnValue(returnValue);
}
*/
//Maxwell or newer
/*
int getEncoderStats(uint32_t device, unsigned int *sessionCount, unsigned int *averageFps, unsigned int *averageLatency){
rsmi_status_t returnValue = nvmlDeviceGetEncoderStats(device, sessionCount, averageFps, averageLatency);
return handleReturnValue(returnValue);
}
*/
int getDecoderUtilization(uint32_t device, unsigned int *utilization, unsigned int *samplingPeriodUs){
rsmi_status_t returnValue = nvmlDeviceGetDecoderUtilization(device, utilization, samplingPeriodUs);
return handleReturnValue(returnValue);
}
//Maxwell or newer
/*
int getFBCStats(uint32_t device, nvmlFBCStats_t *fbcStats){
rsmi_status_t returnValue = nvmlDeviceGetFBCStats(device, fbcStats);
return handleReturnValue(returnValue);
}
*/
int getComputeRunningProcesses(uint32_t device, rsmi_process_info_t *infos){
unsigned int infoCount = 1;
rsmi_status_t returnValue = nvmlDeviceGetComputeRunningProcesses(device, &infoCount, infos);
return handleReturnValue(returnValue);
}
int getTotalPowerSamples(uint32_t device, unsigned long long lastSeenTimeStamp, nvmlSample_t *samples){
nvmlSamplingType_t type = NVML_TOTAL_POWER_SAMPLES;
nvmlValueType_t sampleValType;
unsigned int sampleCount = 1;
rsmi_status_t returnValue = nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, samples);
return handleReturnValue(returnValue);
}
int getGpuUtilizationSamples(uint32_t device, unsigned long long lastSeenTimeStamp, nvmlSample_t *samples){
nvmlSamplingType_t type = NVML_GPU_UTILIZATION_SAMPLES;
nvmlValueType_t sampleValType;
unsigned int sampleCount = 1;
rsmi_status_t returnValue = nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, samples);
return handleReturnValue(returnValue);
}
int getMemoryUtilizationSamples(uint32_t device, unsigned long long lastSeenTimeStamp, nvmlSample_t *samples){
nvmlSamplingType_t type = NVML_MEMORY_UTILIZATION_SAMPLES;
nvmlValueType_t sampleValType;
unsigned int sampleCount = 1;
rsmi_status_t returnValue = nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, samples);
return handleReturnValue(returnValue);
}
int getENCUtilizationSamples(uint32_t device, unsigned long long lastSeenTimeStamp, nvmlSample_t *samples){
nvmlSamplingType_t type = NVML_ENC_UTILIZATION_SAMPLES;
nvmlValueType_t sampleValType;
unsigned int sampleCount = 1;
rsmi_status_t returnValue = nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, samples);
return handleReturnValue(returnValue);
}
int getDECUtilizationSamples(uint32_t device, unsigned long long lastSeenTimeStamp, nvmlSample_t *samples){
nvmlSamplingType_t type = NVML_DEC_UTILIZATION_SAMPLES;
nvmlValueType_t sampleValType;
unsigned int sampleCount = 1;
rsmi_status_t returnValue = nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, samples);
return handleReturnValue(returnValue);
}
int getProcessorCLKSamples(uint32_t device, unsigned long long lastSeenTimeStamp, nvmlSample_t *samples){
nvmlSamplingType_t type = NVML_PROCESSOR_CLK_SAMPLES;
nvmlValueType_t sampleValType;
unsigned int sampleCount = 1;
rsmi_status_t returnValue = nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, samples);
return handleReturnValue(returnValue);
}
int getMemoryCLKSamples(uint32_t device, unsigned long long lastSeenTimeStamp, nvmlSample_t *samples){
nvmlSamplingType_t type = NVML_MEMORY_CLK_SAMPLES;
nvmlValueType_t sampleValType;
unsigned int sampleCount = 1;
rsmi_status_t returnValue = nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, samples);
return handleReturnValue(returnValue);
}
int getBAR1MemoryInfo(uint32_t device, nvmlBAR1Memory_t *bar1Memory){
rsmi_status_t returnValue = nvmlDeviceGetBAR1MemoryInfo(device, bar1Memory);
return handleReturnValue(returnValue);
}
int getViolationStatus(uint32_t device, nvmlPerfPolicyType_t perfPolicyType, nvmlViolationTime_t *violTime){
rsmi_status_t returnValue = nvmlDeviceGetViolationStatus(device, perfPolicyType, violTime);
return handleReturnValue(returnValue);
}
int getRetiredPagesSingleBitError(uint32_t device, unsigned int *pageCount){
nvmlPageRetirementCause_t cause = NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS;
unsigned long long address;
rsmi_status_t returnValue = nvmlDeviceGetRetiredPages(device, cause, pageCount, &address);
return handleReturnValue(returnValue);
}
int getRetiredPagesDoubleBitError(uint32_t device, unsigned int *pageCount){
nvmlPageRetirementCause_t cause = NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR;
unsigned long long address;
rsmi_status_t returnValue = nvmlDeviceGetRetiredPages(device, cause, pageCount, &address);
return handleReturnValue(returnValue);
}
int getFieldValues(uint32_t device, int valuesCount, nvmlFieldValue_t *values){
	rsmi_status_t returnValue = nvmlDeviceGetFieldValues(device, valuesCount, values);
	return handleReturnValue(returnValue);
}
//Pascal or newer
/*
int getNvLinkErrorCounterReplay(uint32_t device, unsigned int link, unsigned long long *counterValue){
nvmlNvLinkErrorCounter_t counter = NVML_NVLINK_ERROR_DL_REPLAY;
rsmi_status_t returnValue = nvmlDeviceGetNvLinkErrorCounter(device, link, counter, counterValue);
return handleReturnValue(returnValue);
}
int getNvLinkErrorCounterRecovery(uint32_t device, unsigned int link, unsigned long long *counterValue){
nvmlNvLinkErrorCounter_t counter = NVML_NVLINK_ERROR_DL_RECOVERY;
rsmi_status_t returnValue = nvmlDeviceGetNvLinkErrorCounter(device, link, counter, counterValue);
return handleReturnValue(returnValue);
}
int getNvLinkErrorCounterCRCFlit(uint32_t device, unsigned int link, unsigned long long *counterValue){
nvmlNvLinkErrorCounter_t counter = NVML_NVLINK_ERROR_DL_CRC_FLIT;
rsmi_status_t returnValue = nvmlDeviceGetNvLinkErrorCounter(device, link, counter, counterValue);
return handleReturnValue(returnValue);
}
int getNvLinkErrorCounterCRCData(uint32_t device, unsigned int link, unsigned long long *counterValue){
nvmlNvLinkErrorCounter_t counter = NVML_NVLINK_ERROR_DL_CRC_DATA;
rsmi_status_t returnValue = nvmlDeviceGetNvLinkErrorCounter(device, link, counter, counterValue);
return handleReturnValue(returnValue);
}
int getNvLinkUtilizationCounter(uint32_t device, unsigned int link, unsigned int counter, unsigned long long *rxcounter, unsigned long long *txcounter){
rsmi_status_t returnValue = nvmlDeviceGetNvLinkUtilizationCounter(device, link, counter, rxcounter, txcounter);
return handleReturnValue(returnValue);
}
*/
/*
int getSupportedVgpus(uint32_t device, unsigned int *vgpuCount, nvmlVgpuTypeId_t *vgpuTypeIds){
rsmi_status_t returnValue = nvmlDeviceGetSupportedVgpus(device, vgpuCount, vgpuTypeIds);
return handleReturnValue(returnValue);
}
int getActiveVgpus(uint32_t device, unsigned int *vgpuCount, nvmlVgpuInstance_t *vgpuInstances){
rsmi_status_t returnValue = nvmlDeviceGetActiveVgpus (device, vgpuCount, vgpuInstances);
return handleReturnValue(returnValue);
}
int vgpuInstanceGetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int *encoderCapacity){
rsmi_status_t returnValue = nvmlVgpuInstanceGetEncoderCapacity(vgpuInstance, encoderCapacity);
return handleReturnValue(returnValue);
}
int getVgpuUtilization(uint32_t device, unsigned long long lastSeenTimeStamp, nvmlValueType_t *sampleValType, unsigned int *vgpuInstanceSamplesCount, nvmlVgpuInstanceUtilizationSample_t *utilizationSamples){
rsmi_status_t returnValue = nvmlDeviceGetVgpuUtilization(device, lastSeenTimeStamp, sampleValType, vgpuInstanceSamplesCount, utilizationSamples);
return handleReturnValue(returnValue);
}
int getVgpuProcessUtilization(uint32_t device, unsigned long long lastSeenTimeStamp, unsigned int *vgpuProcessSamplesCount, nvmlVgpuProcessUtilizationSample_t *utilizationSamples){
rsmi_status_t returnValue = nvmlDeviceGetVgpuProcessUtilization(device, lastSeenTimeStamp, vgpuProcessSamplesCount, utilizationSamples);
return handleReturnValue(returnValue);
}
int vgpuInstanceGetEncoderStats(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, unsigned int *averageFps, unsigned int *averageLatency){
rsmi_status_t returnValue = nvmlVgpuInstanceGetEncoderStats(vgpuInstance, sessionCount, averageFps, averageLatency);
return handleReturnValue(returnValue);
}
int vgpuInstanceGetFBCStats(nvmlVgpuInstance_t vgpuInstance, nvmlFBCStats_t *fbcStats){
rsmi_status_t returnValue = nvmlVgpuInstanceGetFBCStats(vgpuInstance, fbcStats);
return handleReturnValue(returnValue);
}
*/
//Maxwell or newer
/*
int getProcessUtilization(uint32_t device, nvmlProcessUtilizationSample_t *utilization, unsigned int *processSamplesCount, unsigned long long lastSeenTimeStamp){
rsmi_status_t returnValue = nvmlDeviceGetProcessUtilization(device, utilization, processSamplesCount, lastSeenTimeStamp);
return handleReturnValue(returnValue);
}
*/
| 419b18d1e16d81b852091413a68ff45a3335d039.cu | #include <stdio.h>
#include <stdlib.h>
#include <nvml.h>
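// Thin C wrappers around NVML device queries; each helper returns 0 on success
// and 1 on failure (see handleReturnValue).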
int init(){
if(nvmlInit() == NVML_SUCCESS){
return 0;
} else {
return 1;
}
}
int shutdown(){
if(nvmlShutdown() == NVML_SUCCESS){
return 0;
} else {
return 1;
}
}
int handleReturnValue(nvmlReturn_t returnValue){
if(returnValue == NVML_SUCCESS){
//printf("success.\n");
return 0;
} else if(returnValue == NVML_ERROR_UNINITIALIZED){
printf("error, the library has not been successfully initialized.\n");
return 1;
} else if(returnValue == NVML_ERROR_INVALID_ARGUMENT){
printf("error,device or counter is invalid,or value is NULL.\n");
return 1;
} else if(returnValue == NVML_ERROR_NOT_SUPPORTED){
printf("error,the device does not support this feature.\n");
return 1;
} else if(returnValue == NVML_ERROR_GPU_IS_LOST){
printf("error, the target GPU has fallen off the bus or is otherwise inaccessible.\n");
return 1;
} else if(returnValue == NVML_ERROR_NOT_FOUND){
printf("error, the sample entries are not found.\n");
return 1;
} else {
printf("error unknown.\n");
return 1;
}
}
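// Maps an NVML P-state enum value to the corresponding integer (0-15); unknown
// states map to 32.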
int handlePState(nvmlPstates_t *pState){
int perforState;
switch(*pState){
case NVML_PSTATE_0:
perforState = 0;
break;
case NVML_PSTATE_1:
perforState = 1;
break;
case NVML_PSTATE_2:
perforState = 2;
break;
case NVML_PSTATE_3:
perforState = 3;
break;
case NVML_PSTATE_4:
perforState = 4;
break;
case NVML_PSTATE_5:
perforState = 5;
break;
case NVML_PSTATE_6:
perforState = 6;
break;
case NVML_PSTATE_7:
perforState = 7;
break;
case NVML_PSTATE_8:
perforState = 8;
break;
case NVML_PSTATE_9:
perforState = 9;
break;
case NVML_PSTATE_10:
perforState = 10;
break;
case NVML_PSTATE_11:
perforState = 11;
break;
case NVML_PSTATE_12:
perforState = 12;
break;
case NVML_PSTATE_13:
perforState = 13;
break;
case NVML_PSTATE_14:
perforState = 14;
break;
case NVML_PSTATE_15:
perforState = 15;
break;
default:
// NVML_PSTATE_UNKNOWN
perforState = 32;
}
return perforState;
}
int getDevicehandle(nvmlDevice_t *device){
unsigned int index = 0;
if(nvmlDeviceGetHandleByIndex(index,device) == NVML_SUCCESS){
return 0;
} else {
return 1;
}
}
int getMaxPcieLinkGeneration(nvmlDevice_t device, unsigned int *maxLinkGen){
if(nvmlDeviceGetMaxPcieLinkGeneration(device, maxLinkGen) == NVML_SUCCESS) {
return 0;
} else {
return 1;
}
}
int getMaxPcieLinkWidth(nvmlDevice_t device, unsigned int *maxLinkWidth){
if(nvmlDeviceGetMaxPcieLinkWidth(device, maxLinkWidth) == NVML_SUCCESS) {
return 0;
} else {
return 1;
}
}
int getCurrPcieLinkGeneration(nvmlDevice_t device, unsigned int *currLinkGen){
nvmlReturn_t returnValue = nvmlDeviceGetCurrPcieLinkGeneration(device, currLinkGen);
return handleReturnValue(returnValue);
}
int getCurrPcieLinkWidth(nvmlDevice_t device, unsigned int *currLinkWidth){
nvmlReturn_t returnValue = nvmlDeviceGetCurrPcieLinkWidth(device, currLinkWidth);
return handleReturnValue(returnValue);
}
//The device does not support this feature.
/*
int getPcieThroughput(nvmlDevice_t device, unsigned int *value){
nvmlPcieUtilCounter_t counter1 = NVML_PCIE_UTIL_TX_BYTES;
// nvmlPcieUtilCounter_t counter2 = NVML_PCIE_UTIL_RX_BYTES;
// nvmlPcieUtilCounter_t counter3 = NVML_PCIE_UTIL_COUNT;
nvmlReturn_t returnv = nvmlDeviceGetPcieThroughput(device, counter1, value);
return handleReturnValue(returnv);
}
*/
int getPcieReplayCounter(nvmlDevice_t device, unsigned int *value){
nvmlReturn_t returnValue = nvmlDeviceGetPcieReplayCounter(device, value);
return handleReturnValue(returnValue);
}
int getCurrSMClockInfo(nvmlDevice_t device, unsigned int *currSmClock){
nvmlClockType_t type = NVML_CLOCK_SM;
nvmlReturn_t returnValue = nvmlDeviceGetClockInfo(device, type, currSmClock);
return handleReturnValue(returnValue);
}
int getCurrMEMClockInfo(nvmlDevice_t device, unsigned int *currMemClock){
nvmlClockType_t type = NVML_CLOCK_MEM;
nvmlReturn_t returnValue = nvmlDeviceGetClockInfo(device, type, currMemClock);
return handleReturnValue(returnValue);
}
int getMaxSMClockInfo(nvmlDevice_t device, unsigned int *maxSmClock){
nvmlClockType_t type = NVML_CLOCK_SM;
nvmlReturn_t returnValue = nvmlDeviceGetMaxClockInfo(device, type, maxSmClock);
return handleReturnValue(returnValue);
}
int getMaxMEMClockInfo(nvmlDevice_t device, unsigned int *maxMemClock){
nvmlClockType_t type = NVML_CLOCK_MEM;
nvmlReturn_t returnValue = nvmlDeviceGetMaxClockInfo(device, type, maxMemClock);
return handleReturnValue(returnValue);
}
int getSMApplicationsClock(nvmlDevice_t device, unsigned int *smclockMHz){
nvmlClockType_t type = NVML_CLOCK_SM;
nvmlReturn_t returnValue = nvmlDeviceGetApplicationsClock(device, type, smclockMHz);
return handleReturnValue(returnValue);
}
int GetMEMApplicationsClock(nvmlDevice_t device, unsigned int *memclockMHz){
nvmlClockType_t type = NVML_CLOCK_MEM;
nvmlReturn_t returnValue = nvmlDeviceGetApplicationsClock(device, type, memclockMHz);
return handleReturnValue(returnValue);
}
int getCurrSMClock(nvmlDevice_t device, unsigned int *currsmclo){
nvmlClockType_t type = NVML_CLOCK_SM;
nvmlClockId_t clockId = NVML_CLOCK_ID_CURRENT;
	nvmlReturn_t returnValue = nvmlDeviceGetClock(device, type, clockId, currsmclo);
return handleReturnValue(returnValue);
}
int getCurrMEMClock(nvmlDevice_t device, unsigned int *currmemclo){
nvmlClockType_t type = NVML_CLOCK_MEM;
nvmlClockId_t clockId = NVML_CLOCK_ID_CURRENT;
nvmlReturn_t returnValue = nvmlDeviceGetClock(device, type, clockId, currmemclo);
return handleReturnValue(returnValue);
}
int getAppSMClock(nvmlDevice_t device, unsigned int *appsmclo){
nvmlClockType_t type = NVML_CLOCK_SM;
nvmlClockId_t clockId = NVML_CLOCK_ID_APP_CLOCK_TARGET;
	nvmlReturn_t returnValue = nvmlDeviceGetClock(device, type, clockId, appsmclo);
	return handleReturnValue(returnValue);
}
int getAppMEMClock(nvmlDevice_t device, unsigned int *appmemclo){
nvmlClockType_t type = NVML_CLOCK_MEM;
nvmlClockId_t clockId = NVML_CLOCK_ID_APP_CLOCK_TARGET;
	nvmlReturn_t returnValue = nvmlDeviceGetClock(device, type, clockId, appmemclo);
return handleReturnValue(returnValue);
}
int getDefSMClock(nvmlDevice_t device, unsigned int *defsmclo){
nvmlClockType_t type = NVML_CLOCK_SM;
nvmlClockId_t clockId = NVML_CLOCK_ID_APP_CLOCK_DEFAULT;
nvmlReturn_t returnValue = nvmlDeviceGetClock(device, type, clockId, defsmclo);
return handleReturnValue(returnValue);
}
int getDefMEMClock(nvmlDevice_t device, unsigned int *defmemclo){
nvmlClockType_t type = NVML_CLOCK_MEM;
	nvmlClockId_t clockId = NVML_CLOCK_ID_APP_CLOCK_DEFAULT;
nvmlReturn_t returnValue = nvmlDeviceGetClock(device, type, clockId, defmemclo);
return handleReturnValue(returnValue);
}
int getTemperature(nvmlDevice_t device, unsigned int *temp){
nvmlTemperatureSensors_t sensorType = NVML_TEMPERATURE_GPU;
	nvmlReturn_t returnValue = nvmlDeviceGetTemperature(device, sensorType, temp);
return handleReturnValue(returnValue);
}
//not support
/*
int getFanSpeed(nvmlDevice_t device, unsigned int *speed){
nvmlReturn_t returnv = nvmlDeviceGetFanSpeed(device, speed);
return handleReturnValue(returnv);
}
*/
int getPowerUsage(nvmlDevice_t device, unsigned int *power){
nvmlReturn_t returnv = nvmlDeviceGetPowerUsage(device, power);
return handleReturnValue(returnv);
}
//For volta or newer
/*
int getTotalEnergyConsumption(nvmlDevice_t device, unsigned long long *energy){
nvmlReturn_t returnValue = nvmlDeviceGetTotalEnergyConsumption(device, energy);
return handleReturnValue(returnValue);
}
*/
int getPerformanceState(nvmlDevice_t device, nvmlPstates_t *perState, int perforState){
nvmlReturn_t returnValue = nvmlDeviceGetPerformanceState(device, perState);
if(returnValue == NVML_SUCCESS){
perforState = handlePState(perState);
return 0;
} else {
return handleReturnValue(returnValue);
}
}
int getPowerState(nvmlDevice_t device, nvmlPstates_t *powState, int powerState){
nvmlReturn_t returnValue = nvmlDeviceGetPowerState(device, powState);
if(returnValue == NVML_SUCCESS){
powerState = handlePState(powState);
return 0;
} else {
return handleReturnValue(returnValue);
}
}
int getMemoryInfo(nvmlDevice_t device, nvmlMemory_t *memory){
nvmlReturn_t returnValue = nvmlDeviceGetMemoryInfo(device, memory);
return handleReturnValue(returnValue);
}
int getTotalVolatileSingleBitEccErrors(nvmlDevice_t device, unsigned long long *eccCounts){
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
nvmlReturn_t returnValue = nvmlDeviceGetTotalEccErrors(device, errorType, counterType, eccCounts);
return handleReturnValue(returnValue);
}
int getTotalAggregateSingleBitEccErrors(nvmlDevice_t device, unsigned long long *eccCounts){
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
nvmlReturn_t returnValue = nvmlDeviceGetTotalEccErrors(device, errorType, counterType, eccCounts);
return handleReturnValue(returnValue);
}
int getTotalVolatileDoubleBitEccErrors(nvmlDevice_t device, unsigned long long *eccCounts){
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
nvmlReturn_t returnValue = nvmlDeviceGetTotalEccErrors(device, errorType, counterType, eccCounts);
return handleReturnValue(returnValue);
}
int getTotalAggregateDoubleBitEccErrors(nvmlDevice_t device, unsigned long long *eccCounts){
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
nvmlReturn_t returnValue = nvmlDeviceGetTotalEccErrors(device, errorType, counterType, eccCounts);
return handleReturnValue(returnValue);
}
int getMemoryVolatileSingleBitErrorCounterL1Cache(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_L1_CACHE;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateSingleBitErrorCounterL1Cache(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_L1_CACHE;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileDoubleBitErrorCounterL1Cache(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_L1_CACHE;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateDoubleBitErrorCounterL1Cache(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_L1_CACHE;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileSingleBitErrorCounterL2Cache(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_L2_CACHE;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateSingleBitErrorCounterL2Cache(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_L2_CACHE;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileDoubleBitErrorCounterL2Cache(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_L2_CACHE;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateDoubleBitErrorCounterL2Cache(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_L2_CACHE;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileSingleBitErrorCounterDeviceMemory(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_DEVICE_MEMORY;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateSingleBitErrorCounterDeviceMemory(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_DEVICE_MEMORY;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileDoubleBitErrorCounterDeviceMemory(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_DEVICE_MEMORY;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateDoubleBitErrorCounterDeviceMemory(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_DEVICE_MEMORY;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileSingleBitErrorCounterRegisterFile(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_REGISTER_FILE;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateSingleBitErrorCounterRegisterFile(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_REGISTER_FILE;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileDoubleBitErrorCounterRegisterFile(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_REGISTER_FILE;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateDoubleBitErrorCounterRegisterFile(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_REGISTER_FILE;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileSingleBitErrorCounterTextureMemory(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_TEXTURE_MEMORY;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateSingleBitErrorCounterTextureMemory(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_TEXTURE_MEMORY;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileDoubleBitErrorCounterTextureMemory(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_TEXTURE_MEMORY;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateDoubleBitErrorCounterTextureMemory(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_TEXTURE_MEMORY;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileSingleBitErrorCounterTextureShm(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_TEXTURE_SHM;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateSingleBitErrorCounterTextureShm(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_TEXTURE_SHM;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileDoubleBitErrorCounterTextureShm(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_TEXTURE_SHM;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateDoubleBitErrorCounterTextureShm(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_TEXTURE_SHM;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileSingleBitErrorCounterCBU(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_CBU;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateSingleBitErrorCounterCBU(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_CORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_CBU;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryVolatileDoubleBitErrorCounterCBU(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_VOLATILE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_CBU;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getMemoryAggregateDoubleBitErrorCounterCBU(nvmlDevice_t device, unsigned long long *count){
//NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
//NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
nvmlMemoryErrorType_t errorType = NVML_MEMORY_ERROR_TYPE_UNCORRECTED;
//NVML_VOLATILE_ECC = 0
//NVML_AGGREGATE_ECC = 1
nvmlEccCounterType_t counterType = NVML_AGGREGATE_ECC;
nvmlMemoryLocation_t locationType = NVML_MEMORY_LOCATION_CBU;
nvmlReturn_t returnValue = nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count);
return handleReturnValue(returnValue);
}
int getUtilizationRates(nvmlDevice_t device, nvmlUtilization_t *utilization){
nvmlReturn_t returnValue = nvmlDeviceGetUtilizationRates(device, utilization);
return handleReturnValue(returnValue);
}
int getEncoderUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs){
nvmlReturn_t returnValue = nvmlDeviceGetEncoderUtilization(device, utilization, samplingPeriodUs);
return handleReturnValue(returnValue);
}
//Maxwell or newer
/*
int GetEncoderCapacity(nvmlDevice_t device, unsigned int *encoderCapacity){
//NVML_ENCODER_QUERY_H264 = 0
//NVML_ENCODER_QUERY_HEVC = 1
nvmlEncoderType_t encoderQueryType = NVML_ENCODER_QUERY_H264;
	nvmlReturn_t returnValue = nvmlDeviceGetEncoderCapacity(device, encoderQueryType, encoderCapacity);
return handleReturnValue(returnValue);
}
*/
//Maxwell or newer
/*
int getEncoderStats(nvmlDevice_t device, unsigned int *sessionCount, unsigned int *averageFps, unsigned int *averageLatency){
nvmlReturn_t returnValue = nvmlDeviceGetEncoderStats(device, sessionCount, averageFps, averageLatency);
return handleReturnValue(returnValue);
}
*/
int getDecoderUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs){
nvmlReturn_t returnValue = nvmlDeviceGetDecoderUtilization(device, utilization, samplingPeriodUs);
return handleReturnValue(returnValue);
}
//Maxwell or newer
/*
int getFBCStats(nvmlDevice_t device, nvmlFBCStats_t *fbcStats){
nvmlReturn_t returnValue = nvmlDeviceGetFBCStats(device, fbcStats);
return handleReturnValue(returnValue);
}
*/
int getComputeRunningProcesses(nvmlDevice_t device, nvmlProcessInfo_t *infos){
unsigned int infoCount = 1;
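	//Note: with infoCount fixed at 1 this retrieves at most one process entry;
	//nvmlDeviceGetComputeRunningProcesses returns NVML_ERROR_INSUFFICIENT_SIZE when more
	//compute processes are running than the buffer can hold.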
nvmlReturn_t returnValue = nvmlDeviceGetComputeRunningProcesses(device, &infoCount, infos);
return handleReturnValue(returnValue);
}
int getTotalPowerSamples(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, nvmlSample_t *samples){
nvmlSamplingType_t type = NVML_TOTAL_POWER_SAMPLES;
nvmlValueType_t sampleValType;
unsigned int sampleCount = 1;
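	//Note: sampleCount is an in/out parameter of nvmlDeviceGetSamples; 1 requests a single
	//sample here, and the same pattern is reused by the other *Samples wrappers below.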
nvmlReturn_t returnValue = nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, samples);
return handleReturnValue(returnValue);
}
int getGpuUtilizationSamples(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, nvmlSample_t *samples){
nvmlSamplingType_t type = NVML_GPU_UTILIZATION_SAMPLES;
nvmlValueType_t sampleValType;
unsigned int sampleCount = 1;
nvmlReturn_t returnValue = nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, samples);
return handleReturnValue(returnValue);
}
int getMemoryUtilizationSamples(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, nvmlSample_t *samples){
nvmlSamplingType_t type = NVML_MEMORY_UTILIZATION_SAMPLES;
nvmlValueType_t sampleValType;
unsigned int sampleCount = 1;
nvmlReturn_t returnValue = nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, samples);
return handleReturnValue(returnValue);
}
int getENCUtilizationSamples(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, nvmlSample_t *samples){
nvmlSamplingType_t type = NVML_ENC_UTILIZATION_SAMPLES;
nvmlValueType_t sampleValType;
unsigned int sampleCount = 1;
nvmlReturn_t returnValue = nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, samples);
return handleReturnValue(returnValue);
}
int getDECUtilizationSamples(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, nvmlSample_t *samples){
nvmlSamplingType_t type = NVML_DEC_UTILIZATION_SAMPLES;
nvmlValueType_t sampleValType;
unsigned int sampleCount = 1;
nvmlReturn_t returnValue = nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, samples);
return handleReturnValue(returnValue);
}
int getProcessorCLKSamples(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, nvmlSample_t *samples){
nvmlSamplingType_t type = NVML_PROCESSOR_CLK_SAMPLES;
nvmlValueType_t sampleValType;
unsigned int sampleCount = 1;
nvmlReturn_t returnValue = nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, samples);
return handleReturnValue(returnValue);
}
int getMemoryCLKSamples(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, nvmlSample_t *samples){
nvmlSamplingType_t type = NVML_MEMORY_CLK_SAMPLES;
nvmlValueType_t sampleValType;
unsigned int sampleCount = 1;
nvmlReturn_t returnValue = nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, samples);
return handleReturnValue(returnValue);
}
int getBAR1MemoryInfo(nvmlDevice_t device, nvmlBAR1Memory_t *bar1Memory){
nvmlReturn_t returnValue = nvmlDeviceGetBAR1MemoryInfo(device, bar1Memory);
return handleReturnValue(returnValue);
}
int getViolationStatus(nvmlDevice_t device, nvmlPerfPolicyType_t perfPolicyType, nvmlViolationTime_t *violTime){
nvmlReturn_t returnValue = nvmlDeviceGetViolationStatus(device, perfPolicyType, violTime);
return handleReturnValue(returnValue);
}
int getRetiredPagesSingleBitError(nvmlDevice_t device, unsigned int *pageCount){
nvmlPageRetirementCause_t cause = NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS;
unsigned long long address;
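	//Note: *pageCount is an in/out parameter of nvmlDeviceGetRetiredPages; the caller must
	//set it to the capacity of the address buffer (a single slot here) before the call.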
nvmlReturn_t returnValue = nvmlDeviceGetRetiredPages(device, cause, pageCount, &address);
return handleReturnValue(returnValue);
}
int getRetiredPagesDoubleBitError(nvmlDevice_t device, unsigned int *pageCount){
nvmlPageRetirementCause_t cause = NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR;
unsigned long long address;
nvmlReturn_t returnValue = nvmlDeviceGetRetiredPages(device, cause, pageCount, &address);
return handleReturnValue(returnValue);
}
int getFieldValues(nvmlDevice_t device, int valuesCount, nvmlFieldValue_t *values){
	nvmlReturn_t returnValue = nvmlDeviceGetFieldValues(device, valuesCount, values);
	return handleReturnValue(returnValue);
}
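//Illustrative sketch only (not part of the original wrappers): nvmlDeviceGetFieldValues
//expects the caller to fill in values[i].fieldId before the call, using the NVML_FI_DEV_*
//identifiers from nvml.h.
/*
int getFieldValuesExample(nvmlDevice_t device){
	nvmlFieldValue_t values[2];
	values[0].fieldId = NVML_FI_DEV_ECC_SBE_VOL_TOTAL;
	values[1].fieldId = NVML_FI_DEV_ECC_DBE_VOL_TOTAL;
	return getFieldValues(device, 2, values);
}
*/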
//Pascal or newer
/*
int getNvLinkErrorCounterReplay(nvmlDevice_t device, unsigned int link, unsigned long long *counterValue){
nvmlNvLinkErrorCounter_t counter = NVML_NVLINK_ERROR_DL_REPLAY;
nvmlReturn_t returnValue = nvmlDeviceGetNvLinkErrorCounter(device, link, counter, counterValue);
return handleReturnValue(returnValue);
}
int getNvLinkErrorCounterRecovery(nvmlDevice_t device, unsigned int link, unsigned long long *counterValue){
nvmlNvLinkErrorCounter_t counter = NVML_NVLINK_ERROR_DL_RECOVERY;
nvmlReturn_t returnValue = nvmlDeviceGetNvLinkErrorCounter(device, link, counter, counterValue);
return handleReturnValue(returnValue);
}
int getNvLinkErrorCounterCRCFlit(nvmlDevice_t device, unsigned int link, unsigned long long *counterValue){
nvmlNvLinkErrorCounter_t counter = NVML_NVLINK_ERROR_DL_CRC_FLIT;
nvmlReturn_t returnValue = nvmlDeviceGetNvLinkErrorCounter(device, link, counter, counterValue);
return handleReturnValue(returnValue);
}
int getNvLinkErrorCounterCRCData(nvmlDevice_t device, unsigned int link, unsigned long long *counterValue){
nvmlNvLinkErrorCounter_t counter = NVML_NVLINK_ERROR_DL_CRC_DATA;
nvmlReturn_t returnValue = nvmlDeviceGetNvLinkErrorCounter(device, link, counter, counterValue);
return handleReturnValue(returnValue);
}
int getNvLinkUtilizationCounter(nvmlDevice_t device, unsigned int link, unsigned int counter, unsigned long long *rxcounter, unsigned long long *txcounter){
nvmlReturn_t returnValue = nvmlDeviceGetNvLinkUtilizationCounter(device, link, counter, rxcounter, txcounter);
return handleReturnValue(returnValue);
}
*/
/*
int getSupportedVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuTypeId_t *vgpuTypeIds){
nvmlReturn_t returnValue = nvmlDeviceGetSupportedVgpus(device, vgpuCount, vgpuTypeIds);
return handleReturnValue(returnValue);
}
int getActiveVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuInstance_t *vgpuInstances){
nvmlReturn_t returnValue = nvmlDeviceGetActiveVgpus (device, vgpuCount, vgpuInstances);
return handleReturnValue(returnValue);
}
int vgpuInstanceGetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int *encoderCapacity){
nvmlReturn_t returnValue = nvmlVgpuInstanceGetEncoderCapacity(vgpuInstance, encoderCapacity);
return handleReturnValue(returnValue);
}
int getVgpuUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, nvmlValueType_t *sampleValType, unsigned int *vgpuInstanceSamplesCount, nvmlVgpuInstanceUtilizationSample_t *utilizationSamples){
nvmlReturn_t returnValue = nvmlDeviceGetVgpuUtilization(device, lastSeenTimeStamp, sampleValType, vgpuInstanceSamplesCount, utilizationSamples);
return handleReturnValue(returnValue);
}
int getVgpuProcessUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, unsigned int *vgpuProcessSamplesCount, nvmlVgpuProcessUtilizationSample_t *utilizationSamples){
nvmlReturn_t returnValue = nvmlDeviceGetVgpuProcessUtilization(device, lastSeenTimeStamp, vgpuProcessSamplesCount, utilizationSamples);
return handleReturnValue(returnValue);
}
int vgpuInstanceGetEncoderStats(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, unsigned int *averageFps, unsigned int *averageLatency){
nvmlReturn_t returnValue = nvmlVgpuInstanceGetEncoderStats(vgpuInstance, sessionCount, averageFps, averageLatency);
return handleReturnValue(returnValue);
}
int vgpuInstanceGetFBCStats(nvmlVgpuInstance_t vgpuInstance, nvmlFBCStats_t *fbcStats){
nvmlReturn_t returnValue = nvmlVgpuInstanceGetFBCStats(vgpuInstance, fbcStats);
return handleReturnValue(returnValue);
}
*/
//Maxwell or newer
/*
int getProcessUtilization(nvmlDevice_t device, nvmlProcessUtilizationSample_t *utilization, unsigned int *processSamplesCount, unsigned long long lastSeenTimeStamp){
nvmlReturn_t returnValue = nvmlDeviceGetProcessUtilization(device, utilization, processSamplesCount, lastSeenTimeStamp);
return handleReturnValue(returnValue);
}
*/
|
d8061e2569c5e17a8b345b1792ea80f25cdd9cbd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#include <cstdio>
#include <exception>
/** macro to throw a runtime error */
#define THROW(fmt, ...) \
do { \
std::string msg; \
char errMsg[2048]; \
std::sprintf(errMsg, "Exception occured! file=%s line=%d: ", __FILE__, \
__LINE__); \
msg += errMsg; \
std::sprintf(errMsg, fmt, ##__VA_ARGS__); \
msg += errMsg; \
throw std::runtime_error(msg); \
} while (0)
/** macro to check for a conditional and assert on failure */
#define ASSERT(check, fmt, ...) \
do { \
if (!(check)) THROW(fmt, ##__VA_ARGS__); \
} while (0)
/** check for cuda runtime API errors and assert accordingly */
#define CUDA_CHECK(call) \
do { \
hipError_t status = call; \
ASSERT(status == hipSuccess, "FAIL: call='%s'. Reason:%s\n", #call, \
hipGetErrorString(status)); \
} while (0)
__global__ void init_xy(double* x, double* y, double a, double b, int N) {
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int idx = bid*blockDim.x + tid;
if(idx < N) {
x[idx] = a;
y[idx] = b;
}
}
__global__ void saxpy(const double* x, const double* y, const double a, const int N, double* z) {
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int idx = bid*blockDim.x + tid;
if(idx < N) {
z[idx] = a*x[idx] + y[idx];
// printf("z[%d]=%f * %f + %f = %f\n", idx, a, x[idx], y[idx], z[idx]);
}
}
int checkResults(double* d_z, double a, const int N) {
std::vector<double> h_z(N);
CUDA_CHECK(hipMemcpy(h_z.data(), d_z, sizeof(double)*N, hipMemcpyDeviceToHost));
// check
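  // exact != comparison below is safe only because 42.0*1.0 + 2.0 == 44.0 is exactly
  // representable in double; a tolerance check would be needed for general inputs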
int i=0;
for(auto zi : h_z) {
if(zi != a*1 + 2) {
std::printf("ERROR: z[%d]=%f != %fn", i, zi, a*1 + 2);
return -1;
}
i++;
}
return 0;
}
int main() {
const int N = 100;
double* d_x;
double* d_y;
double* d_z;
CUDA_CHECK(hipMalloc(&d_x, sizeof(double)*N));
CUDA_CHECK(hipMalloc(&d_y, sizeof(double)*N));
CUDA_CHECK(hipMalloc(&d_z, sizeof(double)*N));
const int threads_per_block = 64;
const int num_blocks = ceil((double)N/threads_per_block);
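  // ceil() on a double is fine for these sizes; the common integer-only equivalent is
  // (N + threads_per_block - 1) / threads_per_block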
// printf("Num blocks=%d\n", num_blocks);
hipLaunchKernelGGL(( init_xy), dim3(num_blocks), dim3(threads_per_block), 0, 0, d_x, d_y, 1.0, 2.0, N);
CUDA_CHECK(hipGetLastError());
double a = 42.0;
hipLaunchKernelGGL(( saxpy), dim3(num_blocks), dim3(threads_per_block), 0, 0, d_x, d_y, a, N, d_z);
CUDA_CHECK(hipGetLastError());
return checkResults(d_z, a, N);
}
| d8061e2569c5e17a8b345b1792ea80f25cdd9cbd.cu | #include <iostream>
#include <vector>
#include <cstdio>
#include <exception>
/** macro to throw a runtime error */
#define THROW(fmt, ...) \
do { \
std::string msg; \
char errMsg[2048]; \
std::sprintf(errMsg, "Exception occured! file=%s line=%d: ", __FILE__, \
__LINE__); \
msg += errMsg; \
std::sprintf(errMsg, fmt, ##__VA_ARGS__); \
msg += errMsg; \
throw std::runtime_error(msg); \
} while (0)
/** macro to check for a conditional and assert on failure */
#define ASSERT(check, fmt, ...) \
do { \
if (!(check)) THROW(fmt, ##__VA_ARGS__); \
} while (0)
/** check for cuda runtime API errors and assert accordingly */
#define CUDA_CHECK(call) \
do { \
cudaError_t status = call; \
ASSERT(status == cudaSuccess, "FAIL: call='%s'. Reason:%s\n", #call, \
cudaGetErrorString(status)); \
} while (0)
__global__ void init_xy(double* x, double* y, double a, double b, int N) {
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int idx = bid*blockDim.x + tid;
if(idx < N) {
x[idx] = a;
y[idx] = b;
}
}
__global__ void saxpy(const double* x, const double* y, const double a, const int N, double* z) {
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int idx = bid*blockDim.x + tid;
if(idx < N) {
z[idx] = a*x[idx] + y[idx];
// printf("z[%d]=%f * %f + %f = %f\n", idx, a, x[idx], y[idx], z[idx]);
}
}
int checkResults(double* d_z, double a, const int N) {
std::vector<double> h_z(N);
CUDA_CHECK(cudaMemcpy(h_z.data(), d_z, sizeof(double)*N, cudaMemcpyDeviceToHost));
// check
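  // exact != comparison below is safe only because 42.0*1.0 + 2.0 == 44.0 is exactly
  // representable in double; a tolerance check would be needed for general inputs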
int i=0;
for(auto zi : h_z) {
if(zi != a*1 + 2) {
std::printf("ERROR: z[%d]=%f != %fn", i, zi, a*1 + 2);
return -1;
}
i++;
}
return 0;
}
int main() {
const int N = 100;
double* d_x;
double* d_y;
double* d_z;
CUDA_CHECK(cudaMalloc(&d_x, sizeof(double)*N));
CUDA_CHECK(cudaMalloc(&d_y, sizeof(double)*N));
CUDA_CHECK(cudaMalloc(&d_z, sizeof(double)*N));
const int threads_per_block = 64;
const int num_blocks = ceil((double)N/threads_per_block);
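  // ceil() on a double is fine for these sizes; the common integer-only equivalent is
  // (N + threads_per_block - 1) / threads_per_block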
// printf("Num blocks=%d\n", num_blocks);
init_xy<<<num_blocks, threads_per_block>>>(d_x, d_y, 1.0, 2.0, N);
CUDA_CHECK(cudaGetLastError());
double a = 42.0;
saxpy<<<num_blocks, threads_per_block>>>(d_x, d_y, a, N, d_z);
CUDA_CHECK(cudaGetLastError());
return checkResults(d_z, a, N);
}
|
890815334b5a697e20ea6f34abb9a265740b9349.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <srad.h>
// includes, project
#include <hip/hip_runtime.h>
// includes, kernels
#include <srad_kernel.cu>
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
void m5_dump_stats(uint64_t ns_delay, uint64_t ns_period);
}
#endif
void random_matrix(float *I, int rows, int cols);
void runTest( int argc, char** argv);
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <rows> <cols> <y1> <y2> <x1> <x2> <lamda> <no. of iter>\n", argv[0]);
fprintf(stderr, "\t<rows> - number of rows\n");
fprintf(stderr, "\t<cols> - number of cols\n");
fprintf(stderr, "\t<y1> - y1 value of the speckle\n");
fprintf(stderr, "\t<y2> - y2 value of the speckle\n");
fprintf(stderr, "\t<x1> - x1 value of the speckle\n");
fprintf(stderr, "\t<x2> - x2 value of the speckle\n");
fprintf(stderr, "\t<lamda> - lambda (0,1)\n");
fprintf(stderr, "\t<no. of iter> - number of iterations\n");
exit(1);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
// BEGIN ADARSH DUMMY LOOP
int blosum62[24][24] = {
{ 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4},
{-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4},
{-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4},
{-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{ 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4},
{-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4},
{-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4},
{-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4},
{-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4},
{-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4},
{-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4},
{-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4},
{-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4},
{-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4},
{ 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4},
{ 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4},
{-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4},
{-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4},
{ 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4},
{-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4},
{-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1}
};
int *dummyArray[48];
int i,j,k,ctr;
for (i=0;i<48;i++)
dummyArray[i] = (int *)malloc( 48 * sizeof(int) );
for ( i=0; i<24; i++)
for ( j=0; j<24; j++){
dummyArray[i][j] = blosum62[i][j];
dummyArray[i+24][j] = blosum62[i][j];
dummyArray[i][j+24] = blosum62[i][j];
dummyArray[i+24][j+24] = blosum62[i][j];
}
for ( k=1; k<20000; k++) {
for ( i=1; i<47; i++)
for ( j=1; j<47; j++)
dummyArray[i][j] += (dummyArray[i-1][j] + dummyArray[i+1][j]) * (dummyArray[i][j+1] + dummyArray[i][j-1]);
for ( i=1; i<47; i++)
for ( j=1; j<47; j++)
dummyArray[i][j] += (dummyArray[i-1][j] * dummyArray[i+1][j]) + (dummyArray[i][j+1] * dummyArray[i][j-1]);
for (i=0;i<48; i++) {
ctr = dummyArray[0][i];
for ( j=1;j<48; j++) {
ctr += dummyArray[j][i];
dummyArray[j][i] = ctr;
}
}
for (i=0;i<48; i++) {
ctr = dummyArray[i][0];
for ( j=1;j<48; j++) {
ctr += dummyArray[i][j];
dummyArray[i][j] = ctr;
}
}
}
fprintf(stdout, "Begin dummy output\n");
for ( i=1; i<48; i++)
fprintf(stdout, "%d ", dummyArray[23][i]);
fprintf(stdout, "\nEnd of dummy output\n");
for (i=0;i<48;i++)
free(dummyArray[i]);
// END ADARSH DUMMY LOOP
runTest( argc, argv);
return EXIT_SUCCESS;
}
void
runTest( int argc, char** argv)
{
int rows, cols, size_I, size_R, niter = 10, iter;
float *I, *J, lambda, q0sqr, sum, sum2, tmp, meanROI,varROI ;
float *dN,*dS,*dW,*dE;
#ifdef CPU
float Jc, G2, L, num, den, qsqr;
int *iN,*iS,*jE,*jW, k;
float cN,cS,cW,cE,D;
#endif
#ifdef GPU
//float *J_cuda;
// float *C_cuda;
//float *E_C, *W_C, *N_C, *S_C;
#endif
unsigned int r1, r2, c1, c2;
float *c;
if (argc == 9)
{
rows = atoi(argv[1]); //number of rows in the domain
cols = atoi(argv[2]); //number of cols in the domain
if ((rows%16!=0) || (cols%16!=0)){
fprintf(stderr, "rows and cols must be multiples of 16\n");
exit(1);
}
r1 = atoi(argv[3]); //y1 position of the speckle
r2 = atoi(argv[4]); //y2 position of the speckle
c1 = atoi(argv[5]); //x1 position of the speckle
c2 = atoi(argv[6]); //x2 position of the speckle
lambda = atof(argv[7]); //Lambda value
niter = atoi(argv[8]); //number of iterations
}
else{
usage(argc, argv);
}
size_I = cols * rows;
size_R = (r2-r1+1)*(c2-c1+1);
I = (float *)malloc( size_I * sizeof(float) );
J = (float *)malloc( size_I * sizeof(float) );
c = (float *)malloc(sizeof(float)* size_I) ;
dN = (float *)malloc(sizeof(float)* size_I) ;
dS = (float *)malloc(sizeof(float)* size_I) ;
dW = (float *)malloc(sizeof(float)* size_I) ;
dE = (float *)malloc(sizeof(float)* size_I) ;
#ifdef CPU
iN = (int *)malloc(sizeof(unsigned int*) * rows) ;
iS = (int *)malloc(sizeof(unsigned int*) * rows) ;
jW = (int *)malloc(sizeof(unsigned int*) * cols) ;
jE = (int *)malloc(sizeof(unsigned int*) * cols) ;
for (int i=0; i< rows; i++) {
iN[i] = i-1;
iS[i] = i+1;
}
for (int j=0; j< cols; j++) {
jW[j] = j-1;
jE[j] = j+1;
}
iN[0] = 0;
iS[rows-1] = rows-1;
jW[0] = 0;
jE[cols-1] = cols-1;
#endif
#ifdef GPU
//Allocate device memory
//hipMalloc((void**)& J_cuda, sizeof(float)* size_I);
//hipMalloc((void**)& C_cuda, sizeof(float)* size_I);
//hipMalloc((void**)& E_C, sizeof(float)* size_I);
//hipMalloc((void**)& W_C, sizeof(float)* size_I);
//hipMalloc((void**)& S_C, sizeof(float)* size_I);
//hipMalloc((void**)& N_C, sizeof(float)* size_I);
#endif
printf("Randomizing the input matrix\n");
//Generate a random matrix
random_matrix(I, rows, cols);
for (int k = 0; k < size_I; k++ ) {
J[k] = (float)exp(I[k]) ;
}
printf("Start the SRAD main loop\n");
#ifdef GEM5_FUSION
m5_dump_stats(0, 0);
m5_work_begin(0, 0);
#endif
for (iter=0; iter< niter; iter++){
sum=0; sum2=0;
for (int i=r1; i<=r2; i++) {
for (int j=c1; j<=c2; j++) {
tmp = J[i * cols + j];
sum += tmp ;
sum2 += tmp*tmp;
}
}
meanROI = sum / size_R;
varROI = (sum2 / size_R) - meanROI*meanROI;
q0sqr = varROI / (meanROI*meanROI);
#ifdef CPU
for (int i = 0 ; i < rows ; i++) {
for (int j = 0; j < cols; j++) {
k = i * cols + j;
Jc = J[k];
        // directional derivatives
dN[k] = J[iN[i] * cols + j] - Jc;
dS[k] = J[iS[i] * cols + j] - Jc;
dW[k] = J[i * cols + jW[j]] - Jc;
dE[k] = J[i * cols + jE[j]] - Jc;
G2 = (dN[k]*dN[k] + dS[k]*dS[k]
+ dW[k]*dW[k] + dE[k]*dE[k]) / (Jc*Jc);
L = (dN[k] + dS[k] + dW[k] + dE[k]) / Jc;
num = (0.5*G2) - ((1.0/16.0)*(L*L)) ;
den = 1 + (.25*L);
qsqr = num/(den*den);
        // diffusion coefficient (equ 33)
den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ;
c[k] = 1.0 / (1.0+den) ;
        // saturate diffusion coefficient
if (c[k] < 0) {c[k] = 0;}
else if (c[k] > 1) {c[k] = 1;}
}
}
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
// current index
k = i * cols + j;
        // diffusion coefficient
cN = c[k];
cS = c[iS[i] * cols + j];
cW = c[k];
cE = c[i * cols + jE[j]];
// divergence (equ 58)
D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k];
// image update (equ 61)
J[k] = J[k] + 0.25*lambda*D;
}
}
#endif // CPU
#ifdef GPU
//Currently the input size must be divided by 16 - the block size
int block_x = cols/BLOCK_SIZE ;
int block_y = rows/BLOCK_SIZE ;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(block_x , block_y);
//Copy data from main memory to device memory
//hipMemcpy(J_cuda, J, sizeof(float) * size_I, hipMemcpyHostToDevice);
//Run kernels
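	//Note: the kernels below receive host-allocated buffers (the hipMalloc/hipMemcpy calls
	//above are commented out); this relies on a shared/unified address space such as the
	//gem5-gpu / GEM5_FUSION setup this variant appears to target (an assumption, not stated
	//in the original comments).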
hipLaunchKernelGGL(( srad_cuda_1), dim3(dimGrid), dim3(dimBlock), 0, 0, dE, dW, dN, dS, J, c, cols, rows, q0sqr);
hipLaunchKernelGGL(( srad_cuda_2), dim3(dimGrid), dim3(dimBlock), 0, 0, dE, dW, dN, dS, J, c, cols, rows, lambda, q0sqr);
hipDeviceSynchronize();
//Copy data from device memory to main memory
//hipMemcpy(J, J_cuda, sizeof(float) * size_I, hipMemcpyDeviceToHost);
#endif
}
#ifdef GEM5_FUSION
m5_work_end(0, 0);
#endif
hipDeviceSynchronize();
#ifdef OUTPUT
//Printing output
printf("Printing Output:\n");
for( int i = 0 ; i < rows ; i++){
for ( int j = 0 ; j < cols ; j++){
printf("%.5f ", J[i * cols + j]);
}
printf("\n");
}
#endif
printf("Computation Done\n");
free(I);
free(J);
free(dN); free(dS); free(dW); free(dE);
#ifdef CPU
free(iN); free(iS); free(jW); free(jE);
#endif
#ifdef GPU
// hipFree(C_cuda);
//hipFree(J_cuda);
//hipFree(E_C);
//hipFree(W_C);
//hipFree(N_C);
//hipFree(S_C);
#endif
free(c);
}
void random_matrix(float *I, int rows, int cols){
srand(7);
for( int i = 0 ; i < rows ; i++){
for ( int j = 0 ; j < cols ; j++){
I[i * cols + j] = rand()/(float)RAND_MAX ;
}
}
}
| 890815334b5a697e20ea6f34abb9a265740b9349.cu | // includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <srad.h>
// includes, project
#include <cuda.h>
// includes, kernels
#include <srad_kernel.cu>
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
void m5_dump_stats(uint64_t ns_delay, uint64_t ns_period);
}
#endif
void random_matrix(float *I, int rows, int cols);
void runTest( int argc, char** argv);
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <rows> <cols> <y1> <y2> <x1> <x2> <lamda> <no. of iter>\n", argv[0]);
fprintf(stderr, "\t<rows> - number of rows\n");
fprintf(stderr, "\t<cols> - number of cols\n");
fprintf(stderr, "\t<y1> - y1 value of the speckle\n");
fprintf(stderr, "\t<y2> - y2 value of the speckle\n");
fprintf(stderr, "\t<x1> - x1 value of the speckle\n");
fprintf(stderr, "\t<x2> - x2 value of the speckle\n");
fprintf(stderr, "\t<lamda> - lambda (0,1)\n");
fprintf(stderr, "\t<no. of iter> - number of iterations\n");
exit(1);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
// BEGIN ADARSH DUMMY LOOP
int blosum62[24][24] = {
{ 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4},
{-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4},
{-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4},
{-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{ 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4},
{-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4},
{-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4},
{-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4},
{-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4},
{-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4},
{-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4},
{-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4},
{-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4},
{-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4},
{ 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4},
{ 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4},
{-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4},
{-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4},
{ 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4},
{-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4},
{-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1}
};
int *dummyArray[48];
int i,j,k,ctr;
for (i=0;i<48;i++)
dummyArray[i] = (int *)malloc( 48 * sizeof(int) );
for ( i=0; i<24; i++)
for ( j=0; j<24; j++){
dummyArray[i][j] = blosum62[i][j];
dummyArray[i+24][j] = blosum62[i][j];
dummyArray[i][j+24] = blosum62[i][j];
dummyArray[i+24][j+24] = blosum62[i][j];
}
for ( k=1; k<20000; k++) {
for ( i=1; i<47; i++)
for ( j=1; j<47; j++)
dummyArray[i][j] += (dummyArray[i-1][j] + dummyArray[i+1][j]) * (dummyArray[i][j+1] + dummyArray[i][j-1]);
for ( i=1; i<47; i++)
for ( j=1; j<47; j++)
dummyArray[i][j] += (dummyArray[i-1][j] * dummyArray[i+1][j]) + (dummyArray[i][j+1] * dummyArray[i][j-1]);
for (i=0;i<48; i++) {
ctr = dummyArray[0][i];
for ( j=1;j<48; j++) {
ctr += dummyArray[j][i];
dummyArray[j][i] = ctr;
}
}
for (i=0;i<48; i++) {
ctr = dummyArray[i][0];
for ( j=1;j<48; j++) {
ctr += dummyArray[i][j];
dummyArray[i][j] = ctr;
}
}
}
fprintf(stdout, "Begin dummy output\n");
for ( i=1; i<48; i++)
fprintf(stdout, "%d ", dummyArray[23][i]);
fprintf(stdout, "\nEnd of dummy output\n");
for (i=0;i<48;i++)
free(dummyArray[i]);
// END ADARSH DUMMY LOOP
runTest( argc, argv);
return EXIT_SUCCESS;
}
void
runTest( int argc, char** argv)
{
int rows, cols, size_I, size_R, niter = 10, iter;
float *I, *J, lambda, q0sqr, sum, sum2, tmp, meanROI,varROI ;
float *dN,*dS,*dW,*dE;
#ifdef CPU
float Jc, G2, L, num, den, qsqr;
int *iN,*iS,*jE,*jW, k;
float cN,cS,cW,cE,D;
#endif
#ifdef GPU
//float *J_cuda;
// float *C_cuda;
//float *E_C, *W_C, *N_C, *S_C;
#endif
unsigned int r1, r2, c1, c2;
float *c;
if (argc == 9)
{
rows = atoi(argv[1]); //number of rows in the domain
cols = atoi(argv[2]); //number of cols in the domain
if ((rows%16!=0) || (cols%16!=0)){
fprintf(stderr, "rows and cols must be multiples of 16\n");
exit(1);
}
r1 = atoi(argv[3]); //y1 position of the speckle
r2 = atoi(argv[4]); //y2 position of the speckle
c1 = atoi(argv[5]); //x1 position of the speckle
c2 = atoi(argv[6]); //x2 position of the speckle
lambda = atof(argv[7]); //Lambda value
niter = atoi(argv[8]); //number of iterations
}
else{
usage(argc, argv);
}
size_I = cols * rows;
size_R = (r2-r1+1)*(c2-c1+1);
I = (float *)malloc( size_I * sizeof(float) );
J = (float *)malloc( size_I * sizeof(float) );
c = (float *)malloc(sizeof(float)* size_I) ;
dN = (float *)malloc(sizeof(float)* size_I) ;
dS = (float *)malloc(sizeof(float)* size_I) ;
dW = (float *)malloc(sizeof(float)* size_I) ;
dE = (float *)malloc(sizeof(float)* size_I) ;
#ifdef CPU
iN = (int *)malloc(sizeof(unsigned int*) * rows) ;
iS = (int *)malloc(sizeof(unsigned int*) * rows) ;
jW = (int *)malloc(sizeof(unsigned int*) * cols) ;
jE = (int *)malloc(sizeof(unsigned int*) * cols) ;
for (int i=0; i< rows; i++) {
iN[i] = i-1;
iS[i] = i+1;
}
for (int j=0; j< cols; j++) {
jW[j] = j-1;
jE[j] = j+1;
}
iN[0] = 0;
iS[rows-1] = rows-1;
jW[0] = 0;
jE[cols-1] = cols-1;
#endif
#ifdef GPU
//Allocate device memory
//cudaMalloc((void**)& J_cuda, sizeof(float)* size_I);
//cudaMalloc((void**)& C_cuda, sizeof(float)* size_I);
//cudaMalloc((void**)& E_C, sizeof(float)* size_I);
//cudaMalloc((void**)& W_C, sizeof(float)* size_I);
//cudaMalloc((void**)& S_C, sizeof(float)* size_I);
//cudaMalloc((void**)& N_C, sizeof(float)* size_I);
#endif
printf("Randomizing the input matrix\n");
//Generate a random matrix
random_matrix(I, rows, cols);
for (int k = 0; k < size_I; k++ ) {
J[k] = (float)exp(I[k]) ;
}
printf("Start the SRAD main loop\n");
#ifdef GEM5_FUSION
m5_dump_stats(0, 0);
m5_work_begin(0, 0);
#endif
for (iter=0; iter< niter; iter++){
sum=0; sum2=0;
for (int i=r1; i<=r2; i++) {
for (int j=c1; j<=c2; j++) {
tmp = J[i * cols + j];
sum += tmp ;
sum2 += tmp*tmp;
}
}
meanROI = sum / size_R;
varROI = (sum2 / size_R) - meanROI*meanROI;
q0sqr = varROI / (meanROI*meanROI);
#ifdef CPU
for (int i = 0 ; i < rows ; i++) {
for (int j = 0; j < cols; j++) {
k = i * cols + j;
Jc = J[k];
        // directional derivatives
dN[k] = J[iN[i] * cols + j] - Jc;
dS[k] = J[iS[i] * cols + j] - Jc;
dW[k] = J[i * cols + jW[j]] - Jc;
dE[k] = J[i * cols + jE[j]] - Jc;
G2 = (dN[k]*dN[k] + dS[k]*dS[k]
+ dW[k]*dW[k] + dE[k]*dE[k]) / (Jc*Jc);
L = (dN[k] + dS[k] + dW[k] + dE[k]) / Jc;
num = (0.5*G2) - ((1.0/16.0)*(L*L)) ;
den = 1 + (.25*L);
qsqr = num/(den*den);
        // diffusion coefficient (equ 33)
den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ;
c[k] = 1.0 / (1.0+den) ;
        // saturate diffusion coefficient
if (c[k] < 0) {c[k] = 0;}
else if (c[k] > 1) {c[k] = 1;}
}
}
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
// current index
k = i * cols + j;
        // diffusion coefficient
cN = c[k];
cS = c[iS[i] * cols + j];
cW = c[k];
cE = c[i * cols + jE[j]];
// divergence (equ 58)
D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k];
// image update (equ 61)
J[k] = J[k] + 0.25*lambda*D;
}
}
#endif // CPU
#ifdef GPU
//Currently the input size must be divided by 16 - the block size
int block_x = cols/BLOCK_SIZE ;
int block_y = rows/BLOCK_SIZE ;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(block_x , block_y);
//Copy data from main memory to device memory
//cudaMemcpy(J_cuda, J, sizeof(float) * size_I, cudaMemcpyHostToDevice);
//Run kernels
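	//Note: the kernels below receive host-allocated buffers (the cudaMalloc/cudaMemcpy calls
	//above are commented out); this relies on a shared/unified address space such as the
	//gem5-gpu / GEM5_FUSION setup this variant appears to target (an assumption, not stated
	//in the original comments).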
srad_cuda_1<<<dimGrid, dimBlock>>>(dE, dW, dN, dS, J, c, cols, rows, q0sqr);
srad_cuda_2<<<dimGrid, dimBlock>>>(dE, dW, dN, dS, J, c, cols, rows, lambda, q0sqr);
cudaThreadSynchronize();
//Copy data from device memory to main memory
//cudaMemcpy(J, J_cuda, sizeof(float) * size_I, cudaMemcpyDeviceToHost);
#endif
}
#ifdef GEM5_FUSION
m5_work_end(0, 0);
#endif
cudaThreadSynchronize();
#ifdef OUTPUT
//Printing output
printf("Printing Output:\n");
for( int i = 0 ; i < rows ; i++){
for ( int j = 0 ; j < cols ; j++){
printf("%.5f ", J[i * cols + j]);
}
printf("\n");
}
#endif
printf("Computation Done\n");
free(I);
free(J);
free(dN); free(dS); free(dW); free(dE);
#ifdef CPU
free(iN); free(iS); free(jW); free(jE);
#endif
#ifdef GPU
// cudaFree(C_cuda);
//cudaFree(J_cuda);
//cudaFree(E_C);
//cudaFree(W_C);
//cudaFree(N_C);
//cudaFree(S_C);
#endif
free(c);
}
void random_matrix(float *I, int rows, int cols){
srand(7);
for( int i = 0 ; i < rows ; i++){
for ( int j = 0 ; j < cols ; j++){
I[i * cols + j] = rand()/(float)RAND_MAX ;
}
}
}
|