Column          Type     Length (min–max)
hip_filename    string   5–84
hip_content     string   79–9.69M
cuda_filename   string   4–83
cuda_content    string   19–9.69M
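Each record pairs a hipified source file with the CUDA file it was generated from. As a minimal sketch, the four columns map onto a plain C++ aggregate like the one below; the struct name and comments are illustrative assumptions, and only the field names come from the schema above.

#include <string>

// Hypothetical record layout mirroring the four columns above; not part of
// any published loader.
struct HipCudaPair {
  std::string hip_filename;   // e.g. "*.hip", 5-84 chars
  std::string hip_content;    // hipified source text, up to ~9.69M chars
  std::string cuda_filename;  // e.g. "*.cu", 4-83 chars
  std::string cuda_content;   // original CUDA source text, up to ~9.69M chars
};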
eea9319c70004e2e0b7e227a4d9a7aff0b7f2e14.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/math/math_function.h" #include "paddle/operators/math/selected_rows_functor.h" #include "paddle/platform/cuda_helper.h" namespace paddle { namespace operators { namespace math { template <typename T> struct SelectedRowsAdd<platform::GPUPlace, T> { void operator()(const platform::DeviceContext& context, const framework::SelectedRows& input1, const framework::SelectedRows& input2, framework::SelectedRows* output) { auto in1_height = input1.height(); PADDLE_ENFORCE_EQ(in1_height, input2.height()); output->set_height(in1_height); auto& in1_rows = input1.rows(); auto& in2_rows = input2.rows(); std::vector<int64_t> out_rows; out_rows.reserve(in1_rows.size() + in2_rows.size()); // concat rows out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end()); out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end()); output->set_rows(out_rows); auto* out_value = output->mutable_value(); auto& in1_value = input1.value(); auto& in2_value = input2.value(); auto in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, in2_value.numel() / in2_rows.size()); PADDLE_ENFORCE_EQ(in1_row_numel, out_value->numel() / out_rows.size()); auto* out_data = out_value->data<T>(); auto* in1_data = in1_value.data<T>(); auto in1_place = input1.place(); PADDLE_ENFORCE(platform::is_gpu_place(in1_place)); auto in2_place = input2.place(); PADDLE_ENFORCE(platform::is_gpu_place(in2_place)); auto out_place = context.GetPlace(); PADDLE_ENFORCE(platform::is_gpu_place(out_place)); memory::Copy( boost::get<platform::GPUPlace>(out_place), out_data, boost::get<platform::GPUPlace>(in1_place), in1_data, in1_value.numel() * sizeof(T), reinterpret_cast<const platform::CUDADeviceContext&>(context).stream()); auto* in2_data = in2_value.data<T>(); memory::Copy( boost::get<platform::GPUPlace>(out_place), out_data + in1_value.numel(), boost::get<platform::GPUPlace>(in2_place), in2_data, in2_value.numel() * sizeof(T), reinterpret_cast<const platform::CUDADeviceContext&>(context).stream()); } }; template struct SelectedRowsAdd<platform::GPUPlace, float>; template struct SelectedRowsAdd<platform::GPUPlace, double>; namespace { template <typename T, int block_size> __global__ void SelectedRowsAddTensorKernel(const T* selected_rows, const int64_t* rows, T* tensor_out, int64_t row_numel) { const int ty = blockIdx.y; int tid = threadIdx.x; selected_rows += ty * row_numel; tensor_out += rows[ty] * row_numel; for (int index = tid; index < row_numel; index += block_size) { // Since index in rows of SelectedRows can be duplicate, we can not use // tensor_out[index] += selected_rows[index]; Instead, we have to use // AtomicAdd to avoid concurrent write error. 
paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]); } } } // namespace template <typename T> struct SelectedRowsAddTensor<platform::GPUPlace, T> { void operator()(const platform::DeviceContext& context, const framework::SelectedRows& input1, const framework::Tensor& input2, framework::Tensor* output) { auto in1_height = input1.height(); auto in2_dims = input2.dims(); auto out_dims = output->dims(); PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); PADDLE_ENFORCE_EQ(in1_height, out_dims[0]); auto& in1_value = input1.value(); auto& in1_rows = input1.rows(); int64_t in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, input2.numel() / in1_height); PADDLE_ENFORCE_EQ(in1_row_numel, output->numel() / in1_height); auto* in1_data = in1_value.data<T>(); auto* in2_data = input2.data<T>(); auto* out_data = output->data<T>(); SetConstant<platform::GPUPlace, T> functor; functor(context, output, 0.0); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid(1, in1_rows.size()); hipLaunchKernelGGL(( SelectedRowsAddTensorKernel<T, block_size>), dim3(grid), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream(), in1_data, in1_rows.data(), out_data, in1_row_numel); auto out_eigen = framework::EigenVector<T>::Flatten(*output); auto in2_eigen = framework::EigenVector<T>::Flatten(input2); out_eigen.device(*context.GetEigenDevice<platform::GPUPlace>()) = out_eigen + in2_eigen; } }; template struct SelectedRowsAddTensor<platform::GPUPlace, float>; template struct SelectedRowsAddTensor<platform::GPUPlace, double>; template <typename T> struct SelectedRowsAddTo<platform::GPUPlace, T> { void operator()(const platform::DeviceContext& context, const framework::SelectedRows& input1, const int64_t input2_offset, framework::SelectedRows* input2) { auto in1_height = input1.height(); PADDLE_ENFORCE_EQ(in1_height, input2->height()); auto& in1_rows = input1.rows(); auto& in2_rows = *(input2->mutable_rows()); auto& in1_value = input1.value(); auto* in2_value = input2->mutable_value(); // concat rows in2_rows.insert(in2_rows.end(), in1_rows.begin(), in1_rows.end()); auto in1_place = input1.place(); PADDLE_ENFORCE(platform::is_gpu_place(in1_place)); auto in2_place = input2->place(); PADDLE_ENFORCE(platform::is_gpu_place(in2_place)); auto* in1_data = in1_value.data<T>(); auto* in2_data = in2_value->data<T>(); memory::Copy( boost::get<platform::GPUPlace>(in2_place), in2_data + input2_offset, boost::get<platform::GPUPlace>(in1_place), in1_data, in1_value.numel() * sizeof(T), reinterpret_cast<const platform::CUDADeviceContext&>(context).stream()); } }; template struct SelectedRowsAddTo<platform::GPUPlace, float>; template struct SelectedRowsAddTo<platform::GPUPlace, double>; template struct SelectedRowsAddTo<platform::GPUPlace, int>; template struct SelectedRowsAddTo<platform::GPUPlace, int64_t>; namespace { template <typename T, int block_size> __global__ void SelectedRowsAddToTensorKernel(const T* selected_rows, const int64_t* rows, T* tensor_out, int64_t row_numel) { const int ty = blockIdx.y; int tid = threadIdx.x; selected_rows += ty * row_numel; tensor_out += rows[ty] * row_numel; for (int index = tid; index < row_numel; index += block_size) { // Since index in rows of SelectedRows can be duplicate, we have to use // Atomic Operation to avoid concurrent write error. 
paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]); } } } // namespace template <typename T> struct SelectedRowsAddToTensor<platform::GPUPlace, T> { void operator()(const platform::DeviceContext& context, const framework::SelectedRows& input1, framework::Tensor* input2) { auto in1_height = input1.height(); auto in2_dims = input2->dims(); PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); auto& in1_value = input1.value(); auto& in1_rows = input1.rows(); int64_t in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height); auto* in1_data = in1_value.data<T>(); auto* in2_data = input2->data<T>(); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid(1, in1_rows.size()); hipLaunchKernelGGL(( SelectedRowsAddToTensorKernel<T, block_size>), dim3(grid), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream(), in1_data, in1_rows.data(), in2_data, in1_row_numel); } }; template struct SelectedRowsAddToTensor<platform::GPUPlace, float>; template struct SelectedRowsAddToTensor<platform::GPUPlace, double>; template struct SelectedRowsAddToTensor<platform::GPUPlace, int>; template struct SelectedRowsAddToTensor<platform::GPUPlace, int64_t>; } // namespace math } // namespace operators } // namespace paddle
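The only systematic differences between the hipified file above and the CUDA original that follows are the hipify banner, the added #include "hip/hip_runtime.h", and the rewriting of triple-chevron kernel launches into hipLaunchKernelGGL calls. A minimal, self-contained sketch of that launch translation is given below; the toy kernel, names, and sizes are mine, not taken from the dataset.

#include <hip/hip_runtime.h>

// Toy kernel standing in for SelectedRowsAddTensorKernel and friends.
__global__ void AxpyKernel(const float* x, float* y, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] += a * x[i];
}

void LaunchAxpy(const float* d_x, float* d_y, float a, int n, hipStream_t stream) {
  dim3 threads(256);
  dim3 grid((n + 255) / 256);
  // CUDA form that hipify rewrites:
  //   AxpyKernel<<<grid, threads, 0, stream>>>(d_x, d_y, a, n);
  hipLaunchKernelGGL(AxpyKernel, grid, threads, 0, stream, d_x, d_y, a, n);
}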
eea9319c70004e2e0b7e227a4d9a7aff0b7f2e14.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/math/math_function.h" #include "paddle/operators/math/selected_rows_functor.h" #include "paddle/platform/cuda_helper.h" namespace paddle { namespace operators { namespace math { template <typename T> struct SelectedRowsAdd<platform::GPUPlace, T> { void operator()(const platform::DeviceContext& context, const framework::SelectedRows& input1, const framework::SelectedRows& input2, framework::SelectedRows* output) { auto in1_height = input1.height(); PADDLE_ENFORCE_EQ(in1_height, input2.height()); output->set_height(in1_height); auto& in1_rows = input1.rows(); auto& in2_rows = input2.rows(); std::vector<int64_t> out_rows; out_rows.reserve(in1_rows.size() + in2_rows.size()); // concat rows out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end()); out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end()); output->set_rows(out_rows); auto* out_value = output->mutable_value(); auto& in1_value = input1.value(); auto& in2_value = input2.value(); auto in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, in2_value.numel() / in2_rows.size()); PADDLE_ENFORCE_EQ(in1_row_numel, out_value->numel() / out_rows.size()); auto* out_data = out_value->data<T>(); auto* in1_data = in1_value.data<T>(); auto in1_place = input1.place(); PADDLE_ENFORCE(platform::is_gpu_place(in1_place)); auto in2_place = input2.place(); PADDLE_ENFORCE(platform::is_gpu_place(in2_place)); auto out_place = context.GetPlace(); PADDLE_ENFORCE(platform::is_gpu_place(out_place)); memory::Copy( boost::get<platform::GPUPlace>(out_place), out_data, boost::get<platform::GPUPlace>(in1_place), in1_data, in1_value.numel() * sizeof(T), reinterpret_cast<const platform::CUDADeviceContext&>(context).stream()); auto* in2_data = in2_value.data<T>(); memory::Copy( boost::get<platform::GPUPlace>(out_place), out_data + in1_value.numel(), boost::get<platform::GPUPlace>(in2_place), in2_data, in2_value.numel() * sizeof(T), reinterpret_cast<const platform::CUDADeviceContext&>(context).stream()); } }; template struct SelectedRowsAdd<platform::GPUPlace, float>; template struct SelectedRowsAdd<platform::GPUPlace, double>; namespace { template <typename T, int block_size> __global__ void SelectedRowsAddTensorKernel(const T* selected_rows, const int64_t* rows, T* tensor_out, int64_t row_numel) { const int ty = blockIdx.y; int tid = threadIdx.x; selected_rows += ty * row_numel; tensor_out += rows[ty] * row_numel; for (int index = tid; index < row_numel; index += block_size) { // Since index in rows of SelectedRows can be duplicate, we can not use // tensor_out[index] += selected_rows[index]; Instead, we have to use // AtomicAdd to avoid concurrent write error. 
paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]); } } } // namespace template <typename T> struct SelectedRowsAddTensor<platform::GPUPlace, T> { void operator()(const platform::DeviceContext& context, const framework::SelectedRows& input1, const framework::Tensor& input2, framework::Tensor* output) { auto in1_height = input1.height(); auto in2_dims = input2.dims(); auto out_dims = output->dims(); PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); PADDLE_ENFORCE_EQ(in1_height, out_dims[0]); auto& in1_value = input1.value(); auto& in1_rows = input1.rows(); int64_t in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, input2.numel() / in1_height); PADDLE_ENFORCE_EQ(in1_row_numel, output->numel() / in1_height); auto* in1_data = in1_value.data<T>(); auto* in2_data = input2.data<T>(); auto* out_data = output->data<T>(); SetConstant<platform::GPUPlace, T> functor; functor(context, output, 0.0); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid(1, in1_rows.size()); SelectedRowsAddTensorKernel<T, block_size><<< grid, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream()>>>(in1_data, in1_rows.data(), out_data, in1_row_numel); auto out_eigen = framework::EigenVector<T>::Flatten(*output); auto in2_eigen = framework::EigenVector<T>::Flatten(input2); out_eigen.device(*context.GetEigenDevice<platform::GPUPlace>()) = out_eigen + in2_eigen; } }; template struct SelectedRowsAddTensor<platform::GPUPlace, float>; template struct SelectedRowsAddTensor<platform::GPUPlace, double>; template <typename T> struct SelectedRowsAddTo<platform::GPUPlace, T> { void operator()(const platform::DeviceContext& context, const framework::SelectedRows& input1, const int64_t input2_offset, framework::SelectedRows* input2) { auto in1_height = input1.height(); PADDLE_ENFORCE_EQ(in1_height, input2->height()); auto& in1_rows = input1.rows(); auto& in2_rows = *(input2->mutable_rows()); auto& in1_value = input1.value(); auto* in2_value = input2->mutable_value(); // concat rows in2_rows.insert(in2_rows.end(), in1_rows.begin(), in1_rows.end()); auto in1_place = input1.place(); PADDLE_ENFORCE(platform::is_gpu_place(in1_place)); auto in2_place = input2->place(); PADDLE_ENFORCE(platform::is_gpu_place(in2_place)); auto* in1_data = in1_value.data<T>(); auto* in2_data = in2_value->data<T>(); memory::Copy( boost::get<platform::GPUPlace>(in2_place), in2_data + input2_offset, boost::get<platform::GPUPlace>(in1_place), in1_data, in1_value.numel() * sizeof(T), reinterpret_cast<const platform::CUDADeviceContext&>(context).stream()); } }; template struct SelectedRowsAddTo<platform::GPUPlace, float>; template struct SelectedRowsAddTo<platform::GPUPlace, double>; template struct SelectedRowsAddTo<platform::GPUPlace, int>; template struct SelectedRowsAddTo<platform::GPUPlace, int64_t>; namespace { template <typename T, int block_size> __global__ void SelectedRowsAddToTensorKernel(const T* selected_rows, const int64_t* rows, T* tensor_out, int64_t row_numel) { const int ty = blockIdx.y; int tid = threadIdx.x; selected_rows += ty * row_numel; tensor_out += rows[ty] * row_numel; for (int index = tid; index < row_numel; index += block_size) { // Since index in rows of SelectedRows can be duplicate, we have to use // Atomic Operation to avoid concurrent write error. 
paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]); } } } // namespace template <typename T> struct SelectedRowsAddToTensor<platform::GPUPlace, T> { void operator()(const platform::DeviceContext& context, const framework::SelectedRows& input1, framework::Tensor* input2) { auto in1_height = input1.height(); auto in2_dims = input2->dims(); PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); auto& in1_value = input1.value(); auto& in1_rows = input1.rows(); int64_t in1_row_numel = in1_value.numel() / in1_rows.size(); PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height); auto* in1_data = in1_value.data<T>(); auto* in2_data = input2->data<T>(); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid(1, in1_rows.size()); SelectedRowsAddToTensorKernel<T, block_size><<< grid, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream()>>>(in1_data, in1_rows.data(), in2_data, in1_row_numel); } }; template struct SelectedRowsAddToTensor<platform::GPUPlace, float>; template struct SelectedRowsAddToTensor<platform::GPUPlace, double>; template struct SelectedRowsAddToTensor<platform::GPUPlace, int>; template struct SelectedRowsAddToTensor<platform::GPUPlace, int64_t>; } // namespace math } // namespace operators } // namespace paddle
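Both kernels in the pair above stride across one row per block and accumulate with paddle::platform::CudaAtomicAdd, because the same row index can occur more than once in a SelectedRows, so a plain tensor_out[index] += ... would race. Below is a stripped-down sketch of that scatter-add pattern using the stock atomicAdd; CudaAtomicAdd is Paddle's wrapper, and this kernel is a simplified stand-in, not the library code.

#include <cstdint>
#include <cuda_runtime.h>

// rows[] may contain duplicate indices, so different blocks can write the
// same output row; atomicAdd keeps the accumulation race-free.
__global__ void ScatterAddRows(const float* selected_rows, const int64_t* rows,
                               float* tensor_out, int64_t row_numel) {
  const int row = blockIdx.y;  // one block row per selected row, as above
  const float* src = selected_rows + row * row_numel;
  float* dst = tensor_out + rows[row] * row_numel;
  for (int64_t i = threadIdx.x; i < row_numel; i += blockDim.x) {
    atomicAdd(dst + i, src[i]);
  }
}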
f4b8e2d74e76fe72791a454343da1ff9343bdb02.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by Shitian Ni on 1/18/18. // #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include<iostream> #include<string.h> #include "parameter.h" using namespace std; __device__ double d_H1[ROW_H1][COL_H1], d_Ht1[ROW_H1][COL_Ht1]; __device__ double d_H2[ROW_H2][COL_H2], d_Ht2[ROW_H2][COL_Ht2]; __device__ double d_H3[ROW_H3][COL_H3], d_Ht3[ROW_H3][COL_Ht3]; __device__ unsigned char d_image1[MAX_IMAGESIZE][MAX_IMAGESIZE]; __device__ unsigned char d_image2[MAX_IMAGESIZE][MAX_IMAGESIZE]; __device__ double d_g[G_NUM], d_g_can1[ROW][COL], d_g_nor1[ROW][COL], d_gk[ROW][COL], d_gwt[ROW][COL],d_g_can2[ROW][COL]; __device__ int d_g_ang1[ROW][COL]; __device__ int d_g_ang2[ROW][COL]; __device__ char d_sHoG1[ROW - 4][COL - 4]; __device__ char d_sHoG2[ROW - 4][COL - 4]; __device__ double d_new_cor; __device__ double d_gpt[3][3]; __device__ double d_D1[ROW][COL * 8]; __device__ double d_D2[ROW - 4][(COL - 4) * 64]; __device__ double d_ndis[(2 * ROW - 1) * (2 * COL - 1)]; __device__ int d_coor[(2 * ROW - 1) * (2 * COL - 1)][2]; __device__ int d_cuda_global_fsHoGpat_count[2]; __device__ double d_cuda_global_fsHoGpat_dnn[2]; __device__ int d_cuda_global_fwinpat_count[2]; __device__ double d_cuda_global_fwinpat_dnn[2]; int iDivUp(int hostPtr, int b){ return ((hostPtr % b) != 0) ? (hostPtr / b + 1) : (hostPtr / b); }; //https://stackoverflow.com/a/14038590 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void Ht1_1() { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW_H1) || (x >= COL_Ht1)) { return; } d_Ht1[y][x] = d_H1[y][x + COL * 27 * 5]; }; __global__ void Ht1_2() { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW_H1) || (x >= COL_Ht1)) { return; } d_Ht1[y][x] = d_H1[y][x]; }; __global__ void Ht1_3(int count, double newVar) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW_H1) || (x >= COL_Ht1)) { return; } double var = pow(2.0,count -5) ; double var_p_1 = var * 2.0; d_Ht1[y][x] = d_H1[y][x + COL * 27 * count] + (d_H1[y][x + COL * 27 * (count + 1)] - d_H1[y][x + COL * 27 * count]) / (var_p_1 - var) * (newVar - var); }; __global__ void Ht2_1() { int margin = 2; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW - 2 * margin) || (x >= 3 * 64 * (COL - 2 * margin))) { return; } d_Ht2[y][x] = d_H2[y][x + (COL - 2 * margin) * 3 * 64 * 5]; }; __global__ void Ht2_2() { int margin = 2; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW - 2 * margin) || (x >= 3 * 64 * (COL - 2 * margin))) { return; } d_Ht2[y][x] = d_H2[y][x]; }; __global__ void Ht2_3(int count, double newVar) { int margin = 2; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW - 2 * margin) || (x >= 3 * 64 * (COL - 2 * margin))) { return; } double var = pow(2.0,count -5) ; double var_p_1 = var * 2.0; d_Ht2[y][x] = d_H2[y][x + (COL - 2 * margin) * 3 * 64 * count] + (d_H2[y][x + (COL - 2 * margin) * 3 * 64 * (count + 1)] - d_H2[y][x + (COL - 2 * margin) * 3 * 64 * count]) / 
(var_p_1 - var) * (newVar - var); }; __global__ void Ht3_1() { int margin = 2; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW - 2 * margin) || (x >= 3 * 64 * (COL - 2 * margin))) { return; } d_Ht3[y][x] = d_H3[y][x + (COL - 2 * margin) * 3 * 64 * 5]; }; __global__ void Ht3_2() { int margin = 2; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW - 2 * margin) || (x >= 3 * 64 * (COL - 2 * margin))) { return; } d_Ht3[y][x] = d_H3[y][x]; }; __global__ void Ht3_3(int count, double newVar) { int margin = 2; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW - 2 * margin) || (x >= 3 * 64 * (COL - 2 * margin))) { return; } double var = pow(2.0,count - 10) ; double var_p_1 = var * 2.0; d_Ht3[y][x] = d_H3[y][x + (COL - 2 * margin) * 3 * 64 * count] + (d_H3[y][x + (COL - 2 * margin) * 3 * 64 * (count + 1)] - d_H3[y][x + (COL - 2 * margin) * 3 * 64 * count]) / (var_p_1 - var) * (newVar - var); }; //1000 times 1200~1300ms //http://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/projects/reduction/doc/reduction.pdf template<typename T> __device__ void customAdd(T* sdata,T* g_odata){ int tx = threadIdx.x; int ty = threadIdx.y; int tid = ty * blockDim.x + tx; // do reduction in shared mem if (tid < 512) { sdata[tid] += sdata[tid + 512]; } __syncthreads(); if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); if (tid < 32){ sdata[tid] += sdata[tid + 32]; }__syncthreads(); if (tid < 16){ sdata[tid] += sdata[tid + 16]; }__syncthreads(); if (tid < 8){ sdata[tid] += sdata[tid + 8]; }__syncthreads(); if (tid < 4){ sdata[tid] += sdata[tid + 4]; }__syncthreads(); if (tid < 2){ sdata[tid] += sdata[tid + 2]; }__syncthreads(); if (tid < 1){ sdata[tid] += sdata[tid + 1]; }__syncthreads(); // write result for this block to global mem if (tid == 0) {atomicAdd(g_odata , sdata[tid]);} } __device__ void weightedAVG_Add(double t0, double tx2,double ty2){ __shared__ double sdata[6][TPB_X_TPB]; int tx = threadIdx.x; int ty = threadIdx.y; int tid = ty * blockDim.x + tx; int x1 = blockIdx.x*blockDim.x + threadIdx.x; int y1 = blockIdx.y*blockDim.y + threadIdx.y; double dx1=x1 - CX; double dy1=y1 - CY; sdata[0][tid]=t0; sdata[1][tid]=tx2; sdata[2][tid]=ty2; sdata[3][tid]=t0 * dx1; sdata[4][tid]=t0 * dx1 * dx1; sdata[5][tid]=t0 * dx1 * dx1 * dx1; __syncthreads(); customAdd(sdata[0],d_g); customAdd(sdata[1],d_g+21); customAdd(sdata[2],d_g+22); customAdd(sdata[3],d_g+3); customAdd(sdata[4],d_g+4); customAdd(sdata[5],d_g+5); __syncthreads(); sdata[0][tid]=t0 * dx1 * dx1 * dx1 * dx1; sdata[1][tid]=t0 * dy1; sdata[2][tid]=t0 * dy1 * dy1; sdata[3][tid]=t0 * dy1 * dy1 * dy1; sdata[4][tid]=t0 * dy1 * dy1 * dy1 * dy1; sdata[5][tid]=t0 * dx1 * dy1; __syncthreads(); customAdd(sdata[0],d_g+6); customAdd(sdata[1],d_g+7); customAdd(sdata[2],d_g+8); customAdd(sdata[3],d_g+9); customAdd(sdata[4],d_g+10); customAdd(sdata[5],d_g+11); __syncthreads(); sdata[0][tid]=t0 * dx1 * dx1 * dy1; sdata[1][tid]=t0 * dx1 * dx1 * dx1 * dy1; sdata[2][tid]=t0 * dx1 * dy1 * dy1; sdata[3][tid]=t0 * dx1 * dx1 * dy1 * dy1; sdata[4][tid]=t0 * dx1 * dy1 * dy1 * dy1; sdata[5][tid]=tx2 * dx1; __syncthreads(); customAdd(sdata[0],d_g+12); customAdd(sdata[1],d_g+13); customAdd(sdata[2],d_g+14); customAdd(sdata[3],d_g+15); customAdd(sdata[4],d_g+16); 
customAdd(sdata[5],d_g+17); __syncthreads(); sdata[0][tid]=tx2 * dy1; sdata[1][tid]=ty2 * dx1; sdata[2][tid]=ty2 * dy1; sdata[3][tid]=tx2 * dx1 * dx1; sdata[4][tid]=ty2 * dx1 * dy1; sdata[5][tid]=tx2 * dx1 * dy1; __syncthreads(); customAdd(sdata[0],d_g+18); customAdd(sdata[1],d_g+19); customAdd(sdata[2],d_g+20); customAdd(sdata[3],d_g+23); customAdd(sdata[4],d_g+24); customAdd(sdata[5],d_g+25); __syncthreads(); sdata[0][tid]=ty2 * dy1 * dy1; __syncthreads(); customAdd(sdata[0],d_g+26); __syncthreads(); } __global__ void weightedAVG_1() { int x1 = blockIdx.x*blockDim.x + threadIdx.x; int y1 = blockIdx.y*blockDim.y + threadIdx.y; bool condition = ((y1 >= MARGINE) && (x1 >= MARGINE) && (y1 < ROW-MARGINE) && (x1 < COL-MARGINE) && d_g_ang1[y1][x1] != -1); double t0 = 0; double tx2 = 0; double ty2 = 0; int thre = -1; if(condition){ thre = (d_g_ang1[y1][x1] + 1) * 3 * COL; t0 = d_Ht1[y1][thre + x1] * d_g_can1[y1][x1]; tx2 = d_Ht1[y1][thre + x1 + COL] * d_g_can1[y1][x1]; ty2 = d_Ht1[y1][thre + x1 + COL * 2] * d_g_can1[y1][x1]; } weightedAVG_Add(t0, tx2, ty2); }; __global__ void weightedAVG_2() { int x1 = blockIdx.x*blockDim.x + threadIdx.x; int y1 = blockIdx.y*blockDim.y + threadIdx.y; int margin = 2; double sHoGnumber[64] = sHoGNUMBER; bool condition = ((y1 >= margin) && (x1 >= margin) && (y1 < ROW-margin) && (x1 < COL-margin) && d_sHoG1[y1 - margin][x1 - margin] != -1); double t0 = 0; double tx2 = 0; double ty2 = 0; int thre = -1; for (int s = 0 ; condition && s < 64 ; s++) { if (d_sHoG1[y1 - margin][x1 - margin] == sHoGnumber[s]) { thre = s * 3 * (COL - 2 * margin); t0 = d_Ht2[y1 - margin][thre + x1 - margin] * d_g_can1[y1][x1]; tx2 = d_Ht2[y1 - margin][thre + x1 - margin + (COL - 2 * margin)] * d_g_can1[y1][x1]; ty2 = d_Ht2[y1 - margin][thre + x1 - margin + (COL - 2 * margin) * 2] * d_g_can1[y1][x1]; break; } } weightedAVG_Add(t0, tx2, ty2); }; __global__ void weightedAVG_3() { int x1 = blockIdx.x*blockDim.x + threadIdx.x; int y1 = blockIdx.y*blockDim.y + threadIdx.y; int margin = 2; double sHoGnumber[64] = sHoGNUMBER; bool condition = ((y1 >= margin) && (x1 >= margin) && (y1 < ROW-margin) && (x1 < COL-margin) && d_sHoG1[y1 - margin][x1 - margin] != -1); double t0 = 0; double tx2 = 0; double ty2 = 0; int thre = -1; for (int s = 0 ; condition && s < 64 ; s++) { if (d_sHoG1[y1 - margin][x1 - margin] == sHoGnumber[s]) { thre = s * 3 * (COL - 2 * margin); t0 = d_Ht3[y1 - margin][thre + x1 - margin] * d_g_can1[y1][x1]; tx2 = d_Ht3[y1 - margin][thre + x1 - margin + (COL - 2 * margin)] * d_g_can1[y1][x1]; ty2 = d_Ht3[y1 - margin][thre + x1 - margin + (COL - 2 * margin) * 2] * d_g_can1[y1][x1]; break; } } weightedAVG_Add(t0, tx2, ty2); }; __global__ void cuda_roberts8() { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW) || (x >= COL)) { return; } /* extraction of gradient information by Roberts operator */ /* with 8-directional codes and strength */ double delta_RD, delta_LD; double angle; /* angle & norm of gradient vector calculated by Roberts operator */ if(y >= ROW-1 || x >= COL-1){ d_g_ang1[y][x] = -1; d_g_nor1[y][x] = 0.0; return; } delta_RD = d_image1[y][x + 1] - d_image1[y + 1][x]; delta_LD = d_image1[y][x] - d_image1[y + 1][x + 1]; d_g_nor1[y][x] = sqrt(delta_RD * delta_RD + delta_LD * delta_LD); if (d_g_nor1[y][x] == 0.0 || delta_RD * delta_RD + delta_LD * delta_LD < NoDIRECTION * NoDIRECTION) { d_g_ang1[y][x] = -1; return; } if (abs(delta_RD) == 0.0) { if (delta_LD > 0) d_g_ang1[y][x] = 3; else if (delta_LD < 0) d_g_ang1[y][x] = 7; 
else d_g_ang1[y][x] = -1; return; } angle = atan2(delta_LD, delta_RD); if ( angle * 8.0 > 7.0 * PI) {d_g_ang1[y][x] = 5; return;} if (angle * 8.0 > 5.0 * PI) {d_g_ang1[y][x] = 4; return;} if (angle * 8.0 > 3.0 * PI) {d_g_ang1[y][x] = 3; return;} if (angle * 8.0 > 1.0 * PI) {d_g_ang1[y][x] = 2; return;} if (angle * 8.0 > -1.0 * PI) {d_g_ang1[y][x] = 1; return;} if (angle * 8.0 > -3.0 * PI) {d_g_ang1[y][x] = 0; return;} if (angle * 8.0 > -5.0 * PI) {d_g_ang1[y][x] = 7; return;} if (angle * 8.0 > -7.0 * PI) {d_g_ang1[y][x] = 6; return;} d_g_ang1[y][x] = 5; } /* d_cuda_defcan_vars[0]: mean d_cuda_defcan_vars[1]: norm d_cuda_defcan_vars[2]: npo */ __device__ double d_cuda_defcan_vars[3]; __global__ void cuda_defcan1() { int tx = threadIdx.x; int ty = threadIdx.y; int tid = ty * blockDim.x + tx; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW) || (x >= COL)) { return; } /* definite canonicalization */ int margine = CANMARGIN / 2; int condition = ((x>=margine && y>=margine) && (x<COL-margine)&&(y<ROW-margine) && d_image1[y][x]!=WHITE); double this_pixel = condition*(double)d_image1[y][x]; __shared__ double sdata[3][TPB_X_TPB]; sdata[0][tid] = this_pixel; sdata[1][tid] = this_pixel*this_pixel; sdata[2][tid] = condition; __syncthreads(); customAdd(sdata[0],d_cuda_defcan_vars); customAdd(sdata[1],d_cuda_defcan_vars+1); customAdd(sdata[2],d_cuda_defcan_vars+2); } __global__ void cuda_defcan2() { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW) || (x >= COL)) { return; } /* s_vars[0]: mean s_vars[1]: norm */ __shared__ double s_vars[2]; if(threadIdx.x == 0 && threadIdx.y == 0){ double npo = d_cuda_defcan_vars[2]; double mean = d_cuda_defcan_vars[0]/ (double)npo; double norm = d_cuda_defcan_vars[1] - (double)npo * mean * mean; if (norm == 0.0) norm = 1.0; s_vars[0] = mean; s_vars[1] = norm; } __syncthreads(); int condition = ((x<COL-CANMARGIN)&&(y<ROW-CANMARGIN) && d_image1[y][x]!=WHITE); double ratio = 1.0 / sqrt(s_vars[1]); d_g_can1[y][x] = condition * ratio * ((double)d_image1[y][x] - s_vars[0]); } void* d_image1_ptr; void* d_image2_ptr; void* d_H1_ptr;void* d_Ht1_ptr; void* d_H2_ptr;void* d_Ht2_ptr; void* d_H3_ptr;void* d_Ht3_ptr; void* d_g_ptr; void* d_g_can1_ptr;void* d_g_nor1_ptr;void* d_g_ang1_ptr;void* d_g_ang2_ptr;void* d_sHoG1_ptr;void* d_sHoG2_ptr; void* d_cuda_defcan_vars_ptr; void* d_gk_ptr;void* d_gwt_ptr;void* d_g_can2_ptr; void* d_new_cor_ptr; void* d_gpt_ptr; void* d_D1_ptr;void* d_D2_ptr; void* d_ndis_ptr; void* d_coor_ptr; void* d_cuda_global_fsHoGpat_count_ptr; void* d_cuda_global_fsHoGpat_dnn_ptr; void* d_cuda_global_fwinpat_count_ptr; void* d_cuda_global_fwinpat_dnn_ptr; double g[G_NUM]; int procImg_No = 1; dim3 numBlock; dim3 numThread; void cuda_init_parameter(){ numBlock.x = iDivUp(COL, TPB); numBlock.y = iDivUp(ROW, TPB); numThread.x = TPB; numThread.y = TPB; gpuErrchk( hipGetSymbolAddress(&d_image1_ptr,d_image1)); gpuErrchk( hipGetSymbolAddress(&d_image2_ptr,d_image2)); gpuErrchk( hipGetSymbolAddress(&d_D1_ptr,d_D1)); gpuErrchk( hipGetSymbolAddress(&d_D2_ptr,d_D2)); gpuErrchk( hipGetSymbolAddress(&d_ndis_ptr,d_ndis)); gpuErrchk( hipGetSymbolAddress(&d_coor_ptr,d_coor)); gpuErrchk( hipGetSymbolAddress(&d_cuda_global_fsHoGpat_count_ptr,d_cuda_global_fsHoGpat_count)); gpuErrchk( hipGetSymbolAddress(&d_cuda_global_fsHoGpat_dnn_ptr,d_cuda_global_fsHoGpat_dnn)); gpuErrchk( hipGetSymbolAddress(&d_cuda_global_fwinpat_count_ptr,d_cuda_global_fwinpat_count)); gpuErrchk( 
hipGetSymbolAddress(&d_cuda_global_fwinpat_dnn_ptr,d_cuda_global_fwinpat_dnn)); gpuErrchk( hipGetSymbolAddress(&d_H1_ptr,d_H1)); gpuErrchk( hipGetSymbolAddress(&d_Ht1_ptr,d_Ht1)); gpuErrchk( hipGetSymbolAddress(&d_H2_ptr,d_H2)); gpuErrchk( hipGetSymbolAddress(&d_Ht2_ptr,d_Ht2)); gpuErrchk( hipGetSymbolAddress(&d_H3_ptr,d_H3)); gpuErrchk( hipGetSymbolAddress(&d_Ht3_ptr,d_Ht3)); gpuErrchk( hipGetSymbolAddress(&d_g_ptr,d_g)); gpuErrchk( hipGetSymbolAddress(&d_sHoG1_ptr,d_sHoG1)); gpuErrchk( hipGetSymbolAddress(&d_sHoG2_ptr,d_sHoG2)); gpuErrchk( hipGetSymbolAddress(&d_g_can1_ptr,d_g_can1)); gpuErrchk( hipGetSymbolAddress(&d_g_nor1_ptr,d_g_nor1)); gpuErrchk( hipGetSymbolAddress(&d_g_ang1_ptr,d_g_ang1)); gpuErrchk( hipGetSymbolAddress(&d_g_ang2_ptr,d_g_ang2)); gpuErrchk( hipGetSymbolAddress(&d_cuda_defcan_vars_ptr,d_cuda_defcan_vars)); gpuErrchk( hipGetSymbolAddress(&d_gk_ptr,d_gk)); gpuErrchk( hipGetSymbolAddress(&d_gwt_ptr,d_gwt)); gpuErrchk( hipGetSymbolAddress(&d_g_can2_ptr,d_g_can2)); gpuErrchk( hipGetSymbolAddress(&d_new_cor_ptr, d_new_cor) ); gpuErrchk( hipGetSymbolAddress(&d_gpt_ptr, d_gpt) ); gpuErrchk( hipDeviceSynchronize() ); gpuErrchk( hipDeviceSynchronize() ); // Checks for execution error gpuErrchk( hipPeekAtLastError() ); // Checks for launch error } void copy_initial_parameters(double gk[ROW][COL],double g_can2[ROW][COL],int g_ang2[ROW][COL],double H1[ROW_H1][COL_H1],double H2[ROW_H2][COL_H2],double H3[ROW_H3][COL_H3], double D1[ROW][COL * 8], double D2[ROW - 4][(COL - 4) * 64],char sHoG2[ROW - 4][COL - 4],double ndis[(2 * ROW - 1) * (2 * COL - 1)], int coor[(2 * ROW - 1) * (2 * COL - 1)][2]){ hipMemcpy(d_gk_ptr, gk, ROW * COL * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_g_can2_ptr, g_can2, ROW * COL * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_g_ang2_ptr, g_ang2, ROW * COL * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_H1_ptr, H1, ROW_H1*COL_H1*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_H2_ptr, H2, ROW_H2*COL_H2*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_H3_ptr, H3, ROW_H3*COL_H3*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_D1_ptr, D1, ROW*(COL * 8)*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_D2_ptr, D2, (ROW - 4)*((COL - 4) * 64)*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_sHoG2_ptr, sHoG2, (ROW - 4)*(COL - 4)*sizeof(char), hipMemcpyHostToDevice); hipMemcpy(d_ndis_ptr, ndis, (2 * ROW - 1) * (2 * COL - 1)*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_coor_ptr, coor, (2 * ROW - 1) * (2 * COL - 1)*2*sizeof(int), hipMemcpyHostToDevice); } __global__ void cuda_calc_gwt(double var){ int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW) || (x >= COL)) { return; } d_gwt[y][x] = pow(d_gk[y][x], 1.0 / var); } void calc_gwt(double var,double gwt[ROW][COL]){ numBlock.x = iDivUp(COL, TPB); numBlock.y = iDivUp(ROW, TPB); hipLaunchKernelGGL(( cuda_calc_gwt), dim3(numBlock), dim3(numThread), 0, 0, var); hipMemcpy(gwt, d_gwt_ptr, ROW*COL*sizeof(double), hipMemcpyDeviceToHost); } __global__ void cuda_calc_new_cor1() { int tx = threadIdx.x; int ty = threadIdx.y; int tid = ty * blockDim.x + tx; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW) || (x >= COL)) { return; } __shared__ double sdata[TPB_X_TPB]; sdata[tid] = d_g_can1[y][x]*d_g_can2[y][x]; __syncthreads(); customAdd(sdata,&d_new_cor); } double calc_new_cor1(){ hipMemset(d_new_cor_ptr,0,sizeof(double)); numBlock.x = iDivUp(COL, TPB); numBlock.y = iDivUp(ROW, TPB); 
hipLaunchKernelGGL(( cuda_calc_new_cor1), dim3(numBlock), dim3(numThread), 0, 0, ); double new_cor; hipMemcpy(&new_cor, d_new_cor_ptr, sizeof(double), hipMemcpyDeviceToHost); return new_cor; } __global__ void test(){ // int x = blockIdx.x*blockDim.x + threadIdx.x; // int y = blockIdx.y*blockDim.y + threadIdx.y; // if ((y >= ROW - 2 * margin) || (x >= 3 * 64 * (COL - 2 * margin))) { // return; // } } void cuda_procImg(double g_can[ROW][COL], int g_ang[ROW][COL], double g_nor[ROW][COL], char g_HoG[ROW][COL][8], char sHoG[ROW - 4][COL - 4], unsigned char image1[MAX_IMAGESIZE][MAX_IMAGESIZE]){ hipMemset(d_cuda_defcan_vars_ptr, 0, 3 * sizeof(double)); if(procImg_No <= 2) hipMemcpy(d_image1_ptr, image1, MAX_IMAGESIZE*MAX_IMAGESIZE*sizeof(unsigned char), hipMemcpyHostToDevice); numBlock.x = iDivUp(COL, TPB); numBlock.y = iDivUp(ROW, TPB); hipLaunchKernelGGL(( cuda_defcan1), dim3(numBlock), dim3(numThread), 0, 0, ); hipLaunchKernelGGL(( cuda_defcan2), dim3(numBlock), dim3(numThread), 0, 0, ); hipLaunchKernelGGL(( cuda_roberts8), dim3(numBlock), dim3(numThread), 0, 0, ); hipMemcpy(g_can, d_g_can1_ptr, ROW*COL*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(g_ang, d_g_ang1_ptr, ROW*COL*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(g_nor, d_g_nor1_ptr, ROW*COL*sizeof(double), hipMemcpyDeviceToHost); procImg_No = procImg_No+1; } void cuda_calc_defcan1(double g_can1[ROW][COL], unsigned char image1[MAX_IMAGESIZE][MAX_IMAGESIZE]){ // hipMemset(d_cuda_defcan_vars_ptr, 0, 3 * sizeof(double)); // // hipMemcpy(d_image1_ptr, image1, MAX_IMAGESIZE*MAX_IMAGESIZE*sizeof(unsigned char), hipMemcpyHostToDevice); // numBlock.x = iDivUp(COL, TPB); // numBlock.y = iDivUp(ROW, TPB); // cuda_defcan1<<<numBlock, numThread>>>(); // cuda_defcan2<<<numBlock, numThread>>>(); // cuda_roberts8<<<numBlock, numThread>>>(); // hipMemcpy(g_can1, d_g_can1_ptr, ROW*COL*sizeof(double), hipMemcpyDeviceToHost); } int needH = 1; void cuda_update_parameter(char sHoG1[ROW - 4][COL - 4]){ hipMemcpy(d_sHoG1_ptr, sHoG1, (ROW - 4)*(COL-4)*sizeof(char), hipMemcpyHostToDevice); // hipMemcpy(d_g_ang1_ptr, g_ang1, (ROW )*(COL)*sizeof(int), hipMemcpyHostToDevice); // hipMemcpy(d_g_can1_ptr, g_can1, (ROW )*(COL)*sizeof(double), hipMemcpyHostToDevice); } void cuda_update_image(unsigned char image1[MAX_IMAGESIZE][MAX_IMAGESIZE]){ hipMemcpy(d_image1_ptr, image1, MAX_IMAGESIZE*MAX_IMAGESIZE*sizeof(unsigned char), hipMemcpyHostToDevice); } void cuda_Ht(double newVar,int H_No){ if(H_No == 1){ numBlock.x = iDivUp(COL_Ht1, TPB); numBlock.y = iDivUp(ROW_H1, TPB); if (newVar > 1.0) { hipLaunchKernelGGL(( Ht1_1), dim3(numBlock), dim3(numThread), 0, 0, ); }else if (newVar < 1.0 / 32.0) { hipLaunchKernelGGL(( Ht1_2), dim3(numBlock), dim3(numThread), 0, 0, ); } else { int count = floor(log2(newVar)) + 5; hipLaunchKernelGGL(( Ht1_3), dim3(numBlock), dim3(numThread), 0, 0, count, newVar); } } else if(H_No == 2){ numBlock.x = iDivUp(COL_Ht2, TPB); numBlock.y = iDivUp(ROW_H2, TPB); if (newVar > 1.0) { hipLaunchKernelGGL(( Ht2_1), dim3(numBlock), dim3(numThread), 0, 0, ); }else if (newVar < 1.0 / 32.0) { hipLaunchKernelGGL(( Ht2_2), dim3(numBlock), dim3(numThread), 0, 0, ); } else { int count = floor(log2(newVar)) + 5; hipLaunchKernelGGL(( Ht2_3), dim3(numBlock), dim3(numThread), 0, 0, count, newVar); } } else if(H_No == 3){ numBlock.x = iDivUp(COL_Ht3, TPB); numBlock.y = iDivUp(ROW_H3, TPB); double var[6] = VARTABLE2; if (newVar > var[5]) { hipLaunchKernelGGL(( Ht3_1), dim3(numBlock), dim3(numThread), 0, 0, ); }else if (newVar < var[0]) { hipLaunchKernelGGL(( 
Ht3_2), dim3(numBlock), dim3(numThread), 0, 0, ); } else { int count = floor(log2(newVar)) + 10; hipLaunchKernelGGL(( Ht3_3), dim3(numBlock), dim3(numThread), 0, 0, count, newVar); } } } double* cuda_calc_g(int calc_g_type){ // cout<<"calc_g_type: "<<calc_g_type<<endl; hipMemset(d_g_ptr, 0, G_NUM * sizeof(double)); numBlock.x = iDivUp(COL, TPB); numBlock.y = iDivUp(ROW, TPB); if(calc_g_type == 1){ hipLaunchKernelGGL(( weightedAVG_1), dim3(numBlock), dim3(numThread), 0, 0, ); } else if(calc_g_type == 2){ hipLaunchKernelGGL(( weightedAVG_2), dim3(numBlock), dim3(numThread), 0, 0, ); } else if(calc_g_type == 3){ hipLaunchKernelGGL(( weightedAVG_3), dim3(numBlock), dim3(numThread), 0, 0, ); } hipMemcpy(g, d_g_ptr, G_NUM*sizeof(double), hipMemcpyDeviceToHost); return g; } __device__ void cuda_multiplyVect3x3(double inMat[3][3], double inVect[3], double outVect[3]) { int i, j; double sum; for(i = 0 ; i < 3 ; ++i) { sum = 0.0; for(j = 0 ; j < 3 ; ++j) { sum += inMat[i][j] * inVect[j]; } outVect[i] = sum; } } __global__ void cuda_calc_bilinear_normal_inverse_projection(int x_size1, int y_size1, int x_size2, int y_size2){ int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= y_size1) || (x >= x_size1)) { return; } int cx, cy, cx2, cy2; if (y_size1 == ROW) { cx = CX, cy = CY; cx2 = CX2, cy2 = CY2; } else { cx = CX2, cy = CY2; cx2 = CX, cy2 = CY; } double inVect[3], outVect[3]; double x_new, y_new, x_frac, y_frac; double gray_new; int m, n; inVect[2] = 1.0; inVect[1] = y - cy; inVect[0] = x - cx; int i, j; double sum; for(i = 0 ; i < 3 ; ++i) { sum = 0.0; for(j = 0 ; j < 3 ; ++j) { sum += d_gpt[i][j] * inVect[j]; } outVect[i] = sum; } x_new = outVect[0] / outVect[2] + cx2; y_new = outVect[1] / outVect[2] + cy2; m = (int)floor(x_new); n = (int)floor(y_new); x_frac = x_new - m; y_frac = y_new - n; if (m >= 0 && m+1 < x_size2 && n >= 0 && n+1 < y_size2) { gray_new = (1.0 - y_frac) * ((1.0 - x_frac) * d_image2[n][m] + x_frac * d_image2[n][m+1]) + y_frac * ((1.0 - x_frac) * d_image2[n+1][m] + x_frac * d_image2[n+1][m+1]); d_image1[y][x] = (unsigned char)gray_new; } else { #ifdef BACKGBLACK d_image1[y][x] = BLACK; #else d_image1[y][x] = WHITE; #endif } } void cuda_bilinear_normal_inverse_projection(double gpt[3][3], int x_size1, int y_size1, int x_size2, int y_size2, unsigned char image1[MAX_IMAGESIZE][MAX_IMAGESIZE], unsigned char image2[MAX_IMAGESIZE][MAX_IMAGESIZE]) { /* inverse projection transformation of the image by bilinear interpolation */ numBlock.x = iDivUp(x_size1, TPB); numBlock.y = iDivUp(y_size1, TPB); hipMemcpy(d_image2_ptr,image1,MAX_IMAGESIZE*MAX_IMAGESIZE*sizeof(unsigned char),hipMemcpyHostToDevice); hipMemcpy(d_gpt_ptr,gpt,3*3*sizeof(double),hipMemcpyHostToDevice); hipLaunchKernelGGL(( cuda_calc_bilinear_normal_inverse_projection), dim3(numBlock), dim3(numThread), 0, 0, x_size1, y_size1, x_size2, y_size2); hipMemcpy(image2, d_image1_ptr, MAX_IMAGESIZE*MAX_IMAGESIZE*sizeof(unsigned char), hipMemcpyDeviceToHost); } __global__ void cuda_global_fsHoGpat() { int tx = threadIdx.x; int ty = threadIdx.y; int tid = ty * blockDim.x + tx; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; // if ((y >= ROW) || (x >= COL)) { // return; // } double sHoGnumber[64] = sHoGNUMBER; int margin = 2; double minInit = sqrt((double)((ROW - 2 * margin) * (ROW - 2 * margin) + (COL - 2 * margin) * (COL - 2 * margin))); bool condition1 = ((y >= margin) && (x >= margin) && (y < ROW-margin) && (x < COL-margin) && d_sHoG1[y - 
margin][x - margin] != -1); bool condition2 = ((y >= margin) && (x >= margin) && (y < ROW-margin) && (x < COL-margin) && d_sHoG2[y - margin][x - margin] != -1); int angcode = 0; for (int s = 0 ; condition1 && s < 64 ; s++) { if (d_sHoG1[y - margin][x - margin] == sHoGnumber[s]) { angcode = s; break; } } __shared__ int sdata_int[2][TPB_X_TPB]; __shared__ double sdata_double[2][TPB_X_TPB]; sdata_int[0][tid]=condition1; sdata_int[1][tid]=condition2; double min_1 = condition1*minInit; double delta_1 = 0; if(condition1) delta_1 = d_D2[y - margin][x - margin + (COL - 2 * margin) * angcode]; min_1 = min(min_1,delta_1); double delta_2 = condition2; double min_2 = condition2*minInit; for (int y1 = 0 ; condition2 && y1 < TRUNC ; y1++) { if (y + d_coor[y1][0] < margin || y + d_coor[y1][0] >= ROW - margin || x + d_coor[y1][1] < margin || x + d_coor[y1][1] >= COL - margin ) continue; if (d_sHoG1[y + d_coor[y1][0] - margin][x + d_coor[y1][1] - margin] != d_sHoG2[y - margin][x - margin]) continue; // if (ndis[y1] > minInit) break; delta_2 = d_ndis[y1]; // printf("y1 = %d nn1 = %f \n", y1, ndis[y1]); if (delta_2 < min_2) min_2 = delta_2; // printf("%d %d %d d_ndis[y1]:%.5f\n",x,y,y1,d_ndis[y1]); // printf("y1 = %d\n", y1); break; } sdata_double[0][tid] = min_1; sdata_double[1][tid] = min_2; __syncthreads(); customAdd(sdata_int[0],d_cuda_global_fsHoGpat_count); customAdd(sdata_int[1],d_cuda_global_fsHoGpat_count+1); customAdd(sdata_double[0],d_cuda_global_fsHoGpat_dnn); customAdd(sdata_double[1],d_cuda_global_fsHoGpat_dnn+1); } double cuda_fsHoGpat(char sHoG1[ROW - 4][COL - 4]){ numBlock.x = iDivUp(COL, TPB); numBlock.y = iDivUp(ROW, TPB); hipMemset(d_cuda_global_fsHoGpat_dnn_ptr,0,2*sizeof(double)); hipMemset(d_cuda_global_fsHoGpat_count_ptr,0,2*sizeof(int)); hipMemcpy(d_sHoG1_ptr,sHoG1,(ROW - 4)*(COL - 4)*sizeof(char),hipMemcpyHostToDevice); hipLaunchKernelGGL(( cuda_global_fsHoGpat), dim3(numBlock), dim3(numThread), 0, 0, ); int cuda_global_fsHoGpat_count[2]; double cuda_global_fsHoGpat_dnn[2]; hipMemcpy(cuda_global_fsHoGpat_count, d_cuda_global_fsHoGpat_count_ptr, 2*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(cuda_global_fsHoGpat_dnn, d_cuda_global_fsHoGpat_dnn_ptr, 2*sizeof(double), hipMemcpyDeviceToHost); double dnn1 = cuda_global_fsHoGpat_dnn[0] / cuda_global_fsHoGpat_count[0]; double dnn2 = cuda_global_fsHoGpat_dnn[1] / cuda_global_fsHoGpat_count[1]; return (dnn1 + dnn2)/2.0; } __global__ void cuda_global_fwinpat() { int tx = threadIdx.x; int ty = threadIdx.y; int tid = ty * blockDim.x + tx; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; // if ((y >= ROW) || (x >= COL)) { // return; // } // if(x==30 && y==60){ // printf("%d %d\n", d_g_ang1[y][x],d_g_ang2[y][x]); // } int margine = 4; double minInit = sqrt((double)((ROW - 2 * margine) * (ROW - 2 * margine) + (COL - 2 * margine) * (COL - 2 * margine))); bool condition1 = ((y >= MARGINE) && (x >= MARGINE) && (y < ROW-MARGINE) && (x < COL-MARGINE) && d_g_ang1[y - MARGINE][x - MARGINE] != -1); bool condition2 = ((y >= MARGINE) && (x >= MARGINE) && (y < ROW-MARGINE) && (x < COL-MARGINE) && d_g_ang2[y - MARGINE][x - MARGINE] != -1); int angcode1 = 0; if(condition1)angcode1 = d_g_ang1[y][x]; __shared__ int sdata_int[2][TPB_X_TPB]; __shared__ double sdata_double[2][TPB_X_TPB]; sdata_int[0][tid]=condition1; sdata_int[1][tid]=condition2; double min_1 = condition1*minInit; double delta_1 = 0; if(condition1) delta_1 = d_D1[y][x + COL * angcode1]; min_1 = min(min_1,delta_1); double delta_2 = condition2; double min_2 
= condition2*minInit; int angcode2 = 0; if(condition2)angcode2 = d_g_ang2[y][x]; for (int y1 = 0 ; condition2 && y1 < (2 * ROW - 1) * (2 * COL - 1) ; y1++) { if (y + d_coor[y1][0] < 0 || y + d_coor[y1][0] >= ROW || x + d_coor[y1][1] < 0 || x + d_coor[y1][1] >= COL ) continue; if (d_g_ang1[y + d_coor[y1][0]][x + d_coor[y1][1]] != angcode2) continue; delta_2 = d_ndis[y1]; // printf("y1 = %d nn1 = %f \n", y1, ndis[y1]); if (delta_2 < min_2) min_2 = delta_2; break; } sdata_double[0][tid] = min_1; sdata_double[1][tid] = min_2; __syncthreads(); customAdd(sdata_int[0],d_cuda_global_fwinpat_count); customAdd(sdata_int[1],d_cuda_global_fwinpat_count+1); customAdd(sdata_double[0],d_cuda_global_fwinpat_dnn); customAdd(sdata_double[1],d_cuda_global_fwinpat_dnn+1); } double cuda_fwinpat(){ numBlock.x = iDivUp(COL, TPB); numBlock.y = iDivUp(ROW, TPB); hipMemset(d_cuda_global_fwinpat_dnn_ptr,0,2*sizeof(double)); hipMemset(d_cuda_global_fwinpat_count_ptr,0,2*sizeof(int)); // hipMemcpy(d_g_ang2_ptr,g_ang2,(ROW)*(COL)*sizeof(int),hipMemcpyHostToDevice); hipLaunchKernelGGL(( cuda_global_fwinpat), dim3(numBlock), dim3(numThread), 0, 0, ); gpuErrchk( hipDeviceSynchronize() ); gpuErrchk( hipDeviceSynchronize() ); // Checks for execution error gpuErrchk( hipPeekAtLastError() ); // Checks for launch error int cuda_global_fwinpat_count[2]; double cuda_global_fwinpat_dnn[2]; hipMemcpy(cuda_global_fwinpat_count, d_cuda_global_fwinpat_count_ptr, 2*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(cuda_global_fwinpat_dnn, d_cuda_global_fwinpat_dnn_ptr, 2*sizeof(double), hipMemcpyDeviceToHost); double dnn1 = cuda_global_fwinpat_dnn[0] / cuda_global_fwinpat_count[0]; double dnn2 = cuda_global_fwinpat_dnn[1] / cuda_global_fwinpat_count[1]; return (dnn1 + dnn2)/2.0; }
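customAdd in the file above is the classic shared-memory tree reduction from the NVIDIA reduction slides linked in the source, finished with one atomicAdd of each block's partial sum into a global accumulator; its fixed unrolling (starting at tid + 512) assumes 1024 threads per block. A minimal sketch of the same idea with a loop in place of the unrolling follows; the kernel name and launch sizes are illustrative only.

#include <cuda_runtime.h>

// Block-wide sum of one float per thread, then a single atomicAdd per block
// into *g_out -- the same shape as customAdd, without the fixed 1024-thread
// unrolling. Assumes blockDim.x is a power of two.
__global__ void BlockSum(const float* in, int n, float* g_out) {
  extern __shared__ float sdata[];
  int tid = threadIdx.x;
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  sdata[tid] = (i < n) ? in[i] : 0.0f;
  __syncthreads();
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {  // tree reduction in shared memory
    if (tid < s) sdata[tid] += sdata[tid + s];
    __syncthreads();
  }
  if (tid == 0) atomicAdd(g_out, sdata[0]);       // one global update per block
}
// launch: BlockSum<<<grid, block, block.x * sizeof(float)>>>(d_in, n, d_sum);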
f4b8e2d74e76fe72791a454343da1ff9343bdb02.cu
// // Created by Shitian Ni on 1/18/18. // #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include<iostream> #include<string.h> #include "parameter.h" using namespace std; __device__ double d_H1[ROW_H1][COL_H1], d_Ht1[ROW_H1][COL_Ht1]; __device__ double d_H2[ROW_H2][COL_H2], d_Ht2[ROW_H2][COL_Ht2]; __device__ double d_H3[ROW_H3][COL_H3], d_Ht3[ROW_H3][COL_Ht3]; __device__ unsigned char d_image1[MAX_IMAGESIZE][MAX_IMAGESIZE]; __device__ unsigned char d_image2[MAX_IMAGESIZE][MAX_IMAGESIZE]; __device__ double d_g[G_NUM], d_g_can1[ROW][COL], d_g_nor1[ROW][COL], d_gk[ROW][COL], d_gwt[ROW][COL],d_g_can2[ROW][COL]; __device__ int d_g_ang1[ROW][COL]; __device__ int d_g_ang2[ROW][COL]; __device__ char d_sHoG1[ROW - 4][COL - 4]; __device__ char d_sHoG2[ROW - 4][COL - 4]; __device__ double d_new_cor; __device__ double d_gpt[3][3]; __device__ double d_D1[ROW][COL * 8]; __device__ double d_D2[ROW - 4][(COL - 4) * 64]; __device__ double d_ndis[(2 * ROW - 1) * (2 * COL - 1)]; __device__ int d_coor[(2 * ROW - 1) * (2 * COL - 1)][2]; __device__ int d_cuda_global_fsHoGpat_count[2]; __device__ double d_cuda_global_fsHoGpat_dnn[2]; __device__ int d_cuda_global_fwinpat_count[2]; __device__ double d_cuda_global_fwinpat_dnn[2]; int iDivUp(int hostPtr, int b){ return ((hostPtr % b) != 0) ? (hostPtr / b + 1) : (hostPtr / b); }; //https://stackoverflow.com/a/14038590 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void Ht1_1() { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW_H1) || (x >= COL_Ht1)) { return; } d_Ht1[y][x] = d_H1[y][x + COL * 27 * 5]; }; __global__ void Ht1_2() { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW_H1) || (x >= COL_Ht1)) { return; } d_Ht1[y][x] = d_H1[y][x]; }; __global__ void Ht1_3(int count, double newVar) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW_H1) || (x >= COL_Ht1)) { return; } double var = pow(2.0,count -5) ; double var_p_1 = var * 2.0; d_Ht1[y][x] = d_H1[y][x + COL * 27 * count] + (d_H1[y][x + COL * 27 * (count + 1)] - d_H1[y][x + COL * 27 * count]) / (var_p_1 - var) * (newVar - var); }; __global__ void Ht2_1() { int margin = 2; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW - 2 * margin) || (x >= 3 * 64 * (COL - 2 * margin))) { return; } d_Ht2[y][x] = d_H2[y][x + (COL - 2 * margin) * 3 * 64 * 5]; }; __global__ void Ht2_2() { int margin = 2; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW - 2 * margin) || (x >= 3 * 64 * (COL - 2 * margin))) { return; } d_Ht2[y][x] = d_H2[y][x]; }; __global__ void Ht2_3(int count, double newVar) { int margin = 2; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW - 2 * margin) || (x >= 3 * 64 * (COL - 2 * margin))) { return; } double var = pow(2.0,count -5) ; double var_p_1 = var * 2.0; d_Ht2[y][x] = d_H2[y][x + (COL - 2 * margin) * 3 * 64 * count] + (d_H2[y][x + (COL - 2 * margin) * 3 * 64 * (count + 1)] - d_H2[y][x + (COL - 2 * margin) * 3 * 64 * count]) / (var_p_1 - var) * (newVar - var); }; __global__ void Ht3_1() { int margin = 2; int x 
= blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW - 2 * margin) || (x >= 3 * 64 * (COL - 2 * margin))) { return; } d_Ht3[y][x] = d_H3[y][x + (COL - 2 * margin) * 3 * 64 * 5]; }; __global__ void Ht3_2() { int margin = 2; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW - 2 * margin) || (x >= 3 * 64 * (COL - 2 * margin))) { return; } d_Ht3[y][x] = d_H3[y][x]; }; __global__ void Ht3_3(int count, double newVar) { int margin = 2; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW - 2 * margin) || (x >= 3 * 64 * (COL - 2 * margin))) { return; } double var = pow(2.0,count - 10) ; double var_p_1 = var * 2.0; d_Ht3[y][x] = d_H3[y][x + (COL - 2 * margin) * 3 * 64 * count] + (d_H3[y][x + (COL - 2 * margin) * 3 * 64 * (count + 1)] - d_H3[y][x + (COL - 2 * margin) * 3 * 64 * count]) / (var_p_1 - var) * (newVar - var); }; //1000 times 1200~1300ms //http://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/projects/reduction/doc/reduction.pdf template<typename T> __device__ void customAdd(T* sdata,T* g_odata){ int tx = threadIdx.x; int ty = threadIdx.y; int tid = ty * blockDim.x + tx; // do reduction in shared mem if (tid < 512) { sdata[tid] += sdata[tid + 512]; } __syncthreads(); if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); if (tid < 32){ sdata[tid] += sdata[tid + 32]; }__syncthreads(); if (tid < 16){ sdata[tid] += sdata[tid + 16]; }__syncthreads(); if (tid < 8){ sdata[tid] += sdata[tid + 8]; }__syncthreads(); if (tid < 4){ sdata[tid] += sdata[tid + 4]; }__syncthreads(); if (tid < 2){ sdata[tid] += sdata[tid + 2]; }__syncthreads(); if (tid < 1){ sdata[tid] += sdata[tid + 1]; }__syncthreads(); // write result for this block to global mem if (tid == 0) {atomicAdd(g_odata , sdata[tid]);} } __device__ void weightedAVG_Add(double t0, double tx2,double ty2){ __shared__ double sdata[6][TPB_X_TPB]; int tx = threadIdx.x; int ty = threadIdx.y; int tid = ty * blockDim.x + tx; int x1 = blockIdx.x*blockDim.x + threadIdx.x; int y1 = blockIdx.y*blockDim.y + threadIdx.y; double dx1=x1 - CX; double dy1=y1 - CY; sdata[0][tid]=t0; sdata[1][tid]=tx2; sdata[2][tid]=ty2; sdata[3][tid]=t0 * dx1; sdata[4][tid]=t0 * dx1 * dx1; sdata[5][tid]=t0 * dx1 * dx1 * dx1; __syncthreads(); customAdd(sdata[0],d_g); customAdd(sdata[1],d_g+21); customAdd(sdata[2],d_g+22); customAdd(sdata[3],d_g+3); customAdd(sdata[4],d_g+4); customAdd(sdata[5],d_g+5); __syncthreads(); sdata[0][tid]=t0 * dx1 * dx1 * dx1 * dx1; sdata[1][tid]=t0 * dy1; sdata[2][tid]=t0 * dy1 * dy1; sdata[3][tid]=t0 * dy1 * dy1 * dy1; sdata[4][tid]=t0 * dy1 * dy1 * dy1 * dy1; sdata[5][tid]=t0 * dx1 * dy1; __syncthreads(); customAdd(sdata[0],d_g+6); customAdd(sdata[1],d_g+7); customAdd(sdata[2],d_g+8); customAdd(sdata[3],d_g+9); customAdd(sdata[4],d_g+10); customAdd(sdata[5],d_g+11); __syncthreads(); sdata[0][tid]=t0 * dx1 * dx1 * dy1; sdata[1][tid]=t0 * dx1 * dx1 * dx1 * dy1; sdata[2][tid]=t0 * dx1 * dy1 * dy1; sdata[3][tid]=t0 * dx1 * dx1 * dy1 * dy1; sdata[4][tid]=t0 * dx1 * dy1 * dy1 * dy1; sdata[5][tid]=tx2 * dx1; __syncthreads(); customAdd(sdata[0],d_g+12); customAdd(sdata[1],d_g+13); customAdd(sdata[2],d_g+14); customAdd(sdata[3],d_g+15); customAdd(sdata[4],d_g+16); customAdd(sdata[5],d_g+17); __syncthreads(); sdata[0][tid]=tx2 * dy1; sdata[1][tid]=ty2 
* dx1; sdata[2][tid]=ty2 * dy1; sdata[3][tid]=tx2 * dx1 * dx1; sdata[4][tid]=ty2 * dx1 * dy1; sdata[5][tid]=tx2 * dx1 * dy1; __syncthreads(); customAdd(sdata[0],d_g+18); customAdd(sdata[1],d_g+19); customAdd(sdata[2],d_g+20); customAdd(sdata[3],d_g+23); customAdd(sdata[4],d_g+24); customAdd(sdata[5],d_g+25); __syncthreads(); sdata[0][tid]=ty2 * dy1 * dy1; __syncthreads(); customAdd(sdata[0],d_g+26); __syncthreads(); } __global__ void weightedAVG_1() { int x1 = blockIdx.x*blockDim.x + threadIdx.x; int y1 = blockIdx.y*blockDim.y + threadIdx.y; bool condition = ((y1 >= MARGINE) && (x1 >= MARGINE) && (y1 < ROW-MARGINE) && (x1 < COL-MARGINE) && d_g_ang1[y1][x1] != -1); double t0 = 0; double tx2 = 0; double ty2 = 0; int thre = -1; if(condition){ thre = (d_g_ang1[y1][x1] + 1) * 3 * COL; t0 = d_Ht1[y1][thre + x1] * d_g_can1[y1][x1]; tx2 = d_Ht1[y1][thre + x1 + COL] * d_g_can1[y1][x1]; ty2 = d_Ht1[y1][thre + x1 + COL * 2] * d_g_can1[y1][x1]; } weightedAVG_Add(t0, tx2, ty2); }; __global__ void weightedAVG_2() { int x1 = blockIdx.x*blockDim.x + threadIdx.x; int y1 = blockIdx.y*blockDim.y + threadIdx.y; int margin = 2; double sHoGnumber[64] = sHoGNUMBER; bool condition = ((y1 >= margin) && (x1 >= margin) && (y1 < ROW-margin) && (x1 < COL-margin) && d_sHoG1[y1 - margin][x1 - margin] != -1); double t0 = 0; double tx2 = 0; double ty2 = 0; int thre = -1; for (int s = 0 ; condition && s < 64 ; s++) { if (d_sHoG1[y1 - margin][x1 - margin] == sHoGnumber[s]) { thre = s * 3 * (COL - 2 * margin); t0 = d_Ht2[y1 - margin][thre + x1 - margin] * d_g_can1[y1][x1]; tx2 = d_Ht2[y1 - margin][thre + x1 - margin + (COL - 2 * margin)] * d_g_can1[y1][x1]; ty2 = d_Ht2[y1 - margin][thre + x1 - margin + (COL - 2 * margin) * 2] * d_g_can1[y1][x1]; break; } } weightedAVG_Add(t0, tx2, ty2); }; __global__ void weightedAVG_3() { int x1 = blockIdx.x*blockDim.x + threadIdx.x; int y1 = blockIdx.y*blockDim.y + threadIdx.y; int margin = 2; double sHoGnumber[64] = sHoGNUMBER; bool condition = ((y1 >= margin) && (x1 >= margin) && (y1 < ROW-margin) && (x1 < COL-margin) && d_sHoG1[y1 - margin][x1 - margin] != -1); double t0 = 0; double tx2 = 0; double ty2 = 0; int thre = -1; for (int s = 0 ; condition && s < 64 ; s++) { if (d_sHoG1[y1 - margin][x1 - margin] == sHoGnumber[s]) { thre = s * 3 * (COL - 2 * margin); t0 = d_Ht3[y1 - margin][thre + x1 - margin] * d_g_can1[y1][x1]; tx2 = d_Ht3[y1 - margin][thre + x1 - margin + (COL - 2 * margin)] * d_g_can1[y1][x1]; ty2 = d_Ht3[y1 - margin][thre + x1 - margin + (COL - 2 * margin) * 2] * d_g_can1[y1][x1]; break; } } weightedAVG_Add(t0, tx2, ty2); }; __global__ void cuda_roberts8() { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW) || (x >= COL)) { return; } /* extraction of gradient information by Roberts operator */ /* with 8-directional codes and strength */ double delta_RD, delta_LD; double angle; /* angle & norm of gradient vector calculated by Roberts operator */ if(y >= ROW-1 || x >= COL-1){ d_g_ang1[y][x] = -1; d_g_nor1[y][x] = 0.0; return; } delta_RD = d_image1[y][x + 1] - d_image1[y + 1][x]; delta_LD = d_image1[y][x] - d_image1[y + 1][x + 1]; d_g_nor1[y][x] = sqrt(delta_RD * delta_RD + delta_LD * delta_LD); if (d_g_nor1[y][x] == 0.0 || delta_RD * delta_RD + delta_LD * delta_LD < NoDIRECTION * NoDIRECTION) { d_g_ang1[y][x] = -1; return; } if (abs(delta_RD) == 0.0) { if (delta_LD > 0) d_g_ang1[y][x] = 3; else if (delta_LD < 0) d_g_ang1[y][x] = 7; else d_g_ang1[y][x] = -1; return; } angle = atan2(delta_LD, delta_RD); if ( angle * 8.0 > 
7.0 * PI) {d_g_ang1[y][x] = 5; return;} if (angle * 8.0 > 5.0 * PI) {d_g_ang1[y][x] = 4; return;} if (angle * 8.0 > 3.0 * PI) {d_g_ang1[y][x] = 3; return;} if (angle * 8.0 > 1.0 * PI) {d_g_ang1[y][x] = 2; return;} if (angle * 8.0 > -1.0 * PI) {d_g_ang1[y][x] = 1; return;} if (angle * 8.0 > -3.0 * PI) {d_g_ang1[y][x] = 0; return;} if (angle * 8.0 > -5.0 * PI) {d_g_ang1[y][x] = 7; return;} if (angle * 8.0 > -7.0 * PI) {d_g_ang1[y][x] = 6; return;} d_g_ang1[y][x] = 5; } /* d_cuda_defcan_vars[0]: mean d_cuda_defcan_vars[1]: norm d_cuda_defcan_vars[2]: npo */ __device__ double d_cuda_defcan_vars[3]; __global__ void cuda_defcan1() { int tx = threadIdx.x; int ty = threadIdx.y; int tid = ty * blockDim.x + tx; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW) || (x >= COL)) { return; } /* definite canonicalization */ int margine = CANMARGIN / 2; int condition = ((x>=margine && y>=margine) && (x<COL-margine)&&(y<ROW-margine) && d_image1[y][x]!=WHITE); double this_pixel = condition*(double)d_image1[y][x]; __shared__ double sdata[3][TPB_X_TPB]; sdata[0][tid] = this_pixel; sdata[1][tid] = this_pixel*this_pixel; sdata[2][tid] = condition; __syncthreads(); customAdd(sdata[0],d_cuda_defcan_vars); customAdd(sdata[1],d_cuda_defcan_vars+1); customAdd(sdata[2],d_cuda_defcan_vars+2); } __global__ void cuda_defcan2() { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW) || (x >= COL)) { return; } /* s_vars[0]: mean s_vars[1]: norm */ __shared__ double s_vars[2]; if(threadIdx.x == 0 && threadIdx.y == 0){ double npo = d_cuda_defcan_vars[2]; double mean = d_cuda_defcan_vars[0]/ (double)npo; double norm = d_cuda_defcan_vars[1] - (double)npo * mean * mean; if (norm == 0.0) norm = 1.0; s_vars[0] = mean; s_vars[1] = norm; } __syncthreads(); int condition = ((x<COL-CANMARGIN)&&(y<ROW-CANMARGIN) && d_image1[y][x]!=WHITE); double ratio = 1.0 / sqrt(s_vars[1]); d_g_can1[y][x] = condition * ratio * ((double)d_image1[y][x] - s_vars[0]); } void* d_image1_ptr; void* d_image2_ptr; void* d_H1_ptr;void* d_Ht1_ptr; void* d_H2_ptr;void* d_Ht2_ptr; void* d_H3_ptr;void* d_Ht3_ptr; void* d_g_ptr; void* d_g_can1_ptr;void* d_g_nor1_ptr;void* d_g_ang1_ptr;void* d_g_ang2_ptr;void* d_sHoG1_ptr;void* d_sHoG2_ptr; void* d_cuda_defcan_vars_ptr; void* d_gk_ptr;void* d_gwt_ptr;void* d_g_can2_ptr; void* d_new_cor_ptr; void* d_gpt_ptr; void* d_D1_ptr;void* d_D2_ptr; void* d_ndis_ptr; void* d_coor_ptr; void* d_cuda_global_fsHoGpat_count_ptr; void* d_cuda_global_fsHoGpat_dnn_ptr; void* d_cuda_global_fwinpat_count_ptr; void* d_cuda_global_fwinpat_dnn_ptr; double g[G_NUM]; int procImg_No = 1; dim3 numBlock; dim3 numThread; void cuda_init_parameter(){ numBlock.x = iDivUp(COL, TPB); numBlock.y = iDivUp(ROW, TPB); numThread.x = TPB; numThread.y = TPB; gpuErrchk( cudaGetSymbolAddress(&d_image1_ptr,d_image1)); gpuErrchk( cudaGetSymbolAddress(&d_image2_ptr,d_image2)); gpuErrchk( cudaGetSymbolAddress(&d_D1_ptr,d_D1)); gpuErrchk( cudaGetSymbolAddress(&d_D2_ptr,d_D2)); gpuErrchk( cudaGetSymbolAddress(&d_ndis_ptr,d_ndis)); gpuErrchk( cudaGetSymbolAddress(&d_coor_ptr,d_coor)); gpuErrchk( cudaGetSymbolAddress(&d_cuda_global_fsHoGpat_count_ptr,d_cuda_global_fsHoGpat_count)); gpuErrchk( cudaGetSymbolAddress(&d_cuda_global_fsHoGpat_dnn_ptr,d_cuda_global_fsHoGpat_dnn)); gpuErrchk( cudaGetSymbolAddress(&d_cuda_global_fwinpat_count_ptr,d_cuda_global_fwinpat_count)); gpuErrchk( cudaGetSymbolAddress(&d_cuda_global_fwinpat_dnn_ptr,d_cuda_global_fwinpat_dnn)); 
gpuErrchk( cudaGetSymbolAddress(&d_H1_ptr,d_H1)); gpuErrchk( cudaGetSymbolAddress(&d_Ht1_ptr,d_Ht1)); gpuErrchk( cudaGetSymbolAddress(&d_H2_ptr,d_H2)); gpuErrchk( cudaGetSymbolAddress(&d_Ht2_ptr,d_Ht2)); gpuErrchk( cudaGetSymbolAddress(&d_H3_ptr,d_H3)); gpuErrchk( cudaGetSymbolAddress(&d_Ht3_ptr,d_Ht3)); gpuErrchk( cudaGetSymbolAddress(&d_g_ptr,d_g)); gpuErrchk( cudaGetSymbolAddress(&d_sHoG1_ptr,d_sHoG1)); gpuErrchk( cudaGetSymbolAddress(&d_sHoG2_ptr,d_sHoG2)); gpuErrchk( cudaGetSymbolAddress(&d_g_can1_ptr,d_g_can1)); gpuErrchk( cudaGetSymbolAddress(&d_g_nor1_ptr,d_g_nor1)); gpuErrchk( cudaGetSymbolAddress(&d_g_ang1_ptr,d_g_ang1)); gpuErrchk( cudaGetSymbolAddress(&d_g_ang2_ptr,d_g_ang2)); gpuErrchk( cudaGetSymbolAddress(&d_cuda_defcan_vars_ptr,d_cuda_defcan_vars)); gpuErrchk( cudaGetSymbolAddress(&d_gk_ptr,d_gk)); gpuErrchk( cudaGetSymbolAddress(&d_gwt_ptr,d_gwt)); gpuErrchk( cudaGetSymbolAddress(&d_g_can2_ptr,d_g_can2)); gpuErrchk( cudaGetSymbolAddress(&d_new_cor_ptr, d_new_cor) ); gpuErrchk( cudaGetSymbolAddress(&d_gpt_ptr, d_gpt) ); gpuErrchk( cudaDeviceSynchronize() ); gpuErrchk( cudaThreadSynchronize() ); // Checks for execution error gpuErrchk( cudaPeekAtLastError() ); // Checks for launch error } void copy_initial_parameters(double gk[ROW][COL],double g_can2[ROW][COL],int g_ang2[ROW][COL],double H1[ROW_H1][COL_H1],double H2[ROW_H2][COL_H2],double H3[ROW_H3][COL_H3], double D1[ROW][COL * 8], double D2[ROW - 4][(COL - 4) * 64],char sHoG2[ROW - 4][COL - 4],double ndis[(2 * ROW - 1) * (2 * COL - 1)], int coor[(2 * ROW - 1) * (2 * COL - 1)][2]){ cudaMemcpy(d_gk_ptr, gk, ROW * COL * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_g_can2_ptr, g_can2, ROW * COL * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_g_ang2_ptr, g_ang2, ROW * COL * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_H1_ptr, H1, ROW_H1*COL_H1*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_H2_ptr, H2, ROW_H2*COL_H2*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_H3_ptr, H3, ROW_H3*COL_H3*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_D1_ptr, D1, ROW*(COL * 8)*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_D2_ptr, D2, (ROW - 4)*((COL - 4) * 64)*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_sHoG2_ptr, sHoG2, (ROW - 4)*(COL - 4)*sizeof(char), cudaMemcpyHostToDevice); cudaMemcpy(d_ndis_ptr, ndis, (2 * ROW - 1) * (2 * COL - 1)*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_coor_ptr, coor, (2 * ROW - 1) * (2 * COL - 1)*2*sizeof(int), cudaMemcpyHostToDevice); } __global__ void cuda_calc_gwt(double var){ int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW) || (x >= COL)) { return; } d_gwt[y][x] = pow(d_gk[y][x], 1.0 / var); } void calc_gwt(double var,double gwt[ROW][COL]){ numBlock.x = iDivUp(COL, TPB); numBlock.y = iDivUp(ROW, TPB); cuda_calc_gwt<<<numBlock, numThread>>>(var); cudaMemcpy(gwt, d_gwt_ptr, ROW*COL*sizeof(double), cudaMemcpyDeviceToHost); } __global__ void cuda_calc_new_cor1() { int tx = threadIdx.x; int ty = threadIdx.y; int tid = ty * blockDim.x + tx; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= ROW) || (x >= COL)) { return; } __shared__ double sdata[TPB_X_TPB]; sdata[tid] = d_g_can1[y][x]*d_g_can2[y][x]; __syncthreads(); customAdd(sdata,&d_new_cor); } double calc_new_cor1(){ cudaMemset(d_new_cor_ptr,0,sizeof(double)); numBlock.x = iDivUp(COL, TPB); numBlock.y = iDivUp(ROW, TPB); cuda_calc_new_cor1<<<numBlock, numThread>>>(); double new_cor; 
cudaMemcpy(&new_cor, d_new_cor_ptr, sizeof(double), cudaMemcpyDeviceToHost); return new_cor; } __global__ void test(){ // int x = blockIdx.x*blockDim.x + threadIdx.x; // int y = blockIdx.y*blockDim.y + threadIdx.y; // if ((y >= ROW - 2 * margin) || (x >= 3 * 64 * (COL - 2 * margin))) { // return; // } } void cuda_procImg(double g_can[ROW][COL], int g_ang[ROW][COL], double g_nor[ROW][COL], char g_HoG[ROW][COL][8], char sHoG[ROW - 4][COL - 4], unsigned char image1[MAX_IMAGESIZE][MAX_IMAGESIZE]){ cudaMemset(d_cuda_defcan_vars_ptr, 0, 3 * sizeof(double)); if(procImg_No <= 2) cudaMemcpy(d_image1_ptr, image1, MAX_IMAGESIZE*MAX_IMAGESIZE*sizeof(unsigned char), cudaMemcpyHostToDevice); numBlock.x = iDivUp(COL, TPB); numBlock.y = iDivUp(ROW, TPB); cuda_defcan1<<<numBlock, numThread>>>(); cuda_defcan2<<<numBlock, numThread>>>(); cuda_roberts8<<<numBlock, numThread>>>(); cudaMemcpy(g_can, d_g_can1_ptr, ROW*COL*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(g_ang, d_g_ang1_ptr, ROW*COL*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(g_nor, d_g_nor1_ptr, ROW*COL*sizeof(double), cudaMemcpyDeviceToHost); procImg_No = procImg_No+1; } void cuda_calc_defcan1(double g_can1[ROW][COL], unsigned char image1[MAX_IMAGESIZE][MAX_IMAGESIZE]){ // cudaMemset(d_cuda_defcan_vars_ptr, 0, 3 * sizeof(double)); // // cudaMemcpy(d_image1_ptr, image1, MAX_IMAGESIZE*MAX_IMAGESIZE*sizeof(unsigned char), cudaMemcpyHostToDevice); // numBlock.x = iDivUp(COL, TPB); // numBlock.y = iDivUp(ROW, TPB); // cuda_defcan1<<<numBlock, numThread>>>(); // cuda_defcan2<<<numBlock, numThread>>>(); // cuda_roberts8<<<numBlock, numThread>>>(); // cudaMemcpy(g_can1, d_g_can1_ptr, ROW*COL*sizeof(double), cudaMemcpyDeviceToHost); } int needH = 1; void cuda_update_parameter(char sHoG1[ROW - 4][COL - 4]){ cudaMemcpy(d_sHoG1_ptr, sHoG1, (ROW - 4)*(COL-4)*sizeof(char), cudaMemcpyHostToDevice); // cudaMemcpy(d_g_ang1_ptr, g_ang1, (ROW )*(COL)*sizeof(int), cudaMemcpyHostToDevice); // cudaMemcpy(d_g_can1_ptr, g_can1, (ROW )*(COL)*sizeof(double), cudaMemcpyHostToDevice); } void cuda_update_image(unsigned char image1[MAX_IMAGESIZE][MAX_IMAGESIZE]){ cudaMemcpy(d_image1_ptr, image1, MAX_IMAGESIZE*MAX_IMAGESIZE*sizeof(unsigned char), cudaMemcpyHostToDevice); } void cuda_Ht(double newVar,int H_No){ if(H_No == 1){ numBlock.x = iDivUp(COL_Ht1, TPB); numBlock.y = iDivUp(ROW_H1, TPB); if (newVar > 1.0) { Ht1_1<<<numBlock, numThread>>>(); }else if (newVar < 1.0 / 32.0) { Ht1_2<<<numBlock, numThread>>>(); } else { int count = floor(log2(newVar)) + 5; Ht1_3<<<numBlock, numThread>>>(count, newVar); } } else if(H_No == 2){ numBlock.x = iDivUp(COL_Ht2, TPB); numBlock.y = iDivUp(ROW_H2, TPB); if (newVar > 1.0) { Ht2_1<<<numBlock, numThread>>>(); }else if (newVar < 1.0 / 32.0) { Ht2_2<<<numBlock, numThread>>>(); } else { int count = floor(log2(newVar)) + 5; Ht2_3<<<numBlock, numThread>>>(count, newVar); } } else if(H_No == 3){ numBlock.x = iDivUp(COL_Ht3, TPB); numBlock.y = iDivUp(ROW_H3, TPB); double var[6] = VARTABLE2; if (newVar > var[5]) { Ht3_1<<<numBlock, numThread>>>(); }else if (newVar < var[0]) { Ht3_2<<<numBlock, numThread>>>(); } else { int count = floor(log2(newVar)) + 10; Ht3_3<<<numBlock, numThread>>>(count, newVar); } } } double* cuda_calc_g(int calc_g_type){ // cout<<"calc_g_type: "<<calc_g_type<<endl; cudaMemset(d_g_ptr, 0, G_NUM * sizeof(double)); numBlock.x = iDivUp(COL, TPB); numBlock.y = iDivUp(ROW, TPB); if(calc_g_type == 1){ weightedAVG_1<<<numBlock, numThread>>>(); } else if(calc_g_type == 2){ weightedAVG_2<<<numBlock, numThread>>>(); } else 
if(calc_g_type == 3){ weightedAVG_3<<<numBlock, numThread>>>(); } cudaMemcpy(g, d_g_ptr, G_NUM*sizeof(double), cudaMemcpyDeviceToHost); return g; } __device__ void cuda_multiplyVect3x3(double inMat[3][3], double inVect[3], double outVect[3]) { int i, j; double sum; for(i = 0 ; i < 3 ; ++i) { sum = 0.0; for(j = 0 ; j < 3 ; ++j) { sum += inMat[i][j] * inVect[j]; } outVect[i] = sum; } } __global__ void cuda_calc_bilinear_normal_inverse_projection(int x_size1, int y_size1, int x_size2, int y_size2){ int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if ((y >= y_size1) || (x >= x_size1)) { return; } int cx, cy, cx2, cy2; if (y_size1 == ROW) { cx = CX, cy = CY; cx2 = CX2, cy2 = CY2; } else { cx = CX2, cy = CY2; cx2 = CX, cy2 = CY; } double inVect[3], outVect[3]; double x_new, y_new, x_frac, y_frac; double gray_new; int m, n; inVect[2] = 1.0; inVect[1] = y - cy; inVect[0] = x - cx; int i, j; double sum; for(i = 0 ; i < 3 ; ++i) { sum = 0.0; for(j = 0 ; j < 3 ; ++j) { sum += d_gpt[i][j] * inVect[j]; } outVect[i] = sum; } x_new = outVect[0] / outVect[2] + cx2; y_new = outVect[1] / outVect[2] + cy2; m = (int)floor(x_new); n = (int)floor(y_new); x_frac = x_new - m; y_frac = y_new - n; if (m >= 0 && m+1 < x_size2 && n >= 0 && n+1 < y_size2) { gray_new = (1.0 - y_frac) * ((1.0 - x_frac) * d_image2[n][m] + x_frac * d_image2[n][m+1]) + y_frac * ((1.0 - x_frac) * d_image2[n+1][m] + x_frac * d_image2[n+1][m+1]); d_image1[y][x] = (unsigned char)gray_new; } else { #ifdef BACKGBLACK d_image1[y][x] = BLACK; #else d_image1[y][x] = WHITE; #endif } } void cuda_bilinear_normal_inverse_projection(double gpt[3][3], int x_size1, int y_size1, int x_size2, int y_size2, unsigned char image1[MAX_IMAGESIZE][MAX_IMAGESIZE], unsigned char image2[MAX_IMAGESIZE][MAX_IMAGESIZE]) { /* inverse projection transformation of the image by bilinear interpolation */ numBlock.x = iDivUp(x_size1, TPB); numBlock.y = iDivUp(y_size1, TPB); cudaMemcpy(d_image2_ptr,image1,MAX_IMAGESIZE*MAX_IMAGESIZE*sizeof(unsigned char),cudaMemcpyHostToDevice); cudaMemcpy(d_gpt_ptr,gpt,3*3*sizeof(double),cudaMemcpyHostToDevice); cuda_calc_bilinear_normal_inverse_projection<<<numBlock, numThread>>>(x_size1, y_size1, x_size2, y_size2); cudaMemcpy(image2, d_image1_ptr, MAX_IMAGESIZE*MAX_IMAGESIZE*sizeof(unsigned char), cudaMemcpyDeviceToHost); } __global__ void cuda_global_fsHoGpat() { int tx = threadIdx.x; int ty = threadIdx.y; int tid = ty * blockDim.x + tx; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; // if ((y >= ROW) || (x >= COL)) { // return; // } double sHoGnumber[64] = sHoGNUMBER; int margin = 2; double minInit = sqrt((double)((ROW - 2 * margin) * (ROW - 2 * margin) + (COL - 2 * margin) * (COL - 2 * margin))); bool condition1 = ((y >= margin) && (x >= margin) && (y < ROW-margin) && (x < COL-margin) && d_sHoG1[y - margin][x - margin] != -1); bool condition2 = ((y >= margin) && (x >= margin) && (y < ROW-margin) && (x < COL-margin) && d_sHoG2[y - margin][x - margin] != -1); int angcode = 0; for (int s = 0 ; condition1 && s < 64 ; s++) { if (d_sHoG1[y - margin][x - margin] == sHoGnumber[s]) { angcode = s; break; } } __shared__ int sdata_int[2][TPB_X_TPB]; __shared__ double sdata_double[2][TPB_X_TPB]; sdata_int[0][tid]=condition1; sdata_int[1][tid]=condition2; double min_1 = condition1*minInit; double delta_1 = 0; if(condition1) delta_1 = d_D2[y - margin][x - margin + (COL - 2 * margin) * angcode]; min_1 = min(min_1,delta_1); double delta_2 = condition2; double min_2 = 
condition2*minInit; for (int y1 = 0 ; condition2 && y1 < TRUNC ; y1++) { if (y + d_coor[y1][0] < margin || y + d_coor[y1][0] >= ROW - margin || x + d_coor[y1][1] < margin || x + d_coor[y1][1] >= COL - margin ) continue; if (d_sHoG1[y + d_coor[y1][0] - margin][x + d_coor[y1][1] - margin] != d_sHoG2[y - margin][x - margin]) continue; // if (ndis[y1] > minInit) break; delta_2 = d_ndis[y1]; // printf("y1 = %d nn1 = %f \n", y1, ndis[y1]); if (delta_2 < min_2) min_2 = delta_2; // printf("%d %d %d d_ndis[y1]:%.5f\n",x,y,y1,d_ndis[y1]); // printf("y1 = %d\n", y1); break; } sdata_double[0][tid] = min_1; sdata_double[1][tid] = min_2; __syncthreads(); customAdd(sdata_int[0],d_cuda_global_fsHoGpat_count); customAdd(sdata_int[1],d_cuda_global_fsHoGpat_count+1); customAdd(sdata_double[0],d_cuda_global_fsHoGpat_dnn); customAdd(sdata_double[1],d_cuda_global_fsHoGpat_dnn+1); } double cuda_fsHoGpat(char sHoG1[ROW - 4][COL - 4]){ numBlock.x = iDivUp(COL, TPB); numBlock.y = iDivUp(ROW, TPB); cudaMemset(d_cuda_global_fsHoGpat_dnn_ptr,0,2*sizeof(double)); cudaMemset(d_cuda_global_fsHoGpat_count_ptr,0,2*sizeof(int)); cudaMemcpy(d_sHoG1_ptr,sHoG1,(ROW - 4)*(COL - 4)*sizeof(char),cudaMemcpyHostToDevice); cuda_global_fsHoGpat<<<numBlock, numThread>>>(); int cuda_global_fsHoGpat_count[2]; double cuda_global_fsHoGpat_dnn[2]; cudaMemcpy(cuda_global_fsHoGpat_count, d_cuda_global_fsHoGpat_count_ptr, 2*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(cuda_global_fsHoGpat_dnn, d_cuda_global_fsHoGpat_dnn_ptr, 2*sizeof(double), cudaMemcpyDeviceToHost); double dnn1 = cuda_global_fsHoGpat_dnn[0] / cuda_global_fsHoGpat_count[0]; double dnn2 = cuda_global_fsHoGpat_dnn[1] / cuda_global_fsHoGpat_count[1]; return (dnn1 + dnn2)/2.0; } __global__ void cuda_global_fwinpat() { int tx = threadIdx.x; int ty = threadIdx.y; int tid = ty * blockDim.x + tx; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; // if ((y >= ROW) || (x >= COL)) { // return; // } // if(x==30 && y==60){ // printf("%d %d\n", d_g_ang1[y][x],d_g_ang2[y][x]); // } int margine = 4; double minInit = sqrt((double)((ROW - 2 * margine) * (ROW - 2 * margine) + (COL - 2 * margine) * (COL - 2 * margine))); bool condition1 = ((y >= MARGINE) && (x >= MARGINE) && (y < ROW-MARGINE) && (x < COL-MARGINE) && d_g_ang1[y - MARGINE][x - MARGINE] != -1); bool condition2 = ((y >= MARGINE) && (x >= MARGINE) && (y < ROW-MARGINE) && (x < COL-MARGINE) && d_g_ang2[y - MARGINE][x - MARGINE] != -1); int angcode1 = 0; if(condition1)angcode1 = d_g_ang1[y][x]; __shared__ int sdata_int[2][TPB_X_TPB]; __shared__ double sdata_double[2][TPB_X_TPB]; sdata_int[0][tid]=condition1; sdata_int[1][tid]=condition2; double min_1 = condition1*minInit; double delta_1 = 0; if(condition1) delta_1 = d_D1[y][x + COL * angcode1]; min_1 = min(min_1,delta_1); double delta_2 = condition2; double min_2 = condition2*minInit; int angcode2 = 0; if(condition2)angcode2 = d_g_ang2[y][x]; for (int y1 = 0 ; condition2 && y1 < (2 * ROW - 1) * (2 * COL - 1) ; y1++) { if (y + d_coor[y1][0] < 0 || y + d_coor[y1][0] >= ROW || x + d_coor[y1][1] < 0 || x + d_coor[y1][1] >= COL ) continue; if (d_g_ang1[y + d_coor[y1][0]][x + d_coor[y1][1]] != angcode2) continue; delta_2 = d_ndis[y1]; // printf("y1 = %d nn1 = %f \n", y1, ndis[y1]); if (delta_2 < min_2) min_2 = delta_2; break; } sdata_double[0][tid] = min_1; sdata_double[1][tid] = min_2; __syncthreads(); customAdd(sdata_int[0],d_cuda_global_fwinpat_count); customAdd(sdata_int[1],d_cuda_global_fwinpat_count+1); 
customAdd(sdata_double[0],d_cuda_global_fwinpat_dnn); customAdd(sdata_double[1],d_cuda_global_fwinpat_dnn+1); } double cuda_fwinpat(){ numBlock.x = iDivUp(COL, TPB); numBlock.y = iDivUp(ROW, TPB); cudaMemset(d_cuda_global_fwinpat_dnn_ptr,0,2*sizeof(double)); cudaMemset(d_cuda_global_fwinpat_count_ptr,0,2*sizeof(int)); // cudaMemcpy(d_g_ang2_ptr,g_ang2,(ROW)*(COL)*sizeof(int),cudaMemcpyHostToDevice); cuda_global_fwinpat<<<numBlock, numThread>>>(); gpuErrchk( cudaDeviceSynchronize() ); gpuErrchk( cudaThreadSynchronize() ); // Checks for execution error gpuErrchk( cudaPeekAtLastError() ); // Checks for launch error int cuda_global_fwinpat_count[2]; double cuda_global_fwinpat_dnn[2]; cudaMemcpy(cuda_global_fwinpat_count, d_cuda_global_fwinpat_count_ptr, 2*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(cuda_global_fwinpat_dnn, d_cuda_global_fwinpat_dnn_ptr, 2*sizeof(double), cudaMemcpyDeviceToHost); double dnn1 = cuda_global_fwinpat_dnn[0] / cuda_global_fwinpat_count[0]; double dnn2 = cuda_global_fwinpat_dnn[1] / cuda_global_fwinpat_count[1]; return (dnn1 + dnn2)/2.0; }
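The customAdd() routine in the file above is the shared-memory tree reduction from the NVIDIA reduction whitepaper cited in its comment, fully unrolled with a __syncthreads() after every step and finished with one atomicAdd per block into the d_g accumulators. The following is a minimal standalone sketch of that pattern, not taken from the source: the kernel name and the 256-thread block size are illustrative, and float is used because atomicAdd on double requires sm_60 or newer.

__global__ void blockSumAtomic(const float *in, float *out, int n) {
    __shared__ float sdata[256];                   // assumes blockDim.x == 256
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tid;
    sdata[tid] = (i < n) ? in[i] : 0.0f;           // guard threads past the end of the array
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) { // tree reduction in shared memory
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) atomicAdd(out, sdata[0]);        // one global accumulation per block
}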
cbcf6f6ba517553fbb720fb6276c9087ea04b265.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020 NVIDIA Corporation. * Copyright (c) 2018-2020 Chris Choy ([email protected]). * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural * Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part * of the code. */ #include "coordinate_map.hpp" #include "coordinate_map_key.hpp" #include "coordinate_map_manager.hpp" #include "errors.hpp" #include "types.hpp" #include "utils.hpp" #include "convolution_kernel.cuh" #include "kernel_map.cuh" #include "convolution_cpu.cpp" #include <ATen/hip/HIPUtils.h> #include <pybind11/pybind11.h> #include <torch/extension.h> namespace minkowski { template <typename coordinate_type, template <typename C> class TemplatedAllocator> at::Tensor ConvolutionForwardGPU( at::Tensor const &in_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // bool const expand_coordinates, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) { ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous"); ASSERT(kernel.is_contiguous(), "kernel must be contiguous"); ASSERT(in_feat.is_cuda(), "in_feat must be CUDA"); ASSERT(kernel.is_cuda(), "kernel must be CUDA"); ASSERT(at::cuda::check_device({in_feat, kernel}), "in_feat and kernel must be on the same device"); ASSERT(in_feat.scalar_type() == kernel.scalar_type(), "type mismatch"); ASSERT(in_feat.dim() == 2, "in_feat.dim():", in_feat.dim()); ASSERT(kernel.dim() == 3, "kernel.dim():", kernel.dim()); ASSERT(in_feat.size(1) == kernel.size(1), "Input feature size and kernel size mismatch"); // TODO kernel volume assertion. 
// create out coordinate map // TODO: custom upsampling coordinate_map_key_type in_key = p_in_map_key->get_key(); ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND); ASSERT(in_feat.size(0) == p_map_manager->size(in_key), "Invalid in_feat size", in_feat.size(0), "!=", p_map_manager->size(in_key)); if (!p_out_map_key->is_key_set()) { if (expand_coordinates) { auto map_it = p_map_manager->find(p_in_map_key->get_key()); ASSERT(map_it != p_map_manager->map_end(), ERROR_MAP_NOT_FOUND); auto const &in_map = (*map_it).second; auto out_tensor_stride = detail::stride_tensor_stride( in_map.get_tensor_stride(), kernel_stride, false /* is_transpose */); auto kernel_region = cpu_kernel_region<coordinate_type>( region_type, // in_map.coordinate_size(), // in_map.get_tensor_stride().data(), // kernel_size.data(), // kernel_dilation.data(), // 0, // volume. Will be initialized automatically offset.data_ptr<coordinate_type>(), offset.size(0), false // is_transpose ); coordinate_map_key_type out_key = std::get<0>(p_map_manager->stride_region( in_key, kernel_region, out_tensor_stride, expand_coordinates)); p_out_map_key->set_key(out_key); } else { coordinate_map_key_type out_key = std::get<0>(p_map_manager->stride(in_key, kernel_stride)); p_out_map_key->set_key(out_key); } } auto const &in_out = p_map_manager->kernel_map( p_in_map_key, // p_out_map_key, // kernel_size, // kernel_stride, // kernel_dilation, // region_type, // offset, false /* is_transpose */, false /* is_pool */); auto const out_nrows = p_map_manager->size(p_out_map_key->get_key()); at::Tensor out_feat = torch::zeros({out_nrows, kernel.size(2)}, in_feat.options()); LOG_DEBUG("Allocated", out_nrows, "x", kernel.size(2), "out_features."); hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream(); hipblasSetStream(handle, stream); LOG_DEBUG("Convolution on", out_nrows, "x", kernel.size(2)); AT_DISPATCH_FLOATING_TYPES( in_feat.scalar_type(), "convolution_forward_gpu", [&] { LOG_DEBUG("ConvolutionForwardKernelGPU with", std::is_same<float, scalar_t>::value ? 
"float" : "double"); TemplatedAllocator<char> byte_allocator; ConvolutionForwardKernelGPU<scalar_t, default_types::index_type, TemplatedAllocator<char>>( in_feat.template data_ptr<scalar_t>(), // in_feat.size(1), // out_feat.template data_ptr<scalar_t>(), // out_feat.size(1), // kernel.template data_ptr<scalar_t>(), // in_out, // in_feat.size(0), // out_nrows, // byte_allocator, // p_map_manager->algorithm(), // convolution_mode, handle, stream); }); return out_feat; } template <typename coordinate_type, template <typename C> class TemplatedAllocator> std::pair<at::Tensor, at::Tensor> ConvolutionBackwardGPU( at::Tensor const &in_feat, // at::Tensor &grad_out_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) { ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous"); // ASSERT(grad_out_feat.is_contiguous(), "grad_out_feata must be contiguous"); grad_out_feat = grad_out_feat.contiguous(); ASSERT(kernel.is_contiguous(), "kernel must be contiguous"); ASSERT(in_feat.is_cuda(), "in_feat must be CUDA"); ASSERT(grad_out_feat.is_cuda(), "in_feat must be CUDA"); ASSERT(kernel.is_cuda(), "kernel must be CUDA"); ASSERT(at::cuda::check_device({in_feat, grad_out_feat, kernel}), "in_feat, grad_out_feat, kernel must be on the same device"); ASSERT(in_feat.scalar_type() == kernel.scalar_type(), "type mismatch"); ASSERT(in_feat.scalar_type() == grad_out_feat.scalar_type(), "type mismatch"); ASSERT(in_feat.dim() == 2, "in_feat.dim():", in_feat.dim()); ASSERT(grad_out_feat.dim() == 2, "grad_out_feat.dim():", grad_out_feat.dim()); ASSERT(kernel.dim() == 3, "kernel.dim():", kernel.dim()); ASSERT(in_feat.size(1) == kernel.size(1), "Input feature size and kernel size mismatch"); coordinate_map_key_type in_key = p_in_map_key->get_key(); ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND); coordinate_map_key_type out_key = p_out_map_key->get_key(); ASSERT(p_map_manager->exists(out_key), ERROR_MAP_NOT_FOUND); auto const &in_out = p_map_manager->kernel_map(p_in_map_key, // p_out_map_key, // kernel_size, // kernel_stride, // kernel_dilation, // region_type, // offset, false, false); at::Tensor grad_in_feat = torch::zeros({in_feat.size(0), in_feat.size(1)}, in_feat.options()); at::Tensor grad_kernel = torch::zeros( {kernel.size(0), kernel.size(1), kernel.size(2)}, kernel.options()); hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream(); hipblasSetStream(handle, stream); AT_DISPATCH_FLOATING_TYPES( in_feat.scalar_type(), "convolution_backward_gpu", [&] { TemplatedAllocator<char> byte_allocator; ConvolutionBackwardKernelGPU<scalar_t, default_types::index_type, TemplatedAllocator<char>>( in_feat.template data_ptr<scalar_t>(), // grad_in_feat.template data_ptr<scalar_t>(), // in_feat.size(1), // grad_out_feat.template data_ptr<scalar_t>(), // grad_out_feat.size(1), // kernel.template data_ptr<scalar_t>(), // grad_kernel.template data_ptr<scalar_t>(), // in_out, // in_feat.size(0), // grad_out_feat.size(0), // byte_allocator, // p_map_manager->algorithm(), // convolution_mode, // handle, stream); }); return 
std::make_pair(grad_in_feat, grad_kernel); } // Forward // default_allocator template at::Tensor ConvolutionForwardGPU<default_types::dcoordinate_type, detail::default_allocator>( at::Tensor const &in_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // bool const expand_coordinates, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator> *p_map_manager); // c10_allocator template at::Tensor ConvolutionForwardGPU<default_types::dcoordinate_type, detail::c10_allocator>( at::Tensor const &in_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // bool const expand_coordinates, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator> *p_map_manager); // Backward // default_allocator template std::pair<at::Tensor, at::Tensor> ConvolutionBackwardGPU<default_types::dcoordinate_type, detail::default_allocator>( at::Tensor const &in_feat, // at::Tensor &grad_out_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator> *p_map_manager); // c10_allocator template std::pair<at::Tensor, at::Tensor> ConvolutionBackwardGPU<default_types::dcoordinate_type, detail::c10_allocator>( at::Tensor const &in_feat, // at::Tensor &grad_out_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator> *p_map_manager); } // end namespace minkowski
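Both the forward and backward paths above funnel the actual math through AT_DISPATCH_FLOATING_TYPES, which instantiates the lambda with the compile-time scalar_t that matches the tensor's runtime dtype. The sketch below is illustrative only and is not part of the source: the helper name double_values is made up, CPU tensors are assumed, and a trivial doubling stands in for the real convolution call.

#include <ATen/Dispatch.h>
#include <torch/extension.h>

at::Tensor double_values(at::Tensor const &in) {      // hypothetical example helper
  at::Tensor src = in.contiguous();                   // assume a contiguous CPU tensor
  at::Tensor out = torch::empty_like(src);
  AT_DISPATCH_FLOATING_TYPES(src.scalar_type(), "double_values", [&] {
    scalar_t const *s = src.data_ptr<scalar_t>();     // scalar_t is float or double here
    scalar_t *d = out.data_ptr<scalar_t>();
    for (int64_t i = 0; i < src.numel(); ++i)
      d[i] = s[i] + s[i];
  });
  return out;
}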
cbcf6f6ba517553fbb720fb6276c9087ea04b265.cu
/* * Copyright (c) 2020 NVIDIA Corporation. * Copyright (c) 2018-2020 Chris Choy ([email protected]). * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural * Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part * of the code. */ #include "coordinate_map.hpp" #include "coordinate_map_key.hpp" #include "coordinate_map_manager.hpp" #include "errors.hpp" #include "types.hpp" #include "utils.hpp" #include "convolution_kernel.cuh" #include "kernel_map.cuh" #include "convolution_cpu.cpp" #include <ATen/cuda/CUDAUtils.h> #include <pybind11/pybind11.h> #include <torch/extension.h> namespace minkowski { template <typename coordinate_type, template <typename C> class TemplatedAllocator> at::Tensor ConvolutionForwardGPU( at::Tensor const &in_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // bool const expand_coordinates, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) { ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous"); ASSERT(kernel.is_contiguous(), "kernel must be contiguous"); ASSERT(in_feat.is_cuda(), "in_feat must be CUDA"); ASSERT(kernel.is_cuda(), "kernel must be CUDA"); ASSERT(at::cuda::check_device({in_feat, kernel}), "in_feat and kernel must be on the same device"); ASSERT(in_feat.scalar_type() == kernel.scalar_type(), "type mismatch"); ASSERT(in_feat.dim() == 2, "in_feat.dim():", in_feat.dim()); ASSERT(kernel.dim() == 3, "kernel.dim():", kernel.dim()); ASSERT(in_feat.size(1) == kernel.size(1), "Input feature size and kernel size mismatch"); // TODO kernel volume assertion. 
// create out coordinate map // TODO: custom upsampling coordinate_map_key_type in_key = p_in_map_key->get_key(); ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND); ASSERT(in_feat.size(0) == p_map_manager->size(in_key), "Invalid in_feat size", in_feat.size(0), "!=", p_map_manager->size(in_key)); if (!p_out_map_key->is_key_set()) { if (expand_coordinates) { auto map_it = p_map_manager->find(p_in_map_key->get_key()); ASSERT(map_it != p_map_manager->map_end(), ERROR_MAP_NOT_FOUND); auto const &in_map = (*map_it).second; auto out_tensor_stride = detail::stride_tensor_stride( in_map.get_tensor_stride(), kernel_stride, false /* is_transpose */); auto kernel_region = cpu_kernel_region<coordinate_type>( region_type, // in_map.coordinate_size(), // in_map.get_tensor_stride().data(), // kernel_size.data(), // kernel_dilation.data(), // 0, // volume. Will be initialized automatically offset.data_ptr<coordinate_type>(), offset.size(0), false // is_transpose ); coordinate_map_key_type out_key = std::get<0>(p_map_manager->stride_region( in_key, kernel_region, out_tensor_stride, expand_coordinates)); p_out_map_key->set_key(out_key); } else { coordinate_map_key_type out_key = std::get<0>(p_map_manager->stride(in_key, kernel_stride)); p_out_map_key->set_key(out_key); } } auto const &in_out = p_map_manager->kernel_map( p_in_map_key, // p_out_map_key, // kernel_size, // kernel_stride, // kernel_dilation, // region_type, // offset, false /* is_transpose */, false /* is_pool */); auto const out_nrows = p_map_manager->size(p_out_map_key->get_key()); at::Tensor out_feat = torch::zeros({out_nrows, kernel.size(2)}, in_feat.options()); LOG_DEBUG("Allocated", out_nrows, "x", kernel.size(2), "out_features."); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream(); cublasSetStream(handle, stream); LOG_DEBUG("Convolution on", out_nrows, "x", kernel.size(2)); AT_DISPATCH_FLOATING_TYPES( in_feat.scalar_type(), "convolution_forward_gpu", [&] { LOG_DEBUG("ConvolutionForwardKernelGPU with", std::is_same<float, scalar_t>::value ? 
"float" : "double"); TemplatedAllocator<char> byte_allocator; ConvolutionForwardKernelGPU<scalar_t, default_types::index_type, TemplatedAllocator<char>>( in_feat.template data_ptr<scalar_t>(), // in_feat.size(1), // out_feat.template data_ptr<scalar_t>(), // out_feat.size(1), // kernel.template data_ptr<scalar_t>(), // in_out, // in_feat.size(0), // out_nrows, // byte_allocator, // p_map_manager->algorithm(), // convolution_mode, handle, stream); }); return out_feat; } template <typename coordinate_type, template <typename C> class TemplatedAllocator> std::pair<at::Tensor, at::Tensor> ConvolutionBackwardGPU( at::Tensor const &in_feat, // at::Tensor &grad_out_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) { ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous"); // ASSERT(grad_out_feat.is_contiguous(), "grad_out_feata must be contiguous"); grad_out_feat = grad_out_feat.contiguous(); ASSERT(kernel.is_contiguous(), "kernel must be contiguous"); ASSERT(in_feat.is_cuda(), "in_feat must be CUDA"); ASSERT(grad_out_feat.is_cuda(), "in_feat must be CUDA"); ASSERT(kernel.is_cuda(), "kernel must be CUDA"); ASSERT(at::cuda::check_device({in_feat, grad_out_feat, kernel}), "in_feat, grad_out_feat, kernel must be on the same device"); ASSERT(in_feat.scalar_type() == kernel.scalar_type(), "type mismatch"); ASSERT(in_feat.scalar_type() == grad_out_feat.scalar_type(), "type mismatch"); ASSERT(in_feat.dim() == 2, "in_feat.dim():", in_feat.dim()); ASSERT(grad_out_feat.dim() == 2, "grad_out_feat.dim():", grad_out_feat.dim()); ASSERT(kernel.dim() == 3, "kernel.dim():", kernel.dim()); ASSERT(in_feat.size(1) == kernel.size(1), "Input feature size and kernel size mismatch"); coordinate_map_key_type in_key = p_in_map_key->get_key(); ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND); coordinate_map_key_type out_key = p_out_map_key->get_key(); ASSERT(p_map_manager->exists(out_key), ERROR_MAP_NOT_FOUND); auto const &in_out = p_map_manager->kernel_map(p_in_map_key, // p_out_map_key, // kernel_size, // kernel_stride, // kernel_dilation, // region_type, // offset, false, false); at::Tensor grad_in_feat = torch::zeros({in_feat.size(0), in_feat.size(1)}, in_feat.options()); at::Tensor grad_kernel = torch::zeros( {kernel.size(0), kernel.size(1), kernel.size(2)}, kernel.options()); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream(); cublasSetStream(handle, stream); AT_DISPATCH_FLOATING_TYPES( in_feat.scalar_type(), "convolution_backward_gpu", [&] { TemplatedAllocator<char> byte_allocator; ConvolutionBackwardKernelGPU<scalar_t, default_types::index_type, TemplatedAllocator<char>>( in_feat.template data_ptr<scalar_t>(), // grad_in_feat.template data_ptr<scalar_t>(), // in_feat.size(1), // grad_out_feat.template data_ptr<scalar_t>(), // grad_out_feat.size(1), // kernel.template data_ptr<scalar_t>(), // grad_kernel.template data_ptr<scalar_t>(), // in_out, // in_feat.size(0), // grad_out_feat.size(0), // byte_allocator, // p_map_manager->algorithm(), // convolution_mode, // handle, stream); }); return std::make_pair(grad_in_feat, 
grad_kernel); } // Forward // default_allocator template at::Tensor ConvolutionForwardGPU<default_types::dcoordinate_type, detail::default_allocator>( at::Tensor const &in_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // bool const expand_coordinates, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator> *p_map_manager); // c10_allocator template at::Tensor ConvolutionForwardGPU<default_types::dcoordinate_type, detail::c10_allocator>( at::Tensor const &in_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // bool const expand_coordinates, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator> *p_map_manager); // Backward // default_allocator template std::pair<at::Tensor, at::Tensor> ConvolutionBackwardGPU<default_types::dcoordinate_type, detail::default_allocator>( at::Tensor const &in_feat, // at::Tensor &grad_out_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator> *p_map_manager); // c10_allocator template std::pair<at::Tensor, at::Tensor> ConvolutionBackwardGPU<default_types::dcoordinate_type, detail::c10_allocator>( at::Tensor const &in_feat, // at::Tensor &grad_out_feat, // at::Tensor const &kernel, // default_types::stride_type const &kernel_size, // default_types::stride_type const &kernel_stride, // default_types::stride_type const &kernel_dilation, // RegionType::Type const region_type, // at::Tensor const &offset, // ConvolutionMode::Type const convolution_mode, // CoordinateMapKey *p_in_map_key, // CoordinateMapKey *p_out_map_key, // gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator> *p_map_manager); } // end namespace minkowski
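Before dispatching, both ConvolutionForwardGPU and ConvolutionBackwardGPU above fetch ATen's shared cuBLAS handle and bind it to the current PyTorch stream so the BLAS work is ordered with the rest of the computation. Isolated into a small helper for clarity, using the same three calls as the source; the function name is illustrative.

#include <ATen/cuda/CUDAContext.h>
#include <cublas_v2.h>

cublasHandle_t blas_handle_on_current_stream() {
  cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
  cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
  cublasSetStream(handle, stream);   // cuBLAS calls now follow PyTorch's stream order
  return handle;
}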
bd3538b22dace2f31599375d2ec97bd70a8df7d4.hip
// !!! This is a file automatically generated by hipify!!! #include "flo/device/similarity_xform_direct.cuh" #include <thrust/tabulate.h> #include <cusp/monitor.h> #include <cusp/krylov/cg.h> #include <cusp/precond/diagonal.h> #include <thrust/transform_reduce.h> #include <thrust/transform.h> #include <thrust/scatter.h> #include <cusp/print.h> FLO_DEVICE_NAMESPACE_BEGIN namespace direct { FLO_API void similarity_xform( cusp::coo_matrix<int, real, cusp::device_memory>::const_view di_dirac, cusp::array2d<real, cusp::device_memory>::view do_xform, const real i_tolerance, const int i_iterations) { cu_raii::sparse::Handle sparse_handle; cu_raii::solver::SolverSp solver; similarity_xform( &sparse_handle, &solver, di_dirac, do_xform, i_tolerance, i_iterations); } FLO_API void similarity_xform( cu_raii::sparse::Handle* io_sparse_handle, cu_raii::solver::SolverSp* io_solver, cusp::coo_matrix<int, real, cusp::device_memory>::const_view di_dirac, cusp::array2d<real, cusp::device_memory>::view do_xform, const real i_tolerance, const int i_iterations) { // Convert the row indices to csr row offsets cusp::array1d<int, cusp::device_memory> row_offsets(di_dirac.num_rows + 1); cusp::indices_to_offsets(di_dirac.row_indices, row_offsets); // Fill our initial guess with the identity (quaternions) cusp::array1d<real, cusp::device_memory> b(di_dirac.num_cols); thrust::tabulate( do_xform.values.begin(), do_xform.values.end(), [] __device__(int x) { // When x is a multiple of 4, return one return !(x & 3); }); // Get a cuSolver and cuSparse handle io_solver->error_assert(__LINE__); io_sparse_handle->error_assert(__LINE__); // Create a matrix description cu_raii::sparse::MatrixDescription description_D(&io_sparse_handle->status); io_sparse_handle->error_assert(__LINE__); // Tell cuSparse what matrix to expect hipsparseSetMatType(description_D, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatFillMode(description_D, HIPSPARSE_FILL_MODE_LOWER); hipsparseSetMatDiagType(description_D, HIPSPARSE_DIAG_TYPE_NON_UNIT); hipsparseSetMatIndexBase(description_D, HIPSPARSE_INDEX_BASE_ZERO); #if __CUDACC_VER_MAJOR__ < 10 // Tell cusolver to use symamd reordering if we're compiling with cuda 9 const int reorder = 2; #else // Tell cusolver to use metis reordering if we're compiling with cuda 10 const int reorder = 3; #endif // cusolver will set this flag int singularity = -1; // Solve the system Dx = bx, using back substitution for (int iter = 0; iter < (i_iterations + 1) || singularity != -1; ++iter) { const real rnorm = 1.f / cusp::blas::nrm2(do_xform.values); thrust::transform(do_xform.values.begin(), do_xform.values.end(), b.begin(), [=] __device__(real x) { return x * rnorm; }); io_solver->status = cusolverSpScsrlsvchol(*io_solver, di_dirac.num_rows, di_dirac.num_entries, description_D, di_dirac.values.begin().base().get(), row_offsets.data().get(), di_dirac.column_indices.begin().base().get(), b.begin().base().get(), i_tolerance, reorder, do_xform.values.begin().base().get(), &singularity); io_solver->error_assert(__LINE__); } if (singularity != -1) { std::cout << "Singularity: " << singularity << '\n'; } // Normalize the result and re-arrange simultaneously to reduce kernel the // number of launches { // Normalize and shuffle in the same kernel call const real rnorm = 1.f / cusp::blas::nrm2(do_xform.values); thrust::copy(do_xform.values.begin(), do_xform.values.end(), b.begin()); auto xin_ptr = thrust::device_pointer_cast( reinterpret_cast<real4*>(b.data().get())); auto xout_ptr = 
thrust::make_zip_iterator(thrust::make_tuple(do_xform.row(3).begin(), do_xform.row(0).begin(), do_xform.row(1).begin(), do_xform.row(2).begin())); thrust::transform( xin_ptr, xin_ptr + do_xform.num_cols, xout_ptr, [=] __device__ (real4 quat) { return thrust::make_tuple( quat.x * rnorm, quat.y * rnorm, quat.z * rnorm, quat.w * rnorm); }); } } } FLO_DEVICE_NAMESPACE_END
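The thrust::tabulate call above seeds the solve with identity quaternions: !(x & 3) evaluates to 1 exactly when the flat index is a multiple of 4, producing the pattern 1,0,0,0 for every vertex. A standalone host-side illustration of that initializer, not part of the source:

#include <thrust/host_vector.h>
#include <thrust/tabulate.h>
#include <cstdio>

int main() {
  thrust::host_vector<float> q(8);                          // two quaternions' worth of values
  thrust::tabulate(q.begin(), q.end(),
                   [](int x) { return float(!(x & 3)); });  // 1 at every multiple of 4
  for (int i = 0; i < 8; ++i) std::printf("%g ", (float)q[i]);  // prints: 1 0 0 0 1 0 0 0
  std::printf("\n");
  return 0;
}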
bd3538b22dace2f31599375d2ec97bd70a8df7d4.cu
#include "flo/device/similarity_xform_direct.cuh" #include <thrust/tabulate.h> #include <cusp/monitor.h> #include <cusp/krylov/cg.h> #include <cusp/precond/diagonal.h> #include <thrust/transform_reduce.h> #include <thrust/transform.h> #include <thrust/scatter.h> #include <cusp/print.h> FLO_DEVICE_NAMESPACE_BEGIN namespace direct { FLO_API void similarity_xform( cusp::coo_matrix<int, real, cusp::device_memory>::const_view di_dirac, cusp::array2d<real, cusp::device_memory>::view do_xform, const real i_tolerance, const int i_iterations) { cu_raii::sparse::Handle sparse_handle; cu_raii::solver::SolverSp solver; similarity_xform( &sparse_handle, &solver, di_dirac, do_xform, i_tolerance, i_iterations); } FLO_API void similarity_xform( cu_raii::sparse::Handle* io_sparse_handle, cu_raii::solver::SolverSp* io_solver, cusp::coo_matrix<int, real, cusp::device_memory>::const_view di_dirac, cusp::array2d<real, cusp::device_memory>::view do_xform, const real i_tolerance, const int i_iterations) { // Convert the row indices to csr row offsets cusp::array1d<int, cusp::device_memory> row_offsets(di_dirac.num_rows + 1); cusp::indices_to_offsets(di_dirac.row_indices, row_offsets); // Fill our initial guess with the identity (quaternions) cusp::array1d<real, cusp::device_memory> b(di_dirac.num_cols); thrust::tabulate( do_xform.values.begin(), do_xform.values.end(), [] __device__(int x) { // When x is a multiple of 4, return one return !(x & 3); }); // Get a cuSolver and cuSparse handle io_solver->error_assert(__LINE__); io_sparse_handle->error_assert(__LINE__); // Create a matrix description cu_raii::sparse::MatrixDescription description_D(&io_sparse_handle->status); io_sparse_handle->error_assert(__LINE__); // Tell cuSparse what matrix to expect cusparseSetMatType(description_D, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatFillMode(description_D, CUSPARSE_FILL_MODE_LOWER); cusparseSetMatDiagType(description_D, CUSPARSE_DIAG_TYPE_NON_UNIT); cusparseSetMatIndexBase(description_D, CUSPARSE_INDEX_BASE_ZERO); #if __CUDACC_VER_MAJOR__ < 10 // Tell cusolver to use symamd reordering if we're compiling with cuda 9 const int reorder = 2; #else // Tell cusolver to use metis reordering if we're compiling with cuda 10 const int reorder = 3; #endif // cusolver will set this flag int singularity = -1; // Solve the system Dx = bx, using back substitution for (int iter = 0; iter < (i_iterations + 1) || singularity != -1; ++iter) { const real rnorm = 1.f / cusp::blas::nrm2(do_xform.values); thrust::transform(do_xform.values.begin(), do_xform.values.end(), b.begin(), [=] __device__(real x) { return x * rnorm; }); io_solver->status = cusolverSpScsrlsvchol(*io_solver, di_dirac.num_rows, di_dirac.num_entries, description_D, di_dirac.values.begin().base().get(), row_offsets.data().get(), di_dirac.column_indices.begin().base().get(), b.begin().base().get(), i_tolerance, reorder, do_xform.values.begin().base().get(), &singularity); io_solver->error_assert(__LINE__); } if (singularity != -1) { std::cout << "Singularity: " << singularity << '\n'; } // Normalize the result and re-arrange simultaneously to reduce kernel the // number of launches { // Normalize and shuffle in the same kernel call const real rnorm = 1.f / cusp::blas::nrm2(do_xform.values); thrust::copy(do_xform.values.begin(), do_xform.values.end(), b.begin()); auto xin_ptr = thrust::device_pointer_cast( reinterpret_cast<real4*>(b.data().get())); auto xout_ptr = thrust::make_zip_iterator(thrust::make_tuple(do_xform.row(3).begin(), do_xform.row(0).begin(), 
do_xform.row(1).begin(), do_xform.row(2).begin())); thrust::transform( xin_ptr, xin_ptr + do_xform.num_cols, xout_ptr, [=] __device__ (real4 quat) { return thrust::make_tuple( quat.x * rnorm, quat.y * rnorm, quat.z * rnorm, quat.w * rnorm); }); } } } FLO_DEVICE_NAMESPACE_END
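cusp::indices_to_offsets, used above to turn the Dirac matrix's sorted COO row indices into the CSR row offsets that cusolverSpScsrlsvchol expects, amounts to a per-row histogram followed by a prefix sum. The CPU sketch below shows only the semantics and is not the cusp implementation.

#include <vector>

std::vector<int> indices_to_offsets(const std::vector<int> &row_indices, int num_rows) {
  std::vector<int> offsets(num_rows + 1, 0);
  for (int r : row_indices) ++offsets[r + 1];                        // count entries per row
  for (int i = 0; i < num_rows; ++i) offsets[i + 1] += offsets[i];   // exclusive prefix sum
  return offsets;                  // e.g. rows {0,0,1,3} with 4 rows -> {0,2,3,3,4}
}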
ac46544e0f20a4491ae13d0669beeee175af162f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/elementwise/elementwise_div_op.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.cu.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; namespace plat = paddle::platform; namespace paddle { namespace operators { template <typename T> struct SameDimsElemwiseDiv<platform::CUDADeviceContext, T> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, framework::Tensor* z) { DivRangeFunctor<T> functor(x->data<T>(), y->data<T>(), z->data<T>()); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx, x->numel()); for_range(functor); } }; template <> struct SameDimsElemwiseDiv<platform::CUDADeviceContext, platform::float16> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, framework::Tensor* z) { auto size = x->numel(); dim3 gird_size = dim3( (size / 2 + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1); dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1); const half* x2 = reinterpret_cast<const half*>(x->data<platform::float16>()); const half* y2 = reinterpret_cast<const half*>(y->data<platform::float16>()); half* z2 = reinterpret_cast<half*>(z->data<platform::float16>()); hipLaunchKernelGGL(( SameDimsElemwiseDivCUDAKernel), dim3(gird_size), dim3(block_size), 0, ctx.template device_context<platform::CUDADeviceContext>().stream(), x2, y2, z2, size); } }; template <typename T> static __global__ void SimpleElemwiseDivGradCUDAKernel(const T* x, const T* y, const T* out, const T* dout, int64_t size, T* dx, T* dy) { int col = blockIdx.x * blockDim.x + threadIdx.x; while (col < size) { T o = dout[col]; dx[col] = o / y[col]; dy[col] = -o * out[col] / y[col]; col += blockDim.x * gridDim.x; } } template <typename DeviceContext, typename T> typename std::enable_if< std::is_same<DeviceContext, plat::CUDADeviceContext>::value>::type elementwise_div_grad(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, const framework::Tensor* out, const framework::Tensor* dout, framework::Tensor* dx, framework::Tensor* dy) { dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1); auto size = x->numel(); dim3 gird_size = dim3((size + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1); hipLaunchKernelGGL(( SimpleElemwiseDivGradCUDAKernel< T>), dim3(gird_size), dim3(block_size), 0, ctx.template device_context<plat::CUDADeviceContext>().stream(), x->data<T>(), y->data<T>(), out->data<T>(), dout->data<T>(), size, dx->mutable_data<T>(ctx.GetPlace()), dy->mutable_data<T>(ctx.GetPlace())); } } // namespace operators } // namespace paddle 
REGISTER_OP_CUDA_KERNEL( elementwise_div, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, int64_t>); REGISTER_OP_CUDA_KERNEL( elementwise_div_grad, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, int64_t>); REGISTER_OP_CUDA_KERNEL( elementwise_div_grad_grad, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, int64_t>);
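SimpleElemwiseDivGradCUDAKernel above is the quotient rule written out: with out = x / y, d(out)/dx = 1/y and d(out)/dy = -x/y^2 = -out/y, which is exactly dx[col] = o / y[col] and dy[col] = -o * out[col] / y[col]. A small standalone finite-difference check of those formulas, illustrative and not part of the operator:

#include <cassert>
#include <cmath>

int main() {
  double x = 3.0, y = 2.0, dout = 1.0, eps = 1e-6;
  double out = x / y;
  double dx = dout / y;                 // analytic gradient w.r.t. x
  double dy = -dout * out / y;          // analytic gradient w.r.t. y
  assert(std::fabs(((x + eps) / y - out) / eps - dx) < 1e-4);  // matches finite difference in x
  assert(std::fabs((x / (y + eps) - out) / eps - dy) < 1e-4);  // matches finite difference in y
  return 0;
}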
ac46544e0f20a4491ae13d0669beeee175af162f.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/elementwise/elementwise_div_op.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.cu.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; namespace plat = paddle::platform; namespace paddle { namespace operators { template <typename T> struct SameDimsElemwiseDiv<platform::CUDADeviceContext, T> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, framework::Tensor* z) { DivRangeFunctor<T> functor(x->data<T>(), y->data<T>(), z->data<T>()); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx, x->numel()); for_range(functor); } }; template <> struct SameDimsElemwiseDiv<platform::CUDADeviceContext, platform::float16> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, framework::Tensor* z) { auto size = x->numel(); dim3 gird_size = dim3( (size / 2 + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1); dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1); const half* x2 = reinterpret_cast<const half*>(x->data<platform::float16>()); const half* y2 = reinterpret_cast<const half*>(y->data<platform::float16>()); half* z2 = reinterpret_cast<half*>(z->data<platform::float16>()); SameDimsElemwiseDivCUDAKernel<<< gird_size, block_size, 0, ctx.template device_context<platform::CUDADeviceContext>().stream()>>>( x2, y2, z2, size); } }; template <typename T> static __global__ void SimpleElemwiseDivGradCUDAKernel(const T* x, const T* y, const T* out, const T* dout, int64_t size, T* dx, T* dy) { int col = blockIdx.x * blockDim.x + threadIdx.x; while (col < size) { T o = dout[col]; dx[col] = o / y[col]; dy[col] = -o * out[col] / y[col]; col += blockDim.x * gridDim.x; } } template <typename DeviceContext, typename T> typename std::enable_if< std::is_same<DeviceContext, plat::CUDADeviceContext>::value>::type elementwise_div_grad(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, const framework::Tensor* out, const framework::Tensor* dout, framework::Tensor* dx, framework::Tensor* dy) { dim3 block_size = dim3(PADDLE_CUDA_THREAD_SIZE, 1); auto size = x->numel(); dim3 gird_size = dim3((size + PADDLE_CUDA_THREAD_SIZE - 1) / PADDLE_CUDA_THREAD_SIZE, 1); SimpleElemwiseDivGradCUDAKernel< T><<<gird_size, block_size, 0, ctx.template device_context<plat::CUDADeviceContext>().stream()>>>( x->data<T>(), y->data<T>(), out->data<T>(), dout->data<T>(), size, dx->mutable_data<T>(ctx.GetPlace()), dy->mutable_data<T>(ctx.GetPlace())); } } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL( elementwise_div, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, float>, 
ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivKernel<paddle::platform::CUDADeviceContext, int64_t>); REGISTER_OP_CUDA_KERNEL( elementwise_div_grad, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivGradKernel<paddle::platform::CUDADeviceContext, int64_t>); REGISTER_OP_CUDA_KERNEL( elementwise_div_grad_grad, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, float>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, double>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, int>, ops::ElementwiseDivDoubleGradKernel<paddle::platform::CUDADeviceContext, int64_t>);
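A standalone sketch (not part of the Paddle sources above) of the pattern SimpleElemwiseDivGradCUDAKernel uses: a grid-stride loop over the flattened tensors applying dx = dout / y and dy = -dout * out / y. All names here (div_grad_sketch, n) are illustrative assumptions so the snippet can be compiled and checked on its own.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void div_grad_sketch(const float* y, const float* out, const float* dout,
                                float* dx, float* dy, int64_t size) {
  // Grid-stride loop: each thread walks the array with stride gridDim.x * blockDim.x.
  for (int64_t col = blockIdx.x * blockDim.x + threadIdx.x; col < size;
       col += (int64_t)blockDim.x * gridDim.x) {
    float o = dout[col];
    dx[col] = o / y[col];              // d(x/y)/dx = 1/y
    dy[col] = -o * out[col] / y[col];  // d(x/y)/dy = -x/y^2 = -out/y
  }
}

int main() {
  const int64_t n = 1 << 20;
  float *y, *out, *dout, *dx, *dy;
  cudaMallocManaged(&y, n * sizeof(float));
  cudaMallocManaged(&out, n * sizeof(float));
  cudaMallocManaged(&dout, n * sizeof(float));
  cudaMallocManaged(&dx, n * sizeof(float));
  cudaMallocManaged(&dy, n * sizeof(float));
  for (int64_t i = 0; i < n; ++i) { y[i] = 2.0f; out[i] = 0.5f; dout[i] = 1.0f; }
  int threads = 256;
  int blocks = (int)((n + threads - 1) / threads);
  div_grad_sketch<<<blocks, threads>>>(y, out, dout, dx, dy, n);
  cudaDeviceSynchronize();
  printf("dx[0]=%f dy[0]=%f\n", dx[0], dy[0]);  // expect 0.5 and -0.25
  cudaFree(y); cudaFree(out); cudaFree(dout); cudaFree(dx); cudaFree(dy);
  return 0;
}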
0aca4d4cb0d7e71f8e7123c214576cab0c068750.hip
// !!! This is a file automatically generated by hipify!!!
#include "wrapperFunc.h"
#include <stdio.h>
#include <hip/hip_runtime_api.h>

#define CUDA_CALL_SAFE(f) \
  do { \
    hipError_t _e = f; \
    if(_e != hipSuccess) { \
      fprintf(stderr, "Cuda error %s %d %s:: %s\n", __FILE__,__LINE__, __func__, hipGetErrorString(_e)); \
      exit(EXIT_FAILURE); \
    } \
  } while(0)

__global__ void mykernel(int* data, int start){
  int myId = blockIdx.x * blockDim.x + threadIdx.x;
  data[myId] = start+myId;
}

void allocateMemory(void **ptr, size_t size){
  CUDA_CALL_SAFE(hipMalloc(ptr, size));
  return;
}

void cudaCopy(void *src, void *dest, size_t size){
  CUDA_CALL_SAFE(hipMemcpy(dest, src, size, hipMemcpyHostToDevice));
}

void hostCopy(void *src, void *dest, size_t size){
  CUDA_CALL_SAFE(hipMemcpy(dest, src, size, hipMemcpyDeviceToHost));
}

void freeCuda( void *ptr ){
  CUDA_CALL_SAFE(hipFree(ptr));
}

void deviceMemset(void *ptr, int size){
  CUDA_CALL_SAFE( hipMemset(ptr, 0, size) );
}

void executeKernel( int *ptr, int start){
  hipLaunchKernelGGL(( mykernel), dim3(1024), dim3(1024), 0, 0, ptr, start);
  CUDA_CALL_SAFE(hipPeekAtLastError());
  CUDA_CALL_SAFE(hipDeviceSynchronize());
}

int getProperties(){
  int nDevices;
  hipGetDeviceCount(&nDevices);
  return nDevices;
}

void setDevice(int id){
  CUDA_CALL_SAFE(hipSetDevice(id));
}
0aca4d4cb0d7e71f8e7123c214576cab0c068750.cu
#include "wrapperFunc.h" #include <stdio.h> #include <cuda_runtime_api.h> #define CUDA_CALL_SAFE(f) \ do { \ cudaError_t _e = f; \ if(_e != cudaSuccess) { \ fprintf(stderr, "Cuda error %s %d %s:: %s\n", __FILE__,__LINE__, __func__, cudaGetErrorString(_e)); \ exit(EXIT_FAILURE); \ } \ } while(0) __global__ void mykernel(int* data, int start){ int myId = blockIdx.x * blockDim.x + threadIdx.x; data[myId] = start+myId; } void allocateMemory(void **ptr, size_t size){ CUDA_CALL_SAFE(cudaMalloc(ptr, size)); return; } void cudaCopy(void *src, void *dest, size_t size){ CUDA_CALL_SAFE(cudaMemcpy(dest, src, size, cudaMemcpyHostToDevice)); } void hostCopy(void *src, void *dest, size_t size){ CUDA_CALL_SAFE(cudaMemcpy(dest, src, size, cudaMemcpyDeviceToHost)); } void freeCuda( void *ptr ){ CUDA_CALL_SAFE(cudaFree(ptr)); } void deviceMemset(void *ptr, int size){ CUDA_CALL_SAFE( cudaMemset(ptr, 0, size) ); } void executeKernel( int *ptr, int start){ mykernel<<<1024, 1024>>>(ptr, start); CUDA_CALL_SAFE(cudaPeekAtLastError()); CUDA_CALL_SAFE(cudaDeviceSynchronize()); } int getProperties(){ int nDevices; cudaGetDeviceCount(&nDevices); return nDevices; } void setDevice(int id){ CUDA_CALL_SAFE(cudaSetDevice(id)); }
81c2a44b33cdd40b163b70d8933877b58a0f3209.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hough.h" #include "commons.h" __host__ __device__ double calcRho(double x, double y, double theta) { double thetaRadian = (theta * PI) / 180.0; return x * cos(thetaRadian) + y * sin(thetaRadian); } __host__ __device__ int index(int nRows, int nCols, int rho, double theta) { return ((rho / RHO_STEP_SIZE) + (nRows / 2)) * nCols + (int) ((theta - (THETA_A-THETA_VARIATION)) / THETA_STEP_SIZE + 0.5); } __host__ __device__ bool isLocalMaximum(int i, int j, int nRows, int nCols, int *accumulator) { for (int i_delta = -50; i_delta <= 50; i_delta++) { for (int j_delta = -50; j_delta <= 50; j_delta++) { if (i + i_delta > 0 && i + i_delta < nRows && j + j_delta > 0 && j + j_delta < nCols && accumulator[(i + i_delta) * nCols + j + j_delta] > accumulator[i * nCols + j]) { return false; } } } return true; } __global__ void houghKernel(int frameWidth, int frameHeight, unsigned char* frame, int nRows, int nCols, int *accumulator) { int i = blockIdx.x * blockDim.y + threadIdx.y; int j = blockIdx.y * blockDim.z + threadIdx.z; double theta; int rho; if(i < frameHeight && j < frameWidth && ((int) frame[(i * frameWidth) + j]) != 0) { // thetas of interest will be close to 45 and close to 135 (vertical lines) // we are doing 2 thetas at a time, 1 for each theta of Interest // we use thetas varying 15 degrees more and less for(int k = threadIdx.x * (1 / THETA_STEP_SIZE); k < (threadIdx.x + 1) * (1 / THETA_STEP_SIZE); k++) { theta = THETA_A-THETA_VARIATION + ((double)k*THETA_STEP_SIZE); rho = calcRho(j, i, theta); atomicAdd(&accumulator[index(nRows, nCols, rho, theta)], 1); theta = THETA_B-THETA_VARIATION + ((double)k*THETA_STEP_SIZE); rho = calcRho(j, i, theta); atomicAdd(&accumulator[index(nRows, nCols, rho, theta)], 1); } } } __global__ void findLinesKernel(int nRows, int nCols, int *accumulator, int *lines, int *lineCounter) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (accumulator[i * nCols + j] >= THRESHOLD && isLocalMaximum(i, j, nRows, nCols, accumulator)) { int insertPt = atomicAdd(lineCounter, 2); if (insertPt + 1 < 2 * MAX_NUM_LINES) { lines[insertPt] = THETA_A-THETA_VARIATION + (j * THETA_STEP_SIZE); lines[insertPt + 1] = (i - (nRows / 2)) * RHO_STEP_SIZE; } } } Line::Line(double theta, double rho) { this->theta = theta; this->rho = rho; } /** Calculates y value of line based on given x */ double Line::getY(double x) { double thetaRadian = (theta * PI) / 180.0; return (rho - x * cos(thetaRadian)) / sin(thetaRadian); } /** Calculates x value of line based on given y */ double Line::getX(double y) { double thetaRadian = (theta * PI) / 180.0; return (rho - y * sin(thetaRadian)) / cos(thetaRadian); } void HoughTransformDevice::processFrame(cv::Mat& frame, std::vector<Line>& outputLines) { hipMemcpy(d_frame, frame.ptr(), frameSize, hipMemcpyHostToDevice); hipMemset(d_accumulator, 0, nRows * nCols * sizeof(int)); hipLaunchKernelGGL(( houghKernel) , dim3(houghGridDim), dim3(houghBlockDim), 0, 0, frame.cols, frame.rows, d_frame, nRows, nCols, d_accumulator); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString( err )); hipMemset(d_lineCounter, 0, sizeof(int)); hipLaunchKernelGGL(( findLinesKernel), dim3(findLinesGridDim), dim3(findLinesBlockDim), 0, 0, nRows, nCols, d_accumulator, d_lines, d_lineCounter); hipDeviceSynchronize(); hipMemcpy(&lineCounter, d_lineCounter, sizeof(int), 
hipMemcpyDeviceToHost); hipMemcpy(lines, d_lines, 2 * MAX_NUM_LINES * sizeof(int), hipMemcpyDeviceToHost); for (int i = 0; i < lineCounter - 1; i += 2) { outputLines.push_back(Line(lines[i], lines[i + 1])); } } HoughTransformDevice::HoughTransformDevice(int frameWidth, int frameHeight) { nRows = (int) ceil(sqrt(frameHeight * frameHeight + frameWidth * frameWidth)) * 2 / RHO_STEP_SIZE; nCols = (THETA_B -THETA_A + (2*THETA_VARIATION)) / THETA_STEP_SIZE; frameSize = frameWidth * frameHeight * sizeof(uchar); hipHostMalloc(&(lines), 2 * MAX_NUM_LINES * sizeof(int)); lineCounter = 0; hipMalloc(&d_lines, 2 * MAX_NUM_LINES * sizeof(int)); hipMalloc(&d_lineCounter, sizeof(int)); hipMalloc(&d_frame, frameSize); hipMalloc(&d_accumulator, nRows * nCols * sizeof(int)); houghBlockDim = dim3(32, 5, 5); houghGridDim = dim3(ceil(frameHeight / 5), ceil(frameWidth / 5)); findLinesBlockDim = dim3(32, 32); findLinesGridDim = dim3(ceil(nRows / 32), ceil(nCols / 32)); } HoughTransformDevice::~HoughTransformDevice() { hipFree(d_lines); hipFree(d_lineCounter); hipFree(d_frame); hipFree(d_accumulator); hipHostFree(lines); }
81c2a44b33cdd40b163b70d8933877b58a0f3209.cu
#include "hough.h" #include "commons.h" __host__ __device__ double calcRho(double x, double y, double theta) { double thetaRadian = (theta * PI) / 180.0; return x * cos(thetaRadian) + y * sin(thetaRadian); } __host__ __device__ int index(int nRows, int nCols, int rho, double theta) { return ((rho / RHO_STEP_SIZE) + (nRows / 2)) * nCols + (int) ((theta - (THETA_A-THETA_VARIATION)) / THETA_STEP_SIZE + 0.5); } __host__ __device__ bool isLocalMaximum(int i, int j, int nRows, int nCols, int *accumulator) { for (int i_delta = -50; i_delta <= 50; i_delta++) { for (int j_delta = -50; j_delta <= 50; j_delta++) { if (i + i_delta > 0 && i + i_delta < nRows && j + j_delta > 0 && j + j_delta < nCols && accumulator[(i + i_delta) * nCols + j + j_delta] > accumulator[i * nCols + j]) { return false; } } } return true; } __global__ void houghKernel(int frameWidth, int frameHeight, unsigned char* frame, int nRows, int nCols, int *accumulator) { int i = blockIdx.x * blockDim.y + threadIdx.y; int j = blockIdx.y * blockDim.z + threadIdx.z; double theta; int rho; if(i < frameHeight && j < frameWidth && ((int) frame[(i * frameWidth) + j]) != 0) { // thetas of interest will be close to 45 and close to 135 (vertical lines) // we are doing 2 thetas at a time, 1 for each theta of Interest // we use thetas varying 15 degrees more and less for(int k = threadIdx.x * (1 / THETA_STEP_SIZE); k < (threadIdx.x + 1) * (1 / THETA_STEP_SIZE); k++) { theta = THETA_A-THETA_VARIATION + ((double)k*THETA_STEP_SIZE); rho = calcRho(j, i, theta); atomicAdd(&accumulator[index(nRows, nCols, rho, theta)], 1); theta = THETA_B-THETA_VARIATION + ((double)k*THETA_STEP_SIZE); rho = calcRho(j, i, theta); atomicAdd(&accumulator[index(nRows, nCols, rho, theta)], 1); } } } __global__ void findLinesKernel(int nRows, int nCols, int *accumulator, int *lines, int *lineCounter) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (accumulator[i * nCols + j] >= THRESHOLD && isLocalMaximum(i, j, nRows, nCols, accumulator)) { int insertPt = atomicAdd(lineCounter, 2); if (insertPt + 1 < 2 * MAX_NUM_LINES) { lines[insertPt] = THETA_A-THETA_VARIATION + (j * THETA_STEP_SIZE); lines[insertPt + 1] = (i - (nRows / 2)) * RHO_STEP_SIZE; } } } Line::Line(double theta, double rho) { this->theta = theta; this->rho = rho; } /** Calculates y value of line based on given x */ double Line::getY(double x) { double thetaRadian = (theta * PI) / 180.0; return (rho - x * cos(thetaRadian)) / sin(thetaRadian); } /** Calculates x value of line based on given y */ double Line::getX(double y) { double thetaRadian = (theta * PI) / 180.0; return (rho - y * sin(thetaRadian)) / cos(thetaRadian); } void HoughTransformDevice::processFrame(cv::Mat& frame, std::vector<Line>& outputLines) { cudaMemcpy(d_frame, frame.ptr(), frameSize, cudaMemcpyHostToDevice); cudaMemset(d_accumulator, 0, nRows * nCols * sizeof(int)); houghKernel <<<houghGridDim, houghBlockDim>>> (frame.cols, frame.rows, d_frame, nRows, nCols, d_accumulator); cudaDeviceSynchronize(); cudaError err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString( err )); cudaMemset(d_lineCounter, 0, sizeof(int)); findLinesKernel<<<findLinesGridDim, findLinesBlockDim>>>(nRows, nCols, d_accumulator, d_lines, d_lineCounter); cudaDeviceSynchronize(); cudaMemcpy(&lineCounter, d_lineCounter, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(lines, d_lines, 2 * MAX_NUM_LINES * sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < lineCounter - 1; i += 2) { 
outputLines.push_back(Line(lines[i], lines[i + 1])); } } HoughTransformDevice::HoughTransformDevice(int frameWidth, int frameHeight) { nRows = (int) ceil(sqrt(frameHeight * frameHeight + frameWidth * frameWidth)) * 2 / RHO_STEP_SIZE; nCols = (THETA_B -THETA_A + (2*THETA_VARIATION)) / THETA_STEP_SIZE; frameSize = frameWidth * frameHeight * sizeof(uchar); cudaMallocHost(&(lines), 2 * MAX_NUM_LINES * sizeof(int)); lineCounter = 0; cudaMalloc(&d_lines, 2 * MAX_NUM_LINES * sizeof(int)); cudaMalloc(&d_lineCounter, sizeof(int)); cudaMalloc(&d_frame, frameSize); cudaMalloc(&d_accumulator, nRows * nCols * sizeof(int)); houghBlockDim = dim3(32, 5, 5); houghGridDim = dim3(ceil(frameHeight / 5), ceil(frameWidth / 5)); findLinesBlockDim = dim3(32, 32); findLinesGridDim = dim3(ceil(nRows / 32), ceil(nCols / 32)); } HoughTransformDevice::~HoughTransformDevice() { cudaFree(d_lines); cudaFree(d_lineCounter); cudaFree(d_frame); cudaFree(d_accumulator); cudaFreeHost(lines); }
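A small CPU-only check of the normal-form line parameterisation rho = x*cos(theta) + y*sin(theta) that both calcRho and Line::getY rely on. PI and the sample values are local assumptions here, since commons.h is not shown.

#include <cmath>
#include <cstdio>

static const double PI = 3.14159265358979323846;

double calcRhoRef(double x, double y, double thetaDeg) {
  double t = thetaDeg * PI / 180.0;
  return x * cos(t) + y * sin(t);
}

double getYRef(double thetaDeg, double rho, double x) {
  double t = thetaDeg * PI / 180.0;
  return (rho - x * cos(t)) / sin(t);   // undefined when sin(t) == 0 (horizontal normal)
}

int main() {
  double x = 120.0, y = 45.0, theta = 135.0;   // one sample point and angle
  double rho = calcRhoRef(x, y, theta);
  printf("rho = %.3f, recovered y = %.3f (expected %.3f)\n",
         rho, getYRef(theta, rho, x), y);
  return 0;
}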
e316ae49893fd530ede4fbc0d5e41cd23760214d.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <chrono> #include <thrust/device_vector.h> #include <thrust/scan.h> #include <hip/hip_runtime.h> #include "device_launch_parameters.h" template <typename T> __global__ void sum(T* output, const T* start, const T* stop, const int n) { int thid = threadIdx.x + blockIdx.x * blockDim.x; if (thid < n) { output[thid] = stop[thid] - start[thid]; } } template <typename T> void prefix_sum1(T* output, const T* arr, const T* arr2, const int size) { int block, thread; if (size > 1024) { block = (size / 1024) + 1; thread = 1024; } else { thread = size; block = 1; } T* d_output, * d_arr, * d_arr2; hipMalloc((void**)&d_output, size * sizeof(T)); hipMalloc((void**)&d_arr, size * sizeof(T)); hipMemcpy(d_arr, arr, size * sizeof(T), hipMemcpyHostToDevice); hipMalloc((void**)&d_arr2, size * sizeof(T)); hipMemcpy(d_arr2, arr2, size * sizeof(T), hipMemcpyHostToDevice); sum<T> << <block, thread >> > (d_output, d_arr, d_arr2, size); hipDeviceSynchronize(); hipMemcpy(output, d_output, size * sizeof(T), hipMemcpyDeviceToHost); thrust::device_vector<T> data(output, output+size); thrust::device_vector<T> temp(data.size() + 1); thrust::exclusive_scan(data.begin(), data.end(), temp.begin()); temp[data.size()] = data.back() + temp[data.size() - 1]; thrust::copy(temp.begin(), temp.end(), output); //for (const auto& i : temp) // std::cout << i << '\n'; hipFree(d_output); hipFree(d_arr); hipFree(d_arr2); } template <typename T> void prefix_sum2(T* output, const T* arr, const T* arr2, const int size) { thrust::device_vector<T> d_arr(arr, arr + size); thrust::device_vector<T> d_arr2(arr2, arr2 + size); thrust::device_vector<T> data(size); thrust::transform(d_arr2.begin(), d_arr2.end(), d_arr.begin(), data.begin(), thrust::minus<T>()); thrust::device_vector<T> temp(data.size() + 1); thrust::exclusive_scan(data.begin(), data.end(), temp.begin()); temp[data.size()] = data.back() + temp[data.size() - 1]; thrust::copy(temp.begin(), temp.end(), output); //for (const auto& i : temp) // std::cout << i << '\n'; } int main() { int const size = 70000; int starter[size], stopper[size], output[size + 1]; for (int i = 0; i < size; i++) { starter[i] = i; stopper[i] = i + 1; } prefix_sum1<int>(output, starter, stopper, size); // Warming up the GPU auto start1 = std::chrono::high_resolution_clock::now(); prefix_sum1<int>(output, starter, stopper, size); auto stop1 = std::chrono::high_resolution_clock::now(); auto time1 = std::chrono::duration_cast<std::chrono::microseconds>(stop1 - start1); std::cout << "Time taken for kernel = " << time1.count() << "\n"; auto start2 = std::chrono::high_resolution_clock::now(); prefix_sum2<int>(output, starter, stopper, size); auto stop2 = std::chrono::high_resolution_clock::now(); auto time2 = std::chrono::duration_cast<std::chrono::microseconds>(stop2 - start2); std::cout << "Time taken for thrust = " << time2.count() << "\n"; //for (int i = 0; i < size + 1; i++) // std::cout << output[i] << "\n"; }
e316ae49893fd530ede4fbc0d5e41cd23760214d.cu
#include <iostream> #include <chrono> #include <thrust/device_vector.h> #include <thrust/scan.h> #include <cuda_runtime.h> #include "device_launch_parameters.h" template <typename T> __global__ void sum(T* output, const T* start, const T* stop, const int n) { int thid = threadIdx.x + blockIdx.x * blockDim.x; if (thid < n) { output[thid] = stop[thid] - start[thid]; } } template <typename T> void prefix_sum1(T* output, const T* arr, const T* arr2, const int size) { int block, thread; if (size > 1024) { block = (size / 1024) + 1; thread = 1024; } else { thread = size; block = 1; } T* d_output, * d_arr, * d_arr2; cudaMalloc((void**)&d_output, size * sizeof(T)); cudaMalloc((void**)&d_arr, size * sizeof(T)); cudaMemcpy(d_arr, arr, size * sizeof(T), cudaMemcpyHostToDevice); cudaMalloc((void**)&d_arr2, size * sizeof(T)); cudaMemcpy(d_arr2, arr2, size * sizeof(T), cudaMemcpyHostToDevice); sum<T> << <block, thread >> > (d_output, d_arr, d_arr2, size); cudaDeviceSynchronize(); cudaMemcpy(output, d_output, size * sizeof(T), cudaMemcpyDeviceToHost); thrust::device_vector<T> data(output, output+size); thrust::device_vector<T> temp(data.size() + 1); thrust::exclusive_scan(data.begin(), data.end(), temp.begin()); temp[data.size()] = data.back() + temp[data.size() - 1]; thrust::copy(temp.begin(), temp.end(), output); //for (const auto& i : temp) // std::cout << i << '\n'; cudaFree(d_output); cudaFree(d_arr); cudaFree(d_arr2); } template <typename T> void prefix_sum2(T* output, const T* arr, const T* arr2, const int size) { thrust::device_vector<T> d_arr(arr, arr + size); thrust::device_vector<T> d_arr2(arr2, arr2 + size); thrust::device_vector<T> data(size); thrust::transform(d_arr2.begin(), d_arr2.end(), d_arr.begin(), data.begin(), thrust::minus<T>()); thrust::device_vector<T> temp(data.size() + 1); thrust::exclusive_scan(data.begin(), data.end(), temp.begin()); temp[data.size()] = data.back() + temp[data.size() - 1]; thrust::copy(temp.begin(), temp.end(), output); //for (const auto& i : temp) // std::cout << i << '\n'; } int main() { int const size = 70000; int starter[size], stopper[size], output[size + 1]; for (int i = 0; i < size; i++) { starter[i] = i; stopper[i] = i + 1; } prefix_sum1<int>(output, starter, stopper, size); // Warming up the GPU auto start1 = std::chrono::high_resolution_clock::now(); prefix_sum1<int>(output, starter, stopper, size); auto stop1 = std::chrono::high_resolution_clock::now(); auto time1 = std::chrono::duration_cast<std::chrono::microseconds>(stop1 - start1); std::cout << "Time taken for kernel = " << time1.count() << "\n"; auto start2 = std::chrono::high_resolution_clock::now(); prefix_sum2<int>(output, starter, stopper, size); auto stop2 = std::chrono::high_resolution_clock::now(); auto time2 = std::chrono::duration_cast<std::chrono::microseconds>(stop2 - start2); std::cout << "Time taken for thrust = " << time2.count() << "\n"; //for (int i = 0; i < size + 1; i++) // std::cout << output[i] << "\n"; }
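The scan idiom used by prefix_sum1 and prefix_sum2 above, shown in isolation: an exclusive scan over interval lengths into a buffer one element longer, with the final slot patched so it holds the grand total. The names lens and offsets are illustrative.

#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cstdio>

int main() {
  int h_lens[5] = {3, 1, 4, 1, 5};
  thrust::device_vector<int> lens(h_lens, h_lens + 5);
  thrust::device_vector<int> offsets(lens.size() + 1);

  // Exclusive scan: offsets[i] = sum of lens[0..i-1], with offsets[0] = 0.
  thrust::exclusive_scan(lens.begin(), lens.end(), offsets.begin());
  // Patch the extra slot so offsets.back() is the total (3+1+4+1+5 = 14).
  offsets[lens.size()] = lens.back() + offsets[lens.size() - 1];

  for (size_t i = 0; i < offsets.size(); ++i)
    printf("%d ", (int)offsets[i]);    // prints: 0 3 4 8 9 14
  printf("\n");
  return 0;
}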
f4adcffd471a25f1a8ecc5ba7b5cdad2a622e280.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void kernel() {
  printf("hello world");
}
f4adcffd471a25f1a8ecc5ba7b5cdad2a622e280.cu
#include "includes.h" __global__ void kernel() { printf("hello world"); }
4b2aac59599e15c994063dcf55bfd1debe011f8f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/math/unpooling.h" #include "paddle/platform/cuda_helper.h" namespace paddle { namespace operators { namespace math { template <typename T> __global__ void KernelUnpool2dMax(const int nthreads, const T* input_data, const int* indices_data, const int input_height, const int input_width, const int channels, T* output_data, const int output_height, const int output_width) { int in_n_stride = input_height * input_width * channels; int in_c_stride = input_height * input_width; int out_n_stride = output_height * output_width * channels; int out_c_stride = output_height * output_width; int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { int bidx = i / in_n_stride; int boffset = i % in_n_stride; int cidx = boffset / in_c_stride; int out_offset = bidx * out_n_stride + cidx * out_c_stride; int out_index = indices_data[i]; PADDLE_ASSERT(out_index < out_c_stride); output_data[out_offset + out_index] = input_data[i]; } } template <typename T> __global__ void KernelUnpool2dMaxGrad( const int nthreads, const T* input_data, const int* indices_data, const int input_height, const int input_width, const int channels, const T* output_data, const T* output_grad, const int output_height, const int output_width, T* input_grad) { int in_n_stride = input_height * input_width * channels; int in_c_stride = input_height * input_width; int out_n_stride = output_height * output_width * channels; int out_c_stride = output_height * output_width; int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { int bidx = i / in_n_stride; int boffset = i % in_n_stride; int cidx = boffset / in_c_stride; int out_offset = bidx * out_n_stride + cidx * out_c_stride; int out_index = indices_data[i]; PADDLE_ASSERT(out_index < out_c_stride); input_grad[i] = output_grad[out_offset + out_index]; } } /* * All tensors are in NCHW format. 
*/ template <typename T> class Unpool2dMaxFunctor<platform::GPUPlace, T> { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, const framework::Tensor& indices, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const T* input_data = input.data<T>(); const int* indices_data = indices.data<int>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int threads = 1024; int grid = (input.numel() + threads - 1) / threads; hipLaunchKernelGGL(( KernelUnpool2dMax< T>), dim3(grid), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream(), input.numel(), input_data, indices_data, input_height, input_width, output_channels, output_data, output_height, output_width); } }; /* * All tensors are in NCHW format. */ template <typename T> class Unpool2dMaxGradFunctor<platform::GPUPlace, T> { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, const framework::Tensor& indices, const framework::Tensor& output, const framework::Tensor& output_grad, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output.dims()[1]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const T* input_data = input.data<T>(); const int* indices_data = indices.data<int>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int threads = 1024; int grid = (input.numel() + threads - 1) / threads; hipLaunchKernelGGL(( KernelUnpool2dMaxGrad< T>), dim3(grid), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream(), input.numel(), input_data, indices_data, input_height, input_width, output_channels, output_data, output_grad_data, output_height, output_width, input_grad_data); } }; template class Unpool2dMaxGradFunctor<platform::GPUPlace, float>; template class Unpool2dMaxGradFunctor<platform::GPUPlace, double>; template class Unpool2dMaxFunctor<platform::GPUPlace, float>; template class Unpool2dMaxFunctor<platform::GPUPlace, double>; } // namespace math } // namespace operators } // namespace paddle
4b2aac59599e15c994063dcf55bfd1debe011f8f.cu
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/math/unpooling.h" #include "paddle/platform/cuda_helper.h" namespace paddle { namespace operators { namespace math { template <typename T> __global__ void KernelUnpool2dMax(const int nthreads, const T* input_data, const int* indices_data, const int input_height, const int input_width, const int channels, T* output_data, const int output_height, const int output_width) { int in_n_stride = input_height * input_width * channels; int in_c_stride = input_height * input_width; int out_n_stride = output_height * output_width * channels; int out_c_stride = output_height * output_width; int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { int bidx = i / in_n_stride; int boffset = i % in_n_stride; int cidx = boffset / in_c_stride; int out_offset = bidx * out_n_stride + cidx * out_c_stride; int out_index = indices_data[i]; PADDLE_ASSERT(out_index < out_c_stride); output_data[out_offset + out_index] = input_data[i]; } } template <typename T> __global__ void KernelUnpool2dMaxGrad( const int nthreads, const T* input_data, const int* indices_data, const int input_height, const int input_width, const int channels, const T* output_data, const T* output_grad, const int output_height, const int output_width, T* input_grad) { int in_n_stride = input_height * input_width * channels; int in_c_stride = input_height * input_width; int out_n_stride = output_height * output_width * channels; int out_c_stride = output_height * output_width; int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { int bidx = i / in_n_stride; int boffset = i % in_n_stride; int cidx = boffset / in_c_stride; int out_offset = bidx * out_n_stride + cidx * out_c_stride; int out_index = indices_data[i]; PADDLE_ASSERT(out_index < out_c_stride); input_grad[i] = output_grad[out_offset + out_index]; } } /* * All tensors are in NCHW format. 
*/ template <typename T> class Unpool2dMaxFunctor<platform::GPUPlace, T> { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, const framework::Tensor& indices, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const T* input_data = input.data<T>(); const int* indices_data = indices.data<int>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int threads = 1024; int grid = (input.numel() + threads - 1) / threads; KernelUnpool2dMax< T><<<grid, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream()>>>(input.numel(), input_data, indices_data, input_height, input_width, output_channels, output_data, output_height, output_width); } }; /* * All tensors are in NCHW format. */ template <typename T> class Unpool2dMaxGradFunctor<platform::GPUPlace, T> { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, const framework::Tensor& indices, const framework::Tensor& output, const framework::Tensor& output_grad, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output.dims()[1]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const T* input_data = input.data<T>(); const int* indices_data = indices.data<int>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int threads = 1024; int grid = (input.numel() + threads - 1) / threads; KernelUnpool2dMaxGrad< T><<<grid, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream()>>>(input.numel(), input_data, indices_data, input_height, input_width, output_channels, output_data, output_grad_data, output_height, output_width, input_grad_data); } }; template class Unpool2dMaxGradFunctor<platform::GPUPlace, float>; template class Unpool2dMaxGradFunctor<platform::GPUPlace, double>; template class Unpool2dMaxFunctor<platform::GPUPlace, float>; template class Unpool2dMaxFunctor<platform::GPUPlace, double>; } // namespace math } // namespace operators } // namespace paddle
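A CPU reference sketch (not from Paddle) of the NCHW max-unpool scatter performed by KernelUnpool2dMax, with the stride arithmetic made explicit: each input element carries a flat position inside its output channel plane and is written to that position.

#include <vector>
#include <cstdio>

void unpool2d_max_ref(const std::vector<float>& in, const std::vector<int>& indices,
                      int batch, int channels, int in_h, int in_w, int out_h, int out_w,
                      std::vector<float>& out) {
  const int in_c_stride = in_h * in_w;
  const int in_n_stride = channels * in_c_stride;
  const int out_c_stride = out_h * out_w;
  const int out_n_stride = channels * out_c_stride;
  out.assign(batch * out_n_stride, 0.0f);
  for (int i = 0; i < (int)in.size(); ++i) {
    int b = i / in_n_stride;                       // which sample
    int c = (i % in_n_stride) / in_c_stride;       // which channel
    out[b * out_n_stride + c * out_c_stride + indices[i]] = in[i];
  }
}

int main() {
  // 1 sample, 1 channel, 2x2 input unpooled into 4x4; indices are positions in the 4x4 plane.
  std::vector<float> in = {1.f, 2.f, 3.f, 4.f};
  std::vector<int> idx = {0, 3, 10, 15};
  std::vector<float> out;
  unpool2d_max_ref(in, idx, 1, 1, 2, 2, 4, 4, out);
  for (int r = 0; r < 4; ++r) {
    for (int c = 0; c < 4; ++c) printf("%.0f ", out[r * 4 + c]);
    printf("\n");
  }
  return 0;
}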
62b64fedfb78548d8084837cc68b2aaf38884618.hip
// !!! This is a file automatically generated by hipify!!! #include <assert.h> #include <iostream> #include <fstream> #include <iomanip> #include <sstream> #include "node.h" #include "adaptive_grid.h" #include "adaptive_multigrid_cuda_new.h" #include "wavelet_compression.h" #include "wavelet_decompression.h" #include "RK4.h" void BubbleSort(Node* num, const int numLength) { int i, j, flag = 1; // set flag to 1 to start first pass Node temp; // holding variable //int numLength = num.length( ); for(i = 1; (i <= numLength) && flag; i++) { flag = 0; for (j=0; j < (numLength -1); j++) { if (num[j+1].layer > num[j].layer) // ascending order simply changes to < { temp = num[j]; // swap elements num[j] = num[j+1]; num[j+1] = temp; flag = 1; // indicates that a swap occurred. } } } return; //arrays are passed to functions by address; nothing is returned } void ERRORCHECK2(){ hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("Error (!): %s\n", hipGetErrorString(err)); exit(-1); } } int main(int argc, char const *argv[]) { Node* matrix; matrix = (Node*) calloc(LEN_OF_MATRIX*LEN_OF_MATRIX,sizeof(Node)); datatype t = 0.0f; float x = 0; float y = 0; for(int i=0; i < (LEN_OF_MATRIX * LEN_OF_MATRIX); i++){ matrix[i].x_index_global = x; matrix[i].y_index_global = y; matrix[i].x = x/(LEN_OF_MATRIX-1); matrix[i].y = y/(LEN_OF_MATRIX-1); /*if (x*x/LEN_OF_MATRIX> y){ matrix[i].vort = 1.0f; }*/ if (y < 0.000001f) { matrix[i].vort = 1.0f; } if (y<LEN_OF_MATRIX - 1){ y++; } else{ y = 0; x++; } //std::cout<<"x = "<<matrix[i].x<<std::endl<< "y = "<<matrix[i].y<<std::endl<< "vort = "<<matrix[i].vort<<std::endl;/*<< "x_index = "<<matrix[i].x_index<<std::endl<< "y_index = "<<matrix[i].y_index<<std::endl<< "isPicked = "<<matrix[i].isPicked<<std::endl;*/ } Node* nodeArray; int* origoArray; int* countTrue; std::string STRING; while(t < END_TIME){ countTrue = (int*) malloc(1*sizeof(int)); std::cout<<"hej?"<<std::endl; ERRORCHECK2(); wavelet_compression(matrix, countTrue); nodeArray = (Node*) calloc(*countTrue , sizeof(Node)); origoArray = (int*) malloc(LAYERS*2 * sizeof(int)); //*origoArray[layers*2]; ERRORCHECK2(); //std::cout<<"CountTrue main: "<<*countTrue<<std::endl; for(int i = 0; i<LAYERS*2; i++){ origoArray[i] = LEN_OF_MATRIX; } //std::cout<<"CountTrue main: "<<*countTrue<<std::endl; ERRORCHECK2(); int orderPlace = *countTrue -1; //std::cout<<"CountTrue main: "<<*countTrue<<std::endl; for (int m = 1; m <= LAYERS; m++) { for (int i=0; i<LEN_OF_MATRIX; i++){ for (int j=0; j<LEN_OF_MATRIX; j++){ if (matrix[i*LEN_OF_MATRIX + j].isPicked == true && matrix[i*LEN_OF_MATRIX + j].layer == m){ nodeArray[orderPlace] = matrix[i*LEN_OF_MATRIX + j]; if(matrix[i*LEN_OF_MATRIX + j].x_index_global<origoArray[(m-1)*2]){ origoArray[(m-1)*2] = matrix[i*LEN_OF_MATRIX + j].x_index_global; } if(matrix[i*LEN_OF_MATRIX + j].y_index_global<origoArray[(m-1)*2 + 1]){ origoArray[(m-1)*2 + 1] = matrix[i*LEN_OF_MATRIX + j].y_index_global; } orderPlace --; } } } } ERRORCHECK2(); int stepSize; for (int lay = 1; lay <= LAYERS; ++lay) { //Round origo array down. 
//origoArray[2*i] = 2*(origoArray[2*i]/2); //origoArray[2*i+1] = 2*(origoArray[2*i+1]/2); stepSize = 1<<(lay); if (origoArray[2*(lay-1)] % stepSize != 0) { origoArray[2*(lay-1)] -= stepSize/2; } if (origoArray[2*(lay-1)+1] % stepSize != 0) { origoArray[2*(lay-1)+1] -= stepSize/2; } //int stepSize = 2; //int layerCount = 1; //while(nodeArray[i].x_index_global % stepSize == 0 && nodeArray[i].y_index_global % stepSize == 0){ // stepSize *= 2; // layerCount++; // if(layerCount == LAYERS){ // break; // } //} //nodeArray[i].layer = layerCount; } //BubbleSort(nodeArray, *countTrue); ERRORCHECK2(); //assert(0); std::cout<<"CountTrue main: "<<countTrue<<std::endl; for(int i=0; i<*countTrue; i++){ std::cout<<nodeArray[i].x_index_global<<" "<< nodeArray[i].y_index_global<<" "<<nodeArray[i].layer<<std::endl<<std::endl; } for(int i=0; i<LAYERS; i++){ std::cout<<"origo: "<<origoArray[2*i]<<" "<< origoArray[2*i+1]<<std::endl; } //delete[] matrix; ERRORCHECK2(); for (int i = 0; i < ITERATIONS_UNTIL_GRID_UPDATE; ++i) { std::cout<<"T=========================================================: "<<t<<std::endl; RK4(DELTA_T, nodeArray, origoArray, *countTrue); } //adaptive_multigrid(nodeArray, origoArray, *countTrue); //assert(0); //matrix = (Node*) calloc(LEN_OF_MATRIX*LEN_OF_MATRIX,sizeof(Node)); int x_int = 0; int y_int = 0; for(int i=0; i< (LEN_OF_MATRIX*LEN_OF_MATRIX); i++){ matrix[i] = Node(); matrix[i].x_index_global = x_int; matrix[i].y_index_global = y_int; if (y_int<LEN_OF_MATRIX - 1){ y_int++; } else{ y_int = 0; x_int++; } //std::cout<< /*"x = "<<matrix[i].x<<std::endl<< "y = "<<matrix[i].y<<std::endl<< "vort = "<<matrix[i].vort<<std::endl;/*<< "x_index = "<<matrix[i].x_index<<std::endl<< "y_index = "<<matrix[i].y_index<<std::endl<< "isPicked = "<<matrix[i].isPicked<<std::endl;*/ } std::cout<<"haj1"<<std::endl; ERRORCHECK2(); std::cout<<"haj2"<<std::endl; wavelet_decompression(nodeArray, matrix, countTrue); std::cout<<"haj3"<<std::endl; ERRORCHECK2(); std::cout<<"haj4"<<std::endl; //visualize(matrix); free (countTrue); free (origoArray); t = t + DELTA_T; /* for (int i =0; i < LEN_OF_MATRIX*LEN_OF_MATRIX; ++i) { std::ostringstream ss; ss << matrix[i].vort; STRING += (ss.str())+" "; } */ for (int i =0; i < LEN_OF_MATRIX*LEN_OF_MATRIX; ++i) { std::ostringstream ss; ss << matrix[i].stream; STRING += (ss.str())+" "; } /* STRING += "A = ["; for (int x = 0; x < LEN_OF_MATRIX; ++x) { for (int y = 0; y < LEN_OF_MATRIX; ++y) { std::ostringstream ss; ss << matrix[x*LEN_OF_MATRIX + y].vort; STRING += (ss.str())+" "; } STRING += "\n"; } STRING += "]; \n subplot(1,2,1); surf(A); \n"; STRING += "B = ["; for (int x = 0; x < LEN_OF_MATRIX; ++x) { for (int y = 0; y < LEN_OF_MATRIX; ++y) { std::ostringstream ss; ss << matrix[x*LEN_OF_MATRIX + y].stream; STRING += (ss.str())+" "; } STRING += "\n"; } STRING += "]; \n subplot(1,2,2); surf(B); drawnow; pause(0.5); \n"; */ } std::ofstream myfile; myfile.open ("MrWagsOutputStream.txt"); myfile << STRING; myfile.close(); free (matrix); return 0; };
62b64fedfb78548d8084837cc68b2aaf38884618.cu
#include <assert.h> #include <iostream> #include <fstream> #include <iomanip> #include <sstream> #include "node.h" #include "adaptive_grid.h" #include "adaptive_multigrid_cuda_new.h" #include "wavelet_compression.h" #include "wavelet_decompression.h" #include "RK4.h" void BubbleSort(Node* num, const int numLength) { int i, j, flag = 1; // set flag to 1 to start first pass Node temp; // holding variable //int numLength = num.length( ); for(i = 1; (i <= numLength) && flag; i++) { flag = 0; for (j=0; j < (numLength -1); j++) { if (num[j+1].layer > num[j].layer) // ascending order simply changes to < { temp = num[j]; // swap elements num[j] = num[j+1]; num[j+1] = temp; flag = 1; // indicates that a swap occurred. } } } return; //arrays are passed to functions by address; nothing is returned } void ERRORCHECK2(){ cudaThreadSynchronize(); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error (!): %s\n", cudaGetErrorString(err)); exit(-1); } } int main(int argc, char const *argv[]) { Node* matrix; matrix = (Node*) calloc(LEN_OF_MATRIX*LEN_OF_MATRIX,sizeof(Node)); datatype t = 0.0f; float x = 0; float y = 0; for(int i=0; i < (LEN_OF_MATRIX * LEN_OF_MATRIX); i++){ matrix[i].x_index_global = x; matrix[i].y_index_global = y; matrix[i].x = x/(LEN_OF_MATRIX-1); matrix[i].y = y/(LEN_OF_MATRIX-1); /*if (x*x/LEN_OF_MATRIX> y){ matrix[i].vort = 1.0f; }*/ if (y < 0.000001f) { matrix[i].vort = 1.0f; } if (y<LEN_OF_MATRIX - 1){ y++; } else{ y = 0; x++; } //std::cout<<"x = "<<matrix[i].x<<std::endl<< "y = "<<matrix[i].y<<std::endl<< "vort = "<<matrix[i].vort<<std::endl;/*<< "x_index = "<<matrix[i].x_index<<std::endl<< "y_index = "<<matrix[i].y_index<<std::endl<< "isPicked = "<<matrix[i].isPicked<<std::endl;*/ } Node* nodeArray; int* origoArray; int* countTrue; std::string STRING; while(t < END_TIME){ countTrue = (int*) malloc(1*sizeof(int)); std::cout<<"hej?"<<std::endl; ERRORCHECK2(); wavelet_compression(matrix, countTrue); nodeArray = (Node*) calloc(*countTrue , sizeof(Node)); origoArray = (int*) malloc(LAYERS*2 * sizeof(int)); //*origoArray[layers*2]; ERRORCHECK2(); //std::cout<<"CountTrue main: "<<*countTrue<<std::endl; for(int i = 0; i<LAYERS*2; i++){ origoArray[i] = LEN_OF_MATRIX; } //std::cout<<"CountTrue main: "<<*countTrue<<std::endl; ERRORCHECK2(); int orderPlace = *countTrue -1; //std::cout<<"CountTrue main: "<<*countTrue<<std::endl; for (int m = 1; m <= LAYERS; m++) { for (int i=0; i<LEN_OF_MATRIX; i++){ for (int j=0; j<LEN_OF_MATRIX; j++){ if (matrix[i*LEN_OF_MATRIX + j].isPicked == true && matrix[i*LEN_OF_MATRIX + j].layer == m){ nodeArray[orderPlace] = matrix[i*LEN_OF_MATRIX + j]; if(matrix[i*LEN_OF_MATRIX + j].x_index_global<origoArray[(m-1)*2]){ origoArray[(m-1)*2] = matrix[i*LEN_OF_MATRIX + j].x_index_global; } if(matrix[i*LEN_OF_MATRIX + j].y_index_global<origoArray[(m-1)*2 + 1]){ origoArray[(m-1)*2 + 1] = matrix[i*LEN_OF_MATRIX + j].y_index_global; } orderPlace --; } } } } ERRORCHECK2(); int stepSize; for (int lay = 1; lay <= LAYERS; ++lay) { //Round origo array down. 
//origoArray[2*i] = 2*(origoArray[2*i]/2); //origoArray[2*i+1] = 2*(origoArray[2*i+1]/2); stepSize = 1<<(lay); if (origoArray[2*(lay-1)] % stepSize != 0) { origoArray[2*(lay-1)] -= stepSize/2; } if (origoArray[2*(lay-1)+1] % stepSize != 0) { origoArray[2*(lay-1)+1] -= stepSize/2; } //int stepSize = 2; //int layerCount = 1; //while(nodeArray[i].x_index_global % stepSize == 0 && nodeArray[i].y_index_global % stepSize == 0){ // stepSize *= 2; // layerCount++; // if(layerCount == LAYERS){ // break; // } //} //nodeArray[i].layer = layerCount; } //BubbleSort(nodeArray, *countTrue); ERRORCHECK2(); //assert(0); std::cout<<"CountTrue main: "<<countTrue<<std::endl; for(int i=0; i<*countTrue; i++){ std::cout<<nodeArray[i].x_index_global<<" "<< nodeArray[i].y_index_global<<" "<<nodeArray[i].layer<<std::endl<<std::endl; } for(int i=0; i<LAYERS; i++){ std::cout<<"origo: "<<origoArray[2*i]<<" "<< origoArray[2*i+1]<<std::endl; } //delete[] matrix; ERRORCHECK2(); for (int i = 0; i < ITERATIONS_UNTIL_GRID_UPDATE; ++i) { std::cout<<"T=========================================================: "<<t<<std::endl; RK4(DELTA_T, nodeArray, origoArray, *countTrue); } //adaptive_multigrid(nodeArray, origoArray, *countTrue); //assert(0); //matrix = (Node*) calloc(LEN_OF_MATRIX*LEN_OF_MATRIX,sizeof(Node)); int x_int = 0; int y_int = 0; for(int i=0; i< (LEN_OF_MATRIX*LEN_OF_MATRIX); i++){ matrix[i] = Node(); matrix[i].x_index_global = x_int; matrix[i].y_index_global = y_int; if (y_int<LEN_OF_MATRIX - 1){ y_int++; } else{ y_int = 0; x_int++; } //std::cout<< /*"x = "<<matrix[i].x<<std::endl<< "y = "<<matrix[i].y<<std::endl<< "vort = "<<matrix[i].vort<<std::endl;/*<< "x_index = "<<matrix[i].x_index<<std::endl<< "y_index = "<<matrix[i].y_index<<std::endl<< "isPicked = "<<matrix[i].isPicked<<std::endl;*/ } std::cout<<"haj1"<<std::endl; ERRORCHECK2(); std::cout<<"haj2"<<std::endl; wavelet_decompression(nodeArray, matrix, countTrue); std::cout<<"haj3"<<std::endl; ERRORCHECK2(); std::cout<<"haj4"<<std::endl; //visualize(matrix); free (countTrue); free (origoArray); t = t + DELTA_T; /* for (int i =0; i < LEN_OF_MATRIX*LEN_OF_MATRIX; ++i) { std::ostringstream ss; ss << matrix[i].vort; STRING += (ss.str())+" "; } */ for (int i =0; i < LEN_OF_MATRIX*LEN_OF_MATRIX; ++i) { std::ostringstream ss; ss << matrix[i].stream; STRING += (ss.str())+" "; } /* STRING += "A = ["; for (int x = 0; x < LEN_OF_MATRIX; ++x) { for (int y = 0; y < LEN_OF_MATRIX; ++y) { std::ostringstream ss; ss << matrix[x*LEN_OF_MATRIX + y].vort; STRING += (ss.str())+" "; } STRING += "\n"; } STRING += "]; \n subplot(1,2,1); surf(A); \n"; STRING += "B = ["; for (int x = 0; x < LEN_OF_MATRIX; ++x) { for (int y = 0; y < LEN_OF_MATRIX; ++y) { std::ostringstream ss; ss << matrix[x*LEN_OF_MATRIX + y].stream; STRING += (ss.str())+" "; } STRING += "\n"; } STRING += "]; \n subplot(1,2,2); surf(B); drawnow; pause(0.5); \n"; */ } std::ofstream myfile; myfile.open ("MrWagsOutputStream.txt"); myfile << STRING; myfile.close(); free (matrix); return 0; };
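The origo-alignment loop above appears to assume that layer-lay coordinates are already multiples of 2^(lay-1), and snaps any value that is not a multiple of stepSize = 2^lay down to one by subtracting stepSize/2. A standalone illustration of that step, with assumed sample values:

#include <cassert>
#include <cstdio>

int align_origo(int coord, int lay) {
  int stepSize = 1 << lay;                         // grid spacing of layer `lay`
  if (coord % stepSize != 0) coord -= stepSize / 2;
  return coord;
}

int main() {
  // Layer 3: spacing 8, inputs assumed to be multiples of 4.
  int samples[] = {0, 4, 8, 12, 20, 24};
  for (int c : samples) {
    int a = align_origo(c, 3);
    assert(a % 8 == 0 && a <= c && c - a < 8);     // snapped down to the coarser grid
    printf("%2d -> %2d\n", c, a);
  }
  return 0;
}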
cc15310aed901fb48fa47437c92d08259fb645b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <math.h> #include <assert.h> #define MIN 2 #define MAX 7 #define ITER 10000000 __global__ void setup_kernel(hiprandState_t *state){ int idx = threadIdx.x+blockDim.x*blockIdx.x; hiprand_init(1234, idx, 0, &state[idx]); } __global__ void generate_kernel(hiprandState_t *my_curandstate, const unsigned int n, const unsigned *max_rand_int, const unsigned *min_rand_int, unsigned int *result){ int idx = threadIdx.x + blockDim.x*blockIdx.x; int count = 0; while (count < n){ float myrandf = hiprand_uniform(my_curandstate+idx); printf("idx=%d and myrandf=%f\n", idx,myrandf); myrandf *= (max_rand_int[idx] - min_rand_int[idx]+0.999999); myrandf += min_rand_int[idx]; int myrand = (int)truncf(myrandf); assert(myrand <= max_rand_int[idx]); assert(myrand >= min_rand_int[idx]); result[myrand-min_rand_int[idx]]++; count++;} } int main(){ hiprandState_t *d_state; hipMalloc(&d_state, sizeof(hiprandState_t)); unsigned *d_result, *h_result; unsigned *d_max_rand_int, *h_max_rand_int, *d_min_rand_int, *h_min_rand_int; hipMalloc(&d_result, (MAX-MIN+1) * sizeof(unsigned)); h_result = (unsigned *)malloc((MAX-MIN+1)*sizeof(unsigned)); hipMalloc(&d_max_rand_int, sizeof(unsigned)); h_max_rand_int = (unsigned *)malloc(sizeof(unsigned)); hipMalloc(&d_min_rand_int, sizeof(unsigned)); h_min_rand_int = (unsigned *)malloc(sizeof(unsigned)); hipMemset(d_result, 0, (MAX-MIN+1)*sizeof(unsigned)); hipLaunchKernelGGL(( setup_kernel), dim3(1),dim3(1), 0, 0, d_state); *h_max_rand_int = MAX; *h_min_rand_int = MIN; hipMemcpy(d_max_rand_int, h_max_rand_int, sizeof(unsigned), hipMemcpyHostToDevice); hipMemcpy(d_min_rand_int, h_min_rand_int, sizeof(unsigned), hipMemcpyHostToDevice); hipLaunchKernelGGL(( generate_kernel), dim3(1),dim3(1), 0, 0, d_state, ITER, d_max_rand_int, d_min_rand_int, d_result); hipMemcpy(h_result, d_result, (MAX-MIN+1) * sizeof(unsigned), hipMemcpyDeviceToHost); printf("Bin: Count: \n"); for (int i = MIN; i <= MAX; i++) printf("%d %d\n", i, h_result[i-MIN]); return 0; }
cc15310aed901fb48fa47437c92d08259fb645b7.cu
#include <stdio.h> #include <curand.h> #include <curand_kernel.h> #include <math.h> #include <assert.h> #define MIN 2 #define MAX 7 #define ITER 10000000 __global__ void setup_kernel(curandState *state){ int idx = threadIdx.x+blockDim.x*blockIdx.x; curand_init(1234, idx, 0, &state[idx]); } __global__ void generate_kernel(curandState *my_curandstate, const unsigned int n, const unsigned *max_rand_int, const unsigned *min_rand_int, unsigned int *result){ int idx = threadIdx.x + blockDim.x*blockIdx.x; int count = 0; while (count < n){ float myrandf = curand_uniform(my_curandstate+idx); printf("idx=%d and myrandf=%f\n", idx,myrandf); myrandf *= (max_rand_int[idx] - min_rand_int[idx]+0.999999); myrandf += min_rand_int[idx]; int myrand = (int)truncf(myrandf); assert(myrand <= max_rand_int[idx]); assert(myrand >= min_rand_int[idx]); result[myrand-min_rand_int[idx]]++; count++;} } int main(){ curandState *d_state; cudaMalloc(&d_state, sizeof(curandState)); unsigned *d_result, *h_result; unsigned *d_max_rand_int, *h_max_rand_int, *d_min_rand_int, *h_min_rand_int; cudaMalloc(&d_result, (MAX-MIN+1) * sizeof(unsigned)); h_result = (unsigned *)malloc((MAX-MIN+1)*sizeof(unsigned)); cudaMalloc(&d_max_rand_int, sizeof(unsigned)); h_max_rand_int = (unsigned *)malloc(sizeof(unsigned)); cudaMalloc(&d_min_rand_int, sizeof(unsigned)); h_min_rand_int = (unsigned *)malloc(sizeof(unsigned)); cudaMemset(d_result, 0, (MAX-MIN+1)*sizeof(unsigned)); setup_kernel<<<1,1>>>(d_state); *h_max_rand_int = MAX; *h_min_rand_int = MIN; cudaMemcpy(d_max_rand_int, h_max_rand_int, sizeof(unsigned), cudaMemcpyHostToDevice); cudaMemcpy(d_min_rand_int, h_min_rand_int, sizeof(unsigned), cudaMemcpyHostToDevice); generate_kernel<<<1,1>>>(d_state, ITER, d_max_rand_int, d_min_rand_int, d_result); cudaMemcpy(h_result, d_result, (MAX-MIN+1) * sizeof(unsigned), cudaMemcpyDeviceToHost); printf("Bin: Count: \n"); for (int i = MIN; i <= MAX; i++) printf("%d %d\n", i, h_result[i-MIN]); return 0; }
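The range mapping inside generate_kernel, reproduced host-side without curand so it can be inspected directly: curand_uniform returns a float in (0, 1], and scaling by (MAX - MIN + 0.999999) before truncation keeps the result inside the closed range [MIN, MAX] without ever producing MAX + 1.

#include <cmath>
#include <cstdio>

int map_to_range(float u, int lo, int hi) {        // u expected in (0, 1]
  float x = u * (hi - lo + 0.999999f) + lo;
  return (int)truncf(x);                           // lands in [lo, hi]
}

int main() {
  const int lo = 2, hi = 7;                        // mirrors MIN/MAX in the file above
  printf("u=1e-7 -> %d\n", map_to_range(1e-7f, lo, hi));  // 2
  printf("u=0.5  -> %d\n", map_to_range(0.5f, lo, hi));   // 4
  printf("u=1.0  -> %d\n", map_to_range(1.0f, lo, hi));   // 7
  return 0;
}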
98458178e0bc5d5ce545343f652cf74c797bbbf3.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2020 Mobvoi Inc. (authors: Fangjun Kuang) * Xiaomi Corporation (authors: Haowen Qiu) * * See LICENSE for clarification regarding multiple authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdlib> #include <mutex> // NOLINT #include "k2/csrc/context.h" #include "k2/csrc/log.h" #include "k2/csrc/nvtx.h" namespace k2 { static constexpr std::size_t kAlignment = 64; // TODO(haowen): most of implementations below should be updated later. class CpuContext : public Context { public: CpuContext() = default; ContextPtr GetCpuContext() override { return shared_from_this(); } DeviceType GetDeviceType() const override { return kCpu; } void *Allocate(std::size_t bytes, void **deleter_context) override { void *p = nullptr; if (bytes) { int32_t ret = posix_memalign(&p, kAlignment, bytes); K2_CHECK_EQ(ret, 0); } if (deleter_context != nullptr) *deleter_context = nullptr; return p; } bool IsCompatible(const Context &other) const override { return other.GetDeviceType() == kCpu; } void Deallocate(void *data, void * /*deleter_context*/) override { free(data); } }; class CudaContext : public Context { public: explicit CudaContext(int32_t gpu_id) : gpu_id_(gpu_id) { if (gpu_id_ != -1) { auto ret = hipSetDevice(gpu_id_); K2_CHECK_CUDA_ERROR(ret); } // TODO(haowen): choose one from available GPUs if gpu_id == -1? // and handle GPU ids from multiple machines. auto ret = hipStreamCreate(&stream_); K2_CHECK_CUDA_ERROR(ret); } ContextPtr GetCpuContext() override { return k2::GetCpuContext(); } DeviceType GetDeviceType() const override { return kCuda; } int32_t GetDeviceId() const override { return gpu_id_; } void *Allocate(std::size_t bytes, void **deleter_context) override { void *p = nullptr; if (bytes) { auto ret = hipMalloc(&p, bytes); K2_CHECK_CUDA_ERROR(ret); } if (deleter_context != nullptr) *deleter_context = nullptr; return p; } bool IsCompatible(const Context &other) const override { return other.GetDeviceType() == kCuda && other.GetDeviceId() == gpu_id_; } void Deallocate(void *data, void * /*deleter_context*/) override { auto ret = hipFree(data); K2_CHECK_CUDA_ERROR(ret); } hipStream_t GetCudaStream() const override { return g_stream_override.OverrideStream(stream_); } void Sync() const override { auto ret = hipStreamSynchronize(stream_); K2_CHECK_CUDA_ERROR(ret); } ~CudaContext() { auto ret = hipStreamDestroy(stream_); K2_CHECK_CUDA_ERROR(ret); } private: int32_t gpu_id_; hipStream_t stream_; }; ContextPtr GetCpuContext() { return std::make_shared<CpuContext>(); } ContextPtr GetCudaContext(int32_t gpu_id /*= -1*/) { static std::once_flag has_cuda_init_flag; static bool has_cuda = false; std::call_once(has_cuda_init_flag, []() { int n = 0; auto ret = hipGetDeviceCount(&n); if (ret == hipSuccess && n > 0) has_cuda = true; else K2_LOG(WARNING) << "CUDA is not available. Return a CPU context."; }); if (has_cuda) return std::make_shared<CudaContext>(gpu_id); return GetCpuContext(); } } // namespace k2
98458178e0bc5d5ce545343f652cf74c797bbbf3.cu
/** * Copyright 2020 Mobvoi Inc. (authors: Fangjun Kuang) * Xiaomi Corporation (authors: Haowen Qiu) * * See LICENSE for clarification regarding multiple authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdlib> #include <mutex> // NOLINT #include "k2/csrc/context.h" #include "k2/csrc/log.h" #include "k2/csrc/nvtx.h" namespace k2 { static constexpr std::size_t kAlignment = 64; // TODO(haowen): most of implementations below should be updated later. class CpuContext : public Context { public: CpuContext() = default; ContextPtr GetCpuContext() override { return shared_from_this(); } DeviceType GetDeviceType() const override { return kCpu; } void *Allocate(std::size_t bytes, void **deleter_context) override { void *p = nullptr; if (bytes) { int32_t ret = posix_memalign(&p, kAlignment, bytes); K2_CHECK_EQ(ret, 0); } if (deleter_context != nullptr) *deleter_context = nullptr; return p; } bool IsCompatible(const Context &other) const override { return other.GetDeviceType() == kCpu; } void Deallocate(void *data, void * /*deleter_context*/) override { free(data); } }; class CudaContext : public Context { public: explicit CudaContext(int32_t gpu_id) : gpu_id_(gpu_id) { if (gpu_id_ != -1) { auto ret = cudaSetDevice(gpu_id_); K2_CHECK_CUDA_ERROR(ret); } // TODO(haowen): choose one from available GPUs if gpu_id == -1? // and handle GPU ids from multiple machines. auto ret = cudaStreamCreate(&stream_); K2_CHECK_CUDA_ERROR(ret); } ContextPtr GetCpuContext() override { return k2::GetCpuContext(); } DeviceType GetDeviceType() const override { return kCuda; } int32_t GetDeviceId() const override { return gpu_id_; } void *Allocate(std::size_t bytes, void **deleter_context) override { void *p = nullptr; if (bytes) { auto ret = cudaMalloc(&p, bytes); K2_CHECK_CUDA_ERROR(ret); } if (deleter_context != nullptr) *deleter_context = nullptr; return p; } bool IsCompatible(const Context &other) const override { return other.GetDeviceType() == kCuda && other.GetDeviceId() == gpu_id_; } void Deallocate(void *data, void * /*deleter_context*/) override { auto ret = cudaFree(data); K2_CHECK_CUDA_ERROR(ret); } cudaStream_t GetCudaStream() const override { return g_stream_override.OverrideStream(stream_); } void Sync() const override { auto ret = cudaStreamSynchronize(stream_); K2_CHECK_CUDA_ERROR(ret); } ~CudaContext() { auto ret = cudaStreamDestroy(stream_); K2_CHECK_CUDA_ERROR(ret); } private: int32_t gpu_id_; cudaStream_t stream_; }; ContextPtr GetCpuContext() { return std::make_shared<CpuContext>(); } ContextPtr GetCudaContext(int32_t gpu_id /*= -1*/) { static std::once_flag has_cuda_init_flag; static bool has_cuda = false; std::call_once(has_cuda_init_flag, []() { int n = 0; auto ret = cudaGetDeviceCount(&n); if (ret == cudaSuccess && n > 0) has_cuda = true; else K2_LOG(WARNING) << "CUDA is not available. Return a CPU context."; }); if (has_cuda) return std::make_shared<CudaContext>(gpu_id); return GetCpuContext(); } } // namespace k2
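A standalone sketch (not the k2 API) walking through the same device and stream lifecycle CudaContext encapsulates: probe for a device as the call_once block does, set it, create a stream, allocate and use memory on that stream, then tear everything down. The CHECK_CUDA macro is a local stand-in for K2_CHECK_CUDA_ERROR.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

#define CHECK_CUDA(expr)                                                         \
  do {                                                                           \
    cudaError_t e_ = (expr);                                                     \
    if (e_ != cudaSuccess) {                                                     \
      fprintf(stderr, "%s:%d %s\n", __FILE__, __LINE__, cudaGetErrorString(e_)); \
      exit(EXIT_FAILURE);                                                        \
    }                                                                            \
  } while (0)

int main() {
  int n = 0;
  if (cudaGetDeviceCount(&n) != cudaSuccess || n == 0) {   // mirrors the call_once probe
    fprintf(stderr, "CUDA is not available\n");
    return 0;
  }
  CHECK_CUDA(cudaSetDevice(0));
  cudaStream_t stream;
  CHECK_CUDA(cudaStreamCreate(&stream));

  void* p = nullptr;
  CHECK_CUDA(cudaMalloc(&p, 1 << 20));                     // 1 MiB device buffer
  CHECK_CUDA(cudaMemsetAsync(p, 0, 1 << 20, stream));      // work issued on the stream
  CHECK_CUDA(cudaStreamSynchronize(stream));               // the Sync() equivalent

  CHECK_CUDA(cudaFree(p));
  CHECK_CUDA(cudaStreamDestroy(stream));
  return 0;
}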
0cb324177518b08b35c50ae0f9ab4a0a12ab3b23.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <stdio.h> #include <MatKernel.hpp> static const unsigned int c1 = 0xcc9e2d51; static const unsigned int c2 = 0x1b873593; static const unsigned int r1 = 15; static const unsigned int r2 = 13; static const unsigned int m = 5; static const unsigned int n = 0xe6546b64; __device__ inline unsigned int h1(unsigned int k, unsigned int hash) { k *= c1; k = (k << r1) | (k >> (32-r1)); k *= c2; hash ^= k; hash = ((hash << r2) | (hash >> (32-r2)) * m) + n; return hash; } __device__ inline unsigned int mmhash(unsigned int v1, unsigned int v2, unsigned int v3, unsigned int mod, unsigned int seed) { unsigned int hash = seed; hash = h1(v1, hash); hash = h1(v2, hash); hash = h1(v3, hash); hash ^= (hash >> 16); hash *= 0x85ebca6b; hash ^= (hash >> 13); hash *= 0xc2b2ae35; hash ^= (hash >> 16); return (hash % mod); } #define DBSIZE (8*1024) __global__ void __treePack(float *fdata, int *treenodes, int *icats, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps, int seed) { __shared__ float fbuff[DBSIZE]; __shared__ int fl[32]; int i, j, ic, ival; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { fl[tid] = fieldlens[tid]; } __syncthreads(); int vshift = fl[5]; int ishift = fl[4] + vshift; int jshift = fl[3] + ishift; int nshift = fl[2] + jshift; int tshift = fl[1] + nshift; int cmask = (1 << fl[5]) - 1; int vmask = (1 << fl[4]) - 1; int imask = (1 << fl[3]) - 1; int jmask = (1 << fl[2]) - 1; int nmask = (1 << fl[1]) - 1; int tmask = (1 << fl[0]) - 1; int nc = (DBSIZE / nrows); int itree = threadIdx.y; int jfeat = threadIdx.x; for (i = nc * blockIdx.x; i < ncols; i += nc * gridDim.x) { int ctodo = min(nc, ncols - i); for (j = tid; j < nrows * ctodo; j += blockDim.x*blockDim.y) { fbuff[j] = fdata[j + i * nrows]; } __syncthreads(); for (j = i; j < i + ctodo; j++) { // j is the column index ic = icats[j]; for (itree = threadIdx.y; itree < ntrees; itree += blockDim.y) { if (jfeat < nsamps) { int inode = treenodes[itree + j * ntrees]; int ifeat = mmhash(itree, inode, jfeat, nrows, seed); ival = (int)(fbuff[ifeat + (j - i) * nrows]); long long hdr = (((long long)(tmask & itree)) << tshift) | (((long long)(nmask & inode)) << nshift) | (((long long)(jmask & jfeat)) << jshift) | (((long long)(imask & ifeat)) << ishift) | (((long long)(vmask & ival)) << vshift) | ((long long)(ic & cmask)); out[jfeat + nsamps * (itree + ntrees * j)] = hdr; } } } __syncthreads(); } } int treePack(float *fdata, int *treenodes, int *icats, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps, int seed) { int ntx = 32 * (1 + (nsamps - 1)/32); int nty = min(1024 / ntx, ntrees); dim3 bdim(ntx, nty, 1); int nb = min(32, 1 + (ncols-1)/32); hipLaunchKernelGGL(( __treePack), dim3(nb),dim3(bdim), 0, 0, fdata, treenodes, icats, out, fieldlens, nrows, ncols, ntrees, nsamps, seed); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } class entImpty { public: static __device__ inline float fupdate(int v) { return (float)v * logf((float)max(1, v)); } static __device__ inline float fresult(float vacc, int vsum) { float vs = (float)max(1, vsum); return logf(vs) - vacc / vs; } }; class giniImpty { public: static __device__ inline float fupdate(int v) { return (float)v * (float)v; } static __device__ inline float fresult(float vacc, int vsum) { float vs = (float)max(1, vsum); return 1.0f - vacc / (vs*vs); } }; #if __CUDA_ARCH__ >= 300 
__device__ inline void accumup2(int &cnt, float &update) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_up(update, h); int tmp = __shfl_up(cnt, h); if (threadIdx.x >=h) { update += tmpx; cnt += tmp; } } } __device__ inline void accumup3(int &cnt, float &update, float &updatet) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_up(update, h); float tmpy = __shfl_up(updatet, h); int tmp = __shfl_up(cnt, h); if (threadIdx.x >=h) { update += tmpx; updatet += tmpy; cnt += tmp; } } } __device__ inline void accumdown3(int &cnt, float &update, float &updatet, int bound) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_down(update, h); float tmpy = __shfl_down(updatet, h); int tmp = __shfl_down(cnt, h); if (threadIdx.x + h <= bound) { update += tmpx; updatet += tmpy; cnt += tmp; } } } __device__ inline void minup2(float &impty, int &ival) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_up(impty, h); int tmp = __shfl_up(ival, h); if (threadIdx.x >= h && tmpx < impty) { impty = tmpx; ival = tmp; } } } __device__ inline void maxup2(int &v, int &indx) { #pragma unroll for (int h = 1; h < 32; h = h + h) { int tmpv = __shfl_up(v, h); int tmpi = __shfl_up(indx, h); if (threadIdx.x >= h && tmpv > v) { v = tmpv; indx = tmpi; } } } template<typename T> __global__ void __minImpuritya(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) { __shared__ int catcnt[DBSIZE/2]; __shared__ int cattot[DBSIZE/2]; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { catcnt[tid] = fieldlens[tid]; } __syncthreads(); int vshift = catcnt[5]; int ishift = catcnt[4] + vshift; int cmask = (1 << catcnt[5]) - 1; int vmask = (1 << catcnt[4]) - 1; int imask = (1 << catcnt[3]) - 1; __syncthreads(); int i, j, k, jc0, jc1, jlast; long long key; int cold, ctot, ctt, ctotall, cnew, cnt, ival, icat, lastival, bestival, tmp, maxcnt, imaxcnt; float update, updatet, cacc, cact, caccall, impty, minimpty, lastimpty, tmpx; for (i = threadIdx.y + blockDim.y * blockIdx.x; i < nnodes*nsamps; i += blockDim.y * gridDim.x) { // Process a group with fixed itree, inode, and ifeat jc0 = jc[i]; // The range of indices for this group jc1 = jc[i+1]; __syncthreads(); // Clear the cat counts for this group for (j = tid; j < DBSIZE/2; j += blockDim.x * blockDim.y) { catcnt[j] = 0; cattot[j] = 0; } __syncthreads(); // First pass gets counts for each category and the (ci)log(ci) sum for this block ctot = 0; cacc = 0.0f; maxcnt = -1; imaxcnt = -1; for (j = jc0; j < jc1; j += blockDim.x) { if (j + threadIdx.x < jc1) { // Read a block of (32) keys and counts key = keys[j + threadIdx.x]; // Each (x) thread handles a different input cnt = counts[j + threadIdx.x]; icat = ((int)key) & cmask; // Extract the cat id and integer value } jlast = min(31, jc1 - j - 1); for (k = 0; k <= jlast; k++) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = cattot[icat + ncats * threadIdx.y]; // i.e. 
data for item k is in thread k cnew = cold + cnt; cattot[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); accumup2(cnt,update); ctot += cnt; // Now update the total c and total ci log ci sums cacc += update; ctot = __shfl(ctot, jlast); cacc = __shfl(cacc, jlast); if (cnew > maxcnt) { // Compute and distribute the max cnt maxcnt = cnew; imaxcnt = icat; } maxup2(maxcnt, imaxcnt); maxcnt = __shfl(maxcnt, jlast); imaxcnt = __shfl(imaxcnt, jlast); } __syncthreads(); // if (threadIdx.x == 0 && i < 32) printf("cuda %d %d %f\n", i, ctot, cacc); // Second pass to compute impurity at every input point caccall = cacc; // Save the total count and (ci)log(ci) sum cact = cacc; ctotall = ctot; ctot = 0; cacc = 0.0f; lastival = -1; lastimpty = 1e7f; minimpty = 1e7f; for (j = jc0; j < jc1; j += blockDim.x) { if (j + threadIdx.x < jc1) { // Read a block of (32) keys and counts key = keys[j + threadIdx.x]; // Each (x) thread handles a different input cnt = counts[j + threadIdx.x]; icat = ((int)key) & cmask; // Extract the cat id and integer value ival = ((int)(key >> vshift)) & vmask; } jlast = min(31, jc1 - j - 1); for (k = 0; k <= jlast; k++) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = catcnt[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k ctt = cattot[icat + ncats * threadIdx.y]; cnew = cold + cnt; catcnt[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); // Compute the impurity updates for this input updatet = T::fupdate(ctt-cnew) - T::fupdate(ctt-cold); accumup3(cnt, update, updatet); ctot += cnt; // Now update the total c and total ci log ci sums cacc += update; cact += updatet; impty = T::fresult(cacc, ctot) + T::fresult(cact, ctotall-ctot); // And the impurity for this input // if (i == 0) printf("cuda pos %d impty %f icat %d cnts %d %d cacc %f %d\n", j + threadIdx.x, impty, icat, cold, cnew, cacc, ctot); tmp = __shfl_up(ival, 1); // Need the last impurity and ival in order tmpx = __shfl_up(impty, 1); // to restrict the partition feature to a value boundary if (threadIdx.x > 0) { lastival = tmp; lastimpty = tmpx; } if (ival == lastival) lastimpty = 1e7f; // Eliminate values which are not at value boundaries if (lastimpty < minimpty) { minimpty = lastimpty; bestival = ival; } minup2(minimpty,bestival); minimpty = __shfl(minimpty, jlast); // Carefully copy the last active thread to all threads, needed outside this loop bestival = __shfl(bestival, jlast); ctot = __shfl(ctot, jlast); cacc = __shfl(cacc, jlast); cact = __shfl(cact, jlast); lastival = __shfl(ival, jlast); lastimpty = __shfl(impty, jlast); } if (threadIdx.x == 0) { outv[i] = bestival; // Output the best split feature value outf[i] = ((int)(key >> ishift)) & imask; // Save the feature index outg[i] = T::fresult(caccall, ctotall) - minimpty; // And the impurity gain outc[i] = imaxcnt; } } } template<typename T> __global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) { __shared__ int catcnt[DBSIZE]; __shared__ int cattot[DBSIZE/4]; __shared__ int stott[32]; __shared__ float sacct[32]; __shared__ int slastival[64]; __shared__ int sbestival[32]; __shared__ float sminimpty[32]; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { catcnt[tid] = fieldlens[tid]; } __syncthreads(); int vshift = catcnt[5]; int ishift = catcnt[4] + vshift; int cmask = (1 << 
catcnt[5]) - 1; int vmask = (1 << catcnt[4]) - 1; int imask = (1 << catcnt[3]) - 1; __syncthreads(); int i, j, k, h, jc0, jc1, ilast, jlast; long long key; int cold, tot, ctt, tott, cnew, cnt, ncnt, tcnt, ival, icat, lastival, bestival, tmp; float update, updatet, acc, acct, impty, minimpty; for (i = blockIdx.x; i < nnodes*nsamps; i += gridDim.x) { // Process a group with fixed itree, inode, and ifeat jc0 = jc[i]; // The range of indices for this group jc1 = jc[i+1]; __syncthreads(); // Clear the cat counts and totals for (j = threadIdx.x; j < ncats; j += blockDim.x) { catcnt[j + threadIdx.y * blockDim.x] = 0; if (threadIdx.y == 0) cattot[j] = 0; } if (threadIdx.y == 0) { sminimpty[threadIdx.x] = 1e7f; sbestival[threadIdx.x] = -1; } __syncthreads(); // First pass gets counts for each category and the (ci)log(ci) sum for this entire ifeat group for (j = jc0; j < jc1; j += blockDim.x * blockDim.x) { if (j + tid < jc1) { // Read a block of keys and counts key = keys[j + tid]; cnt = counts[j + tid]; icat = ((int)key) & cmask; // Extract the cat id atomicAdd(&cattot[icat + threadIdx.y * ncats], cnt); // Update count totals } } __syncthreads(); tott = 0; // Compute total count and (c)log(c) for the entire ifeat group acct = 0; if (threadIdx.y == 0) { for (k = 0; k < ncats; k += blockDim.x) { if (k + threadIdx.x < ncats) { tcnt = cattot[k + threadIdx.x]; update = T::fupdate(tcnt); } else { tcnt = 0; update = 0; } accumup2(tcnt,update); ilast = min(31, ncats - k - 1); tcnt = __shfl(tcnt, ilast); update = __shfl(update, ilast); tott += tcnt; acct += update; } stott[threadIdx.x] = tott; sacct[threadIdx.x] = acct; } tott = stott[threadIdx.x]; // if (tid == 0 && i < 32) printf("cuda %d %d %f\n", i, tott, acct); // Main loop, work on blocks of 1024 (ideally) for (j = jc0; j < jc1; j += blockDim.x * blockDim.x) { for (k = 0; k < ncats; k += blockDim.x) { // copy cumcounts from last row of last iteration to the first row tmp = catcnt[k + threadIdx.x + (blockDim.y -1) * ncats]; __syncthreads(); if (threadIdx.y == 0) { catcnt[k + threadIdx.x] = tmp; } else { catcnt[k + threadIdx.x + threadIdx.y * ncats] = 0; } __syncthreads(); } if (j + tid < jc1) { // Read a block of keys and counts key = keys[j + tid]; cnt = counts[j + tid]; icat = ((int)key) & cmask; // Extract the cat id and integer value; ival = ((int)(key >> vshift)) & vmask; atomicAdd(&catcnt[icat + threadIdx.y * ncats], cnt); // Update count totals } jlast = min(31, jc1 - j - threadIdx.y * 32 - 1); // Save the last value in this group if (threadIdx.x == jlast) { slastival[threadIdx.y + 1] = ival; } __syncthreads(); for (k = 0; k < ncats; k += blockDim.x) { // Form the cumsum along columns of catcnts for (h = 1; h < blockDim.y; h = h + h) { if (k + threadIdx.x < ncats && blockIdx.y + h < blockDim.y) { tmp = catcnt[k + threadIdx.x + ncats * threadIdx.y]; } __syncthreads(); if (k + threadIdx.x < ncats && blockIdx.y + h < blockDim.y) { catcnt[k + threadIdx.x + ncats * (threadIdx.y + h)] += tmp; } __syncthreads(); } } tot = 0; // Local to a yblock (row) of catcnts acc = 0.0f; acct = 0.0f; for (k = 0; k < ncats; k += blockDim.x) { // Now sum within a row (yblock) if (k + threadIdx.x < ncats) { cnt = catcnt[k + threadIdx.x + threadIdx.y * ncats]; update = T::fupdate(cnt); updatet = T::fupdate(cattot[k + threadIdx.x] - cnt); } else { cnt = 0; update = 0; updatet = 0; } accumup3(cnt,update,updatet); ilast = min(31, ncats - k - 1); update = __shfl(update, ilast); updatet = __shfl(updatet, ilast); cnt = __shfl(cnt, ilast); tot += cnt; acc += update; acct += 
updatet; } __syncthreads(); // OK, we have everything needed now to compute impurity for the rows in this yblock: // tot, acc, acct at the end of the block lastival = -1; minimpty = 1e7f; ncnt = -cnt; for (k = jlast; k >= 0; k--) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = catcnt[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k ctt = cattot[icat + ncats * threadIdx.y]; cnew = cold + ncnt; catcnt[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); updatet = T::fupdate(ctt - cnew) - T::fupdate(ctt - cold); accumdown3(ncnt,update,updatet,jlast); tot += cnt; // Now update the total c and total ci log ci sums acc += update; acct += updatet; impty = T::fresult(acc, tot) + T::fresult(acct, tott - tot); // And the impurity for this input tmp = __shfl_up(ival, 1); if (threadIdx.x > 0) { // Get the last ival to check for a boundary lastival = tmp; } else { lastival = slastival[threadIdx.y]; } __syncthreads(); if (tid == 0) { tmp = slastival[33]; slastival[0] = tmp; } __syncthreads(); if (ival == lastival) impty = 1e7f; // Eliminate values which are not at value boundaries if (impty < minimpty) { minimpty = impty; bestival = ival; } minup2(minimpty,bestival); minimpty = __shfl(minimpty, jlast); bestival = __shfl(bestival, jlast); if (threadIdx.x == 0) { sminimpty[threadIdx.y] = minimpty; sbestival[threadIdx.y] = bestival; } __syncthreads(); if (threadIdx.y == 0) { minimpty = sminimpty[threadIdx.x]; bestival = sbestival[threadIdx.x]; minup2(minimpty,bestival); minimpty = __shfl(minimpty, blockDim.y - 1); bestival = __shfl(bestival, blockDim.y - 1); sminimpty[threadIdx.x] = minimpty; sbestival[threadIdx.x] = bestival; } __syncthreads(); } if (tid == 0) { outv[i] = bestival; // Output the best split feature value outf[i] = (int)((key >> ishift) & imask); // Save the feature index // outg[i] = T::fresult(sacct[0], tott) - minimpty; // And the impurity gain outg[i] = T::fresult(sacct[0], tott); // And the impurity gain } __syncthreads(); } } #else template<class T> __global__ void __minImpuritya(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) {} template<class T> __global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) {} #endif int minImpurity(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps, int impType) { // Note: its safe to round ncats up to a multiple of 32, since its only used to split shmem int ny = min(32, DBSIZE/ncats/2); dim3 tdim(32, ny, 1); int ng = min(64, nnodes*nsamps); if ((impType & 2) == 0) { if ((impType & 1) == 0) { hipLaunchKernelGGL(( __minImpuritya<entImpty>), dim3(ng),dim3(tdim), 0, 0, keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } else { hipLaunchKernelGGL(( __minImpuritya<giniImpty>), dim3(ng),dim3(tdim), 0, 0, keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } } else { if ((impType & 1) == 0) { hipLaunchKernelGGL(( __minImpurityb<entImpty>), dim3(ng),dim3(tdim), 0, 0, keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } else { hipLaunchKernelGGL(( __minImpurityb<giniImpty>), dim3(ng),dim3(tdim), 0, 0, keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } } 
fflush(stdout); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } __global__ void __findBoundaries(long long *keys, int *jc, int n, int njc, int shift) { __shared__ int dbuff[1024]; int i, j, iv, lasti; int imin = ((int)(32 * ((((long long)n) * blockIdx.x) / (gridDim.x * 32)))); int imax = min(n, ((int)(32 * ((((long long)n) * (blockIdx.x + 1)) / (gridDim.x * 32) + 1)))); int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid == 0 && blockIdx.x == 0) { jc[0] = 0; } __syncthreads(); lasti = 0x7fffffff; for (i = imin; i <= imax; i += blockDim.x * blockDim.y) { iv = njc; if (i + tid < imax) { iv = (int)(keys[i + tid] >> shift); dbuff[tid] = iv; } __syncthreads(); if (i + tid < imax || i + tid == n) { if (tid > 0) lasti = dbuff[tid - 1]; if (iv > lasti) { for (j = lasti+1; j <= iv; j++) { jc[j] = i + tid; } } if (tid == 0) { lasti = dbuff[blockDim.x * blockDim.y - 1]; } } __syncthreads(); } } int findBoundaries(long long *keys, int *jc, int n, int njc, int shift) { int ny = min(32, 1 + (n-1)/32); dim3 tdim(32, ny, 1); int ng = min(64, 1+n/32/ny); hipLaunchKernelGGL(( __findBoundaries), dim3(ng),dim3(tdim), 0, 0, keys, jc, n, njc, shift); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); return err; } template<typename T> __global__ void __mergeIndsP1(T *keys, int *cspine, T *ispine, T *vspine, int n) { __shared__ T dbuff[1024]; int i, j, itodo, doit, total; T thisval, lastval, endval, tmp; int tid = threadIdx.x + threadIdx.y * blockDim.x; int imin = (int)(((long long)n) * blockIdx.x / gridDim.x); int imax = (int)(((long long)n) * (blockIdx.x + 1) / gridDim.x); total = 0; if (tid == 0) { lastval = keys[imin]; ispine[blockIdx.x] = lastval; } for (i = imin; i < imax; i += blockDim.x * blockDim.y) { itodo = min(blockDim.x * blockDim.y, imax - i); __syncthreads(); if (i + tid < imax) { thisval = keys[i + tid]; dbuff[tid] = thisval; } __syncthreads(); if (tid > 0 && i + tid < imax) lastval = dbuff[tid - 1]; if (tid == 0) endval = dbuff[itodo-1]; __syncthreads(); if (i + tid < imax) { dbuff[tid] = (thisval == lastval) ? 
0 : 1; } __syncthreads(); for (j = 1; j < itodo; j = j + j) { doit = tid + j < itodo && (tid & ((j + j)-1)) == 0; if (doit) { tmp = dbuff[tid] + dbuff[tid + j]; } __syncthreads(); if (doit) { dbuff[tid] = tmp; } __syncthreads(); } if (tid == 0) { total += dbuff[0]; lastval = endval; } __syncthreads(); } if (tid == 0) { cspine[blockIdx.x] = total; vspine[blockIdx.x] = endval; } } template<typename T> __global__ void __fixSpine(int *cspine, T *ispine, T *vspine, int n) { __shared__ int counts[1024]; int tid = threadIdx.x + threadIdx.y * blockDim.x; int i, tmp; if (tid < n) { counts[tid] = cspine[tid]; } __syncthreads(); if (tid < n - 1) { if (ispine[tid + 1] != vspine[tid]) { counts[tid] += 1; } } __syncthreads(); for (i = 1; i < n; i = i << 1) { if (tid >= i) { tmp = counts[tid - i]; } __syncthreads(); if (tid >= i) { counts[tid] += tmp; } __syncthreads(); } if (tid == 0) { counts[n-1] += 1; } __syncthreads(); if (tid < n) { cspine[tid] = counts[tid]; } } template<typename T> __global__ void __mergeIndsP2(T *keys, T *okeys, int *counts, int *cspine, int n) { __shared__ T dbuff[1024]; __shared__ T obuff[2048]; __shared__ int ocnts[2048]; __shared__ int icnts[1024]; int i, j, itodo, doit, lastcnt, lastocnt, obase, odone, total, coff; T thisval, lastval, tmp; int tid = threadIdx.x + threadIdx.y * blockDim.x; int imin = (int)(((long long)n) * blockIdx.x / gridDim.x); int imax = (int)(((long long)n) * (blockIdx.x + 1) / gridDim.x); int nbthreads = blockDim.x * blockDim.y; if (blockIdx.x == 0) { odone = 0; } else { odone = cspine[blockIdx.x - 1]; } obase = 0; lastocnt = imin; if (tid == 0) { lastval = keys[imin]; } for (i = imin; i < imax; i += nbthreads) { itodo = min(nbthreads, imax - i); __syncthreads(); if (i + tid < imax) { // Copy a block of input data into dbuff thisval = keys[i + tid]; dbuff[tid] = thisval; } __syncthreads(); if (tid > 0 && i + tid < imax) lastval = dbuff[tid - 1]; __syncthreads(); if (i + tid < imax) { icnts[tid] = (thisval == lastval) ? 0 : 1; // Bit that indicates a change of index } __syncthreads(); for (j = 1; j < itodo; j = j << 1) { // Cumsum of these bits = where to put key doit = tid + j < itodo; if (doit) { tmp = icnts[tid] + icnts[tid + j]; } __syncthreads(); if (doit) { icnts[tid + j] = tmp; } __syncthreads(); } total = icnts[itodo-1]; __syncthreads(); if (i + tid < imax && thisval != lastval) { // and save the key/counts there in buffer memory if (tid > 0) { lastcnt = icnts[tid-1]; } else { lastcnt = 0; } obuff[obase + lastcnt] = lastval; ocnts[obase + lastcnt] = i + tid; } __syncthreads(); obase += total; if (obase >= nbthreads) { // Buffer full so flush it okeys[odone+tid] = obuff[tid]; if (tid > 0) lastocnt = ocnts[tid-1]; coff = ocnts[tid] - lastocnt; atomicAdd(&counts[odone+tid], coff); lastocnt = ocnts[nbthreads-1]; odone += nbthreads; } __syncthreads(); if (obase >= nbthreads) { // Copy top to bottom of buffer obuff[tid] = obuff[tid+nbthreads]; ocnts[tid] = ocnts[tid+nbthreads]; obase -= nbthreads; } __syncthreads(); } if (tid == itodo-1) { obuff[obase] = thisval; ocnts[obase] = i - nbthreads + tid + 1; } __syncthreads(); if (tid <= obase) { // Flush out anything that's left okeys[odone+tid] = obuff[tid]; if (tid > 0) lastocnt = ocnts[tid-1]; coff = ocnts[tid] - lastocnt; atomicAdd(&counts[odone+tid], coff); } } // // Accepts an array of int64 keys which should be sorted. Outputs an array okeys with unique copies of each key, // with corresponding counts in the *counts* array. cspine is a working storage array in GPUmem which should be // passed in. 
The size of cspine should be at least nb32 * 32 bytes with nb32 as below (maximum 2048 bytes). // Returns the length of the output in cspine[0]. // int mergeInds(long long *keys, long long *okeys, int *counts, int n, int *cspine) { hipError_t err; int nthreads = min(n, 1024); int nt32 = 32*(1 + (nthreads-1)/32); int nblocks = min(1 + (n-1)/nthreads, 64); int nb32 = 32*(1+(nblocks - 1)/32); long long *ispine = (long long *)&cspine[2*nb32]; long long *vspine = (long long *)&cspine[4*nb32]; hipLaunchKernelGGL(( __mergeIndsP1<long long>), dim3(nblocks),dim3(nt32), 0, 0, keys, cspine, ispine, vspine, n); hipDeviceSynchronize(); err = hipGetLastError(); if (err == 0) { hipLaunchKernelGGL(( __fixSpine<long long>), dim3(1),dim3(nblocks), 0, 0, cspine, ispine, vspine, nblocks); hipDeviceSynchronize(); err = hipGetLastError(); } if (err == 0) { hipLaunchKernelGGL(( __mergeIndsP2<long long>), dim3(nblocks),dim3(nt32), 0, 0, keys, okeys, counts, cspine, n); hipDeviceSynchronize(); err = hipGetLastError(); } if (err == 0) { hipMemcpy(cspine, &cspine[nblocks-1], 4, hipMemcpyDeviceToDevice); hipDeviceSynchronize(); err = hipGetLastError(); } return err; } // // Support function for mergeInds. Returns the length of the output arrays in cspine[0]. // cspine is a working storage array in GPUmem which should be passed in. // The size of cspine should be at least nb32 * 32 bytes with nb32 as below (maximum 2048 bytes). // int getMergeIndsLen(long long *keys, int n, int *cspine) { hipError_t err; int nthreads = min(n, 1024); int nt32 = 32*(1 + (nthreads-1)/32); int nblocks = min(1 + (n-1)/nthreads, 64); int nb32 = 32*(1+(nblocks - 1)/32); long long *ispine = (long long *)&cspine[2*nb32]; long long *vspine = (long long *)&cspine[4*nb32]; hipLaunchKernelGGL(( __mergeIndsP1<long long>), dim3(nblocks),dim3(nt32), 0, 0, keys, cspine, ispine, vspine, n); hipDeviceSynchronize(); err = hipGetLastError(); if (err == 0) { hipLaunchKernelGGL(( __fixSpine<long long>), dim3(1),dim3(nblocks), 0, 0, cspine, ispine, vspine, nblocks); hipDeviceSynchronize(); err = hipGetLastError(); } if (err == 0) { hipMemcpy(cspine, &cspine[nblocks-1], 4, hipMemcpyDeviceToDevice); hipDeviceSynchronize(); err = hipGetLastError(); } return err; }
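The long long keys written by __treePack above (and consumed by __minImpuritya / __minImpurityb through the same shift/mask chain) pack six fields into one 64-bit word, with fieldlens[0..5] giving the bit widths of {tree, node, jfeat, ifeat, ival, cat} from most- to least-significant bits. Below is a minimal host-side sketch of the inverse operation under that assumed convention; PackedFields and unpackTreePackKey are illustrative names introduced here, not part of the original file.

// Illustrative host-side decode of a key packed by __treePack (sketch, not original code).
struct PackedFields { int itree, inode, jfeat, ifeat, ival, icat; };

PackedFields unpackTreePackKey(long long key, const int *fieldlens) {
  int vshift = fieldlens[5];            // same shift chain as in __treePack
  int ishift = fieldlens[4] + vshift;
  int jshift = fieldlens[3] + ishift;
  int nshift = fieldlens[2] + jshift;
  int tshift = fieldlens[1] + nshift;
  PackedFields f;
  f.icat  = (int)( key            & ((1LL << fieldlens[5]) - 1));   // category id (lowest bits)
  f.ival  = (int)((key >> vshift) & ((1LL << fieldlens[4]) - 1));   // quantized feature value
  f.ifeat = (int)((key >> ishift) & ((1LL << fieldlens[3]) - 1));   // hashed feature index
  f.jfeat = (int)((key >> jshift) & ((1LL << fieldlens[2]) - 1));   // sample slot
  f.inode = (int)((key >> nshift) & ((1LL << fieldlens[1]) - 1));   // tree node
  f.itree = (int)((key >> tshift) & ((1LL << fieldlens[0]) - 1));   // tree id (highest bits)
  return f;
}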
0cb324177518b08b35c50ae0f9ab4a0a12ab3b23.cu
#include <cuda_runtime.h> #include <curand_kernel.h> #include <stdio.h> #include <MatKernel.hpp> static const unsigned int c1 = 0xcc9e2d51; static const unsigned int c2 = 0x1b873593; static const unsigned int r1 = 15; static const unsigned int r2 = 13; static const unsigned int m = 5; static const unsigned int n = 0xe6546b64; __device__ inline unsigned int h1(unsigned int k, unsigned int hash) { k *= c1; k = (k << r1) | (k >> (32-r1)); k *= c2; hash ^= k; hash = ((hash << r2) | (hash >> (32-r2)) * m) + n; return hash; } __device__ inline unsigned int mmhash(unsigned int v1, unsigned int v2, unsigned int v3, unsigned int mod, unsigned int seed) { unsigned int hash = seed; hash = h1(v1, hash); hash = h1(v2, hash); hash = h1(v3, hash); hash ^= (hash >> 16); hash *= 0x85ebca6b; hash ^= (hash >> 13); hash *= 0xc2b2ae35; hash ^= (hash >> 16); return (hash % mod); } #define DBSIZE (8*1024) __global__ void __treePack(float *fdata, int *treenodes, int *icats, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps, int seed) { __shared__ float fbuff[DBSIZE]; __shared__ int fl[32]; int i, j, ic, ival; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { fl[tid] = fieldlens[tid]; } __syncthreads(); int vshift = fl[5]; int ishift = fl[4] + vshift; int jshift = fl[3] + ishift; int nshift = fl[2] + jshift; int tshift = fl[1] + nshift; int cmask = (1 << fl[5]) - 1; int vmask = (1 << fl[4]) - 1; int imask = (1 << fl[3]) - 1; int jmask = (1 << fl[2]) - 1; int nmask = (1 << fl[1]) - 1; int tmask = (1 << fl[0]) - 1; int nc = (DBSIZE / nrows); int itree = threadIdx.y; int jfeat = threadIdx.x; for (i = nc * blockIdx.x; i < ncols; i += nc * gridDim.x) { int ctodo = min(nc, ncols - i); for (j = tid; j < nrows * ctodo; j += blockDim.x*blockDim.y) { fbuff[j] = fdata[j + i * nrows]; } __syncthreads(); for (j = i; j < i + ctodo; j++) { // j is the column index ic = icats[j]; for (itree = threadIdx.y; itree < ntrees; itree += blockDim.y) { if (jfeat < nsamps) { int inode = treenodes[itree + j * ntrees]; int ifeat = mmhash(itree, inode, jfeat, nrows, seed); ival = (int)(fbuff[ifeat + (j - i) * nrows]); long long hdr = (((long long)(tmask & itree)) << tshift) | (((long long)(nmask & inode)) << nshift) | (((long long)(jmask & jfeat)) << jshift) | (((long long)(imask & ifeat)) << ishift) | (((long long)(vmask & ival)) << vshift) | ((long long)(ic & cmask)); out[jfeat + nsamps * (itree + ntrees * j)] = hdr; } } } __syncthreads(); } } int treePack(float *fdata, int *treenodes, int *icats, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps, int seed) { int ntx = 32 * (1 + (nsamps - 1)/32); int nty = min(1024 / ntx, ntrees); dim3 bdim(ntx, nty, 1); int nb = min(32, 1 + (ncols-1)/32); __treePack<<<nb,bdim>>>(fdata, treenodes, icats, out, fieldlens, nrows, ncols, ntrees, nsamps, seed); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } class entImpty { public: static __device__ inline float fupdate(int v) { return (float)v * logf((float)max(1, v)); } static __device__ inline float fresult(float vacc, int vsum) { float vs = (float)max(1, vsum); return logf(vs) - vacc / vs; } }; class giniImpty { public: static __device__ inline float fupdate(int v) { return (float)v * (float)v; } static __device__ inline float fresult(float vacc, int vsum) { float vs = (float)max(1, vsum); return 1.0f - vacc / (vs*vs); } }; #if __CUDA_ARCH__ >= 300 __device__ inline void accumup2(int &cnt, float &update) { #pragma unroll for (int h = 1; h < 32; h = h + h) { 
float tmpx = __shfl_up(update, h); int tmp = __shfl_up(cnt, h); if (threadIdx.x >=h) { update += tmpx; cnt += tmp; } } } __device__ inline void accumup3(int &cnt, float &update, float &updatet) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_up(update, h); float tmpy = __shfl_up(updatet, h); int tmp = __shfl_up(cnt, h); if (threadIdx.x >=h) { update += tmpx; updatet += tmpy; cnt += tmp; } } } __device__ inline void accumdown3(int &cnt, float &update, float &updatet, int bound) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_down(update, h); float tmpy = __shfl_down(updatet, h); int tmp = __shfl_down(cnt, h); if (threadIdx.x + h <= bound) { update += tmpx; updatet += tmpy; cnt += tmp; } } } __device__ inline void minup2(float &impty, int &ival) { #pragma unroll for (int h = 1; h < 32; h = h + h) { float tmpx = __shfl_up(impty, h); int tmp = __shfl_up(ival, h); if (threadIdx.x >= h && tmpx < impty) { impty = tmpx; ival = tmp; } } } __device__ inline void maxup2(int &v, int &indx) { #pragma unroll for (int h = 1; h < 32; h = h + h) { int tmpv = __shfl_up(v, h); int tmpi = __shfl_up(indx, h); if (threadIdx.x >= h && tmpv > v) { v = tmpv; indx = tmpi; } } } template<typename T> __global__ void __minImpuritya(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) { __shared__ int catcnt[DBSIZE/2]; __shared__ int cattot[DBSIZE/2]; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { catcnt[tid] = fieldlens[tid]; } __syncthreads(); int vshift = catcnt[5]; int ishift = catcnt[4] + vshift; int cmask = (1 << catcnt[5]) - 1; int vmask = (1 << catcnt[4]) - 1; int imask = (1 << catcnt[3]) - 1; __syncthreads(); int i, j, k, jc0, jc1, jlast; long long key; int cold, ctot, ctt, ctotall, cnew, cnt, ival, icat, lastival, bestival, tmp, maxcnt, imaxcnt; float update, updatet, cacc, cact, caccall, impty, minimpty, lastimpty, tmpx; for (i = threadIdx.y + blockDim.y * blockIdx.x; i < nnodes*nsamps; i += blockDim.y * gridDim.x) { // Process a group with fixed itree, inode, and ifeat jc0 = jc[i]; // The range of indices for this group jc1 = jc[i+1]; __syncthreads(); // Clear the cat counts for this group for (j = tid; j < DBSIZE/2; j += blockDim.x * blockDim.y) { catcnt[j] = 0; cattot[j] = 0; } __syncthreads(); // First pass gets counts for each category and the (ci)log(ci) sum for this block ctot = 0; cacc = 0.0f; maxcnt = -1; imaxcnt = -1; for (j = jc0; j < jc1; j += blockDim.x) { if (j + threadIdx.x < jc1) { // Read a block of (32) keys and counts key = keys[j + threadIdx.x]; // Each (x) thread handles a different input cnt = counts[j + threadIdx.x]; icat = ((int)key) & cmask; // Extract the cat id and integer value } jlast = min(31, jc1 - j - 1); for (k = 0; k <= jlast; k++) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = cattot[icat + ncats * threadIdx.y]; // i.e. 
data for item k is in thread k cnew = cold + cnt; cattot[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); accumup2(cnt,update); ctot += cnt; // Now update the total c and total ci log ci sums cacc += update; ctot = __shfl(ctot, jlast); cacc = __shfl(cacc, jlast); if (cnew > maxcnt) { // Compute and distribute the max cnt maxcnt = cnew; imaxcnt = icat; } maxup2(maxcnt, imaxcnt); maxcnt = __shfl(maxcnt, jlast); imaxcnt = __shfl(imaxcnt, jlast); } __syncthreads(); // if (threadIdx.x == 0 && i < 32) printf("cuda %d %d %f\n", i, ctot, cacc); // Second pass to compute impurity at every input point caccall = cacc; // Save the total count and (ci)log(ci) sum cact = cacc; ctotall = ctot; ctot = 0; cacc = 0.0f; lastival = -1; lastimpty = 1e7f; minimpty = 1e7f; for (j = jc0; j < jc1; j += blockDim.x) { if (j + threadIdx.x < jc1) { // Read a block of (32) keys and counts key = keys[j + threadIdx.x]; // Each (x) thread handles a different input cnt = counts[j + threadIdx.x]; icat = ((int)key) & cmask; // Extract the cat id and integer value ival = ((int)(key >> vshift)) & vmask; } jlast = min(31, jc1 - j - 1); for (k = 0; k <= jlast; k++) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = catcnt[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k ctt = cattot[icat + ncats * threadIdx.y]; cnew = cold + cnt; catcnt[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); // Compute the impurity updates for this input updatet = T::fupdate(ctt-cnew) - T::fupdate(ctt-cold); accumup3(cnt, update, updatet); ctot += cnt; // Now update the total c and total ci log ci sums cacc += update; cact += updatet; impty = T::fresult(cacc, ctot) + T::fresult(cact, ctotall-ctot); // And the impurity for this input // if (i == 0) printf("cuda pos %d impty %f icat %d cnts %d %d cacc %f %d\n", j + threadIdx.x, impty, icat, cold, cnew, cacc, ctot); tmp = __shfl_up(ival, 1); // Need the last impurity and ival in order tmpx = __shfl_up(impty, 1); // to restrict the partition feature to a value boundary if (threadIdx.x > 0) { lastival = tmp; lastimpty = tmpx; } if (ival == lastival) lastimpty = 1e7f; // Eliminate values which are not at value boundaries if (lastimpty < minimpty) { minimpty = lastimpty; bestival = ival; } minup2(minimpty,bestival); minimpty = __shfl(minimpty, jlast); // Carefully copy the last active thread to all threads, needed outside this loop bestival = __shfl(bestival, jlast); ctot = __shfl(ctot, jlast); cacc = __shfl(cacc, jlast); cact = __shfl(cact, jlast); lastival = __shfl(ival, jlast); lastimpty = __shfl(impty, jlast); } if (threadIdx.x == 0) { outv[i] = bestival; // Output the best split feature value outf[i] = ((int)(key >> ishift)) & imask; // Save the feature index outg[i] = T::fresult(caccall, ctotall) - minimpty; // And the impurity gain outc[i] = imaxcnt; } } } template<typename T> __global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) { __shared__ int catcnt[DBSIZE]; __shared__ int cattot[DBSIZE/4]; __shared__ int stott[32]; __shared__ float sacct[32]; __shared__ int slastival[64]; __shared__ int sbestival[32]; __shared__ float sminimpty[32]; int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid < 6) { catcnt[tid] = fieldlens[tid]; } __syncthreads(); int vshift = catcnt[5]; int ishift = catcnt[4] + vshift; int cmask = (1 << 
catcnt[5]) - 1; int vmask = (1 << catcnt[4]) - 1; int imask = (1 << catcnt[3]) - 1; __syncthreads(); int i, j, k, h, jc0, jc1, ilast, jlast; long long key; int cold, tot, ctt, tott, cnew, cnt, ncnt, tcnt, ival, icat, lastival, bestival, tmp; float update, updatet, acc, acct, impty, minimpty; for (i = blockIdx.x; i < nnodes*nsamps; i += gridDim.x) { // Process a group with fixed itree, inode, and ifeat jc0 = jc[i]; // The range of indices for this group jc1 = jc[i+1]; __syncthreads(); // Clear the cat counts and totals for (j = threadIdx.x; j < ncats; j += blockDim.x) { catcnt[j + threadIdx.y * blockDim.x] = 0; if (threadIdx.y == 0) cattot[j] = 0; } if (threadIdx.y == 0) { sminimpty[threadIdx.x] = 1e7f; sbestival[threadIdx.x] = -1; } __syncthreads(); // First pass gets counts for each category and the (ci)log(ci) sum for this entire ifeat group for (j = jc0; j < jc1; j += blockDim.x * blockDim.x) { if (j + tid < jc1) { // Read a block of keys and counts key = keys[j + tid]; cnt = counts[j + tid]; icat = ((int)key) & cmask; // Extract the cat id atomicAdd(&cattot[icat + threadIdx.y * ncats], cnt); // Update count totals } } __syncthreads(); tott = 0; // Compute total count and (c)log(c) for the entire ifeat group acct = 0; if (threadIdx.y == 0) { for (k = 0; k < ncats; k += blockDim.x) { if (k + threadIdx.x < ncats) { tcnt = cattot[k + threadIdx.x]; update = T::fupdate(tcnt); } else { tcnt = 0; update = 0; } accumup2(tcnt,update); ilast = min(31, ncats - k - 1); tcnt = __shfl(tcnt, ilast); update = __shfl(update, ilast); tott += tcnt; acct += update; } stott[threadIdx.x] = tott; sacct[threadIdx.x] = acct; } tott = stott[threadIdx.x]; // if (tid == 0 && i < 32) printf("cuda %d %d %f\n", i, tott, acct); // Main loop, work on blocks of 1024 (ideally) for (j = jc0; j < jc1; j += blockDim.x * blockDim.x) { for (k = 0; k < ncats; k += blockDim.x) { // copy cumcounts from last row of last iteration to the first row tmp = catcnt[k + threadIdx.x + (blockDim.y -1) * ncats]; __syncthreads(); if (threadIdx.y == 0) { catcnt[k + threadIdx.x] = tmp; } else { catcnt[k + threadIdx.x + threadIdx.y * ncats] = 0; } __syncthreads(); } if (j + tid < jc1) { // Read a block of keys and counts key = keys[j + tid]; cnt = counts[j + tid]; icat = ((int)key) & cmask; // Extract the cat id and integer value; ival = ((int)(key >> vshift)) & vmask; atomicAdd(&catcnt[icat + threadIdx.y * ncats], cnt); // Update count totals } jlast = min(31, jc1 - j - threadIdx.y * 32 - 1); // Save the last value in this group if (threadIdx.x == jlast) { slastival[threadIdx.y + 1] = ival; } __syncthreads(); for (k = 0; k < ncats; k += blockDim.x) { // Form the cumsum along columns of catcnts for (h = 1; h < blockDim.y; h = h + h) { if (k + threadIdx.x < ncats && blockIdx.y + h < blockDim.y) { tmp = catcnt[k + threadIdx.x + ncats * threadIdx.y]; } __syncthreads(); if (k + threadIdx.x < ncats && blockIdx.y + h < blockDim.y) { catcnt[k + threadIdx.x + ncats * (threadIdx.y + h)] += tmp; } __syncthreads(); } } tot = 0; // Local to a yblock (row) of catcnts acc = 0.0f; acct = 0.0f; for (k = 0; k < ncats; k += blockDim.x) { // Now sum within a row (yblock) if (k + threadIdx.x < ncats) { cnt = catcnt[k + threadIdx.x + threadIdx.y * ncats]; update = T::fupdate(cnt); updatet = T::fupdate(cattot[k + threadIdx.x] - cnt); } else { cnt = 0; update = 0; updatet = 0; } accumup3(cnt,update,updatet); ilast = min(31, ncats - k - 1); update = __shfl(update, ilast); updatet = __shfl(updatet, ilast); cnt = __shfl(cnt, ilast); tot += cnt; acc += update; acct += 
updatet; } __syncthreads(); // OK, we have everything needed now to compute impurity for the rows in this yblock: // tot, acc, acct at the end of the block lastival = -1; minimpty = 1e7f; ncnt = -cnt; for (k = jlast; k >= 0; k--) { // Sequentially update counts so that each thread if (threadIdx.x == k) { // in this warp gets the old and new counts cold = catcnt[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k ctt = cattot[icat + ncats * threadIdx.y]; cnew = cold + ncnt; catcnt[icat + ncats * threadIdx.y] = cnew; } } update = T::fupdate(cnew) - T::fupdate(cold); updatet = T::fupdate(ctt - cnew) - T::fupdate(ctt - cold); accumdown3(ncnt,update,updatet,jlast); tot += cnt; // Now update the total c and total ci log ci sums acc += update; acct += updatet; impty = T::fresult(acc, tot) + T::fresult(acct, tott - tot); // And the impurity for this input tmp = __shfl_up(ival, 1); if (threadIdx.x > 0) { // Get the last ival to check for a boundary lastival = tmp; } else { lastival = slastival[threadIdx.y]; } __syncthreads(); if (tid == 0) { tmp = slastival[33]; slastival[0] = tmp; } __syncthreads(); if (ival == lastival) impty = 1e7f; // Eliminate values which are not at value boundaries if (impty < minimpty) { minimpty = impty; bestival = ival; } minup2(minimpty,bestival); minimpty = __shfl(minimpty, jlast); bestival = __shfl(bestival, jlast); if (threadIdx.x == 0) { sminimpty[threadIdx.y] = minimpty; sbestival[threadIdx.y] = bestival; } __syncthreads(); if (threadIdx.y == 0) { minimpty = sminimpty[threadIdx.x]; bestival = sbestival[threadIdx.x]; minup2(minimpty,bestival); minimpty = __shfl(minimpty, blockDim.y - 1); bestival = __shfl(bestival, blockDim.y - 1); sminimpty[threadIdx.x] = minimpty; sbestival[threadIdx.x] = bestival; } __syncthreads(); } if (tid == 0) { outv[i] = bestival; // Output the best split feature value outf[i] = (int)((key >> ishift) & imask); // Save the feature index // outg[i] = T::fresult(sacct[0], tott) - minimpty; // And the impurity gain outg[i] = T::fresult(sacct[0], tott); // And the impurity gain } __syncthreads(); } } #else template<class T> __global__ void __minImpuritya(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) {} template<class T> __global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) {} #endif int minImpurity(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps, int impType) { // Note: its safe to round ncats up to a multiple of 32, since its only used to split shmem int ny = min(32, DBSIZE/ncats/2); dim3 tdim(32, ny, 1); int ng = min(64, nnodes*nsamps); if ((impType & 2) == 0) { if ((impType & 1) == 0) { __minImpuritya<entImpty><<<ng,tdim>>>(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } else { __minImpuritya<giniImpty><<<ng,tdim>>>(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } } else { if ((impType & 1) == 0) { __minImpurityb<entImpty><<<ng,tdim>>>(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } else { __minImpurityb<giniImpty><<<ng,tdim>>>(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps); } } fflush(stdout); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } __global__ void __findBoundaries(long long *keys, int 
*jc, int n, int njc, int shift) { __shared__ int dbuff[1024]; int i, j, iv, lasti; int imin = ((int)(32 * ((((long long)n) * blockIdx.x) / (gridDim.x * 32)))); int imax = min(n, ((int)(32 * ((((long long)n) * (blockIdx.x + 1)) / (gridDim.x * 32) + 1)))); int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid == 0 && blockIdx.x == 0) { jc[0] = 0; } __syncthreads(); lasti = 0x7fffffff; for (i = imin; i <= imax; i += blockDim.x * blockDim.y) { iv = njc; if (i + tid < imax) { iv = (int)(keys[i + tid] >> shift); dbuff[tid] = iv; } __syncthreads(); if (i + tid < imax || i + tid == n) { if (tid > 0) lasti = dbuff[tid - 1]; if (iv > lasti) { for (j = lasti+1; j <= iv; j++) { jc[j] = i + tid; } } if (tid == 0) { lasti = dbuff[blockDim.x * blockDim.y - 1]; } } __syncthreads(); } } int findBoundaries(long long *keys, int *jc, int n, int njc, int shift) { int ny = min(32, 1 + (n-1)/32); dim3 tdim(32, ny, 1); int ng = min(64, 1+n/32/ny); __findBoundaries<<<ng,tdim>>>(keys, jc, n, njc, shift); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); return err; } template<typename T> __global__ void __mergeIndsP1(T *keys, int *cspine, T *ispine, T *vspine, int n) { __shared__ T dbuff[1024]; int i, j, itodo, doit, total; T thisval, lastval, endval, tmp; int tid = threadIdx.x + threadIdx.y * blockDim.x; int imin = (int)(((long long)n) * blockIdx.x / gridDim.x); int imax = (int)(((long long)n) * (blockIdx.x + 1) / gridDim.x); total = 0; if (tid == 0) { lastval = keys[imin]; ispine[blockIdx.x] = lastval; } for (i = imin; i < imax; i += blockDim.x * blockDim.y) { itodo = min(blockDim.x * blockDim.y, imax - i); __syncthreads(); if (i + tid < imax) { thisval = keys[i + tid]; dbuff[tid] = thisval; } __syncthreads(); if (tid > 0 && i + tid < imax) lastval = dbuff[tid - 1]; if (tid == 0) endval = dbuff[itodo-1]; __syncthreads(); if (i + tid < imax) { dbuff[tid] = (thisval == lastval) ? 
0 : 1; } __syncthreads(); for (j = 1; j < itodo; j = j + j) { doit = tid + j < itodo && (tid & ((j + j)-1)) == 0; if (doit) { tmp = dbuff[tid] + dbuff[tid + j]; } __syncthreads(); if (doit) { dbuff[tid] = tmp; } __syncthreads(); } if (tid == 0) { total += dbuff[0]; lastval = endval; } __syncthreads(); } if (tid == 0) { cspine[blockIdx.x] = total; vspine[blockIdx.x] = endval; } } template<typename T> __global__ void __fixSpine(int *cspine, T *ispine, T *vspine, int n) { __shared__ int counts[1024]; int tid = threadIdx.x + threadIdx.y * blockDim.x; int i, tmp; if (tid < n) { counts[tid] = cspine[tid]; } __syncthreads(); if (tid < n - 1) { if (ispine[tid + 1] != vspine[tid]) { counts[tid] += 1; } } __syncthreads(); for (i = 1; i < n; i = i << 1) { if (tid >= i) { tmp = counts[tid - i]; } __syncthreads(); if (tid >= i) { counts[tid] += tmp; } __syncthreads(); } if (tid == 0) { counts[n-1] += 1; } __syncthreads(); if (tid < n) { cspine[tid] = counts[tid]; } } template<typename T> __global__ void __mergeIndsP2(T *keys, T *okeys, int *counts, int *cspine, int n) { __shared__ T dbuff[1024]; __shared__ T obuff[2048]; __shared__ int ocnts[2048]; __shared__ int icnts[1024]; int i, j, itodo, doit, lastcnt, lastocnt, obase, odone, total, coff; T thisval, lastval, tmp; int tid = threadIdx.x + threadIdx.y * blockDim.x; int imin = (int)(((long long)n) * blockIdx.x / gridDim.x); int imax = (int)(((long long)n) * (blockIdx.x + 1) / gridDim.x); int nbthreads = blockDim.x * blockDim.y; if (blockIdx.x == 0) { odone = 0; } else { odone = cspine[blockIdx.x - 1]; } obase = 0; lastocnt = imin; if (tid == 0) { lastval = keys[imin]; } for (i = imin; i < imax; i += nbthreads) { itodo = min(nbthreads, imax - i); __syncthreads(); if (i + tid < imax) { // Copy a block of input data into dbuff thisval = keys[i + tid]; dbuff[tid] = thisval; } __syncthreads(); if (tid > 0 && i + tid < imax) lastval = dbuff[tid - 1]; __syncthreads(); if (i + tid < imax) { icnts[tid] = (thisval == lastval) ? 0 : 1; // Bit that indicates a change of index } __syncthreads(); for (j = 1; j < itodo; j = j << 1) { // Cumsum of these bits = where to put key doit = tid + j < itodo; if (doit) { tmp = icnts[tid] + icnts[tid + j]; } __syncthreads(); if (doit) { icnts[tid + j] = tmp; } __syncthreads(); } total = icnts[itodo-1]; __syncthreads(); if (i + tid < imax && thisval != lastval) { // and save the key/counts there in buffer memory if (tid > 0) { lastcnt = icnts[tid-1]; } else { lastcnt = 0; } obuff[obase + lastcnt] = lastval; ocnts[obase + lastcnt] = i + tid; } __syncthreads(); obase += total; if (obase >= nbthreads) { // Buffer full so flush it okeys[odone+tid] = obuff[tid]; if (tid > 0) lastocnt = ocnts[tid-1]; coff = ocnts[tid] - lastocnt; atomicAdd(&counts[odone+tid], coff); lastocnt = ocnts[nbthreads-1]; odone += nbthreads; } __syncthreads(); if (obase >= nbthreads) { // Copy top to bottom of buffer obuff[tid] = obuff[tid+nbthreads]; ocnts[tid] = ocnts[tid+nbthreads]; obase -= nbthreads; } __syncthreads(); } if (tid == itodo-1) { obuff[obase] = thisval; ocnts[obase] = i - nbthreads + tid + 1; } __syncthreads(); if (tid <= obase) { // Flush out anything that's left okeys[odone+tid] = obuff[tid]; if (tid > 0) lastocnt = ocnts[tid-1]; coff = ocnts[tid] - lastocnt; atomicAdd(&counts[odone+tid], coff); } } // // Accepts an array of int64 keys which should be sorted. Outputs an array okeys with unique copies of each key, // with corresponding counts in the *counts* array. cspine is a working storage array in GPUmem which should be // passed in. 
The size of cspine should be at least nb32 * 32 bytes with nb32 as below (maximum 2048 bytes). // Returns the length of the output in cspine[0]. // int mergeInds(long long *keys, long long *okeys, int *counts, int n, int *cspine) { cudaError_t err; int nthreads = min(n, 1024); int nt32 = 32*(1 + (nthreads-1)/32); int nblocks = min(1 + (n-1)/nthreads, 64); int nb32 = 32*(1+(nblocks - 1)/32); long long *ispine = (long long *)&cspine[2*nb32]; long long *vspine = (long long *)&cspine[4*nb32]; __mergeIndsP1<long long><<<nblocks,nt32>>>(keys, cspine, ispine, vspine, n); cudaDeviceSynchronize(); err = cudaGetLastError(); if (err == 0) { __fixSpine<long long><<<1,nblocks>>>(cspine, ispine, vspine, nblocks); cudaDeviceSynchronize(); err = cudaGetLastError(); } if (err == 0) { __mergeIndsP2<long long><<<nblocks,nt32>>>(keys, okeys, counts, cspine, n); cudaDeviceSynchronize(); err = cudaGetLastError(); } if (err == 0) { cudaMemcpy(cspine, &cspine[nblocks-1], 4, cudaMemcpyDeviceToDevice); cudaDeviceSynchronize(); err = cudaGetLastError(); } return err; } // // Support function for mergeInds. Returns the length of the output arrays in cspine[0]. // cspine is a working storage array in GPUmem which should be passed in. // The size of cspine should be at least nb32 * 32 bytes with nb32 as below (maximum 2048 bytes). // int getMergeIndsLen(long long *keys, int n, int *cspine) { cudaError_t err; int nthreads = min(n, 1024); int nt32 = 32*(1 + (nthreads-1)/32); int nblocks = min(1 + (n-1)/nthreads, 64); int nb32 = 32*(1+(nblocks - 1)/32); long long *ispine = (long long *)&cspine[2*nb32]; long long *vspine = (long long *)&cspine[4*nb32]; __mergeIndsP1<long long><<<nblocks,nt32>>>(keys, cspine, ispine, vspine, n); cudaDeviceSynchronize(); err = cudaGetLastError(); if (err == 0) { __fixSpine<long long><<<1,nblocks>>>(cspine, ispine, vspine, nblocks); cudaDeviceSynchronize(); err = cudaGetLastError(); } if (err == 0) { cudaMemcpy(cspine, &cspine[nblocks-1], 4, cudaMemcpyDeviceToDevice); cudaDeviceSynchronize(); err = cudaGetLastError(); } return err; }
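The .hip and .cu files in this record differ only in runtime spelling: hipify rewrites each <<<grid,block>>> launch into hipLaunchKernelGGL and renames the cuda* runtime calls to their hip* counterparts, while the kernel bodies are untouched. A minimal self-contained sketch of that mapping follows, assuming __HIPCC__ distinguishes the HIP build; dummyKernel and launchDummy are illustrative stand-ins, not code from either file.

// Sketch only: the same launch written in both dialects, selected at compile time.
#ifdef __HIPCC__
#include <hip/hip_runtime.h>
#else
#include <cuda_runtime.h>
#endif

__global__ void dummyKernel(int *out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = i;
}

int launchDummy(int *d_out, int n) {
  dim3 block(256), grid((n + 255) / 256);
#ifdef __HIPCC__
  // HIP spelling, as emitted by hipify in the .hip file above.
  hipLaunchKernelGGL(dummyKernel, grid, block, 0 /*shared mem*/, 0 /*stream*/, d_out, n);
  hipDeviceSynchronize();
  return (int)hipGetLastError();
#else
  // Original CUDA spelling from the .cu file above.
  dummyKernel<<<grid, block>>>(d_out, n);
  cudaDeviceSynchronize();
  return (int)cudaGetLastError();
#endif
}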
9b04cd98e6329308f31e6aecde0d367c473bee12.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2018,2019,2020,2021 Sony Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/interpolate.hpp> #include <nbla/cuda/utils/atomic_add.cuh> #include <nbla/cuda/utils/nd_index.cuh> #include <nbla/variable.hpp> namespace nbla { inline float compute_scale(int isize, int osize, bool align_corners) { return (osize <= 1) ? 0.0f : (align_corners ? float(isize - 1) / (osize - 1) : float(isize) / osize); } inline float compute_scale_for_nn(int isize, int osize, bool align_corners, bool half_pixel_for_nn) { return half_pixel_for_nn ? isize / static_cast<float>(osize) : compute_scale(isize, osize, align_corners); } __device__ __forceinline__ float get_src_index(float scale, int dst_index, bool half_pixel) { return half_pixel ? fmaxf(0.0f, scale * (float(dst_index) + 0.5f) - 0.5f) : scale * dst_index; } __device__ __forceinline__ float get_src_index_for_nn(float scale, int dst_index, bool half_pixel, bool half_pixel_for_nn) { return half_pixel_for_nn ? scale * (dst_index + 0.5f) : get_src_index(scale, dst_index, half_pixel); } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_1d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int ishape, const int istride, const int ostride, const float sx, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_2d(index, ostride); const auto oc = channel_last ? nd_index.y : 0; const auto ox = nd_index.x; const auto iw = ishape; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_x1 = make_int2(x1, oc); const auto nd_idx_x2 = make_int2(x2, oc); const auto idx_lx0 = device_2d_to_flat(nd_idx_x1, istride); const auto idx_lx1 = device_2d_to_flat(nd_idx_x2, istride); for (; outer_size--; src += src_inner_size, dst += dst_inner_size) { const T val0 = lx0 * src[idx_lx0]; const T val1 = lx1 * src[idx_lx1]; dst[index] = val0 + val1; } } } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_2d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int2 ishape, const int2 istride, const int2 ostride, const float sx, const float sy, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_3d(index, ostride); const auto oc = channel_last ? 
nd_index.z : 0; const auto oy = nd_index.x; const auto ox = nd_index.y; const auto ih = ishape.x; const auto iw = ishape.y; const auto fy = get_src_index(sy, oy, half_pixel); const auto y1 = static_cast<int>(fy); const auto y2 = min(y1 + 1, ih - 1); const auto ly1 = static_cast<T>(fy - y1); const auto ly0 = static_cast<T>(1) - ly1; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_y1x1 = make_int3(y1, x1, oc); const auto nd_idx_y1x2 = make_int3(y1, x2, oc); const auto nd_idx_y2x1 = make_int3(y2, x1, oc); const auto nd_idx_y2x2 = make_int3(y2, x2, oc); const auto idx_ly0x0 = device_3d_to_flat(nd_idx_y1x1, istride); const auto idx_ly0x1 = device_3d_to_flat(nd_idx_y1x2, istride); const auto idx_ly1x0 = device_3d_to_flat(nd_idx_y2x1, istride); const auto idx_ly1x1 = device_3d_to_flat(nd_idx_y2x2, istride); for (; outer_size--; src += src_inner_size, dst += dst_inner_size) { const T val0 = lx0 * src[idx_ly0x0]; const T val1 = lx1 * src[idx_ly0x1]; const T val2 = lx0 * src[idx_ly1x0]; const T val3 = lx1 * src[idx_ly1x1]; dst[index] = ly0 * (val0 + val1) + ly1 * (val2 + val3); } } } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_3d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int3 ishape, const int3 istride, const int3 ostride, const float sx, const float sy, const float sz, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_4d(index, ostride); const auto oc = channel_last ? nd_index.w : 0; const auto oz = nd_index.x; const auto oy = nd_index.y; const auto ox = nd_index.z; const auto id = ishape.x; const auto ih = ishape.y; const auto iw = ishape.z; const auto fz = get_src_index(sz, oz, half_pixel); const auto z1 = static_cast<int>(fz); const auto z2 = min(z1 + 1, id - 1); const auto lz1 = static_cast<T>(fz - z1); const auto lz0 = static_cast<T>(1) - lz1; const auto fy = get_src_index(sy, oy, half_pixel); const auto y1 = static_cast<int>(fy); const auto y2 = min(y1 + 1, ih - 1); const auto ly1 = static_cast<T>(fy - y1); const auto ly0 = static_cast<T>(1) - ly1; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_z1y1x1 = make_int4(z1, y1, x1, oc); const auto nd_idx_z1y1x2 = make_int4(z1, y1, x2, oc); const auto nd_idx_z1y2x1 = make_int4(z1, y2, x1, oc); const auto nd_idx_z1y2x2 = make_int4(z1, y2, x2, oc); const auto nd_idx_z2y1x1 = make_int4(z2, y1, x1, oc); const auto nd_idx_z2y1x2 = make_int4(z2, y1, x2, oc); const auto nd_idx_z2y2x1 = make_int4(z2, y2, x1, oc); const auto nd_idx_z2y2x2 = make_int4(z2, y2, x2, oc); const auto idx_lz0y0x0 = device_4d_to_flat(nd_idx_z1y1x1, istride); const auto idx_lz0y0x1 = device_4d_to_flat(nd_idx_z1y1x2, istride); const auto idx_lz0y1x0 = device_4d_to_flat(nd_idx_z1y2x1, istride); const auto idx_lz0y1x1 = device_4d_to_flat(nd_idx_z1y2x2, istride); const auto idx_lz1y0x0 = device_4d_to_flat(nd_idx_z2y1x1, istride); const auto idx_lz1y0x1 = device_4d_to_flat(nd_idx_z2y1x2, istride); const auto idx_lz1y1x0 = device_4d_to_flat(nd_idx_z2y2x1, istride); const auto idx_lz1y1x1 = device_4d_to_flat(nd_idx_z2y2x2, istride); for (; outer_size--; src += src_inner_size, dst 
+= dst_inner_size) { const T val0 = lx0 * src[idx_lz0y0x0]; const T val1 = lx1 * src[idx_lz0y0x1]; const T val2 = lx0 * src[idx_lz0y1x0]; const T val3 = lx1 * src[idx_lz0y1x1]; const T val4 = lx0 * src[idx_lz1y0x0]; const T val5 = lx1 * src[idx_lz1y0x1]; const T val6 = lx0 * src[idx_lz1y1x0]; const T val7 = lx1 * src[idx_lz1y1x1]; const T val8 = ly0 * (val0 + val1) + ly1 * (val2 + val3); const T val9 = ly0 * (val4 + val5) + ly1 * (val6 + val7); dst[index] = lz0 * val8 + lz1 * val9; } } } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_1d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int ishape, const int istride, const int ostride, const float sx, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_2d(index, ostride); const auto oc = channel_last ? nd_index.y : 0; const auto ox = nd_index.x; const auto iw = ishape; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_x1 = make_int2(x1, oc); const auto nd_idx_x2 = make_int2(x2, oc); const auto idx_lx1 = device_2d_to_flat(nd_idx_x1, istride); const auto idx_lx2 = device_2d_to_flat(nd_idx_x2, istride); for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) { const T g = g_y[index]; atomic_add(g_x + idx_lx1, lx0 * g); atomic_add(g_x + idx_lx2, lx1 * g); } } } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_2d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int2 ishape, const int2 istride, const int2 ostride, const float sx, const float sy, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_3d(index, ostride); const auto oc = channel_last ? 
nd_index.z : 0; const auto oy = nd_index.x; const auto ox = nd_index.y; const auto ih = ishape.x; const auto iw = ishape.y; const auto fy = get_src_index(sy, oy, half_pixel); const auto y1 = static_cast<int>(fy); const auto y2 = min(y1 + 1, ih - 1); const auto ly1 = static_cast<T>(fy - y1); const auto ly0 = static_cast<T>(1) - ly1; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_y1x1 = make_int3(y1, x1, oc); const auto nd_idx_y1x2 = make_int3(y1, x2, oc); const auto nd_idx_y2x1 = make_int3(y2, x1, oc); const auto nd_idx_y2x2 = make_int3(y2, x2, oc); const auto idx_ly0x0 = device_3d_to_flat(nd_idx_y1x1, istride); const auto idx_ly0x1 = device_3d_to_flat(nd_idx_y1x2, istride); const auto idx_ly1x0 = device_3d_to_flat(nd_idx_y2x1, istride); const auto idx_ly1x1 = device_3d_to_flat(nd_idx_y2x2, istride); for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) { const T g = g_y[index]; atomic_add(g_x + idx_ly0x0, ly0 * lx0 * g); atomic_add(g_x + idx_ly0x1, ly0 * lx1 * g); atomic_add(g_x + idx_ly1x0, ly1 * lx0 * g); atomic_add(g_x + idx_ly1x1, ly1 * lx1 * g); } } } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_3d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int3 ishape, const int3 istride, const int3 ostride, const float sx, const float sy, const float sz, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_4d(index, ostride); const auto oc = channel_last ? nd_index.w : 0; const auto oz = nd_index.x; const auto oy = nd_index.y; const auto ox = nd_index.z; const auto id = ishape.x; const auto ih = ishape.y; const auto iw = ishape.z; const auto fz = get_src_index(sz, oz, half_pixel); const auto z1 = static_cast<int>(fz); const auto z2 = min(z1 + 1, id - 1); const auto lz1 = static_cast<T>(fz - z1); const auto lz0 = static_cast<T>(1) - lz1; const auto fy = get_src_index(sy, oy, half_pixel); const auto y1 = static_cast<int>(fy); const auto y2 = min(y1 + 1, ih - 1); const auto ly1 = static_cast<T>(fy - y1); const auto ly0 = static_cast<T>(1) - ly1; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_z1y1x1 = make_int4(z1, y1, x1, oc); const auto nd_idx_z1y1x2 = make_int4(z1, y1, x2, oc); const auto nd_idx_z1y2x1 = make_int4(z1, y2, x1, oc); const auto nd_idx_z1y2x2 = make_int4(z1, y2, x2, oc); const auto nd_idx_z2y1x1 = make_int4(z2, y1, x1, oc); const auto nd_idx_z2y1x2 = make_int4(z2, y1, x2, oc); const auto nd_idx_z2y2x1 = make_int4(z2, y2, x1, oc); const auto nd_idx_z2y2x2 = make_int4(z2, y2, x2, oc); const auto idx_lz0y0x0 = device_4d_to_flat(nd_idx_z1y1x1, istride); const auto idx_lz0y0x1 = device_4d_to_flat(nd_idx_z1y1x2, istride); const auto idx_lz0y1x0 = device_4d_to_flat(nd_idx_z1y2x1, istride); const auto idx_lz0y1x1 = device_4d_to_flat(nd_idx_z1y2x2, istride); const auto idx_lz1y0x0 = device_4d_to_flat(nd_idx_z2y1x1, istride); const auto idx_lz1y0x1 = device_4d_to_flat(nd_idx_z2y1x2, istride); const auto idx_lz1y1x0 = device_4d_to_flat(nd_idx_z2y2x1, istride); const auto idx_lz1y1x1 = device_4d_to_flat(nd_idx_z2y2x2, istride); for (; outer_size--; g_x += 
g_x_inner_size, g_y += g_y_inner_size) { const T g = g_y[index]; atomic_add(g_x + idx_lz0y0x0, lz0 * ly0 * lx0 * g); atomic_add(g_x + idx_lz0y0x1, lz0 * ly0 * lx1 * g); atomic_add(g_x + idx_lz0y1x0, lz0 * ly1 * lx0 * g); atomic_add(g_x + idx_lz0y1x1, lz0 * ly1 * lx1 * g); atomic_add(g_x + idx_lz1y0x0, lz1 * ly0 * lx0 * g); atomic_add(g_x + idx_lz1y0x1, lz1 * ly0 * lx1 * g); atomic_add(g_x + idx_lz1y1x0, lz1 * ly1 * lx0 * g); atomic_add(g_x + idx_lz1y1x1, lz1 * ly1 * lx1 * g); } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_1d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int ishape, const int istride, const int ostride, const float sx, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_2d(index, ostride); const auto oc = channel_last ? nd_index.y : 0; const auto ox = nd_index.x; const auto iw = ishape; const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_x = make_int2(ix, oc); const auto idx_x = device_2d_to_flat(nd_idx_x, istride); for (; outer_size--; src += src_inner_size, dst += dst_inner_size) { dst[index] = src[idx_x]; } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_2d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int2 ishape, const int2 istride, const int2 ostride, const float sx, const float sy, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_3d(index, ostride); const auto oc = channel_last ? nd_index.z : 0; const auto oy = nd_index.x; const auto ox = nd_index.y; const auto ih = ishape.x; const auto iw = ishape.y; const auto fy = get_src_index_for_nn(sy, oy, half_pixel, half_pixel_for_nn); const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto iy = min(static_cast<int>(fy), ih - 1); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_yx = make_int3(iy, ix, oc); const auto idx_yx = device_3d_to_flat(nd_idx_yx, istride); for (; outer_size--; src += src_inner_size, dst += dst_inner_size) { dst[index] = src[idx_yx]; } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_3d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int3 ishape, const int3 istride, const int3 ostride, const float sx, const float sy, const float sz, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_4d(index, ostride); const auto oc = channel_last ? 
nd_index.w : 0; const auto oz = nd_index.x; const auto oy = nd_index.y; const auto ox = nd_index.z; const auto id = ishape.x; const auto ih = ishape.y; const auto iw = ishape.z; const auto fz = get_src_index_for_nn(sz, oz, half_pixel, half_pixel_for_nn); const auto fy = get_src_index_for_nn(sy, oy, half_pixel, half_pixel_for_nn); const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto iz = min(static_cast<int>(fz), id - 1); const auto iy = min(static_cast<int>(fy), ih - 1); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_zyx = make_int4(iz, iy, ix, oc); const auto idx_zyx = device_4d_to_flat(nd_idx_zyx, istride); for (; outer_size--; src += src_inner_size, dst += dst_inner_size) { dst[index] = src[idx_zyx]; } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_1d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int ishape, const int istride, const int ostride, const float sx, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_2d(index, ostride); const auto oc = channel_last ? nd_index.y : 0; const auto ox = nd_index.x; const auto iw = ishape; const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_x = make_int2(ix, oc); const auto idx_x = device_2d_to_flat(nd_idx_x, istride); for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) { atomic_add(g_x + idx_x, g_y[index]); } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_2d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int2 ishape, const int2 istride, const int2 ostride, const float sx, const float sy, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_3d(index, ostride); const auto oc = channel_last ? nd_index.z : 0; const auto oy = nd_index.x; const auto ox = nd_index.y; const auto ih = ishape.x; const auto iw = ishape.y; const auto fy = get_src_index_for_nn(sy, oy, half_pixel, half_pixel_for_nn); const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto iy = min(static_cast<int>(fy), ih - 1); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_yx = make_int3(iy, ix, oc); const auto idx_yx = device_3d_to_flat(nd_idx_yx, istride); for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) { atomic_add(g_x + idx_yx, g_y[index]); } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_3d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int3 ishape, const int3 istride, const int3 ostride, const float sx, const float sy, const float sz, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_4d(index, ostride); const auto oc = channel_last ? 
nd_index.w : 0; const auto oz = nd_index.x; const auto oy = nd_index.y; const auto ox = nd_index.z; const auto id = ishape.x; const auto ih = ishape.y; const auto iw = ishape.z; const auto fz = get_src_index_for_nn(sz, oz, half_pixel, half_pixel_for_nn); const auto fy = get_src_index_for_nn(sy, oy, half_pixel, half_pixel_for_nn); const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto iz = min(static_cast<int>(fz), id - 1); const auto iy = min(static_cast<int>(fy), ih - 1); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_zyx = make_int4(iz, iy, ix, oc); const auto idx_zyx = device_4d_to_flat(nd_idx_zyx, istride); for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) { atomic_add(g_x + idx_zyx, g_y[index]); } } } template <typename T> void InterpolateCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); auto src = inputs[0]->get_data_pointer<Tcu>(this->ctx_); auto dst = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true); const int ndim = inputs[0]->ndim(); if (this->output_size_.size() == 1) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int src_inner_size = this->channel_last_ ? ic * iw : iw; const int dst_inner_size = this->channel_last_ ? oc * ow : ow; const int outer_size = inputs[0]->size() / src_inner_size; const auto ishape = iw; const auto istride = this->channel_last_ ? ic : 1; const auto ostride = this->channel_last_ ? oc : 1; if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); auto kernel = this->channel_last_ ? kernel_linear_interpolate_1d<Tcu, true> : kernel_linear_interpolate_1d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? kernel_nearest_interpolate_1d<Tcu, true> : kernel_nearest_interpolate_1d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, this->half_pixel_, this->half_pixel_for_nn_); } } else if (this->output_size_.size() == 2) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 3]; const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 3]; const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int src_inner_size = this->channel_last_ ? ic * iw * ih : iw * ih; const int dst_inner_size = this->channel_last_ ? oc * ow * oh : ow * oh; const int outer_size = inputs[0]->size() / src_inner_size; const auto ishape = make_int2(ih, iw); const auto istride = this->channel_last_ ? 
make_int2(iw * ic, ic) : make_int2(iw, 1); const auto ostride = this->channel_last_ ? make_int2(ow * oc, oc) : make_int2(ow, 1); if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); const float sy = compute_scale(ih, oh, this->align_corners_); auto kernel = this->channel_last_ ? kernel_linear_interpolate_2d<Tcu, true> : kernel_linear_interpolate_2d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, sy, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); const float sy = compute_scale_for_nn(ih, oh, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? kernel_nearest_interpolate_2d<Tcu, true> : kernel_nearest_interpolate_2d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, sy, this->half_pixel_, this->half_pixel_for_nn_); } } else if (this->output_size_.size() == 3) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 4]; const int id = this->channel_last_ ? inputs[0]->shape()[ndim - 4] : inputs[0]->shape()[ndim - 3]; const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 4]; const int od = this->channel_last_ ? outputs[0]->shape()[ndim - 4] : outputs[0]->shape()[ndim - 3]; const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int src_inner_size = this->channel_last_ ? ic * iw * ih * id : iw * ih * id; const int dst_inner_size = this->channel_last_ ? oc * ow * oh * od : ow * oh * od; const int outer_size = inputs[0]->size() / src_inner_size; const auto ishape = make_int3(id, ih, iw); const auto istride = this->channel_last_ ? make_int3(ih * iw * ic, iw * ic, ic) : make_int3(ih * iw, iw, 1); const auto ostride = this->channel_last_ ? make_int3(oh * ow * oc, ow * oc, oc) : make_int3(oh * ow, ow, 1); if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); const float sy = compute_scale(ih, oh, this->align_corners_); const float sz = compute_scale(id, od, this->align_corners_); auto kernel = this->channel_last_ ? kernel_linear_interpolate_3d<Tcu, true> : kernel_linear_interpolate_3d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, sy, sz, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); const float sy = compute_scale_for_nn(ih, oh, this->align_corners_, this->half_pixel_for_nn_); const float sz = compute_scale_for_nn(id, od, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? 
kernel_nearest_interpolate_3d<Tcu, true> : kernel_nearest_interpolate_3d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, sy, sz, this->half_pixel_, this->half_pixel_for_nn_); } } } template <typename T> void InterpolateCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!(propagate_down[0])) { return; } cuda_set_device(this->device_); auto g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); auto g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, false); const int ndim = inputs[0]->ndim(); if (this->output_size_.size() == 1) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int g_x_inner_size = this->channel_last_ ? ic * iw : iw; const int g_y_inner_size = this->channel_last_ ? oc * ow : ow; const int outer_size = inputs[0]->size() / g_x_inner_size; const auto ishape = iw; const auto istride = this->channel_last_ ? ic : 1; const auto ostride = this->channel_last_ ? oc : 1; if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); auto kernel = this->channel_last_ ? kernel_linear_interpolate_1d_backward<Tcu, true> : kernel_linear_interpolate_1d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? kernel_nearest_interpolate_1d_backward<Tcu, true> : kernel_nearest_interpolate_1d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, this->half_pixel_, this->half_pixel_for_nn_); } } else if (this->output_size_.size() == 2) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 3]; const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 3]; const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int g_x_inner_size = this->channel_last_ ? ic * iw * ih : iw * ih; const int g_y_inner_size = this->channel_last_ ? oc * ow * oh : ow * oh; const int outer_size = inputs[0]->size() / g_x_inner_size; const auto ishape = make_int2(ih, iw); const auto istride = this->channel_last_ ? make_int2(iw * ic, ic) : make_int2(iw, 1); const auto ostride = this->channel_last_ ? make_int2(ow * oc, oc) : make_int2(ow, 1); if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); const float sy = compute_scale(ih, oh, this->align_corners_); auto kernel = this->channel_last_ ? 
kernel_linear_interpolate_2d_backward<Tcu, true> : kernel_linear_interpolate_2d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, sy, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); const float sy = compute_scale_for_nn(ih, oh, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? kernel_nearest_interpolate_2d_backward<Tcu, true> : kernel_nearest_interpolate_2d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, sy, this->half_pixel_, this->half_pixel_for_nn_); } } else if (this->output_size_.size() == 3) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 4]; const int id = this->channel_last_ ? inputs[0]->shape()[ndim - 4] : inputs[0]->shape()[ndim - 3]; const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 4]; const int od = this->channel_last_ ? outputs[0]->shape()[ndim - 4] : outputs[0]->shape()[ndim - 3]; const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int g_x_inner_size = this->channel_last_ ? ic * iw * ih * id : iw * ih * id; const int g_y_inner_size = this->channel_last_ ? oc * ow * oh * od : ow * oh * od; const int outer_size = inputs[0]->size() / g_x_inner_size; const auto ishape = make_int3(id, ih, iw); const auto istride = this->channel_last_ ? make_int3(ih * iw * ic, iw * ic, ic) : make_int3(ih * iw, iw, 1); const auto ostride = this->channel_last_ ? make_int3(oh * ow * oc, ow * oc, oc) : make_int3(oh * ow, ow, 1); if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); const float sy = compute_scale(ih, oh, this->align_corners_); const float sz = compute_scale(id, od, this->align_corners_); auto kernel = this->channel_last_ ? kernel_linear_interpolate_3d_backward<Tcu, true> : kernel_linear_interpolate_3d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, sy, sz, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); const float sy = compute_scale_for_nn(ih, oh, this->align_corners_, this->half_pixel_for_nn_); const float sz = compute_scale_for_nn(id, od, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? kernel_nearest_interpolate_3d_backward<Tcu, true> : kernel_nearest_interpolate_3d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, sy, sz, this->half_pixel_, this->half_pixel_for_nn_); } } } } // namespace nbla
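The linear-interpolation backward kernels above scatter each incoming output gradient into the neighbouring source cells using the same weights the forward pass uses for blending, and they do it with atomic_add because several output pixels can land on the same source cell. Below is a minimal host-side sketch of the 2D case, assuming a flat row-major y * iw + x layout; the function and variable names are illustrative and not part of the library.

#include <algorithm>
#include <vector>

// CPU reference of the 2D bilinear gradient scatter performed by
// kernel_linear_interpolate_2d_backward above (illustrative sketch).
void scatter_bilinear_grad(std::vector<float> &g_x, int ih, int iw,
                           float fy, float fx, float g) {
  const int y1 = static_cast<int>(fy), y2 = std::min(y1 + 1, ih - 1);
  const int x1 = static_cast<int>(fx), x2 = std::min(x1 + 1, iw - 1);
  const float ly1 = fy - y1, ly0 = 1.0f - ly1;  // weights along y
  const float lx1 = fx - x1, lx0 = 1.0f - lx1;  // weights along x
  // Same four accumulations as the atomic_add calls in the device kernel;
  // on the GPU they must be atomic because different outputs may hit one cell.
  g_x[y1 * iw + x1] += ly0 * lx0 * g;
  g_x[y1 * iw + x2] += ly0 * lx1 * g;
  g_x[y2 * iw + x1] += ly1 * lx0 * g;
  g_x[y2 * iw + x2] += ly1 * lx1 * g;
}

int main() {
  std::vector<float> g_x(4 * 4, 0.0f);                 // 4x4 input-gradient buffer
  scatter_bilinear_grad(g_x, 4, 4, 1.25f, 2.5f, 1.0f);
  // The four touched cells now hold 0.375, 0.375, 0.125 and 0.125 (sum = 1).
  return 0;
}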
9b04cd98e6329308f31e6aecde0d367c473bee12.cu
// Copyright 2018,2019,2020,2021 Sony Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/interpolate.hpp> #include <nbla/cuda/utils/atomic_add.cuh> #include <nbla/cuda/utils/nd_index.cuh> #include <nbla/variable.hpp> namespace nbla { inline float compute_scale(int isize, int osize, bool align_corners) { return (osize <= 1) ? 0.0f : (align_corners ? float(isize - 1) / (osize - 1) : float(isize) / osize); } inline float compute_scale_for_nn(int isize, int osize, bool align_corners, bool half_pixel_for_nn) { return half_pixel_for_nn ? isize / static_cast<float>(osize) : compute_scale(isize, osize, align_corners); } __device__ __forceinline__ float get_src_index(float scale, int dst_index, bool half_pixel) { return half_pixel ? fmaxf(0.0f, scale * (float(dst_index) + 0.5f) - 0.5f) : scale * dst_index; } __device__ __forceinline__ float get_src_index_for_nn(float scale, int dst_index, bool half_pixel, bool half_pixel_for_nn) { return half_pixel_for_nn ? scale * (dst_index + 0.5f) : get_src_index(scale, dst_index, half_pixel); } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_1d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int ishape, const int istride, const int ostride, const float sx, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_2d(index, ostride); const auto oc = channel_last ? nd_index.y : 0; const auto ox = nd_index.x; const auto iw = ishape; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_x1 = make_int2(x1, oc); const auto nd_idx_x2 = make_int2(x2, oc); const auto idx_lx0 = device_2d_to_flat(nd_idx_x1, istride); const auto idx_lx1 = device_2d_to_flat(nd_idx_x2, istride); for (; outer_size--; src += src_inner_size, dst += dst_inner_size) { const T val0 = lx0 * src[idx_lx0]; const T val1 = lx1 * src[idx_lx1]; dst[index] = val0 + val1; } } } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_2d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int2 ishape, const int2 istride, const int2 ostride, const float sx, const float sy, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_3d(index, ostride); const auto oc = channel_last ? 
nd_index.z : 0; const auto oy = nd_index.x; const auto ox = nd_index.y; const auto ih = ishape.x; const auto iw = ishape.y; const auto fy = get_src_index(sy, oy, half_pixel); const auto y1 = static_cast<int>(fy); const auto y2 = min(y1 + 1, ih - 1); const auto ly1 = static_cast<T>(fy - y1); const auto ly0 = static_cast<T>(1) - ly1; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_y1x1 = make_int3(y1, x1, oc); const auto nd_idx_y1x2 = make_int3(y1, x2, oc); const auto nd_idx_y2x1 = make_int3(y2, x1, oc); const auto nd_idx_y2x2 = make_int3(y2, x2, oc); const auto idx_ly0x0 = device_3d_to_flat(nd_idx_y1x1, istride); const auto idx_ly0x1 = device_3d_to_flat(nd_idx_y1x2, istride); const auto idx_ly1x0 = device_3d_to_flat(nd_idx_y2x1, istride); const auto idx_ly1x1 = device_3d_to_flat(nd_idx_y2x2, istride); for (; outer_size--; src += src_inner_size, dst += dst_inner_size) { const T val0 = lx0 * src[idx_ly0x0]; const T val1 = lx1 * src[idx_ly0x1]; const T val2 = lx0 * src[idx_ly1x0]; const T val3 = lx1 * src[idx_ly1x1]; dst[index] = ly0 * (val0 + val1) + ly1 * (val2 + val3); } } } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_3d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int3 ishape, const int3 istride, const int3 ostride, const float sx, const float sy, const float sz, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_4d(index, ostride); const auto oc = channel_last ? nd_index.w : 0; const auto oz = nd_index.x; const auto oy = nd_index.y; const auto ox = nd_index.z; const auto id = ishape.x; const auto ih = ishape.y; const auto iw = ishape.z; const auto fz = get_src_index(sz, oz, half_pixel); const auto z1 = static_cast<int>(fz); const auto z2 = min(z1 + 1, id - 1); const auto lz1 = static_cast<T>(fz - z1); const auto lz0 = static_cast<T>(1) - lz1; const auto fy = get_src_index(sy, oy, half_pixel); const auto y1 = static_cast<int>(fy); const auto y2 = min(y1 + 1, ih - 1); const auto ly1 = static_cast<T>(fy - y1); const auto ly0 = static_cast<T>(1) - ly1; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_z1y1x1 = make_int4(z1, y1, x1, oc); const auto nd_idx_z1y1x2 = make_int4(z1, y1, x2, oc); const auto nd_idx_z1y2x1 = make_int4(z1, y2, x1, oc); const auto nd_idx_z1y2x2 = make_int4(z1, y2, x2, oc); const auto nd_idx_z2y1x1 = make_int4(z2, y1, x1, oc); const auto nd_idx_z2y1x2 = make_int4(z2, y1, x2, oc); const auto nd_idx_z2y2x1 = make_int4(z2, y2, x1, oc); const auto nd_idx_z2y2x2 = make_int4(z2, y2, x2, oc); const auto idx_lz0y0x0 = device_4d_to_flat(nd_idx_z1y1x1, istride); const auto idx_lz0y0x1 = device_4d_to_flat(nd_idx_z1y1x2, istride); const auto idx_lz0y1x0 = device_4d_to_flat(nd_idx_z1y2x1, istride); const auto idx_lz0y1x1 = device_4d_to_flat(nd_idx_z1y2x2, istride); const auto idx_lz1y0x0 = device_4d_to_flat(nd_idx_z2y1x1, istride); const auto idx_lz1y0x1 = device_4d_to_flat(nd_idx_z2y1x2, istride); const auto idx_lz1y1x0 = device_4d_to_flat(nd_idx_z2y2x1, istride); const auto idx_lz1y1x1 = device_4d_to_flat(nd_idx_z2y2x2, istride); for (; outer_size--; src += src_inner_size, dst 
+= dst_inner_size) { const T val0 = lx0 * src[idx_lz0y0x0]; const T val1 = lx1 * src[idx_lz0y0x1]; const T val2 = lx0 * src[idx_lz0y1x0]; const T val3 = lx1 * src[idx_lz0y1x1]; const T val4 = lx0 * src[idx_lz1y0x0]; const T val5 = lx1 * src[idx_lz1y0x1]; const T val6 = lx0 * src[idx_lz1y1x0]; const T val7 = lx1 * src[idx_lz1y1x1]; const T val8 = ly0 * (val0 + val1) + ly1 * (val2 + val3); const T val9 = ly0 * (val4 + val5) + ly1 * (val6 + val7); dst[index] = lz0 * val8 + lz1 * val9; } } } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_1d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int ishape, const int istride, const int ostride, const float sx, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_2d(index, ostride); const auto oc = channel_last ? nd_index.y : 0; const auto ox = nd_index.x; const auto iw = ishape; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_x1 = make_int2(x1, oc); const auto nd_idx_x2 = make_int2(x2, oc); const auto idx_lx1 = device_2d_to_flat(nd_idx_x1, istride); const auto idx_lx2 = device_2d_to_flat(nd_idx_x2, istride); for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) { const T g = g_y[index]; atomic_add(g_x + idx_lx1, lx0 * g); atomic_add(g_x + idx_lx2, lx1 * g); } } } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_2d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int2 ishape, const int2 istride, const int2 ostride, const float sx, const float sy, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_3d(index, ostride); const auto oc = channel_last ? 
nd_index.z : 0; const auto oy = nd_index.x; const auto ox = nd_index.y; const auto ih = ishape.x; const auto iw = ishape.y; const auto fy = get_src_index(sy, oy, half_pixel); const auto y1 = static_cast<int>(fy); const auto y2 = min(y1 + 1, ih - 1); const auto ly1 = static_cast<T>(fy - y1); const auto ly0 = static_cast<T>(1) - ly1; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_y1x1 = make_int3(y1, x1, oc); const auto nd_idx_y1x2 = make_int3(y1, x2, oc); const auto nd_idx_y2x1 = make_int3(y2, x1, oc); const auto nd_idx_y2x2 = make_int3(y2, x2, oc); const auto idx_ly0x0 = device_3d_to_flat(nd_idx_y1x1, istride); const auto idx_ly0x1 = device_3d_to_flat(nd_idx_y1x2, istride); const auto idx_ly1x0 = device_3d_to_flat(nd_idx_y2x1, istride); const auto idx_ly1x1 = device_3d_to_flat(nd_idx_y2x2, istride); for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) { const T g = g_y[index]; atomic_add(g_x + idx_ly0x0, ly0 * lx0 * g); atomic_add(g_x + idx_ly0x1, ly0 * lx1 * g); atomic_add(g_x + idx_ly1x0, ly1 * lx0 * g); atomic_add(g_x + idx_ly1x1, ly1 * lx1 * g); } } } template <typename T, bool channel_last = false> __global__ void kernel_linear_interpolate_3d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int3 ishape, const int3 istride, const int3 ostride, const float sx, const float sy, const float sz, const bool half_pixel) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_4d(index, ostride); const auto oc = channel_last ? nd_index.w : 0; const auto oz = nd_index.x; const auto oy = nd_index.y; const auto ox = nd_index.z; const auto id = ishape.x; const auto ih = ishape.y; const auto iw = ishape.z; const auto fz = get_src_index(sz, oz, half_pixel); const auto z1 = static_cast<int>(fz); const auto z2 = min(z1 + 1, id - 1); const auto lz1 = static_cast<T>(fz - z1); const auto lz0 = static_cast<T>(1) - lz1; const auto fy = get_src_index(sy, oy, half_pixel); const auto y1 = static_cast<int>(fy); const auto y2 = min(y1 + 1, ih - 1); const auto ly1 = static_cast<T>(fy - y1); const auto ly0 = static_cast<T>(1) - ly1; const auto fx = get_src_index(sx, ox, half_pixel); const auto x1 = static_cast<int>(fx); const auto x2 = min(x1 + 1, iw - 1); const auto lx1 = static_cast<T>(fx - x1); const auto lx0 = static_cast<T>(1) - lx1; const auto nd_idx_z1y1x1 = make_int4(z1, y1, x1, oc); const auto nd_idx_z1y1x2 = make_int4(z1, y1, x2, oc); const auto nd_idx_z1y2x1 = make_int4(z1, y2, x1, oc); const auto nd_idx_z1y2x2 = make_int4(z1, y2, x2, oc); const auto nd_idx_z2y1x1 = make_int4(z2, y1, x1, oc); const auto nd_idx_z2y1x2 = make_int4(z2, y1, x2, oc); const auto nd_idx_z2y2x1 = make_int4(z2, y2, x1, oc); const auto nd_idx_z2y2x2 = make_int4(z2, y2, x2, oc); const auto idx_lz0y0x0 = device_4d_to_flat(nd_idx_z1y1x1, istride); const auto idx_lz0y0x1 = device_4d_to_flat(nd_idx_z1y1x2, istride); const auto idx_lz0y1x0 = device_4d_to_flat(nd_idx_z1y2x1, istride); const auto idx_lz0y1x1 = device_4d_to_flat(nd_idx_z1y2x2, istride); const auto idx_lz1y0x0 = device_4d_to_flat(nd_idx_z2y1x1, istride); const auto idx_lz1y0x1 = device_4d_to_flat(nd_idx_z2y1x2, istride); const auto idx_lz1y1x0 = device_4d_to_flat(nd_idx_z2y2x1, istride); const auto idx_lz1y1x1 = device_4d_to_flat(nd_idx_z2y2x2, istride); for (; outer_size--; g_x += 
g_x_inner_size, g_y += g_y_inner_size) { const T g = g_y[index]; atomic_add(g_x + idx_lz0y0x0, lz0 * ly0 * lx0 * g); atomic_add(g_x + idx_lz0y0x1, lz0 * ly0 * lx1 * g); atomic_add(g_x + idx_lz0y1x0, lz0 * ly1 * lx0 * g); atomic_add(g_x + idx_lz0y1x1, lz0 * ly1 * lx1 * g); atomic_add(g_x + idx_lz1y0x0, lz1 * ly0 * lx0 * g); atomic_add(g_x + idx_lz1y0x1, lz1 * ly0 * lx1 * g); atomic_add(g_x + idx_lz1y1x0, lz1 * ly1 * lx0 * g); atomic_add(g_x + idx_lz1y1x1, lz1 * ly1 * lx1 * g); } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_1d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int ishape, const int istride, const int ostride, const float sx, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_2d(index, ostride); const auto oc = channel_last ? nd_index.y : 0; const auto ox = nd_index.x; const auto iw = ishape; const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_x = make_int2(ix, oc); const auto idx_x = device_2d_to_flat(nd_idx_x, istride); for (; outer_size--; src += src_inner_size, dst += dst_inner_size) { dst[index] = src[idx_x]; } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_2d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int2 ishape, const int2 istride, const int2 ostride, const float sx, const float sy, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_3d(index, ostride); const auto oc = channel_last ? nd_index.z : 0; const auto oy = nd_index.x; const auto ox = nd_index.y; const auto ih = ishape.x; const auto iw = ishape.y; const auto fy = get_src_index_for_nn(sy, oy, half_pixel, half_pixel_for_nn); const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto iy = min(static_cast<int>(fy), ih - 1); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_yx = make_int3(iy, ix, oc); const auto idx_yx = device_3d_to_flat(nd_idx_yx, istride); for (; outer_size--; src += src_inner_size, dst += dst_inner_size) { dst[index] = src[idx_yx]; } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_3d( const int dst_inner_size, T *dst, const int src_inner_size, const T *src, int outer_size, const int3 ishape, const int3 istride, const int3 ostride, const float sx, const float sy, const float sz, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) { const auto nd_index = device_flat_to_4d(index, ostride); const auto oc = channel_last ? 
nd_index.w : 0; const auto oz = nd_index.x; const auto oy = nd_index.y; const auto ox = nd_index.z; const auto id = ishape.x; const auto ih = ishape.y; const auto iw = ishape.z; const auto fz = get_src_index_for_nn(sz, oz, half_pixel, half_pixel_for_nn); const auto fy = get_src_index_for_nn(sy, oy, half_pixel, half_pixel_for_nn); const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto iz = min(static_cast<int>(fz), id - 1); const auto iy = min(static_cast<int>(fy), ih - 1); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_zyx = make_int4(iz, iy, ix, oc); const auto idx_zyx = device_4d_to_flat(nd_idx_zyx, istride); for (; outer_size--; src += src_inner_size, dst += dst_inner_size) { dst[index] = src[idx_zyx]; } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_1d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int ishape, const int istride, const int ostride, const float sx, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_2d(index, ostride); const auto oc = channel_last ? nd_index.y : 0; const auto ox = nd_index.x; const auto iw = ishape; const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_x = make_int2(ix, oc); const auto idx_x = device_2d_to_flat(nd_idx_x, istride); for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) { atomic_add(g_x + idx_x, g_y[index]); } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_2d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int2 ishape, const int2 istride, const int2 ostride, const float sx, const float sy, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_3d(index, ostride); const auto oc = channel_last ? nd_index.z : 0; const auto oy = nd_index.x; const auto ox = nd_index.y; const auto ih = ishape.x; const auto iw = ishape.y; const auto fy = get_src_index_for_nn(sy, oy, half_pixel, half_pixel_for_nn); const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto iy = min(static_cast<int>(fy), ih - 1); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_yx = make_int3(iy, ix, oc); const auto idx_yx = device_3d_to_flat(nd_idx_yx, istride); for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) { atomic_add(g_x + idx_yx, g_y[index]); } } } template <typename T, bool channel_last = false> __global__ void kernel_nearest_interpolate_3d_backward( const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x, int outer_size, const int3 ishape, const int3 istride, const int3 ostride, const float sx, const float sy, const float sz, const bool half_pixel, const bool half_pixel_for_nn) { NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) { const auto nd_index = device_flat_to_4d(index, ostride); const auto oc = channel_last ? 
nd_index.w : 0; const auto oz = nd_index.x; const auto oy = nd_index.y; const auto ox = nd_index.z; const auto id = ishape.x; const auto ih = ishape.y; const auto iw = ishape.z; const auto fz = get_src_index_for_nn(sz, oz, half_pixel, half_pixel_for_nn); const auto fy = get_src_index_for_nn(sy, oy, half_pixel, half_pixel_for_nn); const auto fx = get_src_index_for_nn(sx, ox, half_pixel, half_pixel_for_nn); const auto iz = min(static_cast<int>(fz), id - 1); const auto iy = min(static_cast<int>(fy), ih - 1); const auto ix = min(static_cast<int>(fx), iw - 1); const auto nd_idx_zyx = make_int4(iz, iy, ix, oc); const auto idx_zyx = device_4d_to_flat(nd_idx_zyx, istride); for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) { atomic_add(g_x + idx_zyx, g_y[index]); } } } template <typename T> void InterpolateCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); auto src = inputs[0]->get_data_pointer<Tcu>(this->ctx_); auto dst = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true); const int ndim = inputs[0]->ndim(); if (this->output_size_.size() == 1) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int src_inner_size = this->channel_last_ ? ic * iw : iw; const int dst_inner_size = this->channel_last_ ? oc * ow : ow; const int outer_size = inputs[0]->size() / src_inner_size; const auto ishape = iw; const auto istride = this->channel_last_ ? ic : 1; const auto ostride = this->channel_last_ ? oc : 1; if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); auto kernel = this->channel_last_ ? kernel_linear_interpolate_1d<Tcu, true> : kernel_linear_interpolate_1d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? kernel_nearest_interpolate_1d<Tcu, true> : kernel_nearest_interpolate_1d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, this->half_pixel_, this->half_pixel_for_nn_); } } else if (this->output_size_.size() == 2) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 3]; const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 3]; const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int src_inner_size = this->channel_last_ ? ic * iw * ih : iw * ih; const int dst_inner_size = this->channel_last_ ? oc * ow * oh : ow * oh; const int outer_size = inputs[0]->size() / src_inner_size; const auto ishape = make_int2(ih, iw); const auto istride = this->channel_last_ ? 
make_int2(iw * ic, ic) : make_int2(iw, 1); const auto ostride = this->channel_last_ ? make_int2(ow * oc, oc) : make_int2(ow, 1); if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); const float sy = compute_scale(ih, oh, this->align_corners_); auto kernel = this->channel_last_ ? kernel_linear_interpolate_2d<Tcu, true> : kernel_linear_interpolate_2d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, sy, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); const float sy = compute_scale_for_nn(ih, oh, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? kernel_nearest_interpolate_2d<Tcu, true> : kernel_nearest_interpolate_2d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, sy, this->half_pixel_, this->half_pixel_for_nn_); } } else if (this->output_size_.size() == 3) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 4]; const int id = this->channel_last_ ? inputs[0]->shape()[ndim - 4] : inputs[0]->shape()[ndim - 3]; const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 4]; const int od = this->channel_last_ ? outputs[0]->shape()[ndim - 4] : outputs[0]->shape()[ndim - 3]; const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int src_inner_size = this->channel_last_ ? ic * iw * ih * id : iw * ih * id; const int dst_inner_size = this->channel_last_ ? oc * ow * oh * od : ow * oh * od; const int outer_size = inputs[0]->size() / src_inner_size; const auto ishape = make_int3(id, ih, iw); const auto istride = this->channel_last_ ? make_int3(ih * iw * ic, iw * ic, ic) : make_int3(ih * iw, iw, 1); const auto ostride = this->channel_last_ ? make_int3(oh * ow * oc, ow * oc, oc) : make_int3(oh * ow, ow, 1); if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); const float sy = compute_scale(ih, oh, this->align_corners_); const float sz = compute_scale(id, od, this->align_corners_); auto kernel = this->channel_last_ ? kernel_linear_interpolate_3d<Tcu, true> : kernel_linear_interpolate_3d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, sy, sz, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); const float sy = compute_scale_for_nn(ih, oh, this->align_corners_, this->half_pixel_for_nn_); const float sz = compute_scale_for_nn(id, od, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? 
kernel_nearest_interpolate_3d<Tcu, true> : kernel_nearest_interpolate_3d<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape, istride, ostride, sx, sy, sz, this->half_pixel_, this->half_pixel_for_nn_); } } } template <typename T> void InterpolateCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!(propagate_down[0])) { return; } cuda_set_device(this->device_); auto g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); auto g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, false); const int ndim = inputs[0]->ndim(); if (this->output_size_.size() == 1) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int g_x_inner_size = this->channel_last_ ? ic * iw : iw; const int g_y_inner_size = this->channel_last_ ? oc * ow : ow; const int outer_size = inputs[0]->size() / g_x_inner_size; const auto ishape = iw; const auto istride = this->channel_last_ ? ic : 1; const auto ostride = this->channel_last_ ? oc : 1; if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); auto kernel = this->channel_last_ ? kernel_linear_interpolate_1d_backward<Tcu, true> : kernel_linear_interpolate_1d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? kernel_nearest_interpolate_1d_backward<Tcu, true> : kernel_nearest_interpolate_1d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, this->half_pixel_, this->half_pixel_for_nn_); } } else if (this->output_size_.size() == 2) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 3]; const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 3]; const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int g_x_inner_size = this->channel_last_ ? ic * iw * ih : iw * ih; const int g_y_inner_size = this->channel_last_ ? oc * ow * oh : ow * oh; const int outer_size = inputs[0]->size() / g_x_inner_size; const auto ishape = make_int2(ih, iw); const auto istride = this->channel_last_ ? make_int2(iw * ic, ic) : make_int2(iw, 1); const auto ostride = this->channel_last_ ? make_int2(ow * oc, oc) : make_int2(ow, 1); if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); const float sy = compute_scale(ih, oh, this->align_corners_); auto kernel = this->channel_last_ ? 
kernel_linear_interpolate_2d_backward<Tcu, true> : kernel_linear_interpolate_2d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, sy, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); const float sy = compute_scale_for_nn(ih, oh, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? kernel_nearest_interpolate_2d_backward<Tcu, true> : kernel_nearest_interpolate_2d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, sy, this->half_pixel_, this->half_pixel_for_nn_); } } else if (this->output_size_.size() == 3) { const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1] : inputs[0]->shape()[ndim - 4]; const int id = this->channel_last_ ? inputs[0]->shape()[ndim - 4] : inputs[0]->shape()[ndim - 3]; const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3] : inputs[0]->shape()[ndim - 2]; const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2] : inputs[0]->shape()[ndim - 1]; const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1] : outputs[0]->shape()[ndim - 4]; const int od = this->channel_last_ ? outputs[0]->shape()[ndim - 4] : outputs[0]->shape()[ndim - 3]; const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3] : outputs[0]->shape()[ndim - 2]; const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2] : outputs[0]->shape()[ndim - 1]; const int g_x_inner_size = this->channel_last_ ? ic * iw * ih * id : iw * ih * id; const int g_y_inner_size = this->channel_last_ ? oc * ow * oh * od : ow * oh * od; const int outer_size = inputs[0]->size() / g_x_inner_size; const auto ishape = make_int3(id, ih, iw); const auto istride = this->channel_last_ ? make_int3(ih * iw * ic, iw * ic, ic) : make_int3(ih * iw, iw, 1); const auto ostride = this->channel_last_ ? make_int3(oh * ow * oc, ow * oc, oc) : make_int3(oh * ow, ow, 1); if (this->mode_ == "linear") { const float sx = compute_scale(iw, ow, this->align_corners_); const float sy = compute_scale(ih, oh, this->align_corners_); const float sz = compute_scale(id, od, this->align_corners_); auto kernel = this->channel_last_ ? kernel_linear_interpolate_3d_backward<Tcu, true> : kernel_linear_interpolate_3d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, sy, sz, this->half_pixel_); } else if (this->mode_ == "nearest") { const float sx = compute_scale_for_nn(iw, ow, this->align_corners_, this->half_pixel_for_nn_); const float sy = compute_scale_for_nn(ih, oh, this->align_corners_, this->half_pixel_for_nn_); const float sz = compute_scale_for_nn(id, od, this->align_corners_, this->half_pixel_for_nn_); auto kernel = this->channel_last_ ? kernel_nearest_interpolate_3d_backward<Tcu, true> : kernel_nearest_interpolate_3d_backward<Tcu, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape, istride, ostride, sx, sy, sz, this->half_pixel_, this->half_pixel_for_nn_); } } } } // namespace nbla
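For reference, the mapping from an output coordinate back into the source grid that all of these kernels rely on is defined by compute_scale and get_src_index near the top of the file: align_corners maps the corner samples onto each other, while half_pixel shifts sampling to pixel centres. The following host-side mirror of that arithmetic is a convenience sketch for checking a configuration on the CPU; only the formulas are taken from the file, the host function names are made up.

#include <algorithm>
#include <cstdio>

// Host-side mirror of compute_scale / get_src_index (illustrative sketch).
float scale_of(int isize, int osize, bool align_corners) {
  return (osize <= 1) ? 0.0f
                      : (align_corners ? float(isize - 1) / (osize - 1)
                                       : float(isize) / osize);
}

float src_index_of(float scale, int dst_index, bool half_pixel) {
  return half_pixel ? std::max(0.0f, scale * (dst_index + 0.5f) - 0.5f)
                    : scale * dst_index;
}

int main() {
  // Upsampling width 4 -> 8: where does output pixel 3 sample from?
  const float s = scale_of(4, 8, /*align_corners=*/false);  // 0.5
  std::printf("half_pixel: %.2f, origin-aligned: %.2f\n",
              src_index_of(s, 3, true),    // 1.25
              src_index_of(s, 3, false));  // 1.50
  return 0;
}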
71a29d5daf67405716232c6da2217220c70647a9.hip
// !!! This is a file automatically generated by hipify!!!
// Mandelbrot set fractal implementation
// CUDA and OpenCV libraries
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <string>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;
using namespace std;

#define HEIGHT 512  // Must be a multiple of block_size.y
#define WIDTH 512   // Must be a multiple of block_size.x
#define MAX_ITER 10000

// Mandelbrot set function
void mandelbrotGPU(uchar*);
__global__ void calc(char* image_buffer);

#define cudaAssertSuccess(ans)\
  { _cudaAssertSuccess((ans), __FILE__, __LINE__); }

// Assert function
inline void _cudaAssertSuccess(hipError_t code, char *file, int line)
{
  if (code != hipSuccess)
  {
    fprintf(stderr,"_cudaAssertSuccess: %s %s %d\n", hipGetErrorString(code), file, line);
    exit(code);
  }
}

int main(int argc, char** argv)
{
  // Render the set
  cv::Mat image_output = cv::Mat(WIDTH, HEIGHT, CV_8UC1);
  // image* image_output = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U,1);
  mandelbrotGPU(image_output.data);
  cv::namedWindow("GPU", 256);
  cv::imshow("GPU", image_output);
  cv::waitKey();
}

// GPU computation of the Mandelbrot set
void mandelbrotGPU(uchar* image_buffer)
{
  char* d_image_buffer;
  cudaAssertSuccess(hipMalloc(&d_image_buffer, WIDTH * HEIGHT));
  dim3 block_size(16, 16);
  dim3 grid_size(WIDTH / block_size.x, HEIGHT / block_size.y);
  hipLaunchKernelGGL(( calc), dim3(grid_size), dim3(block_size), 0, 0, d_image_buffer);
  cudaAssertSuccess(hipPeekAtLastError());
  cudaAssertSuccess(hipDeviceSynchronize());
  cudaAssertSuccess(hipMemcpy(image_buffer, d_image_buffer, HEIGHT * WIDTH, hipMemcpyDeviceToHost));
  cudaAssertSuccess(hipFree(d_image_buffer));
}

__global__ void calc(char* image_buffer)
{
  int row = blockIdx.y * blockDim.y + threadIdx.y;  // WIDTH
  int col = blockIdx.x * blockDim.x + threadIdx.x;  // HEIGHT
  int idx = row * WIDTH + col;
  if(col >= WIDTH || row >= HEIGHT) return;

  float x0 = ((float)col / WIDTH) * 3.5f - 2.5f;
  float y0 = ((float)row / HEIGHT) * 3.5f - 1.75f;
  float x = 0.0f;
  float y = 0.0f;
  int iter = 0;
  float xtemp;
  // Main Mandelbrot set construction loop
  while((x * x + y * y <= 4.0f) && (iter < MAX_ITER))
  {
    xtemp = x * x - y * y + x0;
    y = 2.0f * x * y + y0;
    x = xtemp;
    iter++;
  }

  // Rendering color
  int color = iter * 5;
  if (color >= 256) color = 0;
  image_buffer[idx] = color;
}
71a29d5daf67405716232c6da2217220c70647a9.cu
// Mandelbrot set fractal implementation
// CUDA and OpenCV libraries
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <string>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;
using namespace std;

#define HEIGHT 512  // Must be a multiple of block_size.y
#define WIDTH 512   // Must be a multiple of block_size.x
#define MAX_ITER 10000

// Mandelbrot set function
void mandelbrotGPU(uchar*);
__global__ void calc(char* image_buffer);

#define cudaAssertSuccess(ans)\
  { _cudaAssertSuccess((ans), __FILE__, __LINE__); }

// Assert function
inline void _cudaAssertSuccess(cudaError_t code, char *file, int line)
{
  if (code != cudaSuccess)
  {
    fprintf(stderr,"_cudaAssertSuccess: %s %s %d\n", cudaGetErrorString(code), file, line);
    exit(code);
  }
}

int main(int argc, char** argv)
{
  // Render the set
  cv::Mat image_output = cv::Mat(WIDTH, HEIGHT, CV_8UC1);
  // image* image_output = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U,1);
  mandelbrotGPU(image_output.data);
  cv::namedWindow("GPU", 256);
  cv::imshow("GPU", image_output);
  cv::waitKey();
}

// GPU computation of the Mandelbrot set
void mandelbrotGPU(uchar* image_buffer)
{
  char* d_image_buffer;
  cudaAssertSuccess(cudaMalloc(&d_image_buffer, WIDTH * HEIGHT));
  dim3 block_size(16, 16);
  dim3 grid_size(WIDTH / block_size.x, HEIGHT / block_size.y);
  calc<<<grid_size, block_size>>>(d_image_buffer);
  cudaAssertSuccess(cudaPeekAtLastError());
  cudaAssertSuccess(cudaDeviceSynchronize());
  cudaAssertSuccess(cudaMemcpy(image_buffer, d_image_buffer, HEIGHT * WIDTH, cudaMemcpyDeviceToHost));
  cudaAssertSuccess(cudaFree(d_image_buffer));
}

__global__ void calc(char* image_buffer)
{
  int row = blockIdx.y * blockDim.y + threadIdx.y;  // WIDTH
  int col = blockIdx.x * blockDim.x + threadIdx.x;  // HEIGHT
  int idx = row * WIDTH + col;
  if(col >= WIDTH || row >= HEIGHT) return;

  float x0 = ((float)col / WIDTH) * 3.5f - 2.5f;
  float y0 = ((float)row / HEIGHT) * 3.5f - 1.75f;
  float x = 0.0f;
  float y = 0.0f;
  int iter = 0;
  float xtemp;
  // Main Mandelbrot set construction loop
  while((x * x + y * y <= 4.0f) && (iter < MAX_ITER))
  {
    xtemp = x * x - y * y + x0;
    y = 2.0f * x * y + y0;
    x = xtemp;
    iter++;
  }

  // Rendering color
  int color = iter * 5;
  if (color >= 256) color = 0;
  image_buffer[idx] = color;
}
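Comparing this .cu file with its .hip counterpart above, the hipify conversion amounts to a header swap, a one-to-one renaming of runtime calls, and a rewrite of the kernel-launch syntax; the kernel body itself is unchanged. Condensed from the two files:

// CUDA (this file)                                   HIP (hipified file above)
// #include "cuda_runtime.h"                      ->  #include "hip/hip_runtime.h"
// cudaError_t / cudaSuccess / cudaGetErrorString ->  hipError_t / hipSuccess / hipGetErrorString
// cudaMalloc / cudaMemcpy / cudaFree             ->  hipMalloc / hipMemcpy / hipFree
// cudaMemcpyDeviceToHost                         ->  hipMemcpyDeviceToHost
// cudaPeekAtLastError / cudaDeviceSynchronize    ->  hipPeekAtLastError / hipDeviceSynchronize
// calc<<<grid_size, block_size>>>(d_image_buffer);
//   -> hipLaunchKernelGGL(calc, dim3(grid_size), dim3(block_size), 0, 0, d_image_buffer);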
a0ebc82b85557e727de29ea83307b0213d137f49.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>

__global__ void add(int *a, int *b, int *c)
{
  c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}

int main(void)
{
  int *d_a, *d_b, *d_c;
  size_t size = 2*sizeof(int);
  int a[2] = {1,2};
  int b[2] = {1,2};
  int c[2] = {0,0};
  int test;
  hipGetDeviceCount(&test);

  hipMalloc(&d_a, size);
  hipMalloc(&d_b, size);
  hipMalloc(&d_c, size);

  hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
  hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);

  printf("c[0]%d\n",c[0]);
  printf("c[1]%d\n",c[1]);

  hipLaunchKernelGGL(( add), dim3(1),dim3(2), 0, 0, d_a, d_b, d_c);
  a[1] = 6;

  hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
  hipMemcpy(&b, d_b, size, hipMemcpyDeviceToHost);
  hipMemcpy(&a, d_a, size, hipMemcpyDeviceToHost);

  printf("c[0]%d\n",c[0]);
  printf("c[1]%d\n",c[1]);
  printf("%p\n", &a);

  hipFree(d_a);
  hipFree(d_b);
  hipFree(d_c);
  return 0;
}
a0ebc82b85557e727de29ea83307b0213d137f49.cu
#include <stdio.h>
#include <stdlib.h>

__global__ void add(int *a, int *b, int *c)
{
  c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}

int main(void)
{
  int *d_a, *d_b, *d_c;
  size_t size = 2*sizeof(int);
  int a[2] = {1,2};
  int b[2] = {1,2};
  int c[2] = {0,0};
  int test;

  cudaGetDeviceCount(&test);
  cudaMalloc(&d_a, size);
  cudaMalloc(&d_b, size);
  cudaMalloc(&d_c, size);

  cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);

  printf("c[0]%d\n",c[0]);
  printf("c[1]%d\n",c[1]);

  add<<<1,2>>>(d_a, d_b, d_c);
  a[1] = 6;

  cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
  cudaMemcpy(&b, d_b, size, cudaMemcpyDeviceToHost);
  cudaMemcpy(&a, d_a, size, cudaMemcpyDeviceToHost);

  printf("c[0]%d\n",c[0]);
  printf("c[1]%d\n",c[1]);
  printf("%p\n", &a);

  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  return 0;
}
97baa0543af786d5c330ab694079a8da2f80a8ae.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "copyCol.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *in = NULL; hipMalloc(&in, XSIZE*YSIZE); int *out = NULL; hipMalloc(&out, XSIZE*YSIZE); const int nx = 1; const int ny = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( copyCol), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,nx,ny); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( copyCol), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,nx,ny); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( copyCol), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out,nx,ny); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
97baa0543af786d5c330ab694079a8da2f80a8ae.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "copyCol.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *in = NULL; cudaMalloc(&in, XSIZE*YSIZE); int *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); const int nx = 1; const int ny = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); copyCol<<<gridBlock,threadBlock>>>(in,out,nx,ny); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { copyCol<<<gridBlock,threadBlock>>>(in,out,nx,ny); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { copyCol<<<gridBlock,threadBlock>>>(in,out,nx,ny); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
0dfbf9aba1f65976080d1368616500bc9d118b5f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "ReductionTools.h" #include "CommunDevices.h" #include "hiprand/hiprand_kernel.h" /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ __global__ void computeMontecarlo(int* ptrDevResult, hiprandState_t* ptrDevTabGenerators, int nbGen); __device__ int generatorDevice(hiprandState_t* ptrDevTabGenerators, int nbGen); __device__ void generatePoints(hiprandState_t* ptrDevGenerator, float xMin, float xMax, float yMin, float yMax, float* x, float* y); /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void computeMontecarlo(int* ptrDevResult, hiprandState_t* ptrDevTabGenerators, int nbGen) { __shared__ int ptrDevArraySM[4096]; // sm_52 ptrDevArraySM[Indice1D::tidLocal()] = generatorDevice(ptrDevTabGenerators, nbGen); __syncthreads(); ReductionTools::template reductionIntraBlock<int>(ptrDevArraySM, Indice1D::nbThreadBlock()); ReductionTools::template reductionInterBlock<int>(ptrDevArraySM, ptrDevResult); } __device__ int generatorDevice(hiprandState_t* ptrDevTabGenerators, int nbGen) { const int NB_THREADS= Indice1D::nbThread(); const int TID = Indice1D::tid(); float sum = 0.0; float dx; float dy; int s = TID; while (s < nbGen) { generatePoints(&ptrDevTabGenerators[TID], 0, 0, 1, 4, &dx, &dy); if (CommunDevices::calcPi(dx) > dy) { sum++; } s += NB_THREADS; } return sum; } __device__ void generatePoints(hiprandState_t* ptrDevGenerator, float xMin, float yMin, float xMax, float yMax, float* dx, float* dy) { *dx = hiprand_uniform(ptrDevGenerator) * (xMax - xMin) + xMin; *dy = hiprand_uniform(ptrDevGenerator) * (yMax - yMin) + yMin; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
0dfbf9aba1f65976080d1368616500bc9d118b5f.cu
#include <iostream> #include "ReductionTools.h" #include "CommunDevices.h" #include "curand_kernel.h" /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ __global__ void computeMontecarlo(int* ptrDevResult, curandState* ptrDevTabGenerators, int nbGen); __device__ int generatorDevice(curandState* ptrDevTabGenerators, int nbGen); __device__ void generatePoints(curandState* ptrDevGenerator, float xMin, float xMax, float yMin, float yMax, float* x, float* y); /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void computeMontecarlo(int* ptrDevResult, curandState* ptrDevTabGenerators, int nbGen) { __shared__ int ptrDevArraySM[4096]; // sm_52 ptrDevArraySM[Indice1D::tidLocal()] = generatorDevice(ptrDevTabGenerators, nbGen); __syncthreads(); ReductionTools::template reductionIntraBlock<int>(ptrDevArraySM, Indice1D::nbThreadBlock()); ReductionTools::template reductionInterBlock<int>(ptrDevArraySM, ptrDevResult); } __device__ int generatorDevice(curandState* ptrDevTabGenerators, int nbGen) { const int NB_THREADS= Indice1D::nbThread(); const int TID = Indice1D::tid(); float sum = 0.0; float dx; float dy; int s = TID; while (s < nbGen) { generatePoints(&ptrDevTabGenerators[TID], 0, 0, 1, 4, &dx, &dy); if (CommunDevices::calcPi(dx) > dy) { sum++; } s += NB_THREADS; } return sum; } __device__ void generatePoints(curandState* ptrDevGenerator, float xMin, float yMin, float xMax, float yMax, float* dx, float* dy) { *dx = curand_uniform(ptrDevGenerator) * (xMax - xMin) + xMin; *dy = curand_uniform(ptrDevGenerator) * (yMax - yMin) + yMin; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
0097446d57846fe7dea22101d927a91a1d5dc21d.hip
// !!! This is a file automatically generated by hipify!!! /* Luis Murphy Marcos JHU Engineering for Professionals 605.617 Introduction to GPU Programming */ // Standard Library includes #include <iostream> #include <fstream> #include <ctime> #include <cstdio> #include <cmath> #include <random> // CUDA includes #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hipfft.h> // Local includes #include "helper_functions.h" #include "helper_cuda.h" typedef float2 Complex; const double C = 299792.458; //km/s const double PI = 3.1415926535897932384626433; template <typename T> T dBToPower(T dB){ return ::pow(10, dB/10); } template <typename T> T powerTodB(T ratio){ return 10*std::log10(ratio); } // Bit twiddling hack from: // https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 int nextPowerOfTwo(int v){ v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; v++; return v; } float complexAbs(Complex cplx) { return std::sqrt(cplx.x*cplx.x + cplx.y*cplx.y); } float complexPhase(Complex cplx) { if(cplx.x==0 && cplx.y>=0) return PI/2; if(cplx.x==0 && cplx.y<0) return -PI/2; return std::atan(cplx.y / cplx.x); } int main() { // // Radar parameters from POMR Section 2.12 const float Pt = 150; // Transmit peak power (kW) const float f0 = 9.4e9; // Signal frequency (Hz) const float tau = 1.2e-6; // Pulse width (s) const float PRF = 2e3; // Pulse repetition frequency (Hz) const float Da = 2.4; // Antenna diameter (m) const float eta_a = 0.6; // Antenna efficiency const float CPI = 16e-3; // coherent processing interval / dwell time (s) // Adjusted from 18.3 to get a power of 2 number of dwells const float Lt = 3.1; // Transmit loss (dB) const float Lr = 2.4; // Receive loss (dB) const float Lsp = 3.2; // Signal processing loss (dB) const float La_per_km = 0.16; // Atmospheric loss per km (dB/km) const float RCS = -10; // Target RCS (dBsm) const float R0 = 25; // Initial target range (km) const float vt = 120e-3; // Target velocity (km/s) // Derived parameters const float lambda = C/f0; // Signal wavelength (km) const float PRI = 1.0/PRF; // Pulse repetition interval (s) const float Ae = PI * ::pow(Da/2, 2) * eta_a; // Antenna effective area (m^2) // // Transmitted pulse for display purposes (Eq. 8.25) std::cout << "Generating transmitted pulse..." 
<< std::endl; const float fs_tx = 2*f0; // Nyquist rate (Hz) const float Ts_tx = 1/fs_tx; const int num_samples_tx = tau/Ts_tx; float *pulse_tx = new float[num_samples_tx]; for(int i = 0; i < num_samples_tx; ++i) { pulse_tx[i] = std::cos(2*PI*f0*i*Ts_tx); } delete[] pulse_tx; // -------------------------------------------------------------------- const int num_pulses = CPI/PRI; const int num_range_bins = PRI/tau; std::cout << "Generating fast time slow time matrix...\n"; std::cout << "Range bins: " << num_range_bins << "\n"; std::cout << "Pulses: " << num_pulses << std::endl; // Allocate fast-time slow-time matrix Complex **data_matrix = new Complex*[num_pulses]; for(int i = 0; i < num_pulses; ++i) data_matrix[i] = new Complex[num_range_bins]; // Open file for data matrix std::fstream data_matrix_file; data_matrix_file.open("data-matrix.txt",std::ios::out); if(!data_matrix_file) { std::cout << "Error creating data matrix file" << std::endl; return 0; } // Additive noise parameters std::random_device rd; std::mt19937 gen{rd()}; std::normal_distribution<float> dist{0.0, 0.1}; for(int pulse = 0; pulse < num_pulses; ++pulse) { const float R = R0 + vt*(pulse*PRI); const float echo_start_time = 2*R/C; // // Radar range equation w/o noise (Eq. 2.17) const float Ls = dBToPower( Lt + La_per_km*2*R + Lr + Lsp ); // Two-way system loss (dB) float A_prime = (Pt * dBToPower(RCS) * Ae) / (::pow(4*PI, 3) * ::pow(R, 4) * Ls); for(int i = 0; i < num_range_bins; ++i) { // // Received pulse with doppler phase shift (Eq. 8.26) const float t = i*tau - echo_start_time; // Use pulse arrival time as reference time // Evaluates to 0 or 1, using to avoid if statement in kernel const bool in_pulse = t >= 0 && t <= tau; // Inside pulse arrival window float signal_rx = A_prime * std::cos(2*PI*f0*i*tau - 4*PI*R/lambda) * in_pulse; // Add some Gaussian noise signal_rx += A_prime*dist(gen); // // Analytic signal from I/Q channels (Eq. 8.27) Complex signal_analytic; signal_analytic.x = signal_rx * 2*std::cos(2*PI*f0*i*tau); signal_analytic.y = signal_rx * 2*std::sin(2*PI*f0*i*tau); // // Populate fast-time slow-time matrix data_matrix[pulse][i] = signal_analytic; data_matrix_file << complexAbs(signal_analytic) << " "; } data_matrix_file << "\n"; } data_matrix_file.close(); // -------------------------------------------------------------------- // // FFT along each slow-time row std::fstream range_doppler_file; range_doppler_file.open("range-doppler.txt",std::ios::out); if(!range_doppler_file) { std::cout << "Error creating range-doppler file"<< std::endl; return 0; } std::cout << "Performing FFT on slow-time sequences..." 
<< std::endl; const int fft_size = nextPowerOfTwo(num_pulses); const int mem_size = sizeof(Complex)*fft_size; float total_alloc_time = 0.0; float total_memcpy_time = 0.0; float total_kernel_time = 0.0; // CUFFT plan and stream hipfftHandle plan; hipStream_t stream; hipEvent_t allocate_start_event, allocate_end_event; hipEvent_t to_device_event; hipEvent_t kernel_start_event; hipEvent_t to_host_event; hipEvent_t end_event; hipEvent_t deallocate_start_event, deallocate_end_event; hipfftPlan1d(&plan, fft_size, HIPFFT_C2C, 1); checkCudaErrors( hipEventCreate(&allocate_start_event) ); checkCudaErrors( hipEventCreate(&allocate_end_event) ); checkCudaErrors( hipEventCreate(&to_device_event) ); checkCudaErrors( hipEventCreate(&kernel_start_event) ); checkCudaErrors( hipEventCreate(&to_host_event) ); checkCudaErrors( hipEventCreate(&end_event) ); checkCudaErrors( hipEventCreate(&deallocate_start_event) ); checkCudaErrors( hipEventCreate(&deallocate_end_event) ); // Allocate host and device memory Complex *slow_time_data, *fft_data; hipfftComplex *d_data_to_process; checkCudaErrors( hipEventRecord(allocate_start_event, stream) ); checkCudaErrors( hipHostMalloc((void **) &slow_time_data, mem_size, hipHostMallocDefault) ); checkCudaErrors( hipHostMalloc((void **) &fft_data, mem_size, hipHostMallocDefault) ); checkCudaErrors( hipMalloc((void **) &d_data_to_process, mem_size) ); checkCudaErrors( hipEventRecord(allocate_end_event, stream) ); checkCudaErrors( hipStreamSynchronize(stream) ); checkCudaErrors( hipEventElapsedTime(&total_alloc_time, allocate_start_event, allocate_end_event) ); Complex complex_zero; complex_zero.x=0; complex_zero.y=0; for(int range_bin = 0; range_bin < num_range_bins; ++range_bin) { // Populate slow time data (pulse data for given range) for(int pulse = 0; pulse < fft_size; ++pulse) { // Zero pad if needed slow_time_data[pulse] = (pulse < num_pulses) ? 
data_matrix[pulse][range_bin] : complex_zero; } // Allocate device memory checkCudaErrors( hipEventRecord(to_device_event, stream) ); checkCudaErrors( hipMemcpy(d_data_to_process, slow_time_data, mem_size, hipMemcpyHostToDevice) ); // Transform slow time data checkCudaErrors( hipEventRecord(kernel_start_event, stream) ); hipfftExecC2C(plan, (hipfftComplex *)d_data_to_process, (hipfftComplex *)d_data_to_process, HIPFFT_FORWARD); // Retrieve range-doppler matrix row checkCudaErrors( hipEventRecord(to_host_event, stream) ); checkCudaErrors( hipMemcpyAsync(fft_data, d_data_to_process, mem_size, hipMemcpyDeviceToHost, stream) ); // Wait for stream to synchronize checkCudaErrors( hipEventRecord(end_event, stream) ); checkCudaErrors( hipStreamSynchronize(stream) ); // Measure time to transfer memory and execute float to_device_time; float kernel_time; float to_host_time; checkCudaErrors(hipEventElapsedTime(&to_device_time, to_device_event, kernel_start_event)); checkCudaErrors(hipEventElapsedTime(&kernel_time, kernel_start_event, to_host_event)); checkCudaErrors(hipEventElapsedTime(&to_host_time, to_host_event, end_event)); total_memcpy_time += (to_device_time + to_host_time); total_kernel_time += kernel_time; // Write to range doppler file for (int i = 0; i < fft_size; ++i) { range_doppler_file << complexAbs(fft_data[i]) << " "; } range_doppler_file << std::endl; } // Deallocate host and device memory checkCudaErrors( hipEventRecord(deallocate_start_event, stream) ); checkCudaErrors( hipHostFree(slow_time_data) ); checkCudaErrors( hipHostFree(fft_data) ); checkCudaErrors( hipFree(d_data_to_process) ); checkCudaErrors( hipEventRecord(deallocate_end_event, stream) ); checkCudaErrors( hipStreamSynchronize(stream) ); float dealloc_time; checkCudaErrors(hipEventElapsedTime(&dealloc_time, deallocate_start_event, deallocate_end_event)); total_alloc_time += dealloc_time; range_doppler_file.close(); std::cout << "cuFFT time (FFT operations) : " << total_kernel_time << "ms" << std::endl; std::cout << "cuFFT time (memcpy operations): " << total_memcpy_time << "ms" << std::endl; std::cout << "cuFFT time (alloc operations): " << total_alloc_time << "ms" << std::endl; // Final cleanup checkCudaErrors( hipEventDestroy(allocate_start_event) ); checkCudaErrors( hipEventDestroy(allocate_end_event) ); checkCudaErrors( hipEventDestroy(to_device_event) ); checkCudaErrors( hipEventDestroy(kernel_start_event) ); checkCudaErrors( hipEventDestroy(to_host_event) ); checkCudaErrors( hipEventDestroy(end_event) ); checkCudaErrors( hipEventDestroy(deallocate_start_event) ); checkCudaErrors( hipEventDestroy(deallocate_end_event) ); std::cout << "Deleting data matrix..." << std::endl; for(int i = 0; i < num_pulses; ++i) { delete[] data_matrix[i]; } delete[] data_matrix; std::cout << "Destroying plan..." << std::endl; hipfftDestroy(plan); checkCudaErrors(hipDeviceReset()); std::cout << "Success!" << std::endl; }
0097446d57846fe7dea22101d927a91a1d5dc21d.cu
/* Luis Murphy Marcos JHU Engineering for Professionals 605.617 Introduction to GPU Programming */ // Standard Library includes #include <iostream> #include <fstream> #include <ctime> #include <cstdio> #include <cmath> #include <random> // CUDA includes #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cufft.h> // Local includes #include "helper_functions.h" #include "helper_cuda.h" typedef float2 Complex; const double C = 299792.458; //km/s const double PI = 3.1415926535897932384626433; template <typename T> T dBToPower(T dB){ return std::pow(10, dB/10); } template <typename T> T powerTodB(T ratio){ return 10*std::log10(ratio); } // Bit twiddling hack from: // https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 int nextPowerOfTwo(int v){ v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; v++; return v; } float complexAbs(Complex cplx) { return std::sqrt(cplx.x*cplx.x + cplx.y*cplx.y); } float complexPhase(Complex cplx) { if(cplx.x==0 && cplx.y>=0) return PI/2; if(cplx.x==0 && cplx.y<0) return -PI/2; return std::atan(cplx.y / cplx.x); } int main() { // // Radar parameters from POMR Section 2.12 const float Pt = 150; // Transmit peak power (kW) const float f0 = 9.4e9; // Signal frequency (Hz) const float tau = 1.2e-6; // Pulse width (s) const float PRF = 2e3; // Pulse repetition frequency (Hz) const float Da = 2.4; // Antenna diameter (m) const float eta_a = 0.6; // Antenna efficiency const float CPI = 16e-3; // coherent processing interval / dwell time (s) // Adjusted from 18.3 to get a power of 2 number of dwells const float Lt = 3.1; // Transmit loss (dB) const float Lr = 2.4; // Receive loss (dB) const float Lsp = 3.2; // Signal processing loss (dB) const float La_per_km = 0.16; // Atmospheric loss per km (dB/km) const float RCS = -10; // Target RCS (dBsm) const float R0 = 25; // Initial target range (km) const float vt = 120e-3; // Target velocity (km/s) // Derived parameters const float lambda = C/f0; // Signal wavelength (km) const float PRI = 1.0/PRF; // Pulse repetition interval (s) const float Ae = PI * std::pow(Da/2, 2) * eta_a; // Antenna effective area (m^2) // // Transmitted pulse for display purposes (Eq. 8.25) std::cout << "Generating transmitted pulse..." << std::endl; const float fs_tx = 2*f0; // Nyquist rate (Hz) const float Ts_tx = 1/fs_tx; const int num_samples_tx = tau/Ts_tx; float *pulse_tx = new float[num_samples_tx]; for(int i = 0; i < num_samples_tx; ++i) { pulse_tx[i] = std::cos(2*PI*f0*i*Ts_tx); } delete[] pulse_tx; // -------------------------------------------------------------------- const int num_pulses = CPI/PRI; const int num_range_bins = PRI/tau; std::cout << "Generating fast time slow time matrix...\n"; std::cout << "Range bins: " << num_range_bins << "\n"; std::cout << "Pulses: " << num_pulses << std::endl; // Allocate fast-time slow-time matrix Complex **data_matrix = new Complex*[num_pulses]; for(int i = 0; i < num_pulses; ++i) data_matrix[i] = new Complex[num_range_bins]; // Open file for data matrix std::fstream data_matrix_file; data_matrix_file.open("data-matrix.txt",std::ios::out); if(!data_matrix_file) { std::cout << "Error creating data matrix file" << std::endl; return 0; } // Additive noise parameters std::random_device rd; std::mt19937 gen{rd()}; std::normal_distribution<float> dist{0.0, 0.1}; for(int pulse = 0; pulse < num_pulses; ++pulse) { const float R = R0 + vt*(pulse*PRI); const float echo_start_time = 2*R/C; // // Radar range equation w/o noise (Eq. 
2.17) const float Ls = dBToPower( Lt + La_per_km*2*R + Lr + Lsp ); // Two-way system loss (dB) float A_prime = (Pt * dBToPower(RCS) * Ae) / (std::pow(4*PI, 3) * std::pow(R, 4) * Ls); for(int i = 0; i < num_range_bins; ++i) { // // Received pulse with doppler phase shift (Eq. 8.26) const float t = i*tau - echo_start_time; // Use pulse arrival time as reference time // Evaluates to 0 or 1, using to avoid if statement in kernel const bool in_pulse = t >= 0 && t <= tau; // Inside pulse arrival window float signal_rx = A_prime * std::cos(2*PI*f0*i*tau - 4*PI*R/lambda) * in_pulse; // Add some Gaussian noise signal_rx += A_prime*dist(gen); // // Analytic signal from I/Q channels (Eq. 8.27) Complex signal_analytic; signal_analytic.x = signal_rx * 2*std::cos(2*PI*f0*i*tau); signal_analytic.y = signal_rx * 2*std::sin(2*PI*f0*i*tau); // // Populate fast-time slow-time matrix data_matrix[pulse][i] = signal_analytic; data_matrix_file << complexAbs(signal_analytic) << " "; } data_matrix_file << "\n"; } data_matrix_file.close(); // -------------------------------------------------------------------- // // FFT along each slow-time row std::fstream range_doppler_file; range_doppler_file.open("range-doppler.txt",std::ios::out); if(!range_doppler_file) { std::cout << "Error creating range-doppler file"<< std::endl; return 0; } std::cout << "Performing FFT on slow-time sequences..." << std::endl; const int fft_size = nextPowerOfTwo(num_pulses); const int mem_size = sizeof(Complex)*fft_size; float total_alloc_time = 0.0; float total_memcpy_time = 0.0; float total_kernel_time = 0.0; // CUFFT plan and stream cufftHandle plan; cudaStream_t stream; cudaEvent_t allocate_start_event, allocate_end_event; cudaEvent_t to_device_event; cudaEvent_t kernel_start_event; cudaEvent_t to_host_event; cudaEvent_t end_event; cudaEvent_t deallocate_start_event, deallocate_end_event; cufftPlan1d(&plan, fft_size, CUFFT_C2C, 1); checkCudaErrors( cudaEventCreate(&allocate_start_event) ); checkCudaErrors( cudaEventCreate(&allocate_end_event) ); checkCudaErrors( cudaEventCreate(&to_device_event) ); checkCudaErrors( cudaEventCreate(&kernel_start_event) ); checkCudaErrors( cudaEventCreate(&to_host_event) ); checkCudaErrors( cudaEventCreate(&end_event) ); checkCudaErrors( cudaEventCreate(&deallocate_start_event) ); checkCudaErrors( cudaEventCreate(&deallocate_end_event) ); // Allocate host and device memory Complex *slow_time_data, *fft_data; cufftComplex *d_data_to_process; checkCudaErrors( cudaEventRecord(allocate_start_event, stream) ); checkCudaErrors( cudaHostAlloc((void **) &slow_time_data, mem_size, cudaHostAllocDefault) ); checkCudaErrors( cudaHostAlloc((void **) &fft_data, mem_size, cudaHostAllocDefault) ); checkCudaErrors( cudaMalloc((void **) &d_data_to_process, mem_size) ); checkCudaErrors( cudaEventRecord(allocate_end_event, stream) ); checkCudaErrors( cudaStreamSynchronize(stream) ); checkCudaErrors( cudaEventElapsedTime(&total_alloc_time, allocate_start_event, allocate_end_event) ); Complex complex_zero; complex_zero.x=0; complex_zero.y=0; for(int range_bin = 0; range_bin < num_range_bins; ++range_bin) { // Populate slow time data (pulse data for given range) for(int pulse = 0; pulse < fft_size; ++pulse) { // Zero pad if needed slow_time_data[pulse] = (pulse < num_pulses) ? 
data_matrix[pulse][range_bin] : complex_zero; } // Allocate device memory checkCudaErrors( cudaEventRecord(to_device_event, stream) ); checkCudaErrors( cudaMemcpy(d_data_to_process, slow_time_data, mem_size, cudaMemcpyHostToDevice) ); // Transform slow time data checkCudaErrors( cudaEventRecord(kernel_start_event, stream) ); cufftExecC2C(plan, (cufftComplex *)d_data_to_process, (cufftComplex *)d_data_to_process, CUFFT_FORWARD); // Retrieve range-doppler matrix row checkCudaErrors( cudaEventRecord(to_host_event, stream) ); checkCudaErrors( cudaMemcpyAsync(fft_data, d_data_to_process, mem_size, cudaMemcpyDeviceToHost, stream) ); // Wait for stream to synchronize checkCudaErrors( cudaEventRecord(end_event, stream) ); checkCudaErrors( cudaStreamSynchronize(stream) ); // Measure time to transfer memory and execute float to_device_time; float kernel_time; float to_host_time; checkCudaErrors(cudaEventElapsedTime(&to_device_time, to_device_event, kernel_start_event)); checkCudaErrors(cudaEventElapsedTime(&kernel_time, kernel_start_event, to_host_event)); checkCudaErrors(cudaEventElapsedTime(&to_host_time, to_host_event, end_event)); total_memcpy_time += (to_device_time + to_host_time); total_kernel_time += kernel_time; // Write to range doppler file for (int i = 0; i < fft_size; ++i) { range_doppler_file << complexAbs(fft_data[i]) << " "; } range_doppler_file << std::endl; } // Deallocate host and device memory checkCudaErrors( cudaEventRecord(deallocate_start_event, stream) ); checkCudaErrors( cudaFreeHost(slow_time_data) ); checkCudaErrors( cudaFreeHost(fft_data) ); checkCudaErrors( cudaFree(d_data_to_process) ); checkCudaErrors( cudaEventRecord(deallocate_end_event, stream) ); checkCudaErrors( cudaStreamSynchronize(stream) ); float dealloc_time; checkCudaErrors(cudaEventElapsedTime(&dealloc_time, deallocate_start_event, deallocate_end_event)); total_alloc_time += dealloc_time; range_doppler_file.close(); std::cout << "cuFFT time (FFT operations) : " << total_kernel_time << "ms" << std::endl; std::cout << "cuFFT time (memcpy operations): " << total_memcpy_time << "ms" << std::endl; std::cout << "cuFFT time (alloc operations): " << total_alloc_time << "ms" << std::endl; // Final cleanup checkCudaErrors( cudaEventDestroy(allocate_start_event) ); checkCudaErrors( cudaEventDestroy(allocate_end_event) ); checkCudaErrors( cudaEventDestroy(to_device_event) ); checkCudaErrors( cudaEventDestroy(kernel_start_event) ); checkCudaErrors( cudaEventDestroy(to_host_event) ); checkCudaErrors( cudaEventDestroy(end_event) ); checkCudaErrors( cudaEventDestroy(deallocate_start_event) ); checkCudaErrors( cudaEventDestroy(deallocate_end_event) ); std::cout << "Deleting data matrix..." << std::endl; for(int i = 0; i < num_pulses; ++i) { delete[] data_matrix[i]; } delete[] data_matrix; std::cout << "Destroying plan..." << std::endl; cufftDestroy(plan); checkCudaErrors(cudaDeviceReset()); std::cout << "Success!" << std::endl; }
7b93ca49ced8b18842184f0273396e0d4a17711b.hip
// !!! This is a file automatically generated by hipify!!! #include <glm/glm.hpp> #include <glm/gtc/type_ptr.hpp> #include <glm/gtx/transform.hpp> #include <glm/gtc/matrix_transform.hpp> #include <string> #include <iostream> #include <chrono> #include <math.h> #include <stdlib.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #define TPB 256 //dont forget renaming everything .cu - or is that needed in VS? /* PASTE THESE INTO THE TOP OF THE OPENGL-FILE*/ // #include "nbodysim.cu" // bool started = false; // Particle *cuda_particles = 0; // Particle cpuparticles[NUM_PARTICLES]; /* PASTE THESE LINE WHEREVER PARTICLE INITIALIZATION IS APPROPRIATE*/ // if (useGpu)printf("Calculating with CUDA.\n"); // if (!useGpu)printf("Calculating with regular cpu.\n"); // system("pause"); // init_particles_planets(cpuparticles); // hipMalloc(&cuda_particles, NUM_PARTICLES * sizeof(Particle)); // hipMemcpy(cuda_particles, cpuparticles, NUM_PARTICLES * sizeof(Particle), hipMemcpyHostToDevice); /* PASTE THESE LINES INTO THE RENDER LOOP */ // if (started) { // for (int i = 0; i < SIM_PER_RENDER; i++) { // if (useGpu) { // simulateStepGPU(NUM_PARTICLES, cpuparticles, cuda_particles); // } // else { // simulateStepCPU(NUM_PARTICLES, cpuparticles); // } // } // } /*PASTE THIS INTO THE DECONSTRUCTOR, AFTER THE RENDER LOOP*/ // hipFree(cuda_particles); struct Particle { int type; glm::vec3 pos; glm::vec3 nPos; glm::vec3 vel; bool lock; }; //**************************************************** SIMULATION VARIABLES **************************************************** static const bool useGpu = true; __constant__ static const float pi = 3.14159265358979323846; //Life of Pi static int timesteps = 0; __constant__ static const float epsilon = 47097.5; //the "minimum" gravitational distance. 
If smaller, will threshhold at this value to avoid gravitational singularities __constant__ static const float D = 376780.0f; //particle diameter // [0] = silicate, [1] = iron __constant__ static const float masses[2] = { 7.4161E+19f, 1.9549E+20f }; //element mass(kg) __constant__ static const float rep[2] = { 2.9114E+11f, 5.8228E+11f }; //the repulsive factor __constant__ static const float rep_redc[2] = { 0.01, 0.02 }; //the modifier when particles repulse eachother while moving away from eachother __constant__ static const float shell_depth[2] = { 0.001, 0.002 }; //shell depth percentage __constant__ static const float G = 6.674E-11f; //gravitational constant __constant__ static const float timestep = 1.0E-9f; //time step static const int SIM_PER_RENDER = 1; __constant__ static const int NUM_PARTICLES = 100; //currently takes 10ms for 100 particles, 1s for 1000 particles // Planet spawning variables __constant__ static const float mass_ratio = 0.5f; //the mass distribution between the two planetary bodies (0.5 means equal distribution, 1.0 means one gets all) __constant__ static const float rad = 3500000.0f; //the radius of the planets (that is, the initial particle spawning radius) __constant__ static const float collision_speed = 10; //the speed with which the planetoids approach eachother __constant__ static const float rotational_speed = 10; //the speed with which the planetoids rotate __constant__ static const float planet_offset = 2; //the number times radius each planetoid is spawned from world origin //**************************************************** SIMULATION FUNCTIONS **************************************************** static void prep_planetoid(int i0, int i1, glm::vec3 centerpos, glm::vec3 dir, Particle *list, int coreMaterial, int shellMaterial, float shellThickness); /*Initialize the two planetoids.*/ static void init_particles_planets(Particle *list) { int mass1 = (NUM_PARTICLES * mass_ratio); glm::vec3 centerPos = glm::vec3((planet_offset * rad), 0, 0); //The planetoid center position glm::vec3 collVel = glm::vec3(1, 0, 0); //The normalized vector of collision velocity direction (multiplied with the speed factor later) //Two planets equal in size, moving toward eachother on the x-axis, with a core reaching 50% towards the surface of each planetoid. prep_planetoid(0, mass1, centerPos, collVel, list, 1, 0, 0.5); prep_planetoid(mass1, NUM_PARTICLES, -centerPos, -collVel, list, 1, 0, 0.5); } /*If I've thought correctly, this should never be modified. Every planetoid will be created with this * Parameters are mass(interval of particle indices), position, and speed.*/ static void prep_planetoid(int i0, int i1, glm::vec3 centerpos, glm::vec3 dir, Particle *list, int coreMaterial, int shellMaterial, float shellThickness) { for (int i = i0; i < i1; i++) { //Here we randomly distribute particles uniformly within a sphere float rho1 = static_cast <float> (rand()) / static_cast <float> (RAND_MAX); float rho2 = static_cast <float> (rand()) / static_cast <float> (RAND_MAX); float rho3 = static_cast <float> (rand()) / static_cast <float> (RAND_MAX); float mu = (1 - 2 * rho2); list[i].pos.x = rad * pow(rho1, (1.0 / 3.0)) * pow((1 - mu*mu), (1.0 / 2.0)) * cos(2 * pi * rho3); list[i].pos.y = rad * pow(rho1, (1.0 / 3.0)) * pow((1 - mu*mu), (1.0 / 2.0)) * sin(2 * pi * rho3); list[i].pos.z = rad * pow(rho1, (1.0 / 3.0)) * mu; //Here we position the planetoid to center it on a certain position. 
list[i].pos += centerpos; //To modify the composition of this particle, use the different types. //When we wish to create a core of a certain type of matter, we will turn every particle within a certain radius of the core into that type of matter if (glm::distance(centerpos, centerpos + list[i].pos) > rad*(1.0 - shellThickness)) {//If particle is within shell list[i].type = shellMaterial; } else {//If particle is within core list[i].type = coreMaterial; } //Here we add the velocity too the particles to make them rotate along with the planet around its axis float rc = pow((-1), ((int)(list[i].pos.x - centerpos.x) > 0)); float r_xz = sqrt(((list[i].pos.x - centerpos.x)*(list[i].pos.x - centerpos.x)) + ((list[i].pos.z)*(list[i].pos.z))); float theta = atan((list[i].pos.z) / (list[i].pos.x - centerpos.x)); list[i].vel.x = rotational_speed*r_xz*sin(theta)*rc; list[i].vel.z = -rotational_speed*r_xz*cos(theta)*rc; //Here we add the "collision" velocity to the planetoid list[i].vel += dir*collision_speed; } } /*simulate the next state of particle with index i*/ __host__ __device__ static void particleStep(int NUM_PARTICLES, Particle *list, int i) { glm::vec3 force(0, 0, 0); int iType = list[i].type; float Mi = masses[iType]; float Ki = rep[iType]; float KRPi = rep_redc[iType]; float SDPi = shell_depth[iType]; for (int j = 0; j < NUM_PARTICLES; j++) { if (j != i) { int jType = list[j].type; float Mj = masses[jType]; float Kj = rep[jType]; float KRPj = rep_redc[jType]; float SDPj = shell_depth[jType]; bool isMerging = (glm::dot((list[j].pos - list[i].pos), (list[j].vel - list[i].vel)) <= 0); float r = glm::distance(list[i].pos, list[j].pos); glm::vec3 unit_vector = glm::normalize(list[j].pos - list[i].pos); float gravForce = (G * Mi * Mj / (r*r)); float repForce = 0.0f; if (r < epsilon) { r = epsilon; } //-------------------------------------------------------- //If the two particles doesn't touch at all if (D <= r) { force += (gravForce)* unit_vector; } else if (D - D*SDPi <= r && D - D*SDPj <= r) { repForce = 0.5*(Ki + Kj)*((D*D) - (r*r)); force += (gravForce - repForce) * unit_vector; } //If the shell of one of the particles is penetrated, but not the other else if (D - D*SDPi <= r < D - D*SDPj) { if (isMerging) { repForce = 0.5*(Ki + Kj)*((D*D) - (r*r)); force += (gravForce - repForce) * unit_vector; } else { repForce = 0.5*(Ki + (Kj*KRPj))*((D*D) - (r*r)); force += (gravForce - repForce) * unit_vector; } } //If the shell of one of the particles is penetrated, but not the other(same as above, but if the ratios are the opposite) else if (D - D*SDPi <= r < D - D*SDPj) { if (isMerging) { repForce = 0.5*(Ki + Kj)*((D*D) - (r*r)); force += (gravForce - repForce) * unit_vector; } else { repForce = 0.5*((Ki*KRPi) + Kj)*((D*D) - (r*r)); force += (gravForce - repForce) * unit_vector; } } //If both shells are penetrated else if (r < D - D*SDPj && r < D - D*SDPi) { if (isMerging) { repForce = 0.5*(Ki + Kj)*((D*D) - (r*r)); force += (gravForce - repForce) * unit_vector; } else { repForce = 0.5*((Ki*KRPi) + (Kj*KRPj))*((D*D) - (r*r)); force += (gravForce - repForce) * unit_vector; } } //-------------------------------------------------------- } } //update nVel via force list[i].vel = list[i].vel + (timestep * force); list[i].nPos = list[i].pos + (timestep * list[i].vel); } /*The CPU variant of the particle n-body simulation loop iteration*/ static void simulateStepCPU(int NUM_PARTICLES, Particle *particles) { auto start = std::chrono::high_resolution_clock::now(); //calculate their next positions for (int i 
= 0; i < NUM_PARTICLES; i++) { particleStep(NUM_PARTICLES, particles, i); } //update their positions for (int i = 0; i < NUM_PARTICLES; i++) { if (particles[i].lock == false) { particles[i].pos = particles[i].nPos; } } timesteps++; auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed = end - start; double timems = elapsed.count() * 1000; printf("calculation time for one step took %f ms\n", timems); } /*Update every particles next position*/ __global__ static void simstepCuda(int NUM_PARTICLES, Particle *particles) { const int i = blockIdx.x*blockDim.x + threadIdx.x; if (i >= NUM_PARTICLES)return; particleStep(NUM_PARTICLES, particles, i); } /*frogleap every particles position from nextPos to pos*/ __global__ static void updateposCuda(int NUM_PARTICLES, Particle *particles) { const int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < NUM_PARTICLES) { if (particles[i].lock == false) { particles[i].pos = particles[i].nPos; } } } /*The CUDA variant of the particle n-body simulation loop iteration*/ static void simulateStepGPU(int NUM_PARTICLES, Particle *cpuparticles, Particle *gpuparticles) { int blocks = pow(2, ceil(log(NUM_PARTICLES) / log(2))); auto start = std::chrono::high_resolution_clock::now(); simstepCuda <<< blocks, TPB >> >(NUM_PARTICLES, gpuparticles); hipDeviceSynchronize(); updateposCuda <<< blocks, TPB >> >(NUM_PARTICLES, gpuparticles); hipDeviceSynchronize(); hipMemcpy(cpuparticles, gpuparticles, NUM_PARTICLES * sizeof(Particle), hipMemcpyDeviceToHost); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("Error: %s\n", hipGetErrorString(err)); } timesteps++; auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed = end - start; double timems = elapsed.count() * 1000; printf("calculation time for one step took %f ms\n", timems); } //******************************************************************************************************************************
7b93ca49ced8b18842184f0273396e0d4a17711b.cu
#include <glm/glm.hpp> #include <glm/gtc/type_ptr.hpp> #include <glm/gtx/transform.hpp> #include <glm/gtc/matrix_transform.hpp> #include <string> #include <iostream> #include <chrono> #include <math.h> #include <stdlib.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #define TPB 256 //dont forget renaming everything .cu - or is that needed in VS? /* PASTE THESE INTO THE TOP OF THE OPENGL-FILE*/ // #include "nbodysim.cu" // bool started = false; // Particle *cuda_particles = 0; // Particle cpuparticles[NUM_PARTICLES]; /* PASTE THESE LINE WHEREVER PARTICLE INITIALIZATION IS APPROPRIATE*/ // if (useGpu)printf("Calculating with CUDA.\n"); // if (!useGpu)printf("Calculating with regular cpu.\n"); // system("pause"); // init_particles_planets(cpuparticles); // cudaMalloc(&cuda_particles, NUM_PARTICLES * sizeof(Particle)); // cudaMemcpy(cuda_particles, cpuparticles, NUM_PARTICLES * sizeof(Particle), cudaMemcpyHostToDevice); /* PASTE THESE LINES INTO THE RENDER LOOP */ // if (started) { // for (int i = 0; i < SIM_PER_RENDER; i++) { // if (useGpu) { // simulateStepGPU(NUM_PARTICLES, cpuparticles, cuda_particles); // } // else { // simulateStepCPU(NUM_PARTICLES, cpuparticles); // } // } // } /*PASTE THIS INTO THE DECONSTRUCTOR, AFTER THE RENDER LOOP*/ // cudaFree(cuda_particles); struct Particle { int type; glm::vec3 pos; glm::vec3 nPos; glm::vec3 vel; bool lock; }; //**************************************************** SIMULATION VARIABLES **************************************************** static const bool useGpu = true; __constant__ static const float pi = 3.14159265358979323846; //Life of Pi static int timesteps = 0; __constant__ static const float epsilon = 47097.5; //the "minimum" gravitational distance. If smaller, will threshhold at this value to avoid gravitational singularities __constant__ static const float D = 376780.0f; //particle diameter // [0] = silicate, [1] = iron __constant__ static const float masses[2] = { 7.4161E+19f, 1.9549E+20f }; //element mass(kg) __constant__ static const float rep[2] = { 2.9114E+11f, 5.8228E+11f }; //the repulsive factor __constant__ static const float rep_redc[2] = { 0.01, 0.02 }; //the modifier when particles repulse eachother while moving away from eachother __constant__ static const float shell_depth[2] = { 0.001, 0.002 }; //shell depth percentage __constant__ static const float G = 6.674E-11f; //gravitational constant __constant__ static const float timestep = 1.0E-9f; //time step static const int SIM_PER_RENDER = 1; __constant__ static const int NUM_PARTICLES = 100; //currently takes 10ms for 100 particles, 1s for 1000 particles // Planet spawning variables __constant__ static const float mass_ratio = 0.5f; //the mass distribution between the two planetary bodies (0.5 means equal distribution, 1.0 means one gets all) __constant__ static const float rad = 3500000.0f; //the radius of the planets (that is, the initial particle spawning radius) __constant__ static const float collision_speed = 10; //the speed with which the planetoids approach eachother __constant__ static const float rotational_speed = 10; //the speed with which the planetoids rotate __constant__ static const float planet_offset = 2; //the number times radius each planetoid is spawned from world origin //**************************************************** SIMULATION FUNCTIONS **************************************************** static void prep_planetoid(int i0, int i1, glm::vec3 centerpos, glm::vec3 dir, Particle *list, int coreMaterial, int shellMaterial, float 
shellThickness); /*Initialize the two planetoids.*/ static void init_particles_planets(Particle *list) { int mass1 = (NUM_PARTICLES * mass_ratio); glm::vec3 centerPos = glm::vec3((planet_offset * rad), 0, 0); //The planetoid center position glm::vec3 collVel = glm::vec3(1, 0, 0); //The normalized vector of collision velocity direction (multiplied with the speed factor later) //Two planets equal in size, moving toward eachother on the x-axis, with a core reaching 50% towards the surface of each planetoid. prep_planetoid(0, mass1, centerPos, collVel, list, 1, 0, 0.5); prep_planetoid(mass1, NUM_PARTICLES, -centerPos, -collVel, list, 1, 0, 0.5); } /*If I've thought correctly, this should never be modified. Every planetoid will be created with this * Parameters are mass(interval of particle indices), position, and speed.*/ static void prep_planetoid(int i0, int i1, glm::vec3 centerpos, glm::vec3 dir, Particle *list, int coreMaterial, int shellMaterial, float shellThickness) { for (int i = i0; i < i1; i++) { //Here we randomly distribute particles uniformly within a sphere float rho1 = static_cast <float> (rand()) / static_cast <float> (RAND_MAX); float rho2 = static_cast <float> (rand()) / static_cast <float> (RAND_MAX); float rho3 = static_cast <float> (rand()) / static_cast <float> (RAND_MAX); float mu = (1 - 2 * rho2); list[i].pos.x = rad * pow(rho1, (1.0 / 3.0)) * pow((1 - mu*mu), (1.0 / 2.0)) * cos(2 * pi * rho3); list[i].pos.y = rad * pow(rho1, (1.0 / 3.0)) * pow((1 - mu*mu), (1.0 / 2.0)) * sin(2 * pi * rho3); list[i].pos.z = rad * pow(rho1, (1.0 / 3.0)) * mu; //Here we position the planetoid to center it on a certain position. list[i].pos += centerpos; //To modify the composition of this particle, use the different types. //When we wish to create a core of a certain type of matter, we will turn every particle within a certain radius of the core into that type of matter if (glm::distance(centerpos, centerpos + list[i].pos) > rad*(1.0 - shellThickness)) {//If particle is within shell list[i].type = shellMaterial; } else {//If particle is within core list[i].type = coreMaterial; } //Here we add the velocity too the particles to make them rotate along with the planet around its axis float rc = pow((-1), ((int)(list[i].pos.x - centerpos.x) > 0)); float r_xz = sqrt(((list[i].pos.x - centerpos.x)*(list[i].pos.x - centerpos.x)) + ((list[i].pos.z)*(list[i].pos.z))); float theta = atan((list[i].pos.z) / (list[i].pos.x - centerpos.x)); list[i].vel.x = rotational_speed*r_xz*sin(theta)*rc; list[i].vel.z = -rotational_speed*r_xz*cos(theta)*rc; //Here we add the "collision" velocity to the planetoid list[i].vel += dir*collision_speed; } } /*simulate the next state of particle with index i*/ __host__ __device__ static void particleStep(int NUM_PARTICLES, Particle *list, int i) { glm::vec3 force(0, 0, 0); int iType = list[i].type; float Mi = masses[iType]; float Ki = rep[iType]; float KRPi = rep_redc[iType]; float SDPi = shell_depth[iType]; for (int j = 0; j < NUM_PARTICLES; j++) { if (j != i) { int jType = list[j].type; float Mj = masses[jType]; float Kj = rep[jType]; float KRPj = rep_redc[jType]; float SDPj = shell_depth[jType]; bool isMerging = (glm::dot((list[j].pos - list[i].pos), (list[j].vel - list[i].vel)) <= 0); float r = glm::distance(list[i].pos, list[j].pos); glm::vec3 unit_vector = glm::normalize(list[j].pos - list[i].pos); float gravForce = (G * Mi * Mj / (r*r)); float repForce = 0.0f; if (r < epsilon) { r = epsilon; } //-------------------------------------------------------- //If the two 
particles doesn't touch at all if (D <= r) { force += (gravForce)* unit_vector; } else if (D - D*SDPi <= r && D - D*SDPj <= r) { repForce = 0.5*(Ki + Kj)*((D*D) - (r*r)); force += (gravForce - repForce) * unit_vector; } //If the shell of one of the particles is penetrated, but not the other else if (D - D*SDPi <= r < D - D*SDPj) { if (isMerging) { repForce = 0.5*(Ki + Kj)*((D*D) - (r*r)); force += (gravForce - repForce) * unit_vector; } else { repForce = 0.5*(Ki + (Kj*KRPj))*((D*D) - (r*r)); force += (gravForce - repForce) * unit_vector; } } //If the shell of one of the particles is penetrated, but not the other(same as above, but if the ratios are the opposite) else if (D - D*SDPi <= r < D - D*SDPj) { if (isMerging) { repForce = 0.5*(Ki + Kj)*((D*D) - (r*r)); force += (gravForce - repForce) * unit_vector; } else { repForce = 0.5*((Ki*KRPi) + Kj)*((D*D) - (r*r)); force += (gravForce - repForce) * unit_vector; } } //If both shells are penetrated else if (r < D - D*SDPj && r < D - D*SDPi) { if (isMerging) { repForce = 0.5*(Ki + Kj)*((D*D) - (r*r)); force += (gravForce - repForce) * unit_vector; } else { repForce = 0.5*((Ki*KRPi) + (Kj*KRPj))*((D*D) - (r*r)); force += (gravForce - repForce) * unit_vector; } } //-------------------------------------------------------- } } //update nVel via force list[i].vel = list[i].vel + (timestep * force); list[i].nPos = list[i].pos + (timestep * list[i].vel); } /*The CPU variant of the particle n-body simulation loop iteration*/ static void simulateStepCPU(int NUM_PARTICLES, Particle *particles) { auto start = std::chrono::high_resolution_clock::now(); //calculate their next positions for (int i = 0; i < NUM_PARTICLES; i++) { particleStep(NUM_PARTICLES, particles, i); } //update their positions for (int i = 0; i < NUM_PARTICLES; i++) { if (particles[i].lock == false) { particles[i].pos = particles[i].nPos; } } timesteps++; auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed = end - start; double timems = elapsed.count() * 1000; printf("calculation time for one step took %f ms\n", timems); } /*Update every particles next position*/ __global__ static void simstepCuda(int NUM_PARTICLES, Particle *particles) { const int i = blockIdx.x*blockDim.x + threadIdx.x; if (i >= NUM_PARTICLES)return; particleStep(NUM_PARTICLES, particles, i); } /*frogleap every particles position from nextPos to pos*/ __global__ static void updateposCuda(int NUM_PARTICLES, Particle *particles) { const int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < NUM_PARTICLES) { if (particles[i].lock == false) { particles[i].pos = particles[i].nPos; } } } /*The CUDA variant of the particle n-body simulation loop iteration*/ static void simulateStepGPU(int NUM_PARTICLES, Particle *cpuparticles, Particle *gpuparticles) { int blocks = pow(2, ceil(log(NUM_PARTICLES) / log(2))); auto start = std::chrono::high_resolution_clock::now(); simstepCuda <<< blocks, TPB >> >(NUM_PARTICLES, gpuparticles); cudaDeviceSynchronize(); updateposCuda <<< blocks, TPB >> >(NUM_PARTICLES, gpuparticles); cudaDeviceSynchronize(); cudaMemcpy(cpuparticles, gpuparticles, NUM_PARTICLES * sizeof(Particle), cudaMemcpyDeviceToHost); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error: %s\n", cudaGetErrorString(err)); } timesteps++; auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed = end - start; double timems = elapsed.count() * 1000; printf("calculation time for one step took %f ms\n", timems); } 
//******************************************************************************************************************************
06ad2170c4d7939032cf0998a34836b982b4f917.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
#include <unistd.h>
// #include <io.h>

#define DEFAULT_THRESHOLD 4000
#define DEFAULT_FILENAME "mountains.ppm"

void write_ppm(char* filename, int xsize, int ysize, int maxval, int* pic);
unsigned int* read_ppm(char* filename, int& xsize, int& ysize, int& maxval);

/*
 (j - 1, i - 1)  (j, i - 1)  (j + 1, i - 1)
 (j - 1, i)      (j, i)      (j + 1, i)
 (j - 1, i + 1)  (j, i + 1)  (j + 1, i + 1)
*/
__global__ void sobel_kernel_naive(const unsigned int* inputVec, unsigned int* outVec, int width, int height)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < 1 || j < 1 || i >= (height - 1) || j >= (width - 1)) {
        return;
    }
    int offset = i * width + j;
    unsigned int sum1 = inputVec[width * (i - 1) + j + 1] - inputVec[width * (i - 1) + j - 1]
        + 2 * inputVec[width * (i)+j + 1] - 2 * inputVec[width * (i)+j - 1]
        + inputVec[width * (i + 1) + j + 1] - inputVec[width * (i + 1) + j - 1];
    unsigned int sum2 = inputVec[width * (i - 1) + j - 1] + 2 * inputVec[width * (i - 1) + j] + inputVec[width * (i - 1) + j + 1]
        - inputVec[width * (i + 1) + j - 1] - 2 * inputVec[width * (i + 1) + j] - inputVec[width * (i + 1) + j + 1];
    unsigned int magnitude = sum1 * sum1 + sum2 * sum2;
    if (magnitude > DEFAULT_THRESHOLD) {
        outVec[offset] = 255;
    }
}

__global__ void sobel_kernel_register_reuse(const unsigned int* inputVec, unsigned int *outVec, int width, int height)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < 1 || j < 1 || i >= (height - 1) || j >= (width - 1)) {
        return;
    }
    int offset = i * width + j;
    unsigned int topLeft = inputVec[width * (i - 1) + j - 1];
    unsigned int topRight = inputVec[width * (i - 1) + j + 1];
    unsigned int bottomLeft = inputVec[width * (i + 1) + j - 1];
    unsigned int bottomRight = inputVec[width * (i + 1) + j + 1];
    unsigned int sum1 = topRight - topLeft
        + 2 * inputVec[width * (i)+j + 1] - 2 * inputVec[width * (i)+j - 1]
        + bottomRight - bottomLeft;
    unsigned int sum2 = topLeft + 2 * inputVec[width * (i - 1) + j] + topRight
        - bottomLeft - 2 * inputVec[width * (i + 1) + j] - bottomRight;
    unsigned int magnitude = sum1 * sum1 + sum2 * sum2;
    if (magnitude > DEFAULT_THRESHOLD) {
        outVec[offset] = 255;
    }
}

/*
 (j - 1, i - 1)  (j, i - 1)  (j + 1, i - 1)
 (j - 1, i)      (j, i)      (j + 1, i)
 (j - 1, i + 1)  (j, i + 1)  (j + 1, i + 1)
*/
__global__ void sobel_kernel_shared_mem_reuse(const unsigned int* inputVec, unsigned int *outVec, int width, int height)
{
    int j = blockIdx.x * 14 + threadIdx.x;
    int i = blockIdx.y * 14 + threadIdx.y;
    if (j >= width || i >= height) {
        return;
    }
    __shared__ int ccpy[16][16];
    ccpy[threadIdx.x][threadIdx.y] = inputVec[width * i + j];
    __syncthreads();
    // Only Inner Part Need Compute Edges
    if (threadIdx.x == 0 || threadIdx.x == 15 || threadIdx.y == 0 || threadIdx.y == 15) {
        return;
    }
    int topLeft = ccpy[threadIdx.x - 1][threadIdx.y - 1];
    int topRight = ccpy[threadIdx.x + 1][threadIdx.y - 1];
    int bottomLeft = ccpy[threadIdx.x - 1][threadIdx.y + 1];
    int bottomRight = ccpy[threadIdx.x + 1][threadIdx.y + 1];
    int sum1 = topRight - topLeft
        + 2 * ccpy[threadIdx.x + 1][threadIdx.y] - 2 * ccpy[threadIdx.x - 1][threadIdx.y]
        + bottomRight - bottomLeft;
    int sum2 = topLeft + 2 * ccpy[threadIdx.x][threadIdx.y - 1] + topRight
        - bottomLeft - 2 * ccpy[threadIdx.x][threadIdx.y + 1] - bottomRight;
    if ((sum1 * sum1 + sum2 * sum2) > DEFAULT_THRESHOLD) {
        outVec[i * width + j] = 255;
    }
}

int main(int argc, char** argv)
{
    int thresh = DEFAULT_THRESHOLD;
    char* filename;
    filename = strdup(DEFAULT_FILENAME);

    if (argc > 1) {
        if (argc == 3) { // filename AND threshold
            filename = strdup(argv[1]);
            thresh = atoi(argv[2]);
        }
        if (argc == 2) { // default file but specified threshold
            thresh = atoi(argv[1]);
        }
        fprintf(stderr, "file %s threshold %d\n", filename, thresh);
    }

    int xsize, ysize, maxval;
    unsigned int* pic = read_ppm(filename, xsize, ysize, maxval);

    int numbytes = xsize * ysize * sizeof(int);
    int* result = (int*)malloc(numbytes);
    if (!result) {
        fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes);
        exit(-1); // fail
    }

    hipError_t cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
    }

    unsigned int* dev_in = 0;
    unsigned int* dev_out = 0;

    // Allocate GPU buffers for two vectors (one input, one output).
    cudaStatus = hipMalloc((void**)&dev_in, ysize * xsize * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
    }
    cudaStatus = hipMalloc((void**)&dev_out, ysize * xsize * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
    }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = hipMemcpy(dev_in, pic, ysize * xsize * sizeof(int), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }

    hipEvent_t start, stop;
    float elapsedTime;
    hipEventCreate(&start);
    hipEventRecord(start, 0);

    //// Kernel invocation
    // dim3 threadsPerBlock(16, 16);
    // dim3 numBlocks((xsize + 15) / threadsPerBlock.x, (ysize + 15) / threadsPerBlock.y);
    dim3 threadsPerBlock(16, 16);
    dim3 numBlocks((xsize - 2 + 15) / 14, (ysize - 2 + 15) / 14);
    //MatAdd << <numBlocks, threadsPerBlock >> > (A, B, C);
    sobel_kernel_shared_mem_reuse<<<numBlocks, threadsPerBlock>>>(dev_in, dev_out, xsize, ysize);

    hipEventCreate(&stop);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start, stop);
    printf("Elapsed time : %f ms\n", elapsedTime);

    // Check for any errors launching the kernel
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "sobel_kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
    }

    // hipDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
    }

    // Copy output vectors from GPU buffers to host memory.
    cudaStatus = hipMemcpy(result, dev_out, ysize * xsize * sizeof(int), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }

    write_ppm("result.ppm", xsize, ysize, 255, result);
    fprintf(stderr, "cuda sobel done\n");

    hipFree(dev_in);
    hipFree(dev_out);
    free(result);

    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }

    return 0;
}

unsigned int* read_ppm(char* filename, int& xsize, int& ysize, int& maxval)
{
    if (!filename || filename[0] == '\0') {
        fprintf(stderr, "read_ppm but no file name\n");
        return NULL; // fail
    }

    fprintf(stderr, "read_ppm( %s )\n", filename);
    int fd = open(filename, O_RDONLY);
    if (fd == -1) {
        fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
        return NULL; // fail
    }

    char chars[1024];
    int num = read(fd, chars, 1000);

    if (chars[0] != 'P' || chars[1] != '6') {
        fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
        return NULL;
    }

    unsigned int width, height, maxvalue;
    char* ptr = chars + 3; // P 6 newline
    if (*ptr == '#') // comment line!
    {
        ptr = 1 + strstr(ptr, "\n");
    }

    num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
    fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue);
    xsize = width;
    ysize = height;
    maxval = maxvalue;

    unsigned int* pic = (unsigned int*)malloc(width * height * sizeof(unsigned int));
    if (!pic) {
        fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
        return NULL; // fail but return
    }

    // allocate buffer to read the rest of the file into
    int bufsize = 3 * width * height * sizeof(unsigned char);
    if (maxval > 255) bufsize *= 2;
    unsigned char* buf = (unsigned char*)malloc(bufsize);
    if (!buf) {
        fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
        return NULL; // fail but return
    }

    // TODO really read
    char duh[80];
    char* line = chars;

    // find the start of the pixel data. no doubt stupid
    sprintf(duh, "%d\0", xsize);
    line = strstr(line, duh);
    //fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
    line += strlen(duh) + 1;

    sprintf(duh, "%d\0", ysize);
    line = strstr(line, duh);
    //fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
    line += strlen(duh) + 1;

    sprintf(duh, "%d\0", maxval);
    line = strstr(line, duh);
    fprintf(stderr, "%s found at offset %d\n", duh, line - chars);
    line += strlen(duh) + 1;

    long offset = line - chars;
    lseek(fd, offset, SEEK_SET); // move to the correct offset
    long numread = read(fd, buf, bufsize);
    fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, (long)bufsize);

    close(fd);

    int pixels = xsize * ysize;
    for (int i = 0; i < pixels; i++) pic[i] = (int)buf[3 * i]; // red channel

    return pic; // success
}

void write_ppm(char* filename, int xsize, int ysize, int maxval, int* pic)
{
    FILE* fp;

    fp = fopen(filename, "w");
    if (!fp) {
        fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n", filename);
        exit(-1);
    }

    fprintf(fp, "P6\n");
    fprintf(fp, "%d %d\n%d\n", xsize, ysize, maxval);

    int numpix = xsize * ysize;
    for (int i = 0; i < numpix; i++) {
        unsigned char uc = (unsigned char)pic[i];
        fprintf(fp, "%c%c%c", uc, uc, uc);
    }
    fclose(fp);
}
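Note on the launch geometry in the file above: sobel_kernel_shared_mem_reuse advances by 14 pixels per block even though the block is 16x16, because every thread loads one pixel into the shared tile but only the inner 14x14 threads compute an output; the outer ring is halo data. The grid therefore has to be sized in 14-pixel steps over the interior of the image. Below is a minimal host-side sketch of that arithmetic; the helper name and the ceiling-division form are illustrative only (the source itself uses the slightly more conservative "(size - 2 + 15) / 14", which can allocate one extra block but still covers every pixel).

#include <cstdio>

// A 16x16 block computes a (16-2)x(16-2) = 14x14 tile of interior pixels;
// the outer ring of threads only supplies halo data to shared memory.
constexpr int kBlockDim = 16;
constexpr int kTileDim  = kBlockDim - 2;  // 14 output pixels per block per axis

// Hypothetical helper: number of blocks along one axis so that every interior
// pixel (indices 1 .. size-2) falls inside some 14-wide tile.
static int blocksForAxis(int size) {
    int interior = size - 2;                      // pixels that get an output
    return (interior + kTileDim - 1) / kTileDim;  // ceiling division
}

int main() {
    int xsize = 512, ysize = 512;  // example image dimensions
    printf("grid = %d x %d blocks of %d x %d threads\n",
           blocksForAxis(xsize), blocksForAxis(ysize), kBlockDim, kBlockDim);
    return 0;
}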
06ad2170c4d7939032cf0998a34836b982b4f917.cu
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <fcntl.h> #include "string.h" #include <unistd.h> // #include <io.h> #define DEFAULT_THRESHOLD 4000 #define DEFAULT_FILENAME "mountains.ppm" void write_ppm(char* filename, int xsize, int ysize, int maxval, int* pic); unsigned int* read_ppm(char* filename, int& xsize, int& ysize, int& maxval); /* (j - 1, i - 1) (j, i - 1) (j + 1, i - 1) (j - 1, i) (j, i) (j + 1, i) (j - 1, i + 1) (j, i + 1) (j + 1, i + 1) */ __global__ void sobel_kernel_naive(const unsigned int* inputVec, unsigned int* outVec, int width, int height) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (i < 1 || j < 1 || i >= (height - 1) || j >= (width - 1)) { return; } int offset = i * width + j; unsigned int sum1 = inputVec[width * (i - 1) + j + 1] - inputVec[width * (i - 1) + j - 1] + 2 * inputVec[width * (i)+j + 1] - 2 * inputVec[width * (i)+j - 1] + inputVec[width * (i + 1) + j + 1] - inputVec[width * (i + 1) + j - 1]; unsigned int sum2 = inputVec[width * (i - 1) + j - 1] + 2 * inputVec[width * (i - 1) + j] + inputVec[width * (i - 1) + j + 1] - inputVec[width * (i + 1) + j - 1] - 2 * inputVec[width * (i + 1) + j] - inputVec[width * (i + 1) + j + 1]; unsigned int magnitude = sum1 * sum1 + sum2 * sum2; if (magnitude > DEFAULT_THRESHOLD) { outVec[offset] = 255; } } __global__ void sobel_kernel_register_reuse(const unsigned int* inputVec, unsigned int *outVec, int width, int height) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (i < 1 || j < 1 || i >= (height - 1) || j >= (width - 1)) { return; } int offset = i * width + j; unsigned int topLeft = inputVec[width * (i - 1) + j - 1]; unsigned int topRight = inputVec[width * (i - 1) + j + 1]; unsigned int bottomLeft = inputVec[width * (i + 1) + j - 1]; unsigned int bottomRight = inputVec[width * (i + 1) + j + 1]; unsigned int sum1 = topRight - topLeft + 2 * inputVec[width * (i)+j + 1] - 2 * inputVec[width * (i)+j - 1] + bottomRight - bottomLeft; unsigned int sum2 = topLeft + 2 * inputVec[width * (i - 1) + j] + topRight - bottomLeft - 2 * inputVec[width * (i + 1) + j] - bottomRight; unsigned int magnitude = sum1 * sum1 + sum2 * sum2; if (magnitude > DEFAULT_THRESHOLD) { outVec[offset] = 255; } } /* (j - 1, i - 1) (j, i - 1) (j + 1, i - 1) (j - 1, i) (j, i) (j + 1, i) (j - 1, i + 1) (j, i + 1) (j + 1, i + 1) */ __global__ void sobel_kernel_shared_mem_reuse(const unsigned int* inputVec, unsigned int *outVec, int width, int height) { int j = blockIdx.x * 14 + threadIdx.x; int i = blockIdx.y * 14 + threadIdx.y; if(j >= width || i >= height) { return; } __shared__ int ccpy[16][16]; ccpy[threadIdx.x][threadIdx.y] = inputVec[width * i + j]; __syncthreads(); // Only Inner Part Need Compute Edges if(threadIdx.x == 0 || threadIdx.x == 15 || threadIdx.y == 0 || threadIdx.y == 15) { return; } int topLeft = ccpy[threadIdx.x - 1][threadIdx.y - 1]; int topRight = ccpy[threadIdx.x + 1][threadIdx.y - 1]; int bottomLeft = ccpy[threadIdx.x - 1][threadIdx.y + 1]; int bottomRight = ccpy[threadIdx.x + 1][threadIdx.y + 1]; int sum1 = topRight - topLeft + 2 * ccpy[threadIdx.x + 1][threadIdx.y] - 2 * ccpy[threadIdx.x - 1][threadIdx.y] + bottomRight - bottomLeft; int sum2 = topLeft + 2 * ccpy[threadIdx.x][threadIdx.y - 1] + topRight - bottomLeft - 2 * ccpy[threadIdx.x][threadIdx.y + 1] - bottomRight; if ((sum1 * sum1 + sum2 * sum2) > DEFAULT_THRESHOLD) { outVec[i * width + j] = 255; } } int 
main(int argc, char** argv) { int thresh = DEFAULT_THRESHOLD; char* filename; filename = strdup(DEFAULT_FILENAME); if (argc > 1) { if (argc == 3) { // filename AND threshold filename = strdup(argv[1]); thresh = atoi(argv[2]); } if (argc == 2) { // default file but specified threshhold thresh = atoi(argv[1]); } fprintf(stderr, "file %s threshold %d\n", filename, thresh); } int xsize, ysize, maxval; unsigned int* pic = read_ppm(filename, xsize, ysize, maxval); int numbytes = xsize * ysize * sizeof(int); int* result = (int*)malloc(numbytes); if (!result) { fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes); exit(-1); // fail } cudaError_t cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); } unsigned int* dev_in = 0; unsigned int* dev_out = 0; // Allocate GPU buffers for two vectors (one input, one output) . cudaStatus = cudaMalloc((void**)&dev_in, ysize * xsize * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } cudaStatus = cudaMalloc((void**)&dev_out, ysize * xsize * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_in, pic, ysize * xsize * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaEvent_t start, stop; float elapsedTime; cudaEventCreate(&start); cudaEventRecord(start, 0); //// Kernel invocation // dim3 threadsPerBlock(16, 16); // dim3 numBlocks((xsize + 15) / threadsPerBlock.x, (ysize + 15) / threadsPerBlock.y); dim3 threadsPerBlock(16, 16); dim3 numBlocks((xsize - 2 + 15) / 14, (ysize - 2 + 15) / 14); //MatAdd << <numBlocks, threadsPerBlock >> > (A, B, C); sobel_kernel_shared_mem_reuse << <numBlocks, threadsPerBlock >> > (dev_in, dev_out, xsize, ysize); cudaEventCreate(&stop); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("Elapsed time : %f ms\n", elapsedTime); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "sobel_kernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); } // Copy output vectors from GPU buffers to host memory . cudaStatus = cudaMemcpy(result, dev_out, ysize * xsize * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } write_ppm("result.ppm", xsize, ysize, 255, result); fprintf(stderr, "cuda sobel done\n"); cudaFree(dev_in); cudaFree(dev_out); free(result); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. 
cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } unsigned int* read_ppm(char* filename, int& xsize, int& ysize, int& maxval) { if (!filename || filename[0] == '\0') { fprintf(stderr, "read_ppm but no file name\n"); return NULL; // fail } fprintf(stderr, "read_ppm( %s )\n", filename); int fd = open(filename, O_RDONLY); if (fd == -1) { fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename); return NULL; // fail } char chars[1024]; int num = read(fd, chars, 1000); if (chars[0] != 'P' || chars[1] != '6') { fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename); return NULL; } unsigned int width, height, maxvalue; char* ptr = chars + 3; // P 6 newline if (*ptr == '#') // comment line! { ptr = 1 + strstr(ptr, "\n"); } num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue); fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue); xsize = width; ysize = height; maxval = maxvalue; unsigned int* pic = (unsigned int*)malloc(width * height * sizeof(unsigned int)); if (!pic) { fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height); return NULL; // fail but return } // allocate buffer to read the rest of the file into int bufsize = 3 * width * height * sizeof(unsigned char); if (maxval > 255) bufsize *= 2; unsigned char* buf = (unsigned char*)malloc(bufsize); if (!buf) { fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize); return NULL; // fail but return } // TODO really read char duh[80]; char* line = chars; // find the start of the pixel data. no doubt stupid sprintf(duh, "%d\0", xsize); line = strstr(line, duh); //fprintf(stderr, "%s found at offset %d\n", duh, line-chars); line += strlen(duh) + 1; sprintf(duh, "%d\0", ysize); line = strstr(line, duh); //fprintf(stderr, "%s found at offset %d\n", duh, line-chars); line += strlen(duh) + 1; sprintf(duh, "%d\0", maxval); line = strstr(line, duh); fprintf(stderr, "%s found at offset %d\n", duh, line - chars); line += strlen(duh) + 1; long offset = line - chars; lseek(fd, offset, SEEK_SET); // move to the correct offset long numread = read(fd, buf, bufsize); fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize); close(fd); int pixels = xsize * ysize; for (int i = 0; i < pixels; i++) pic[i] = (int)buf[3 * i]; // red channel return pic; // success } void write_ppm(char* filename, int xsize, int ysize, int maxval, int* pic) { FILE* fp; fp = fopen(filename, "w"); if (!fp) { fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n"); exit(-1); } fprintf(fp, "P6\n"); fprintf(fp, "%d %d\n%d\n", xsize, ysize, maxval); int numpix = xsize * ysize; for (int i = 0; i < numpix; i++) { unsigned char uc = (unsigned char)pic[i]; fprintf(fp, "%c%c%c", uc, uc, uc); } fclose(fp); }
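One caveat about both versions of main above: dev_out is copied back without ever being initialized, and the kernels only write 255 where the gradient magnitude exceeds the threshold, so every other pixel of result.ppm keeps whatever the allocation happened to contain. A hedged sketch of one way to make the non-edge pixels deterministically black before the launch; the helper name is invented for illustration and is not part of the original source.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical helper: zero the output image on the device before launching a
// Sobel kernel, so pixels the kernel never touches come back as 0 rather than
// stale device memory.
static bool clearDeviceImage(unsigned int* dev_out, int xsize, int ysize) {
    cudaError_t st = cudaMemset(dev_out, 0, (size_t)xsize * ysize * sizeof(unsigned int));
    if (st != cudaSuccess) {
        fprintf(stderr, "cudaMemset failed: %s\n", cudaGetErrorString(st));
        return false;
    }
    return true;
}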
c5508d858d15c431da72085fd7b65fea26ae065f.hip
// !!! This is a file automatically generated by hipify!!!
#include <primitiv/config.h>

#include <primitiv/devices/cuda/device.h>
#include <primitiv/devices/cuda/ops/common.h>
#include <primitiv/internal/cuda/utils.h>

namespace primitiv {
namespace devices {

void CUDA::max_pool2d_fw_impl(
    const Tensor &x,
    std::uint32_t window0, std::uint32_t window1,
    std::uint32_t padding0, std::uint32_t padding1,
    std::uint32_t stride0, std::uint32_t stride1,
    Tensor &y) {
#ifdef PRIMITIV_USE_CUDNN
  const Shape x_shape = x.shape();
  const Shape y_shape = y.shape();

  // Specifies a target device.
  CUDA_CALL(::hipSetDevice(dev_id_));

  // Prepares descriptors.
  const cuda::CuDNNTensorDescriptor x_desc(
      x_shape.batch(), x_shape[2], x_shape[1], x_shape[0], ::CUDNN_DATA_FLOAT);
  const cuda::CuDNNTensorDescriptor y_desc(
      y_shape.batch(), y_shape[2], y_shape[1], y_shape[0], ::CUDNN_DATA_FLOAT);
  const cuda::CuDNNPoolingDescriptor pool_desc(
      CUDNN_POOLING_MAX, window1, window0, padding1, padding0, stride1, stride0);

  // Performs a forward operation.
  const float alpha = 1.f;
  const float beta = 0.f;
  const float *x_ptr = CDATA(x);
  float *y_ptr = MDATA(y);
  CUDNN_CALL(::cudnnPoolingForward(
      state_->cudnn.get(), pool_desc.get(),
      &alpha, x_desc.get(), x_ptr, &beta, y_desc.get(), y_ptr));
#else  // PRIMITIV_USE_CUDNN
  static_cast<void>(x);
  static_cast<void>(window0);
  static_cast<void>(window1);
  static_cast<void>(padding0);
  static_cast<void>(padding1);
  static_cast<void>(stride0);
  static_cast<void>(stride1);
  static_cast<void>(y);
  PRIMITIV_THROW_NOT_IMPLEMENTED;
#endif  // PRIMITIV_USE_CUDNN
}

void CUDA::max_pool2d_bw_impl(
    const Tensor &x, const Tensor &y, const Tensor &gy,
    std::uint32_t window0, std::uint32_t window1,
    std::uint32_t padding0, std::uint32_t padding1,
    std::uint32_t stride0, std::uint32_t stride1,
    Tensor &gx) {
#ifdef PRIMITIV_USE_CUDNN
  const Shape x_shape = x.shape();
  const Shape y_shape = y.shape();

  // Specifies a target device.
  CUDA_CALL(::hipSetDevice(dev_id_));

  // Prepares descriptors.
  const cuda::CuDNNTensorDescriptor x_desc(
      x_shape.batch(), x_shape[2], x_shape[1], x_shape[0], ::CUDNN_DATA_FLOAT);
  const cuda::CuDNNTensorDescriptor y_desc(
      y_shape.batch(), y_shape[2], y_shape[1], y_shape[0], ::CUDNN_DATA_FLOAT);
  const cuda::CuDNNPoolingDescriptor pool_desc(
      CUDNN_POOLING_MAX, window1, window0, padding1, padding0, stride1, stride0);

  // Performs a backward operation.
  const float alpha = 1.f;
  const float beta = 1.f;
  const float *x_ptr = CDATA(x);
  const float *y_ptr = CDATA(y);
  const float *gy_ptr = CDATA(gy);
  float *gx_ptr = MDATA(gx);
  CUDNN_CALL(::cudnnPoolingBackward(
      state_->cudnn.get(), pool_desc.get(),
      &alpha, y_desc.get(), y_ptr, y_desc.get(), gy_ptr, x_desc.get(), x_ptr,
      &beta, x_desc.get(), gx_ptr));
#else  // PRIMITIV_USE_CUDNN
  static_cast<void>(x);
  static_cast<void>(y);
  static_cast<void>(gy);
  static_cast<void>(window0);
  static_cast<void>(window1);
  static_cast<void>(padding0);
  static_cast<void>(padding1);
  static_cast<void>(stride0);
  static_cast<void>(stride1);
  static_cast<void>(gx);
  PRIMITIV_THROW_NOT_IMPLEMENTED;
#endif  // PRIMITIV_USE_CUDNN
}

}  // namespace devices
}  // namespace primitiv
c5508d858d15c431da72085fd7b65fea26ae065f.cu
#include <primitiv/config.h>

#include <primitiv/devices/cuda/device.h>
#include <primitiv/devices/cuda/ops/common.h>
#include <primitiv/internal/cuda/utils.h>

namespace primitiv {
namespace devices {

void CUDA::max_pool2d_fw_impl(
    const Tensor &x,
    std::uint32_t window0, std::uint32_t window1,
    std::uint32_t padding0, std::uint32_t padding1,
    std::uint32_t stride0, std::uint32_t stride1,
    Tensor &y) {
#ifdef PRIMITIV_USE_CUDNN
  const Shape x_shape = x.shape();
  const Shape y_shape = y.shape();

  // Specifies a target device.
  CUDA_CALL(::cudaSetDevice(dev_id_));

  // Prepares descriptors.
  const cuda::CuDNNTensorDescriptor x_desc(
      x_shape.batch(), x_shape[2], x_shape[1], x_shape[0], ::CUDNN_DATA_FLOAT);
  const cuda::CuDNNTensorDescriptor y_desc(
      y_shape.batch(), y_shape[2], y_shape[1], y_shape[0], ::CUDNN_DATA_FLOAT);
  const cuda::CuDNNPoolingDescriptor pool_desc(
      CUDNN_POOLING_MAX, window1, window0, padding1, padding0, stride1, stride0);

  // Performs a forward operation.
  const float alpha = 1.f;
  const float beta = 0.f;
  const float *x_ptr = CDATA(x);
  float *y_ptr = MDATA(y);
  CUDNN_CALL(::cudnnPoolingForward(
      state_->cudnn.get(), pool_desc.get(),
      &alpha, x_desc.get(), x_ptr, &beta, y_desc.get(), y_ptr));
#else  // PRIMITIV_USE_CUDNN
  static_cast<void>(x);
  static_cast<void>(window0);
  static_cast<void>(window1);
  static_cast<void>(padding0);
  static_cast<void>(padding1);
  static_cast<void>(stride0);
  static_cast<void>(stride1);
  static_cast<void>(y);
  PRIMITIV_THROW_NOT_IMPLEMENTED;
#endif  // PRIMITIV_USE_CUDNN
}

void CUDA::max_pool2d_bw_impl(
    const Tensor &x, const Tensor &y, const Tensor &gy,
    std::uint32_t window0, std::uint32_t window1,
    std::uint32_t padding0, std::uint32_t padding1,
    std::uint32_t stride0, std::uint32_t stride1,
    Tensor &gx) {
#ifdef PRIMITIV_USE_CUDNN
  const Shape x_shape = x.shape();
  const Shape y_shape = y.shape();

  // Specifies a target device.
  CUDA_CALL(::cudaSetDevice(dev_id_));

  // Prepares descriptors.
  const cuda::CuDNNTensorDescriptor x_desc(
      x_shape.batch(), x_shape[2], x_shape[1], x_shape[0], ::CUDNN_DATA_FLOAT);
  const cuda::CuDNNTensorDescriptor y_desc(
      y_shape.batch(), y_shape[2], y_shape[1], y_shape[0], ::CUDNN_DATA_FLOAT);
  const cuda::CuDNNPoolingDescriptor pool_desc(
      CUDNN_POOLING_MAX, window1, window0, padding1, padding0, stride1, stride0);

  // Performs a backward operation.
  const float alpha = 1.f;
  const float beta = 1.f;
  const float *x_ptr = CDATA(x);
  const float *y_ptr = CDATA(y);
  const float *gy_ptr = CDATA(gy);
  float *gx_ptr = MDATA(gx);
  CUDNN_CALL(::cudnnPoolingBackward(
      state_->cudnn.get(), pool_desc.get(),
      &alpha, y_desc.get(), y_ptr, y_desc.get(), gy_ptr, x_desc.get(), x_ptr,
      &beta, x_desc.get(), gx_ptr));
#else  // PRIMITIV_USE_CUDNN
  static_cast<void>(x);
  static_cast<void>(y);
  static_cast<void>(gy);
  static_cast<void>(window0);
  static_cast<void>(window1);
  static_cast<void>(padding0);
  static_cast<void>(padding1);
  static_cast<void>(stride0);
  static_cast<void>(stride1);
  static_cast<void>(gx);
  PRIMITIV_THROW_NOT_IMPLEMENTED;
#endif  // PRIMITIV_USE_CUDNN
}

}  // namespace devices
}  // namespace primitiv
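Both implementations above hand the pooling geometry to CuDNNPoolingDescriptor with the dimension pairs swapped (window1 before window0, and likewise for padding and stride), since cuDNN's 2D pooling API takes height-first arguments while primitiv stores dimension 0 as width. The sketch below shows the raw cuDNN calls such a wrapper presumably makes, assuming it maps directly onto cudnnSetPooling2dDescriptor; the wrapper's real implementation is not shown in this file, and error checking is omitted for brevity. Note also that the backward pass uses beta = 1.f, so the computed gradient is accumulated into gx rather than overwriting it.

#include <cudnn.h>

// Illustrative only: build a max-pooling descriptor the way the wrapper above
// is assumed to, passing height-first (window1/padding1/stride1) arguments.
inline cudnnPoolingDescriptor_t makeMaxPoolDesc(
    int window0, int window1, int padding0, int padding1,
    int stride0, int stride1) {
  cudnnPoolingDescriptor_t desc;
  cudnnCreatePoolingDescriptor(&desc);
  cudnnSetPooling2dDescriptor(
      desc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN,
      /*windowHeight=*/window1, /*windowWidth=*/window0,
      /*verticalPadding=*/padding1, /*horizontalPadding=*/padding0,
      /*verticalStride=*/stride1, /*horizontalStride=*/stride0);
  return desc;  // caller releases it with cudnnDestroyPoolingDescriptor
}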
eb44962c81bd6e7ed97f3f090bd7738a36d020b6.hip
// !!! This is a file automatically generated by hipify!!! #ifndef COMMON_CU #define COMMON_CU //#define DEBUG_SAVEN 0 // includes, project #include <cutil.h> #include "stdio.h" #include "stdlib.h" #define NUM_RECORDS_R (512*512*4*16) #define NUM_THREADS_SORT (512) #define NUM_BLOCKS_X_SORT (NUM_RECORDS_R/NUM_THREADS_SORT) #define NUM_BLOCKS_Y_SORT (1) #define REDUCE_SUM (0) #define REDUCE_MAX (1) #define REDUCE_MIN (2) #define REDUCE_AVERAGE (3) #define SPLIT (4) #define PARTITION (5) //#define FILTER_CONDITION (d_Rin[idx].y < 100000000) typedef int4 cmp_type_t; //2^n int TwoPowerN( int n ) { return (1<<n); } void gpuPrint(int *d_output, int numResults, char *notes) { #ifdef DEBUG_SAVEN printf("----------%s------------\n", notes); int result=0; int *h_output=(int *)malloc(sizeof(int)*numResults); CUDA_SAFE_CALL( hipMemcpy( h_output, d_output, numResults*sizeof(int) , hipMemcpyDeviceToHost) ); for(int i=0;i<numResults;i++) { printf("%d, ", h_output[i]); result+=h_output[i]; if(i%10==9) printf("\n"); } printf("#results in GPU: %f K, length, %d\n",(double)result, numResults); free(h_output); #endif } void gpuPrintInterval(int *d_output, int numResults, char *notes) { #ifdef DEBUG_SAVEN printf("----------%s------------\n", notes); int result=0; int *h_output=(int *)malloc(sizeof(int)*numResults); CUDA_SAFE_CALL( hipMemcpy( h_output, d_output, numResults*sizeof(int) , hipMemcpyDeviceToHost) ); int unitSize=256; int hist[20];//each is 50. int k=0; for(k=0;k<20;k++) hist[k]=0; int interval=0; for(int i=0;i<numResults;i=i+2) { interval=h_output[i+1]-h_output[i]; //printf("%d, ", interval); if(interval>1) result+=1; interval=interval/unitSize; if(interval>=20) interval=19; hist[interval]++; //if(i%10==8) printf("\n"); } printf("#results in GPU: %f K, length, %d\n",(double)result, numResults); for(k=0;k<20;k++) printf("%d, ", k*unitSize); printf("\n"); for(k=0;k<20;k++) printf("%d, ", hist[k]); printf("\n"); free(h_output); #endif } void gpuPrintInt2(Record *d_output, int numResults, char *notes) { //#ifdef DEBUG_SAVEN printf("----------%s------------\n", notes); int result=0; Record *h_output=(Record *)malloc(sizeof(Record)*numResults); CUDA_SAFE_CALL( hipMemcpy( h_output, d_output, numResults*sizeof(Record) , hipMemcpyDeviceToHost) ); for(int i=0;i<numResults;i++) { printf("[%d,%d], ", h_output[i].x, h_output[i].y); result+=h_output[i].y; if(i%10==9) printf("\n"); } printf("#results in GPU: %f K, length, %d\n",(double)result, numResults); free(h_output); //#endif } void gpuPrintFloat(float *d_output, int numResults, char *notes) { #ifdef DEBUG_SAVEN printf("----------%s------------\n", notes); float result=0; float *h_output=(float *)malloc(sizeof(float)*numResults); CUDA_SAFE_CALL( hipMemcpy( h_output, d_output, numResults*sizeof(float) , hipMemcpyDeviceToHost) ); for(int i=0;i<numResults;i++) { printf("%f, ", h_output[i]); result+=h_output[i]; if(i%10==9) printf("\n"); } printf("#results in GPU: %f K, length, %d\n",(double)result, numResults); free(h_output); #endif } void validateScan( int* input, int rLen, int* output ) { int* checkOutput = (int*)malloc( sizeof(int)*rLen ); checkOutput[0] = 0; for( int i = 1; i < rLen; i++ ) { checkOutput[i] = checkOutput[i - 1] + input[i - 1]; } bool pass = true; for( int i = 0; i < rLen; i++ ) { if( checkOutput[i] != output[i] ) { pass = false; printf( "!!!error\n" ); break; } } if( pass ) { printf( "scan test pass!\n" ); } else { printf( "scan test failed!\n" ); } } void validateProjection( Record* h_Rin, int rLen, Record* originalProjTable, Record* 
h_projTable, int pLen ) { Record* checkProjTable = (Record*)malloc( sizeof(Record)*pLen ); bool pass = true; unsigned int timer = 0; startTimer( &timer ); for( int i = 0; i < pLen; i++ ) { checkProjTable[i].x = originalProjTable[i].x; checkProjTable[i].y = h_Rin[originalProjTable[i].x].y; } endTimer( "cpu projection", &timer ); for( int i = 0; i < pLen; i++ ) { if( (h_projTable[i].x != checkProjTable[i].x) || (h_projTable[i].y != h_projTable[i].y) ) { pass = false; break; } } if( pass ) { printf( "projection test pass!\n" ); } else { printf( "!error, porjection test failed! \n" ); } } void validateAggAfterGroupBy( Record *Rin, int rLen, int* startPos, int numGroups, Record* Ragg, int* aggResults, int OPERATOR ) { bool pass = true; int* checkResult = (int*)malloc( sizeof(int)*numGroups ); int result; //int groupIdx = 0; Record* S = (Record*)malloc( sizeof(Record)*rLen ); for( int i = 0; i < rLen; i++ ) { S[i] = Ragg[Rin[i].x]; } //aggregation unsigned int timer = 0; startTimer( &timer ); int* endPos = (int*)malloc( sizeof(int)*numGroups ); for( int i = 0; i < numGroups - 1; i++ ) { endPos[i] = startPos[i + 1]; } endPos[numGroups - 1] = rLen; for( int i = 0; i < numGroups; i++ ) { if( OPERATOR == REDUCE_MAX ) { result = 0; for( int j = startPos[i]; j < endPos[i]; j++ ) { if( S[j].y > result ) { result = S[j].y; } } checkResult[i] = result; } else if( OPERATOR == REDUCE_MIN ) { result = TEST_MAX; for( int j = startPos[i]; j < endPos[i]; j++ ) { if( S[j].y < result ) { result = S[j].y; } } checkResult[i] = result; } else if( OPERATOR == REDUCE_SUM ) { result = 0; for( int j = startPos[i]; j < endPos[i]; j++ ) { result += S[j].y; } checkResult[i] = result; } else if( OPERATOR == REDUCE_AVERAGE ) { result = 0; for( int j = startPos[i]; j < endPos[i]; j++ ) { result += S[j].y; } checkResult[i] = result/(endPos[i] - startPos[i]); } } endTimer( "cpu aggregration after group by", &timer ); for( int i = 0; i < numGroups; i++ ) { if( checkResult[i] != aggResults[i] ) { printf( "Aggregrate test failed!\n" ); pass = false; break; } } if( pass == true ) { printf( "Test Passed!\n" ); } free( S ); } void validateGroupBy( Record* h_Rin, int rLen, Record* h_Rout, int* h_startPos, int numGroup ) { bool pass = true; qsort(h_Rin,rLen,sizeof(Record),compare); //test sort for( int i = 0; i < rLen; i++ ) { if( (h_Rin[i].y != h_Rout[i].y) ) { pass = false; printf( "sort error!\n" ); } break; } //test group int count = 1; for( int i = 0; i < rLen - 1; i++ ) { if( h_Rin[i].y != h_Rin[i+1].y ) { count++; } } if( count != numGroup ) { pass = false; printf( "count error! 
GPU, %d, CPU, %d\n", numGroup, count ); } int* startPos = (int*)malloc( sizeof(int)*count ); int j = 1; for( int i = 0; i < rLen - 1; i++ ) { if( h_Rin[i].y != h_Rin[i+1].y ) { startPos[j] = i + 1; j++; } } startPos[0] = 0; for( int idx = 0; idx < count; idx++ ) { if( h_startPos[idx] != startPos[idx] ) { pass = false; printf( "start position error!, GPU position: %d, CPU position: %d\n", h_startPos[idx], startPos[idx] ); break; } } if( pass == true ) { printf( "GroupBy Test passed!\n" ); } else { printf( "GroupBy Test failed!\n" ); } } void validateFilter( Record* d_Rin, int beginPos, int rLen, Record* Rout, int outSize, int smallKey, int largeKey) { bool passed = true; unsigned int timer = 0; startTimer( &timer ); int count = 0; for( int i = 0; i < rLen; i++ ) { //the filter condition int idx = beginPos + i; if( ( d_Rin[idx].y >= smallKey ) && ( d_Rin[idx].y <= largeKey ) ) { count++; } } if( count != outSize ) { printf( "!!!filter error: the number error, GPU, %d, CPU, %d\n", outSize, count ); passed = false; exit(0); } Record* v_Rout = (Record*)malloc( sizeof(Record)*outSize ); int j = 0; for( int i = 0; i < rLen; i++ ) { //the filter condition int idx = beginPos + i; if( ( d_Rin[idx].y >= smallKey ) && ( d_Rin[idx].y <= largeKey ) ) { v_Rout[j] = d_Rin[beginPos+i]; j++; } } endTimer( "cpu timer", &timer ); for( int i = 0; i < outSize; i++ ) { if( (v_Rout[i].x != Rout[i].x) || (v_Rout[i].y != Rout[i].y) ) { printf( "!!! filter error\n" ); passed = false; exit(0); } } if( passed ) { printf( "filter passed\n" ); } } void validateReduce( Record* R, int rLen, unsigned int gpuResult, int OPERATOR ) { unsigned int timer = 0; if( OPERATOR == REDUCE_SUM ) { unsigned int cpuSum = 0; startTimer( &timer ); for( int i = 0; i < rLen; i++ ) { cpuSum += R[i].y; } endTimer( "cpu sum", &timer ); if( gpuResult == cpuSum ) { printf( "Test Passed: gpuSum = %d, cpuSum = %d\n", gpuResult, cpuSum ); } else { printf( "!!!Test Failed: gpuSum = %d, cpuSum = %d\n", gpuResult, cpuSum ); } } else if ( OPERATOR == REDUCE_AVERAGE ) { unsigned int cpuAvg = 0; startTimer( &timer ); for( int i = 0; i < rLen; i++ ) { cpuAvg += R[i].y; } cpuAvg = cpuAvg/rLen; endTimer( "cpu sum", &timer ); if( gpuResult == cpuAvg ) { printf( "Test Passed: gpuAvg = %d, cpuAvg = %d\n", gpuResult, cpuAvg ); } else { printf( "!!!Test Failed: gpuAvg = %d, cpuAvg = %d\n", gpuResult, cpuAvg ); } } else if( OPERATOR == REDUCE_MAX ) { int cpuMax = R[0].y; startTimer( &timer ); for( int i = 1; i < rLen; i++ ) { if( R[i].y > cpuMax ) { cpuMax = R[i].y; } } endTimer( "cpu max", &timer ); if( gpuResult == cpuMax ) { printf( "Test Passed: gpuMax = %d, cpuMax = %d\n", gpuResult, cpuMax ); } else { printf( "!!!Test Failed: gpuMax = %d, cpuMax = %d\n", gpuResult, cpuMax ); } } else if( OPERATOR == REDUCE_MIN ) { int cpuMin = R[0].y; startTimer( &timer ); for( int i = 1; i < rLen; i++ ) { if( R[i].y < cpuMin ) { cpuMin = R[i].y; } } endTimer( "cpu min", &timer ); if( gpuResult == cpuMin ) { printf( "Test Passed: gpuMin = %d, cpuMin = %d\n", gpuResult, cpuMin ); } else { printf( "!!!Test Failed: gpuMin = %d, cpuMin = %d\n", gpuResult, cpuMin ); } } } void validateSort(Record *R, int rLen) { int i=0; bool passed=true; for(i=1;i<rLen;i++) { if(R[i].y<R[i-1].y) { printf("!!!error in sorting: %d, %d, %d, %d\n", i-1, R[i-1].y, i,R[i].y); passed=false; return; } } if(passed) printf("sorting passed\n"); } void gpuValidateSort(Record *d_R, int rLen) { int i=0; Record *R; CPUMALLOC((void**)&R, rLen*sizeof(Record)); FROMGPU(R, d_R, rLen*sizeof(Record)); bool 
passed=true; for(i=1;i<rLen;i++) { if(R[i].y<R[i-1].y) { printf("!!!!!!!! error in sorting: %d, %d, %d, %d\n", i-1, R[i-1].y, i,R[i].y); passed=false; return; } } if(passed) printf("sorting passed\n"); CPUFREE(R); } void validateSplit(Record *R, int rLen, int numPart) { int i=0; bool passed=true; for(i=1;i<rLen;i++) { if((R[i].y%numPart)<(R[i-1].y%numPart)) { printf("error in partition: %d, %d, %d, %d\n", i-1, R[i-1].y, i,R[i].y); passed=false; break; } } if(passed) printf("\npartition passed\n"); } unsigned int cpu_RSHash(int value, int mask) { unsigned int b=378551; unsigned int a=63689; unsigned int hash=0; int i=0; for(i=0;i<4;i++) { hash=hash*a+(value>>(24-(i<<3))); a*=b; } return (hash & mask); } void validatePartition( Record* R, int rLen, int numPart ) { bool pass = true; for( int i = 1; i < rLen; i++ ) { if( cpu_RSHash( R[i].y, numPart - 1 ) < cpu_RSHash( R[i - 1].y, numPart - 1 ) ) { printf("error in partition: %d, %d, %d, %d\n", i-1, R[i-1].y, i,R[i].y); pass = false; break; } } if( pass ) { printf( "partition test pass! \n" ); } } int get2N( int rLen ) { unsigned int numRecordsR = 0; unsigned int size = rLen; unsigned int level = 0; while( size != 1 ) { size = size/2; level++; } if( (1<<level) < rLen ) { level++; } numRecordsR = (1<<level); return numRecordsR; } bool is2n(unsigned int i) { if(i==0) return false; else return (i&(i-1))==0; } unsigned int g_timerArray[10]; unsigned long g_totalArray[10]={0}; void array_startTime(int i) { CUT_SAFE_CALL( cutCreateTimer( &(g_timerArray[i]))); CUT_SAFE_CALL( cutStartTimer( (g_timerArray[i]))); } void array_endTime(char *info,int i) { CUT_SAFE_CALL( cutStopTimer( g_timerArray[i])); g_totalArray[i]+=(long)cutGetTimerValue( g_timerArray[i]); printf( "%s (ms), %f, total, %d\n", info, cutGetTimerValue( g_timerArray[i]), g_totalArray[i]); CUT_SAFE_CALL( cutDeleteTimer( g_timerArray[i])); } #endif
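A note on the hash-partition validation in the file above: cpu_RSHash folds the four bytes of the key through the RS hash and then applies the partition count as a bit mask (hash & mask with mask = numPart - 1). That mask trick only yields a valid partition id when numPart is a power of two, which is exactly the property the is2n() helper checks. A small self-contained host-side sketch of the same mapping; the function name and the example values are illustrative only.

#include <cstdio>

// Same byte-folding scheme as cpu_RSHash above, kept host-only for illustration.
static unsigned int rsHash(int value, unsigned int mask) {
    unsigned int b = 378551, a = 63689, hash = 0;
    for (int i = 0; i < 4; i++) {
        hash = hash * a + (value >> (24 - (i << 3)));
        a *= b;
    }
    return hash & mask;  // mask = numPart - 1, valid only for power-of-two numPart
}

int main() {
    const unsigned int numPart = 8;  // must be a power of two for the mask trick
    for (int key = 0; key < 5; key++) {
        printf("key %d -> partition %u\n", key, rsHash(key, numPart - 1));
    }
    return 0;
}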
eb44962c81bd6e7ed97f3f090bd7738a36d020b6.cu
#ifndef COMMON_CU #define COMMON_CU //#define DEBUG_SAVEN 0 // includes, project #include <cutil.h> #include "stdio.h" #include "stdlib.h" #define NUM_RECORDS_R (512*512*4*16) #define NUM_THREADS_SORT (512) #define NUM_BLOCKS_X_SORT (NUM_RECORDS_R/NUM_THREADS_SORT) #define NUM_BLOCKS_Y_SORT (1) #define REDUCE_SUM (0) #define REDUCE_MAX (1) #define REDUCE_MIN (2) #define REDUCE_AVERAGE (3) #define SPLIT (4) #define PARTITION (5) //#define FILTER_CONDITION (d_Rin[idx].y < 100000000) typedef int4 cmp_type_t; //2^n int TwoPowerN( int n ) { return (1<<n); } void gpuPrint(int *d_output, int numResults, char *notes) { #ifdef DEBUG_SAVEN printf("----------%s------------\n", notes); int result=0; int *h_output=(int *)malloc(sizeof(int)*numResults); CUDA_SAFE_CALL( cudaMemcpy( h_output, d_output, numResults*sizeof(int) , cudaMemcpyDeviceToHost) ); for(int i=0;i<numResults;i++) { printf("%d, ", h_output[i]); result+=h_output[i]; if(i%10==9) printf("\n"); } printf("#results in GPU: %f K, length, %d\n",(double)result, numResults); free(h_output); #endif } void gpuPrintInterval(int *d_output, int numResults, char *notes) { #ifdef DEBUG_SAVEN printf("----------%s------------\n", notes); int result=0; int *h_output=(int *)malloc(sizeof(int)*numResults); CUDA_SAFE_CALL( cudaMemcpy( h_output, d_output, numResults*sizeof(int) , cudaMemcpyDeviceToHost) ); int unitSize=256; int hist[20];//each is 50. int k=0; for(k=0;k<20;k++) hist[k]=0; int interval=0; for(int i=0;i<numResults;i=i+2) { interval=h_output[i+1]-h_output[i]; //printf("%d, ", interval); if(interval>1) result+=1; interval=interval/unitSize; if(interval>=20) interval=19; hist[interval]++; //if(i%10==8) printf("\n"); } printf("#results in GPU: %f K, length, %d\n",(double)result, numResults); for(k=0;k<20;k++) printf("%d, ", k*unitSize); printf("\n"); for(k=0;k<20;k++) printf("%d, ", hist[k]); printf("\n"); free(h_output); #endif } void gpuPrintInt2(Record *d_output, int numResults, char *notes) { //#ifdef DEBUG_SAVEN printf("----------%s------------\n", notes); int result=0; Record *h_output=(Record *)malloc(sizeof(Record)*numResults); CUDA_SAFE_CALL( cudaMemcpy( h_output, d_output, numResults*sizeof(Record) , cudaMemcpyDeviceToHost) ); for(int i=0;i<numResults;i++) { printf("[%d,%d], ", h_output[i].x, h_output[i].y); result+=h_output[i].y; if(i%10==9) printf("\n"); } printf("#results in GPU: %f K, length, %d\n",(double)result, numResults); free(h_output); //#endif } void gpuPrintFloat(float *d_output, int numResults, char *notes) { #ifdef DEBUG_SAVEN printf("----------%s------------\n", notes); float result=0; float *h_output=(float *)malloc(sizeof(float)*numResults); CUDA_SAFE_CALL( cudaMemcpy( h_output, d_output, numResults*sizeof(float) , cudaMemcpyDeviceToHost) ); for(int i=0;i<numResults;i++) { printf("%f, ", h_output[i]); result+=h_output[i]; if(i%10==9) printf("\n"); } printf("#results in GPU: %f K, length, %d\n",(double)result, numResults); free(h_output); #endif } void validateScan( int* input, int rLen, int* output ) { int* checkOutput = (int*)malloc( sizeof(int)*rLen ); checkOutput[0] = 0; for( int i = 1; i < rLen; i++ ) { checkOutput[i] = checkOutput[i - 1] + input[i - 1]; } bool pass = true; for( int i = 0; i < rLen; i++ ) { if( checkOutput[i] != output[i] ) { pass = false; printf( "!!!error\n" ); break; } } if( pass ) { printf( "scan test pass!\n" ); } else { printf( "scan test failed!\n" ); } } void validateProjection( Record* h_Rin, int rLen, Record* originalProjTable, Record* h_projTable, int pLen ) { Record* checkProjTable = 
(Record*)malloc( sizeof(Record)*pLen ); bool pass = true; unsigned int timer = 0; startTimer( &timer ); for( int i = 0; i < pLen; i++ ) { checkProjTable[i].x = originalProjTable[i].x; checkProjTable[i].y = h_Rin[originalProjTable[i].x].y; } endTimer( "cpu projection", &timer ); for( int i = 0; i < pLen; i++ ) { if( (h_projTable[i].x != checkProjTable[i].x) || (h_projTable[i].y != h_projTable[i].y) ) { pass = false; break; } } if( pass ) { printf( "projection test pass!\n" ); } else { printf( "!error, porjection test failed! \n" ); } } void validateAggAfterGroupBy( Record *Rin, int rLen, int* startPos, int numGroups, Record* Ragg, int* aggResults, int OPERATOR ) { bool pass = true; int* checkResult = (int*)malloc( sizeof(int)*numGroups ); int result; //int groupIdx = 0; Record* S = (Record*)malloc( sizeof(Record)*rLen ); for( int i = 0; i < rLen; i++ ) { S[i] = Ragg[Rin[i].x]; } //aggregation unsigned int timer = 0; startTimer( &timer ); int* endPos = (int*)malloc( sizeof(int)*numGroups ); for( int i = 0; i < numGroups - 1; i++ ) { endPos[i] = startPos[i + 1]; } endPos[numGroups - 1] = rLen; for( int i = 0; i < numGroups; i++ ) { if( OPERATOR == REDUCE_MAX ) { result = 0; for( int j = startPos[i]; j < endPos[i]; j++ ) { if( S[j].y > result ) { result = S[j].y; } } checkResult[i] = result; } else if( OPERATOR == REDUCE_MIN ) { result = TEST_MAX; for( int j = startPos[i]; j < endPos[i]; j++ ) { if( S[j].y < result ) { result = S[j].y; } } checkResult[i] = result; } else if( OPERATOR == REDUCE_SUM ) { result = 0; for( int j = startPos[i]; j < endPos[i]; j++ ) { result += S[j].y; } checkResult[i] = result; } else if( OPERATOR == REDUCE_AVERAGE ) { result = 0; for( int j = startPos[i]; j < endPos[i]; j++ ) { result += S[j].y; } checkResult[i] = result/(endPos[i] - startPos[i]); } } endTimer( "cpu aggregration after group by", &timer ); for( int i = 0; i < numGroups; i++ ) { if( checkResult[i] != aggResults[i] ) { printf( "Aggregrate test failed!\n" ); pass = false; break; } } if( pass == true ) { printf( "Test Passed!\n" ); } free( S ); } void validateGroupBy( Record* h_Rin, int rLen, Record* h_Rout, int* h_startPos, int numGroup ) { bool pass = true; qsort(h_Rin,rLen,sizeof(Record),compare); //test sort for( int i = 0; i < rLen; i++ ) { if( (h_Rin[i].y != h_Rout[i].y) ) { pass = false; printf( "sort error!\n" ); } break; } //test group int count = 1; for( int i = 0; i < rLen - 1; i++ ) { if( h_Rin[i].y != h_Rin[i+1].y ) { count++; } } if( count != numGroup ) { pass = false; printf( "count error! 
GPU, %d, CPU, %d\n", numGroup, count ); } int* startPos = (int*)malloc( sizeof(int)*count ); int j = 1; for( int i = 0; i < rLen - 1; i++ ) { if( h_Rin[i].y != h_Rin[i+1].y ) { startPos[j] = i + 1; j++; } } startPos[0] = 0; for( int idx = 0; idx < count; idx++ ) { if( h_startPos[idx] != startPos[idx] ) { pass = false; printf( "start position error!, GPU position: %d, CPU position: %d\n", h_startPos[idx], startPos[idx] ); break; } } if( pass == true ) { printf( "GroupBy Test passed!\n" ); } else { printf( "GroupBy Test failed!\n" ); } } void validateFilter( Record* d_Rin, int beginPos, int rLen, Record* Rout, int outSize, int smallKey, int largeKey) { bool passed = true; unsigned int timer = 0; startTimer( &timer ); int count = 0; for( int i = 0; i < rLen; i++ ) { //the filter condition int idx = beginPos + i; if( ( d_Rin[idx].y >= smallKey ) && ( d_Rin[idx].y <= largeKey ) ) { count++; } } if( count != outSize ) { printf( "!!!filter error: the number error, GPU, %d, CPU, %d\n", outSize, count ); passed = false; exit(0); } Record* v_Rout = (Record*)malloc( sizeof(Record)*outSize ); int j = 0; for( int i = 0; i < rLen; i++ ) { //the filter condition int idx = beginPos + i; if( ( d_Rin[idx].y >= smallKey ) && ( d_Rin[idx].y <= largeKey ) ) { v_Rout[j] = d_Rin[beginPos+i]; j++; } } endTimer( "cpu timer", &timer ); for( int i = 0; i < outSize; i++ ) { if( (v_Rout[i].x != Rout[i].x) || (v_Rout[i].y != Rout[i].y) ) { printf( "!!! filter error\n" ); passed = false; exit(0); } } if( passed ) { printf( "filter passed\n" ); } } void validateReduce( Record* R, int rLen, unsigned int gpuResult, int OPERATOR ) { unsigned int timer = 0; if( OPERATOR == REDUCE_SUM ) { unsigned int cpuSum = 0; startTimer( &timer ); for( int i = 0; i < rLen; i++ ) { cpuSum += R[i].y; } endTimer( "cpu sum", &timer ); if( gpuResult == cpuSum ) { printf( "Test Passed: gpuSum = %d, cpuSum = %d\n", gpuResult, cpuSum ); } else { printf( "!!!Test Failed: gpuSum = %d, cpuSum = %d\n", gpuResult, cpuSum ); } } else if ( OPERATOR == REDUCE_AVERAGE ) { unsigned int cpuAvg = 0; startTimer( &timer ); for( int i = 0; i < rLen; i++ ) { cpuAvg += R[i].y; } cpuAvg = cpuAvg/rLen; endTimer( "cpu sum", &timer ); if( gpuResult == cpuAvg ) { printf( "Test Passed: gpuAvg = %d, cpuAvg = %d\n", gpuResult, cpuAvg ); } else { printf( "!!!Test Failed: gpuAvg = %d, cpuAvg = %d\n", gpuResult, cpuAvg ); } } else if( OPERATOR == REDUCE_MAX ) { int cpuMax = R[0].y; startTimer( &timer ); for( int i = 1; i < rLen; i++ ) { if( R[i].y > cpuMax ) { cpuMax = R[i].y; } } endTimer( "cpu max", &timer ); if( gpuResult == cpuMax ) { printf( "Test Passed: gpuMax = %d, cpuMax = %d\n", gpuResult, cpuMax ); } else { printf( "!!!Test Failed: gpuMax = %d, cpuMax = %d\n", gpuResult, cpuMax ); } } else if( OPERATOR == REDUCE_MIN ) { int cpuMin = R[0].y; startTimer( &timer ); for( int i = 1; i < rLen; i++ ) { if( R[i].y < cpuMin ) { cpuMin = R[i].y; } } endTimer( "cpu min", &timer ); if( gpuResult == cpuMin ) { printf( "Test Passed: gpuMin = %d, cpuMin = %d\n", gpuResult, cpuMin ); } else { printf( "!!!Test Failed: gpuMin = %d, cpuMin = %d\n", gpuResult, cpuMin ); } } } void validateSort(Record *R, int rLen) { int i=0; bool passed=true; for(i=1;i<rLen;i++) { if(R[i].y<R[i-1].y) { printf("!!!error in sorting: %d, %d, %d, %d\n", i-1, R[i-1].y, i,R[i].y); passed=false; return; } } if(passed) printf("sorting passed\n"); } void gpuValidateSort(Record *d_R, int rLen) { int i=0; Record *R; CPUMALLOC((void**)&R, rLen*sizeof(Record)); FROMGPU(R, d_R, rLen*sizeof(Record)); bool 
passed=true; for(i=1;i<rLen;i++) { if(R[i].y<R[i-1].y) { printf("!!!!!!!! error in sorting: %d, %d, %d, %d\n", i-1, R[i-1].y, i,R[i].y); passed=false; return; } } if(passed) printf("sorting passed\n"); CPUFREE(R); } void validateSplit(Record *R, int rLen, int numPart) { int i=0; bool passed=true; for(i=1;i<rLen;i++) { if((R[i].y%numPart)<(R[i-1].y%numPart)) { printf("error in partition: %d, %d, %d, %d\n", i-1, R[i-1].y, i,R[i].y); passed=false; break; } } if(passed) printf("\npartition passed\n"); } unsigned int cpu_RSHash(int value, int mask) { unsigned int b=378551; unsigned int a=63689; unsigned int hash=0; int i=0; for(i=0;i<4;i++) { hash=hash*a+(value>>(24-(i<<3))); a*=b; } return (hash & mask); } void validatePartition( Record* R, int rLen, int numPart ) { bool pass = true; for( int i = 1; i < rLen; i++ ) { if( cpu_RSHash( R[i].y, numPart - 1 ) < cpu_RSHash( R[i - 1].y, numPart - 1 ) ) { printf("error in partition: %d, %d, %d, %d\n", i-1, R[i-1].y, i,R[i].y); pass = false; break; } } if( pass ) { printf( "partition test pass! \n" ); } } int get2N( int rLen ) { unsigned int numRecordsR = 0; unsigned int size = rLen; unsigned int level = 0; while( size != 1 ) { size = size/2; level++; } if( (1<<level) < rLen ) { level++; } numRecordsR = (1<<level); return numRecordsR; } bool is2n(unsigned int i) { if(i==0) return false; else return (i&(i-1))==0; } unsigned int g_timerArray[10]; unsigned long g_totalArray[10]={0}; void array_startTime(int i) { CUT_SAFE_CALL( cutCreateTimer( &(g_timerArray[i]))); CUT_SAFE_CALL( cutStartTimer( (g_timerArray[i]))); } void array_endTime(char *info,int i) { CUT_SAFE_CALL( cutStopTimer( g_timerArray[i])); g_totalArray[i]+=(long)cutGetTimerValue( g_timerArray[i]); printf( "%s (ms), %f, total, %d\n", info, cutGetTimerValue( g_timerArray[i]), g_totalArray[i]); CUT_SAFE_CALL( cutDeleteTimer( g_timerArray[i])); } #endif
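validateScan in the file above checks an exclusive prefix sum: output[0] is 0 and each later output[i] is the sum of input[0..i-1], so the last input element never contributes to any output. A short worked example of the convention it enforces, as a CPU reference only; the function and variable names are illustrative.

#include <cstdio>

// Exclusive prefix sum, matching the reference computed inside validateScan:
// out[0] = 0, out[i] = out[i-1] + in[i-1].
static void exclusiveScan(const int* in, int* out, int n) {
    out[0] = 0;
    for (int i = 1; i < n; i++) {
        out[i] = out[i - 1] + in[i - 1];
    }
}

int main() {
    int in[5] = {3, 1, 4, 1, 5};
    int out[5];
    exclusiveScan(in, out, 5);
    for (int i = 0; i < 5; i++) printf("%d ", out[i]);  // prints: 0 3 4 8 9
    printf("\n");
    return 0;
}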
d3c423217787f82f3cab06e582fa77fc5deb04b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ATen/ATen.h" #include "ATen/native/GridSampler.h" #include "ATen/hip/HIPContext.h" #include "ATen/hip/HIPApplyUtils.cuh" #include "ATen/hip/detail/TensorInfo.cuh" #include "ATen/hip/detail/IndexUtils.cuh" #include "ATen/hip/detail/KernelUtils.h" namespace at { namespace native { using namespace at::cuda::detail; using at::native::detail::GridSamplerInterpolation; using at::native::detail::GridSamplerPadding; namespace { static __forceinline__ __device__ float clip_coordinates(float in, int clip_limit) { return ::min(static_cast<float>(clip_limit - 1), ::max(in, 0.f)); } // clip_coordinates_set_grad works similarly to clip_coordinates except that // it also returns the `d output / d input` via pointer argument `grad_in`. // This is useful in the backward pass of grid_sampler. template <typename scalar_t> static __forceinline__ __device__ float clip_coordinates_set_grad(float in, int clip_limit, scalar_t *grad_in) { if (in < 0.f) { *grad_in = static_cast<scalar_t>(0); return 0.f; } else { float max = static_cast<float>(clip_limit - 1); if (in > max) { *grad_in = static_cast<scalar_t>(0); return max; } else { *grad_in = static_cast<scalar_t>(1); return in; } } } static __forceinline__ __device__ float reflect_coordinates(float in, int clip_limit) { if (clip_limit == static_cast<int>(1)) { return 0.f; } in = ::fabs(in); float max = static_cast<float>(clip_limit - 1); // `fmod` returns same sign as `in`, which is positive after the `fabs` above. float extra = ::fmod(in, max); int flips = static_cast<int>(::floor(in / max)); if (flips % 2 == 0) { return extra; } else { return max - extra; } } // reflect_coordinates_set_grad works similarly to reflect_coordinates except // that it also returns the `d output / d input` via pointer argument // `grad_in`. // This is useful in the backward pass of grid_sampler. template <typename scalar_t> static __forceinline__ __device__ float reflect_coordinates_set_grad(float in, int clip_limit, scalar_t *grad_in) { if (clip_limit == static_cast<int>(1)) { *grad_in = static_cast<scalar_t>(0); return 0.f; } int grad_in_mult_; if (in < 0.f) { grad_in_mult_ = -1; in = -in; } else { grad_in_mult_ = 1; } float max = static_cast<float>(clip_limit - 1); // `fmod` returns same sign as `in`, which is positive after the `if` above. 
float extra = ::fmod(in, max); int flips = static_cast<int>(::floor(in / max)); if (flips % 2 == 0) { *grad_in = static_cast<scalar_t>(grad_in_mult_); return extra; } else { *grad_in = static_cast<scalar_t>(-grad_in_mult_); return max - extra; } } static __forceinline__ __device__ bool within_bounds_2d(int h, int w, int H, int W) { return h >= 0 && h < H && w >= 0 && w < W; } static __forceinline__ __device__ bool within_bounds_3d(int d, int h, int w, int D, int H, int W) { return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W; } template<typename scalar_t> static __forceinline__ __device__ void safe_add_2d(scalar_t *data, int h, int w, int sH, int sW, int H, int W, scalar_t delta) { if (within_bounds_2d(h, w, H, W)) { atomicAdd(data + h * sH + w * sW, delta); } } template<typename scalar_t> static __forceinline__ __device__ void safe_add_3d(scalar_t *data, int d, int h, int w, int sD, int sH, int sW, int D, int H, int W, scalar_t delta) { if (within_bounds_3d(d, h, w, D, H, W)) { atomicAdd(data + d * sD + h * sH + w * sW, delta); } } template <typename scalar_t> __launch_bounds__(1024) __global__ void grid_sampler_2d_kernel( const int nthreads, TensorInfo<scalar_t, int> input, TensorInfo<scalar_t, int> grid, TensorInfo<scalar_t, int> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode) { int C = input.sizes[1]; int inp_H = input.sizes[2]; int inp_W = input.sizes[3]; int out_H = grid.sizes[1]; int out_W = grid.sizes[2]; int inp_sN = input.strides[0]; int inp_sC = input.strides[1]; int inp_sH = input.strides[2]; int inp_sW = input.strides[3]; int grid_sN = grid.strides[0]; int grid_sH = grid.strides[1]; int grid_sW = grid.strides[2]; int grid_sCoor = grid.strides[3]; int out_sN = output.strides[0]; int out_sC = output.strides[1]; int out_sH = output.strides[2]; int out_sW = output.strides[3]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int n = index / (out_H * out_W); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; // normalize ix, iy from [-1, 1] to [0, IH-1] & [0, IW-1] float ixf = ((ix + 1.f) / 2) * (inp_W - 1); float iyf = ((iy + 1.f) / 2) * (inp_H - 1); if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders ixf = clip_coordinates(ixf, inp_W); iyf = clip_coordinates(iyf, inp_H); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders ixf = reflect_coordinates(ixf, inp_W); iyf = reflect_coordinates(iyf, inp_H); } ix = static_cast<scalar_t>(ixf); iy = static_cast<scalar_t>(iyf); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) int ix_nw = static_cast<int>(::floor(ixf)); int iy_nw = static_cast<int>(::floor(iyf)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); // calculate bilinear weighted pixel value and set output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, 
out_ptr_NCHW += out_sC) { *out_ptr_NCHW = static_cast<scalar_t>(0); if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ixf)); int iy_nearest = static_cast<int>(::round(iyf)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) { *out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCHW = static_cast<scalar_t>(0); } } } } } template <typename scalar_t> __launch_bounds__(1024) __global__ void grid_sampler_3d_kernel( const int nthreads, TensorInfo<scalar_t, int> input, TensorInfo<scalar_t, int> grid, TensorInfo<scalar_t, int> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode) { int C = input.sizes[1]; int inp_D = input.sizes[2]; int inp_H = input.sizes[3]; int inp_W = input.sizes[4]; int out_D = grid.sizes[1]; int out_H = grid.sizes[2]; int out_W = grid.sizes[3]; int inp_sN = input.strides[0]; int inp_sC = input.strides[1]; int inp_sD = input.strides[2]; int inp_sH = input.strides[3]; int inp_sW = input.strides[4]; int grid_sN = grid.strides[0]; int grid_sD = grid.strides[1]; int grid_sH = grid.strides[2]; int grid_sW = grid.strides[3]; int grid_sCoor = grid.strides[4]; int out_sN = output.strides[0]; int out_sC = output.strides[1]; int out_sD = output.strides[2]; int out_sH = output.strides[3]; int out_sW = output.strides[4]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int d = (index / (out_H * out_W)) % out_D; const int n = index / (out_D * out_H * out_W); const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; // normalize ix, iy, iz from [-1, 1] to [0, inp_W-1] & [0, inp_H-1] & [0, inp_D-1] float ixf = ((ix + 1.f) / 2) * (inp_W - 1); float iyf = ((iy + 1.f) / 2) * (inp_H - 1); float izf = ((iz + 1.f) / 2) * (inp_D - 1); if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders ixf = clip_coordinates(ixf, inp_W); iyf = clip_coordinates(iyf, inp_H); izf = clip_coordinates(izf, inp_D); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders ixf = reflect_coordinates(ixf, inp_W); iyf = reflect_coordinates(iyf, inp_H); izf = reflect_coordinates(izf, inp_D); } if (interpolation_mode == GridSamplerInterpolation::Bilinear) { ix = static_cast<scalar_t>(ixf); iy = static_cast<scalar_t>(iyf); iz = static_cast<scalar_t>(izf); // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom int ix_tnw = 
static_cast<int>(::floor(ix)); int iy_tnw = static_cast<int>(::floor(iy)); int iz_tnw = static_cast<int>(::floor(iz)); int ix_tne = ix_tnw + 1; int iy_tne = iy_tnw; int iz_tne = iz_tnw; int ix_tsw = ix_tnw; int iy_tsw = iy_tnw + 1; int iz_tsw = iz_tnw; int ix_tse = ix_tnw + 1; int iy_tse = iy_tnw + 1; int iz_tse = iz_tnw; int ix_bnw = ix_tnw; int iy_bnw = iy_tnw; int iz_bnw = iz_tnw + 1; int ix_bne = ix_tnw + 1; int iy_bne = iy_tnw; int iz_bne = iz_tnw + 1; int ix_bsw = ix_tnw; int iy_bsw = iy_tnw + 1; int iz_bsw = iz_tnw + 1; int ix_bse = ix_tnw + 1; int iy_bse = iy_tnw + 1; int iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { // (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne // + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse // + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne // + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse *out_ptr_NCDHW = static_cast<scalar_t>(0); if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ixf)); int iy_nearest = static_cast<int>(::round(iyf)); int iz_nearest = static_cast<int>(::round(izf)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW = 
inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCDHW = static_cast<scalar_t>(0); } } } } } template <typename scalar_t> __launch_bounds__(1024) __global__ void grid_sampler_2d_backward_kernel( const int nthreads, TensorInfo<scalar_t, int> grad_output, TensorInfo<scalar_t, int> input, TensorInfo<scalar_t, int> grid, TensorInfo<scalar_t, int> grad_input, // initialized to zeros TensorInfo<scalar_t, int> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode) { int C = input.sizes[1]; int inp_H = input.sizes[2]; int inp_W = input.sizes[3]; int out_H = grid.sizes[1]; int out_W = grid.sizes[2]; int inp_sN = input.strides[0]; int inp_sC = input.strides[1]; int inp_sH = input.strides[2]; int inp_sW = input.strides[3]; int grid_sN = grid.strides[0]; int grid_sH = grid.strides[1]; int grid_sW = grid.strides[2]; int grid_sCoor = grid.strides[3]; int gOut_sN = grad_output.strides[0]; int gOut_sC = grad_output.strides[1]; int gOut_sH = grad_output.strides[2]; int gOut_sW = grad_output.strides[3]; int gInp_sN = grad_input.strides[0]; int gInp_sC = grad_input.strides[1]; int gInp_sH = grad_input.strides[2]; int gInp_sW = grad_input.strides[3]; int gGrid_sW = grad_grid.strides[2]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int n = index / (out_H * out_W); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; // normalize ix, iy from [-1, 1] to [0, IH-1] & [0, IW-1] float ixf = ((ix + 1.f) / 2) * (inp_W - 1); float iyf = ((iy + 1.f) / 2) * (inp_H - 1); // multipliers for gradients on ix and iy // E.g., 0 for out-of-bound indices when GridSamplerPadding::Border scalar_t gix_mult, giy_mult; if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders ixf = clip_coordinates_set_grad(ixf, inp_W, &gix_mult); iyf = clip_coordinates_set_grad(iyf, inp_H, &giy_mult); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders ixf = reflect_coordinates_set_grad(ixf, inp_W, &gix_mult); iyf = reflect_coordinates_set_grad(iyf, inp_H, &giy_mult); } else { // padding_mode == GridSamplerPadding::Zeros gix_mult = static_cast<scalar_t>(1); giy_mult = static_cast<scalar_t>(1); } if (interpolation_mode == GridSamplerInterpolation::Bilinear) { ix = static_cast<scalar_t>(ixf); iy = static_cast<scalar_t>(iyf); // get NE, NW, SE, SW pixel values from (x, y) int ix_nw = static_cast<int>(::floor(ixf)); int iy_nw = static_cast<int>(::floor(iyf)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) { scalar_t gOut = *gOut_ptr_NCHW; // calculate and set grad_input safe_add_2d(gInp_ptr_NC, iy_nw, 
ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut); safe_add_2d(gInp_ptr_NC, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut); safe_add_2d(gInp_ptr_NC, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut); safe_add_2d(gInp_ptr_NC, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut); // calculate grad_grid if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW]; gix -= nw_val * (iy_se - iy) * gOut; giy -= nw_val * (ix_se - ix) * gOut; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW]; gix += ne_val * (iy_sw - iy) * gOut; giy -= ne_val * (ix - ix_sw) * gOut; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW]; gix -= sw_val * (iy - iy_ne) * gOut; giy += sw_val * (ix_ne - ix) * gOut; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW]; gix += se_val * (iy - iy_nw) * gOut; giy += se_val * (ix - ix_nw) * gOut; } } // un-normalize grad_grid values back to [-1, 1] constraints gix = gix * (inp_W - 1.f) / 2; giy = giy * (inp_H - 1.f) / 2; // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to diectly compute gGrid_ptr_NHW // 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ixf)); int iy_nearest = static_cast<int>(::round(iyf)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; for (int c = 0; c < C; ++c, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) { // calculate and set grad_input safe_add_2d(gInp_ptr_NC, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW); } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to diectly compute gGrid_ptr_NHW // 2. 
directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NHW[1] = static_cast<scalar_t>(0); } } } template <typename scalar_t> __launch_bounds__(1024) __global__ void grid_sampler_3d_backward_kernel( const int nthreads, TensorInfo<scalar_t, int> grad_output, TensorInfo<scalar_t, int> input, TensorInfo<scalar_t, int> grid, TensorInfo<scalar_t, int> grad_input, // initialized to zeros TensorInfo<scalar_t, int> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode) { int C = input.sizes[1]; int inp_D = input.sizes[2]; int inp_H = input.sizes[3]; int inp_W = input.sizes[4]; int out_D = grid.sizes[1]; int out_H = grid.sizes[2]; int out_W = grid.sizes[3]; int inp_sN = input.strides[0]; int inp_sC = input.strides[1]; int inp_sD = input.strides[2]; int inp_sH = input.strides[3]; int inp_sW = input.strides[4]; int grid_sN = grid.strides[0]; int grid_sD = grid.strides[1]; int grid_sH = grid.strides[2]; int grid_sW = grid.strides[3]; int grid_sCoor = grid.strides[4]; int gOut_sN = grad_output.strides[0]; int gOut_sC = grad_output.strides[1]; int gOut_sD = grad_output.strides[2]; int gOut_sH = grad_output.strides[3]; int gOut_sW = grad_output.strides[4]; int gInp_sN = grad_input.strides[0]; int gInp_sC = grad_input.strides[1]; int gInp_sD = grad_input.strides[2]; int gInp_sH = grad_input.strides[3]; int gInp_sW = grad_input.strides[4]; int gGrid_sW = grad_grid.strides[3]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int d = (index / (out_H * out_W)) % out_D; const int n = index / (out_D * out_H * out_W); const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; // normalize ix, iy, iz from [-1, 1] to [0, inp_W-1] & [0, inp_H-1] & [0, inp_D-1] float ixf = ((ix + 1.f) / 2) * (inp_W - 1); float iyf = ((iy + 1.f) / 2) * (inp_H - 1); float izf = ((iz + 1.f) / 2) * (inp_D - 1); // multipliers for gradients on ix, iy, and iz // E.g., 0 for out-of-bound indices when GridSamplerPadding::Border scalar_t gix_mult, giy_mult, giz_mult; if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders ixf = clip_coordinates_set_grad(ixf, inp_W, &gix_mult); iyf = clip_coordinates_set_grad(iyf, inp_H, &giy_mult); izf = clip_coordinates_set_grad(izf, inp_D, &giz_mult); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders ixf = reflect_coordinates_set_grad(ixf, inp_W, &gix_mult); iyf = reflect_coordinates_set_grad(iyf, inp_H, &giy_mult); izf = reflect_coordinates_set_grad(izf, inp_D, &giz_mult); } else { // padding_mode == GridSamplerPadding::Zeros gix_mult = static_cast<scalar_t>(1); giy_mult = static_cast<scalar_t>(1); giz_mult = static_cast<scalar_t>(1); } if (interpolation_mode == GridSamplerInterpolation::Bilinear) { ix = static_cast<scalar_t>(ixf); iy = static_cast<scalar_t>(iyf); iz = static_cast<scalar_t>(izf); // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom int ix_tnw = static_cast<int>(::floor(ix)); int iy_tnw = static_cast<int>(::floor(iy)); int iz_tnw = static_cast<int>(::floor(iz)); int ix_tne = ix_tnw + 1; 
int iy_tne = iy_tnw; int iz_tne = iz_tnw; int ix_tsw = ix_tnw; int iy_tsw = iy_tnw + 1; int iz_tsw = iz_tnw; int ix_tse = ix_tnw + 1; int iy_tse = iy_tnw + 1; int iz_tse = iz_tnw; int ix_bnw = ix_tnw; int iy_bnw = iy_tnw; int iz_bnw = iz_tnw + 1; int ix_bne = ix_tnw + 1; int iy_bne = iy_tnw; int iz_bne = iz_tnw + 1; int ix_bsw = ix_tnw; int iy_bsw = iy_tnw + 1; int iz_bsw = iz_tnw + 1; int ix_bse = ix_tnw + 1; int iy_bse = iy_tnw + 1; int iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; // calculate bilinear weighted pixel value and set output pixel for (int c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC, inp_ptr_NC += inp_sC) { scalar_t gOut = *gOut_ptr_NCDHW; // calculate and set grad_input safe_add_3d(gInp_ptr_NC, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut); safe_add_3d(gInp_ptr_NC, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut); safe_add_3d(gInp_ptr_NC, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut); safe_add_3d(gInp_ptr_NC, iz_tse, iy_tse, ix_tse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut); safe_add_3d(gInp_ptr_NC, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut); safe_add_3d(gInp_ptr_NC, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut); safe_add_3d(gInp_ptr_NC, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut); safe_add_3d(gInp_ptr_NC, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut); // calculate grad_grid if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW]; gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut; giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut; giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW]; gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut; giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut; giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW]; gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut; giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut; giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW]; gix 
+= tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut; giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut; giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW]; gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut; giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut; giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW]; gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut; giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut; giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW]; gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut; giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut; giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { scalar_t bse_val = inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW]; gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut; giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut; giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut; } } // un-normalize grad_grid values back to [-1, 1] constraints gix = gix * (inp_W - 1) / 2; giy = giy * (inp_H - 1) / 2; giz = giz * (inp_D - 1) / 2; // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to diectly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = gix_mult * gix; gGrid_ptr_NDHW[1] = giy_mult * giy; gGrid_ptr_NDHW[2] = giz_mult * giz; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ixf)); int iy_nearest = static_cast<int>(::round(iyf)); int iz_nearest = static_cast<int>(::round(izf)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; for (int c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC) { // calculate and set grad_input safe_add_3d(gInp_ptr_NC, iz_nearest, iy_nearest, ix_nearest, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW); } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to diectly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[1] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[2] = static_cast<scalar_t>(0); } } } } // namespace // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
Tensor grid_sampler_2d_cuda(const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode) { auto N = input.size(0); auto H = grid.size(1); auto W = grid.size(2); auto output = at::empty({N, input.size(1), H, W}, input.options()); int count = static_cast<int>(N * H * W); if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "grid_sampler_2d_cuda", [&] { hipLaunchKernelGGL(( grid_sampler_2d_kernel<scalar_t>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode)); }); } return output; } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. Tensor grid_sampler_3d_cuda(const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode) { auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); auto output = at::empty({N, input.size(1), D, H, W}, input.options()); int count = static_cast<int>(N * D * H * W); if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "grid_sampler_2d_cuda", [&] { hipLaunchKernelGGL(( grid_sampler_3d_kernel<scalar_t>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode)); }); } return output; } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. std::tuple<Tensor, Tensor> grid_sampler_2d_backward_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode) { auto N = input.size(0); auto H = grid.size(1); auto W = grid.size(2); auto grad_input = at::zeros_like(input); auto grad_grid = at::empty_like(grid); int count = static_cast<int>(N * H * W); if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "grid_sampler_2d_backward_cuda", [&] { hipLaunchKernelGGL(( grid_sampler_2d_backward_kernel<scalar_t>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(grad_input), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode)); }); } return std::make_tuple(grad_input, grad_grid); } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
std::tuple<Tensor, Tensor> grid_sampler_3d_backward_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode) { auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); auto grad_input = at::zeros_like(input); auto grad_grid = at::empty_like(grid); int count = static_cast<int>(N * D * H * W); if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "grid_sampler_3d_backward_cuda", [&] { hipLaunchKernelGGL(( grid_sampler_3d_backward_kernel<scalar_t>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(grad_input), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode)); }); } return std::make_tuple(grad_input, grad_grid); } }} // namespace at::native
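The forward kernels in the file above map each grid value from [-1, 1] into input pixel space with ((x + 1) / 2) * (size - 1) and then weight the surrounding corner pixels by opposite-corner areas. The following is a minimal host-side sketch, not part of the original sources, of the 2D unnormalize step and the four bilinear weights; the names are hypothetical, and the point is that the weights always sum to 1, so in-bounds sampling is a convex combination of the corner pixels.

#include <cmath>
#include <cstdio>

// Illustrative host-side sketch of the unnormalize step and the bilinear corner
// weights used by grid_sampler_2d_kernel above. Names are hypothetical.
static float unnormalize(float coord, int size) {
  return ((coord + 1.f) / 2.f) * (size - 1);        // [-1, 1] -> [0, size-1]
}

int main() {
  const int inp_H = 4, inp_W = 5;
  float ix = unnormalize(0.25f, inp_W);             // 2.5
  float iy = unnormalize(-0.5f, inp_H);             // 0.75
  int ix_nw = (int)std::floor(ix), iy_nw = (int)std::floor(iy);
  // Opposite-corner areas, exactly as in the kernel (ix_se = ix_nw + 1, iy_se = iy_nw + 1).
  float nw = (ix_nw + 1 - ix) * (iy_nw + 1 - iy);
  float ne = (ix - ix_nw)     * (iy_nw + 1 - iy);
  float sw = (ix_nw + 1 - ix) * (iy - iy_nw);
  float se = (ix - ix_nw)     * (iy - iy_nw);
  std::printf("weights sum to %f\n", nw + ne + sw + se);  // prints 1.0
  return 0;
}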
d3c423217787f82f3cab06e582fa77fc5deb04b3.cu
#include "ATen/ATen.h" #include "ATen/native/GridSampler.h" #include "ATen/cuda/CUDAContext.h" #include "ATen/cuda/CUDAApplyUtils.cuh" #include "ATen/cuda/detail/TensorInfo.cuh" #include "ATen/cuda/detail/IndexUtils.cuh" #include "ATen/cuda/detail/KernelUtils.h" namespace at { namespace native { using namespace at::cuda::detail; using at::native::detail::GridSamplerInterpolation; using at::native::detail::GridSamplerPadding; namespace { static __forceinline__ __device__ float clip_coordinates(float in, int clip_limit) { return ::min(static_cast<float>(clip_limit - 1), ::max(in, 0.f)); } // clip_coordinates_set_grad works similarly to clip_coordinates except that // it also returns the `d output / d input` via pointer argument `grad_in`. // This is useful in the backward pass of grid_sampler. template <typename scalar_t> static __forceinline__ __device__ float clip_coordinates_set_grad(float in, int clip_limit, scalar_t *grad_in) { if (in < 0.f) { *grad_in = static_cast<scalar_t>(0); return 0.f; } else { float max = static_cast<float>(clip_limit - 1); if (in > max) { *grad_in = static_cast<scalar_t>(0); return max; } else { *grad_in = static_cast<scalar_t>(1); return in; } } } static __forceinline__ __device__ float reflect_coordinates(float in, int clip_limit) { if (clip_limit == static_cast<int>(1)) { return 0.f; } in = ::fabs(in); float max = static_cast<float>(clip_limit - 1); // `fmod` returns same sign as `in`, which is positive after the `fabs` above. float extra = ::fmod(in, max); int flips = static_cast<int>(::floor(in / max)); if (flips % 2 == 0) { return extra; } else { return max - extra; } } // reflect_coordinates_set_grad works similarly to reflect_coordinates except // that it also returns the `d output / d input` via pointer argument // `grad_in`. // This is useful in the backward pass of grid_sampler. template <typename scalar_t> static __forceinline__ __device__ float reflect_coordinates_set_grad(float in, int clip_limit, scalar_t *grad_in) { if (clip_limit == static_cast<int>(1)) { *grad_in = static_cast<scalar_t>(0); return 0.f; } int grad_in_mult_; if (in < 0.f) { grad_in_mult_ = -1; in = -in; } else { grad_in_mult_ = 1; } float max = static_cast<float>(clip_limit - 1); // `fmod` returns same sign as `in`, which is positive after the `if` above. 
float extra = ::fmod(in, max); int flips = static_cast<int>(::floor(in / max)); if (flips % 2 == 0) { *grad_in = static_cast<scalar_t>(grad_in_mult_); return extra; } else { *grad_in = static_cast<scalar_t>(-grad_in_mult_); return max - extra; } } static __forceinline__ __device__ bool within_bounds_2d(int h, int w, int H, int W) { return h >= 0 && h < H && w >= 0 && w < W; } static __forceinline__ __device__ bool within_bounds_3d(int d, int h, int w, int D, int H, int W) { return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W; } template<typename scalar_t> static __forceinline__ __device__ void safe_add_2d(scalar_t *data, int h, int w, int sH, int sW, int H, int W, scalar_t delta) { if (within_bounds_2d(h, w, H, W)) { atomicAdd(data + h * sH + w * sW, delta); } } template<typename scalar_t> static __forceinline__ __device__ void safe_add_3d(scalar_t *data, int d, int h, int w, int sD, int sH, int sW, int D, int H, int W, scalar_t delta) { if (within_bounds_3d(d, h, w, D, H, W)) { atomicAdd(data + d * sD + h * sH + w * sW, delta); } } template <typename scalar_t> __launch_bounds__(1024) __global__ void grid_sampler_2d_kernel( const int nthreads, TensorInfo<scalar_t, int> input, TensorInfo<scalar_t, int> grid, TensorInfo<scalar_t, int> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode) { int C = input.sizes[1]; int inp_H = input.sizes[2]; int inp_W = input.sizes[3]; int out_H = grid.sizes[1]; int out_W = grid.sizes[2]; int inp_sN = input.strides[0]; int inp_sC = input.strides[1]; int inp_sH = input.strides[2]; int inp_sW = input.strides[3]; int grid_sN = grid.strides[0]; int grid_sH = grid.strides[1]; int grid_sW = grid.strides[2]; int grid_sCoor = grid.strides[3]; int out_sN = output.strides[0]; int out_sC = output.strides[1]; int out_sH = output.strides[2]; int out_sW = output.strides[3]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int n = index / (out_H * out_W); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; // normalize ix, iy from [-1, 1] to [0, IH-1] & [0, IW-1] float ixf = ((ix + 1.f) / 2) * (inp_W - 1); float iyf = ((iy + 1.f) / 2) * (inp_H - 1); if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders ixf = clip_coordinates(ixf, inp_W); iyf = clip_coordinates(iyf, inp_H); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders ixf = reflect_coordinates(ixf, inp_W); iyf = reflect_coordinates(iyf, inp_H); } ix = static_cast<scalar_t>(ixf); iy = static_cast<scalar_t>(iyf); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) int ix_nw = static_cast<int>(::floor(ixf)); int iy_nw = static_cast<int>(::floor(iyf)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); // calculate bilinear weighted pixel value and set output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, 
out_ptr_NCHW += out_sC) { *out_ptr_NCHW = static_cast<scalar_t>(0); if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ixf)); int iy_nearest = static_cast<int>(::round(iyf)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) { *out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCHW = static_cast<scalar_t>(0); } } } } } template <typename scalar_t> __launch_bounds__(1024) __global__ void grid_sampler_3d_kernel( const int nthreads, TensorInfo<scalar_t, int> input, TensorInfo<scalar_t, int> grid, TensorInfo<scalar_t, int> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode) { int C = input.sizes[1]; int inp_D = input.sizes[2]; int inp_H = input.sizes[3]; int inp_W = input.sizes[4]; int out_D = grid.sizes[1]; int out_H = grid.sizes[2]; int out_W = grid.sizes[3]; int inp_sN = input.strides[0]; int inp_sC = input.strides[1]; int inp_sD = input.strides[2]; int inp_sH = input.strides[3]; int inp_sW = input.strides[4]; int grid_sN = grid.strides[0]; int grid_sD = grid.strides[1]; int grid_sH = grid.strides[2]; int grid_sW = grid.strides[3]; int grid_sCoor = grid.strides[4]; int out_sN = output.strides[0]; int out_sC = output.strides[1]; int out_sD = output.strides[2]; int out_sH = output.strides[3]; int out_sW = output.strides[4]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int d = (index / (out_H * out_W)) % out_D; const int n = index / (out_D * out_H * out_W); const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; // normalize ix, iy, iz from [-1, 1] to [0, inp_W-1] & [0, inp_H-1] & [0, inp_D-1] float ixf = ((ix + 1.f) / 2) * (inp_W - 1); float iyf = ((iy + 1.f) / 2) * (inp_H - 1); float izf = ((iz + 1.f) / 2) * (inp_D - 1); if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders ixf = clip_coordinates(ixf, inp_W); iyf = clip_coordinates(iyf, inp_H); izf = clip_coordinates(izf, inp_D); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders ixf = reflect_coordinates(ixf, inp_W); iyf = reflect_coordinates(iyf, inp_H); izf = reflect_coordinates(izf, inp_D); } if (interpolation_mode == GridSamplerInterpolation::Bilinear) { ix = static_cast<scalar_t>(ixf); iy = static_cast<scalar_t>(iyf); iz = static_cast<scalar_t>(izf); // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom int ix_tnw = 
static_cast<int>(::floor(ix)); int iy_tnw = static_cast<int>(::floor(iy)); int iz_tnw = static_cast<int>(::floor(iz)); int ix_tne = ix_tnw + 1; int iy_tne = iy_tnw; int iz_tne = iz_tnw; int ix_tsw = ix_tnw; int iy_tsw = iy_tnw + 1; int iz_tsw = iz_tnw; int ix_tse = ix_tnw + 1; int iy_tse = iy_tnw + 1; int iz_tse = iz_tnw; int ix_bnw = ix_tnw; int iy_bnw = iy_tnw; int iz_bnw = iz_tnw + 1; int ix_bne = ix_tnw + 1; int iy_bne = iy_tnw; int iz_bne = iz_tnw + 1; int ix_bsw = ix_tnw; int iy_bsw = iy_tnw + 1; int iz_bsw = iz_tnw + 1; int ix_bse = ix_tnw + 1; int iy_bse = iy_tnw + 1; int iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { // (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne // + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse // + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne // + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse *out_ptr_NCDHW = static_cast<scalar_t>(0); if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ixf)); int iy_nearest = static_cast<int>(::round(iyf)); int iz_nearest = static_cast<int>(::round(izf)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW = 
inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCDHW = static_cast<scalar_t>(0); } } } } } template <typename scalar_t> __launch_bounds__(1024) __global__ void grid_sampler_2d_backward_kernel( const int nthreads, TensorInfo<scalar_t, int> grad_output, TensorInfo<scalar_t, int> input, TensorInfo<scalar_t, int> grid, TensorInfo<scalar_t, int> grad_input, // initialized to zeros TensorInfo<scalar_t, int> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode) { int C = input.sizes[1]; int inp_H = input.sizes[2]; int inp_W = input.sizes[3]; int out_H = grid.sizes[1]; int out_W = grid.sizes[2]; int inp_sN = input.strides[0]; int inp_sC = input.strides[1]; int inp_sH = input.strides[2]; int inp_sW = input.strides[3]; int grid_sN = grid.strides[0]; int grid_sH = grid.strides[1]; int grid_sW = grid.strides[2]; int grid_sCoor = grid.strides[3]; int gOut_sN = grad_output.strides[0]; int gOut_sC = grad_output.strides[1]; int gOut_sH = grad_output.strides[2]; int gOut_sW = grad_output.strides[3]; int gInp_sN = grad_input.strides[0]; int gInp_sC = grad_input.strides[1]; int gInp_sH = grad_input.strides[2]; int gInp_sW = grad_input.strides[3]; int gGrid_sW = grad_grid.strides[2]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int n = index / (out_H * out_W); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; // normalize ix, iy from [-1, 1] to [0, IH-1] & [0, IW-1] float ixf = ((ix + 1.f) / 2) * (inp_W - 1); float iyf = ((iy + 1.f) / 2) * (inp_H - 1); // multipliers for gradients on ix and iy // E.g., 0 for out-of-bound indices when GridSamplerPadding::Border scalar_t gix_mult, giy_mult; if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders ixf = clip_coordinates_set_grad(ixf, inp_W, &gix_mult); iyf = clip_coordinates_set_grad(iyf, inp_H, &giy_mult); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders ixf = reflect_coordinates_set_grad(ixf, inp_W, &gix_mult); iyf = reflect_coordinates_set_grad(iyf, inp_H, &giy_mult); } else { // padding_mode == GridSamplerPadding::Zeros gix_mult = static_cast<scalar_t>(1); giy_mult = static_cast<scalar_t>(1); } if (interpolation_mode == GridSamplerInterpolation::Bilinear) { ix = static_cast<scalar_t>(ixf); iy = static_cast<scalar_t>(iyf); // get NE, NW, SE, SW pixel values from (x, y) int ix_nw = static_cast<int>(::floor(ixf)); int iy_nw = static_cast<int>(::floor(iyf)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) { scalar_t gOut = *gOut_ptr_NCHW; // calculate and set grad_input safe_add_2d(gInp_ptr_NC, iy_nw, 
ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut); safe_add_2d(gInp_ptr_NC, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut); safe_add_2d(gInp_ptr_NC, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut); safe_add_2d(gInp_ptr_NC, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut); // calculate grad_grid if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW]; gix -= nw_val * (iy_se - iy) * gOut; giy -= nw_val * (ix_se - ix) * gOut; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW]; gix += ne_val * (iy_sw - iy) * gOut; giy -= ne_val * (ix - ix_sw) * gOut; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW]; gix -= sw_val * (iy - iy_ne) * gOut; giy += sw_val * (ix_ne - ix) * gOut; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW]; gix += se_val * (iy - iy_nw) * gOut; giy += se_val * (ix - ix_nw) * gOut; } } // un-normalize grad_grid values back to [-1, 1] constraints gix = gix * (inp_W - 1.f) / 2; giy = giy * (inp_H - 1.f) / 2; // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to diectly compute gGrid_ptr_NHW // 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ixf)); int iy_nearest = static_cast<int>(::round(iyf)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; for (int c = 0; c < C; ++c, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) { // calculate and set grad_input safe_add_2d(gInp_ptr_NC, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW); } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to diectly compute gGrid_ptr_NHW // 2. 
directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NHW[1] = static_cast<scalar_t>(0); } } } template <typename scalar_t> __launch_bounds__(1024) __global__ void grid_sampler_3d_backward_kernel( const int nthreads, TensorInfo<scalar_t, int> grad_output, TensorInfo<scalar_t, int> input, TensorInfo<scalar_t, int> grid, TensorInfo<scalar_t, int> grad_input, // initialized to zeros TensorInfo<scalar_t, int> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode) { int C = input.sizes[1]; int inp_D = input.sizes[2]; int inp_H = input.sizes[3]; int inp_W = input.sizes[4]; int out_D = grid.sizes[1]; int out_H = grid.sizes[2]; int out_W = grid.sizes[3]; int inp_sN = input.strides[0]; int inp_sC = input.strides[1]; int inp_sD = input.strides[2]; int inp_sH = input.strides[3]; int inp_sW = input.strides[4]; int grid_sN = grid.strides[0]; int grid_sD = grid.strides[1]; int grid_sH = grid.strides[2]; int grid_sW = grid.strides[3]; int grid_sCoor = grid.strides[4]; int gOut_sN = grad_output.strides[0]; int gOut_sC = grad_output.strides[1]; int gOut_sD = grad_output.strides[2]; int gOut_sH = grad_output.strides[3]; int gOut_sW = grad_output.strides[4]; int gInp_sN = grad_input.strides[0]; int gInp_sC = grad_input.strides[1]; int gInp_sD = grad_input.strides[2]; int gInp_sH = grad_input.strides[3]; int gInp_sW = grad_input.strides[4]; int gGrid_sW = grad_grid.strides[3]; CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int d = (index / (out_H * out_W)) % out_D; const int n = index / (out_D * out_H * out_W); const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; // normalize ix, iy, iz from [-1, 1] to [0, inp_W-1] & [0, inp_H-1] & [0, inp_D-1] float ixf = ((ix + 1.f) / 2) * (inp_W - 1); float iyf = ((iy + 1.f) / 2) * (inp_H - 1); float izf = ((iz + 1.f) / 2) * (inp_D - 1); // multipliers for gradients on ix, iy, and iz // E.g., 0 for out-of-bound indices when GridSamplerPadding::Border scalar_t gix_mult, giy_mult, giz_mult; if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders ixf = clip_coordinates_set_grad(ixf, inp_W, &gix_mult); iyf = clip_coordinates_set_grad(iyf, inp_H, &giy_mult); izf = clip_coordinates_set_grad(izf, inp_D, &giz_mult); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders ixf = reflect_coordinates_set_grad(ixf, inp_W, &gix_mult); iyf = reflect_coordinates_set_grad(iyf, inp_H, &giy_mult); izf = reflect_coordinates_set_grad(izf, inp_D, &giz_mult); } else { // padding_mode == GridSamplerPadding::Zeros gix_mult = static_cast<scalar_t>(1); giy_mult = static_cast<scalar_t>(1); giz_mult = static_cast<scalar_t>(1); } if (interpolation_mode == GridSamplerInterpolation::Bilinear) { ix = static_cast<scalar_t>(ixf); iy = static_cast<scalar_t>(iyf); iz = static_cast<scalar_t>(izf); // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom int ix_tnw = static_cast<int>(::floor(ix)); int iy_tnw = static_cast<int>(::floor(iy)); int iz_tnw = static_cast<int>(::floor(iz)); int ix_tne = ix_tnw + 1; 
int iy_tne = iy_tnw; int iz_tne = iz_tnw; int ix_tsw = ix_tnw; int iy_tsw = iy_tnw + 1; int iz_tsw = iz_tnw; int ix_tse = ix_tnw + 1; int iy_tse = iy_tnw + 1; int iz_tse = iz_tnw; int ix_bnw = ix_tnw; int iy_bnw = iy_tnw; int iz_bnw = iz_tnw + 1; int ix_bne = ix_tnw + 1; int iy_bne = iy_tnw; int iz_bne = iz_tnw + 1; int ix_bsw = ix_tnw; int iy_bsw = iy_tnw + 1; int iz_bsw = iz_tnw + 1; int ix_bse = ix_tnw + 1; int iy_bse = iy_tnw + 1; int iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; // calculate bilinear weighted pixel value and set output pixel for (int c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC, inp_ptr_NC += inp_sC) { scalar_t gOut = *gOut_ptr_NCDHW; // calculate and set grad_input safe_add_3d(gInp_ptr_NC, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut); safe_add_3d(gInp_ptr_NC, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut); safe_add_3d(gInp_ptr_NC, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut); safe_add_3d(gInp_ptr_NC, iz_tse, iy_tse, ix_tse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut); safe_add_3d(gInp_ptr_NC, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut); safe_add_3d(gInp_ptr_NC, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut); safe_add_3d(gInp_ptr_NC, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut); safe_add_3d(gInp_ptr_NC, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut); // calculate grad_grid if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW]; gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut; giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut; giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW]; gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut; giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut; giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW]; gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut; giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut; giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW]; gix 
+= tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut; giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut; giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW]; gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut; giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut; giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW]; gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut; giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut; giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW]; gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut; giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut; giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { scalar_t bse_val = inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW]; gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut; giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut; giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut; } } // un-normalize grad_grid values back to [-1, 1] constraints gix = gix * (inp_W - 1) / 2; giy = giy * (inp_H - 1) / 2; giz = giz * (inp_D - 1) / 2; // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to diectly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = gix_mult * gix; gGrid_ptr_NDHW[1] = giy_mult * giy; gGrid_ptr_NDHW[2] = giz_mult * giz; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(::round(ixf)); int iy_nearest = static_cast<int>(::round(iyf)); int iz_nearest = static_cast<int>(::round(izf)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN; for (int c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC) { // calculate and set grad_input safe_add_3d(gInp_ptr_NC, iz_nearest, iy_nearest, ix_nearest, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW); } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to diectly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[1] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[2] = static_cast<scalar_t>(0); } } } } // namespace // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
Tensor grid_sampler_2d_cuda(const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode) { auto N = input.size(0); auto H = grid.size(1); auto W = grid.size(2); auto output = at::empty({N, input.size(1), H, W}, input.options()); int count = static_cast<int>(N * H * W); if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "grid_sampler_2d_cuda", [&] { grid_sampler_2d_kernel<scalar_t> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode)); }); } return output; } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. Tensor grid_sampler_3d_cuda(const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode) { auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); auto output = at::empty({N, input.size(1), D, H, W}, input.options()); int count = static_cast<int>(N * D * H * W); if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "grid_sampler_2d_cuda", [&] { grid_sampler_3d_kernel<scalar_t> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode)); }); } return output; } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. std::tuple<Tensor, Tensor> grid_sampler_2d_backward_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode) { auto N = input.size(0); auto H = grid.size(1); auto W = grid.size(2); auto grad_input = at::zeros_like(input); auto grad_grid = at::empty_like(grid); int count = static_cast<int>(N * H * W); if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "grid_sampler_2d_backward_cuda", [&] { grid_sampler_2d_backward_kernel<scalar_t> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(grad_input), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode)); }); } return std::make_tuple(grad_input, grad_grid); } // No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
std::tuple<Tensor, Tensor> grid_sampler_3d_backward_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode) { auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); auto grad_input = at::zeros_like(input); auto grad_grid = at::empty_like(grid); int count = static_cast<int>(N * D * H * W); if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "grid_sampler_3d_backward_cuda", [&] { grid_sampler_3d_backward_kernel<scalar_t> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(grad_input), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode)); }); } return std::make_tuple(grad_input, grad_grid); } }} // namespace at::native
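For Reflection padding, out-of-range coordinates are folded back into [0, size-1] by the reflect_coordinates helper defined near the top of this file. Below is a host-side restatement of that logic, added here only for intuition (it is not part of the original sources), together with two sample values.

#include <cmath>
#include <cstdio>

// Host-side restatement of reflect_coordinates from the kernels above.
static float reflect_host(float in, int clip_limit) {
  if (clip_limit == 1) return 0.f;
  in = std::fabs(in);
  float max = (float)(clip_limit - 1);
  float extra = std::fmod(in, max);
  int flips = (int)std::floor(in / max);
  return (flips % 2 == 0) ? extra : max - extra;
}

int main() {
  // An input width of 5 means the valid coordinate range is [0, 4].
  std::printf("%f\n", reflect_host(5.5f, 5));   // 2.5: 1.5 past the right edge, folded back
  std::printf("%f\n", reflect_host(-1.0f, 5));  // 1.0: reflected about the left edge
  return 0;
}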
1b51af0a66d8d4e6b9fe79a7c19ffd0c13a0ab6d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "sum4M.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *A = NULL; hipMalloc(&A, XSIZE*YSIZE); float *B = NULL; hipMalloc(&B, XSIZE*YSIZE); float *C = NULL; hipMalloc(&C, XSIZE*YSIZE); const int N = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( sum4M), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( sum4M), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( sum4M), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
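One thing to note about the harness above: hipMalloc (like cudaMalloc) takes a size in bytes, while the calls pass XSIZE*YSIZE directly. If sum4M indexes XSIZE*YSIZE floats, that under-allocates by a factor of sizeof(float); whether it actually does depends on sum4M.cu, which is not included here. A conventional sized allocation is sketched below, purely as an illustration and not as a change to the harness.

#include <hip/hip_runtime.h>

// Illustrative sketch: allocate an XSIZE x YSIZE float matrix with an explicit
// byte count. Variable names follow the harness above.
float* alloc_matrix(size_t XSIZE, size_t YSIZE) {
  float* A = NULL;
  hipMalloc(&A, XSIZE * YSIZE * sizeof(float));
  return A;
}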
1b51af0a66d8d4e6b9fe79a7c19ffd0c13a0ab6d.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "sum4M.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); float *B = NULL; cudaMalloc(&B, XSIZE*YSIZE); float *C = NULL; cudaMalloc(&C, XSIZE*YSIZE); const int N = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); sum4M<<<gridBlock,threadBlock>>>(A,B,C,N); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { sum4M<<<gridBlock,threadBlock>>>(A,B,C,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { sum4M<<<gridBlock,threadBlock>>>(A,B,C,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
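The timing loop above reads std::chrono::steady_clock around 1000 kernel launches without synchronizing before the end timestamp; since launches are asynchronous, that mostly measures enqueue overhead rather than kernel execution time. A common alternative is CUDA events, sketched below; this is an illustrative helper, not part of the harness, and `launch_body` is a hypothetical stand-in for the repeated sum4M launch.

#include <cuda_runtime.h>

// Illustrative sketch: time `iterations` launches with CUDA events, which
// synchronize on the last kernel before reporting elapsed milliseconds.
template <typename F>
float time_launches_ms(F launch_body, int iterations) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  for (int i = 0; i < iterations; ++i) launch_body();
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);               // wait for the last kernel to finish
  float ms = 0.f;
  cudaEventElapsedTime(&ms, start, stop);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms;
}
// Possible use with the harness above:
//   float ms = time_launches_ms([&]{ sum4M<<<gridBlock,threadBlock>>>(A,B,C,N); }, 1000);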
7dd189af9f9f6acdf56ed4b3491844ffa990bb46.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <chrono> #include <random> #include <XLib.hpp> using namespace xlib; const int N = 1 << 25; int main() { unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); std::default_random_engine generator (seed); std::uniform_int_distribution<int> distribution( std::numeric_limits<int>::min(), std::numeric_limits<int>::max()); int* Input = new int[N]; int* Output = new int[N]; for (int i = 0; i < N; i++) Input[i] = distribution(generator); int *devInput, *devOutput; __SAFE_CALL( hipMalloc(&devInput, N * sizeof(int)) ) __SAFE_CALL( hipMalloc(&devOutput, N * sizeof(int)) ) __SAFE_CALL( hipMemcpy(devInput, Input, N * sizeof(int), hipMemcpyHostToDevice) ) const unsigned BLOCK_SIZE = 128; const unsigned grid_size = Div<BLOCK_SIZE>(N); /*xlib::copy<1><<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput); xlib::copy<2><<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput); xlib::copy<4><<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput); xlib::copy<8><<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput); xlib::copy<16><<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput); xlib::copy<32><<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput); xlib::copy<64><<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput); xlib::copy<128><<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput);*/ hipLaunchKernelGGL(( xlib::copy), dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, devInput, N, devOutput); __SAFE_CALL( hipMemcpy(Output, devOutput, N * sizeof(int), hipMemcpyDeviceToHost) ) for (int i = 0; i < N; i++) { if (Output[i] != Input[i]) { std::cout << "error" << std::endl; delete[] Input; delete[] Output; std::exit(EXIT_FAILURE); } } std::cout << "correct" << std::endl; delete[] Input; delete[] Output; }
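The launch above computes its grid size with XLib's Div<BLOCK_SIZE>(N); that helper's definition is not included in these sources, but the intent is presumably the usual ceiling division of the element count by the block size. A generic sketch with a hypothetical name:

// Illustrative only; not XLib's actual implementation of Div.
template <unsigned BLOCK_SIZE>
constexpr unsigned ceil_div(unsigned n) {
  return (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
}
// For the test above: ceil_div<128>(1 << 25) == 262144 blocks of 128 threads.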
7dd189af9f9f6acdf56ed4b3491844ffa990bb46.cu
#include <iostream> #include <chrono> #include <random> #include <XLib.hpp> using namespace xlib; const int N = 1 << 25; int main() { unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); std::default_random_engine generator (seed); std::uniform_int_distribution<int> distribution( std::numeric_limits<int>::min(), std::numeric_limits<int>::max()); int* Input = new int[N]; int* Output = new int[N]; for (int i = 0; i < N; i++) Input[i] = distribution(generator); int *devInput, *devOutput; __SAFE_CALL( cudaMalloc(&devInput, N * sizeof(int)) ) __SAFE_CALL( cudaMalloc(&devOutput, N * sizeof(int)) ) __SAFE_CALL( cudaMemcpy(devInput, Input, N * sizeof(int), cudaMemcpyHostToDevice) ) const unsigned BLOCK_SIZE = 128; const unsigned grid_size = Div<BLOCK_SIZE>(N); /*xlib::copy<1><<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput); xlib::copy<2><<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput); xlib::copy<4><<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput); xlib::copy<8><<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput); xlib::copy<16><<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput); xlib::copy<32><<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput); xlib::copy<64><<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput); xlib::copy<128><<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput);*/ xlib::copy<<<grid_size, BLOCK_SIZE>>>(devInput, N, devOutput); __SAFE_CALL( cudaMemcpy(Output, devOutput, N * sizeof(int), cudaMemcpyDeviceToHost) ) for (int i = 0; i < N; i++) { if (Output[i] != Input[i]) { std::cout << "error" << std::endl; delete[] Input; delete[] Output; std::exit(EXIT_FAILURE); } } std::cout << "correct" << std::endl; delete[] Input; delete[] Output; }
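The test above wraps its runtime calls in XLib's __SAFE_CALL macro, which is also not shown in these sources. A conventional error-check wrapper for the CUDA runtime looks roughly like the sketch below; this is a generic pattern, not XLib's actual macro, and may differ from it.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Illustrative sketch of a CUDA runtime error check; CHECK_CUDA is a hypothetical name.
#define CHECK_CUDA(call)                                              \
  do {                                                                \
    cudaError_t err__ = (call);                                       \
    if (err__ != cudaSuccess) {                                       \
      std::fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,         \
                   cudaGetErrorString(err__));                        \
      std::exit(EXIT_FAILURE);                                        \
    }                                                                 \
  } while (0)

// e.g. CHECK_CUDA( cudaMemcpy(Output, devOutput, N * sizeof(int), cudaMemcpyDeviceToHost) );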
1eb58cdbac064e35852c8869876c56c72bb71f19.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #define NP_MAX 7 // Important variables for GPU shit float *d_u; float *d_f; float *d_x; float *d_mesh; float *d_r; float *d_w; // Runge-Kutta time integration storage float *d_kstar; float *d_k1; float *d_k2; float *d_k3; float *d_k4; /* legendre polynomials * * Calculates the value of P_i(x) */ __device__ float legendre(float x, int i) { switch (i) { case 0: return 1.; case 1: return x; case 2: return (3.*powf(x,2) -1.) / 2.; case 3: return (5.*powf(x,3) - 3.*x) / 2.; case 4: return (35.*powf(x,4) - 30.*powf(x,2) + 3.)/8.; case 5: return (63.*powf(x,5) - 70.*powf(x,3) + 15.*x)/8.; case 6: return (231.*powf(x,6) - 315.*powf(x,4) + 105.*powf(x,2) -5.)/16.; case 7: return (429.*powf(x,7) - 693.*powf(x,5) + 315.*powf(x,3) - 35.*x)/16.; case 8: return (6435.*powf(x,8) - 12012.*powf(x,6) + 6930.*powf(x,4) - 1260.*powf(x,2) + 35.)/128.; case 9: return (12155.*powf(x,9) - 25740.*powf(x,7) + 18018*powf(x,5) - 4620.*powf(x,3) + 315.*x)/128.; case 10: return (46189.*powf(x,10) - 109395.*powf(x,8) + 90090.*powf(x,6) - 30030.*powf(x,4) + 3465.*powf(x,2) - 63.)/256.; } return -1; } /* legendre polynomials derivatives * * Calculates the value of d/dx P_i(x) */ __device__ float legendreDeriv(float x, int i) { switch (i) { case 0: return 0.; case 1: return 1.; case 2: return 3.*x; case 3: return (15.*powf(x,2) - 3.) / 2.; case 4: return (140.*powf(x,3) - 60*x)/8.; case 5: return (315.*powf(x,4) - 210.*powf(x,2) + 15.)/8.; case 6: return (1386.*powf(x,5) - 1260.*powf(x,3) + 210.*x)/16.; case 7: return (3003.*powf(x,6) - 3465.*powf(x,4) + 945.*powf(x,2) - 35.)/16.; case 8: return (51480.*powf(x,7) - 72072.*powf(x,5) + 27720.*powf(x,3) - 2520.*x)/128.; case 9: return (109395.*powf(x,8) - 180180.*powf(x,6) + 90090.*powf(x,4) - 13860.*powf(x,2) + 315.)/128.; case 10: return (461890.*powf(x,9) - 875160.*powf(x,7) + 540540.*powf(x,5) - 120120.*powf(x,3) + 6930.*x)/256.; } return -1; } /* flux function f(u) * * evaluate the flux function f(u) */ __device__ float flux(float u) { float aspeed = 2.*3.14159; // the wave speed return aspeed*u; } /* initilialize the mesh nodes * * ideally, this should be done on the GPU, but meh */ __global__ void initMesh(float *mesh, float dx, float a, int K) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < K) { mesh[idx] = a + dx * idx; } } /* initialize flux * * for these periodic boundary conditions, you need to set the flux * on the ghost state to be something. */ __global__ void initFlux(float *u, float *f, int K, int Np) { float cl[NP_MAX]; float ul; int i; for (i = 0; i < Np+1; i++) { cl[i] = u[(K + 1)*i + K]; } ul = 0; for (i = 0; i < Np+1; i++) { ul += cl[i]; } f[K+1] = flux(ul); f[0] = f[K+1]; } /* flux calculations for each node * * | endpoint - f0 - f1 - ... - fm-1 - endpoint | * * That is, fi is the flux between nodes i and i+1, making f a m+1 length vector. 
* Store results into f */ __global__ void calcFlux(float *u, float *f, float aspeed, float time, int K, int Np) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int i; float ul, ur; float cl[NP_MAX], cr[NP_MAX]; if (idx < K+2) { // periodic if (idx == 0) { f[idx] = f[K+1]; } if (idx > 0) { for (i = 0; i < Np+1; i++) { cl[i] = u[(K + 1)*i + idx - 1]; cr[i] = u[(K + 1)*i + idx]; } // Left value ul = 0; for (i = 0; i < Np+1; i++) { ul += cl[i]; } // Evaluate flux ul = flux(ul); // Right value ur = 0; for (i = 0; i < Np+1; i++) { ur += powf(-1, i) * cr[i]; } // Evaluate flux ur = flux(ur); // Upwind flux f[idx] = ul; } } } /* initial condition function * * returns the value of the intial condition at point x */ __device__ float u0(float x) { if (x > -0.25 && x < 0.25) { return 1; } else { return 0; } //return sinf(2*3.14159*x); } /* intialize the ghost state * * since we have periodic boundary conditions, make the ghost state * think that it's just the first element. */ __global__ void initUPeriodic(float *u, int K, int Np) { int i; for (i = 0; i < Np+1; i++) { u[i*(K + 1) + K] = 0;//u[i*(K + 1)]; } } /* calculate the initial data for U * * needs to interpolate u0 with legendre polynomials to grab the right coefficients. */ __global__ void initU(float *u, float *x, float *w, float *r, float dx, int K, int Np) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int i, j; float xi, uval; if (idx < K) { for (i = 0; i < Np+1; i++) { uval = 0.; for (j = 0; j < Np+1; j++) { // The mapping to the integration points for u0 xi = x[idx] + dx*(r[j] + 1.)/2.; uval += w[j] * u0(xi) * legendre(r[j], i); } // Leftover from integration u[i*(K + 1) + idx] = (2.*i + 1.)/2. * uval; } } } /* right hand side calculations * * Calculates the flux integral * int_k (u * vprime) dx * and adds it to the flux boundary integral. * Store results into k, the RK variable */ __global__ void rhs(float *c, float *kstar, float *f, float *w, float *r, float a, float dt, float dx, int K, int Np) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int i,j, k; float rhs[NP_MAX], register_c[NP_MAX]; float lflux, rflux, u; if (idx < (K + 1)) { // Read the global u into a register variable and set rhs = 0. for (i = 0; i < Np+1; i++) { register_c[i] = c[i*(K + 1) + idx]; } // Read the flux contributions. lflux = f[idx]; rflux = f[idx+1]; // Perform quadrature W*P'*f(U) at integration points for (i = 0; i < Np+1; i++) { rhs[i] = 0.; for (j = 0; j < Np+1; j++) { // Evaluate u(r_j) u = 0.; for (k = 0; k < Np+1; k++) { u += legendre(r[j], k) * register_c[k]; } // rhs = sum w_j P'(r_j) flux(u_j) rhs[i] += w[j] * legendreDeriv(r[j], i) * flux(u); } } // Store result for (i = 0; i < Np+1; i++) { kstar[(K + 1)*i + idx] = dt*(((2.*i+1.) / dx) * (-rflux + powf(-1.,i) * lflux + rhs[i])); } } } /* tempstorage for RK4 * * I need to store u + alpha * k_i into some temporary variable called k*. */ __global__ void rk4_tempstorage(float *u, float *kstar, float*k, float alpha, float dt, int Np, int K) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < (Np + 1) * K) { kstar[idx] = u[idx] + alpha * k[idx]; } } /* rk4 * * computes the runge-kutta solution * u_n+1 = u_n + k1/6 + k2/3 + k3/3 + k4/6 */ __global__ void rk4(float *u, float *k1, float *k2, float *k3, float *k4, int Np, int K) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < (Np + 1) * K) { u[idx] += k1[idx]/6. + k2[idx]/3. + k3[idx]/3. + k4[idx]/6.; } }
1eb58cdbac064e35852c8869876c56c72bb71f19.cu
#include <cuda.h> #define NP_MAX 7 // Important variables for GPU shit float *d_u; float *d_f; float *d_x; float *d_mesh; float *d_r; float *d_w; // Runge-Kutta time integration storage float *d_kstar; float *d_k1; float *d_k2; float *d_k3; float *d_k4; /* legendre polynomials * * Calculates the value of P_i(x) */ __device__ float legendre(float x, int i) { switch (i) { case 0: return 1.; case 1: return x; case 2: return (3.*powf(x,2) -1.) / 2.; case 3: return (5.*powf(x,3) - 3.*x) / 2.; case 4: return (35.*powf(x,4) - 30.*powf(x,2) + 3.)/8.; case 5: return (63.*powf(x,5) - 70.*powf(x,3) + 15.*x)/8.; case 6: return (231.*powf(x,6) - 315.*powf(x,4) + 105.*powf(x,2) -5.)/16.; case 7: return (429.*powf(x,7) - 693.*powf(x,5) + 315.*powf(x,3) - 35.*x)/16.; case 8: return (6435.*powf(x,8) - 12012.*powf(x,6) + 6930.*powf(x,4) - 1260.*powf(x,2) + 35.)/128.; case 9: return (12155.*powf(x,9) - 25740.*powf(x,7) + 18018*powf(x,5) - 4620.*powf(x,3) + 315.*x)/128.; case 10: return (46189.*powf(x,10) - 109395.*powf(x,8) + 90090.*powf(x,6) - 30030.*powf(x,4) + 3465.*powf(x,2) - 63.)/256.; } return -1; } /* legendre polynomials derivatives * * Calculates the value of d/dx P_i(x) */ __device__ float legendreDeriv(float x, int i) { switch (i) { case 0: return 0.; case 1: return 1.; case 2: return 3.*x; case 3: return (15.*powf(x,2) - 3.) / 2.; case 4: return (140.*powf(x,3) - 60*x)/8.; case 5: return (315.*powf(x,4) - 210.*powf(x,2) + 15.)/8.; case 6: return (1386.*powf(x,5) - 1260.*powf(x,3) + 210.*x)/16.; case 7: return (3003.*powf(x,6) - 3465.*powf(x,4) + 945.*powf(x,2) - 35.)/16.; case 8: return (51480.*powf(x,7) - 72072.*powf(x,5) + 27720.*powf(x,3) - 2520.*x)/128.; case 9: return (109395.*powf(x,8) - 180180.*powf(x,6) + 90090.*powf(x,4) - 13860.*powf(x,2) + 315.)/128.; case 10: return (461890.*powf(x,9) - 875160.*powf(x,7) + 540540.*powf(x,5) - 120120.*powf(x,3) + 6930.*x)/256.; } return -1; } /* flux function f(u) * * evaluate the flux function f(u) */ __device__ float flux(float u) { float aspeed = 2.*3.14159; // the wave speed return aspeed*u; } /* initilialize the mesh nodes * * ideally, this should be done on the GPU, but meh */ __global__ void initMesh(float *mesh, float dx, float a, int K) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < K) { mesh[idx] = a + dx * idx; } } /* initialize flux * * for these periodic boundary conditions, you need to set the flux * on the ghost state to be something. */ __global__ void initFlux(float *u, float *f, int K, int Np) { float cl[NP_MAX]; float ul; int i; for (i = 0; i < Np+1; i++) { cl[i] = u[(K + 1)*i + K]; } ul = 0; for (i = 0; i < Np+1; i++) { ul += cl[i]; } f[K+1] = flux(ul); f[0] = f[K+1]; } /* flux calculations for each node * * | endpoint - f0 - f1 - ... - fm-1 - endpoint | * * That is, fi is the flux between nodes i and i+1, making f a m+1 length vector. 
* Store results into f */ __global__ void calcFlux(float *u, float *f, float aspeed, float time, int K, int Np) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int i; float ul, ur; float cl[NP_MAX], cr[NP_MAX]; if (idx < K+2) { // periodic if (idx == 0) { f[idx] = f[K+1]; } if (idx > 0) { for (i = 0; i < Np+1; i++) { cl[i] = u[(K + 1)*i + idx - 1]; cr[i] = u[(K + 1)*i + idx]; } // Left value ul = 0; for (i = 0; i < Np+1; i++) { ul += cl[i]; } // Evaluate flux ul = flux(ul); // Right value ur = 0; for (i = 0; i < Np+1; i++) { ur += powf(-1, i) * cr[i]; } // Evaluate flux ur = flux(ur); // Upwind flux f[idx] = ul; } } } /* initial condition function * * returns the value of the intial condition at point x */ __device__ float u0(float x) { if (x > -0.25 && x < 0.25) { return 1; } else { return 0; } //return sinf(2*3.14159*x); } /* intialize the ghost state * * since we have periodic boundary conditions, make the ghost state * think that it's just the first element. */ __global__ void initUPeriodic(float *u, int K, int Np) { int i; for (i = 0; i < Np+1; i++) { u[i*(K + 1) + K] = 0;//u[i*(K + 1)]; } } /* calculate the initial data for U * * needs to interpolate u0 with legendre polynomials to grab the right coefficients. */ __global__ void initU(float *u, float *x, float *w, float *r, float dx, int K, int Np) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int i, j; float xi, uval; if (idx < K) { for (i = 0; i < Np+1; i++) { uval = 0.; for (j = 0; j < Np+1; j++) { // The mapping to the integration points for u0 xi = x[idx] + dx*(r[j] + 1.)/2.; uval += w[j] * u0(xi) * legendre(r[j], i); } // Leftover from integration u[i*(K + 1) + idx] = (2.*i + 1.)/2. * uval; } } } /* right hand side calculations * * Calculates the flux integral * int_k (u * vprime) dx * and adds it to the flux boundary integral. * Store results into k, the RK variable */ __global__ void rhs(float *c, float *kstar, float *f, float *w, float *r, float a, float dt, float dx, int K, int Np) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int i,j, k; float rhs[NP_MAX], register_c[NP_MAX]; float lflux, rflux, u; if (idx < (K + 1)) { // Read the global u into a register variable and set rhs = 0. for (i = 0; i < Np+1; i++) { register_c[i] = c[i*(K + 1) + idx]; } // Read the flux contributions. lflux = f[idx]; rflux = f[idx+1]; // Perform quadrature W*P'*f(U) at integration points for (i = 0; i < Np+1; i++) { rhs[i] = 0.; for (j = 0; j < Np+1; j++) { // Evaluate u(r_j) u = 0.; for (k = 0; k < Np+1; k++) { u += legendre(r[j], k) * register_c[k]; } // rhs = sum w_j P'(r_j) flux(u_j) rhs[i] += w[j] * legendreDeriv(r[j], i) * flux(u); } } // Store result for (i = 0; i < Np+1; i++) { kstar[(K + 1)*i + idx] = dt*(((2.*i+1.) / dx) * (-rflux + powf(-1.,i) * lflux + rhs[i])); } } } /* tempstorage for RK4 * * I need to store u + alpha * k_i into some temporary variable called k*. */ __global__ void rk4_tempstorage(float *u, float *kstar, float*k, float alpha, float dt, int Np, int K) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < (Np + 1) * K) { kstar[idx] = u[idx] + alpha * k[idx]; } } /* rk4 * * computes the runge-kutta solution * u_n+1 = u_n + k1/6 + k2/3 + k3/3 + k4/6 */ __global__ void rk4(float *u, float *k1, float *k2, float *k3, float *k4, int Np, int K) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < (Np + 1) * K) { u[idx] += k1[idx]/6. + k2[idx]/3. + k3[idx]/3. + k4[idx]/6.; } }
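Neither the .hip nor the .cu version of this solver contains the host code that drives these kernels, so the role of d_kstar and d_k1..d_k4 is easy to miss. The sketch below is only an illustration of how the four classical RK4 stages could be sequenced over them; rk4_step_sketch, the 256-thread block size, and the grid-size arithmetic are assumptions, and allocation/initialization of the device buffers (initU, initFlux, initUPeriodic) is taken as already done.

// Hypothetical host-side driver for one RK4 step using the kernels and the
// file-scope device pointers (d_u, d_f, d_kstar, d_k1..d_k4, d_w, d_r) above.
// Launch geometry is a placeholder choice; error checking is omitted.
void rk4_step_sketch(float a, float t, float dt, float dx, int K, int Np) {
    const int nThreads = 256;
    const int gridCoef = ((Np + 1) * K + nThreads - 1) / nThreads;  // all coefficients
    const int gridElem = (K + 1 + nThreads - 1) / nThreads;         // elements + ghost
    const int gridFlux = (K + 2 + nThreads - 1) / nThreads;         // all interfaces

    // Stage 1: k1 = dt * L(u)
    calcFlux<<<gridFlux, nThreads>>>(d_u, d_f, a, t, K, Np);
    rhs<<<gridElem, nThreads>>>(d_u, d_k1, d_f, d_w, d_r, a, dt, dx, K, Np);

    // Stage 2: k2 = dt * L(u + k1/2)
    rk4_tempstorage<<<gridCoef, nThreads>>>(d_u, d_kstar, d_k1, 0.5f, dt, Np, K);
    calcFlux<<<gridFlux, nThreads>>>(d_kstar, d_f, a, t, K, Np);
    rhs<<<gridElem, nThreads>>>(d_kstar, d_k2, d_f, d_w, d_r, a, dt, dx, K, Np);

    // Stage 3: k3 = dt * L(u + k2/2)
    rk4_tempstorage<<<gridCoef, nThreads>>>(d_u, d_kstar, d_k2, 0.5f, dt, Np, K);
    calcFlux<<<gridFlux, nThreads>>>(d_kstar, d_f, a, t, K, Np);
    rhs<<<gridElem, nThreads>>>(d_kstar, d_k3, d_f, d_w, d_r, a, dt, dx, K, Np);

    // Stage 4: k4 = dt * L(u + k3)
    rk4_tempstorage<<<gridCoef, nThreads>>>(d_u, d_kstar, d_k3, 1.0f, dt, Np, K);
    calcFlux<<<gridFlux, nThreads>>>(d_kstar, d_f, a, t, K, Np);
    rhs<<<gridElem, nThreads>>>(d_kstar, d_k4, d_f, d_w, d_r, a, dt, dx, K, Np);

    // Combine: u_{n+1} = u_n + k1/6 + k2/3 + k3/3 + k4/6
    rk4<<<gridCoef, nThreads>>>(d_u, d_k1, d_k2, d_k3, d_k4, Np, K);
}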
28d3d620b5501caf74251067e6dc3ca856d00198.hip
// !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include "cudaFun.h"
#include "hip/hip_runtime.h"
#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void mat_add(const double* mat1, const double* mat2, double* result, const int M, const int N)
{
    int iRow = blockDim.x * blockIdx.x + threadIdx.x;
    int iCol = blockDim.y * blockIdx.y + threadIdx.y;
    int threadId = iRow + iCol * N;
    if (threadId < M * N)
        result[threadId] = mat1[threadId] + mat2[threadId];
}

void matrix_add_gpu(const double* mat1, const double* mat2, double* result, const int M, const int N)
{
    double *g_mat1, *g_mat2, *g_mat_result;
    hipMalloc((void **)&g_mat1, sizeof(double) * M*N);
    hipMalloc((void **)&g_mat2, sizeof(double) * M*N);
    hipMalloc((void **)&g_mat_result, sizeof(double) * M*N);
    hipMemcpy(g_mat1, mat1, sizeof(double) * M*N, hipMemcpyHostToDevice);
    hipMemcpy(g_mat2, mat2, sizeof(double) * M*N, hipMemcpyHostToDevice);
    dim3 blockSize(32, 32);
    dim3 gridSize(20, 20);
    hipLaunchKernelGGL(( mat_add), dim3(gridSize), dim3(blockSize), 0, 0, g_mat1, g_mat2, g_mat_result, M, N);
    hipMemcpy(result, g_mat_result, sizeof(double) * M*N, hipMemcpyDeviceToHost);
    hipFree(g_mat1);
    hipFree(g_mat2);
    hipFree(g_mat_result);
}
28d3d620b5501caf74251067e6dc3ca856d00198.cu
#include <cmath>
#include "cudaFun.h"
#include "cuda_runtime.h"
#include <cuda.h>
#include <stdio.h>

__global__ void mat_add(const double* mat1, const double* mat2, double* result, const int M, const int N)
{
    int iRow = blockDim.x * blockIdx.x + threadIdx.x;
    int iCol = blockDim.y * blockIdx.y + threadIdx.y;
    int threadId = iRow + iCol * N;
    if (threadId < M * N)
        result[threadId] = mat1[threadId] + mat2[threadId];
}

void matrix_add_gpu(const double* mat1, const double* mat2, double* result, const int M, const int N)
{
    double *g_mat1, *g_mat2, *g_mat_result;
    cudaMalloc((void **)&g_mat1, sizeof(double) * M*N);
    cudaMalloc((void **)&g_mat2, sizeof(double) * M*N);
    cudaMalloc((void **)&g_mat_result, sizeof(double) * M*N);
    cudaMemcpy(g_mat1, mat1, sizeof(double) * M*N, cudaMemcpyHostToDevice);
    cudaMemcpy(g_mat2, mat2, sizeof(double) * M*N, cudaMemcpyHostToDevice);
    dim3 blockSize(32, 32);
    dim3 gridSize(20, 20);
    mat_add<<< gridSize, blockSize >>>(g_mat1, g_mat2, g_mat_result, M, N);
    cudaMemcpy(result, g_mat_result, sizeof(double) * M*N, cudaMemcpyDeviceToHost);
    cudaFree(g_mat1);
    cudaFree(g_mat2);
    cudaFree(g_mat_result);
}
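matrix_add_gpu hides all device management from the caller: it copies both inputs to the GPU, launches a fixed 20x20 grid of 32x32-thread blocks, and copies the sum back. A minimal hypothetical caller (the demo main and sizes are illustrative; cudaFun.h is assumed to declare matrix_add_gpu) could look like:

#include <vector>
#include <cstdio>
#include "cudaFun.h"   // assumed to declare matrix_add_gpu

int main() {
    const int M = 640, N = 640;   // stays within the fixed 20*32 x 20*32 thread launch
    std::vector<double> a(M * N, 1.0), b(M * N, 2.0), c(M * N, 0.0);
    matrix_add_gpu(a.data(), b.data(), c.data(), M, N);
    std::printf("c[0] = %f\n", c[0]);   // expected 3.0
    return 0;
}

Because the launch geometry is hard-coded, this sketch keeps M*N within the 640*640 threads the grid actually spawns; larger inputs would only be partially covered.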
7655fc1d85842ba56e460c02bb43826a119d545b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2017 XGBoost contributors */ #include <xgboost/tree_updater.h> #include <utility> #include <vector> #include "param.h" #include "updater_gpu_common.cuh" namespace xgboost { namespace tree { DMLC_REGISTRY_FILE_TAG(updater_gpu); /** * @brief Absolute BFS order IDs to col-wise unique IDs based on user input * @param tid the index of the element that this thread should access * @param abs the array of absolute IDs * @param colIds the array of column IDs for each element * @param nodeStart the start of the node ID at this level * @param nKeys number of nodes at this level. * @return the uniq key */ static HOST_DEV_INLINE node_id_t abs2uniqKey(int tid, const node_id_t* abs, const int* colIds, node_id_t nodeStart, int nKeys) { int a = abs[tid]; if (a == UNUSED_NODE) return a; return ((a - nodeStart) + (colIds[tid] * nKeys)); } /** * @struct Pair * @brief Pair used for key basd scan operations on bst_gpair */ struct Pair { int key; bst_gpair value; }; /** define a key that's not used at all in the entire boosting process */ static const int NONE_KEY = -100; /** * @brief Allocate temporary buffers needed for scan operations * @param tmpScans gradient buffer * @param tmpKeys keys buffer * @param size number of elements that will be scanned */ template <int BLKDIM_L1L3 = 256> int scanTempBufferSize(int size) { int nBlks = dh::div_round_up(size, BLKDIM_L1L3); return nBlks; } struct AddByKey { template <typename T> HOST_DEV_INLINE T operator()(const T& first, const T& second) const { T result; if (first.key == second.key) { result.key = first.key; result.value = first.value + second.value; } else { result.key = second.key; result.value = second.value; } return result; } }; /** * @brief Gradient value getter function * @param id the index into the vals or instIds array to which to fetch * @param vals the gradient value buffer * @param instIds instance index buffer * @return the expected gradient value */ HOST_DEV_INLINE bst_gpair get(int id, const bst_gpair* vals, const int* instIds) { id = instIds[id]; return vals[id]; } template <int BLKDIM_L1L3> __global__ void cubScanByKeyL1(bst_gpair* scans, const bst_gpair* vals, const int* instIds, bst_gpair* mScans, int* mKeys, const node_id_t* keys, int nUniqKeys, const int* colIds, node_id_t nodeStart, const int size) { Pair rootPair = {NONE_KEY, bst_gpair(0.f, 0.f)}; int myKey; bst_gpair myValue; typedef hipcub::BlockScan<Pair, BLKDIM_L1L3> BlockScan; __shared__ typename BlockScan::TempStorage temp_storage; Pair threadData; int tid = blockIdx.x * BLKDIM_L1L3 + threadIdx.x; if (tid < size) { myKey = abs2uniqKey(tid, keys, colIds, nodeStart, nUniqKeys); myValue = get(tid, vals, instIds); } else { myKey = NONE_KEY; myValue = 0.f; } threadData.key = myKey; threadData.value = myValue; // get previous key, especially needed for the last thread in this block // in order to pass on the partial scan values. // this statement MUST appear before the checks below! // else, the result of this shuffle operation will be undefined int previousKey = __shfl_up(myKey, 1); // Collectively compute the block-wide exclusive prefix sum BlockScan(temp_storage) .ExclusiveScan(threadData, threadData, rootPair, AddByKey()); if (tid < size) { scans[tid] = threadData.value; } else { return; } if (threadIdx.x == BLKDIM_L1L3 - 1) { threadData.value = (myKey == previousKey) ? 
threadData.value : bst_gpair(0.0f, 0.0f); mKeys[blockIdx.x] = myKey; mScans[blockIdx.x] = threadData.value + myValue; } } template <int BLKSIZE> __global__ void cubScanByKeyL2(bst_gpair* mScans, int* mKeys, int mLength) { typedef hipcub::BlockScan<Pair, BLKSIZE, cub::BLOCK_SCAN_WARP_SCANS> BlockScan; Pair threadData; __shared__ typename BlockScan::TempStorage temp_storage; for (int i = threadIdx.x; i < mLength; i += BLKSIZE - 1) { threadData.key = mKeys[i]; threadData.value = mScans[i]; BlockScan(temp_storage).InclusiveScan(threadData, threadData, AddByKey()); mScans[i] = threadData.value; __syncthreads(); } } template <int BLKDIM_L1L3> __global__ void cubScanByKeyL3(bst_gpair* sums, bst_gpair* scans, const bst_gpair* vals, const int* instIds, const bst_gpair* mScans, const int* mKeys, const node_id_t* keys, int nUniqKeys, const int* colIds, node_id_t nodeStart, const int size) { int relId = threadIdx.x; int tid = (blockIdx.x * BLKDIM_L1L3) + relId; // to avoid the following warning from nvcc: // __shared__ memory variable with non-empty constructor or destructor // (potential race between threads) __shared__ char gradBuff[sizeof(bst_gpair)]; __shared__ int s_mKeys; bst_gpair* s_mScans = reinterpret_cast<bst_gpair*>(gradBuff); if (tid >= size) return; // cache block-wide partial scan info if (relId == 0) { s_mKeys = (blockIdx.x > 0) ? mKeys[blockIdx.x - 1] : NONE_KEY; s_mScans[0] = (blockIdx.x > 0) ? mScans[blockIdx.x - 1] : bst_gpair(); } int myKey = abs2uniqKey(tid, keys, colIds, nodeStart, nUniqKeys); int previousKey = tid == 0 ? NONE_KEY : abs2uniqKey(tid - 1, keys, colIds, nodeStart, nUniqKeys); bst_gpair myValue = scans[tid]; __syncthreads(); if (blockIdx.x > 0 && s_mKeys == previousKey) { myValue += s_mScans[0]; } if (tid == size - 1) { sums[previousKey] = myValue + get(tid, vals, instIds); } if ((previousKey != myKey) && (previousKey >= 0)) { sums[previousKey] = myValue; myValue = bst_gpair(0.0f, 0.0f); } scans[tid] = myValue; } /** * @brief Performs fused reduce and scan by key functionality. It is assumed * that * the keys occur contiguously! * @param sums the output gradient reductions for each element performed * key-wise * @param scans the output gradient scans for each element performed key-wise * @param vals the gradients evaluated for each observation. * @param instIds instance ids for each element * @param keys keys to be used to segment the reductions. They need not occur * contiguously in contrast to scan_by_key. Currently, we need one key per * value in the 'vals' array. 
* @param size number of elements in the 'vals' array * @param nUniqKeys max number of uniq keys found per column * @param nCols number of columns * @param tmpScans temporary scan buffer needed for cub-pyramid algo * @param tmpKeys temporary key buffer needed for cub-pyramid algo * @param colIds column indices for each element in the array * @param nodeStart index of the leftmost node in the current level */ template <int BLKDIM_L1L3 = 256, int BLKDIM_L2 = 512> void reduceScanByKey(bst_gpair* sums, bst_gpair* scans, const bst_gpair* vals, const int* instIds, const node_id_t* keys, int size, int nUniqKeys, int nCols, bst_gpair* tmpScans, int* tmpKeys, const int* colIds, node_id_t nodeStart) { int nBlks = dh::div_round_up(size, BLKDIM_L1L3); hipMemset(sums, 0, nUniqKeys * nCols * sizeof(bst_gpair)); hipLaunchKernelGGL(( cubScanByKeyL1<BLKDIM_L1L3>) , dim3(nBlks), dim3(BLKDIM_L1L3), 0, 0, scans, vals, instIds, tmpScans, tmpKeys, keys, nUniqKeys, colIds, nodeStart, size); hipLaunchKernelGGL(( cubScanByKeyL2<BLKDIM_L2>), dim3(1), dim3(BLKDIM_L2), 0, 0, tmpScans, tmpKeys, nBlks); hipLaunchKernelGGL(( cubScanByKeyL3<BLKDIM_L1L3>) , dim3(nBlks), dim3(BLKDIM_L1L3), 0, 0, sums, scans, vals, instIds, tmpScans, tmpKeys, keys, nUniqKeys, colIds, nodeStart, size); } /** * @struct ExactSplitCandidate * @brief Abstraction of a possible split in the decision tree */ struct ExactSplitCandidate { /** the optimal gain score for this node */ float score; /** index where to split in the DMatrix */ int index; HOST_DEV_INLINE ExactSplitCandidate() : score(-FLT_MAX), index(INT_MAX) {} /** * @brief Whether the split info is valid to be used to create a new child * @param minSplitLoss minimum score above which decision to split is made * @return true if splittable, else false */ HOST_DEV_INLINE bool isSplittable(float minSplitLoss) const { return ((score >= minSplitLoss) && (index != INT_MAX)); } }; /** * @enum ArgMaxByKeyAlgo best_split_evaluation.cuh * @brief Help decide which algorithm to use for multi-argmax operation */ enum ArgMaxByKeyAlgo { /** simplest, use gmem-atomics for all updates */ ABK_GMEM = 0, /** use smem-atomics for updates (when number of keys are less) */ ABK_SMEM }; /** max depth until which to use shared mem based atomics for argmax */ static const int MAX_ABK_LEVELS = 3; HOST_DEV_INLINE ExactSplitCandidate maxSplit(ExactSplitCandidate a, ExactSplitCandidate b) { ExactSplitCandidate out; if (a.score < b.score) { out.score = b.score; out.index = b.index; } else if (a.score == b.score) { out.score = a.score; out.index = (a.index < b.index) ? a.index : b.index; } else { out.score = a.score; out.index = a.index; } return out; } DEV_INLINE void atomicArgMax(ExactSplitCandidate* address, ExactSplitCandidate val) { unsigned long long* intAddress = (unsigned long long*)address; // NOLINT unsigned long long old = *intAddress; // NOLINT unsigned long long assumed; // NOLINT do { assumed = old; ExactSplitCandidate res = maxSplit(val, *reinterpret_cast<ExactSplitCandidate*>(&assumed)); old = atomicCAS(intAddress, assumed, *reinterpret_cast<uint64_t*>(&res)); } while (assumed != old); } DEV_INLINE void argMaxWithAtomics( int id, ExactSplitCandidate* nodeSplits, const bst_gpair* gradScans, const bst_gpair* gradSums, const float* vals, const int* colIds, const node_id_t* nodeAssigns, const DeviceDenseNode* nodes, int nUniqKeys, node_id_t nodeStart, int len, const GPUTrainingParam& param) { int nodeId = nodeAssigns[id]; // @todo: this is really a bad check! 
but will be fixed when we move // to key-based reduction if ((id == 0) || !((nodeId == nodeAssigns[id - 1]) && (colIds[id] == colIds[id - 1]) && (vals[id] == vals[id - 1]))) { if (nodeId != UNUSED_NODE) { int sumId = abs2uniqKey(id, nodeAssigns, colIds, nodeStart, nUniqKeys); bst_gpair colSum = gradSums[sumId]; int uid = nodeId - nodeStart; DeviceDenseNode n = nodes[nodeId]; bst_gpair parentSum = n.sum_gradients; float parentGain = n.root_gain; bool tmp; ExactSplitCandidate s; bst_gpair missing = parentSum - colSum; s.score = loss_chg_missing(gradScans[id], missing, parentSum, parentGain, param, tmp); s.index = id; atomicArgMax(nodeSplits + uid, s); } // end if nodeId != UNUSED_NODE } // end if id == 0 ... } __global__ void atomicArgMaxByKeyGmem( ExactSplitCandidate* nodeSplits, const bst_gpair* gradScans, const bst_gpair* gradSums, const float* vals, const int* colIds, const node_id_t* nodeAssigns, const DeviceDenseNode* nodes, int nUniqKeys, node_id_t nodeStart, int len, const TrainParam param) { int id = threadIdx.x + (blockIdx.x * blockDim.x); const int stride = blockDim.x * gridDim.x; for (; id < len; id += stride) { argMaxWithAtomics(id, nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes, nUniqKeys, nodeStart, len, GPUTrainingParam(param)); } } __global__ void atomicArgMaxByKeySmem( ExactSplitCandidate* nodeSplits, const bst_gpair* gradScans, const bst_gpair* gradSums, const float* vals, const int* colIds, const node_id_t* nodeAssigns, const DeviceDenseNode* nodes, int nUniqKeys, node_id_t nodeStart, int len, const TrainParam param) { extern __shared__ char sArr[]; ExactSplitCandidate* sNodeSplits = reinterpret_cast<ExactSplitCandidate*>(sArr); int tid = threadIdx.x; ExactSplitCandidate defVal; #pragma unroll 1 for (int i = tid; i < nUniqKeys; i += blockDim.x) { sNodeSplits[i] = defVal; } __syncthreads(); int id = tid + (blockIdx.x * blockDim.x); const int stride = blockDim.x * gridDim.x; for (; id < len; id += stride) { argMaxWithAtomics(id, sNodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes, nUniqKeys, nodeStart, len, param); } __syncthreads(); for (int i = tid; i < nUniqKeys; i += blockDim.x) { ExactSplitCandidate s = sNodeSplits[i]; atomicArgMax(nodeSplits + i, s); } } /** * @brief Performs argmax_by_key functionality but for cases when keys need not * occur contiguously * @param nodeSplits will contain information on best split for each node * @param gradScans exclusive sum on sorted segments for each col * @param gradSums gradient sum for each column in DMatrix based on to node-ids * @param vals feature values * @param colIds column index for each element in the feature values array * @param nodeAssigns node-id assignments to each element in DMatrix * @param nodes pointer to all nodes for this tree in BFS order * @param nUniqKeys number of unique node-ids in this level * @param nodeStart start index of the node-ids in this level * @param len number of elements * @param param training parameters * @param algo which algorithm to use for argmax_by_key */ template <int BLKDIM = 256, int ITEMS_PER_THREAD = 4> void argMaxByKey(ExactSplitCandidate* nodeSplits, const bst_gpair* gradScans, const bst_gpair* gradSums, const float* vals, const int* colIds, const node_id_t* nodeAssigns, const DeviceDenseNode* nodes, int nUniqKeys, node_id_t nodeStart, int len, const TrainParam param, ArgMaxByKeyAlgo algo) { dh::fillConst<ExactSplitCandidate, BLKDIM, ITEMS_PER_THREAD>( dh::get_device_idx(param.gpu_id), nodeSplits, nUniqKeys, ExactSplitCandidate()); int nBlks = 
dh::div_round_up(len, ITEMS_PER_THREAD * BLKDIM); switch (algo) { case ABK_GMEM: hipLaunchKernelGGL(( atomicArgMaxByKeyGmem), dim3(nBlks), dim3(BLKDIM), 0, 0, nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes, nUniqKeys, nodeStart, len, param); break; case ABK_SMEM: hipLaunchKernelGGL(( atomicArgMaxByKeySmem), dim3(nBlks), dim3(BLKDIM), sizeof(ExactSplitCandidate) * nUniqKeys, 0, nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes, nUniqKeys, nodeStart, len, param); break; default: throw std::runtime_error("argMaxByKey: Bad algo passed!"); } } __global__ void assignColIds(int* colIds, const int* colOffsets) { int myId = blockIdx.x; int start = colOffsets[myId]; int end = colOffsets[myId + 1]; for (int id = start + threadIdx.x; id < end; id += blockDim.x) { colIds[id] = myId; } } __global__ void fillDefaultNodeIds(node_id_t* nodeIdsPerInst, const DeviceDenseNode* nodes, int nRows) { int id = threadIdx.x + (blockIdx.x * blockDim.x); if (id >= nRows) { return; } // if this element belongs to none of the currently active node-id's node_id_t nId = nodeIdsPerInst[id]; if (nId == UNUSED_NODE) { return; } const DeviceDenseNode n = nodes[nId]; node_id_t result; if (n.IsLeaf() || n.IsUnused()) { result = UNUSED_NODE; } else if (n.dir == LeftDir) { result = (2 * n.idx) + 1; } else { result = (2 * n.idx) + 2; } nodeIdsPerInst[id] = result; } __global__ void assignNodeIds(node_id_t* nodeIdsPerInst, int* nodeLocations, const node_id_t* nodeIds, const int* instId, const DeviceDenseNode* nodes, const int* colOffsets, const float* vals, int nVals, int nCols) { int id = threadIdx.x + (blockIdx.x * blockDim.x); const int stride = blockDim.x * gridDim.x; for (; id < nVals; id += stride) { // fusing generation of indices for node locations nodeLocations[id] = id; // using nodeIds here since the previous kernel would have updated // the nodeIdsPerInst with all default assignments int nId = nodeIds[id]; // if this element belongs to none of the currently active node-id's if (nId != UNUSED_NODE) { const DeviceDenseNode n = nodes[nId]; int colId = n.fidx; // printf("nid=%d colId=%d id=%d\n", nId, colId, id); int start = colOffsets[colId]; int end = colOffsets[colId + 1]; // @todo: too much wasteful threads!! if ((id >= start) && (id < end) && !(n.IsLeaf() || n.IsUnused())) { node_id_t result = (2 * n.idx) + 1 + (vals[id] >= n.fvalue); nodeIdsPerInst[instId[id]] = result; } } } } __global__ void markLeavesKernel(DeviceDenseNode* nodes, int len) { int id = (blockIdx.x * blockDim.x) + threadIdx.x; if ((id < len) && !nodes[id].IsUnused()) { int lid = (id << 1) + 1; int rid = (id << 1) + 2; if ((lid >= len) || (rid >= len)) { nodes[id].root_gain = -FLT_MAX; // bottom-most nodes } else if (nodes[lid].IsUnused() && nodes[rid].IsUnused()) { nodes[id].root_gain = -FLT_MAX; // unused child nodes } } } class GPUMaker : public TreeUpdater { protected: TrainParam param; /** whether we have initialized memory already (so as not to repeat!) 
*/ bool allocated; /** feature values stored in column-major compressed format */ dh::dvec2<float> vals; dh::dvec<float> vals_cached; /** corresponding instance id's of these featutre values */ dh::dvec2<int> instIds; dh::dvec<int> instIds_cached; /** column offsets for these feature values */ dh::dvec<int> colOffsets; dh::dvec<bst_gpair> gradsInst; dh::dvec2<node_id_t> nodeAssigns; dh::dvec2<int> nodeLocations; dh::dvec<DeviceDenseNode> nodes; dh::dvec<node_id_t> nodeAssignsPerInst; dh::dvec<bst_gpair> gradSums; dh::dvec<bst_gpair> gradScans; dh::dvec<ExactSplitCandidate> nodeSplits; int nVals; int nRows; int nCols; int maxNodes; int maxLeaves; dh::CubMemory tmp_mem; dh::dvec<bst_gpair> tmpScanGradBuff; dh::dvec<int> tmpScanKeyBuff; dh::dvec<int> colIds; dh::bulk_allocator<dh::memory_type::DEVICE> ba; public: GPUMaker() : allocated(false) {} ~GPUMaker() {} void Init( const std::vector<std::pair<std::string, std::string>>& args) override { param.InitAllowUnknown(args); maxNodes = (1 << (param.max_depth + 1)) - 1; maxLeaves = 1 << param.max_depth; } void Update(const std::vector<bst_gpair>& gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) override { GradStats::CheckInfo(dmat->info()); // rescale learning rate according to size of trees float lr = param.learning_rate; param.learning_rate = lr / trees.size(); try { // build tree for (size_t i = 0; i < trees.size(); ++i) { UpdateTree(gpair, dmat, trees[i]); } } catch (const std::exception& e) { LOG(FATAL) << "GPU plugin exception: " << e.what() << std::endl; } param.learning_rate = lr; } /// @note: Update should be only after Init!! void UpdateTree(const std::vector<bst_gpair>& gpair, DMatrix* dmat, RegTree* hTree) { if (!allocated) { setupOneTimeData(dmat); } for (int i = 0; i < param.max_depth; ++i) { if (i == 0) { // make sure to start on a fresh tree with sorted values! vals.current_dvec() = vals_cached; instIds.current_dvec() = instIds_cached; transferGrads(gpair); } int nNodes = 1 << i; node_id_t nodeStart = nNodes - 1; initNodeData(i, nodeStart, nNodes); findSplit(i, nodeStart, nNodes); } // mark all the used nodes with unused children as leaf nodes markLeaves(); dense2sparse_tree(hTree, nodes, param); } void split2node(int nNodes, node_id_t nodeStart) { auto d_nodes = nodes.data(); auto d_gradScans = gradScans.data(); auto d_gradSums = gradSums.data(); auto d_nodeAssigns = nodeAssigns.current(); auto d_colIds = colIds.data(); auto d_vals = vals.current(); auto d_nodeSplits = nodeSplits.data(); int nUniqKeys = nNodes; float min_split_loss = param.min_split_loss; auto gpu_param = GPUTrainingParam(param); dh::launch_n(param.gpu_id, nNodes, [=] __device__(int uid) { int absNodeId = uid + nodeStart; ExactSplitCandidate s = d_nodeSplits[uid]; if (s.isSplittable(min_split_loss)) { int idx = s.index; int nodeInstId = abs2uniqKey(idx, d_nodeAssigns, d_colIds, nodeStart, nUniqKeys); bool missingLeft = true; const DeviceDenseNode& n = d_nodes[absNodeId]; bst_gpair gradScan = d_gradScans[idx]; bst_gpair gradSum = d_gradSums[nodeInstId]; float thresh = d_vals[idx]; int colId = d_colIds[idx]; // get the default direction for the current node bst_gpair missing = n.sum_gradients - gradSum; loss_chg_missing(gradScan, missing, n.sum_gradients, n.root_gain, gpu_param, missingLeft); // get the score/weight/id/gradSum for left and right child nodes bst_gpair lGradSum = missingLeft ? 
gradScan + missing : gradScan; bst_gpair rGradSum = n.sum_gradients - lGradSum; // Create children d_nodes[left_child_nidx(absNodeId)] = DeviceDenseNode(lGradSum, left_child_nidx(absNodeId), gpu_param); d_nodes[right_child_nidx(absNodeId)] = DeviceDenseNode(rGradSum, right_child_nidx(absNodeId), gpu_param); // Set split for parent d_nodes[absNodeId].SetSplit(thresh, colId, missingLeft ? LeftDir : RightDir); } else { // cannot be split further, so this node is a leaf! d_nodes[absNodeId].root_gain = -FLT_MAX; } }); } void findSplit(int level, node_id_t nodeStart, int nNodes) { reduceScanByKey(gradSums.data(), gradScans.data(), gradsInst.data(), instIds.current(), nodeAssigns.current(), nVals, nNodes, nCols, tmpScanGradBuff.data(), tmpScanKeyBuff.data(), colIds.data(), nodeStart); argMaxByKey(nodeSplits.data(), gradScans.data(), gradSums.data(), vals.current(), colIds.data(), nodeAssigns.current(), nodes.data(), nNodes, nodeStart, nVals, param, level <= MAX_ABK_LEVELS ? ABK_SMEM : ABK_GMEM); split2node(nNodes, nodeStart); } void allocateAllData(int offsetSize) { int tmpBuffSize = scanTempBufferSize(nVals); ba.allocate(dh::get_device_idx(param.gpu_id), param.silent, &vals, nVals, &vals_cached, nVals, &instIds, nVals, &instIds_cached, nVals, &colOffsets, offsetSize, &gradsInst, nRows, &nodeAssigns, nVals, &nodeLocations, nVals, &nodes, maxNodes, &nodeAssignsPerInst, nRows, &gradSums, maxLeaves * nCols, &gradScans, nVals, &nodeSplits, maxLeaves, &tmpScanGradBuff, tmpBuffSize, &tmpScanKeyBuff, tmpBuffSize, &colIds, nVals); } void setupOneTimeData(DMatrix* dmat) { size_t free_memory = dh::available_memory(dh::get_device_idx(param.gpu_id)); if (!dmat->SingleColBlock()) { throw std::runtime_error("exact::GPUBuilder - must have 1 column block"); } std::vector<float> fval; std::vector<int> fId, offset; convertToCsc(dmat, &fval, &fId, &offset); allocateAllData(static_cast<int>(offset.size())); transferAndSortData(fval, fId, offset); allocated = true; } void convertToCsc(DMatrix* dmat, std::vector<float>* fval, std::vector<int>* fId, std::vector<int>* offset) { MetaInfo info = dmat->info(); nRows = info.num_row; nCols = info.num_col; offset->reserve(nCols + 1); offset->push_back(0); fval->reserve(nCols * nRows); fId->reserve(nCols * nRows); // in case you end up with a DMatrix having no column access // then make sure to enable that before copying the data! 
if (!dmat->HaveColAccess()) { const std::vector<bool> enable(nCols, true); dmat->InitColAccess(enable, 1, nRows); } dmlc::DataIter<ColBatch>* iter = dmat->ColIterator(); iter->BeforeFirst(); while (iter->Next()) { const ColBatch& batch = iter->Value(); for (int i = 0; i < batch.size; i++) { const ColBatch::Inst& col = batch[i]; for (const ColBatch::Entry* it = col.data; it != col.data + col.length; it++) { int inst_id = static_cast<int>(it->index); fval->push_back(it->fvalue); fId->push_back(inst_id); } offset->push_back(fval->size()); } } nVals = fval->size(); } void transferAndSortData(const std::vector<float>& fval, const std::vector<int>& fId, const std::vector<int>& offset) { vals.current_dvec() = fval; instIds.current_dvec() = fId; colOffsets = offset; dh::segmentedSort<float, int>(&tmp_mem, &vals, &instIds, nVals, nCols, colOffsets); vals_cached = vals.current_dvec(); instIds_cached = instIds.current_dvec(); hipLaunchKernelGGL(( assignColIds), dim3(nCols), dim3(512), 0, 0, colIds.data(), colOffsets.data()); } void transferGrads(const std::vector<bst_gpair>& gpair) { // HACK dh::safe_cuda(hipMemcpy(gradsInst.data(), &(gpair[0]), sizeof(bst_gpair) * nRows, hipMemcpyHostToDevice)); // evaluate the full-grad reduction for the root node dh::sumReduction<bst_gpair>(tmp_mem, gradsInst, gradSums, nRows); } void initNodeData(int level, node_id_t nodeStart, int nNodes) { // all instances belong to root node at the beginning! if (level == 0) { nodes.fill(DeviceDenseNode()); nodeAssigns.current_dvec().fill(0); nodeAssignsPerInst.fill(0); // for root node, just update the gradient/score/weight/id info // before splitting it! Currently all data is on GPU, hence this // stupid little kernel auto d_nodes = nodes.data(); auto d_sums = gradSums.data(); auto gpu_params = GPUTrainingParam(param); dh::launch_n(param.gpu_id, 1, [=] __device__(int idx) { d_nodes[0] = DeviceDenseNode(d_sums[0], 0, gpu_params); }); } else { const int BlkDim = 256; const int ItemsPerThread = 4; // assign default node ids first int nBlks = dh::div_round_up(nRows, BlkDim); hipLaunchKernelGGL(( fillDefaultNodeIds), dim3(nBlks), dim3(BlkDim), 0, 0, nodeAssignsPerInst.data(), nodes.data(), nRows); // evaluate the correct child indices of non-missing values next nBlks = dh::div_round_up(nVals, BlkDim * ItemsPerThread); hipLaunchKernelGGL(( assignNodeIds), dim3(nBlks), dim3(BlkDim), 0, 0, nodeAssignsPerInst.data(), nodeLocations.current(), nodeAssigns.current(), instIds.current(), nodes.data(), colOffsets.data(), vals.current(), nVals, nCols); // gather the node assignments across all other columns too dh::gather(dh::get_device_idx(param.gpu_id), nodeAssigns.current(), nodeAssignsPerInst.data(), instIds.current(), nVals); sortKeys(level); } } void sortKeys(int level) { // segmented-sort the arrays based on node-id's // but we don't need more than level+1 bits for sorting! 
segmentedSort(&tmp_mem, &nodeAssigns, &nodeLocations, nVals, nCols, colOffsets, 0, level + 1); dh::gather<float, int>(dh::get_device_idx(param.gpu_id), vals.other(), vals.current(), instIds.other(), instIds.current(), nodeLocations.current(), nVals); vals.buff().selector ^= 1; instIds.buff().selector ^= 1; } void markLeaves() { const int BlkDim = 128; int nBlks = dh::div_round_up(maxNodes, BlkDim); hipLaunchKernelGGL(( markLeavesKernel), dim3(nBlks), dim3(BlkDim), 0, 0, nodes.data(), maxNodes); } }; XGBOOST_REGISTER_TREE_UPDATER(GPUMaker, "grow_gpu") .describe("Grow tree with GPU.") .set_body([]() { return new GPUMaker(); }); } // namespace tree } // namespace xgboost
7655fc1d85842ba56e460c02bb43826a119d545b.cu
/*! * Copyright 2017 XGBoost contributors */ #include <xgboost/tree_updater.h> #include <utility> #include <vector> #include "param.h" #include "updater_gpu_common.cuh" namespace xgboost { namespace tree { DMLC_REGISTRY_FILE_TAG(updater_gpu); /** * @brief Absolute BFS order IDs to col-wise unique IDs based on user input * @param tid the index of the element that this thread should access * @param abs the array of absolute IDs * @param colIds the array of column IDs for each element * @param nodeStart the start of the node ID at this level * @param nKeys number of nodes at this level. * @return the uniq key */ static HOST_DEV_INLINE node_id_t abs2uniqKey(int tid, const node_id_t* abs, const int* colIds, node_id_t nodeStart, int nKeys) { int a = abs[tid]; if (a == UNUSED_NODE) return a; return ((a - nodeStart) + (colIds[tid] * nKeys)); } /** * @struct Pair * @brief Pair used for key basd scan operations on bst_gpair */ struct Pair { int key; bst_gpair value; }; /** define a key that's not used at all in the entire boosting process */ static const int NONE_KEY = -100; /** * @brief Allocate temporary buffers needed for scan operations * @param tmpScans gradient buffer * @param tmpKeys keys buffer * @param size number of elements that will be scanned */ template <int BLKDIM_L1L3 = 256> int scanTempBufferSize(int size) { int nBlks = dh::div_round_up(size, BLKDIM_L1L3); return nBlks; } struct AddByKey { template <typename T> HOST_DEV_INLINE T operator()(const T& first, const T& second) const { T result; if (first.key == second.key) { result.key = first.key; result.value = first.value + second.value; } else { result.key = second.key; result.value = second.value; } return result; } }; /** * @brief Gradient value getter function * @param id the index into the vals or instIds array to which to fetch * @param vals the gradient value buffer * @param instIds instance index buffer * @return the expected gradient value */ HOST_DEV_INLINE bst_gpair get(int id, const bst_gpair* vals, const int* instIds) { id = instIds[id]; return vals[id]; } template <int BLKDIM_L1L3> __global__ void cubScanByKeyL1(bst_gpair* scans, const bst_gpair* vals, const int* instIds, bst_gpair* mScans, int* mKeys, const node_id_t* keys, int nUniqKeys, const int* colIds, node_id_t nodeStart, const int size) { Pair rootPair = {NONE_KEY, bst_gpair(0.f, 0.f)}; int myKey; bst_gpair myValue; typedef cub::BlockScan<Pair, BLKDIM_L1L3> BlockScan; __shared__ typename BlockScan::TempStorage temp_storage; Pair threadData; int tid = blockIdx.x * BLKDIM_L1L3 + threadIdx.x; if (tid < size) { myKey = abs2uniqKey(tid, keys, colIds, nodeStart, nUniqKeys); myValue = get(tid, vals, instIds); } else { myKey = NONE_KEY; myValue = 0.f; } threadData.key = myKey; threadData.value = myValue; // get previous key, especially needed for the last thread in this block // in order to pass on the partial scan values. // this statement MUST appear before the checks below! // else, the result of this shuffle operation will be undefined int previousKey = __shfl_up(myKey, 1); // Collectively compute the block-wide exclusive prefix sum BlockScan(temp_storage) .ExclusiveScan(threadData, threadData, rootPair, AddByKey()); if (tid < size) { scans[tid] = threadData.value; } else { return; } if (threadIdx.x == BLKDIM_L1L3 - 1) { threadData.value = (myKey == previousKey) ? 
threadData.value : bst_gpair(0.0f, 0.0f); mKeys[blockIdx.x] = myKey; mScans[blockIdx.x] = threadData.value + myValue; } } template <int BLKSIZE> __global__ void cubScanByKeyL2(bst_gpair* mScans, int* mKeys, int mLength) { typedef cub::BlockScan<Pair, BLKSIZE, cub::BLOCK_SCAN_WARP_SCANS> BlockScan; Pair threadData; __shared__ typename BlockScan::TempStorage temp_storage; for (int i = threadIdx.x; i < mLength; i += BLKSIZE - 1) { threadData.key = mKeys[i]; threadData.value = mScans[i]; BlockScan(temp_storage).InclusiveScan(threadData, threadData, AddByKey()); mScans[i] = threadData.value; __syncthreads(); } } template <int BLKDIM_L1L3> __global__ void cubScanByKeyL3(bst_gpair* sums, bst_gpair* scans, const bst_gpair* vals, const int* instIds, const bst_gpair* mScans, const int* mKeys, const node_id_t* keys, int nUniqKeys, const int* colIds, node_id_t nodeStart, const int size) { int relId = threadIdx.x; int tid = (blockIdx.x * BLKDIM_L1L3) + relId; // to avoid the following warning from nvcc: // __shared__ memory variable with non-empty constructor or destructor // (potential race between threads) __shared__ char gradBuff[sizeof(bst_gpair)]; __shared__ int s_mKeys; bst_gpair* s_mScans = reinterpret_cast<bst_gpair*>(gradBuff); if (tid >= size) return; // cache block-wide partial scan info if (relId == 0) { s_mKeys = (blockIdx.x > 0) ? mKeys[blockIdx.x - 1] : NONE_KEY; s_mScans[0] = (blockIdx.x > 0) ? mScans[blockIdx.x - 1] : bst_gpair(); } int myKey = abs2uniqKey(tid, keys, colIds, nodeStart, nUniqKeys); int previousKey = tid == 0 ? NONE_KEY : abs2uniqKey(tid - 1, keys, colIds, nodeStart, nUniqKeys); bst_gpair myValue = scans[tid]; __syncthreads(); if (blockIdx.x > 0 && s_mKeys == previousKey) { myValue += s_mScans[0]; } if (tid == size - 1) { sums[previousKey] = myValue + get(tid, vals, instIds); } if ((previousKey != myKey) && (previousKey >= 0)) { sums[previousKey] = myValue; myValue = bst_gpair(0.0f, 0.0f); } scans[tid] = myValue; } /** * @brief Performs fused reduce and scan by key functionality. It is assumed * that * the keys occur contiguously! * @param sums the output gradient reductions for each element performed * key-wise * @param scans the output gradient scans for each element performed key-wise * @param vals the gradients evaluated for each observation. * @param instIds instance ids for each element * @param keys keys to be used to segment the reductions. They need not occur * contiguously in contrast to scan_by_key. Currently, we need one key per * value in the 'vals' array. 
* @param size number of elements in the 'vals' array * @param nUniqKeys max number of uniq keys found per column * @param nCols number of columns * @param tmpScans temporary scan buffer needed for cub-pyramid algo * @param tmpKeys temporary key buffer needed for cub-pyramid algo * @param colIds column indices for each element in the array * @param nodeStart index of the leftmost node in the current level */ template <int BLKDIM_L1L3 = 256, int BLKDIM_L2 = 512> void reduceScanByKey(bst_gpair* sums, bst_gpair* scans, const bst_gpair* vals, const int* instIds, const node_id_t* keys, int size, int nUniqKeys, int nCols, bst_gpair* tmpScans, int* tmpKeys, const int* colIds, node_id_t nodeStart) { int nBlks = dh::div_round_up(size, BLKDIM_L1L3); cudaMemset(sums, 0, nUniqKeys * nCols * sizeof(bst_gpair)); cubScanByKeyL1<BLKDIM_L1L3> <<<nBlks, BLKDIM_L1L3>>>(scans, vals, instIds, tmpScans, tmpKeys, keys, nUniqKeys, colIds, nodeStart, size); cubScanByKeyL2<BLKDIM_L2><<<1, BLKDIM_L2>>>(tmpScans, tmpKeys, nBlks); cubScanByKeyL3<BLKDIM_L1L3> <<<nBlks, BLKDIM_L1L3>>>(sums, scans, vals, instIds, tmpScans, tmpKeys, keys, nUniqKeys, colIds, nodeStart, size); } /** * @struct ExactSplitCandidate * @brief Abstraction of a possible split in the decision tree */ struct ExactSplitCandidate { /** the optimal gain score for this node */ float score; /** index where to split in the DMatrix */ int index; HOST_DEV_INLINE ExactSplitCandidate() : score(-FLT_MAX), index(INT_MAX) {} /** * @brief Whether the split info is valid to be used to create a new child * @param minSplitLoss minimum score above which decision to split is made * @return true if splittable, else false */ HOST_DEV_INLINE bool isSplittable(float minSplitLoss) const { return ((score >= minSplitLoss) && (index != INT_MAX)); } }; /** * @enum ArgMaxByKeyAlgo best_split_evaluation.cuh * @brief Help decide which algorithm to use for multi-argmax operation */ enum ArgMaxByKeyAlgo { /** simplest, use gmem-atomics for all updates */ ABK_GMEM = 0, /** use smem-atomics for updates (when number of keys are less) */ ABK_SMEM }; /** max depth until which to use shared mem based atomics for argmax */ static const int MAX_ABK_LEVELS = 3; HOST_DEV_INLINE ExactSplitCandidate maxSplit(ExactSplitCandidate a, ExactSplitCandidate b) { ExactSplitCandidate out; if (a.score < b.score) { out.score = b.score; out.index = b.index; } else if (a.score == b.score) { out.score = a.score; out.index = (a.index < b.index) ? a.index : b.index; } else { out.score = a.score; out.index = a.index; } return out; } DEV_INLINE void atomicArgMax(ExactSplitCandidate* address, ExactSplitCandidate val) { unsigned long long* intAddress = (unsigned long long*)address; // NOLINT unsigned long long old = *intAddress; // NOLINT unsigned long long assumed; // NOLINT do { assumed = old; ExactSplitCandidate res = maxSplit(val, *reinterpret_cast<ExactSplitCandidate*>(&assumed)); old = atomicCAS(intAddress, assumed, *reinterpret_cast<uint64_t*>(&res)); } while (assumed != old); } DEV_INLINE void argMaxWithAtomics( int id, ExactSplitCandidate* nodeSplits, const bst_gpair* gradScans, const bst_gpair* gradSums, const float* vals, const int* colIds, const node_id_t* nodeAssigns, const DeviceDenseNode* nodes, int nUniqKeys, node_id_t nodeStart, int len, const GPUTrainingParam& param) { int nodeId = nodeAssigns[id]; // @todo: this is really a bad check! 
but will be fixed when we move // to key-based reduction if ((id == 0) || !((nodeId == nodeAssigns[id - 1]) && (colIds[id] == colIds[id - 1]) && (vals[id] == vals[id - 1]))) { if (nodeId != UNUSED_NODE) { int sumId = abs2uniqKey(id, nodeAssigns, colIds, nodeStart, nUniqKeys); bst_gpair colSum = gradSums[sumId]; int uid = nodeId - nodeStart; DeviceDenseNode n = nodes[nodeId]; bst_gpair parentSum = n.sum_gradients; float parentGain = n.root_gain; bool tmp; ExactSplitCandidate s; bst_gpair missing = parentSum - colSum; s.score = loss_chg_missing(gradScans[id], missing, parentSum, parentGain, param, tmp); s.index = id; atomicArgMax(nodeSplits + uid, s); } // end if nodeId != UNUSED_NODE } // end if id == 0 ... } __global__ void atomicArgMaxByKeyGmem( ExactSplitCandidate* nodeSplits, const bst_gpair* gradScans, const bst_gpair* gradSums, const float* vals, const int* colIds, const node_id_t* nodeAssigns, const DeviceDenseNode* nodes, int nUniqKeys, node_id_t nodeStart, int len, const TrainParam param) { int id = threadIdx.x + (blockIdx.x * blockDim.x); const int stride = blockDim.x * gridDim.x; for (; id < len; id += stride) { argMaxWithAtomics(id, nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes, nUniqKeys, nodeStart, len, GPUTrainingParam(param)); } } __global__ void atomicArgMaxByKeySmem( ExactSplitCandidate* nodeSplits, const bst_gpair* gradScans, const bst_gpair* gradSums, const float* vals, const int* colIds, const node_id_t* nodeAssigns, const DeviceDenseNode* nodes, int nUniqKeys, node_id_t nodeStart, int len, const TrainParam param) { extern __shared__ char sArr[]; ExactSplitCandidate* sNodeSplits = reinterpret_cast<ExactSplitCandidate*>(sArr); int tid = threadIdx.x; ExactSplitCandidate defVal; #pragma unroll 1 for (int i = tid; i < nUniqKeys; i += blockDim.x) { sNodeSplits[i] = defVal; } __syncthreads(); int id = tid + (blockIdx.x * blockDim.x); const int stride = blockDim.x * gridDim.x; for (; id < len; id += stride) { argMaxWithAtomics(id, sNodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes, nUniqKeys, nodeStart, len, param); } __syncthreads(); for (int i = tid; i < nUniqKeys; i += blockDim.x) { ExactSplitCandidate s = sNodeSplits[i]; atomicArgMax(nodeSplits + i, s); } } /** * @brief Performs argmax_by_key functionality but for cases when keys need not * occur contiguously * @param nodeSplits will contain information on best split for each node * @param gradScans exclusive sum on sorted segments for each col * @param gradSums gradient sum for each column in DMatrix based on to node-ids * @param vals feature values * @param colIds column index for each element in the feature values array * @param nodeAssigns node-id assignments to each element in DMatrix * @param nodes pointer to all nodes for this tree in BFS order * @param nUniqKeys number of unique node-ids in this level * @param nodeStart start index of the node-ids in this level * @param len number of elements * @param param training parameters * @param algo which algorithm to use for argmax_by_key */ template <int BLKDIM = 256, int ITEMS_PER_THREAD = 4> void argMaxByKey(ExactSplitCandidate* nodeSplits, const bst_gpair* gradScans, const bst_gpair* gradSums, const float* vals, const int* colIds, const node_id_t* nodeAssigns, const DeviceDenseNode* nodes, int nUniqKeys, node_id_t nodeStart, int len, const TrainParam param, ArgMaxByKeyAlgo algo) { dh::fillConst<ExactSplitCandidate, BLKDIM, ITEMS_PER_THREAD>( dh::get_device_idx(param.gpu_id), nodeSplits, nUniqKeys, ExactSplitCandidate()); int nBlks = 
dh::div_round_up(len, ITEMS_PER_THREAD * BLKDIM); switch (algo) { case ABK_GMEM: atomicArgMaxByKeyGmem<<<nBlks, BLKDIM>>>( nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes, nUniqKeys, nodeStart, len, param); break; case ABK_SMEM: atomicArgMaxByKeySmem<<<nBlks, BLKDIM, sizeof(ExactSplitCandidate) * nUniqKeys>>>( nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes, nUniqKeys, nodeStart, len, param); break; default: throw std::runtime_error("argMaxByKey: Bad algo passed!"); } } __global__ void assignColIds(int* colIds, const int* colOffsets) { int myId = blockIdx.x; int start = colOffsets[myId]; int end = colOffsets[myId + 1]; for (int id = start + threadIdx.x; id < end; id += blockDim.x) { colIds[id] = myId; } } __global__ void fillDefaultNodeIds(node_id_t* nodeIdsPerInst, const DeviceDenseNode* nodes, int nRows) { int id = threadIdx.x + (blockIdx.x * blockDim.x); if (id >= nRows) { return; } // if this element belongs to none of the currently active node-id's node_id_t nId = nodeIdsPerInst[id]; if (nId == UNUSED_NODE) { return; } const DeviceDenseNode n = nodes[nId]; node_id_t result; if (n.IsLeaf() || n.IsUnused()) { result = UNUSED_NODE; } else if (n.dir == LeftDir) { result = (2 * n.idx) + 1; } else { result = (2 * n.idx) + 2; } nodeIdsPerInst[id] = result; } __global__ void assignNodeIds(node_id_t* nodeIdsPerInst, int* nodeLocations, const node_id_t* nodeIds, const int* instId, const DeviceDenseNode* nodes, const int* colOffsets, const float* vals, int nVals, int nCols) { int id = threadIdx.x + (blockIdx.x * blockDim.x); const int stride = blockDim.x * gridDim.x; for (; id < nVals; id += stride) { // fusing generation of indices for node locations nodeLocations[id] = id; // using nodeIds here since the previous kernel would have updated // the nodeIdsPerInst with all default assignments int nId = nodeIds[id]; // if this element belongs to none of the currently active node-id's if (nId != UNUSED_NODE) { const DeviceDenseNode n = nodes[nId]; int colId = n.fidx; // printf("nid=%d colId=%d id=%d\n", nId, colId, id); int start = colOffsets[colId]; int end = colOffsets[colId + 1]; // @todo: too much wasteful threads!! if ((id >= start) && (id < end) && !(n.IsLeaf() || n.IsUnused())) { node_id_t result = (2 * n.idx) + 1 + (vals[id] >= n.fvalue); nodeIdsPerInst[instId[id]] = result; } } } } __global__ void markLeavesKernel(DeviceDenseNode* nodes, int len) { int id = (blockIdx.x * blockDim.x) + threadIdx.x; if ((id < len) && !nodes[id].IsUnused()) { int lid = (id << 1) + 1; int rid = (id << 1) + 2; if ((lid >= len) || (rid >= len)) { nodes[id].root_gain = -FLT_MAX; // bottom-most nodes } else if (nodes[lid].IsUnused() && nodes[rid].IsUnused()) { nodes[id].root_gain = -FLT_MAX; // unused child nodes } } } class GPUMaker : public TreeUpdater { protected: TrainParam param; /** whether we have initialized memory already (so as not to repeat!) 
*/ bool allocated; /** feature values stored in column-major compressed format */ dh::dvec2<float> vals; dh::dvec<float> vals_cached; /** corresponding instance id's of these featutre values */ dh::dvec2<int> instIds; dh::dvec<int> instIds_cached; /** column offsets for these feature values */ dh::dvec<int> colOffsets; dh::dvec<bst_gpair> gradsInst; dh::dvec2<node_id_t> nodeAssigns; dh::dvec2<int> nodeLocations; dh::dvec<DeviceDenseNode> nodes; dh::dvec<node_id_t> nodeAssignsPerInst; dh::dvec<bst_gpair> gradSums; dh::dvec<bst_gpair> gradScans; dh::dvec<ExactSplitCandidate> nodeSplits; int nVals; int nRows; int nCols; int maxNodes; int maxLeaves; dh::CubMemory tmp_mem; dh::dvec<bst_gpair> tmpScanGradBuff; dh::dvec<int> tmpScanKeyBuff; dh::dvec<int> colIds; dh::bulk_allocator<dh::memory_type::DEVICE> ba; public: GPUMaker() : allocated(false) {} ~GPUMaker() {} void Init( const std::vector<std::pair<std::string, std::string>>& args) override { param.InitAllowUnknown(args); maxNodes = (1 << (param.max_depth + 1)) - 1; maxLeaves = 1 << param.max_depth; } void Update(const std::vector<bst_gpair>& gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) override { GradStats::CheckInfo(dmat->info()); // rescale learning rate according to size of trees float lr = param.learning_rate; param.learning_rate = lr / trees.size(); try { // build tree for (size_t i = 0; i < trees.size(); ++i) { UpdateTree(gpair, dmat, trees[i]); } } catch (const std::exception& e) { LOG(FATAL) << "GPU plugin exception: " << e.what() << std::endl; } param.learning_rate = lr; } /// @note: Update should be only after Init!! void UpdateTree(const std::vector<bst_gpair>& gpair, DMatrix* dmat, RegTree* hTree) { if (!allocated) { setupOneTimeData(dmat); } for (int i = 0; i < param.max_depth; ++i) { if (i == 0) { // make sure to start on a fresh tree with sorted values! vals.current_dvec() = vals_cached; instIds.current_dvec() = instIds_cached; transferGrads(gpair); } int nNodes = 1 << i; node_id_t nodeStart = nNodes - 1; initNodeData(i, nodeStart, nNodes); findSplit(i, nodeStart, nNodes); } // mark all the used nodes with unused children as leaf nodes markLeaves(); dense2sparse_tree(hTree, nodes, param); } void split2node(int nNodes, node_id_t nodeStart) { auto d_nodes = nodes.data(); auto d_gradScans = gradScans.data(); auto d_gradSums = gradSums.data(); auto d_nodeAssigns = nodeAssigns.current(); auto d_colIds = colIds.data(); auto d_vals = vals.current(); auto d_nodeSplits = nodeSplits.data(); int nUniqKeys = nNodes; float min_split_loss = param.min_split_loss; auto gpu_param = GPUTrainingParam(param); dh::launch_n(param.gpu_id, nNodes, [=] __device__(int uid) { int absNodeId = uid + nodeStart; ExactSplitCandidate s = d_nodeSplits[uid]; if (s.isSplittable(min_split_loss)) { int idx = s.index; int nodeInstId = abs2uniqKey(idx, d_nodeAssigns, d_colIds, nodeStart, nUniqKeys); bool missingLeft = true; const DeviceDenseNode& n = d_nodes[absNodeId]; bst_gpair gradScan = d_gradScans[idx]; bst_gpair gradSum = d_gradSums[nodeInstId]; float thresh = d_vals[idx]; int colId = d_colIds[idx]; // get the default direction for the current node bst_gpair missing = n.sum_gradients - gradSum; loss_chg_missing(gradScan, missing, n.sum_gradients, n.root_gain, gpu_param, missingLeft); // get the score/weight/id/gradSum for left and right child nodes bst_gpair lGradSum = missingLeft ? 
gradScan + missing : gradScan; bst_gpair rGradSum = n.sum_gradients - lGradSum; // Create children d_nodes[left_child_nidx(absNodeId)] = DeviceDenseNode(lGradSum, left_child_nidx(absNodeId), gpu_param); d_nodes[right_child_nidx(absNodeId)] = DeviceDenseNode(rGradSum, right_child_nidx(absNodeId), gpu_param); // Set split for parent d_nodes[absNodeId].SetSplit(thresh, colId, missingLeft ? LeftDir : RightDir); } else { // cannot be split further, so this node is a leaf! d_nodes[absNodeId].root_gain = -FLT_MAX; } }); } void findSplit(int level, node_id_t nodeStart, int nNodes) { reduceScanByKey(gradSums.data(), gradScans.data(), gradsInst.data(), instIds.current(), nodeAssigns.current(), nVals, nNodes, nCols, tmpScanGradBuff.data(), tmpScanKeyBuff.data(), colIds.data(), nodeStart); argMaxByKey(nodeSplits.data(), gradScans.data(), gradSums.data(), vals.current(), colIds.data(), nodeAssigns.current(), nodes.data(), nNodes, nodeStart, nVals, param, level <= MAX_ABK_LEVELS ? ABK_SMEM : ABK_GMEM); split2node(nNodes, nodeStart); } void allocateAllData(int offsetSize) { int tmpBuffSize = scanTempBufferSize(nVals); ba.allocate(dh::get_device_idx(param.gpu_id), param.silent, &vals, nVals, &vals_cached, nVals, &instIds, nVals, &instIds_cached, nVals, &colOffsets, offsetSize, &gradsInst, nRows, &nodeAssigns, nVals, &nodeLocations, nVals, &nodes, maxNodes, &nodeAssignsPerInst, nRows, &gradSums, maxLeaves * nCols, &gradScans, nVals, &nodeSplits, maxLeaves, &tmpScanGradBuff, tmpBuffSize, &tmpScanKeyBuff, tmpBuffSize, &colIds, nVals); } void setupOneTimeData(DMatrix* dmat) { size_t free_memory = dh::available_memory(dh::get_device_idx(param.gpu_id)); if (!dmat->SingleColBlock()) { throw std::runtime_error("exact::GPUBuilder - must have 1 column block"); } std::vector<float> fval; std::vector<int> fId, offset; convertToCsc(dmat, &fval, &fId, &offset); allocateAllData(static_cast<int>(offset.size())); transferAndSortData(fval, fId, offset); allocated = true; } void convertToCsc(DMatrix* dmat, std::vector<float>* fval, std::vector<int>* fId, std::vector<int>* offset) { MetaInfo info = dmat->info(); nRows = info.num_row; nCols = info.num_col; offset->reserve(nCols + 1); offset->push_back(0); fval->reserve(nCols * nRows); fId->reserve(nCols * nRows); // in case you end up with a DMatrix having no column access // then make sure to enable that before copying the data! 
if (!dmat->HaveColAccess()) { const std::vector<bool> enable(nCols, true); dmat->InitColAccess(enable, 1, nRows); } dmlc::DataIter<ColBatch>* iter = dmat->ColIterator(); iter->BeforeFirst(); while (iter->Next()) { const ColBatch& batch = iter->Value(); for (int i = 0; i < batch.size; i++) { const ColBatch::Inst& col = batch[i]; for (const ColBatch::Entry* it = col.data; it != col.data + col.length; it++) { int inst_id = static_cast<int>(it->index); fval->push_back(it->fvalue); fId->push_back(inst_id); } offset->push_back(fval->size()); } } nVals = fval->size(); } void transferAndSortData(const std::vector<float>& fval, const std::vector<int>& fId, const std::vector<int>& offset) { vals.current_dvec() = fval; instIds.current_dvec() = fId; colOffsets = offset; dh::segmentedSort<float, int>(&tmp_mem, &vals, &instIds, nVals, nCols, colOffsets); vals_cached = vals.current_dvec(); instIds_cached = instIds.current_dvec(); assignColIds<<<nCols, 512>>>(colIds.data(), colOffsets.data()); } void transferGrads(const std::vector<bst_gpair>& gpair) { // HACK dh::safe_cuda(cudaMemcpy(gradsInst.data(), &(gpair[0]), sizeof(bst_gpair) * nRows, cudaMemcpyHostToDevice)); // evaluate the full-grad reduction for the root node dh::sumReduction<bst_gpair>(tmp_mem, gradsInst, gradSums, nRows); } void initNodeData(int level, node_id_t nodeStart, int nNodes) { // all instances belong to root node at the beginning! if (level == 0) { nodes.fill(DeviceDenseNode()); nodeAssigns.current_dvec().fill(0); nodeAssignsPerInst.fill(0); // for root node, just update the gradient/score/weight/id info // before splitting it! Currently all data is on GPU, hence this // stupid little kernel auto d_nodes = nodes.data(); auto d_sums = gradSums.data(); auto gpu_params = GPUTrainingParam(param); dh::launch_n(param.gpu_id, 1, [=] __device__(int idx) { d_nodes[0] = DeviceDenseNode(d_sums[0], 0, gpu_params); }); } else { const int BlkDim = 256; const int ItemsPerThread = 4; // assign default node ids first int nBlks = dh::div_round_up(nRows, BlkDim); fillDefaultNodeIds<<<nBlks, BlkDim>>>(nodeAssignsPerInst.data(), nodes.data(), nRows); // evaluate the correct child indices of non-missing values next nBlks = dh::div_round_up(nVals, BlkDim * ItemsPerThread); assignNodeIds<<<nBlks, BlkDim>>>( nodeAssignsPerInst.data(), nodeLocations.current(), nodeAssigns.current(), instIds.current(), nodes.data(), colOffsets.data(), vals.current(), nVals, nCols); // gather the node assignments across all other columns too dh::gather(dh::get_device_idx(param.gpu_id), nodeAssigns.current(), nodeAssignsPerInst.data(), instIds.current(), nVals); sortKeys(level); } } void sortKeys(int level) { // segmented-sort the arrays based on node-id's // but we don't need more than level+1 bits for sorting! segmentedSort(&tmp_mem, &nodeAssigns, &nodeLocations, nVals, nCols, colOffsets, 0, level + 1); dh::gather<float, int>(dh::get_device_idx(param.gpu_id), vals.other(), vals.current(), instIds.other(), instIds.current(), nodeLocations.current(), nVals); vals.buff().selector ^= 1; instIds.buff().selector ^= 1; } void markLeaves() { const int BlkDim = 128; int nBlks = dh::div_round_up(maxNodes, BlkDim); markLeavesKernel<<<nBlks, BlkDim>>>(nodes.data(), maxNodes); } }; XGBOOST_REGISTER_TREE_UPDATER(GPUMaker, "grow_gpu") .describe("Grow tree with GPU.") .set_body([]() { return new GPUMaker(); }); } // namespace tree } // namespace xgboost
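The GPUMaker code above sizes its node buffers for a dense, level-order binary tree (maxNodes = 2^(max_depth+1) - 1, maxLeaves = 2^max_depth) and walks children through left_child_nidx/right_child_nidx. A small host-only sketch of that indexing, assuming the conventional 2*i+1 / 2*i+2 child layout; the helpers here are stand-ins, not the xgboost ones:

#include <cassert>

static int left_child_nidx(int nidx)  { return 2 * nidx + 1; }
static int right_child_nidx(int nidx) { return 2 * nidx + 2; }

int main() {
  const int max_depth = 3;
  const int maxNodes  = (1 << (max_depth + 1)) - 1;  // every node of a complete tree: 15
  const int maxLeaves = 1 << max_depth;              // nodes on the last level: 8
  assert(maxNodes == 15 && maxLeaves == 8);
  // nodes on level i start at index (1 << i) - 1, matching nodeStart = nNodes - 1 above
  for (int level = 0; level < max_depth; ++level) {
    int nodeStart = (1 << level) - 1;
    int lastOnLevel = nodeStart + (1 << level) - 1;
    assert(right_child_nidx(lastOnLevel) < maxNodes);  // children of any split stay in range
  }
  return 0;
}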
8a17308fc5d494c06f8df5b137ecf0f9c2d153d0.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; using LayoutDst = cutlass::layout::TensorNCxHWx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< int8_t, 8, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 16, 16, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
8a17308fc5d494c06f8df5b137ecf0f9c2d153d0.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; using LayoutDst = cutlass::layout::TensorNCxHWx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< int8_t, 8, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 16, 16, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
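The .hip and .cu files above appear to differ only in the hipify banner and the stream type used in the explicit template instantiation (hipStream_t vs cudaStream_t). A minimal single-source sketch of how such a difference is usually absorbed; the GpuStream alias is hypothetical and not part of either file:

#if defined(__HIP_PLATFORM_AMD__) || defined(__HIP_PLATFORM_HCC__)
#include <hip/hip_runtime.h>
using GpuStream = hipStream_t;     // HIP build
#else
#include <cuda_runtime.h>
using GpuStream = cudaStream_t;    // CUDA build
#endif

// A wrapper signature can then be declared once against the alias.
void run_conv_on(GpuStream stream);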
1502d87cb91bda9d45582ca9d04c69998bae185e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*ckwg +29 * Copyright 2016 by Kitware SAS, 2018-2019 Kitware, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither name of Kitware, Inc. nor the names of any contributors may be used * to endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef INTEGRATE_DEPTH_MAPS_CU_ #define INTEGRATE_DEPTH_MAPS_CU_ // STD include #include <math.h> #include <stdio.h> #include <vector> #include "cuda_error_check.h" #include <vital/logger/logger.h> #define size4x4 16 //***************************************************************************** // Define texture and constants __constant__ double c_gridOrig[3]; // Origin of the output volume __constant__ int3 c_gridDims; // Dimensions of the output volume __constant__ double c_gridSpacing[3]; // Spacing of the output volume __constant__ int2 c_depthMapDims; // Dimensions of all depths map __constant__ double c_rayPotentialThick; // Thickness threshold for the ray potential function __constant__ double c_rayPotentialRho; // Rho at the Y axis for the ray potential function __constant__ double c_rayPotentialEta; __constant__ double c_rayPotentialEpsilon; __constant__ double c_rayPotentialDelta; int grid_dims[3]; //***************************************************************************** // Truncated Signed Distance Function (TSDF) Parameter Description //***************************************************************************** //** Eta is a percentage of rho ( 0 < Eta < 1) //** Epsilon is a percentage of rho ( 0 < Epsilon < 1) //** Delta has to be superior to Thick // // 'real distance' - 'depth value' // | // | // | --------------- Rho // | /| | // | / | // | / | | // | / | // | / | | // | / | // | / | | // | / Epsilon*Rho |______________ // |/ | //---------------------------------------------------------------------------- // / // / // / //-------------- Eta*rho / // | / // | / // | / // | / // | / // --------------- // <---------> // Thick // <-----------------------> // Delta //***************************************************************************** __device__ void computeVoxelCenter(int voxelCoordinate[3], double output[3]) { output[0] = c_gridOrig[0] + 
(voxelCoordinate[0] + 0.5) * c_gridSpacing[0]; output[1] = c_gridOrig[1] + (voxelCoordinate[1] + 0.5) * c_gridSpacing[1]; output[2] = c_gridOrig[2] + (voxelCoordinate[2] + 0.5) * c_gridSpacing[2]; } //***************************************************************************** //Apply a 3x4 matrix to a 3D points (assumes last row of M is 0, 0, 0, 1) __device__ void transformFrom4Matrix(double M[size4x4], double point[3], double output[3]) { output[0] = M[0 * 4 + 0] * point[0] + M[0 * 4 + 1] * point[1] + M[0 * 4 + 2] * point[2] + M[0 * 4 + 3]; output[1] = M[1 * 4 + 0] * point[0] + M[1 * 4 + 1] * point[1] + M[1 * 4 + 2] * point[2] + M[1 * 4 + 3]; output[2] = M[2 * 4 + 0] * point[0] + M[2 * 4 + 1] * point[1] + M[2 * 4 + 2] * point[2] + M[2 * 4 + 3]; } //***************************************************************************** // Compute the norm of a 3 vec __device__ double norm(double vec[3]) { return sqrt(vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2]); } //***************************************************************************** //Ray potential function which computes the increment to the current voxel __device__ void rayPotential(double realDistance, double depthMapDistance, double& res) { double diff = (realDistance - depthMapDistance); double absoluteDiff = abs(diff); // Can't divide by zero int sign = diff != 0 ? diff / absoluteDiff : 0; if (absoluteDiff > c_rayPotentialDelta) res = diff > 0 ? c_rayPotentialEpsilon * c_rayPotentialRho : - c_rayPotentialEta * c_rayPotentialRho; else if (absoluteDiff > c_rayPotentialThick) res = c_rayPotentialRho * sign; else res = (c_rayPotentialRho / c_rayPotentialThick) * diff; } //***************************************************************************** // Compute the voxel Id on a 1D table according to its 3D coordinates __device__ int computeVoxelIDGrid(int coordinates[3]) { int dimX = c_gridDims.x; int dimY = c_gridDims.y; int i = coordinates[0]; int j = coordinates[1]; int k = coordinates[2]; return (k*dimY + j)*dimX + i; } //***************************************************************************** //Compute the pixel Id on a 1D table according to its 3D coordinates (third coordinate is not used) __device__ int computeVoxelIDDepth(int coordinates[3]) { int dimX = c_depthMapDims.x; int dimY = c_depthMapDims.y; int x = coordinates[0]; int y = coordinates[1]; // /!\ vtkImageData has its origin at the bottom left, not top left return (dimX*(dimY - 1 - y)) + x; } //***************************************************************************** // Main kernel for adding a depth map to the volume __global__ void depthMapKernel(double* depths, double* weights, double matrixK[size4x4], double matrixRT[size4x4], double* output, int z_offset) { // Get voxel coordinate according to thread id int voxelIndex[3] = { (int)(blockIdx.x * blockDim.x + threadIdx.x), (int)(blockIdx.y * blockDim.y + threadIdx.y), (int)blockIdx.z + z_offset }; if (voxelIndex[0] >= c_gridDims.x || voxelIndex[1] >= c_gridDims.y || voxelIndex[2] >= c_gridDims.z) { return; } double voxelCenterCoordinate[3]; computeVoxelCenter(voxelIndex, voxelCenterCoordinate); // Transform voxel center from real coord to camera coords double voxelCenterCamera[3]; transformFrom4Matrix(matrixRT, voxelCenterCoordinate, voxelCenterCamera); // Transform voxel center from camera coords to depth map homogeneous coords double voxelCenterHomogen[3]; transformFrom4Matrix(matrixK, voxelCenterCamera, voxelCenterHomogen); if (voxelCenterHomogen[2] < 0) return; // Get voxel center on depth map coord 
double voxelCenterDepthMap[2]; voxelCenterDepthMap[0] = voxelCenterHomogen[0] / voxelCenterHomogen[2]; voxelCenterDepthMap[1] = voxelCenterHomogen[1] / voxelCenterHomogen[2]; // Get real pixel position (approximation) int pixel[3]; pixel[0] = round(voxelCenterDepthMap[0]); pixel[1] = round(voxelCenterDepthMap[1]); pixel[2] = 0; // Test if coordinate are inside depth map if (pixel[0] < 0 || pixel[1] < 0 || pixel[0] >= c_depthMapDims.x || pixel[1] >= c_depthMapDims.y) return; // Compute the ID on depthmap values according to pixel position and depth map dimensions int depthMapId = computeVoxelIDDepth(pixel); double depth = depths[depthMapId]; double weight = weights ? weights[depthMapId] : 1.0; if (depth <= 0 || weight <= 0) return; int gridId = computeVoxelIDGrid(voxelIndex); // Get the distance between voxel and camera double realDepth = voxelCenterCamera[2]; double newValue; rayPotential(realDepth, depth, newValue); // Update the value to the output output[gridId] += weight * newValue; } //***************************************************************************** // Initialize cuda constants void cuda_initalize(int h_gridDims[3], // Dimensions of the output volume double h_gridOrig[3], // Origin of the output volume double h_gridSpacing[3], // Spacing of the output volume double h_rayPThick, double h_rayPRho, double h_rayPEta, double h_rayPEpsilon, double h_rayPDelta) { CudaErrorCheck(hipMemcpyToSymbol(c_gridDims, h_gridDims, 3 * sizeof(int))); CudaErrorCheck(hipMemcpyToSymbol(c_gridOrig, h_gridOrig, 3 * sizeof(double))); CudaErrorCheck(hipMemcpyToSymbol(c_gridSpacing, h_gridSpacing, 3 * sizeof(double))); CudaErrorCheck(hipMemcpyToSymbol(c_rayPotentialThick, &h_rayPThick, sizeof(double))); CudaErrorCheck(hipMemcpyToSymbol(c_rayPotentialRho, &h_rayPRho, sizeof(double))); CudaErrorCheck(hipMemcpyToSymbol(c_rayPotentialEta, &h_rayPEta, sizeof(double))); CudaErrorCheck(hipMemcpyToSymbol(c_rayPotentialEpsilon, &h_rayPEpsilon, sizeof(double))); CudaErrorCheck(hipMemcpyToSymbol(c_rayPotentialDelta, &h_rayPDelta, sizeof(double))); grid_dims[0] = h_gridDims[0]; grid_dims[1] = h_gridDims[1]; grid_dims[2] = h_gridDims[2]; } //***************************************************************************** void launch_depth_kernel(double * d_depth, double * d_conf, int h_depthMapDims[2], double d_K[size4x4], double d_RT[size4x4], double* d_volume, unsigned max_voxels_per_launch) { auto logger = kwiver::vital::get_logger("arrows.cuda.integrate_depth_maps"); int zstep = grid_dims[2]; unsigned num_voxels_xy = grid_dims[0] * grid_dims[1]; if (max_voxels_per_launch > 0) { zstep = max_voxels_per_launch / num_voxels_xy; } if (zstep == 0) { zstep = 1; LOG_WARN(logger, num_voxels_xy << " voxels per X-Y slice exceeds " << max_voxels_per_launch); } // Organize threads into blocks and grids // Number of threads on each block dim3 dimBlock(16, 16, 1); // Number of blocks on a grid dim3 dimGrid((grid_dims[0] - 1) / dimBlock.x + 1, (grid_dims[1] - 1) / dimBlock.y + 1, zstep); CudaErrorCheck(hipMemcpyToSymbol(c_depthMapDims, h_depthMapDims, 2 * sizeof(int))); CudaErrorCheck(hipDeviceSynchronize()); for (int z_offset = 0; z_offset < grid_dims[2]; z_offset += zstep) { LOG_DEBUG(logger, "Launching kernel with Z=" << z_offset); depthMapKernel << < dimGrid, dimBlock >> >(d_depth, d_conf, d_K, d_RT, d_volume, z_offset); CudaErrorCheck(hipPeekAtLastError()); CudaErrorCheck(hipDeviceSynchronize()); } } #endif
1502d87cb91bda9d45582ca9d04c69998bae185e.cu
/*ckwg +29 * Copyright 2016 by Kitware SAS, 2018-2019 Kitware, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither name of Kitware, Inc. nor the names of any contributors may be used * to endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef INTEGRATE_DEPTH_MAPS_CU_ #define INTEGRATE_DEPTH_MAPS_CU_ // STD include #include <math.h> #include <stdio.h> #include <vector> #include "cuda_error_check.h" #include <vital/logger/logger.h> #define size4x4 16 //***************************************************************************** // Define texture and constants __constant__ double c_gridOrig[3]; // Origin of the output volume __constant__ int3 c_gridDims; // Dimensions of the output volume __constant__ double c_gridSpacing[3]; // Spacing of the output volume __constant__ int2 c_depthMapDims; // Dimensions of all depths map __constant__ double c_rayPotentialThick; // Thickness threshold for the ray potential function __constant__ double c_rayPotentialRho; // Rho at the Y axis for the ray potential function __constant__ double c_rayPotentialEta; __constant__ double c_rayPotentialEpsilon; __constant__ double c_rayPotentialDelta; int grid_dims[3]; //***************************************************************************** // Truncated Signed Distance Function (TSDF) Parameter Description //***************************************************************************** //** Eta is a percentage of rho ( 0 < Eta < 1) //** Epsilon is a percentage of rho ( 0 < Epsilon < 1) //** Delta has to be superior to Thick // // 'real distance' - 'depth value' // | // | // | --------------- Rho // | /| | // | / | // | / | | // | / | // | / | | // | / | // | / | | // | / Epsilon*Rho |______________ // |/ | //---------------------------------------------------------------------------- // / // / // / //-------------- Eta*rho / // | / // | / // | / // | / // | / // --------------- // <---------> // Thick // <-----------------------> // Delta //***************************************************************************** __device__ void computeVoxelCenter(int voxelCoordinate[3], double output[3]) { output[0] = c_gridOrig[0] + (voxelCoordinate[0] + 0.5) * c_gridSpacing[0]; output[1] = c_gridOrig[1] + (voxelCoordinate[1] + 0.5) * 
c_gridSpacing[1]; output[2] = c_gridOrig[2] + (voxelCoordinate[2] + 0.5) * c_gridSpacing[2]; } //***************************************************************************** //Apply a 3x4 matrix to a 3D points (assumes last row of M is 0, 0, 0, 1) __device__ void transformFrom4Matrix(double M[size4x4], double point[3], double output[3]) { output[0] = M[0 * 4 + 0] * point[0] + M[0 * 4 + 1] * point[1] + M[0 * 4 + 2] * point[2] + M[0 * 4 + 3]; output[1] = M[1 * 4 + 0] * point[0] + M[1 * 4 + 1] * point[1] + M[1 * 4 + 2] * point[2] + M[1 * 4 + 3]; output[2] = M[2 * 4 + 0] * point[0] + M[2 * 4 + 1] * point[1] + M[2 * 4 + 2] * point[2] + M[2 * 4 + 3]; } //***************************************************************************** // Compute the norm of a 3 vec __device__ double norm(double vec[3]) { return sqrt(vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2]); } //***************************************************************************** //Ray potential function which computes the increment to the current voxel __device__ void rayPotential(double realDistance, double depthMapDistance, double& res) { double diff = (realDistance - depthMapDistance); double absoluteDiff = abs(diff); // Can't divide by zero int sign = diff != 0 ? diff / absoluteDiff : 0; if (absoluteDiff > c_rayPotentialDelta) res = diff > 0 ? c_rayPotentialEpsilon * c_rayPotentialRho : - c_rayPotentialEta * c_rayPotentialRho; else if (absoluteDiff > c_rayPotentialThick) res = c_rayPotentialRho * sign; else res = (c_rayPotentialRho / c_rayPotentialThick) * diff; } //***************************************************************************** // Compute the voxel Id on a 1D table according to its 3D coordinates __device__ int computeVoxelIDGrid(int coordinates[3]) { int dimX = c_gridDims.x; int dimY = c_gridDims.y; int i = coordinates[0]; int j = coordinates[1]; int k = coordinates[2]; return (k*dimY + j)*dimX + i; } //***************************************************************************** //Compute the pixel Id on a 1D table according to its 3D coordinates (third coordinate is not used) __device__ int computeVoxelIDDepth(int coordinates[3]) { int dimX = c_depthMapDims.x; int dimY = c_depthMapDims.y; int x = coordinates[0]; int y = coordinates[1]; // /!\ vtkImageData has its origin at the bottom left, not top left return (dimX*(dimY - 1 - y)) + x; } //***************************************************************************** // Main kernel for adding a depth map to the volume __global__ void depthMapKernel(double* depths, double* weights, double matrixK[size4x4], double matrixRT[size4x4], double* output, int z_offset) { // Get voxel coordinate according to thread id int voxelIndex[3] = { (int)(blockIdx.x * blockDim.x + threadIdx.x), (int)(blockIdx.y * blockDim.y + threadIdx.y), (int)blockIdx.z + z_offset }; if (voxelIndex[0] >= c_gridDims.x || voxelIndex[1] >= c_gridDims.y || voxelIndex[2] >= c_gridDims.z) { return; } double voxelCenterCoordinate[3]; computeVoxelCenter(voxelIndex, voxelCenterCoordinate); // Transform voxel center from real coord to camera coords double voxelCenterCamera[3]; transformFrom4Matrix(matrixRT, voxelCenterCoordinate, voxelCenterCamera); // Transform voxel center from camera coords to depth map homogeneous coords double voxelCenterHomogen[3]; transformFrom4Matrix(matrixK, voxelCenterCamera, voxelCenterHomogen); if (voxelCenterHomogen[2] < 0) return; // Get voxel center on depth map coord double voxelCenterDepthMap[2]; voxelCenterDepthMap[0] = voxelCenterHomogen[0] / voxelCenterHomogen[2]; 
voxelCenterDepthMap[1] = voxelCenterHomogen[1] / voxelCenterHomogen[2]; // Get real pixel position (approximation) int pixel[3]; pixel[0] = round(voxelCenterDepthMap[0]); pixel[1] = round(voxelCenterDepthMap[1]); pixel[2] = 0; // Test if coordinate are inside depth map if (pixel[0] < 0 || pixel[1] < 0 || pixel[0] >= c_depthMapDims.x || pixel[1] >= c_depthMapDims.y) return; // Compute the ID on depthmap values according to pixel position and depth map dimensions int depthMapId = computeVoxelIDDepth(pixel); double depth = depths[depthMapId]; double weight = weights ? weights[depthMapId] : 1.0; if (depth <= 0 || weight <= 0) return; int gridId = computeVoxelIDGrid(voxelIndex); // Get the distance between voxel and camera double realDepth = voxelCenterCamera[2]; double newValue; rayPotential(realDepth, depth, newValue); // Update the value to the output output[gridId] += weight * newValue; } //***************************************************************************** // Initialize cuda constants void cuda_initalize(int h_gridDims[3], // Dimensions of the output volume double h_gridOrig[3], // Origin of the output volume double h_gridSpacing[3], // Spacing of the output volume double h_rayPThick, double h_rayPRho, double h_rayPEta, double h_rayPEpsilon, double h_rayPDelta) { CudaErrorCheck(cudaMemcpyToSymbol(c_gridDims, h_gridDims, 3 * sizeof(int))); CudaErrorCheck(cudaMemcpyToSymbol(c_gridOrig, h_gridOrig, 3 * sizeof(double))); CudaErrorCheck(cudaMemcpyToSymbol(c_gridSpacing, h_gridSpacing, 3 * sizeof(double))); CudaErrorCheck(cudaMemcpyToSymbol(c_rayPotentialThick, &h_rayPThick, sizeof(double))); CudaErrorCheck(cudaMemcpyToSymbol(c_rayPotentialRho, &h_rayPRho, sizeof(double))); CudaErrorCheck(cudaMemcpyToSymbol(c_rayPotentialEta, &h_rayPEta, sizeof(double))); CudaErrorCheck(cudaMemcpyToSymbol(c_rayPotentialEpsilon, &h_rayPEpsilon, sizeof(double))); CudaErrorCheck(cudaMemcpyToSymbol(c_rayPotentialDelta, &h_rayPDelta, sizeof(double))); grid_dims[0] = h_gridDims[0]; grid_dims[1] = h_gridDims[1]; grid_dims[2] = h_gridDims[2]; } //***************************************************************************** void launch_depth_kernel(double * d_depth, double * d_conf, int h_depthMapDims[2], double d_K[size4x4], double d_RT[size4x4], double* d_volume, unsigned max_voxels_per_launch) { auto logger = kwiver::vital::get_logger("arrows.cuda.integrate_depth_maps"); int zstep = grid_dims[2]; unsigned num_voxels_xy = grid_dims[0] * grid_dims[1]; if (max_voxels_per_launch > 0) { zstep = max_voxels_per_launch / num_voxels_xy; } if (zstep == 0) { zstep = 1; LOG_WARN(logger, num_voxels_xy << " voxels per X-Y slice exceeds " << max_voxels_per_launch); } // Organize threads into blocks and grids // Number of threads on each block dim3 dimBlock(16, 16, 1); // Number of blocks on a grid dim3 dimGrid((grid_dims[0] - 1) / dimBlock.x + 1, (grid_dims[1] - 1) / dimBlock.y + 1, zstep); CudaErrorCheck(cudaMemcpyToSymbol(c_depthMapDims, h_depthMapDims, 2 * sizeof(int))); CudaErrorCheck(cudaDeviceSynchronize()); for (int z_offset = 0; z_offset < grid_dims[2]; z_offset += zstep) { LOG_DEBUG(logger, "Launching kernel with Z=" << z_offset); depthMapKernel << < dimGrid, dimBlock >> >(d_depth, d_conf, d_K, d_RT, d_volume, z_offset); CudaErrorCheck(cudaPeekAtLastError()); CudaErrorCheck(cudaDeviceSynchronize()); } } #endif
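Both versions of the depth-map integrator above stage per-launch parameters (grid origin, spacing, dimensions, ray-potential constants) in __constant__ memory via cudaMemcpyToSymbol / hipMemcpyToSymbol. A standalone CUDA sketch of that pattern with illustrative names only:

#include <cuda_runtime.h>
#include <cstdio>

__constant__ double c_spacing[3];   // small, read-only per-launch parameters

__global__ void scale(const double* in, double* out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = in[i] * c_spacing[i % 3];
}

int main() {
  const int n = 6;
  double h_in[n] = {1, 1, 1, 2, 2, 2}, h_out[n] = {0};
  double h_spacing[3] = {0.5, 1.0, 2.0};
  double *d_in, *d_out;
  cudaMalloc((void**)&d_in, n * sizeof(double));
  cudaMalloc((void**)&d_out, n * sizeof(double));
  cudaMemcpy(d_in, h_in, n * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpyToSymbol(c_spacing, h_spacing, 3 * sizeof(double));  // fill the constant bank
  scale<<<1, 32>>>(d_in, d_out, n);
  cudaMemcpy(h_out, d_out, n * sizeof(double), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("%f\n", h_out[i]);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}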
00fa95efa07a6184f6f6d22a9eac12d1d9635de4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void subtract_kernal(float* data, float f, const int totaltc) { int idx = threadIdx.x + (blockIdx.x + blockIdx.y*gridDim.x)*MAX_THREADS; if(idx < totaltc){ data[idx] = data[idx] - f; } }
00fa95efa07a6184f6f6d22a9eac12d1d9635de4.cu
#include "includes.h" __global__ void subtract_kernal(float* data, float f, const int totaltc) { int idx = threadIdx.x + (blockIdx.x + blockIdx.y*gridDim.x)*MAX_THREADS; if(idx < totaltc){ data[idx] = data[idx] - f; } }
3c180c9b8432682df5565a1c3f130d88eaad47c1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Fermat * * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "MeshStorage.h" #include <cugar/basic/vector.h> #include <cugar/linalg/matrix.h> __global__ void translate_group_kernel( MeshView mesh, const uint32 group_id, const float3 delta, uint32* set) { const uint32 begin = mesh.group_offsets[group_id]; const uint32 end = mesh.group_offsets[group_id + 1]; const uint32 thread_id = threadIdx.x + blockIdx.x * blockDim.x; const uint32 tri_id = begin + thread_id; if (tri_id >= end) return; for (uint32 i = 0; i < 3; ++i) { const uint32 vertex_id = mesh.vertex_indices[tri_id * 3 + i]; const uint32 word_id = vertex_id / 32u; const uint32 bit_id = vertex_id & 31u; const uint32 bit_mask = 1u << bit_id; if ((cugar::atomic_or( set + word_id, bit_mask ) & bit_mask) == 0u) { float3* v = reinterpret_cast<float3*>(mesh.vertex_data) + vertex_id; v->x += delta.x; v->y += delta.y; v->z += delta.z; } } } // translate a given group // SUTILAPI void translate_group( DeviceMeshStorage& mesh, const uint32 group_id, const float3 delta) { // NOTE: device vector reads! const uint32 begin = mesh.m_group_offsets[group_id]; const uint32 end = mesh.m_group_offsets[group_id + 1]; const uint32 n_entries = end - begin; const uint32 block_dim = 128; const uint32 grid_dim = cugar::divide_ri(n_entries,block_dim); cugar::caching_device_vector<uint32> set(cugar::divide_ri(mesh.getNumVertices(),32u)); hipLaunchKernelGGL(( translate_group_kernel), dim3(grid_dim),dim3(block_dim), 0, 0, mesh.view(), group_id, delta, cugar::raw_pointer(set) ); }
3c180c9b8432682df5565a1c3f130d88eaad47c1.cu
/* * Fermat * * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "MeshStorage.h" #include <cugar/basic/vector.h> #include <cugar/linalg/matrix.h> __global__ void translate_group_kernel( MeshView mesh, const uint32 group_id, const float3 delta, uint32* set) { const uint32 begin = mesh.group_offsets[group_id]; const uint32 end = mesh.group_offsets[group_id + 1]; const uint32 thread_id = threadIdx.x + blockIdx.x * blockDim.x; const uint32 tri_id = begin + thread_id; if (tri_id >= end) return; for (uint32 i = 0; i < 3; ++i) { const uint32 vertex_id = mesh.vertex_indices[tri_id * 3 + i]; const uint32 word_id = vertex_id / 32u; const uint32 bit_id = vertex_id & 31u; const uint32 bit_mask = 1u << bit_id; if ((cugar::atomic_or( set + word_id, bit_mask ) & bit_mask) == 0u) { float3* v = reinterpret_cast<float3*>(mesh.vertex_data) + vertex_id; v->x += delta.x; v->y += delta.y; v->z += delta.z; } } } // translate a given group // SUTILAPI void translate_group( DeviceMeshStorage& mesh, const uint32 group_id, const float3 delta) { // NOTE: device vector reads! const uint32 begin = mesh.m_group_offsets[group_id]; const uint32 end = mesh.m_group_offsets[group_id + 1]; const uint32 n_entries = end - begin; const uint32 block_dim = 128; const uint32 grid_dim = cugar::divide_ri(n_entries,block_dim); cugar::caching_device_vector<uint32> set(cugar::divide_ri(mesh.getNumVertices(),32u)); translate_group_kernel<<<grid_dim,block_dim>>>( mesh.view(), group_id, delta, cugar::raw_pointer(set) ); }
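translate_group_kernel above keeps a one-bit-per-vertex set and claims each bit with an atomic OR so that vertices shared between triangles are translated exactly once. A minimal CUDA sketch of that claim-once idiom, written without the cugar helpers:

#include <cuda_runtime.h>

// Returns true for exactly one thread per element id, using a 1-bit-per-id set.
__device__ bool claim_once(unsigned int* set, unsigned int id) {
  unsigned int word = id / 32u;
  unsigned int mask = 1u << (id & 31u);
  return (atomicOr(set + word, mask) & mask) == 0u;  // old bit was clear -> we won the claim
}

__global__ void touch_each_once(unsigned int* set, const int* ids, int n, float* values) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  if (claim_once(set, ids[i]))       // first thread to see this id does the update
    values[ids[i]] += 1.0f;
}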
005a8aea65b05cb86dade2779fefa78d6a0e6b70.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ///////////////////////////////////////////////////////////////////////////////// #include "assert.h" #include "matx.h" #include "test_types.h" #include "utilities.h" #include "gtest/gtest.h" using namespace matx; template <typename TensorType> class SolveTestsFloatNonComplexNonHalf : public ::testing::Test { }; TYPED_TEST_SUITE(SolveTestsFloatNonComplexNonHalf, MatXFloatNonComplexNonHalfTypes); TYPED_TEST(SolveTestsFloatNonComplexNonHalf, CGSolve) { MATX_ENTER_HANDLER(); int gN = 4; int N = gN * gN; int BATCH = 4; auto A = make_tensor<TypeParam, 3> ({BATCH, N, N}); auto X = make_tensor<TypeParam, 2> ({BATCH, N}); auto B = make_tensor<TypeParam, 2> ({BATCH, N}); // Simple 1D Poisson matrix for(int b = 0; b < BATCH; b++) { for(int i = 0; i < N; i++) { X(b,i) = TypeParam(0+b); B(b,i) = TypeParam(1+b); for(int j = 0; j < N; j++) { if(i==j) A(b,i,j) = 2; else if( i == j-1) A(b,i,j) = -1; else if (i == j+1) A(b,i,j) = -1; else A(b,i,j) = 0; } } } // example-begin cgsolve-test-1 (X = cgsolve(A, B, .00001, 10)).run(); // example-end cgsolve-test-1 matvec(B, A, X); hipDeviceSynchronize(); for(int i = 0; i < BATCH; i++) { for(int j = 0; j < N; j++) { ASSERT_NEAR(B(i,j), TypeParam(1+i), .0001); } } MATX_EXIT_HANDLER(); }
005a8aea65b05cb86dade2779fefa78d6a0e6b70.cu
//////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ///////////////////////////////////////////////////////////////////////////////// #include "assert.h" #include "matx.h" #include "test_types.h" #include "utilities.h" #include "gtest/gtest.h" using namespace matx; template <typename TensorType> class SolveTestsFloatNonComplexNonHalf : public ::testing::Test { }; TYPED_TEST_SUITE(SolveTestsFloatNonComplexNonHalf, MatXFloatNonComplexNonHalfTypes); TYPED_TEST(SolveTestsFloatNonComplexNonHalf, CGSolve) { MATX_ENTER_HANDLER(); int gN = 4; int N = gN * gN; int BATCH = 4; auto A = make_tensor<TypeParam, 3> ({BATCH, N, N}); auto X = make_tensor<TypeParam, 2> ({BATCH, N}); auto B = make_tensor<TypeParam, 2> ({BATCH, N}); // Simple 1D Poisson matrix for(int b = 0; b < BATCH; b++) { for(int i = 0; i < N; i++) { X(b,i) = TypeParam(0+b); B(b,i) = TypeParam(1+b); for(int j = 0; j < N; j++) { if(i==j) A(b,i,j) = 2; else if( i == j-1) A(b,i,j) = -1; else if (i == j+1) A(b,i,j) = -1; else A(b,i,j) = 0; } } } // example-begin cgsolve-test-1 (X = cgsolve(A, B, .00001, 10)).run(); // example-end cgsolve-test-1 matvec(B, A, X); cudaDeviceSynchronize(); for(int i = 0; i < BATCH; i++) { for(int j = 0; j < N; j++) { ASSERT_NEAR(B(i,j), TypeParam(1+i), .0001); } } MATX_EXIT_HANDLER(); }
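The test above builds batched 1-D Poisson systems (2 on the diagonal, -1 on the off-diagonals) and solves them with cgsolve. A plain C++ sketch of unpreconditioned conjugate gradient on the same matrix, for reference only; this is not the MatX implementation:

#include <cstdio>
#include <cmath>
#include <vector>

// y = A*x for the 1-D Poisson matrix (2 on the diagonal, -1 off-diagonal).
static void apply_poisson(const std::vector<double>& x, std::vector<double>& y) {
  int n = (int)x.size();
  for (int i = 0; i < n; ++i)
    y[i] = 2.0 * x[i] - (i > 0 ? x[i - 1] : 0.0) - (i + 1 < n ? x[i + 1] : 0.0);
}

int main() {
  const int n = 16;                                   // matches N = gN*gN in the test
  std::vector<double> x(n, 0.0), b(n, 1.0), r = b, p = r, Ap(n);
  double rs = 0.0;
  for (double v : r) rs += v * v;
  for (int it = 0; it < n && std::sqrt(rs) > 1e-10; ++it) {
    apply_poisson(p, Ap);
    double pAp = 0.0;
    for (int i = 0; i < n; ++i) pAp += p[i] * Ap[i];
    double alpha = rs / pAp;
    for (int i = 0; i < n; ++i) { x[i] += alpha * p[i]; r[i] -= alpha * Ap[i]; }
    double rs_new = 0.0;
    for (double v : r) rs_new += v * v;
    for (int i = 0; i < n; ++i) p[i] = r[i] + (rs_new / rs) * p[i];
    rs = rs_new;
  }
  apply_poisson(x, Ap);                               // check A*x against b
  for (int i = 0; i < n; ++i) printf("residual[%d] = %g\n", i, Ap[i] - b[i]);
  return 0;
}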
9ed1bebc7c927622c2b20ad3c2f3907ad0970cf9.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> #define NUM_THREADS 1024 //#define EPSILON 0.0001 #define EPSILON 0.00001 //--> error is too small #define height 256 #define width 10 #define UPPER 0.01 #define LOWER -0.01 // KERNEL: x*A = B __global__ void MatMul(float* x, float* A, float* B) { // index into flattened weights matrix int i = blockDim.x * blockIdx.x + threadIdx.x; // index into the input vector int row = i / width; // index into the output vector int col = i % width; // zero out resultant vector B if (i < width) B[i] = 0.0; __syncthreads(); if ((i < height * width) && (row < height)) { // TODO: atomicAdd to local, shared output vectors --> atomicAdd to global atomicAdd(&B[col], x[row] * A[i]); __syncthreads(); if (i < width && B[i] < 0.0) B[i] = 0.0; } } // HOST int main(int argc, char** argv) { // Variables float *h_x, *h_A, *h_B, *d_x, *d_A, *d_B; // Allocate vectors and matrices in host memory and device memory h_x = (float*)malloc(height*sizeof(float)); h_A = (float*)malloc(height*width*sizeof(float)); h_B = (float*)malloc(width*sizeof(float)); hipMalloc((void**)&d_x, height*sizeof(float)); hipMalloc((void**)&d_A, height*width*sizeof(float)); hipMalloc((void**)&d_B, width*sizeof(float)); // Initialize input vector x for (int i = 0; i < height; ++i) { h_x[i] = rand() / (float)RAND_MAX - 0.5; } // Initialize input matrix A for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { // initialize weights matrix values to be between LOWER and UPPER h_A[i*width + j] = (rand() / (float)RAND_MAX)*(UPPER - LOWER) + LOWER; } } // Copy vectors from host memory to device memory hipMemcpy(d_x, h_x, height*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_A, h_A, height*width*sizeof(float), hipMemcpyHostToDevice); // FILL IN KERNEL SETUP AND INVOCATION int blocks = (height*width) / NUM_THREADS; if ((height*width) % NUM_THREADS != 0) blocks++; hipLaunchKernelGGL(( MatMul) , dim3(blocks), dim3(NUM_THREADS) , 0, 0, d_x, d_A, d_B); hipDeviceSynchronize(); // Copy result from device memory to host memory hipMemcpy(h_B, d_B, width*sizeof(float), hipMemcpyDeviceToHost); bool correct = true; // Calculate solution on the host and compare float* result = (float*)malloc(width*sizeof(float)); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { // zero out result elements if (i == 0) result[j] = 0.0; result[j] += h_x[i] * h_A[i*width + j]; } } for (int j = 0; j < width; j++) { if (result[j] < 0.0) { printf("--old result[%i]: %f--", j, result[j]); result[j] = 0.0; } if (fabs(h_B[j] - result[j]) > EPSILON) { printf("ERROR: expected h_B[%i] = %f but received %f\n", j, result[j], h_B[j]); correct = false; //break; } else { printf("result[j]: %f\th_B[j]: %f\n", result[j], h_B[j]); } } if (correct) printf("---PASSED---\n"); // Free host and device memory hipFree(d_x); hipFree(d_A); hipFree(d_B); free(h_x); free(h_A); free(h_B); free(result); }
9ed1bebc7c927622c2b20ad3c2f3907ad0970cf9.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <cuda.h> #define NUM_THREADS 1024 //#define EPSILON 0.0001 #define EPSILON 0.00001 //--> error is too small #define height 256 #define width 10 #define UPPER 0.01 #define LOWER -0.01 // KERNEL: x*A = B __global__ void MatMul(float* x, float* A, float* B) { // index into flattened weights matrix int i = blockDim.x * blockIdx.x + threadIdx.x; // index into the input vector int row = i / width; // index into the output vector int col = i % width; // zero out resultant vector B if (i < width) B[i] = 0.0; __syncthreads(); if ((i < height * width) && (row < height)) { // TODO: atomicAdd to local, shared output vectors --> atomicAdd to global atomicAdd(&B[col], x[row] * A[i]); __syncthreads(); if (i < width && B[i] < 0.0) B[i] = 0.0; } } // HOST int main(int argc, char** argv) { // Variables float *h_x, *h_A, *h_B, *d_x, *d_A, *d_B; // Allocate vectors and matrices in host memory and device memory h_x = (float*)malloc(height*sizeof(float)); h_A = (float*)malloc(height*width*sizeof(float)); h_B = (float*)malloc(width*sizeof(float)); cudaMalloc((void**)&d_x, height*sizeof(float)); cudaMalloc((void**)&d_A, height*width*sizeof(float)); cudaMalloc((void**)&d_B, width*sizeof(float)); // Initialize input vector x for (int i = 0; i < height; ++i) { h_x[i] = rand() / (float)RAND_MAX - 0.5; } // Initialize input matrix A for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { // initialize weights matrix values to be between LOWER and UPPER h_A[i*width + j] = (rand() / (float)RAND_MAX)*(UPPER - LOWER) + LOWER; } } // Copy vectors from host memory to device memory cudaMemcpy(d_x, h_x, height*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_A, h_A, height*width*sizeof(float), cudaMemcpyHostToDevice); // FILL IN KERNEL SETUP AND INVOCATION int blocks = (height*width) / NUM_THREADS; if ((height*width) % NUM_THREADS != 0) blocks++; MatMul <<< blocks, NUM_THREADS >>> (d_x, d_A, d_B); cudaDeviceSynchronize(); // Copy result from device memory to host memory cudaMemcpy(h_B, d_B, width*sizeof(float), cudaMemcpyDeviceToHost); bool correct = true; // Calculate solution on the host and compare float* result = (float*)malloc(width*sizeof(float)); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { // zero out result elements if (i == 0) result[j] = 0.0; result[j] += h_x[i] * h_A[i*width + j]; } } for (int j = 0; j < width; j++) { if (result[j] < 0.0) { printf("--old result[%i]: %f--", j, result[j]); result[j] = 0.0; } if (fabs(h_B[j] - result[j]) > EPSILON) { printf("ERROR: expected h_B[%i] = %f but received %f\n", j, result[j], h_B[j]); correct = false; //break; } else { printf("result[j]: %f\th_B[j]: %f\n", result[j], h_B[j]); } } if (correct) printf("---PASSED---\n"); // Free host and device memory cudaFree(d_x); cudaFree(d_A); cudaFree(d_B); free(h_x); free(h_A); free(h_B); free(result); }
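In the file above, only block 0 has threads with i < width, so the in-kernel zeroing of B and the final clamp are ordered only within that block by __syncthreads(); atomicAdd calls from other blocks are not ordered against them. A common alternative, sketched here rather than taken from the file, is to clear the output before the launch and keep the clamp in a follow-up kernel:

#include <cuda_runtime.h>

__global__ void matvec_accumulate(const float* x, const float* A, float* B,
                                  int height, int width) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;   // index into the flattened matrix
  if (i >= height * width) return;
  atomicAdd(&B[i % width], x[i / width] * A[i]);   // B must be zeroed before launch
}

__global__ void clamp_nonneg(float* B, int width) {
  int j = blockIdx.x * blockDim.x + threadIdx.x;
  if (j < width && B[j] < 0.0f) B[j] = 0.0f;       // same ReLU-style clamp as the test
}

void xA_relu(const float* d_x, const float* d_A, float* d_B, int height, int width) {
  cudaMemset(d_B, 0, width * sizeof(float));       // stream order puts this before the kernel
  int n = height * width, threads = 256;
  matvec_accumulate<<<(n + threads - 1) / threads, threads>>>(d_x, d_A, d_B, height, width);
  clamp_nonneg<<<(width + 255) / 256, 256>>>(d_B, width);  // runs after all accumulation
}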
4c78ea84f834816f5ba9aabd38a4471c815f4ee4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <stdio.h> #include <hiprand/hiprand_kernel.h> #include <cmath> #include "vec_3d.cuh" #include "ray.cuh" #include "sphere_hip.cuh" #include "hittable_list.cuh" #include "camera.cuh" #include "material.cuh" #include "utility.hpp" #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) { if (result) { std::cerr << "CUDA error: " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n"; hipDeviceReset(); exit(99); } } __device__ float did_hit_sphere(const point_3d sphere_center, float sphere_radius, const ray r) { vec_3d oc = r.orig() - sphere_center; float a = r.dir().length_squared(); float half_b = oc.dot(r.dir()); float c = oc.length_squared() - sphere_radius * sphere_radius; float delta = half_b * half_b - a * c; if (delta < 0) return -1; else return (- half_b - sqrt(delta))/a; } __device__ vec_3d ray_color(const ray& r, hittable** d_world, hiprandState_t* local_rand_state) { ray current_ray = r; vec_3d current_attenuation(1.0f, 1.0f, 1.0f); for (int i = 0; i < 50; i++) { hit_record record; if ((*d_world)->hit(current_ray, 0.001f, infinity, record)) { ray scattered; vec_3d attenuation; if(record.mat_ptr->scatter(current_ray, record, attenuation, scattered, local_rand_state)) { current_ray = scattered; current_attenuation *= attenuation; } else { return vec_3d(0, 0, 0); } } else { const vec_3d unit_vector = current_ray.dir().unit_vector(); float s = 0.5f * (unit_vector.y() + 1.0f); vec_3d final_color = (1.0f - s) * vec_3d(1.0f, 1.0f, 1.0f) + s * vec_3d(0.5f, 0.7f, 1.0f); return current_attenuation * final_color; } } return vec_3d(0, 0, 0); } __global__ void rand_init(hiprandState_t* rand_state) { if(threadIdx.x == 0 && blockIdx.x == 0) { hiprand_init(1984, 0, 0, rand_state); } } __global__ void render_init_rand(const int height, const int width, hiprandState_t* rand_state) { int thread_x = blockIdx.x * blockDim.x + threadIdx.x; int thread_y = blockIdx.y * blockDim.y + threadIdx.y; if ((thread_x >= width) || (thread_y >= height)) return; int current_index = thread_y * width + thread_x; hiprand_init(1984 + current_index, 0, 0, &rand_state[current_index]); } __global__ void render(hittable** d_world, const int height, const int width, float* frame_buffer, const int number_of_samples, hiprandState_t* rand_state, camera** cam) { int thread_x = blockIdx.x * blockDim.x + threadIdx.x; int thread_y = blockIdx.y * blockDim.y + threadIdx.y; if ((thread_x >= width) || (thread_y >= height)) return; float v = float(thread_y)/(height); float u = float(thread_x)/(width); int current_index = thread_y * width + thread_x; hiprandState_t local_rand_state = rand_state[current_index]; vec_3d color(0, 0, 0); for (int i = 0; i < number_of_samples; i++) { ray r = (*cam)->get_ray( u + hiprand_uniform(&local_rand_state)/float(width), v + hiprand_uniform(&local_rand_state)/float(height), &local_rand_state); color += ray_color(r, d_world, &local_rand_state); } frame_buffer[3 * current_index + 0] = color.data[0]; frame_buffer[3 * current_index + 1] = color.data[1]; frame_buffer[3 * current_index + 2] = color.data[2]; } #define RND (hiprand_uniform(&local_rand_state)) __global__ void create_scene(hittable** d_objects, hittable** d_world, camera** d_cam, float aspect_ratio, int p_rank, int number_of_divisions, hiprandState_t* rand_state) { if 
(threadIdx.x == 0 && blockIdx.x == 0) { hiprandState_t local_rand_state = *rand_state; d_objects[0] = new sphere(vec_3d(0, -1000.0f, -1), 1000, new lambertian(vec_3d(0.5, 0.5, 0.5))); int i = 1; for (int a = -11; a < 11; a++) { for(int b = -11; b < 11; b++) { float choose_mat = RND; vec_3d center(a+0.9*RND, 0.2, b+0.9*RND); if(choose_mat < 0.8f) d_objects[i++] = new sphere(center, 0.2, new lambertian(vec_3d(RND*RND, RND*RND, RND*RND))); else if (choose_mat < 0.95f) d_objects[i++] = new sphere(center, 0.2, new metal(vec_3d(0.5f * (1.0f + RND), 0.5f * (1.0f + RND), 0.5f * (1.0f + RND)), 0.5f*RND)); else d_objects[i++] = new sphere(center, 0.2, new dielectric(1.5)); } } d_objects[i++] = new sphere(vec_3d(0, 1, 0), 1.0f, new dielectric(1.5)); d_objects[i++] = new sphere(vec_3d(-4, 1, 0), 1.0f, new lambertian(vec_3d(0.4, 0.2, 0.1))); d_objects[i++] = new sphere(vec_3d(4, 1, 0), 1.0f, new metal(vec_3d(0.7f, 0.6f, 0.5f), 0.0f)); *rand_state = local_rand_state; *d_world = new hittable_list(d_objects, 1 + 22*22 + 3); point_3d lookfrom(13, 2, 3); point_3d lookat(0, 0, 0); vec_3d vup(0, 1, 0); float dist_to_focus = 10.0f; float aperture = 0.1f; *d_cam = new camera(lookfrom, lookat, vup, 30, aspect_ratio, aperture, dist_to_focus, p_rank, number_of_divisions); } } __global__ void cleanup_scene(hittable** d_objects, hittable** d_world, camera** d_cam) { for (int i = 0; i < 1 + 22*22 + 3; i++) { delete ((sphere*) d_objects[i])->mat_ptr; delete d_objects[i]; } delete *d_world; delete *d_cam; } void run_raytracer(int p_rank, int number_of_divisions, float* out_fb, int image_width, float aspect_ratio, const float number_of_samples) { const int image_height = static_cast<int>((image_width / aspect_ratio) * 1/number_of_divisions); hittable **d_objects, **d_world; camera** d_cam; checkCudaErrors(hipMalloc(&d_objects, (22*22+4)*sizeof(hittable *))); checkCudaErrors(hipMalloc(&d_world, sizeof(hittable *))); checkCudaErrors(hipMalloc(&d_cam, sizeof(camera *))); float* frame_buffer; checkCudaErrors(hipMallocManaged(&frame_buffer, 3*image_height*image_width*sizeof(float))); hiprandState_t* d_rand_state; checkCudaErrors(hipMalloc(&d_rand_state, image_height*image_width*sizeof(hiprandState_t))); hiprandState_t* d_rand_state2; checkCudaErrors(hipMalloc(&d_rand_state2, 1*sizeof(hiprandState_t))); hipStream_t cuda0; hipStreamCreate(&cuda0); std::cerr << "stream: " << cuda0 << '\n'; hipLaunchKernelGGL(( rand_init), dim3(1), dim3(1), 0, cuda0, d_rand_state2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipStreamSynchronize(cuda0)); hipLaunchKernelGGL(( create_scene), dim3(1), dim3(1), 0, cuda0, d_objects, d_world, d_cam, aspect_ratio, p_rank, number_of_divisions, d_rand_state2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipStreamSynchronize(cuda0)); int threadsDim = 32; dim3 numThreads(threadsDim, threadsDim); dim3 numBlocks((image_width / threadsDim) + 1, (image_height / threadsDim) + 1); hipLaunchKernelGGL(( render_init_rand), dim3(numBlocks), dim3(numThreads), 0, cuda0, image_height, image_width, d_rand_state); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipStreamSynchronize(cuda0)); hipLaunchKernelGGL(( render), dim3(numBlocks),dim3(numThreads), 0, cuda0, d_world, image_height, image_width, frame_buffer, number_of_samples, d_rand_state, d_cam); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipStreamSynchronize(cuda0)); hipMemcpyAsync(out_fb, frame_buffer, 3*image_height*image_width * sizeof(float), hipMemcpyDeviceToHost, cuda0); checkCudaErrors(hipStreamSynchronize(cuda0)); 
hipLaunchKernelGGL(( cleanup_scene), dim3(1), dim3(1), 0, cuda0, d_objects, d_world, d_cam); checkCudaErrors(hipGetLastError()); hipDeviceReset(); }
4c78ea84f834816f5ba9aabd38a4471c815f4ee4.cu
#include <iostream> #include <stdio.h> #include <curand_kernel.h> #include <cmath> #include "vec_3d.cuh" #include "ray.cuh" #include "sphere.cuh" #include "hittable_list.cuh" #include "camera.cuh" #include "material.cuh" #include "utility.hpp" #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) { if (result) { std::cerr << "CUDA error: " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n"; cudaDeviceReset(); exit(99); } } __device__ float did_hit_sphere(const point_3d sphere_center, float sphere_radius, const ray r) { vec_3d oc = r.orig() - sphere_center; float a = r.dir().length_squared(); float half_b = oc.dot(r.dir()); float c = oc.length_squared() - sphere_radius * sphere_radius; float delta = half_b * half_b - a * c; if (delta < 0) return -1; else return (- half_b - sqrt(delta))/a; } __device__ vec_3d ray_color(const ray& r, hittable** d_world, curandState* local_rand_state) { ray current_ray = r; vec_3d current_attenuation(1.0f, 1.0f, 1.0f); for (int i = 0; i < 50; i++) { hit_record record; if ((*d_world)->hit(current_ray, 0.001f, infinity, record)) { ray scattered; vec_3d attenuation; if(record.mat_ptr->scatter(current_ray, record, attenuation, scattered, local_rand_state)) { current_ray = scattered; current_attenuation *= attenuation; } else { return vec_3d(0, 0, 0); } } else { const vec_3d unit_vector = current_ray.dir().unit_vector(); float s = 0.5f * (unit_vector.y() + 1.0f); vec_3d final_color = (1.0f - s) * vec_3d(1.0f, 1.0f, 1.0f) + s * vec_3d(0.5f, 0.7f, 1.0f); return current_attenuation * final_color; } } return vec_3d(0, 0, 0); } __global__ void rand_init(curandState* rand_state) { if(threadIdx.x == 0 && blockIdx.x == 0) { curand_init(1984, 0, 0, rand_state); } } __global__ void render_init_rand(const int height, const int width, curandState* rand_state) { int thread_x = blockIdx.x * blockDim.x + threadIdx.x; int thread_y = blockIdx.y * blockDim.y + threadIdx.y; if ((thread_x >= width) || (thread_y >= height)) return; int current_index = thread_y * width + thread_x; curand_init(1984 + current_index, 0, 0, &rand_state[current_index]); } __global__ void render(hittable** d_world, const int height, const int width, float* frame_buffer, const int number_of_samples, curandState* rand_state, camera** cam) { int thread_x = blockIdx.x * blockDim.x + threadIdx.x; int thread_y = blockIdx.y * blockDim.y + threadIdx.y; if ((thread_x >= width) || (thread_y >= height)) return; float v = float(thread_y)/(height); float u = float(thread_x)/(width); int current_index = thread_y * width + thread_x; curandState local_rand_state = rand_state[current_index]; vec_3d color(0, 0, 0); for (int i = 0; i < number_of_samples; i++) { ray r = (*cam)->get_ray( u + curand_uniform(&local_rand_state)/float(width), v + curand_uniform(&local_rand_state)/float(height), &local_rand_state); color += ray_color(r, d_world, &local_rand_state); } frame_buffer[3 * current_index + 0] = color.data[0]; frame_buffer[3 * current_index + 1] = color.data[1]; frame_buffer[3 * current_index + 2] = color.data[2]; } #define RND (curand_uniform(&local_rand_state)) __global__ void create_scene(hittable** d_objects, hittable** d_world, camera** d_cam, float aspect_ratio, int p_rank, int number_of_divisions, curandState* rand_state) { if (threadIdx.x == 0 && blockIdx.x == 0) { curandState local_rand_state = *rand_state; d_objects[0] = new sphere(vec_3d(0, -1000.0f, 
-1), 1000, new lambertian(vec_3d(0.5, 0.5, 0.5))); int i = 1; for (int a = -11; a < 11; a++) { for(int b = -11; b < 11; b++) { float choose_mat = RND; vec_3d center(a+0.9*RND, 0.2, b+0.9*RND); if(choose_mat < 0.8f) d_objects[i++] = new sphere(center, 0.2, new lambertian(vec_3d(RND*RND, RND*RND, RND*RND))); else if (choose_mat < 0.95f) d_objects[i++] = new sphere(center, 0.2, new metal(vec_3d(0.5f * (1.0f + RND), 0.5f * (1.0f + RND), 0.5f * (1.0f + RND)), 0.5f*RND)); else d_objects[i++] = new sphere(center, 0.2, new dielectric(1.5)); } } d_objects[i++] = new sphere(vec_3d(0, 1, 0), 1.0f, new dielectric(1.5)); d_objects[i++] = new sphere(vec_3d(-4, 1, 0), 1.0f, new lambertian(vec_3d(0.4, 0.2, 0.1))); d_objects[i++] = new sphere(vec_3d(4, 1, 0), 1.0f, new metal(vec_3d(0.7f, 0.6f, 0.5f), 0.0f)); *rand_state = local_rand_state; *d_world = new hittable_list(d_objects, 1 + 22*22 + 3); point_3d lookfrom(13, 2, 3); point_3d lookat(0, 0, 0); vec_3d vup(0, 1, 0); float dist_to_focus = 10.0f; float aperture = 0.1f; *d_cam = new camera(lookfrom, lookat, vup, 30, aspect_ratio, aperture, dist_to_focus, p_rank, number_of_divisions); } } __global__ void cleanup_scene(hittable** d_objects, hittable** d_world, camera** d_cam) { for (int i = 0; i < 1 + 22*22 + 3; i++) { delete ((sphere*) d_objects[i])->mat_ptr; delete d_objects[i]; } delete *d_world; delete *d_cam; } void run_raytracer(int p_rank, int number_of_divisions, float* out_fb, int image_width, float aspect_ratio, const float number_of_samples) { const int image_height = static_cast<int>((image_width / aspect_ratio) * 1/number_of_divisions); hittable **d_objects, **d_world; camera** d_cam; checkCudaErrors(cudaMalloc(&d_objects, (22*22+4)*sizeof(hittable *))); checkCudaErrors(cudaMalloc(&d_world, sizeof(hittable *))); checkCudaErrors(cudaMalloc(&d_cam, sizeof(camera *))); float* frame_buffer; checkCudaErrors(cudaMallocManaged(&frame_buffer, 3*image_height*image_width*sizeof(float))); curandState* d_rand_state; checkCudaErrors(cudaMalloc(&d_rand_state, image_height*image_width*sizeof(curandState))); curandState* d_rand_state2; checkCudaErrors(cudaMalloc(&d_rand_state2, 1*sizeof(curandState))); cudaStream_t cuda0; cudaStreamCreate(&cuda0); std::cerr << "stream: " << cuda0 << '\n'; rand_init<<<1, 1, 0, cuda0>>>(d_rand_state2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaStreamSynchronize(cuda0)); create_scene<<<1, 1, 0, cuda0>>>(d_objects, d_world, d_cam, aspect_ratio, p_rank, number_of_divisions, d_rand_state2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaStreamSynchronize(cuda0)); int threadsDim = 32; dim3 numThreads(threadsDim, threadsDim); dim3 numBlocks((image_width / threadsDim) + 1, (image_height / threadsDim) + 1); render_init_rand<<<numBlocks, numThreads, 0, cuda0>>>(image_height, image_width, d_rand_state); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaStreamSynchronize(cuda0)); render<<<numBlocks,numThreads, 0, cuda0>>>(d_world, image_height, image_width, frame_buffer, number_of_samples, d_rand_state, d_cam); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaStreamSynchronize(cuda0)); cudaMemcpyAsync(out_fb, frame_buffer, 3*image_height*image_width * sizeof(float), cudaMemcpyDeviceToHost, cuda0); checkCudaErrors(cudaStreamSynchronize(cuda0)); cleanup_scene<<<1, 1, 0, cuda0>>>(d_objects, d_world, d_cam); checkCudaErrors(cudaGetLastError()); cudaDeviceReset(); }
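// The renderer above seeds one curandState per pixel (render_init_rand) and then
// draws jittered samples from that state inside render. A minimal, self-contained
// sketch of that per-pixel cuRAND pattern; the kernel names init_states and
// jitter_demo are illustrative (not taken from the file), and the seed 1984 + i
// simply mirrors the seeding used above.
#include <cstdio>
#include <cuda_runtime.h>
#include <curand_kernel.h>

__global__ void init_states(int n, curandState* states) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    // One independent sequence per pixel: distinct seed, subsequence 0, offset 0.
    curand_init(1984 + i, 0, 0, &states[i]);
}

__global__ void jitter_demo(int n, curandState* states, float* out) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    // Copy the state into a local variable, draw from it, then write it back.
    curandState local = states[i];
    out[i] = curand_uniform(&local);   // uniform in (0, 1]
    states[i] = local;
}

int main() {
    const int n = 256;
    curandState* d_states; float* d_out; float h_out[n];
    cudaMalloc(&d_states, n * sizeof(curandState));
    cudaMalloc(&d_out, n * sizeof(float));
    init_states<<<(n + 63) / 64, 64>>>(n, d_states);
    jitter_demo<<<(n + 63) / 64, 64>>>(n, d_states, d_out);
    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("first jitter value: %f\n", h_out[0]);
    cudaFree(d_states); cudaFree(d_out);
    return 0;
}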
9eaacdc6397ece83bbccd0df680055d5d77e8138.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

void initialize_cuda_runtime(void)
{
    hipSetDevice(0);
    hipFree(0);
}
9eaacdc6397ece83bbccd0df680055d5d77e8138.cu
#include "cuda_runtime.h" void initialize_cuda_runtime(void) { cudaSetDevice(0); cudaFree(0); }
5d220b41b34537c0c5b1b512225bd540bc56224c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * triplet_loss_layer.cu * */ #include <algorithm> #include <vector> #include "caffe/layers/triplet_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> void TripletLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = bottom[0]->count(); Dtype* sampleWv = NULL; Blob<Dtype> sampleWv_Blob; if(bottom.size() == 4) { sampleWv = bottom[3]->mutable_cpu_data(); }else { sampleWv_Blob.Reshape(bottom[0]->num(), 1, 1, 1); sampleWv = sampleWv_Blob.mutable_cpu_data(); for(int i= 0; i<bottom[0]->num(); i++) sampleWv[i] = Dtype(1); } caffe_gpu_sub( count, bottom[0]->gpu_data(), // a bottom[1]->gpu_data(), // p diff_ap_.mutable_gpu_data()); // a_i-p_i caffe_gpu_sub( count, bottom[0]->gpu_data(), // a bottom[2]->gpu_data(), // n diff_an_.mutable_gpu_data()); // a_i-n_i caffe_gpu_sub( count, bottom[1]->gpu_data(), // p bottom[2]->gpu_data(), // n diff_pn_.mutable_gpu_data()); // p_i-n_i caffe_gpu_powx( count, diff_ap_.mutable_gpu_data(), // a_i-p_i Dtype(2), diff_sq_ap_.mutable_gpu_data()); // (a_i-p_i)^2 caffe_gpu_gemv( CblasNoTrans, bottom[0]->num(), bottom[0]->channels(), Dtype(1.0), //alpha diff_sq_ap_.gpu_data(), // (a_i-p_i)^2 // A summer_vec_.gpu_data(), // x Dtype(0.0), //belta dist_sq_ap_.mutable_gpu_data()); // \Sum (a_i-p_i)^2 //y caffe_gpu_powx( count, diff_an_.mutable_gpu_data(), // a_i-n_i Dtype(2), diff_sq_an_.mutable_gpu_data()); // (a_i-n_i)^2 caffe_gpu_gemv( CblasNoTrans, bottom[0]->num(), bottom[0]->channels(), Dtype(1.0), //alpha diff_sq_an_.gpu_data(), // (a_i-n_i)^2 // A summer_vec_.gpu_data(), // x Dtype(0.0), //belta dist_sq_an_.mutable_gpu_data()); // \Sum (a_i-n_i)^2 //y Dtype margin = this->layer_param_.triplet_loss_param().margin(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { loss += sampleWv[i]*::max(margin + dist_sq_ap_.cpu_data()[i] - dist_sq_an_.cpu_data()[i], Dtype(0.0)); } loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> __global__ void CLLBackward(const int count, const int channels, const Dtype margin, const Dtype alpha, const Dtype* diff, const Dtype* dist_sq_ap_, const Dtype* dist_sq_an_, Dtype *sampleWv_cuda, Dtype *bottom_diff) { CUDA_KERNEL_LOOP(i, count) { int n = i / channels; // the num index, to access dist_sq_ap_ and dist_sq_an_ Dtype mdist(0.0); mdist = margin + dist_sq_ap_[n] - dist_sq_an_[n]; if (mdist > 0.0) { bottom_diff[i] = alpha*sampleWv_cuda[n]*diff[i]; // bottom_diff[i] = alpha*diff[i]; } else { bottom_diff[i] = 0; } } } template <typename Dtype> void TripletLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); const int count = bottom[0]->count(); const int channels = bottom[0]->channels(); // the weight triplet loss Dtype* sampleWv = NULL; Blob<Dtype> sampleWv_Blob; if(bottom.size() == 4) { sampleWv = bottom[3]->mutable_gpu_data(); }else { sampleWv_Blob.Reshape(bottom[0]->num(), 1, 1, 1); sampleWv = sampleWv_Blob.mutable_cpu_data(); for(int i= 0; i<bottom[0]->num(); i++) sampleWv[i] = Dtype(1); sampleWv = sampleWv_Blob.mutable_gpu_data(); } for (int i = 0; i < 3; ++i) { if (propagate_down[i]) { const Dtype sign = (i < 2) ? 
-1 : 1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast<Dtype>(bottom[0]->num()); if (i == 0) { // NOLINT_NEXT_LINE(whitespace/operators) CLLBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, channels, margin, alpha, diff_pn_.gpu_data(), // the cached eltwise difference between p and n dist_sq_ap_.gpu_data(), // the cached square distance between a and p dist_sq_an_.gpu_data(), // the cached square distance between a and n sampleWv, // the sample's weight bottom[i]->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; } else if (i == 1) { // NOLINT_NEXT_LINE(whitespace/operators) CLLBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, channels, margin, alpha, diff_ap_.gpu_data(), // the cached eltwise difference between a and p dist_sq_ap_.gpu_data(), // the cached square distance between a and p dist_sq_an_.gpu_data(), // the cached square distance between a and n sampleWv, // the sample's weight bottom[i]->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; } else if (i == 2) { // NOLINT_NEXT_LINE(whitespace/operators) CLLBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, channels, margin, alpha, diff_an_.gpu_data(), // the cached eltwise difference between a and n dist_sq_ap_.gpu_data(), // the cached square distance between a and p dist_sq_an_.gpu_data(), // the cached square distance between a and n sampleWv, // the weight's wight bottom[i]->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; } // end if } // end propagation[i] } // end for i=1:3 // release the resource, automally } INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); } // namespace caffe
5d220b41b34537c0c5b1b512225bd540bc56224c.cu
/* * triplet_loss_layer.cu * */ #include <algorithm> #include <vector> #include "caffe/layers/triplet_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> void TripletLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = bottom[0]->count(); Dtype* sampleWv = NULL; Blob<Dtype> sampleWv_Blob; if(bottom.size() == 4) { sampleWv = bottom[3]->mutable_cpu_data(); }else { sampleWv_Blob.Reshape(bottom[0]->num(), 1, 1, 1); sampleWv = sampleWv_Blob.mutable_cpu_data(); for(int i= 0; i<bottom[0]->num(); i++) sampleWv[i] = Dtype(1); } caffe_gpu_sub( count, bottom[0]->gpu_data(), // a bottom[1]->gpu_data(), // p diff_ap_.mutable_gpu_data()); // a_i-p_i caffe_gpu_sub( count, bottom[0]->gpu_data(), // a bottom[2]->gpu_data(), // n diff_an_.mutable_gpu_data()); // a_i-n_i caffe_gpu_sub( count, bottom[1]->gpu_data(), // p bottom[2]->gpu_data(), // n diff_pn_.mutable_gpu_data()); // p_i-n_i caffe_gpu_powx( count, diff_ap_.mutable_gpu_data(), // a_i-p_i Dtype(2), diff_sq_ap_.mutable_gpu_data()); // (a_i-p_i)^2 caffe_gpu_gemv( CblasNoTrans, bottom[0]->num(), bottom[0]->channels(), Dtype(1.0), //alpha diff_sq_ap_.gpu_data(), // (a_i-p_i)^2 // A summer_vec_.gpu_data(), // x Dtype(0.0), //belta dist_sq_ap_.mutable_gpu_data()); // \Sum (a_i-p_i)^2 //y caffe_gpu_powx( count, diff_an_.mutable_gpu_data(), // a_i-n_i Dtype(2), diff_sq_an_.mutable_gpu_data()); // (a_i-n_i)^2 caffe_gpu_gemv( CblasNoTrans, bottom[0]->num(), bottom[0]->channels(), Dtype(1.0), //alpha diff_sq_an_.gpu_data(), // (a_i-n_i)^2 // A summer_vec_.gpu_data(), // x Dtype(0.0), //belta dist_sq_an_.mutable_gpu_data()); // \Sum (a_i-n_i)^2 //y Dtype margin = this->layer_param_.triplet_loss_param().margin(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { loss += sampleWv[i]*std::max(margin + dist_sq_ap_.cpu_data()[i] - dist_sq_an_.cpu_data()[i], Dtype(0.0)); } loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> __global__ void CLLBackward(const int count, const int channels, const Dtype margin, const Dtype alpha, const Dtype* diff, const Dtype* dist_sq_ap_, const Dtype* dist_sq_an_, Dtype *sampleWv_cuda, Dtype *bottom_diff) { CUDA_KERNEL_LOOP(i, count) { int n = i / channels; // the num index, to access dist_sq_ap_ and dist_sq_an_ Dtype mdist(0.0); mdist = margin + dist_sq_ap_[n] - dist_sq_an_[n]; if (mdist > 0.0) { bottom_diff[i] = alpha*sampleWv_cuda[n]*diff[i]; // bottom_diff[i] = alpha*diff[i]; } else { bottom_diff[i] = 0; } } } template <typename Dtype> void TripletLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); const int count = bottom[0]->count(); const int channels = bottom[0]->channels(); // the weight triplet loss Dtype* sampleWv = NULL; Blob<Dtype> sampleWv_Blob; if(bottom.size() == 4) { sampleWv = bottom[3]->mutable_gpu_data(); }else { sampleWv_Blob.Reshape(bottom[0]->num(), 1, 1, 1); sampleWv = sampleWv_Blob.mutable_cpu_data(); for(int i= 0; i<bottom[0]->num(); i++) sampleWv[i] = Dtype(1); sampleWv = sampleWv_Blob.mutable_gpu_data(); } for (int i = 0; i < 3; ++i) { if (propagate_down[i]) { const Dtype sign = (i < 2) ? 
-1 : 1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast<Dtype>(bottom[0]->num()); if (i == 0) { // NOLINT_NEXT_LINE(whitespace/operators) CLLBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, channels, margin, alpha, diff_pn_.gpu_data(), // the cached eltwise difference between p and n dist_sq_ap_.gpu_data(), // the cached square distance between a and p dist_sq_an_.gpu_data(), // the cached square distance between a and n sampleWv, // the sample's weight bottom[i]->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; } else if (i == 1) { // NOLINT_NEXT_LINE(whitespace/operators) CLLBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, channels, margin, alpha, diff_ap_.gpu_data(), // the cached eltwise difference between a and p dist_sq_ap_.gpu_data(), // the cached square distance between a and p dist_sq_an_.gpu_data(), // the cached square distance between a and n sampleWv, // the sample's weight bottom[i]->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; } else if (i == 2) { // NOLINT_NEXT_LINE(whitespace/operators) CLLBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, channels, margin, alpha, diff_an_.gpu_data(), // the cached eltwise difference between a and n dist_sq_ap_.gpu_data(), // the cached square distance between a and p dist_sq_an_.gpu_data(), // the cached square distance between a and n sampleWv, // the weight's wight bottom[i]->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; } // end if } // end propagation[i] } // end for i=1:3 // release the resource, automally } INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); } // namespace caffe
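// The forward pass above reduces, on the host, a weighted hinge per triplet:
// w_i * max(margin + d_ap_i - d_an_i, 0), later divided by 2*N. A minimal
// standalone sketch of just that hinge on the device; it is illustrative and not
// Caffe code (no Blob/LoD machinery), and the kernel name triplet_hinge is made up.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void triplet_hinge(int n, float margin, const float* d_ap,
                              const float* d_an, const float* w, float* loss) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    float m = margin + d_ap[i] - d_an[i];
    loss[i] = w[i] * fmaxf(m, 0.0f);   // only violated triplets contribute
}

int main() {
    const int n = 4;
    float h_ap[n] = {0.2f, 0.9f, 0.1f, 0.5f};   // squared anchor-positive distances
    float h_an[n] = {0.8f, 0.3f, 0.9f, 0.6f};   // squared anchor-negative distances
    float h_w[n]  = {1.f, 1.f, 1.f, 1.f};       // per-sample weights
    float h_loss[n];
    float *d_ap, *d_an, *d_w, *d_loss;
    cudaMalloc(&d_ap, n * sizeof(float)); cudaMalloc(&d_an, n * sizeof(float));
    cudaMalloc(&d_w, n * sizeof(float));  cudaMalloc(&d_loss, n * sizeof(float));
    cudaMemcpy(d_ap, h_ap, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_an, h_an, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_w, h_w, n * sizeof(float), cudaMemcpyHostToDevice);
    triplet_hinge<<<1, 32>>>(n, 0.5f, d_ap, d_an, d_w, d_loss);
    cudaMemcpy(h_loss, d_loss, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("loss[%d] = %f\n", i, h_loss[i]);
    cudaFree(d_ap); cudaFree(d_an); cudaFree(d_w); cudaFree(d_loss);
    return 0;
}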
417953bc56e172b6a943c47f18ddc609f4194ca7.hip
// !!! This is a file automatically generated by hipify!!! #include <unittest/unittest.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include <thrust/device_malloc_allocator.h> #include <thrust/sort.h> #include <thrust/system/hip/detail/detail/stable_radix_sort.h> #if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA typedef unittest::type_list< #if !(defined(__GNUC__) && (__GNUC__ <= 4) && (__GNUC_MINOR__ <= 1)) // XXX GCC 4.1 miscompiles the char sorts with -O2 for some reason unsigned char, #endif unsigned short, unsigned int, unsigned long, unsigned long long> UnsignedIntegerTypes; template <typename T> struct TestRadixSortByKeyShortValues { void operator()(const size_t n) { thrust::host_vector<T> h_keys = unittest::random_integers<T>(n); thrust::device_vector<T> d_keys = h_keys; thrust::host_vector<short> h_values(n); thrust::device_vector<short> d_values(n); thrust::sequence(h_values.begin(), h_values.end()); thrust::sequence(d_values.begin(), d_values.end()); thrust::stable_sort_by_key(h_keys.begin(), h_keys.end(), h_values.begin()); thrust::hip::tag cuda_tag; thrust::system::cuda::detail::detail::stable_radix_sort_by_key(cuda_tag, d_keys.begin(), d_keys.end(), d_values.begin()); ASSERT_ALMOST_EQUAL(h_keys, d_keys); ASSERT_ALMOST_EQUAL(h_values, d_values); } }; VariableUnitTest<TestRadixSortByKeyShortValues, UnsignedIntegerTypes> TestRadixSortByKeyShortValuesInstance; template <typename T> struct TestRadixSortByKeyLongLongValues { void operator()(const size_t n) { thrust::host_vector<T> h_keys = unittest::random_integers<T>(n); thrust::device_vector<T> d_keys = h_keys; thrust::host_vector<long long> h_values(n); thrust::device_vector<long long> d_values(n); thrust::sequence(h_values.begin(), h_values.end()); thrust::sequence(d_values.begin(), d_values.end()); thrust::stable_sort_by_key(h_keys.begin(), h_keys.end(), h_values.begin()); thrust::hip::tag cuda_tag; thrust::system::cuda::detail::detail::stable_radix_sort_by_key(cuda_tag, d_keys.begin(), d_keys.end(), d_values.begin()); ASSERT_ALMOST_EQUAL(h_keys, d_keys); ASSERT_ALMOST_EQUAL(h_values, d_values); } }; VariableUnitTest<TestRadixSortByKeyLongLongValues, UnsignedIntegerTypes> TestRadixSortByKeyLongLongValuesInstance; #endif // THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
417953bc56e172b6a943c47f18ddc609f4194ca7.cu
#include <unittest/unittest.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include <thrust/device_malloc_allocator.h> #include <thrust/sort.h> #include <thrust/system/cuda/detail/detail/stable_radix_sort.h> #if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA typedef unittest::type_list< #if !(defined(__GNUC__) && (__GNUC__ <= 4) && (__GNUC_MINOR__ <= 1)) // XXX GCC 4.1 miscompiles the char sorts with -O2 for some reason unsigned char, #endif unsigned short, unsigned int, unsigned long, unsigned long long> UnsignedIntegerTypes; template <typename T> struct TestRadixSortByKeyShortValues { void operator()(const size_t n) { thrust::host_vector<T> h_keys = unittest::random_integers<T>(n); thrust::device_vector<T> d_keys = h_keys; thrust::host_vector<short> h_values(n); thrust::device_vector<short> d_values(n); thrust::sequence(h_values.begin(), h_values.end()); thrust::sequence(d_values.begin(), d_values.end()); thrust::stable_sort_by_key(h_keys.begin(), h_keys.end(), h_values.begin()); thrust::cuda::tag cuda_tag; thrust::system::cuda::detail::detail::stable_radix_sort_by_key(cuda_tag, d_keys.begin(), d_keys.end(), d_values.begin()); ASSERT_ALMOST_EQUAL(h_keys, d_keys); ASSERT_ALMOST_EQUAL(h_values, d_values); } }; VariableUnitTest<TestRadixSortByKeyShortValues, UnsignedIntegerTypes> TestRadixSortByKeyShortValuesInstance; template <typename T> struct TestRadixSortByKeyLongLongValues { void operator()(const size_t n) { thrust::host_vector<T> h_keys = unittest::random_integers<T>(n); thrust::device_vector<T> d_keys = h_keys; thrust::host_vector<long long> h_values(n); thrust::device_vector<long long> d_values(n); thrust::sequence(h_values.begin(), h_values.end()); thrust::sequence(d_values.begin(), d_values.end()); thrust::stable_sort_by_key(h_keys.begin(), h_keys.end(), h_values.begin()); thrust::cuda::tag cuda_tag; thrust::system::cuda::detail::detail::stable_radix_sort_by_key(cuda_tag, d_keys.begin(), d_keys.end(), d_values.begin()); ASSERT_ALMOST_EQUAL(h_keys, d_keys); ASSERT_ALMOST_EQUAL(h_values, d_values); } }; VariableUnitTest<TestRadixSortByKeyLongLongValues, UnsignedIntegerTypes> TestRadixSortByKeyLongLongValuesInstance; #endif // THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
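// The test above checks the internal stable_radix_sort_by_key entry point against
// the host path. A minimal sketch of the public API it mirrors,
// thrust::stable_sort_by_key, which is what callers normally use; the sizes and
// key values below are arbitrary.
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>

int main() {
    thrust::device_vector<unsigned int> keys(4);
    keys[0] = 3; keys[1] = 1; keys[2] = 3; keys[3] = 0;
    thrust::device_vector<int> values(4);
    thrust::sequence(values.begin(), values.end());          // 0 1 2 3
    // Stable: equal keys keep the original relative order of their values.
    thrust::stable_sort_by_key(keys.begin(), keys.end(), values.begin());
    thrust::host_vector<int> h = values;                      // expect 3 1 0 2
    for (int i = 0; i < 4; ++i) printf("%d ", (int)h[i]);
    printf("\n");
    return 0;
}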
0d857cc1cf5dbb484198df8b84ee1fe20a510073.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void dotProduct(int *d_baseDenseRIV, int *d_multiplierBlock, int *d_multiplierValueCount, int *d_output, int multiplierCount, int displacement){ int id = blockIdx.x*blockDim.x + threadIdx.x; if (id>=multiplierCount) return; int *d_multiplierLocations = d_multiplierBlock +(id*2*displacement); int *d_multiplierValues = d_multiplierLocations+displacement; //int *d_baseStop = d_baseLocations+baseValueCount; int *d_multiplierStop = d_multiplierLocations+(d_multiplierValueCount[id]); //printf("spacing: %d, %d\n", (d_multiplierStop-d_multiplierLocations), d_multiplierValueCount[id]); d_output+=id; *d_output= 0; while(d_multiplierLocations< d_multiplierStop){ *d_output += (*d_multiplierValues)*(d_baseDenseRIV[*d_multiplierLocations]); d_multiplierValues++; d_multiplierLocations++; } } __global__ void getMagnitude(float *d_magnitudes, int *d_values, int *valueCount, int RIVCount, int memSectionSize){ //consider changing to single operation per thread int id = blockIdx.x*blockDim.x + threadIdx.x; if (id>=RIVCount) return; d_values+=(id*(memSectionSize)); int *stop = d_values+ valueCount[id]; float *magnitude = d_magnitudes+id; *magnitude = 0; for( ;d_values<stop; d_values++){ *magnitude +=(*d_values)*(*d_values); } *magnitude = sqrt(*magnitude); } int* getDotProducts(sparseRIV *inputs, int baseNumber, int RIVCount, int maxSize){ int remainingSet = RIVCount-(baseNumber+1); int *output = (int*)malloc(remainingSet*sizeof(int)); int *output_slider=output; //badly written, fix it int *d_baseDenseRIV = RIVKeyData.d_OpenSlot; int *baseDenseRIV = mapS2D(inputs[baseNumber]);//as a byproduct, also places a denseRIV form of the input into RIVKeyData.d_OpenSlot at the beginning; HANDLE_ERROR (hipMemcpy (d_baseDenseRIV, baseDenseRIV, RIVKeyData.RIVsize*sizeof(int), hipMemcpyHostToDevice)); int *d_slider = RIVKeyData.d_OpenSlot+RIVKeyData.RIVsize; int *valueCounts = (int*)malloc(remainingSet*sizeof(int)); int *d_valueCounts = d_slider; d_slider+=remainingSet; int *d_output = d_slider; d_slider+=remainingSet; int *d_multiplierBlock = d_slider; int i=baseNumber+1; while(i<RIVCount){ int doneSoFar = i; while((i<RIVCount) && (d_slider < RIVKeyData.d_SlotEnd)){ if(inputs[i].boolean){ //each set of locations and then values is layed out linear in GPU ram with buffer the size of the largest RIV HANDLE_ERROR (hipMemcpy (d_slider, inputs[i].locations, inputs[i].count*sizeof(int), hipMemcpyHostToDevice)); d_slider +=maxSize; HANDLE_ERROR (hipMemcpy (d_slider, inputs[i].values, inputs[i].count*sizeof(int), hipMemcpyHostToDevice)); d_slider +=maxSize; //printf("%p", d_slider); valueCounts[i-doneSoFar] = inputs[i].count; }else{ valueCounts[i-doneSoFar] = 0; } i++; } int thisBlock = i-doneSoFar; HANDLE_ERROR (hipMemcpy (d_valueCounts, valueCounts, thisBlock*sizeof(int), hipMemcpyHostToDevice)); //d_slider+= remainingSet; int blockSize; int minGridSize = 0; int gridSize; hipOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, dotProduct); gridSize = ((thisBlock + blockSize -1) / blockSize)+1; hipLaunchKernelGGL(( dotProduct), dim3(gridSize),dim3(blockSize), 0, 0, d_baseDenseRIV, d_multiplierBlock, d_valueCounts, d_output, thisBlock, maxSize); HANDLE_ERROR (hipMemcpy (output_slider, d_output, thisBlock*sizeof(int), hipMemcpyDeviceToHost)); //printf("did a thing"); output_slider+=thisBlock; d_slider = d_multiplierBlock; } return output; } float* getMagnitudes(sparseRIV *dataSet, int RIVCount, int maxSize){ //int **values = 
(int**)malloc(RIVCount*sizeof(int*)); /*int **d_values; HANDLE_ERROR (hipMalloc((void***)&d_values, RIVCount*sizeof(int*))); */ int *valueCounts = (int*)malloc(RIVCount*sizeof(int)); float *magnitudes = (float*)malloc(RIVCount*sizeof(float)); HANDLE_ERROR (hipMalloc((void**)&RIVKeyData.d_magnitudes, RIVCount*sizeof(float))); //HANDLE_ERROR(hipMemset(RIVKeyData.d_magnitudes, 0, RIVCount*(sizeof(float)))); float *magnitudes_slider = magnitudes; int *d_slider = RIVKeyData.d_OpenSlot;//+RIVCount; int *d_valueCounts = d_slider; d_slider+=RIVCount; int *d_valuesBlock = d_slider; //printf("magnitudesSlot: %d, %d\n", d_slider-RIVKeyData.d_OpenSlot, RIVCount*sizeof(float)); //prepare for overflow? //printf("%d\n", RIVCount); int i=0; while(i<RIVCount){ int doneSoFar = i; while((d_slider<RIVKeyData.d_SlotEnd) && (i<RIVCount)){ HANDLE_ERROR (hipMemcpy (d_slider, dataSet[i].values, dataSet[i].count*sizeof(int), hipMemcpyHostToDevice)); d_slider +=maxSize; valueCounts[i-doneSoFar] = dataSet[i].count; i++; //printf("%d, %d, %d, %d\n", d_slider-RIVKeyData.d_OpenSlot, doneSoFar, i, RIVCount); } int thisBlock = i-doneSoFar; //HANDLE_ERROR (hipMemcpy (d_values, values, RIVCount*sizeof(int*), hipMemcpyHostToDevice)); HANDLE_ERROR (hipMemcpy (d_valueCounts, valueCounts, thisBlock*sizeof(int), hipMemcpyHostToDevice)); int blockSize; int minGridSize = 0; int gridSize; hipOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, getMagnitude); //HANDLE_ERROR (hipMalloc((void**)&d_magnitudes, RIVCount*sizeof(float))); gridSize = ((thisBlock + blockSize -1) / blockSize)+1; hipLaunchKernelGGL(( getMagnitude), dim3(gridSize),dim3(blockSize), 0, 0, RIVKeyData.d_magnitudes, d_valuesBlock, d_valueCounts, thisBlock, maxSize); //printf("got here"); HANDLE_ERROR (hipMemcpy (magnitudes_slider, RIVKeyData.d_magnitudes, thisBlock*sizeof(float), hipMemcpyDeviceToHost)); magnitudes_slider+=thisBlock; d_slider =d_valuesBlock; } for(int i=0; i<RIVCount; i++){ dataSet[i].magnitude = magnitudes[i]; //printf("%f\n", dataSet[i].magnitude); } return magnitudes; } sparseRIV compileD2SOrdered(denseRIV input){ //int *valueCount; //*RIVsize = 0; int *d_valueCount; HANDLE_ERROR(hipMalloc((void**)&d_valueCount, sizeof(int))); HANDLE_ERROR(hipMemset(d_valueCount, 0, sizeof(int))); int *d_locations = RIVKeyData.d_OpenSlot+RIVKeyData.RIVsize; //HANDLE_ERROR (hipMemcpy (d_valueCount, valueCount, sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR (hipMemcpy (RIVKeyData.d_OpenSlot, input.values, RIVKeyData.RIVsize*sizeof(int), hipMemcpyHostToDevice)); int blockSize; int minGridSize = 0; int gridSize; hipOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, D2SLocations); gridSize = ((RIVKeyData.RIVsize + blockSize -1) / blockSize)+1; hipLaunchKernelGGL(( D2SLocations) , dim3(gridSize),dim3(blockSize), 0, 0, RIVKeyData.d_OpenSlot, d_locations, d_valueCount, RIVKeyData.RIVsize); hipDeviceSynchronize(); sparseRIV output; HANDLE_ERROR (hipMemcpy (&output.count, d_valueCount, sizeof(int), hipMemcpyDeviceToHost)); output.values = (int*)malloc(output.count*sizeof(int)); if(output.values ==NULL) printf("malloc fail 246"); output.locations = (int*)malloc(output.count*sizeof(int)); if(output.locations ==NULL) printf("malloc fail 248"); HANDLE_ERROR (hipMemcpy (output.locations, d_locations, (output.count)*sizeof(int), hipMemcpyDeviceToHost)); qsort(output.locations, output.count, sizeof(int), compareLocations); for(int i=0; i<output.count; i++){ output.values[i] = input.values[output.locations[i]]; } free(input.values); hipFree(d_valueCount); return 
output; } int compareLocations(const void *first, const void *second){ int *f = (int*)first; int *s = (int*)second; return(*f - *s); } __global__ void D2SLocations(int *d_DenseRIV, int* d_SparseLocations, int* d_NZCount, int d_DenseSize){ int id = blockIdx.x*blockDim.x + threadIdx.x; if(id>=d_DenseSize) return; if(!d_DenseRIV[id]) return; int sparseSlot = atomicAdd(d_NZCount, 1); d_SparseLocations[sparseSlot] = id; }
0d857cc1cf5dbb484198df8b84ee1fe20a510073.cu
__global__ void dotProduct(int *d_baseDenseRIV, int *d_multiplierBlock, int *d_multiplierValueCount, int *d_output, int multiplierCount, int displacement){ int id = blockIdx.x*blockDim.x + threadIdx.x; if (id>=multiplierCount) return; int *d_multiplierLocations = d_multiplierBlock +(id*2*displacement); int *d_multiplierValues = d_multiplierLocations+displacement; //int *d_baseStop = d_baseLocations+baseValueCount; int *d_multiplierStop = d_multiplierLocations+(d_multiplierValueCount[id]); //printf("spacing: %d, %d\n", (d_multiplierStop-d_multiplierLocations), d_multiplierValueCount[id]); d_output+=id; *d_output= 0; while(d_multiplierLocations< d_multiplierStop){ *d_output += (*d_multiplierValues)*(d_baseDenseRIV[*d_multiplierLocations]); d_multiplierValues++; d_multiplierLocations++; } } __global__ void getMagnitude(float *d_magnitudes, int *d_values, int *valueCount, int RIVCount, int memSectionSize){ //consider changing to single operation per thread int id = blockIdx.x*blockDim.x + threadIdx.x; if (id>=RIVCount) return; d_values+=(id*(memSectionSize)); int *stop = d_values+ valueCount[id]; float *magnitude = d_magnitudes+id; *magnitude = 0; for( ;d_values<stop; d_values++){ *magnitude +=(*d_values)*(*d_values); } *magnitude = sqrt(*magnitude); } int* getDotProducts(sparseRIV *inputs, int baseNumber, int RIVCount, int maxSize){ int remainingSet = RIVCount-(baseNumber+1); int *output = (int*)malloc(remainingSet*sizeof(int)); int *output_slider=output; //badly written, fix it int *d_baseDenseRIV = RIVKeyData.d_OpenSlot; int *baseDenseRIV = mapS2D(inputs[baseNumber]);//as a byproduct, also places a denseRIV form of the input into RIVKeyData.d_OpenSlot at the beginning; HANDLE_ERROR (cudaMemcpy (d_baseDenseRIV, baseDenseRIV, RIVKeyData.RIVsize*sizeof(int), cudaMemcpyHostToDevice)); int *d_slider = RIVKeyData.d_OpenSlot+RIVKeyData.RIVsize; int *valueCounts = (int*)malloc(remainingSet*sizeof(int)); int *d_valueCounts = d_slider; d_slider+=remainingSet; int *d_output = d_slider; d_slider+=remainingSet; int *d_multiplierBlock = d_slider; int i=baseNumber+1; while(i<RIVCount){ int doneSoFar = i; while((i<RIVCount) && (d_slider < RIVKeyData.d_SlotEnd)){ if(inputs[i].boolean){ //each set of locations and then values is layed out linear in GPU ram with buffer the size of the largest RIV HANDLE_ERROR (cudaMemcpy (d_slider, inputs[i].locations, inputs[i].count*sizeof(int), cudaMemcpyHostToDevice)); d_slider +=maxSize; HANDLE_ERROR (cudaMemcpy (d_slider, inputs[i].values, inputs[i].count*sizeof(int), cudaMemcpyHostToDevice)); d_slider +=maxSize; //printf("%p", d_slider); valueCounts[i-doneSoFar] = inputs[i].count; }else{ valueCounts[i-doneSoFar] = 0; } i++; } int thisBlock = i-doneSoFar; HANDLE_ERROR (cudaMemcpy (d_valueCounts, valueCounts, thisBlock*sizeof(int), cudaMemcpyHostToDevice)); //d_slider+= remainingSet; int blockSize; int minGridSize = 0; int gridSize; cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, dotProduct); gridSize = ((thisBlock + blockSize -1) / blockSize)+1; dotProduct<<<gridSize,blockSize>>>(d_baseDenseRIV, d_multiplierBlock, d_valueCounts, d_output, thisBlock, maxSize); HANDLE_ERROR (cudaMemcpy (output_slider, d_output, thisBlock*sizeof(int), cudaMemcpyDeviceToHost)); //printf("did a thing"); output_slider+=thisBlock; d_slider = d_multiplierBlock; } return output; } float* getMagnitudes(sparseRIV *dataSet, int RIVCount, int maxSize){ //int **values = (int**)malloc(RIVCount*sizeof(int*)); /*int **d_values; HANDLE_ERROR (cudaMalloc((void***)&d_values, 
RIVCount*sizeof(int*))); */ int *valueCounts = (int*)malloc(RIVCount*sizeof(int)); float *magnitudes = (float*)malloc(RIVCount*sizeof(float)); HANDLE_ERROR (cudaMalloc((void**)&RIVKeyData.d_magnitudes, RIVCount*sizeof(float))); //HANDLE_ERROR(cudaMemset(RIVKeyData.d_magnitudes, 0, RIVCount*(sizeof(float)))); float *magnitudes_slider = magnitudes; int *d_slider = RIVKeyData.d_OpenSlot;//+RIVCount; int *d_valueCounts = d_slider; d_slider+=RIVCount; int *d_valuesBlock = d_slider; //printf("magnitudesSlot: %d, %d\n", d_slider-RIVKeyData.d_OpenSlot, RIVCount*sizeof(float)); //prepare for overflow? //printf("%d\n", RIVCount); int i=0; while(i<RIVCount){ int doneSoFar = i; while((d_slider<RIVKeyData.d_SlotEnd) && (i<RIVCount)){ HANDLE_ERROR (cudaMemcpy (d_slider, dataSet[i].values, dataSet[i].count*sizeof(int), cudaMemcpyHostToDevice)); d_slider +=maxSize; valueCounts[i-doneSoFar] = dataSet[i].count; i++; //printf("%d, %d, %d, %d\n", d_slider-RIVKeyData.d_OpenSlot, doneSoFar, i, RIVCount); } int thisBlock = i-doneSoFar; //HANDLE_ERROR (cudaMemcpy (d_values, values, RIVCount*sizeof(int*), cudaMemcpyHostToDevice)); HANDLE_ERROR (cudaMemcpy (d_valueCounts, valueCounts, thisBlock*sizeof(int), cudaMemcpyHostToDevice)); int blockSize; int minGridSize = 0; int gridSize; cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, getMagnitude); //HANDLE_ERROR (cudaMalloc((void**)&d_magnitudes, RIVCount*sizeof(float))); gridSize = ((thisBlock + blockSize -1) / blockSize)+1; getMagnitude<<<gridSize,blockSize>>>(RIVKeyData.d_magnitudes, d_valuesBlock, d_valueCounts, thisBlock, maxSize); //printf("got here"); HANDLE_ERROR (cudaMemcpy (magnitudes_slider, RIVKeyData.d_magnitudes, thisBlock*sizeof(float), cudaMemcpyDeviceToHost)); magnitudes_slider+=thisBlock; d_slider =d_valuesBlock; } for(int i=0; i<RIVCount; i++){ dataSet[i].magnitude = magnitudes[i]; //printf("%f\n", dataSet[i].magnitude); } return magnitudes; } sparseRIV compileD2SOrdered(denseRIV input){ //int *valueCount; //*RIVsize = 0; int *d_valueCount; HANDLE_ERROR(cudaMalloc((void**)&d_valueCount, sizeof(int))); HANDLE_ERROR(cudaMemset(d_valueCount, 0, sizeof(int))); int *d_locations = RIVKeyData.d_OpenSlot+RIVKeyData.RIVsize; //HANDLE_ERROR (cudaMemcpy (d_valueCount, valueCount, sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR (cudaMemcpy (RIVKeyData.d_OpenSlot, input.values, RIVKeyData.RIVsize*sizeof(int), cudaMemcpyHostToDevice)); int blockSize; int minGridSize = 0; int gridSize; cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, D2SLocations); gridSize = ((RIVKeyData.RIVsize + blockSize -1) / blockSize)+1; D2SLocations <<<gridSize,blockSize>>> (RIVKeyData.d_OpenSlot, d_locations, d_valueCount, RIVKeyData.RIVsize); cudaDeviceSynchronize(); sparseRIV output; HANDLE_ERROR (cudaMemcpy (&output.count, d_valueCount, sizeof(int), cudaMemcpyDeviceToHost)); output.values = (int*)malloc(output.count*sizeof(int)); if(output.values ==NULL) printf("malloc fail 246"); output.locations = (int*)malloc(output.count*sizeof(int)); if(output.locations ==NULL) printf("malloc fail 248"); HANDLE_ERROR (cudaMemcpy (output.locations, d_locations, (output.count)*sizeof(int), cudaMemcpyDeviceToHost)); qsort(output.locations, output.count, sizeof(int), compareLocations); for(int i=0; i<output.count; i++){ output.values[i] = input.values[output.locations[i]]; } free(input.values); cudaFree(d_valueCount); return output; } int compareLocations(const void *first, const void *second){ int *f = (int*)first; int *s = (int*)second; return(*f - *s); } __global__ 
void D2SLocations(int *d_DenseRIV, int* d_SparseLocations, int* d_NZCount, int d_DenseSize){ int id = blockIdx.x*blockDim.x + threadIdx.x; if(id>=d_DenseSize) return; if(!d_DenseRIV[id]) return; int sparseSlot = atomicAdd(d_NZCount, 1); d_SparseLocations[sparseSlot] = id; }
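// D2SLocations above compacts the indices of non-zero entries by having each
// thread atomicAdd into a shared counter to claim an output slot. A self-contained
// sketch of that dense-to-sparse pattern (the names compact_nonzero etc. are
// illustrative, not from the file). Note the output order is non-deterministic,
// which is why the original follows the kernel with a qsort on the locations.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void compact_nonzero(const int* dense, int n, int* locations, int* count) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n || dense[i] == 0) return;
    int slot = atomicAdd(count, 1);   // claim a unique output slot
    locations[slot] = i;
}

int main() {
    const int n = 8;
    int h_dense[n] = {0, 5, 0, 0, 7, 0, 2, 0};
    int *d_dense, *d_loc, *d_count, h_count = 0;
    cudaMalloc(&d_dense, n * sizeof(int));
    cudaMalloc(&d_loc, n * sizeof(int));
    cudaMalloc(&d_count, sizeof(int));
    cudaMemcpy(d_dense, h_dense, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(d_count, 0, sizeof(int));
    compact_nonzero<<<1, 32>>>(d_dense, n, d_loc, d_count);
    cudaMemcpy(&h_count, d_count, sizeof(int), cudaMemcpyDeviceToHost);
    int h_loc[n];
    cudaMemcpy(h_loc, d_loc, h_count * sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d non-zeros at indices:", h_count);
    for (int i = 0; i < h_count; ++i) printf(" %d", h_loc[i]);
    printf("\n");
    cudaFree(d_dense); cudaFree(d_loc); cudaFree(d_count);
    return 0;
}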
bb29473236073dc44b2df0b52654c2ce95250564.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "nms_kern.cuh" #include <cassert> #include <algorithm> namespace { #if __CUDACC_VER_MAJOR__ >= 9 #define __shfl_down(x, y) __shfl_down_sync(0xffffffffu, x, y) #endif // each thread computs one bit const int THREADS_PER_BLOCK = 64; const int WARP_SIZE = 32; // use aligned structure for large memory transaction struct __align__(16) Box { float x0, y0, x1, y1; }; //! return whether IoU(a, b) > thresh __device__ __forceinline__ bool box_iou(Box a, Box b, float thresh) { float left = max(a.x0, b.x0), right = min(a.x1, b.x1); float top = max(a.y0, b.y0), bottom = min(a.y1, b.y1); float width = max(right - left, 0.f), height = max(bottom - top, 0.f); float interS = width * height; float Sa = (a.x1 - a.x0) * (a.y1 - a.y0); float Sb = (b.x1 - b.x0) * (b.y1 - b.y0); return interS > (Sa + Sb - interS) * thresh; } //! store uint64_t with cache streaming __device__ __forceinline__ void store_u64_cs(uint64_t *ptr, uint64_t val) { asm volatile("st.cs.u64 [%0], %1;" : : "l"(ptr), "l"(val)); } //! load uint64_t with cache streaming __device__ __forceinline__ uint64_t load_u64_cs(const uint64_t *ptr) { uint64_t val; asm volatile("ld.cs.u64 %0, [%1];" : "=l"(val) : "l"(ptr)); return val; } __global__ void kern_gen_mask( const int nr_boxes, const float nms_overlap_thresh, const Box *dev_boxes, const int dev_mask_width, uint64_t *dev_mask) { const int box_group_row = blockIdx.y, box_group_col = blockIdx.x; if (box_group_row > box_group_col) return; const int row_nr_boxes = min( nr_boxes - box_group_row * THREADS_PER_BLOCK, THREADS_PER_BLOCK), col_nr_boxes = min( nr_boxes - box_group_col * THREADS_PER_BLOCK, THREADS_PER_BLOCK); __shared__ Box block_boxes[THREADS_PER_BLOCK]; if (threadIdx.x < col_nr_boxes) { block_boxes[threadIdx.x] = dev_boxes[ THREADS_PER_BLOCK * box_group_col + threadIdx.x]; } __syncthreads(); if (threadIdx.x < row_nr_boxes) { const int cur_box_idx = THREADS_PER_BLOCK * box_group_row + threadIdx.x; Box cur_box = dev_boxes[cur_box_idx]; uint64_t result = 0; const int start = (box_group_row == box_group_col) ? threadIdx.x + 1 : // blocks on diagnal 0; for (int i = start; i < col_nr_boxes; ++ i) { result |= static_cast<uint64_t>( box_iou(cur_box, block_boxes[i], nms_overlap_thresh)) << i; } store_u64_cs( &dev_mask[cur_box_idx * dev_mask_width + box_group_col], result); } } //! true -> ~0, false -> 0 __device__ __forceinline__ uint32_t bool_as_u32_mask(bool v) { return (!v) - 1; } //! 
return min value of val in current warp __device__ __forceinline__ uint32_t warp_reduce_min_brdcst(uint32_t val) { __shared__ uint32_t ans; static_assert(WARP_SIZE == 32, "warp size != 32"); #pragma unroll for (uint32_t offset = WARP_SIZE / 2; offset; offset /= 2) val = min(val, __shfl_down(val, offset)); if (!threadIdx.x) ans = val; __syncthreads(); return ans; } struct BitwiseOrArgs { uint64_t *dst; const uint64_t *src; uint32_t size; }; __device__ __forceinline__ void bitwise_or_single_warp(BitwiseOrArgs args) { uint64_t * __restrict__ dst = args.dst; const uint64_t * __restrict__ src = args.src; uint32_t size = args.size; for (uint32_t i = threadIdx.x; i < size; i += WARP_SIZE) { dst[i] |= load_u64_cs(&src[i]); } } __global__ void kern_gen_indices( uint32_t nr_boxes, uint32_t max_output, uint32_t overlap_mask_width, const uint64_t * __restrict__ overlap_mask, uint64_t *__restrict__ rm_mask, uint32_t * __restrict__ out_idx, uint32_t * __restrict__ out_size) { __shared__ uint32_t out_pos; __shared__ BitwiseOrArgs bitwise_or_args; const uint32_t nr_box_blocks = DIVUP(nr_boxes, 64); if (!threadIdx.x) { uint32_t cnt = nr_box_blocks * 64 - nr_boxes; // mark the padded boxes as having been removed rm_mask[nr_box_blocks - 1] = ((1ull << cnt) - 1) << (64 - cnt); out_pos = 0; } __syncthreads(); uint32_t box_block_id = threadIdx.x, th0_box_block_id = 0; while (th0_box_block_id < nr_box_blocks) { bool in_range = box_block_id < nr_box_blocks; uint64_t cur_mask = ~rm_mask[box_block_id & bool_as_u32_mask(in_range)]; uint32_t min_box_block_id = warp_reduce_min_brdcst( box_block_id | bool_as_u32_mask(!(in_range && cur_mask))); if (min_box_block_id + 1) { // min_box_block_id != UINT32_MAX, so at least one thread finds a // un-removed box if (min_box_block_id == box_block_id) { // exactly one thread can take this path uint32_t box_id_in_block = __ffsll(cur_mask) - 1, box_id = box_block_id * 64 + box_id_in_block; // so this box would not be processed again rm_mask[box_block_id] |= 1ull << box_id_in_block; bitwise_or_args.dst = &rm_mask[box_block_id]; bitwise_or_args.src = &overlap_mask[box_id * overlap_mask_width + box_block_id]; bitwise_or_args.size = nr_box_blocks - box_block_id; out_idx[out_pos ++] = box_id; } __syncthreads(); if (out_pos == max_output) break; bitwise_or_single_warp(bitwise_or_args); // skip the blocks before min_box_block_id th0_box_block_id = min_box_block_id; box_block_id = min_box_block_id + threadIdx.x; } else { th0_box_block_id += WARP_SIZE; box_block_id += WARP_SIZE; } } if (out_pos < max_output) { // fill the values after out_pos uint32_t val = out_idx[out_pos - 1]; for (uint32_t i = out_pos + threadIdx.x; i < max_output; i += WARP_SIZE) { out_idx[i] = val; } } if (!threadIdx.x) { *out_size = out_pos; } } } // anonymous namespace void mgb::opr::standalone::nms::launch_gen_mask( const int nr_boxes, const float nms_overlap_thresh, const float *dev_boxes, const int dev_mask_width, uint64_t *dev_mask, hipStream_t stream) { dim3 blocks(DIVUP(nr_boxes, THREADS_PER_BLOCK), DIVUP(nr_boxes, THREADS_PER_BLOCK)); dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( kern_gen_mask), dim3(blocks), dim3(threads), 0, stream, nr_boxes, nms_overlap_thresh, reinterpret_cast<const Box*>(dev_boxes), dev_mask_width, dev_mask); } void mgb::opr::standalone::nms::launch_gen_indices( int nr_boxes, int max_output, int overlap_mask_width, const uint64_t *overlap_mask, uint64_t *rm_mask, uint32_t *out_idx, uint32_t *out_size, hipStream_t stream) { hipLaunchKernelGGL(( kern_gen_indices), dim3(1), 
dim3(WARP_SIZE), 0, stream, nr_boxes, max_output, overlap_mask_width, overlap_mask, rm_mask, out_idx, out_size); } // vim: ft=cuda syntax=cuda.doxygen
bb29473236073dc44b2df0b52654c2ce95250564.cu
#include "nms_kern.cuh" #include <cassert> #include <algorithm> namespace { #if __CUDACC_VER_MAJOR__ >= 9 #define __shfl_down(x, y) __shfl_down_sync(0xffffffffu, x, y) #endif // each thread computs one bit const int THREADS_PER_BLOCK = 64; const int WARP_SIZE = 32; // use aligned structure for large memory transaction struct __align__(16) Box { float x0, y0, x1, y1; }; //! return whether IoU(a, b) > thresh __device__ __forceinline__ bool box_iou(Box a, Box b, float thresh) { float left = max(a.x0, b.x0), right = min(a.x1, b.x1); float top = max(a.y0, b.y0), bottom = min(a.y1, b.y1); float width = max(right - left, 0.f), height = max(bottom - top, 0.f); float interS = width * height; float Sa = (a.x1 - a.x0) * (a.y1 - a.y0); float Sb = (b.x1 - b.x0) * (b.y1 - b.y0); return interS > (Sa + Sb - interS) * thresh; } //! store uint64_t with cache streaming __device__ __forceinline__ void store_u64_cs(uint64_t *ptr, uint64_t val) { asm volatile("st.cs.u64 [%0], %1;" : : "l"(ptr), "l"(val)); } //! load uint64_t with cache streaming __device__ __forceinline__ uint64_t load_u64_cs(const uint64_t *ptr) { uint64_t val; asm volatile("ld.cs.u64 %0, [%1];" : "=l"(val) : "l"(ptr)); return val; } __global__ void kern_gen_mask( const int nr_boxes, const float nms_overlap_thresh, const Box *dev_boxes, const int dev_mask_width, uint64_t *dev_mask) { const int box_group_row = blockIdx.y, box_group_col = blockIdx.x; if (box_group_row > box_group_col) return; const int row_nr_boxes = min( nr_boxes - box_group_row * THREADS_PER_BLOCK, THREADS_PER_BLOCK), col_nr_boxes = min( nr_boxes - box_group_col * THREADS_PER_BLOCK, THREADS_PER_BLOCK); __shared__ Box block_boxes[THREADS_PER_BLOCK]; if (threadIdx.x < col_nr_boxes) { block_boxes[threadIdx.x] = dev_boxes[ THREADS_PER_BLOCK * box_group_col + threadIdx.x]; } __syncthreads(); if (threadIdx.x < row_nr_boxes) { const int cur_box_idx = THREADS_PER_BLOCK * box_group_row + threadIdx.x; Box cur_box = dev_boxes[cur_box_idx]; uint64_t result = 0; const int start = (box_group_row == box_group_col) ? threadIdx.x + 1 : // blocks on diagnal 0; for (int i = start; i < col_nr_boxes; ++ i) { result |= static_cast<uint64_t>( box_iou(cur_box, block_boxes[i], nms_overlap_thresh)) << i; } store_u64_cs( &dev_mask[cur_box_idx * dev_mask_width + box_group_col], result); } } //! true -> ~0, false -> 0 __device__ __forceinline__ uint32_t bool_as_u32_mask(bool v) { return (!v) - 1; } //! 
return min value of val in current warp __device__ __forceinline__ uint32_t warp_reduce_min_brdcst(uint32_t val) { __shared__ uint32_t ans; static_assert(WARP_SIZE == 32, "warp size != 32"); #pragma unroll for (uint32_t offset = WARP_SIZE / 2; offset; offset /= 2) val = min(val, __shfl_down(val, offset)); if (!threadIdx.x) ans = val; __syncthreads(); return ans; } struct BitwiseOrArgs { uint64_t *dst; const uint64_t *src; uint32_t size; }; __device__ __forceinline__ void bitwise_or_single_warp(BitwiseOrArgs args) { uint64_t * __restrict__ dst = args.dst; const uint64_t * __restrict__ src = args.src; uint32_t size = args.size; for (uint32_t i = threadIdx.x; i < size; i += WARP_SIZE) { dst[i] |= load_u64_cs(&src[i]); } } __global__ void kern_gen_indices( uint32_t nr_boxes, uint32_t max_output, uint32_t overlap_mask_width, const uint64_t * __restrict__ overlap_mask, uint64_t *__restrict__ rm_mask, uint32_t * __restrict__ out_idx, uint32_t * __restrict__ out_size) { __shared__ uint32_t out_pos; __shared__ BitwiseOrArgs bitwise_or_args; const uint32_t nr_box_blocks = DIVUP(nr_boxes, 64); if (!threadIdx.x) { uint32_t cnt = nr_box_blocks * 64 - nr_boxes; // mark the padded boxes as having been removed rm_mask[nr_box_blocks - 1] = ((1ull << cnt) - 1) << (64 - cnt); out_pos = 0; } __syncthreads(); uint32_t box_block_id = threadIdx.x, th0_box_block_id = 0; while (th0_box_block_id < nr_box_blocks) { bool in_range = box_block_id < nr_box_blocks; uint64_t cur_mask = ~rm_mask[box_block_id & bool_as_u32_mask(in_range)]; uint32_t min_box_block_id = warp_reduce_min_brdcst( box_block_id | bool_as_u32_mask(!(in_range && cur_mask))); if (min_box_block_id + 1) { // min_box_block_id != UINT32_MAX, so at least one thread finds a // un-removed box if (min_box_block_id == box_block_id) { // exactly one thread can take this path uint32_t box_id_in_block = __ffsll(cur_mask) - 1, box_id = box_block_id * 64 + box_id_in_block; // so this box would not be processed again rm_mask[box_block_id] |= 1ull << box_id_in_block; bitwise_or_args.dst = &rm_mask[box_block_id]; bitwise_or_args.src = &overlap_mask[box_id * overlap_mask_width + box_block_id]; bitwise_or_args.size = nr_box_blocks - box_block_id; out_idx[out_pos ++] = box_id; } __syncthreads(); if (out_pos == max_output) break; bitwise_or_single_warp(bitwise_or_args); // skip the blocks before min_box_block_id th0_box_block_id = min_box_block_id; box_block_id = min_box_block_id + threadIdx.x; } else { th0_box_block_id += WARP_SIZE; box_block_id += WARP_SIZE; } } if (out_pos < max_output) { // fill the values after out_pos uint32_t val = out_idx[out_pos - 1]; for (uint32_t i = out_pos + threadIdx.x; i < max_output; i += WARP_SIZE) { out_idx[i] = val; } } if (!threadIdx.x) { *out_size = out_pos; } } } // anonymous namespace void mgb::opr::standalone::nms::launch_gen_mask( const int nr_boxes, const float nms_overlap_thresh, const float *dev_boxes, const int dev_mask_width, uint64_t *dev_mask, cudaStream_t stream) { dim3 blocks(DIVUP(nr_boxes, THREADS_PER_BLOCK), DIVUP(nr_boxes, THREADS_PER_BLOCK)); dim3 threads(THREADS_PER_BLOCK); kern_gen_mask<<<blocks, threads, 0, stream>>>( nr_boxes, nms_overlap_thresh, reinterpret_cast<const Box*>(dev_boxes), dev_mask_width, dev_mask); } void mgb::opr::standalone::nms::launch_gen_indices( int nr_boxes, int max_output, int overlap_mask_width, const uint64_t *overlap_mask, uint64_t *rm_mask, uint32_t *out_idx, uint32_t *out_size, cudaStream_t stream) { kern_gen_indices<<<1, WARP_SIZE, 0, stream>>>( nr_boxes, max_output, 
overlap_mask_width, overlap_mask, rm_mask, out_idx, out_size); } // vim: ft=cuda syntax=cuda.doxygen
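// warp_reduce_min_brdcst above folds a value across one warp with __shfl_down and
// broadcasts the result through shared memory. A minimal sketch of the same
// shuffle-based reduction (a sum rather than a min), written directly against the
// CUDA 9+ *_sync intrinsic that the macro at the top of the file maps __shfl_down
// onto; the kernel name warp_sum is illustrative.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void warp_sum(const int* in, int* out) {
    int val = in[threadIdx.x];            // a single warp: threadIdx.x in [0, 32)
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffffu, val, offset);
    if (threadIdx.x == 0) *out = val;     // lane 0 ends up holding the warp-wide sum
}

int main() {
    int h_in[32], *d_in, *d_out, h_out = 0;
    for (int i = 0; i < 32; ++i) h_in[i] = i;          // expected sum: 496
    cudaMalloc(&d_in, 32 * sizeof(int));
    cudaMalloc(&d_out, sizeof(int));
    cudaMemcpy(d_in, h_in, 32 * sizeof(int), cudaMemcpyHostToDevice);
    warp_sum<<<1, 32>>>(d_in, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("warp sum = %d\n", h_out);
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}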
44786834b9aca1bee08f7e63dc5ed326cf7aa33b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>

static const int WORK_SIZE = 10;

__global__ void sort(int* a, int i, int n)
{
    int tid = threadIdx.x;
    int p;
    int temp;
    if (i % 2 == 0) {
        p = tid * 2;
        if (a[p] > a[p + 1]) {
            temp = a[p];
            a[p] = a[p + 1];
            a[p + 1] = temp;
        }
    } else {
        p = tid * 2 + 1;
        if (p < n - 1) {
            if (a[p] > a[p + 1]) {
                temp = a[p];
                a[p] = a[p + 1];
                a[p + 1] = temp;
            }
        }
    }
}

int main(void)
{
    int a[WORK_SIZE];
    int i;
    int* da;
    hipMalloc((void**)&da, sizeof(int) * WORK_SIZE);
    for (i = 0; i < WORK_SIZE; i++) {
        printf("%d:", i);
        scanf("%d", &a[i]);
    }
    hipMemcpy(da, a, sizeof(int) * WORK_SIZE, hipMemcpyHostToDevice);
    for (i = 0; i < WORK_SIZE; i++) {
        sort << <1, WORK_SIZE / 2 >> > (da, i, WORK_SIZE);
    }
    hipDeviceSynchronize(); // Wait for the GPU launched work to complete
    hipGetLastError();
    hipMemcpy(a, da, sizeof(int) * WORK_SIZE, hipMemcpyDeviceToHost);
    for (i = 0; i < WORK_SIZE; i++) {
        printf("%d\t", a[i]);
    }
    printf("\n");
    hipFree((void*)da);
    return 0;
}
44786834b9aca1bee08f7e63dc5ed326cf7aa33b.cu
#include <stdio.h>
#include <stdlib.h>

static const int WORK_SIZE = 10;

// One pass of odd-even transposition sort: even i compares pairs (0,1),(2,3),...;
// odd i compares pairs (1,2),(3,4),...
__global__ void sort(int* a, int i, int n)
{
    int tid = threadIdx.x;
    int p;
    int temp;
    if (i % 2 == 0) {
        p = tid * 2;
        if (a[p] > a[p + 1]) {
            temp = a[p];
            a[p] = a[p + 1];
            a[p + 1] = temp;
        }
    } else {
        p = tid * 2 + 1;
        if (p < n - 1) {
            if (a[p] > a[p + 1]) {
                temp = a[p];
                a[p] = a[p + 1];
                a[p + 1] = temp;
            }
        }
    }
}

int main(void)
{
    int a[WORK_SIZE];
    int i;
    int* da;
    cudaMalloc((void**)&da, sizeof(int) * WORK_SIZE);
    for (i = 0; i < WORK_SIZE; i++) {
        printf("%d:", i);
        scanf("%d", &a[i]);
    }
    cudaMemcpy(da, a, sizeof(int) * WORK_SIZE, cudaMemcpyHostToDevice);
    for (i = 0; i < WORK_SIZE; i++) {
        sort << <1, WORK_SIZE / 2 >> > (da, i, WORK_SIZE);
    }
    cudaDeviceSynchronize(); // Wait for the GPU launched work to complete
    cudaGetLastError();
    cudaMemcpy(a, da, sizeof(int) * WORK_SIZE, cudaMemcpyDeviceToHost);
    for (i = 0; i < WORK_SIZE; i++) {
        printf("%d\t", a[i]);
    }
    printf("\n");
    cudaFree((void*)da);
    return 0;
}
c5834b98db69c1f956f19a0e7b0b6635d80e63f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/row_conv_op.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace paddle { namespace operators { using LoDTensor = framework::LoDTensor; namespace { inline int DivUp(int x, int y) { return (x + y - 1) / y; } // Forward prop (shared memory version, for small future_context) template <typename T> __global__ void RowConvForwardSharedMemory(const T *in, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *out) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int d = blockIdx.x * blx + thx; // index along input dim extern __shared__ T mem[]; T *sw = mem; if (thy < future_context) { sw[thy * blx + thx] = (d < input_dim) ? wt[thy * input_dim + d] : static_cast<T>(0); } __syncthreads(); for (size_t i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); w++) { sum += (d < input_dim) ? sw[w * blx + thx] * in[(start + k + w) * input_dim + d] : static_cast<T>(0); } if (d < input_dim) { out[(start + k) * input_dim + d] = sum; } } } } // Forward prop (naive version) template <typename T> __global__ void RowConvForward(const T *in, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *out) { int d = blockIdx.x * blockDim.x + threadIdx.x; // index along input_dim int bly = blockDim.y; int thy = threadIdx.y; if (d >= input_dim) return; for (size_t i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); w++) { sum += (wt[w * input_dim + d] * in[(start + k + w) * input_dim + d]); } out[(start + k) * input_dim + d] = sum; } } } // Compute input gradient (shared memory version, for small future_context) template <typename T> __global__ void RowConvGradInputSharedMemory(const T *dout, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *din) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int d = blockIdx.x * blx + thx; // index along input dim extern __shared__ T mem[]; T *sw = mem; if (thy < future_context) { sw[thy * blx + thx] = (d < input_dim) ? 
wt[thy * input_dim + d] : static_cast<T>(0); } __syncthreads(); int current_timesteps = 0; for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k - w) >= 0); w++) { sum += (d < input_dim) ? (sw[w * blx + thx] * dout[(k + start - w) * input_dim + d]) : static_cast<T>(0); } if (d < input_dim) { din[(k + start) * input_dim + d] = sum; } } } } // Compute input gradient (Naive version) template <typename T> __global__ void RowConvGradInput(const T *dout, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *din) { int d = blockIdx.x * blockDim.x + threadIdx.x; // index along input_dim int bly = blockDim.y; int thy = threadIdx.y; if (d >= input_dim) return; int current_timesteps = 0; for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k - w) >= 0); w++) { sum += (wt[w * input_dim + d] * dout[(k + start - w) * input_dim + d]); } din[(k + start) * input_dim + d] = sum; } } } // Compute W gradient (small future_context version) template <typename T> __global__ void RowConvGradFilterImproved(const T *in, const T *dout, int num_sequence, int input_dim, int future_context, int block_x, int block_y, const size_t *batch_indices, T *dfilter) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int gx = blockIdx.x * blx; int d = gx + thx; // index along input dim extern __shared__ T mem[]; int xdim_sh_in = block_y; int xdim_sh_dout = block_y; int ydim_sh_in = block_x; int ydim_sh_dout = block_x + future_context - 1; int ydim_sh_dfilter = block_y; T *sh_in = mem; T *sh_dout = &mem[xdim_sh_in * ydim_sh_in]; T *sh_dfilter = &mem[xdim_sh_in * ydim_sh_in + xdim_sh_dout * ydim_sh_dout]; if (thy < future_context) { sh_dfilter[thy * ydim_sh_dfilter + thx] = static_cast<T>(0); } __syncthreads(); // NOTE(zcd): temporary solution unsigned mask = 0u; CREATE_SHFL_MASK(mask, true); for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; int scaled_cur_steps = ((current_timesteps + block_x - 1) / block_x) * block_x; for (int k = thy; k < scaled_cur_steps; k += block_x) { int pos = start + k; sh_in[thx * ydim_sh_in + thy] = (d < input_dim && pos < end) ? in[pos * input_dim + d] : T(0); sh_dout[thx * ydim_sh_dout + thy + future_context - 1] = (d < input_dim && pos < end) ? dout[pos * input_dim + d] : T(0); __syncthreads(); if (thy < future_context - 1) { int pos_offset = pos - future_context + 1; sh_dout[thx * ydim_sh_dout + thy] = (d < input_dim && pos_offset >= start) ? dout[pos_offset * input_dim + d] : T(0); } __syncthreads(); for (int w = 0; w < future_context; w++) { T val = sh_in[thy * ydim_sh_in + thx] * sh_dout[thy * ydim_sh_dout + thx + future_context - 1 - w]; __syncthreads(); for (int offset = 16; offset > 0; offset = offset / 2) { // blockDim.x is 32. 
val += platform::CudaShuffleDownSync(mask, val, offset); } __syncthreads(); if (thx == 0) { sh_dfilter[w * ydim_sh_dfilter + thy] += val; } __syncthreads(); } } } for (int w = thy; (w < future_context) && (d < input_dim); w += bly) { dfilter[w * input_dim + d] += sh_dfilter[w * ydim_sh_dfilter + thx]; } } // Compute weight(filter) gradient template <typename T> __global__ void RowConvGradFilter(const T *in, const T *dout, int num_sequence, int input_dim, int future_context, int block_x, int block_y, const size_t *batch_indices, T *dfilter) { int blx = blockDim.x; int thx = threadIdx.x; int thy = threadIdx.y; int gx = blockIdx.x * blx; int d = gx + thx; // index along input dim extern __shared__ T mem[]; T *sh_in = mem; T *sh_dout = &mem[block_x * block_y]; // NOTE(zcd): temporary solution unsigned mask = 0u; CREATE_SHFL_MASK(mask, true); for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; int scaled_cur_steps = ((current_timesteps + block_x - 1) / block_x) * block_x; for (int k = thy; k < scaled_cur_steps; k += block_x) { int pos = start + k; sh_in[thx * block_y + thy] = (d < input_dim && pos < end) ? in[pos * input_dim + d] : 0.0; __syncthreads(); for (int w = 0; w < future_context; w++) { sh_dout[thx * block_y + thy] = (d < input_dim && (k - w) >= 0 && (k - w) < current_timesteps) ? dout[(pos - w) * input_dim + d] : 0.0; __syncthreads(); T val = sh_in[thy * block_y + thx] * sh_dout[thy * block_y + thx]; __syncthreads(); for (int offset = 16; offset > 0; offset = offset / 2) { // blockDim.x is 32. val += platform::CudaShuffleDownSync(mask, val, offset); } __syncthreads(); if (thx == 0 && (gx + thy) < input_dim) { dfilter[w * input_dim + gx + thy] += val; } } } } } } // namespace template <typename T> class RowConvKernel<phi::GPUContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *X = context.Input<LoDTensor>("X"); auto *Filter = context.Input<phi::DenseTensor>("Filter"); auto *Out = context.Output<LoDTensor>("Out"); const T *in = X->data<T>(); const T *weight = Filter->data<T>(); T *out = Out->mutable_data<T>(context.GetPlace()); bool is_tensor = X->lod().empty(); int batch_size = 0; if (is_tensor) { batch_size = X->dims()[0]; } else { batch_size = X->lod()[0].size() - 1; } int input_dim = 0; framework::Vector<size_t> batch_indices(batch_size + 1); int timesteps = X->dims()[1]; if (is_tensor) { for (int i = 0; i < batch_size + 1; i++) { batch_indices[i] = i * timesteps; } input_dim = X->dims()[2]; } else { batch_indices = X->lod()[0]; input_dim = X->dims()[1]; } int num_sequence = batch_indices.size() - 1; int future_context = Filter->dims()[0]; paddle::framework::MixVector<size_t> mix_vector(&batch_indices); size_t *idx = mix_vector.CUDAMutableData(context.GetPlace()); auto stream = context.cuda_device_context().stream(); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int mem_per_block = (future_context * block_dim.x) * sizeof(T); hipLaunchKernelGGL(( RowConvForwardSharedMemory<T>) , dim3(grid_dim), dim3(block_dim), mem_per_block, stream, in, weight, num_sequence, input_dim, future_context, idx, out); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); hipLaunchKernelGGL(( RowConvForward<T>), dim3(grid_dim), dim3(block_dim), 0, stream, in, weight, num_sequence, input_dim, 
future_context, idx, out); } mix_vector.CopyToCPU(); } }; template <typename T> class RowConvGradKernel<phi::GPUContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *X = context.Input<LoDTensor>("X"); auto *Filter = context.Input<phi::DenseTensor>("Filter"); auto *dOut = context.Input<LoDTensor>(framework::GradVarName("Out")); const T *in = X->data<T>(); const T *weights = Filter->data<T>(); const T *dout = dOut->data<T>(); phi::DenseTensor *dX = context.Output<LoDTensor>(framework::GradVarName("X")); phi::DenseTensor *dFilter = context.Output<phi::DenseTensor>(framework::GradVarName("Filter")); int batch_size = 0; bool is_tensor = X->lod().empty(); if (is_tensor) { batch_size = X->dims()[0]; } else { batch_size = X->lod()[0].size() - 1; } int input_dim = 0; framework::Vector<size_t> batch_indices(batch_size + 1); int timesteps = X->dims()[1]; if (is_tensor) { for (int i = 0; i < batch_size + 1; i++) { batch_indices[i] = i * timesteps; } input_dim = X->dims()[2]; } else { batch_indices = X->lod()[0]; input_dim = X->dims()[1]; } // int input_dim = X->dims()[1]; int num_sequence = batch_indices.size() - 1; int future_context = Filter->dims()[0]; paddle::framework::MixVector<size_t> mixv_batch_indices(&batch_indices); size_t *idx = mixv_batch_indices.CUDAMutableData(context.GetPlace()); auto &device_ctx = context.cuda_device_context(); phi::funcs::SetConstant<phi::GPUContext, T> zero; if (dFilter) { T *dfilter = dFilter->mutable_data<T>(context.GetPlace()); zero(device_ctx, dFilter, static_cast<T>(0.0)); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int block_x = block_dim.x; int block_y = block_dim.y; int mem_per_block = (block_y * block_x + block_y * (block_x + future_context - 1) + future_context * block_y) * sizeof(T); hipLaunchKernelGGL(( RowConvGradFilterImproved<T>) , dim3(grid_dim), dim3(block_dim), mem_per_block, device_ctx.stream(), in, dout, num_sequence, input_dim, future_context, block_x, block_y, idx, dfilter); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int block_x = block_dim.x; int block_y = block_dim.y; int mem_per_block = (block_x * block_y * 2) * sizeof(T); // For 2 arrays of size 32x32 hipLaunchKernelGGL(( RowConvGradFilter<T>) , dim3(grid_dim), dim3(block_dim), mem_per_block, device_ctx.stream(), in, dout, num_sequence, input_dim, future_context, block_x, block_y, idx, dfilter); } } if (dX) { T *din = dX->mutable_data<T>(context.GetPlace()); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int mem_per_block = (future_context * block_dim.x) * sizeof(T); hipLaunchKernelGGL(( RowConvGradInputSharedMemory<T>) , dim3(grid_dim), dim3(block_dim), mem_per_block, device_ctx.stream(), dout, weights, num_sequence, input_dim, future_context, idx, din); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); hipLaunchKernelGGL(( RowConvGradInput<T>), dim3(grid_dim), dim3(block_dim), 0, device_ctx.stream(), dout, weights, num_sequence, input_dim, future_context, idx, din); } } mixv_batch_indices.CopyToCPU(); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(row_conv, ops::RowConvKernel<phi::GPUContext, float>); REGISTER_OP_CUDA_KERNEL(row_conv_grad, ops::RowConvGradKernel<phi::GPUContext, float>);
c5834b98db69c1f956f19a0e7b0b6635d80e63f6.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/row_conv_op.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace paddle { namespace operators { using LoDTensor = framework::LoDTensor; namespace { inline int DivUp(int x, int y) { return (x + y - 1) / y; } // Forward prop (shared memory version, for small future_context) template <typename T> __global__ void RowConvForwardSharedMemory(const T *in, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *out) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int d = blockIdx.x * blx + thx; // index along input dim extern __shared__ T mem[]; T *sw = mem; if (thy < future_context) { sw[thy * blx + thx] = (d < input_dim) ? wt[thy * input_dim + d] : static_cast<T>(0); } __syncthreads(); for (size_t i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); w++) { sum += (d < input_dim) ? sw[w * blx + thx] * in[(start + k + w) * input_dim + d] : static_cast<T>(0); } if (d < input_dim) { out[(start + k) * input_dim + d] = sum; } } } } // Forward prop (naive version) template <typename T> __global__ void RowConvForward(const T *in, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *out) { int d = blockIdx.x * blockDim.x + threadIdx.x; // index along input_dim int bly = blockDim.y; int thy = threadIdx.y; if (d >= input_dim) return; for (size_t i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); w++) { sum += (wt[w * input_dim + d] * in[(start + k + w) * input_dim + d]); } out[(start + k) * input_dim + d] = sum; } } } // Compute input gradient (shared memory version, for small future_context) template <typename T> __global__ void RowConvGradInputSharedMemory(const T *dout, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *din) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int d = blockIdx.x * blx + thx; // index along input dim extern __shared__ T mem[]; T *sw = mem; if (thy < future_context) { sw[thy * blx + thx] = (d < input_dim) ? 
wt[thy * input_dim + d] : static_cast<T>(0); } __syncthreads(); int current_timesteps = 0; for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k - w) >= 0); w++) { sum += (d < input_dim) ? (sw[w * blx + thx] * dout[(k + start - w) * input_dim + d]) : static_cast<T>(0); } if (d < input_dim) { din[(k + start) * input_dim + d] = sum; } } } } // Compute input gradient (Naive version) template <typename T> __global__ void RowConvGradInput(const T *dout, const T *wt, int num_sequence, int input_dim, int future_context, const size_t *batch_indices, T *din) { int d = blockIdx.x * blockDim.x + threadIdx.x; // index along input_dim int bly = blockDim.y; int thy = threadIdx.y; if (d >= input_dim) return; int current_timesteps = 0; for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); current_timesteps = end - start; for (int k = thy; k < current_timesteps; k += bly) { T sum = 0; for (int w = 0; (w < future_context) && ((k - w) >= 0); w++) { sum += (wt[w * input_dim + d] * dout[(k + start - w) * input_dim + d]); } din[(k + start) * input_dim + d] = sum; } } } // Compute W gradient (small future_context version) template <typename T> __global__ void RowConvGradFilterImproved(const T *in, const T *dout, int num_sequence, int input_dim, int future_context, int block_x, int block_y, const size_t *batch_indices, T *dfilter) { int blx = blockDim.x; int bly = blockDim.y; int thx = threadIdx.x; int thy = threadIdx.y; int gx = blockIdx.x * blx; int d = gx + thx; // index along input dim extern __shared__ T mem[]; int xdim_sh_in = block_y; int xdim_sh_dout = block_y; int ydim_sh_in = block_x; int ydim_sh_dout = block_x + future_context - 1; int ydim_sh_dfilter = block_y; T *sh_in = mem; T *sh_dout = &mem[xdim_sh_in * ydim_sh_in]; T *sh_dfilter = &mem[xdim_sh_in * ydim_sh_in + xdim_sh_dout * ydim_sh_dout]; if (thy < future_context) { sh_dfilter[thy * ydim_sh_dfilter + thx] = static_cast<T>(0); } __syncthreads(); // NOTE(zcd): temporary solution unsigned mask = 0u; CREATE_SHFL_MASK(mask, true); for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; int scaled_cur_steps = ((current_timesteps + block_x - 1) / block_x) * block_x; for (int k = thy; k < scaled_cur_steps; k += block_x) { int pos = start + k; sh_in[thx * ydim_sh_in + thy] = (d < input_dim && pos < end) ? in[pos * input_dim + d] : T(0); sh_dout[thx * ydim_sh_dout + thy + future_context - 1] = (d < input_dim && pos < end) ? dout[pos * input_dim + d] : T(0); __syncthreads(); if (thy < future_context - 1) { int pos_offset = pos - future_context + 1; sh_dout[thx * ydim_sh_dout + thy] = (d < input_dim && pos_offset >= start) ? dout[pos_offset * input_dim + d] : T(0); } __syncthreads(); for (int w = 0; w < future_context; w++) { T val = sh_in[thy * ydim_sh_in + thx] * sh_dout[thy * ydim_sh_dout + thx + future_context - 1 - w]; __syncthreads(); for (int offset = 16; offset > 0; offset = offset / 2) { // blockDim.x is 32. 
val += platform::CudaShuffleDownSync(mask, val, offset); } __syncthreads(); if (thx == 0) { sh_dfilter[w * ydim_sh_dfilter + thy] += val; } __syncthreads(); } } } for (int w = thy; (w < future_context) && (d < input_dim); w += bly) { dfilter[w * input_dim + d] += sh_dfilter[w * ydim_sh_dfilter + thx]; } } // Compute weight(filter) gradient template <typename T> __global__ void RowConvGradFilter(const T *in, const T *dout, int num_sequence, int input_dim, int future_context, int block_x, int block_y, const size_t *batch_indices, T *dfilter) { int blx = blockDim.x; int thx = threadIdx.x; int thy = threadIdx.y; int gx = blockIdx.x * blx; int d = gx + thx; // index along input dim extern __shared__ T mem[]; T *sh_in = mem; T *sh_dout = &mem[block_x * block_y]; // NOTE(zcd): temporary solution unsigned mask = 0u; CREATE_SHFL_MASK(mask, true); for (int i = 0; i < num_sequence; i++) { int start = static_cast<int>(batch_indices[i]); int end = static_cast<int>(batch_indices[i + 1]); int current_timesteps = end - start; int scaled_cur_steps = ((current_timesteps + block_x - 1) / block_x) * block_x; for (int k = thy; k < scaled_cur_steps; k += block_x) { int pos = start + k; sh_in[thx * block_y + thy] = (d < input_dim && pos < end) ? in[pos * input_dim + d] : 0.0; __syncthreads(); for (int w = 0; w < future_context; w++) { sh_dout[thx * block_y + thy] = (d < input_dim && (k - w) >= 0 && (k - w) < current_timesteps) ? dout[(pos - w) * input_dim + d] : 0.0; __syncthreads(); T val = sh_in[thy * block_y + thx] * sh_dout[thy * block_y + thx]; __syncthreads(); for (int offset = 16; offset > 0; offset = offset / 2) { // blockDim.x is 32. val += platform::CudaShuffleDownSync(mask, val, offset); } __syncthreads(); if (thx == 0 && (gx + thy) < input_dim) { dfilter[w * input_dim + gx + thy] += val; } } } } } } // namespace template <typename T> class RowConvKernel<phi::GPUContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *X = context.Input<LoDTensor>("X"); auto *Filter = context.Input<phi::DenseTensor>("Filter"); auto *Out = context.Output<LoDTensor>("Out"); const T *in = X->data<T>(); const T *weight = Filter->data<T>(); T *out = Out->mutable_data<T>(context.GetPlace()); bool is_tensor = X->lod().empty(); int batch_size = 0; if (is_tensor) { batch_size = X->dims()[0]; } else { batch_size = X->lod()[0].size() - 1; } int input_dim = 0; framework::Vector<size_t> batch_indices(batch_size + 1); int timesteps = X->dims()[1]; if (is_tensor) { for (int i = 0; i < batch_size + 1; i++) { batch_indices[i] = i * timesteps; } input_dim = X->dims()[2]; } else { batch_indices = X->lod()[0]; input_dim = X->dims()[1]; } int num_sequence = batch_indices.size() - 1; int future_context = Filter->dims()[0]; paddle::framework::MixVector<size_t> mix_vector(&batch_indices); size_t *idx = mix_vector.CUDAMutableData(context.GetPlace()); auto stream = context.cuda_device_context().stream(); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int mem_per_block = (future_context * block_dim.x) * sizeof(T); RowConvForwardSharedMemory<T> <<<grid_dim, block_dim, mem_per_block, stream>>>( in, weight, num_sequence, input_dim, future_context, idx, out); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); RowConvForward<T><<<grid_dim, block_dim, 0, stream>>>( in, weight, num_sequence, input_dim, future_context, idx, out); } mix_vector.CopyToCPU(); } }; 
template <typename T> class RowConvGradKernel<phi::GPUContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { auto *X = context.Input<LoDTensor>("X"); auto *Filter = context.Input<phi::DenseTensor>("Filter"); auto *dOut = context.Input<LoDTensor>(framework::GradVarName("Out")); const T *in = X->data<T>(); const T *weights = Filter->data<T>(); const T *dout = dOut->data<T>(); phi::DenseTensor *dX = context.Output<LoDTensor>(framework::GradVarName("X")); phi::DenseTensor *dFilter = context.Output<phi::DenseTensor>(framework::GradVarName("Filter")); int batch_size = 0; bool is_tensor = X->lod().empty(); if (is_tensor) { batch_size = X->dims()[0]; } else { batch_size = X->lod()[0].size() - 1; } int input_dim = 0; framework::Vector<size_t> batch_indices(batch_size + 1); int timesteps = X->dims()[1]; if (is_tensor) { for (int i = 0; i < batch_size + 1; i++) { batch_indices[i] = i * timesteps; } input_dim = X->dims()[2]; } else { batch_indices = X->lod()[0]; input_dim = X->dims()[1]; } // int input_dim = X->dims()[1]; int num_sequence = batch_indices.size() - 1; int future_context = Filter->dims()[0]; paddle::framework::MixVector<size_t> mixv_batch_indices(&batch_indices); size_t *idx = mixv_batch_indices.CUDAMutableData(context.GetPlace()); auto &device_ctx = context.cuda_device_context(); phi::funcs::SetConstant<phi::GPUContext, T> zero; if (dFilter) { T *dfilter = dFilter->mutable_data<T>(context.GetPlace()); zero(device_ctx, dFilter, static_cast<T>(0.0)); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int block_x = block_dim.x; int block_y = block_dim.y; int mem_per_block = (block_y * block_x + block_y * (block_x + future_context - 1) + future_context * block_y) * sizeof(T); RowConvGradFilterImproved<T> <<<grid_dim, block_dim, mem_per_block, device_ctx.stream()>>>( in, dout, num_sequence, input_dim, future_context, block_x, block_y, idx, dfilter); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int block_x = block_dim.x; int block_y = block_dim.y; int mem_per_block = (block_x * block_y * 2) * sizeof(T); // For 2 arrays of size 32x32 RowConvGradFilter<T> <<<grid_dim, block_dim, mem_per_block, device_ctx.stream()>>>( in, dout, num_sequence, input_dim, future_context, block_x, block_y, idx, dfilter); } } if (dX) { T *din = dX->mutable_data<T>(context.GetPlace()); if (future_context <= 32) { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); int mem_per_block = (future_context * block_dim.x) * sizeof(T); RowConvGradInputSharedMemory<T> <<<grid_dim, block_dim, mem_per_block, device_ctx.stream()>>>( dout, weights, num_sequence, input_dim, future_context, idx, din); } else { dim3 block_dim = dim3(32, 32); dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); RowConvGradInput<T><<<grid_dim, block_dim, 0, device_ctx.stream()>>>( dout, weights, num_sequence, input_dim, future_context, idx, din); } } mixv_batch_indices.CopyToCPU(); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(row_conv, ops::RowConvKernel<phi::GPUContext, float>); REGISTER_OP_CUDA_KERNEL(row_conv_grad, ops::RowConvGradKernel<phi::GPUContext, float>);
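// Illustrative CPU reference (not part of the original row_conv_op.cu): the
// forward kernels above compute, for every timestep k of each sequence and
// every feature d, a look-ahead convolution over the next future_context
// steps. The helper below restates that recurrence on the host so the GPU
// kernels can be spot-checked; the name RowConvReferenceCPU and the use of
// std::vector are illustrative assumptions, not part of the operator.
#include <cstddef>
#include <vector>

template <typename T>
void RowConvReferenceCPU(const std::vector<T> &in,       // [total_timesteps x input_dim], row-major
                         const std::vector<T> &filter,   // [future_context x input_dim]
                         const std::vector<size_t> &batch_indices,  // sequence offsets, size num_sequence + 1
                         int input_dim, int future_context,
                         std::vector<T> *out) {          // same shape as `in`
  out->assign(in.size(), T(0));
  for (size_t s = 0; s + 1 < batch_indices.size(); ++s) {
    int start = static_cast<int>(batch_indices[s]);
    int end = static_cast<int>(batch_indices[s + 1]);
    int steps = end - start;
    for (int k = 0; k < steps; ++k) {
      for (int d = 0; d < input_dim; ++d) {
        T sum = 0;
        // same bound as the kernels: stop at the sequence end or future_context
        for (int w = 0; w < future_context && (k + w) < steps; ++w) {
          sum += filter[w * input_dim + d] * in[(start + k + w) * input_dim + d];
        }
        (*out)[(start + k) * input_dim + d] = sum;
      }
    }
  }
}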
7c094c8b12ef299211dea447469dd72958e2fb62.hip
// !!! This is a file automatically generated by hipify!!! /* The implementation of this file is based on qkvToContext plugin in TensorRT demo: https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/ Copyright 2019 NVIDIA Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Modifications: scaling is moved from masked softmax to the gemm before that. // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include <hip/hip_fp16.h> #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/shared_inc/fpgeneric.h" #include "attention_impl.h" #include "attention_softmax.h" using namespace onnxruntime::cuda; using namespace cub; namespace onnxruntime { namespace contrib { namespace cuda { static size_t AlignTo(size_t a, size_t b) { return CeilDiv(a, b) * b; } size_t GetAttentionScratchSize(size_t element_size, int batch_size, int num_heads, int sequence_length, int all_sequence_length) { const size_t len = batch_size * num_heads * sequence_length * all_sequence_length; const size_t bytes = len * element_size; const size_t alignment = 256; const size_t bytesAligned = AlignTo(bytes, alignment); return bytesAligned; } size_t GetAttentionWorkspaceSize( size_t element_size, int batch_size, int num_heads, int head_size, int sequence_length, int past_sequence_length) { size_t qkv_size = 3 * batch_size * sequence_length * num_heads * head_size * element_size; return qkv_size + 2 * GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, past_sequence_length + sequence_length); } template <typename T> bool QkvToContext( const hipDeviceProp_t& prop, hipblasHandle_t& cublas, hipStream_t stream, const int batch_size, const int sequence_length, const int num_heads, const int head_size, const size_t element_size, const T* input, T* output, T* workspace, const int* mask_index, const std::vector<int64_t>* mask_index_dims, bool is_unidirectional, int past_sequence_length, const T* past, T* present) { const int all_sequence_length = past_sequence_length + sequence_length; const size_t bytes = GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, all_sequence_length); T* scratch1 = workspace; T* scratch2 = scratch1 + (bytes / element_size); T* scratch3 = scratch2 + (bytes / element_size); const int max_threads_per_block(prop.maxThreadsPerBlock); // input should be BxSx3xNxH => scratch3: 3xBxNxSxH if (!LaunchTransQkv(stream, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, input, scratch3)) { return false; } // now scratch3 has Q, K, V: each has size BxNxSxH const int batches = batch_size * num_heads; const int size_per_batch = sequence_length * head_size; const int total_size = batches * size_per_batch; const T* q = scratch3; const T* k = q + total_size; const T* v = k + total_size; hipblasSetStream(cublas, stream); // Concat past (2xBxNxS'xH) to present (2xBxNxS*xH): // past_k (BxNxS'xH) + k (BxNxSxH) => present_k (BxNxS*xH) // past_v (BxNxS'xH) + v (BxNxSxH) 
=> present_v (BxNxS*xH) const int present_size_per_batch = all_sequence_length * head_size; if (nullptr != present) { if (!LaunchConcatPastToPresent(stream, all_sequence_length, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, past, k, present)) { return false; } // update pointers to present_k and present_v. k = present; v = present + batches * present_size_per_batch; } // Raw attention mask could be 2D (BxS) or 3D (BxSxS*) or 4D(Bx1xMxM), where M is the max sequence length. bool use_raw_attention_mask = (nullptr != mask_index && nullptr != mask_index_dims && mask_index_dims->size() >= 2); // compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scratch1: BxNxSxS* // Q: BxNxSxH, K (present_k): BxNxS*xH, Q*K': BxNxSxS* const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size)); const int temp_matrix_size = sequence_length * all_sequence_length; float one = 1.0f; float zero = 0.f; // For raw attention mask, the scalar if 1/sqrt(H) is moved to softmax computation. float alpha = use_raw_attention_mask ? one : rsqrt_head_size; if (!CUBLAS_CALL(cublasGemmStridedBatchedHelper( cublas, HIPBLAS_OP_T, HIPBLAS_OP_N, all_sequence_length, sequence_length, head_size, &alpha, k, head_size, present_size_per_batch, q, head_size, size_per_batch, &zero, scratch1, all_sequence_length, temp_matrix_size, batches, prop))) { return false; } // apply softmax and store result P to scratch2: BxNxSxS* if (use_raw_attention_mask) { // 2d, 3d or 4d attention mask const int mask_dimension = static_cast<int>(mask_index_dims->size()); const int64_t max_sequence_length = mask_dimension == 4 ? mask_index_dims->at(3) : 0; if (!ComputeSoftmaxWithRawMask<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, mask_index, scratch1, scratch2, is_unidirectional, rsqrt_head_size, mask_dimension, static_cast<int>(max_sequence_length))) { return false; } } else if (nullptr != mask_index) { // 1d mask index ORT_ENFORCE(nullptr != mask_index_dims && mask_index_dims->size() == 1); // mask_index has 1D shape: either (batch_size) or (2*batch_size). Only the later one has start postions. const int* mask_start = (mask_index_dims->at(0) > batch_size) ? 
mask_index + batch_size : nullptr; if (!ComputeSoftmaxWithMask1D<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, mask_index, mask_start, scratch1, scratch2, is_unidirectional)) { return false; } } else { // no mask if (!ComputeSoftmax<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, scratch1, scratch2, is_unidirectional)) { return false; } } // compute P*V (as V*P), and store in scratch3: BxNxSxH if (!CUBLAS_CALL(cublasGemmStridedBatchedHelper( cublas, HIPBLAS_OP_N, HIPBLAS_OP_N, head_size, sequence_length, all_sequence_length, &one, v, head_size, present_size_per_batch, scratch2, all_sequence_length, temp_matrix_size, &zero, scratch3, head_size, size_per_batch, batches, prop))) { return false; } // scratch3 is BxNxSxH, transpose to output BxSxNxH return LaunchTransCtx(stream, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, scratch3, output); } bool LaunchAttentionKernel( const hipDeviceProp_t& prop, hipStream_t stream, const void* input, const int* mask_index, const std::vector<int64_t>* mask_index_dims, void* output, const int batch_size, const int sequence_length, const int num_heads, const int head_size, void* workspace, hipblasHandle_t& cublas, const size_t element_size, bool is_unidirectional, int past_sequence_length, const void* past, void* present) { if (element_size == 2) { return QkvToContext(prop, cublas, stream, batch_size, sequence_length, num_heads, head_size, element_size, reinterpret_cast<const half*>(input), reinterpret_cast<half*>(output), reinterpret_cast<half*>(workspace), mask_index, mask_index_dims, is_unidirectional, past_sequence_length, reinterpret_cast<const half*>(past), reinterpret_cast<half*>(present)); } else { return QkvToContext(prop, cublas, stream, batch_size, sequence_length, num_heads, head_size, element_size, reinterpret_cast<const float*>(input), reinterpret_cast<float*>(output), reinterpret_cast<float*>(workspace), mask_index, mask_index_dims, is_unidirectional, past_sequence_length, reinterpret_cast<const float*>(past), reinterpret_cast<float*>(present)); } } } // namespace cuda } // namespace contrib } // namespace onnxruntime
7c094c8b12ef299211dea447469dd72958e2fb62.cu
/* The implementation of this file is based on qkvToContext plugin in TensorRT demo: https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/ Copyright 2019 NVIDIA Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Modifications: scaling is moved from masked softmax to the gemm before that. // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include <cuda_fp16.h> #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_common.h" #include "core/providers/cuda/shared_inc/fpgeneric.h" #include "attention_impl.h" #include "attention_softmax.h" using namespace onnxruntime::cuda; using namespace cub; namespace onnxruntime { namespace contrib { namespace cuda { static size_t AlignTo(size_t a, size_t b) { return CeilDiv(a, b) * b; } size_t GetAttentionScratchSize(size_t element_size, int batch_size, int num_heads, int sequence_length, int all_sequence_length) { const size_t len = batch_size * num_heads * sequence_length * all_sequence_length; const size_t bytes = len * element_size; const size_t alignment = 256; const size_t bytesAligned = AlignTo(bytes, alignment); return bytesAligned; } size_t GetAttentionWorkspaceSize( size_t element_size, int batch_size, int num_heads, int head_size, int sequence_length, int past_sequence_length) { size_t qkv_size = 3 * batch_size * sequence_length * num_heads * head_size * element_size; return qkv_size + 2 * GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, past_sequence_length + sequence_length); } template <typename T> bool QkvToContext( const cudaDeviceProp& prop, cublasHandle_t& cublas, cudaStream_t stream, const int batch_size, const int sequence_length, const int num_heads, const int head_size, const size_t element_size, const T* input, T* output, T* workspace, const int* mask_index, const std::vector<int64_t>* mask_index_dims, bool is_unidirectional, int past_sequence_length, const T* past, T* present) { const int all_sequence_length = past_sequence_length + sequence_length; const size_t bytes = GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, all_sequence_length); T* scratch1 = workspace; T* scratch2 = scratch1 + (bytes / element_size); T* scratch3 = scratch2 + (bytes / element_size); const int max_threads_per_block(prop.maxThreadsPerBlock); // input should be BxSx3xNxH => scratch3: 3xBxNxSxH if (!LaunchTransQkv(stream, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, input, scratch3)) { return false; } // now scratch3 has Q, K, V: each has size BxNxSxH const int batches = batch_size * num_heads; const int size_per_batch = sequence_length * head_size; const int total_size = batches * size_per_batch; const T* q = scratch3; const T* k = q + total_size; const T* v = k + total_size; cublasSetStream(cublas, stream); // Concat past (2xBxNxS'xH) to present (2xBxNxS*xH): // past_k (BxNxS'xH) + k (BxNxSxH) => present_k (BxNxS*xH) // past_v (BxNxS'xH) + v (BxNxSxH) => present_v (BxNxS*xH) const int present_size_per_batch = 
all_sequence_length * head_size; if (nullptr != present) { if (!LaunchConcatPastToPresent(stream, all_sequence_length, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, past, k, present)) { return false; } // update pointers to present_k and present_v. k = present; v = present + batches * present_size_per_batch; } // Raw attention mask could be 2D (BxS) or 3D (BxSxS*) or 4D(Bx1xMxM), where M is the max sequence length. bool use_raw_attention_mask = (nullptr != mask_index && nullptr != mask_index_dims && mask_index_dims->size() >= 2); // compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scratch1: BxNxSxS* // Q: BxNxSxH, K (present_k): BxNxS*xH, Q*K': BxNxSxS* const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size)); const int temp_matrix_size = sequence_length * all_sequence_length; float one = 1.0f; float zero = 0.f; // For raw attention mask, the scalar if 1/sqrt(H) is moved to softmax computation. float alpha = use_raw_attention_mask ? one : rsqrt_head_size; if (!CUBLAS_CALL(cublasGemmStridedBatchedHelper( cublas, CUBLAS_OP_T, CUBLAS_OP_N, all_sequence_length, sequence_length, head_size, &alpha, k, head_size, present_size_per_batch, q, head_size, size_per_batch, &zero, scratch1, all_sequence_length, temp_matrix_size, batches, prop))) { return false; } // apply softmax and store result P to scratch2: BxNxSxS* if (use_raw_attention_mask) { // 2d, 3d or 4d attention mask const int mask_dimension = static_cast<int>(mask_index_dims->size()); const int64_t max_sequence_length = mask_dimension == 4 ? mask_index_dims->at(3) : 0; if (!ComputeSoftmaxWithRawMask<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, mask_index, scratch1, scratch2, is_unidirectional, rsqrt_head_size, mask_dimension, static_cast<int>(max_sequence_length))) { return false; } } else if (nullptr != mask_index) { // 1d mask index ORT_ENFORCE(nullptr != mask_index_dims && mask_index_dims->size() == 1); // mask_index has 1D shape: either (batch_size) or (2*batch_size). Only the later one has start postions. const int* mask_start = (mask_index_dims->at(0) > batch_size) ? 
mask_index + batch_size : nullptr; if (!ComputeSoftmaxWithMask1D<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, mask_index, mask_start, scratch1, scratch2, is_unidirectional)) { return false; } } else { // no mask if (!ComputeSoftmax<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, scratch1, scratch2, is_unidirectional)) { return false; } } // compute P*V (as V*P), and store in scratch3: BxNxSxH if (!CUBLAS_CALL(cublasGemmStridedBatchedHelper( cublas, CUBLAS_OP_N, CUBLAS_OP_N, head_size, sequence_length, all_sequence_length, &one, v, head_size, present_size_per_batch, scratch2, all_sequence_length, temp_matrix_size, &zero, scratch3, head_size, size_per_batch, batches, prop))) { return false; } // scratch3 is BxNxSxH, transpose to output BxSxNxH return LaunchTransCtx(stream, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, scratch3, output); } bool LaunchAttentionKernel( const cudaDeviceProp& prop, cudaStream_t stream, const void* input, const int* mask_index, const std::vector<int64_t>* mask_index_dims, void* output, const int batch_size, const int sequence_length, const int num_heads, const int head_size, void* workspace, cublasHandle_t& cublas, const size_t element_size, bool is_unidirectional, int past_sequence_length, const void* past, void* present) { if (element_size == 2) { return QkvToContext(prop, cublas, stream, batch_size, sequence_length, num_heads, head_size, element_size, reinterpret_cast<const half*>(input), reinterpret_cast<half*>(output), reinterpret_cast<half*>(workspace), mask_index, mask_index_dims, is_unidirectional, past_sequence_length, reinterpret_cast<const half*>(past), reinterpret_cast<half*>(present)); } else { return QkvToContext(prop, cublas, stream, batch_size, sequence_length, num_heads, head_size, element_size, reinterpret_cast<const float*>(input), reinterpret_cast<float*>(output), reinterpret_cast<float*>(workspace), mask_index, mask_index_dims, is_unidirectional, past_sequence_length, reinterpret_cast<const float*>(past), reinterpret_cast<float*>(present)); } } } // namespace cuda } // namespace contrib } // namespace onnxruntime
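// Illustrative caller-side sketch (not part of the original attention_impl.cu):
// QkvToContext expects `workspace` to hold the transposed QKV buffer
// (3 x B x N x S x H elements) followed by two 256-byte-aligned scratch
// buffers of B x N x S x S* elements each, which is exactly what
// GetAttentionWorkspaceSize above returns. The helper name and the bare
// cudaMalloc call (no error handling) are assumptions for illustration only,
// and the code relies on the declarations from attention_impl.h being in scope.
#include <cuda_runtime.h>
#include <cstddef>

inline void* AllocateAttentionWorkspace(size_t element_size, int batch_size,
                                        int num_heads, int head_size,
                                        int sequence_length,
                                        int past_sequence_length) {
  const size_t bytes = GetAttentionWorkspaceSize(element_size, batch_size,
                                                 num_heads, head_size,
                                                 sequence_length,
                                                 past_sequence_length);
  void* workspace = nullptr;
  cudaMalloc(&workspace, bytes);  // caller owns this buffer and must cudaFree it
  return workspace;
}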
567c35542230fb13817dd89d0c289f3083ec993f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void mul2(int *a, int *b, int *c, int col)
{
    int x;
    for (int i = 0; i < blockDim.x; i++) {
        x = 0;
        for (int j = 0; j < col; j++) {
            x += a[i * col + j] * b[j * col + threadIdx.x];
        }
        c[i * col + threadIdx.x] = x;
    }
}

int main10(void)
{
    int r, col;
    printf("Enter dimensions:");
    scanf("%d %d", &r, &col);
    int a[r * col], b[r * col], c[r * col];
    int *d_a, *d_b, *d_c;
    int size = r * col * sizeof(int);

    // Allocate space for device copies of a, b, c
    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    hipMalloc((void **)&d_c, size);

    printf("Enter the 2 matrices: \n");
    for (int i = 0; i < r * col; i++) scanf("%d", &a[i]);
    for (int i = 0; i < r * col; i++) scanf("%d", &b[i]);

    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
    hipLaunchKernelGGL((mul2), dim3(1), dim3(r), 0, 0, d_a, d_b, d_c, col);

    // Copy result back to host
    hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);

    // print result
    for (int i = 0; i < r * col; i++) {
        printf("%d ", c[i]);
    }

    // Cleanup
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    return 0;
}
567c35542230fb13817dd89d0c289f3083ec993f.cu
#include <stdio.h>

__global__ void mul2(int *a, int *b, int *c, int col)
{
    int x;
    for (int i = 0; i < blockDim.x; i++) {
        x = 0;
        for (int j = 0; j < col; j++) {
            x += a[i * col + j] * b[j * col + threadIdx.x];
        }
        c[i * col + threadIdx.x] = x;
    }
}

int main10(void)
{
    int r, col;
    printf("Enter dimensions:");
    scanf("%d %d", &r, &col);
    int a[r * col], b[r * col], c[r * col];
    int *d_a, *d_b, *d_c;
    int size = r * col * sizeof(int);

    // Allocate space for device copies of a, b, c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    printf("Enter the 2 matrices: \n");
    for (int i = 0; i < r * col; i++) scanf("%d", &a[i]);
    for (int i = 0; i < r * col; i++) scanf("%d", &b[i]);

    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    mul2<<<1, r>>>(d_a, d_b, d_c, col);

    // Copy result back to host
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    // print result
    for (int i = 0; i < r * col; i++) {
        printf("%d ", c[i]);
    }

    // Cleanup
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
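// Illustrative comparison (not part of the original program): mul2 above runs
// a single block of r threads, and each thread walks every row while indexing
// b[j * col + threadIdx.x], so the arithmetic only lines up when the matrices
// are square (r == col). A more conventional one-thread-per-output-element
// kernel for square n x n matrices is sketched below; the name matmul_naive
// and the 16x16 launch shape are assumptions, not part of the original file.
__global__ void matmul_naive(const int *a, const int *b, int *c, int n)
{
    // c = a * b for n x n matrices, one thread per c[row][col]
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < n && col < n) {
        int sum = 0;
        for (int k = 0; k < n; k++) {
            sum += a[row * n + k] * b[k * n + col];
        }
        c[row * n + col] = sum;
    }
}

// Example launch for an n x n problem:
//   dim3 block(16, 16);
//   dim3 grid((n + block.x - 1) / block.x, (n + block.y - 1) / block.y);
//   matmul_naive<<<grid, block>>>(d_a, d_b, d_c, n);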
feb27b125fa7c2af80f743bcf0c52488849aeb02.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

using namespace std;

#ifndef MAP_FILE
#define MAP_FILE MAP_SHARED
#endif

__global__ void nllLoss(float *x, int x_stride, float *y, int *target)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int offset = tid * x_stride + target[tid];
    y[tid] = -1 * x[offset];
}
feb27b125fa7c2af80f743bcf0c52488849aeb02.cu
#include "includes.h" using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif __global__ void nllLoss(float *x, int x_stride, float *y, int* target) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int offset = tid * x_stride + target[tid]; y[tid] = -1 * x[offset]; }
0ab33b5103d4b6268dc6e2bf6a7c42b461a191f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @precisions normal z -> c d s @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 #define PRECISION_z // initialize arrays with zero __global__ void magma_zgpumemzero( magmaDoubleComplex * d, int n, int k ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { for (int j = 0; j < k; j++) d[ i+j*n ] = MAGMA_Z_MAKE( 0.0, 0.0 ); } } // dot product __global__ void magma_zdot_kernel( int Gs, int n, magmaDoubleComplex * v, magmaDoubleComplex * r, magmaDoubleComplex * vtmp) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_Z_MAKE( 0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // dot product for multiple vectors __global__ void magma_zblockdot_kernel( int Gs, int n, int k, magmaDoubleComplex * v, magmaDoubleComplex * r, magmaDoubleComplex * vtmp) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // k vectors v(i) if (i<n){ for( j=0; j<k; j++) temp[Idx+j*blockDim.x] = v[i+j*n] * r[i]; } else { for( j=0; j<k; j++) temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ 
Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for multiple vectors __global__ void magma_zblockreduce_kernel( int Gs, int n, int k, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ] : MAGMA_Z_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // accelerated reduction for one vector __global__ void magma_zreduce_kernel_fast( int Gs, int n, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_Z_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize 
* 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ] : MAGMA_Z_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // accelerated block reduction for multiple vectors __global__ void magma_zblockreduce_kernel_fast( int Gs, int n, int k, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<k; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_Z_MAKE( 0.0, 0.0); while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? 
vtmp[ i+j*n + (blockSize) ] : MAGMA_Z_MAKE( 0.0, 0.0); i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of vectors v_i such that skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaDoubleComplex_ptr v = (v_0 .. v_i.. v_k) @param[in] r magmaDoubleComplex_ptr r @param[in] d1 magmaDoubleComplex_ptr workspace @param[in] d2 magmaDoubleComplex_ptr workspace @param[out] skp magmaDoubleComplex_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmdotc( magma_int_t n, magma_int_t k, magmaDoubleComplex_ptr v, magmaDoubleComplex_ptr r, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = (k)* (local_block_size) * sizeof( magmaDoubleComplex ); // k vecs magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; if (k>1) { hipLaunchKernelGGL(( magma_zblockdot_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, k, v, r, d1 ); } else { hipLaunchKernelGGL(( magma_zdot_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, v, r, d1 ); } /* // not necessary to zero GPU mem magma_zgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d1, n*k,1 ); magma_zgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d2, n*k,1 ); //magmablas_zlaset( MagmaFull, n, k, d1, n, UNKNOWN ); //magmablas_zlaset( MagmaFull, n, k, d2, n, UNKNOWN ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); magma_zblockreduce_kernel<<< Gs_next.x, Bs.x, Ms, queue->cuda_stream >>> ( Gs.x, n, k, aux1, aux2 ); Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } for( int j=0; j<k; j++) { magma_zcopyvector( 1, aux1+j*n, 1, skp+j, 1, UNKNOWN ); } */ if ( k>1) { while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_zblockreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() , Gs.x, n, k, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } } else { while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_zreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() , Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } } magma_zcopyvector_async( k, aux1, n, skp, 1, queue ); return MAGMA_SUCCESS; } /** Purpose ------- This is an extension of the merged dot product above by chunking the set of vectors v_i such that the data always fits into cache. It is equivalent to a matrix vecor product Vr where V contains few rows and many columns. The computation is the same: skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaDoubleComplex_ptr v = (v_0 .. v_i.. v_k) @param[in] r magmaDoubleComplex_ptr r @param[in] d1 magmaDoubleComplex_ptr workspace @param[in] d2 magmaDoubleComplex_ptr workspace @param[out] skp magmaDoubleComplex_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_z ********************************************************************/ extern "C" magma_int_t magma_zgemvmdot( magma_int_t n, magma_int_t k, magmaDoubleComplex_ptr v, magmaDoubleComplex_ptr r, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int rows_left = k; int offset = 0; int chunk_size = 4; // process in chunks of 10 - has to be adapted to hardware and precision while( rows_left > (chunk_size) ) { magma_zmdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue ); offset = offset + chunk_size; rows_left = rows_left-chunk_size; } // process rest magma_zmdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue ); return MAGMA_SUCCESS; }
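// Illustrative reference (not part of the original MAGMA source): the fused
// kernels above compute k dot products against a common vector r,
// skp[j] = sum_i v[i + j*n] * r[i], via block-wise partial products followed
// by a tree reduction (note the kernels multiply v[i] * r[i] without
// conjugation, despite the "dotc" name). A plain host-side restatement, with
// std::complex<double> standing in for magmaDoubleComplex, might look like:
#include <complex>
#include <vector>

std::vector<std::complex<double>> mdot_reference(
    int n, int k,
    const std::vector<std::complex<double>> &v,   // column j stored at v[j*n .. j*n + n)
    const std::vector<std::complex<double>> &r) { // single vector of length n
  std::vector<std::complex<double>> skp(k, std::complex<double>(0.0, 0.0));
  for (int j = 0; j < k; ++j) {
    for (int i = 0; i < n; ++i) {
      skp[j] += v[static_cast<size_t>(j) * n + i] * r[i];
    }
  }
  return skp;
}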
0ab33b5103d4b6268dc6e2bf6a7c42b461a191f3.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @precisions normal z -> c d s @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 #define PRECISION_z // initialize arrays with zero __global__ void magma_zgpumemzero( magmaDoubleComplex * d, int n, int k ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { for (int j = 0; j < k; j++) d[ i+j*n ] = MAGMA_Z_MAKE( 0.0, 0.0 ); } } // dot product __global__ void magma_zdot_kernel( int Gs, int n, magmaDoubleComplex * v, magmaDoubleComplex * r, magmaDoubleComplex * vtmp) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_Z_MAKE( 0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // dot product for multiple vectors __global__ void magma_zblockdot_kernel( int Gs, int n, int k, magmaDoubleComplex * v, magmaDoubleComplex * r, magmaDoubleComplex * vtmp) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // k vectors v(i) if (i<n){ for( j=0; j<k; j++) temp[Idx+j*blockDim.x] = v[i+j*n] * r[i]; } else { for( j=0; j<k; j++) temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; 
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for multiple vectors __global__ void magma_zblockreduce_kernel( int Gs, int n, int k, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ] : MAGMA_Z_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<k; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // accelerated reduction for one vector __global__ void magma_zreduce_kernel_fast( int Gs, int n, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_Z_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + 
blockSize < Gs ) ? vtmp[ i + blockSize ] : MAGMA_Z_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // accelerated block reduction for multiple vectors __global__ void magma_zblockreduce_kernel_fast( int Gs, int n, int k, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<k; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_Z_MAKE( 0.0, 0.0); while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_Z_MAKE( 0.0, 0.0); i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<k; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<k; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<k; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx 
== 0 ){ for( j=0; j<k; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of vectors v_i such that skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaDoubleComplex_ptr v = (v_0 .. v_i.. v_k) @param[in] r magmaDoubleComplex_ptr r @param[in] d1 magmaDoubleComplex_ptr workspace @param[in] d2 magmaDoubleComplex_ptr workspace @param[out] skp magmaDoubleComplex_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmdotc( magma_int_t n, magma_int_t k, magmaDoubleComplex_ptr v, magmaDoubleComplex_ptr r, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = (k)* (local_block_size) * sizeof( magmaDoubleComplex ); // k vecs magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; if (k>1) { magma_zblockdot_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>( Gs.x, n, k, v, r, d1 ); } else { magma_zdot_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>( Gs.x, n, v, r, d1 ); } /* // not necessary to zero GPU mem magma_zgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d1, n*k,1 ); magma_zgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d2, n*k,1 ); //magmablas_zlaset( MagmaFull, n, k, d1, n, UNKNOWN ); //magmablas_zlaset( MagmaFull, n, k, d2, n, UNKNOWN ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); magma_zblockreduce_kernel<<< Gs_next.x, Bs.x, Ms, queue->cuda_stream >>> ( Gs.x, n, k, aux1, aux2 ); Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } for( int j=0; j<k; j++) { magma_zcopyvector( 1, aux1+j*n, 1, skp+j, 1, UNKNOWN ); } */ if ( k>1) { while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_zblockreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>> ( Gs.x, n, k, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } } else { while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_zreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } } magma_zcopyvector_async( k, aux1, n, skp, 1, queue ); return MAGMA_SUCCESS; } /** Purpose ------- This is an extension of the merged dot product above by chunking the set of vectors v_i such that the data always fits into cache. It is equivalent to a matrix vecor product Vr where V contains few rows and many columns. The computation is the same: skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaDoubleComplex_ptr v = (v_0 .. v_i.. v_k) @param[in] r magmaDoubleComplex_ptr r @param[in] d1 magmaDoubleComplex_ptr workspace @param[in] d2 magmaDoubleComplex_ptr workspace @param[out] skp magmaDoubleComplex_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_z ********************************************************************/ extern "C" magma_int_t magma_zgemvmdot( magma_int_t n, magma_int_t k, magmaDoubleComplex_ptr v, magmaDoubleComplex_ptr r, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int rows_left = k; int offset = 0; int chunk_size = 4; // process in chunks of 10 - has to be adapted to hardware and precision while( rows_left > (chunk_size) ) { magma_zmdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue ); offset = offset + chunk_size; rows_left = rows_left-chunk_size; } // process rest magma_zmdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue ); return MAGMA_SUCCESS; }
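The MAGMA kernels above all build on the same block-level tree reduction in shared memory: halve the number of active threads each step, with a volatile-pointer unroll for the final warp in the real-precision branches. Below is a minimal stand-alone sketch of that pattern for plain float; it is not MAGMA code, and the names and the 256-thread block size are illustrative only.

__global__ void partial_dot(int n, const float *v, const float *r, float *vtmp)
{
    extern __shared__ float temp[];
    int idx = threadIdx.x;
    int i   = blockIdx.x * blockDim.x + idx;

    // each thread loads one product (or 0 past the end of the vectors)
    temp[idx] = (i < n) ? v[i] * r[i] : 0.0f;
    __syncthreads();

    // tree reduction: 128 -> 64 -> ... -> 1 active threads
    // (assumes a power-of-two block size, e.g. 256)
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (idx < stride)
            temp[idx] += temp[idx + stride];
        __syncthreads();
    }

    // one partial sum per block; a second pass (as magma_zreduce_kernel_fast
    // does above) or a host-side sum finishes the dot product
    if (idx == 0)
        vtmp[blockIdx.x] = temp[0];
}

// launch sketch:
//   partial_dot<<<grid, 256, 256 * sizeof(float), stream>>>(n, v, r, vtmp);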
3600faa495b4a8fa4a476738729df131acd1beb5.hip
// !!! This is a file automatically generated by hipify!!! /* * PROJECT: Pairwise sequence alignments on GPU * FILE: psa_swgotoh_2b_mixed_gpu * AUTHOR(S): Alejandro Chacon <[email protected]> * DESCRIPTION: Device functions for the SW-Gotoh GPU implementation using: * (A) pack 4 SW in the same register mixing integer instructions and video instructions. * (B) 32bit integer resources + and store the temporal columns with 8 bits. * (C) bases are represented using 2 bits/base. */ extern "C" { #include "../../include/psa_pairwise_gpu.h" } #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "../../include/simd_functions.h" #ifndef QUERIES_SIZE #define QUERIES_SIZE 100 #endif #ifndef CANDIDATES_SIZE #define CANDIDATES_SIZE 120 #endif #define MAX3(a,b,c) (MAX(MAX(a, b), c)) #define WARP_SIZE 32 #define MAX_THREADS_PER_SM 64 #define CUDA_NUM_THREADS 64 #define THREADS_PER_SEGMENT 32 #define NUM_SW_PER_BLOCK (MAX_THREADS_PER_SM / THREADS_PER_SEGMENT) #define NUM_WARPS (MAX_THREADS_PER_SM / WARP_SIZE) #define BAND_LEN 8 #define VECTOR_ELEMENTS 4 #define MAX_QUERY_SIZE QUERIES_SIZE #define BIAS 16 #define RAW_BASES_PER_ENTRY (UINT32_LENGTH / RAW_4B_LENGTH) typedef union { uchar4 c4; uint32_t i; }score_type; typedef uchar4 score_type2; inline __device__ uint32_t sim_vmaxu4(uint32_t NUM_A, uint32_t NUM_B){ return(MAX(NUM_A & 0xFF000000, NUM_B & 0xFF000000) | MAX(NUM_A & 0x00FF0000, NUM_B & 0x00FF0000) | MAX(NUM_A & 0x0000FF00, NUM_B & 0x0000FF00) | MAX(NUM_A & 0x000000FF, NUM_B & 0x000000FF)); } inline __device__ void update_band(int32_t idRow, const uint32_t q_i, const uint32_t *ref_cache, uint32_t *H_band, uint32_t *F_band, uint32_t *H_temp, uint32_t *E_temp, uint32_t *H_maxScore, const uint32_t MATCH_SCORE, const uint32_t MISMATCH_SCORE, const uint32_t OPEN_INDEL_SCORE, const uint32_t EXTEND_INDEL_SCORE) { //Biased Scores to 16 const uint32_t ZERO = 0x10101010; uint32_t H_diag = H_band[0]; H_band[0] = H_temp[idRow]; uint32_t E = E_temp[idRow]; #pragma unroll for (uint32_t j = 1; j <= BAND_LEN; ++j) { // update F const uint32_t ftop = F_band[j] - EXTEND_INDEL_SCORE; const uint32_t htop = H_band[j] - OPEN_INDEL_SCORE; F_band[j] = vmaxu4(ftop, htop); // update E const uint32_t eleft = E - EXTEND_INDEL_SCORE; const uint32_t hleft = H_band[j-1] - OPEN_INDEL_SCORE; E = vmaxu4(eleft, hleft); // update H const uint32_t r_j = ref_cache[j-1]; const uint32_t Eq = vcmpeq4(r_j, q_i); const uint32_t notEq = ~Eq; const uint32_t diagonal = (notEq & (H_diag - MISMATCH_SCORE)) | (Eq & (H_diag + MATCH_SCORE)); const uint32_t top = F_band[j]; const uint32_t left = E; uint32_t hi = vmaxu4(vmaxu4(left, top), diagonal); hi = vmaxu4(hi, ZERO); H_diag = H_band[j]; H_band[j] = hi; (* H_maxScore) = vmaxu4((* H_maxScore), hi); } H_temp[idRow] = H_band[BAND_LEN]; E_temp[idRow] = E; } __global__ void localProcessSWTiling(RAWHlfEntry_t *d_CandidatesHlfRaw, uint32_t *d_CandidatesHlfRAWposition, RAWHlfEntry_t *d_QueriesHlfRaw, uint32_t *d_QueriesHlfRAWposition, alignmentInfo_t *d_AlignmentsInfo, alignmentEntry_t *d_AlignmentsResults, uint32_t querySize, uint32_t candidateSize, uint32_t candidatesNum) { const uint32_t idCandidate = (blockIdx.x * MAX_THREADS_PER_SM + threadIdx.x) * VECTOR_ELEMENTS; if (idCandidate < (candidatesNum)) { const RAWHlfEntry_t* candidate0 = d_CandidatesHlfRaw + d_CandidatesHlfRAWposition[idCandidate]; const RAWHlfEntry_t* candidate1 = d_CandidatesHlfRaw + d_CandidatesHlfRAWposition[idCandidate + 1]; const RAWHlfEntry_t* candidate2 = d_CandidatesHlfRaw + d_CandidatesHlfRAWposition[idCandidate 
+ 2]; const RAWHlfEntry_t* candidate3 = d_CandidatesHlfRaw + d_CandidatesHlfRAWposition[idCandidate + 3]; const RAWHlfEntry_t* query0 = d_QueriesHlfRaw + d_QueriesHlfRAWposition[d_AlignmentsInfo[idCandidate]]; const RAWHlfEntry_t* query1 = d_QueriesHlfRaw + d_QueriesHlfRAWposition[d_AlignmentsInfo[idCandidate + 1]]; const RAWHlfEntry_t* query2 = d_QueriesHlfRaw + d_QueriesHlfRAWposition[d_AlignmentsInfo[idCandidate + 2]]; const RAWHlfEntry_t* query3 = d_QueriesHlfRaw + d_QueriesHlfRAWposition[d_AlignmentsInfo[idCandidate + 3]]; // All the scores have to be absolute numbers: // Original BWA Scores: Gap_Ex = -1; Gap_Op = -2; Match = 2; Miss = -5; const score_type MATCH_SCORE = { 2, 2, 2, 2}; const score_type MISMATCH_SCORE = { 5, 5, 5, 5}; const score_type OPEN_INDEL_SCORE = { 2, 2, 2, 2}; const score_type EXTEND_INDEL_SCORE = { 1, 1, 1, 1}; //Biased Scores to 16 const score_type ZERO = { BIAS, BIAS, BIAS, BIAS}; score_type r_cache[BAND_LEN]; score_type H_temp [MAX_QUERY_SIZE]; score_type E_temp [MAX_QUERY_SIZE]; score_type H_band [BAND_LEN + 1]; score_type F_band [BAND_LEN + 1]; const int32_t numRows = querySize, numColumns = candidateSize; int32_t idColumn, idRow, idBand; score_type H_maxScore = ZERO; uint32_t idCandidateEntry = 0; uint32_t entryCandidate0, entryCandidate1, entryCandidate2, entryCandidate3; for(idBand = 0; idBand < MAX_QUERY_SIZE; ++idBand){ H_temp[idBand].i = ZERO.i; E_temp[idBand].i = ZERO.i; } // Compute Score SW-GOTOH for(idColumn = 0; idColumn < numColumns; idColumn += BAND_LEN){ uint32_t idQueryEntry = 0; uint32_t entryQuery0, entryQuery1, entryQuery2, entryQuery3; if((idColumn % RAW_BASES_PER_ENTRY) == 0){ entryCandidate0 = candidate0[idCandidateEntry]; entryCandidate1 = candidate1[idCandidateEntry]; entryCandidate2 = candidate2[idCandidateEntry]; entryCandidate3 = candidate3[idCandidateEntry]; idCandidateEntry++; } // Load a block of entries from the reference #pragma unroll for (uint32_t idBand = 0; idBand < BAND_LEN; ++idBand){ r_cache[idBand].c4 = make_uchar4(entryCandidate0 & 0x3, entryCandidate1 & 0x3, entryCandidate2 & 0x3, entryCandidate3 & 0x3); entryCandidate0 >>= RAW_4B_LENGTH; entryCandidate1 >>= RAW_4B_LENGTH; entryCandidate2 >>= RAW_4B_LENGTH; entryCandidate3 >>= RAW_4B_LENGTH; } // Initialize the first band #pragma unroll for (uint32_t idBand = 0; idBand <= BAND_LEN; ++idBand){ H_band[idBand].i = ZERO.i; F_band[idBand].i = ZERO.i; } #pragma unroll 1 for(idRow = 0; idRow < numRows; ++idRow){ entryQuery0 >>= RAW_4B_LENGTH; entryQuery1 >>= RAW_4B_LENGTH; entryQuery2 >>= RAW_4B_LENGTH; entryQuery3 >>= RAW_4B_LENGTH; if((idRow % RAW_BASES_PER_ENTRY) == 0){ entryQuery0 = query0[idQueryEntry]; entryQuery1 = query1[idQueryEntry]; entryQuery2 = query2[idQueryEntry]; entryQuery3 = query3[idQueryEntry]; idQueryEntry++; } const score_type q_i = {entryQuery0 & 0x03, entryQuery1 & 0x03, entryQuery2 & 0x03, entryQuery3 & 0x03}; update_band(idRow, q_i.i, (uint32_t *) r_cache, (uint32_t *) H_band, (uint32_t *) F_band, (uint32_t *) H_temp, (uint32_t *) E_temp, &H_maxScore.i, MATCH_SCORE.i, MISMATCH_SCORE.i, OPEN_INDEL_SCORE.i, EXTEND_INDEL_SCORE.i); } } d_AlignmentsResults[idCandidate].score = H_maxScore.c4.x - BIAS; d_AlignmentsResults[idCandidate + 1].score = H_maxScore.c4.y - BIAS; d_AlignmentsResults[idCandidate + 2].score = H_maxScore.c4.z - BIAS; d_AlignmentsResults[idCandidate + 3].score = H_maxScore.c4.w - BIAS; d_AlignmentsResults[idCandidate].column = 0; d_AlignmentsResults[idCandidate + 1].column = 0; d_AlignmentsResults[idCandidate + 2].column = 0; 
d_AlignmentsResults[idCandidate + 3].column = 0; } } extern "C" psaError_t localProcessPairwiseStream(sequences_t *candidates, sequences_t *queries, alignments_t *alignments) { uint32_t blocks = DIV_CEIL(DIV_CEIL(candidates->num, VECTOR_ELEMENTS), CUDA_NUM_THREADS); uint32_t threads = CUDA_NUM_THREADS; uint32_t querySize = queries->h_size[0]; uint32_t candidateSize = candidates->h_size[0]; hipDeviceSetCacheConfig(hipFuncCachePreferL1); printf("Grid Size: %d, Block Size: %d, Total alignments: %d, BAND_LEN: %d \n", blocks, threads, candidates->num, BAND_LEN); hipLaunchKernelGGL(( localProcessSWTiling), dim3(blocks), dim3(threads), 0, 0, candidates->d_HlfRAW, candidates->d_HlfRAWposition, queries->d_HlfRAW, queries->d_HlfRAWposition, alignments->d_info, alignments->d_results, querySize, candidateSize, candidates->num - VECTOR_ELEMENTS); hipDeviceSynchronize(); return (SUCCESS); }
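The .hip file above and the .cu file below differ mostly in mechanical rewrites made by the hipify tool; the most visible one is the kernel launch. A small self-contained illustration of the two launch forms, using a generic kernel rather than the psa code:

__global__ void fill_kernel(float *out, int n, float value)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = value;
}

void launch_example(float *d_out, int n, cudaStream_t stream)
{
    dim3 block(256), grid((n + 255) / 256);

    // CUDA form, as in the .cu file below:
    fill_kernel<<<grid, block, 0, stream>>>(d_out, n, 1.0f);

    // HIP form that hipify emits, as seen in the .hip file above
    // (built with hipcc, with hipStream_t in place of cudaStream_t):
    //   hipLaunchKernelGGL(fill_kernel, grid, block, 0, stream, d_out, n, 1.0f);
}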
3600faa495b4a8fa4a476738729df131acd1beb5.cu
/* * PROJECT: Pairwise sequence alignments on GPU * FILE: psa_swgotoh_2b_mixed_gpu * AUTHOR(S): Alejandro Chacon <[email protected]> * DESCRIPTION: Device functions for the SW-Gotoh GPU implementation using: * (A) pack 4 SW in the same register mixing integer instructions and video instructions. * (B) 32bit integer resources + and store the temporal columns with 8 bits. * (C) bases are represented using 2 bits/base. */ extern "C" { #include "../../include/psa_pairwise_gpu.h" } #include <cuda_runtime.h> #include <cuda.h> #include "../../include/simd_functions.h" #ifndef QUERIES_SIZE #define QUERIES_SIZE 100 #endif #ifndef CANDIDATES_SIZE #define CANDIDATES_SIZE 120 #endif #define MAX3(a,b,c) (MAX(MAX(a, b), c)) #define WARP_SIZE 32 #define MAX_THREADS_PER_SM 64 #define CUDA_NUM_THREADS 64 #define THREADS_PER_SEGMENT 32 #define NUM_SW_PER_BLOCK (MAX_THREADS_PER_SM / THREADS_PER_SEGMENT) #define NUM_WARPS (MAX_THREADS_PER_SM / WARP_SIZE) #define BAND_LEN 8 #define VECTOR_ELEMENTS 4 #define MAX_QUERY_SIZE QUERIES_SIZE #define BIAS 16 #define RAW_BASES_PER_ENTRY (UINT32_LENGTH / RAW_4B_LENGTH) typedef union { uchar4 c4; uint32_t i; }score_type; typedef uchar4 score_type2; inline __device__ uint32_t sim_vmaxu4(uint32_t NUM_A, uint32_t NUM_B){ return(MAX(NUM_A & 0xFF000000, NUM_B & 0xFF000000) | MAX(NUM_A & 0x00FF0000, NUM_B & 0x00FF0000) | MAX(NUM_A & 0x0000FF00, NUM_B & 0x0000FF00) | MAX(NUM_A & 0x000000FF, NUM_B & 0x000000FF)); } inline __device__ void update_band(int32_t idRow, const uint32_t q_i, const uint32_t *ref_cache, uint32_t *H_band, uint32_t *F_band, uint32_t *H_temp, uint32_t *E_temp, uint32_t *H_maxScore, const uint32_t MATCH_SCORE, const uint32_t MISMATCH_SCORE, const uint32_t OPEN_INDEL_SCORE, const uint32_t EXTEND_INDEL_SCORE) { //Biased Scores to 16 const uint32_t ZERO = 0x10101010; uint32_t H_diag = H_band[0]; H_band[0] = H_temp[idRow]; uint32_t E = E_temp[idRow]; #pragma unroll for (uint32_t j = 1; j <= BAND_LEN; ++j) { // update F const uint32_t ftop = F_band[j] - EXTEND_INDEL_SCORE; const uint32_t htop = H_band[j] - OPEN_INDEL_SCORE; F_band[j] = vmaxu4(ftop, htop); // update E const uint32_t eleft = E - EXTEND_INDEL_SCORE; const uint32_t hleft = H_band[j-1] - OPEN_INDEL_SCORE; E = vmaxu4(eleft, hleft); // update H const uint32_t r_j = ref_cache[j-1]; const uint32_t Eq = vcmpeq4(r_j, q_i); const uint32_t notEq = ~Eq; const uint32_t diagonal = (notEq & (H_diag - MISMATCH_SCORE)) | (Eq & (H_diag + MATCH_SCORE)); const uint32_t top = F_band[j]; const uint32_t left = E; uint32_t hi = vmaxu4(vmaxu4(left, top), diagonal); hi = vmaxu4(hi, ZERO); H_diag = H_band[j]; H_band[j] = hi; (* H_maxScore) = vmaxu4((* H_maxScore), hi); } H_temp[idRow] = H_band[BAND_LEN]; E_temp[idRow] = E; } __global__ void localProcessSWTiling(RAWHlfEntry_t *d_CandidatesHlfRaw, uint32_t *d_CandidatesHlfRAWposition, RAWHlfEntry_t *d_QueriesHlfRaw, uint32_t *d_QueriesHlfRAWposition, alignmentInfo_t *d_AlignmentsInfo, alignmentEntry_t *d_AlignmentsResults, uint32_t querySize, uint32_t candidateSize, uint32_t candidatesNum) { const uint32_t idCandidate = (blockIdx.x * MAX_THREADS_PER_SM + threadIdx.x) * VECTOR_ELEMENTS; if (idCandidate < (candidatesNum)) { const RAWHlfEntry_t* candidate0 = d_CandidatesHlfRaw + d_CandidatesHlfRAWposition[idCandidate]; const RAWHlfEntry_t* candidate1 = d_CandidatesHlfRaw + d_CandidatesHlfRAWposition[idCandidate + 1]; const RAWHlfEntry_t* candidate2 = d_CandidatesHlfRaw + d_CandidatesHlfRAWposition[idCandidate + 2]; const RAWHlfEntry_t* candidate3 = d_CandidatesHlfRaw + 
d_CandidatesHlfRAWposition[idCandidate + 3]; const RAWHlfEntry_t* query0 = d_QueriesHlfRaw + d_QueriesHlfRAWposition[d_AlignmentsInfo[idCandidate]]; const RAWHlfEntry_t* query1 = d_QueriesHlfRaw + d_QueriesHlfRAWposition[d_AlignmentsInfo[idCandidate + 1]]; const RAWHlfEntry_t* query2 = d_QueriesHlfRaw + d_QueriesHlfRAWposition[d_AlignmentsInfo[idCandidate + 2]]; const RAWHlfEntry_t* query3 = d_QueriesHlfRaw + d_QueriesHlfRAWposition[d_AlignmentsInfo[idCandidate + 3]]; // All the scores have to be absolute numbers: // Original BWA Scores: Gap_Ex = -1; Gap_Op = -2; Match = 2; Miss = -5; const score_type MATCH_SCORE = { 2, 2, 2, 2}; const score_type MISMATCH_SCORE = { 5, 5, 5, 5}; const score_type OPEN_INDEL_SCORE = { 2, 2, 2, 2}; const score_type EXTEND_INDEL_SCORE = { 1, 1, 1, 1}; //Biased Scores to 16 const score_type ZERO = { BIAS, BIAS, BIAS, BIAS}; score_type r_cache[BAND_LEN]; score_type H_temp [MAX_QUERY_SIZE]; score_type E_temp [MAX_QUERY_SIZE]; score_type H_band [BAND_LEN + 1]; score_type F_band [BAND_LEN + 1]; const int32_t numRows = querySize, numColumns = candidateSize; int32_t idColumn, idRow, idBand; score_type H_maxScore = ZERO; uint32_t idCandidateEntry = 0; uint32_t entryCandidate0, entryCandidate1, entryCandidate2, entryCandidate3; for(idBand = 0; idBand < MAX_QUERY_SIZE; ++idBand){ H_temp[idBand].i = ZERO.i; E_temp[idBand].i = ZERO.i; } // Compute Score SW-GOTOH for(idColumn = 0; idColumn < numColumns; idColumn += BAND_LEN){ uint32_t idQueryEntry = 0; uint32_t entryQuery0, entryQuery1, entryQuery2, entryQuery3; if((idColumn % RAW_BASES_PER_ENTRY) == 0){ entryCandidate0 = candidate0[idCandidateEntry]; entryCandidate1 = candidate1[idCandidateEntry]; entryCandidate2 = candidate2[idCandidateEntry]; entryCandidate3 = candidate3[idCandidateEntry]; idCandidateEntry++; } // Load a block of entries from the reference #pragma unroll for (uint32_t idBand = 0; idBand < BAND_LEN; ++idBand){ r_cache[idBand].c4 = make_uchar4(entryCandidate0 & 0x3, entryCandidate1 & 0x3, entryCandidate2 & 0x3, entryCandidate3 & 0x3); entryCandidate0 >>= RAW_4B_LENGTH; entryCandidate1 >>= RAW_4B_LENGTH; entryCandidate2 >>= RAW_4B_LENGTH; entryCandidate3 >>= RAW_4B_LENGTH; } // Initialize the first band #pragma unroll for (uint32_t idBand = 0; idBand <= BAND_LEN; ++idBand){ H_band[idBand].i = ZERO.i; F_band[idBand].i = ZERO.i; } #pragma unroll 1 for(idRow = 0; idRow < numRows; ++idRow){ entryQuery0 >>= RAW_4B_LENGTH; entryQuery1 >>= RAW_4B_LENGTH; entryQuery2 >>= RAW_4B_LENGTH; entryQuery3 >>= RAW_4B_LENGTH; if((idRow % RAW_BASES_PER_ENTRY) == 0){ entryQuery0 = query0[idQueryEntry]; entryQuery1 = query1[idQueryEntry]; entryQuery2 = query2[idQueryEntry]; entryQuery3 = query3[idQueryEntry]; idQueryEntry++; } const score_type q_i = {entryQuery0 & 0x03, entryQuery1 & 0x03, entryQuery2 & 0x03, entryQuery3 & 0x03}; update_band(idRow, q_i.i, (uint32_t *) r_cache, (uint32_t *) H_band, (uint32_t *) F_band, (uint32_t *) H_temp, (uint32_t *) E_temp, &H_maxScore.i, MATCH_SCORE.i, MISMATCH_SCORE.i, OPEN_INDEL_SCORE.i, EXTEND_INDEL_SCORE.i); } } d_AlignmentsResults[idCandidate].score = H_maxScore.c4.x - BIAS; d_AlignmentsResults[idCandidate + 1].score = H_maxScore.c4.y - BIAS; d_AlignmentsResults[idCandidate + 2].score = H_maxScore.c4.z - BIAS; d_AlignmentsResults[idCandidate + 3].score = H_maxScore.c4.w - BIAS; d_AlignmentsResults[idCandidate].column = 0; d_AlignmentsResults[idCandidate + 1].column = 0; d_AlignmentsResults[idCandidate + 2].column = 0; d_AlignmentsResults[idCandidate + 3].column = 0; } } extern "C" 
psaError_t localProcessPairwiseStream(sequences_t *candidates, sequences_t *queries, alignments_t *alignments) { uint32_t blocks = DIV_CEIL(DIV_CEIL(candidates->num, VECTOR_ELEMENTS), CUDA_NUM_THREADS); uint32_t threads = CUDA_NUM_THREADS; uint32_t querySize = queries->h_size[0]; uint32_t candidateSize = candidates->h_size[0]; cudaThreadSetCacheConfig(cudaFuncCachePreferL1); printf("Grid Size: %d, Block Size: %d, Total alignments: %d, BAND_LEN: %d \n", blocks, threads, candidates->num, BAND_LEN); localProcessSWTiling<<<blocks, threads>>>(candidates->d_HlfRAW, candidates->d_HlfRAWposition, queries->d_HlfRAW, queries->d_HlfRAWposition, alignments->d_info, alignments->d_results, querySize, candidateSize, candidates->num - VECTOR_ELEMENTS); cudaThreadSynchronize(); return (SUCCESS); }
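The kernel pair above scores four alignments at once by packing one unsigned byte per alignment into a 32-bit register, with every score biased by BIAS = 16 so it never goes negative; a per-lane maximum is what makes the packed recurrence work. Here is a sketch of that lane-wise max, mirroring the sim_vmaxu4 fallback in the file (the project's vmaxu4/vcmpeq4 from simd_functions.h are assumed to wrap the corresponding CUDA byte-SIMD intrinsics such as __vmaxu4):

// Lane-wise unsigned max over packed bytes: lane k of the result is
// max(lane k of a, lane k of b). Illustrative only, not project code.
__host__ __device__ inline unsigned int lane_max_u4(unsigned int a, unsigned int b)
{
    unsigned int result = 0;
    for (int shift = 0; shift < 32; shift += 8) {
        unsigned int la = (a >> shift) & 0xFFu;
        unsigned int lb = (b >> shift) & 0xFFu;
        result |= (la > lb ? la : lb) << shift;
    }
    return result;
}

// Recovering a final score: lane k stores score_k + BIAS, so
//   score_k = ((packed >> (8 * k)) & 0xFF) - 16;
// which is how H_maxScore.c4.{x,y,z,w} - BIAS is consumed above.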
f6d429cf53ce2218429a7eedeaf51e793bc620d7.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************* * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. * * See LICENSE.txt for license information ************************************************************************/ #include "hip/hip_runtime.h" #include "common.h" void print_header() { PRINT("# %10s %12s %8s %6s out-of-place in-place \n", "", "", "", ""); PRINT("# %10s %12s %8s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "size", "count", "type", "redop", "time", "algbw", "busbw", "error", "time", "algbw", "busbw", "error"); PRINT("# %10s %12s %8s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "(B)", "(elements)", "", "", "(us)", "(GB/s)", "(GB/s)", "", "(us)", "(GB/s)", "(GB/s)", ""); } void print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) { PRINT("%12li %12li %8s %6s", size, count, typeName, opName); } void AllReduceGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) { *sendcount = count; *recvcount = count; *sendInplaceOffset = 0; *recvInplaceOffset = 0; *paramcount = *sendcount; } testResult_t AllReduceInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) { size_t sendcount = args->sendBytes / wordSize(type); size_t recvcount = args->expectedBytes / wordSize(type); int nranks = args->nProcs*args->nThreads*args->nGpus; for (int i=0; i<args->nGpus; i++) { int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i; CUDACHECK(hipSetDevice(gpuid)); int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i); CUDACHECK(hipMemset(args->recvbuffs[i], 0, args->expectedBytes)); void* data = in_place ? 
args->recvbuffs[i] : args->sendbuffs[i]; TESTCHECK(InitData(data, sendcount, type, rep, rank)); TESTCHECK(InitDataReduce(args->expected[i], recvcount, 0, type, op, rep, nranks)); CUDACHECK(hipDeviceSynchronize()); } return testSuccess; } void AllReduceGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) { double baseBw = (double)(count * typesize) / 1.0E9 / sec; *algBw = baseBw; double factor = ((double)(2*(nranks - 1)))/((double)nranks); *busBw = baseBw * factor; } testResult_t AllReduceRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, hipStream_t stream) { NCCLCHECK(ncclAllReduce(sendbuff, recvbuff, count, type, op, comm, stream)); return testSuccess; } struct testColl allReduceTest = { "AllReduce", AllReduceGetCollByteCount, AllReduceInitData, AllReduceGetBw, AllReduceRunColl }; void AllReduceGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) { size_t paramcount, sendInplaceOffset, recvInplaceOffset; AllReduceGetCollByteCount(sendcount, recvcount, &paramcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks); } testResult_t AllReduceRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) { args->collTest = &allReduceTest; ncclDataType_t *run_types; ncclRedOp_t *run_ops; const char **run_typenames, **run_opnames; int type_count, op_count; if ((int)type != -1) { type_count = 1; run_types = &type; run_typenames = &typeName; } else { type_count = test_typenum; run_types = test_types; run_typenames = test_typenames; } if ((int)op != -1) { op_count = 1; run_ops = &op; run_opnames = &opName; } else { op_count = test_opnum; run_ops = test_ops; run_opnames = test_opnames; } for (int i=0; i<type_count; i++) { for (int j=0; j<op_count; j++) { TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], run_ops[j], run_opnames[j], -1)); } } return testSuccess; } struct testEngine allReduceEngine = { AllReduceGetBuffSize, AllReduceRunTest }; #pragma weak ncclTestEngine=allReduceEngine
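AllReduceGetBw above applies the usual ring-allreduce traffic model: each byte effectively crosses the interconnect 2*(nranks - 1)/nranks times, so bus bandwidth is the algorithm bandwidth times that factor. A worked example with made-up numbers (illustrative only, not measured data):

#include <stdio.h>

int main(void)
{
    double bytes = 1.0e9;   // hypothetical 1 GB allreduce payload
    double sec   = 0.020;   // hypothetical 20 ms completion time
    int nranks   = 8;

    double algbw = bytes / 1.0e9 / sec;                    // 50.0 GB/s
    double busbw = algbw * (2.0 * (nranks - 1) / nranks);  // x 1.75 = 87.5 GB/s
    printf("algbw = %.1f GB/s, busbw = %.1f GB/s\n", algbw, busbw);
    return 0;
}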
f6d429cf53ce2218429a7eedeaf51e793bc620d7.cu
/************************************************************************* * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. * * See LICENSE.txt for license information ************************************************************************/ #include "cuda_runtime.h" #include "common.h" void print_header() { PRINT("# %10s %12s %8s %6s out-of-place in-place \n", "", "", "", ""); PRINT("# %10s %12s %8s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "size", "count", "type", "redop", "time", "algbw", "busbw", "error", "time", "algbw", "busbw", "error"); PRINT("# %10s %12s %8s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "(B)", "(elements)", "", "", "(us)", "(GB/s)", "(GB/s)", "", "(us)", "(GB/s)", "(GB/s)", ""); } void print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) { PRINT("%12li %12li %8s %6s", size, count, typeName, opName); } void AllReduceGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) { *sendcount = count; *recvcount = count; *sendInplaceOffset = 0; *recvInplaceOffset = 0; *paramcount = *sendcount; } testResult_t AllReduceInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) { size_t sendcount = args->sendBytes / wordSize(type); size_t recvcount = args->expectedBytes / wordSize(type); int nranks = args->nProcs*args->nThreads*args->nGpus; for (int i=0; i<args->nGpus; i++) { int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i; CUDACHECK(cudaSetDevice(gpuid)); int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i); CUDACHECK(cudaMemset(args->recvbuffs[i], 0, args->expectedBytes)); void* data = in_place ? 
args->recvbuffs[i] : args->sendbuffs[i]; TESTCHECK(InitData(data, sendcount, type, rep, rank)); TESTCHECK(InitDataReduce(args->expected[i], recvcount, 0, type, op, rep, nranks)); CUDACHECK(cudaDeviceSynchronize()); } return testSuccess; } void AllReduceGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) { double baseBw = (double)(count * typesize) / 1.0E9 / sec; *algBw = baseBw; double factor = ((double)(2*(nranks - 1)))/((double)nranks); *busBw = baseBw * factor; } testResult_t AllReduceRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, cudaStream_t stream) { NCCLCHECK(ncclAllReduce(sendbuff, recvbuff, count, type, op, comm, stream)); return testSuccess; } struct testColl allReduceTest = { "AllReduce", AllReduceGetCollByteCount, AllReduceInitData, AllReduceGetBw, AllReduceRunColl }; void AllReduceGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) { size_t paramcount, sendInplaceOffset, recvInplaceOffset; AllReduceGetCollByteCount(sendcount, recvcount, &paramcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks); } testResult_t AllReduceRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) { args->collTest = &allReduceTest; ncclDataType_t *run_types; ncclRedOp_t *run_ops; const char **run_typenames, **run_opnames; int type_count, op_count; if ((int)type != -1) { type_count = 1; run_types = &type; run_typenames = &typeName; } else { type_count = test_typenum; run_types = test_types; run_typenames = test_typenames; } if ((int)op != -1) { op_count = 1; run_ops = &op; run_opnames = &opName; } else { op_count = test_opnum; run_ops = test_ops; run_opnames = test_opnames; } for (int i=0; i<type_count; i++) { for (int j=0; j<op_count; j++) { TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], run_ops[j], run_opnames[j], -1)); } } return testSuccess; } struct testEngine allReduceEngine = { AllReduceGetBuffSize, AllReduceRunTest }; #pragma weak ncclTestEngine=allReduceEngine
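Both versions of this test end with `#pragma weak ncclTestEngine=allReduceEngine`: the shared harness refers to one well-known engine symbol, and each test source binds that symbol to its own engine struct through a weak alias. A tiny stand-alone sketch of the idiom; the names here are illustrative and not the nccl-tests API:

#include <stdio.h>

struct demoEngine { const char *name; };

struct demoEngine allReduceDemoEngine = { "AllReduce" };

// the harness only references demoTestEngine; this weak alias provides it
#pragma weak demoTestEngine = allReduceDemoEngine
extern struct demoEngine demoTestEngine;

int main(void)
{
    printf("engine: %s\n", demoTestEngine.name);   // prints "AllReduce"
    return 0;
}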
a0d8158e42fec1bf42f4fd0857308c11470b43af.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> // TODO(T47953967) to make this cuda kernel support all datatypes. __global__ void GatherScatterCudaKernel( const float* __restrict__ input, const int64_t* __restrict__ edges, float* __restrict__ output, bool directed, bool backward, const size_t V, const size_t D, const size_t E) { const int tid = threadIdx.x; // Reverse the vertex order if backward. const int v0_idx = backward ? 1 : 0; const int v1_idx = backward ? 0 : 1; // Edges are split evenly across the blocks. for (int e = blockIdx.x; e < E; e += gridDim.x) { // Get indices of vertices which form the edge. const int64_t v0 = edges[2 * e + v0_idx]; const int64_t v1 = edges[2 * e + v1_idx]; // Split vertex features evenly across threads. // This implementation will be quite wasteful when D<128 since there will be // a lot of threads doing nothing. for (int d = tid; d < D; d += blockDim.x) { const float val = input[v1 * D + d]; float* address = output + v0 * D + d; atomicAdd(address, val); if (!directed) { const float val = input[v0 * D + d]; float* address = output + v1 * D + d; atomicAdd(address, val); } } __syncthreads(); } } at::Tensor GatherScatterCuda( const at::Tensor& input, const at::Tensor& edges, bool directed, bool backward) { // Check inputs are on the same device at::TensorArg input_t{input, "input", 1}, edges_t{edges, "edges", 2}; at::CheckedFrom c = "GatherScatterCuda"; at::checkAllSameGPU(c, {input_t, edges_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const auto num_vertices = input.size(0); const auto input_feature_dim = input.size(1); const auto num_edges = edges.size(0); auto output = at::zeros({num_vertices, input_feature_dim}, input.options()); const size_t threads = 128; const size_t max_blocks = 1920; const size_t blocks = num_edges < max_blocks ? num_edges : max_blocks; if (output.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return output; } hipLaunchKernelGGL(( GatherScatterCudaKernel), dim3(blocks), dim3(threads), 0, stream, input.contiguous().data_ptr<float>(), edges.contiguous().data_ptr<int64_t>(), output.data_ptr<float>(), directed, backward, num_vertices, input_feature_dim, num_edges); AT_CUDA_CHECK(hipGetLastError()); return output; }
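GatherScatterCudaKernel above walks the edge list with a block-stride loop (e starts at blockIdx.x and advances by gridDim.x) and the feature dimension with a thread-stride loop, so a fixed launch of at most 1920 blocks by 128 threads covers any graph size. The same idea in its most common thread-level form is the grid-stride loop; a generic sketch, not project code:

__global__ void saxpy_grid_stride(int n, float a, const float *x, float *y)
{
    // each thread handles elements i, i + stride, i + 2*stride, ...
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < n;
         i += blockDim.x * gridDim.x) {
        y[i] = a * x[i] + y[i];
    }
}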
a0d8158e42fec1bf42f4fd0857308c11470b43af.cu
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> // TODO(T47953967) to make this cuda kernel support all datatypes. __global__ void GatherScatterCudaKernel( const float* __restrict__ input, const int64_t* __restrict__ edges, float* __restrict__ output, bool directed, bool backward, const size_t V, const size_t D, const size_t E) { const int tid = threadIdx.x; // Reverse the vertex order if backward. const int v0_idx = backward ? 1 : 0; const int v1_idx = backward ? 0 : 1; // Edges are split evenly across the blocks. for (int e = blockIdx.x; e < E; e += gridDim.x) { // Get indices of vertices which form the edge. const int64_t v0 = edges[2 * e + v0_idx]; const int64_t v1 = edges[2 * e + v1_idx]; // Split vertex features evenly across threads. // This implementation will be quite wasteful when D<128 since there will be // a lot of threads doing nothing. for (int d = tid; d < D; d += blockDim.x) { const float val = input[v1 * D + d]; float* address = output + v0 * D + d; atomicAdd(address, val); if (!directed) { const float val = input[v0 * D + d]; float* address = output + v1 * D + d; atomicAdd(address, val); } } __syncthreads(); } } at::Tensor GatherScatterCuda( const at::Tensor& input, const at::Tensor& edges, bool directed, bool backward) { // Check inputs are on the same device at::TensorArg input_t{input, "input", 1}, edges_t{edges, "edges", 2}; at::CheckedFrom c = "GatherScatterCuda"; at::checkAllSameGPU(c, {input_t, edges_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(input.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const auto num_vertices = input.size(0); const auto input_feature_dim = input.size(1); const auto num_edges = edges.size(0); auto output = at::zeros({num_vertices, input_feature_dim}, input.options()); const size_t threads = 128; const size_t max_blocks = 1920; const size_t blocks = num_edges < max_blocks ? num_edges : max_blocks; if (output.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return output; } GatherScatterCudaKernel<<<blocks, threads, 0, stream>>>( input.contiguous().data_ptr<float>(), edges.contiguous().data_ptr<int64_t>(), output.data_ptr<float>(), directed, backward, num_vertices, input_feature_dim, num_edges); AT_CUDA_CHECK(cudaGetLastError()); return output; }
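An alternative decomposition of the same gather/scatter step assigns one thread per (edge, feature) pair instead of one block per edge; duplicate destination vertices still force the accumulation to be atomic. A minimal sketch with assumed layouts (float features [V, D], edge list [E, 2] stored as (dst, src) pairs); this is not the pytorch3d API:

__global__ void edge_scatter_add(const float   *input,   // [V, D] vertex features
                                 const int64_t *edges,   // [E, 2] pairs (dst, src)
                                 float         *output,  // [V, D], zero-initialized
                                 int64_t E, int64_t D)
{
    int64_t idx = (int64_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= E * D) return;

    int64_t e = idx / D;
    int64_t d = idx % D;
    int64_t dst = edges[2 * e];       // vertex receiving the contribution
    int64_t src = edges[2 * e + 1];   // vertex supplying the feature

    // several edges may share a destination, so the add must be atomic
    atomicAdd(&output[dst * D + d], input[src * D + d]);
}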
2b26fba5c1f30351b46b5859baa66d9e02b9c39e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <hipcub/hipcub.hpp> #include "cuda_utils.h" #include "random/rng.h" #include "test_utils.h" namespace MLCommon { namespace Random { enum RandomType { RNG_Uniform }; template <typename T, int TPB> __global__ void meanKernel(float *out, const T *data, int len) { typedef hipcub::BlockReduce<float, TPB> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int tid = threadIdx.x + blockIdx.x * blockDim.x; float val = tid < len ? data[tid] : T(0); float x = BlockReduce(temp_storage).Sum(val); __syncthreads(); float xx = BlockReduce(temp_storage).Sum(val * val); __syncthreads(); if (threadIdx.x == 0) { myAtomicAdd(out, x); myAtomicAdd(out + 1, xx); } } template <typename T> struct RngInputs { float tolerance; int len; // start, end: for uniform // mean, sigma: for normal/lognormal // mean, beta: for gumbel // mean, scale: for logistic and laplace // lambda: for exponential // sigma: for rayleigh T start, end; RandomType type; GeneratorType gtype; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const RngInputs<T> &dims) { return os; } template <typename T> class RngTest : public ::testing::TestWithParam<RngInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<RngInputs<T>>::GetParam(); Rng r(params.seed, params.gtype); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); allocate(data, params.len); allocate(stats, 2, true); switch (params.type) { case RNG_Uniform: r.uniformInt(data, params.len, params.start, params.end, stream); break; }; static const int threads = 128; hipLaunchKernelGGL(( meanKernel<T, threads>), dim3(ceildiv(params.len, threads)), dim3(threads), 0, stream, stats, data, params.len); updateHost<float>(h_stats, stats, 2, stream); CUDA_CHECK(hipStreamSynchronize(stream)); h_stats[0] /= params.len; h_stats[1] = (h_stats[1] / params.len) - (h_stats[0] * h_stats[0]); CUDA_CHECK(hipStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(stats)); } void getExpectedMeanVar(float meanvar[2]) { switch (params.type) { case RNG_Uniform: meanvar[0] = (params.start + params.end) * 0.5f; meanvar[1] = params.end - params.start; meanvar[1] = meanvar[1] * meanvar[1] / 12.f; break; }; } protected: RngInputs<T> params; T *data; float *stats; float h_stats[2]; // mean, var }; typedef RngTest<uint32_t> RngTestU32; const std::vector<RngInputs<uint32_t>> inputs_u32 = { {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL}, {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenTaps, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenTaps, 1234ULL}, {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenKiss99, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenKiss99, 1234ULL}}; TEST_P(RngTestU32, Result) { float meanvar[2]; 
getExpectedMeanVar(meanvar); ASSERT_TRUE( match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance))); ASSERT_TRUE( match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(RngTests, RngTestU32, ::testing::ValuesIn(inputs_u32)); typedef RngTest<uint64_t> RngTestU64; const std::vector<RngInputs<uint64_t>> inputs_u64 = { {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL}, {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenTaps, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenTaps, 1234ULL}, {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenKiss99, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenKiss99, 1234ULL}}; TEST_P(RngTestU64, Result) { float meanvar[2]; getExpectedMeanVar(meanvar); ASSERT_TRUE( match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance))); ASSERT_TRUE( match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(RngTests, RngTestU64, ::testing::ValuesIn(inputs_u64)); typedef RngTest<int32_t> RngTestS32; const std::vector<RngInputs<int32_t>> inputs_s32 = { {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL}, {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenTaps, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenTaps, 1234ULL}, {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenKiss99, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenKiss99, 1234ULL}}; TEST_P(RngTestS32, Result) { float meanvar[2]; getExpectedMeanVar(meanvar); ASSERT_TRUE( match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance))); ASSERT_TRUE( match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(RngTests, RngTestS32, ::testing::ValuesIn(inputs_s32)); typedef RngTest<int64_t> RngTestS64; const std::vector<RngInputs<int64_t>> inputs_s64 = { {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL}, {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenTaps, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenTaps, 1234ULL}, {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenKiss99, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenKiss99, 1234ULL}}; TEST_P(RngTestS64, Result) { float meanvar[2]; getExpectedMeanVar(meanvar); ASSERT_TRUE( match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance))); ASSERT_TRUE( match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(RngTests, RngTestS64, ::testing::ValuesIn(inputs_s64)); } // end namespace Random } // end namespace MLCommon
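getExpectedMeanVar above checks the uniform-integer generators against the continuous-uniform moments, mean = (start + end)/2 and variance = (end - start)^2/12; for start = 0, end = 20 that gives 10 and roughly 33.3, and the 0.1 tolerance absorbs the small discrete-versus-continuous discrepancy. A quick host-side mirror of the formula, for illustration only:

#include <cstdio>

int main()
{
    float start = 0.0f, end = 20.0f;                     // values used in inputs_* above
    float mean = (start + end) * 0.5f;                   // 10.0
    float var  = (end - start) * (end - start) / 12.0f;  // ~33.33
    std::printf("expected mean = %.2f, variance = %.2f\n", mean, var);
    return 0;
}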
2b26fba5c1f30351b46b5859baa66d9e02b9c39e.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <cub/cub.cuh> #include "cuda_utils.h" #include "random/rng.h" #include "test_utils.h" namespace MLCommon { namespace Random { enum RandomType { RNG_Uniform }; template <typename T, int TPB> __global__ void meanKernel(float *out, const T *data, int len) { typedef cub::BlockReduce<float, TPB> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int tid = threadIdx.x + blockIdx.x * blockDim.x; float val = tid < len ? data[tid] : T(0); float x = BlockReduce(temp_storage).Sum(val); __syncthreads(); float xx = BlockReduce(temp_storage).Sum(val * val); __syncthreads(); if (threadIdx.x == 0) { myAtomicAdd(out, x); myAtomicAdd(out + 1, xx); } } template <typename T> struct RngInputs { float tolerance; int len; // start, end: for uniform // mean, sigma: for normal/lognormal // mean, beta: for gumbel // mean, scale: for logistic and laplace // lambda: for exponential // sigma: for rayleigh T start, end; RandomType type; GeneratorType gtype; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const RngInputs<T> &dims) { return os; } template <typename T> class RngTest : public ::testing::TestWithParam<RngInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<RngInputs<T>>::GetParam(); Rng r(params.seed, params.gtype); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); allocate(data, params.len); allocate(stats, 2, true); switch (params.type) { case RNG_Uniform: r.uniformInt(data, params.len, params.start, params.end, stream); break; }; static const int threads = 128; meanKernel<T, threads><<<ceildiv(params.len, threads), threads, 0, stream>>>( stats, data, params.len); updateHost<float>(h_stats, stats, 2, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); h_stats[0] /= params.len; h_stats[1] = (h_stats[1] / params.len) - (h_stats[0] * h_stats[0]); CUDA_CHECK(cudaStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(stats)); } void getExpectedMeanVar(float meanvar[2]) { switch (params.type) { case RNG_Uniform: meanvar[0] = (params.start + params.end) * 0.5f; meanvar[1] = params.end - params.start; meanvar[1] = meanvar[1] * meanvar[1] / 12.f; break; }; } protected: RngInputs<T> params; T *data; float *stats; float h_stats[2]; // mean, var }; typedef RngTest<uint32_t> RngTestU32; const std::vector<RngInputs<uint32_t>> inputs_u32 = { {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL}, {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenTaps, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenTaps, 1234ULL}, {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenKiss99, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenKiss99, 1234ULL}}; TEST_P(RngTestU32, Result) { float meanvar[2]; getExpectedMeanVar(meanvar); ASSERT_TRUE( match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance))); 
ASSERT_TRUE( match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(RngTests, RngTestU32, ::testing::ValuesIn(inputs_u32)); typedef RngTest<uint64_t> RngTestU64; const std::vector<RngInputs<uint64_t>> inputs_u64 = { {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL}, {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenTaps, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenTaps, 1234ULL}, {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenKiss99, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenKiss99, 1234ULL}}; TEST_P(RngTestU64, Result) { float meanvar[2]; getExpectedMeanVar(meanvar); ASSERT_TRUE( match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance))); ASSERT_TRUE( match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(RngTests, RngTestU64, ::testing::ValuesIn(inputs_u64)); typedef RngTest<int32_t> RngTestS32; const std::vector<RngInputs<int32_t>> inputs_s32 = { {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL}, {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenTaps, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenTaps, 1234ULL}, {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenKiss99, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenKiss99, 1234ULL}}; TEST_P(RngTestS32, Result) { float meanvar[2]; getExpectedMeanVar(meanvar); ASSERT_TRUE( match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance))); ASSERT_TRUE( match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(RngTests, RngTestS32, ::testing::ValuesIn(inputs_s32)); typedef RngTest<int64_t> RngTestS64; const std::vector<RngInputs<int64_t>> inputs_s64 = { {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenPhilox, 1234ULL}, {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenTaps, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenTaps, 1234ULL}, {0.1f, 32 * 1024, 0, 20, RNG_Uniform, GenKiss99, 1234ULL}, {0.1f, 8 * 1024, 0, 20, RNG_Uniform, GenKiss99, 1234ULL}}; TEST_P(RngTestS64, Result) { float meanvar[2]; getExpectedMeanVar(meanvar); ASSERT_TRUE( match(meanvar[0], h_stats[0], CompareApprox<float>(params.tolerance))); ASSERT_TRUE( match(meanvar[1], h_stats[1], CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(RngTests, RngTestS64, ::testing::ValuesIn(inputs_s64)); } // end namespace Random } // end namespace MLCommon
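In contrast to the hand-rolled shared-memory reductions in the MAGMA kernels earlier in this dump, meanKernel above delegates the block-level sum to cub::BlockReduce. A minimal stand-alone sketch of that usage with an illustrative kernel, not the cuML test itself:

#include <cub/cub.cuh>

template <int TPB>
__global__ void block_sum(const float *data, int len, float *out)
{
    typedef cub::BlockReduce<float, TPB> BlockReduce;
    __shared__ typename BlockReduce::TempStorage temp_storage;

    int tid = blockIdx.x * TPB + threadIdx.x;
    float val = (tid < len) ? data[tid] : 0.0f;

    // the reduced value is only defined for thread 0 of each block
    float block_total = BlockReduce(temp_storage).Sum(val);
    if (threadIdx.x == 0)
        atomicAdd(out, block_total);
}

// launch sketch (out must point to a zero-initialized device float):
//   block_sum<256><<<(len + 255) / 256, 256, 0, stream>>>(data, len, out);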
83c3236908eff27fb43be7dc4aa9be38f2feaed9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ml.h" #include "cuda_macro.h" namespace gpuNN { __device__ void cuda_sum_weights(int connection, float * inputs, float * weights) { extern __shared__ cudafloat tempWeights[]; tempWeights[connection] = weights[connection]; if (threadIdx.x > 0) tempWeights[connection] *= inputs[blockIdx.x * (blockDim.x - 1) + (threadIdx.x - 1)]; __syncthreads(); int sumElements = blockDim.x; for (int sumUpTo = (sumElements >> 1); sumElements > 1; sumUpTo = (sumElements >> 1)) { int nextNumberElemSum = sumUpTo; if (sumElements & 1) nextNumberElemSum++; if (threadIdx.x < sumUpTo) tempWeights[connection] += tempWeights[connection + nextNumberElemSum]; sumElements = nextNumberElemSum; __syncthreads(); } } __global__ void kSigmoid(const int nThreads, float const *input, float *output) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { output[i] = 1.0 / (1.0 + ::exp(-input[i])); } } __global__ void kTanh(const int nThreads, float const *input, float *output) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { output[i] = tanh(input[i]); } } __global__ void kTanhDerivative(const int nThreads, float const *input, float *output) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { output[i] = 1 - (tanh(input[i]) * tanh(input[i])); } } __global__ void kSigmoid_d(const int nThreads, float const *input, float *output) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { output[i] = input[i] * (1 - input[i]); } } __global__ void kDot(const int nThreads, const float *m1, const float *m2, float *output, const int m1_rows, const int m1_columns, const int m2_columns) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { int r = (int)i / m2_columns; int c = i % m2_columns; float t_output = 0.f; for (int k = 0; k < m1_columns; ++k) { t_output += m1[r * m1_columns + k] * m2[k * m2_columns + c]; } output[i] = t_output; } } __global__ void cuda_activate_output_layer(float * inputs, float * weights, int mOffset, float * expected_outputs,float * outputs, float * gradients, float * rootMeanSquare) { int connection = threadIdx.y * blockDim.x + threadIdx.x; cuda_sum_weights(connection, inputs, weights); extern __shared__ float tempWeights[]; float * sharedRootMeanSquare = (tempWeights + (blockDim.x * blockDim.y)); if (threadIdx.x == 0) { int n = blockIdx.x * blockDim.y + threadIdx.y; float output = sigmoid(tempWeights[connection]); float outn = output; float error = (expected_outputs[n] - output); outputs[n] = outn; gradients[n] = error *sigmoid_derivate(output); sharedRootMeanSquare[threadIdx.y] = error * error; } if (blockDim.y > 1) { __syncthreads(); if (threadIdx.x == 0 && (threadIdx.y & 1) == 0 && threadIdx.y + 1 < blockDim.y) sharedRootMeanSquare[threadIdx.y] += sharedRootMeanSquare[threadIdx.y + 1]; __syncthreads(); int nextInterval; for (int interval = 2; interval < blockDim.y; interval = nextInterval) { nextInterval = interval << 1; if (threadIdx.x == 0 && (threadIdx.y & (nextInterval - 1)) == 0 && threadIdx.y + interval < blockDim.y) sharedRootMeanSquare[threadIdx.y] += sharedRootMeanSquare[threadIdx.y + interval]; __syncthreads(); } } if (threadIdx.y == 0 && threadIdx.x == 0) rootMeanSquare[blockIdx.x] = sharedRootMeanSquare[0]; } __global__ void cuda_activate_layer(float * inputs, float * 
weights,int mOffset, float * outputs) { extern __shared__ float tempWeights[]; int connection = threadIdx.y * blockDim.x + threadIdx.x; cuda_sum_weights(connection, inputs, weights); if (threadIdx.x == 0) { int n = blockIdx.x * blockDim.y + threadIdx.y; cudafloat output = sigmoid(tempWeights[connection]); outputs[n] = output; } } __global__ void cuda_calculate_gradients(float * rmsF, float * outputs, float* weights, int mOffset, float * localGradientNextLayer, int neuronsNextLayer, int neurons, float * localGradient) { extern __shared__ cudafloat lg[]; float * lgNextLayer = (lg + (blockDim.y * blockDim.x)); int threadId = (threadIdx.y * blockDim.x + threadIdx.x); for (int neuron = threadIdx.y; neuron < neurons + threadIdx.y; neuron += blockDim.y) { lg[threadId] = 0; for (int outputNeuron = threadIdx.x; outputNeuron < neuronsNextLayer + threadIdx.x; outputNeuron += blockDim.x) { if (threadIdx.y == 0 && outputNeuron < neuronsNextLayer) { lgNextLayer[threadIdx.x] = localGradientNextLayer[blockIdx.x * neuronsNextLayer + outputNeuron]; } __syncthreads(); if (outputNeuron < neuronsNextLayer && neuron < neurons) { int connection = outputNeuron * (neurons + 1) + neuron + 1; lg[threadId] += weights[connection] * lgNextLayer[threadIdx.x]; } __syncthreads(); } int numberElemSum = blockDim.x; for (int sumUpTo = (numberElemSum >> 1); numberElemSum > 1; sumUpTo = (numberElemSum >> 1)) { int nextNumberElemSum = sumUpTo; if (numberElemSum & 1) nextNumberElemSum++; if (threadIdx.x < sumUpTo) lg[threadId] += lg[threadId + nextNumberElemSum]; numberElemSum = nextNumberElemSum; __syncthreads(); } if (threadIdx.x == 0 && neuron < neurons) { int n = blockIdx.x * neurons + neuron; cudafloat Fh = outputs[n]; cudafloat lgn = lg[threadId]; localGradient[n] = lgn * CUDA_SIGMOID_DERIVATE(Fh); } } } __global__ void cuda_calculate_weights_block128(float * rmsF, float * inputs, float * localGradient, float * selectiveNeuronsWeights, float * selectiveNeuronsBias, float * learningRateWeights, float * learningRateBias, float * lastDeltaWithoutLearningMomentumWeights, float * lastDeltaWithoutLearningMomentumBias, float * lastDeltaWeights, float * lastDeltaBias, float u, float d, float r, float maxStepSize, float momentum, int numberPatterns) { extern __shared__ float deltasWeights[]; float * deltasBias = (deltasWeights + blockDim.x); deltasBias[threadIdx.x] = 0.0; deltasWeights[threadIdx.x] = 0.0; for (int p = threadIdx.x; p < numberPatterns; p += blockDim.x) { int n = p * gridDim.x + blockIdx.x; float i = inputs[n]; if (!isfinite(i)) { float delta = localGradient[n]; deltasBias[threadIdx.x] += delta; deltasWeights[threadIdx.x] += delta * i; } } __syncthreads(); if (threadIdx.x < 64) { deltasBias[threadIdx.x] += deltasBias[threadIdx.x + 64]; deltasWeights[threadIdx.x] += deltasWeights[threadIdx.x + 64]; } __syncthreads(); if (threadIdx.x < 32) { volatile float * _deltasBias = deltasBias; volatile float * _deltasWeights = deltasWeights; // Perform the unroll looping _deltasBias[threadIdx.x] += _deltasBias[threadIdx.x + 32]; _deltasWeights[threadIdx.x] += _deltasWeights[threadIdx.x + 32]; _deltasBias[threadIdx.x] += _deltasBias[threadIdx.x + 16]; _deltasWeights[threadIdx.x] += _deltasWeights[threadIdx.x + 16]; _deltasBias[threadIdx.x] += _deltasBias[threadIdx.x + 8]; _deltasWeights[threadIdx.x] += _deltasWeights[threadIdx.x + 8]; _deltasBias[threadIdx.x] += _deltasBias[threadIdx.x + 4]; _deltasWeights[threadIdx.x] += _deltasWeights[threadIdx.x + 4]; _deltasBias[threadIdx.x] += _deltasBias[threadIdx.x + 2]; 
_deltasWeights[threadIdx.x] += _deltasWeights[threadIdx.x + 2]; _deltasBias[threadIdx.x] += _deltasBias[threadIdx.x + 1]; _deltasWeights[threadIdx.x] += _deltasWeights[threadIdx.x + 1]; if (threadIdx.x == 0) { float deltaB = deltasBias[0] / numberPatterns; float deltaW = deltasWeights[0] / numberPatterns; float learnRateB = learningRateBias[blockIdx.x]; float learnRateW = learningRateWeights[blockIdx.x]; float factorB = same(lastDeltaWithoutLearningMomentumBias[blockIdx.x], deltaB) ? u : d; float factorW = same(lastDeltaWithoutLearningMomentumWeights[blockIdx.x], deltaW) ? u : d; learnRateB *= factorB; learnRateW *= factorW; if (learnRateB > maxStepSize) learnRateB = maxStepSize; if (learnRateW > maxStepSize) learnRateW = maxStepSize; learningRateBias[blockIdx.x] = learnRateB; learningRateWeights[blockIdx.x] = learnRateW; lastDeltaWithoutLearningMomentumBias[blockIdx.x] = deltaB; lastDeltaWithoutLearningMomentumWeights[blockIdx.x] = deltaW; deltaB += momentum * lastDeltaBias[blockIdx.x]; deltaW += momentum * lastDeltaWeights[blockIdx.x]; lastDeltaBias[blockIdx.x] = deltaB; lastDeltaWeights[blockIdx.x] = deltaW; float wb = selectiveNeuronsBias[blockIdx.x] + (learnRateB * deltaB); float w = selectiveNeuronsWeights[blockIdx.x] + (learnRateW * deltaW); if (!isfinite(wb)) { lastDeltaBias[blockIdx.x] = 0.0; lastDeltaWithoutLearningMomentumBias[blockIdx.x] = 0.0; } else { selectiveNeuronsBias[blockIdx.x] = wb; } if (!isfinite(w)) { lastDeltaWeights[blockIdx.x] = 0.0; lastDeltaWithoutLearningMomentumWeights[blockIdx.x] = 0.0; } else { selectiveNeuronsWeights[blockIdx.x] = w; } } } } void cuda_activate_layerWrapper(hipStream_t stream, dim3 & gridSize, int blockSize, float * inputs, float * weights, int mOffset, float * outputs, int numInputs) { switch (blockSize) { case 1: cuda_activate_layerTemplate<1> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 2: cuda_activate_layerTemplate<2> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 4: cuda_activate_layerTemplate<4> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 8: cuda_activate_layerTemplate<8> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 16: cuda_activate_layerTemplate<16> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 32: cuda_activate_layerTemplate<32> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 64: cuda_activate_layerTemplate<64> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 128: cuda_activate_layerTemplate<128> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 256: cuda_activate_layerTemplate<256> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 512: cuda_activate_layerTemplate<512> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 1024: cuda_activate_layerTemplate<1024> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, 
outputs, numInputs); break; case 2048: cuda_activate_layerTemplate<2048> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; } } void cuda_Calculate_errorsWrapper(hipStream_t stream, int blockSize, float* rms, float* rmsF, int patternsNo, float numberPatternsNeurons) { switch (blockSize) { case 1024: cuda_calculate_errors<1024> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 512: cuda_calculate_errors<512> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 256: cuda_calculate_errors<256> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 128: cuda_calculate_errors<128> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 64: cuda_calculate_errors<64> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 32: cuda_calculate_errors<32> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 16: cuda_calculate_errors<16> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 8: cuda_calculate_errors<8> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 4: cuda_calculate_errors<4> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 2: cuda_calculate_errors<2> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 1: cuda_calculate_errors<1> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; } } void cuda_correct_weights_Wrapper(hipStream_t stream, dim3 & gridSize, int blockSize, float * rmsF, float * inputs, float * localGradient, float * weights, float * learningRate, float * lastDeltaWithoutLearningMomentum, float * lastDelta, float maxStepSize, float u, float d, float momentum, int numberPatterns) { switch (blockSize) { case 1024: cuda_correct_weights<1024> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 512: cuda_correct_weights<512> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 256: cuda_correct_weights<256> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 128: cuda_correct_weights<128> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 64: cuda_correct_weights<64> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, 
lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 32: cuda_correct_weights<32> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 16: cuda_correct_weights<16> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 8: cuda_correct_weights<8> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 4: cuda_correct_weights<4> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 2: cuda_correct_weights<2> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 1: cuda_correct_weights<1> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >>> (rmsF,inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d,momentum, numberPatterns); break; } } }
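The hipified wrapper above keeps the CUDA triple-chevron launch syntax, which hipcc accepts. For reference, the same dispatch can also be expressed with HIP's portable hipLaunchKernelGGL macro; the sketch below rewrites a single case under the assumption that cuda_activate_layerTemplate and the cudafloat typedef are the ones declared earlier in this file (not shown in this excerpt).

// Sketch only: one branch of the block-size dispatch using the portable
// HIP launch macro instead of the <<< >>> syntax preserved by hipify.
// cuda_activate_layerTemplate<128> and cudafloat are assumed to come from
// earlier in this translation unit.
static void activate_layer_block128(hipStream_t stream, dim3 gridSize,
                                    float* inputs, float* weights, int mOffset,
                                    float* outputs, int numInputs) {
    const int blockSize = 128;
    hipLaunchKernelGGL(cuda_activate_layerTemplate<128>,
                       gridSize, dim3(blockSize),
                       blockSize * sizeof(cudafloat),   // dynamic shared memory
                       stream,
                       inputs, weights, mOffset, outputs, numInputs);
}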
83c3236908eff27fb43be7dc4aa9be38f2feaed9.cu
#include "ml.h" #include "cuda_macro.h" namespace gpuNN { __device__ void cuda_sum_weights(int connection, float * inputs, float * weights) { extern __shared__ cudafloat tempWeights[]; tempWeights[connection] = weights[connection]; if (threadIdx.x > 0) tempWeights[connection] *= inputs[blockIdx.x * (blockDim.x - 1) + (threadIdx.x - 1)]; __syncthreads(); int sumElements = blockDim.x; for (int sumUpTo = (sumElements >> 1); sumElements > 1; sumUpTo = (sumElements >> 1)) { int nextNumberElemSum = sumUpTo; if (sumElements & 1) nextNumberElemSum++; if (threadIdx.x < sumUpTo) tempWeights[connection] += tempWeights[connection + nextNumberElemSum]; sumElements = nextNumberElemSum; __syncthreads(); } } __global__ void kSigmoid(const int nThreads, float const *input, float *output) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { output[i] = 1.0 / (1.0 + std::exp(-input[i])); } } __global__ void kTanh(const int nThreads, float const *input, float *output) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { output[i] = tanh(input[i]); } } __global__ void kTanhDerivative(const int nThreads, float const *input, float *output) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { output[i] = 1 - (tanh(input[i]) * tanh(input[i])); } } __global__ void kSigmoid_d(const int nThreads, float const *input, float *output) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { output[i] = input[i] * (1 - input[i]); } } __global__ void kDot(const int nThreads, const float *m1, const float *m2, float *output, const int m1_rows, const int m1_columns, const int m2_columns) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { int r = (int)i / m2_columns; int c = i % m2_columns; float t_output = 0.f; for (int k = 0; k < m1_columns; ++k) { t_output += m1[r * m1_columns + k] * m2[k * m2_columns + c]; } output[i] = t_output; } } __global__ void cuda_activate_output_layer(float * inputs, float * weights, int mOffset, float * expected_outputs,float * outputs, float * gradients, float * rootMeanSquare) { int connection = threadIdx.y * blockDim.x + threadIdx.x; cuda_sum_weights(connection, inputs, weights); extern __shared__ float tempWeights[]; float * sharedRootMeanSquare = (tempWeights + (blockDim.x * blockDim.y)); if (threadIdx.x == 0) { int n = blockIdx.x * blockDim.y + threadIdx.y; float output = sigmoid(tempWeights[connection]); float outn = output; float error = (expected_outputs[n] - output); outputs[n] = outn; gradients[n] = error *sigmoid_derivate(output); sharedRootMeanSquare[threadIdx.y] = error * error; } if (blockDim.y > 1) { __syncthreads(); if (threadIdx.x == 0 && (threadIdx.y & 1) == 0 && threadIdx.y + 1 < blockDim.y) sharedRootMeanSquare[threadIdx.y] += sharedRootMeanSquare[threadIdx.y + 1]; __syncthreads(); int nextInterval; for (int interval = 2; interval < blockDim.y; interval = nextInterval) { nextInterval = interval << 1; if (threadIdx.x == 0 && (threadIdx.y & (nextInterval - 1)) == 0 && threadIdx.y + interval < blockDim.y) sharedRootMeanSquare[threadIdx.y] += sharedRootMeanSquare[threadIdx.y + interval]; __syncthreads(); } } if (threadIdx.y == 0 && threadIdx.x == 0) rootMeanSquare[blockIdx.x] = sharedRootMeanSquare[0]; } __global__ void cuda_activate_layer(float * inputs, float * weights,int mOffset, float * outputs) { extern __shared__ float tempWeights[]; int connection = 
threadIdx.y * blockDim.x + threadIdx.x; cuda_sum_weights(connection, inputs, weights); if (threadIdx.x == 0) { int n = blockIdx.x * blockDim.y + threadIdx.y; cudafloat output = sigmoid(tempWeights[connection]); outputs[n] = output; } } __global__ void cuda_calculate_gradients(float * rmsF, float * outputs, float* weights, int mOffset, float * localGradientNextLayer, int neuronsNextLayer, int neurons, float * localGradient) { extern __shared__ cudafloat lg[]; float * lgNextLayer = (lg + (blockDim.y * blockDim.x)); int threadId = (threadIdx.y * blockDim.x + threadIdx.x); for (int neuron = threadIdx.y; neuron < neurons + threadIdx.y; neuron += blockDim.y) { lg[threadId] = 0; for (int outputNeuron = threadIdx.x; outputNeuron < neuronsNextLayer + threadIdx.x; outputNeuron += blockDim.x) { if (threadIdx.y == 0 && outputNeuron < neuronsNextLayer) { lgNextLayer[threadIdx.x] = localGradientNextLayer[blockIdx.x * neuronsNextLayer + outputNeuron]; } __syncthreads(); if (outputNeuron < neuronsNextLayer && neuron < neurons) { int connection = outputNeuron * (neurons + 1) + neuron + 1; lg[threadId] += weights[connection] * lgNextLayer[threadIdx.x]; } __syncthreads(); } int numberElemSum = blockDim.x; for (int sumUpTo = (numberElemSum >> 1); numberElemSum > 1; sumUpTo = (numberElemSum >> 1)) { int nextNumberElemSum = sumUpTo; if (numberElemSum & 1) nextNumberElemSum++; if (threadIdx.x < sumUpTo) lg[threadId] += lg[threadId + nextNumberElemSum]; numberElemSum = nextNumberElemSum; __syncthreads(); } if (threadIdx.x == 0 && neuron < neurons) { int n = blockIdx.x * neurons + neuron; cudafloat Fh = outputs[n]; cudafloat lgn = lg[threadId]; localGradient[n] = lgn * CUDA_SIGMOID_DERIVATE(Fh); } } } __global__ void cuda_calculate_weights_block128(float * rmsF, float * inputs, float * localGradient, float * selectiveNeuronsWeights, float * selectiveNeuronsBias, float * learningRateWeights, float * learningRateBias, float * lastDeltaWithoutLearningMomentumWeights, float * lastDeltaWithoutLearningMomentumBias, float * lastDeltaWeights, float * lastDeltaBias, float u, float d, float r, float maxStepSize, float momentum, int numberPatterns) { extern __shared__ float deltasWeights[]; float * deltasBias = (deltasWeights + blockDim.x); deltasBias[threadIdx.x] = 0.0; deltasWeights[threadIdx.x] = 0.0; for (int p = threadIdx.x; p < numberPatterns; p += blockDim.x) { int n = p * gridDim.x + blockIdx.x; float i = inputs[n]; if (!isfinite(i)) { float delta = localGradient[n]; deltasBias[threadIdx.x] += delta; deltasWeights[threadIdx.x] += delta * i; } } __syncthreads(); if (threadIdx.x < 64) { deltasBias[threadIdx.x] += deltasBias[threadIdx.x + 64]; deltasWeights[threadIdx.x] += deltasWeights[threadIdx.x + 64]; } __syncthreads(); if (threadIdx.x < 32) { volatile float * _deltasBias = deltasBias; volatile float * _deltasWeights = deltasWeights; // Perform the unroll looping _deltasBias[threadIdx.x] += _deltasBias[threadIdx.x + 32]; _deltasWeights[threadIdx.x] += _deltasWeights[threadIdx.x + 32]; _deltasBias[threadIdx.x] += _deltasBias[threadIdx.x + 16]; _deltasWeights[threadIdx.x] += _deltasWeights[threadIdx.x + 16]; _deltasBias[threadIdx.x] += _deltasBias[threadIdx.x + 8]; _deltasWeights[threadIdx.x] += _deltasWeights[threadIdx.x + 8]; _deltasBias[threadIdx.x] += _deltasBias[threadIdx.x + 4]; _deltasWeights[threadIdx.x] += _deltasWeights[threadIdx.x + 4]; _deltasBias[threadIdx.x] += _deltasBias[threadIdx.x + 2]; _deltasWeights[threadIdx.x] += _deltasWeights[threadIdx.x + 2]; _deltasBias[threadIdx.x] += _deltasBias[threadIdx.x 
+ 1]; _deltasWeights[threadIdx.x] += _deltasWeights[threadIdx.x + 1]; if (threadIdx.x == 0) { float deltaB = deltasBias[0] / numberPatterns; float deltaW = deltasWeights[0] / numberPatterns; float learnRateB = learningRateBias[blockIdx.x]; float learnRateW = learningRateWeights[blockIdx.x]; float factorB = same(lastDeltaWithoutLearningMomentumBias[blockIdx.x], deltaB) ? u : d; float factorW = same(lastDeltaWithoutLearningMomentumWeights[blockIdx.x], deltaW) ? u : d; learnRateB *= factorB; learnRateW *= factorW; if (learnRateB > maxStepSize) learnRateB = maxStepSize; if (learnRateW > maxStepSize) learnRateW = maxStepSize; learningRateBias[blockIdx.x] = learnRateB; learningRateWeights[blockIdx.x] = learnRateW; lastDeltaWithoutLearningMomentumBias[blockIdx.x] = deltaB; lastDeltaWithoutLearningMomentumWeights[blockIdx.x] = deltaW; deltaB += momentum * lastDeltaBias[blockIdx.x]; deltaW += momentum * lastDeltaWeights[blockIdx.x]; lastDeltaBias[blockIdx.x] = deltaB; lastDeltaWeights[blockIdx.x] = deltaW; float wb = selectiveNeuronsBias[blockIdx.x] + (learnRateB * deltaB); float w = selectiveNeuronsWeights[blockIdx.x] + (learnRateW * deltaW); if (!isfinite(wb)) { lastDeltaBias[blockIdx.x] = 0.0; lastDeltaWithoutLearningMomentumBias[blockIdx.x] = 0.0; } else { selectiveNeuronsBias[blockIdx.x] = wb; } if (!isfinite(w)) { lastDeltaWeights[blockIdx.x] = 0.0; lastDeltaWithoutLearningMomentumWeights[blockIdx.x] = 0.0; } else { selectiveNeuronsWeights[blockIdx.x] = w; } } } } void cuda_activate_layerWrapper(cudaStream_t stream, dim3 & gridSize, int blockSize, float * inputs, float * weights, int mOffset, float * outputs, int numInputs) { switch (blockSize) { case 1: cuda_activate_layerTemplate<1> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 2: cuda_activate_layerTemplate<2> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 4: cuda_activate_layerTemplate<4> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 8: cuda_activate_layerTemplate<8> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 16: cuda_activate_layerTemplate<16> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 32: cuda_activate_layerTemplate<32> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 64: cuda_activate_layerTemplate<64> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 128: cuda_activate_layerTemplate<128> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 256: cuda_activate_layerTemplate<256> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 512: cuda_activate_layerTemplate<512> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 1024: cuda_activate_layerTemplate<1024> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; case 2048: cuda_activate_layerTemplate<2048> << <gridSize, blockSize, blockSize * 
sizeof(cudafloat), stream >> >(inputs, weights, mOffset, outputs, numInputs); break; } } void cuda_Calculate_errorsWrapper(cudaStream_t stream, int blockSize, float* rms, float* rmsF, int patternsNo, float numberPatternsNeurons) { switch (blockSize) { case 1024: cuda_calculate_errors<1024> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 512: cuda_calculate_errors<512> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 256: cuda_calculate_errors<256> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 128: cuda_calculate_errors<128> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 64: cuda_calculate_errors<64> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 32: cuda_calculate_errors<32> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 16: cuda_calculate_errors<16> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 8: cuda_calculate_errors<8> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 4: cuda_calculate_errors<4> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 2: cuda_calculate_errors<2> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; case 1: cuda_calculate_errors<1> << <1, blockSize, blockSize * sizeof(cudafloat), stream >> > (rms, rmsF, patternsNo, numberPatternsNeurons); break; } } void cuda_correct_weights_Wrapper(cudaStream_t stream, dim3 & gridSize, int blockSize, float * rmsF, float * inputs, float * localGradient, float * weights, float * learningRate, float * lastDeltaWithoutLearningMomentum, float * lastDelta, float maxStepSize, float u, float d, float momentum, int numberPatterns) { switch (blockSize) { case 1024: cuda_correct_weights<1024> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 512: cuda_correct_weights<512> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 256: cuda_correct_weights<256> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 128: cuda_correct_weights<128> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 64: cuda_correct_weights<64> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 32: 
cuda_correct_weights<32> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 16: cuda_correct_weights<16> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 8: cuda_correct_weights<8> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 4: cuda_correct_weights<4> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 2: cuda_correct_weights<2> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >> > (rmsF, inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d, momentum, numberPatterns); break; case 1: cuda_correct_weights<1> << <gridSize, blockSize, blockSize * sizeof(cudafloat), stream >>> (rmsF,inputs, localGradient, weights, learningRate, lastDeltaWithoutLearningMomentum, lastDelta, maxStepSize, u, d,momentum, numberPatterns); break; } } }
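The three wrappers above only handle blockSize values that are exact powers of two, so the caller is expected to round its thread count accordingly. The helper below is a minimal sketch of that rounding; the cap at 1024 and the usage shown in the comment are assumptions about the intended calling convention, not something the original file states.

// Sketch: round a requested thread count up to the next power of two,
// capped at 1024 (the usual maximum threads per block), before calling
// one of the switch-based wrappers above.
static int nextPow2Capped(int n) {
    int p = 1;
    while (p < n && p < 1024) p <<= 1;
    return p;
}
// Hypothetical usage with the error-reduction wrapper from this file:
//   int blockSize = nextPow2Capped(patternsNo);
//   cuda_Calculate_errorsWrapper(stream, blockSize, d_rms, d_rmsF,
//                                patternsNo, numberPatternsNeurons);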
2e4e3e96ab449201943613f62176e5f35a1ce152.hip
// !!! This is a file automatically generated by hipify!!! %%cuda --name im2col.cu #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <time.h> __global__ void Im2Col_optimised(float * A, float * B, int h, int w, int c, int s) { //A - input matrix(h*w*c) //size of kernel - s //B - output matrix //computing the required dimenions int blockNum = blockIdx.z * (gridDim.y * gridDim.x) + blockIdx.x * gridDim.y + blockIdx.y; int threadNum = threadIdx.z * (blockDim.y * blockDim.x) + threadIdx.x * (blockDim.y) + threadIdx.y; int globalThreadId = blockNum * (blockDim.y * blockDim.x * blockDim.z) + threadNum; //starting point of kernel moves from [0][0] to [converted_h-1][converted_w-1] int converted_h = h - s + 1; int converted_w = w - s + 1; int k = globalThreadId / (h * w); //channel number //shared memory to store the contents of one block at a time __shared__ float arr[1024]; //coalesced load int tid = threadIdx.x; arr[tid] = A[globalThreadId]; __syncthreads(); //further code to be implemented only after completely loading the shared memory blockNum %= h; int l = 0; int r = w - s; //The contents of channel k is stored in the output matrix from rows rangeup to rangedn int rangeup = s * s * k; int rangedn = s * s * (k + 1); //[startrow][startcol] denotes the right and topmost starting position of a chunk created from a single block int x = max(0, blockNum - converted_h + 1); int startrow = rangeup + (x * s); int startcol = converted_w * (blockNum - x); while (startrow < rangedn && startcol >= 0) { for (int m = 0; m < s; m++) { if (tid <= r + m && tid >= l + m) { //computing the row and col in the output matrix where the element needs to be stored int row = startrow + m; int col = startcol + (tid - l - m); //coalesced store B[(row) * converted_h * converted_w + col] = arr[tid]; __syncthreads; } } //computing the next starting postion (lying just below and to the left of the current position) of the chunk startrow += s; startcol -= converted_w; } } int main(void) { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; // Take the image and kernel size as input int height, width, channel, size_kernel; printf("Enter values of image height, width and number of channels: "); scanf("%d %d %d", & height, & width, & channel); printf("Enter value of kernel size: "); scanf("%d", & size_kernel); //image dimensions int numElements = height * width * channel; //starting point of kernel moves from [0][0] to [converted_h-1][converted_w-1] int converted_h = height - size_kernel + 1; int converted_w = width - size_kernel + 1; //total number of elements in the final output matrix int converted_numElements = converted_h * converted_w * channel * size_kernel * size_kernel; size_t size = numElements * sizeof(float); size_t converted_size = converted_numElements * sizeof(float); // Allocate the host input vector A float * h_A = (float * ) malloc(size); // Allocate the host output vector B float * h_B = (float * ) malloc(converted_size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vector for (int i = 0; i < numElements; ++i) { h_A[i] = i; } // Allocate the device input vector A float * d_A = NULL; err = hipMalloc((void ** ) & d_A, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output 
vector B float * d_B = NULL; err = hipMalloc((void ** ) & d_B, converted_size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vector A in host memory to the device input vectors in device memory err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } //grid and block dimensions //each block is a one-dimensional collection of threads dim3 block(width, 1, 1); dim3 grid(1, height, channel); //to record the time consumed for im2col operation hipEvent_t start, stop; hipEventCreate( & start); hipEventCreate( & stop); hipEventRecord(start); // Launch the Im2Col CUDA Kernel Im2Col_optimised << < grid, block >>> (d_A, d_B, height, width, channel, size_kernel); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime( & milliseconds, start, stop); printf("The elapsed time in gpu was %f ms", milliseconds); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch Im2Col_optimised kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device output vector in device memory to the host output vector in host memory. err = hipMemcpy(h_B, d_B, converted_size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector B from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct float A[height][width][channel]; float check[size_kernel * size_kernel * channel][converted_h * converted_w]; //creating the actual 3-D image from the 1-D array for (int gid = 0; gid < numElements; gid++) { int k = gid / (height * width); int i = (gid % (height * width)) / width; int j = (gid % (height * width)) % width; A[i][j][k] = h_A[gid]; } clock_t cpu_start, cpu_end; double cpu_time_used; cpu_start = clock(); //creating the output matrix for (int i = 0; i + size_kernel <= height; i++) { for (int j = 0; j + size_kernel <= width; j++) { for (int k = 0; k < channel; k++) { int row = k * size_kernel * size_kernel; int col = i * (width - size_kernel + 1) + j; int cnt = 0; for (int l = i; l < i + size_kernel; l++) { for (int m = j; m < j + size_kernel; m++) { check[row + cnt][col] = A[l][m][k]; cnt++; } } } } } cpu_end = clock(); cpu_time_used = ((double)(cpu_end - cpu_start)) / CLOCKS_PER_SEC; printf("\nTime elapsed in serial execution:%f ms\n", cpu_time_used * 1000.00); //checking whether the output matrix cretaed in CPU and GPU are same int gid = 0, flag = 1; for (int i = 0; i < size_kernel * size_kernel * channel; i++) { for (int j = 0; j < converted_h * converted_w; j++) { if (check[i][j] != h_B[gid]) flag = 0; gid++; } } if (flag) //if the two matrix are same printf("Success!!\n"); else printf("Failure!!\n"); // Free device global memory err = hipFree(d_A); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_B); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Reset the device and exit err = hipDeviceReset(); if (err != hipSuccess) { fprintf(stderr, "Failed to deinitialize the device! 
error=%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("\n"); // Free host memory free(h_A); free(h_B); return 0; }
2e4e3e96ab449201943613f62176e5f35a1ce152.cu
%%cuda --name im2col.cu #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_runtime.h> #include <time.h> __global__ void Im2Col_optimised(float * A, float * B, int h, int w, int c, int s) { //A - input matrix(h*w*c) //size of kernel - s //B - output matrix //computing the required dimenions int blockNum = blockIdx.z * (gridDim.y * gridDim.x) + blockIdx.x * gridDim.y + blockIdx.y; int threadNum = threadIdx.z * (blockDim.y * blockDim.x) + threadIdx.x * (blockDim.y) + threadIdx.y; int globalThreadId = blockNum * (blockDim.y * blockDim.x * blockDim.z) + threadNum; //starting point of kernel moves from [0][0] to [converted_h-1][converted_w-1] int converted_h = h - s + 1; int converted_w = w - s + 1; int k = globalThreadId / (h * w); //channel number //shared memory to store the contents of one block at a time __shared__ float arr[1024]; //coalesced load int tid = threadIdx.x; arr[tid] = A[globalThreadId]; __syncthreads(); //further code to be implemented only after completely loading the shared memory blockNum %= h; int l = 0; int r = w - s; //The contents of channel k is stored in the output matrix from rows rangeup to rangedn int rangeup = s * s * k; int rangedn = s * s * (k + 1); //[startrow][startcol] denotes the right and topmost starting position of a chunk created from a single block int x = max(0, blockNum - converted_h + 1); int startrow = rangeup + (x * s); int startcol = converted_w * (blockNum - x); while (startrow < rangedn && startcol >= 0) { for (int m = 0; m < s; m++) { if (tid <= r + m && tid >= l + m) { //computing the row and col in the output matrix where the element needs to be stored int row = startrow + m; int col = startcol + (tid - l - m); //coalesced store B[(row) * converted_h * converted_w + col] = arr[tid]; __syncthreads; } } //computing the next starting postion (lying just below and to the left of the current position) of the chunk startrow += s; startcol -= converted_w; } } int main(void) { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; // Take the image and kernel size as input int height, width, channel, size_kernel; printf("Enter values of image height, width and number of channels: "); scanf("%d %d %d", & height, & width, & channel); printf("Enter value of kernel size: "); scanf("%d", & size_kernel); //image dimensions int numElements = height * width * channel; //starting point of kernel moves from [0][0] to [converted_h-1][converted_w-1] int converted_h = height - size_kernel + 1; int converted_w = width - size_kernel + 1; //total number of elements in the final output matrix int converted_numElements = converted_h * converted_w * channel * size_kernel * size_kernel; size_t size = numElements * sizeof(float); size_t converted_size = converted_numElements * sizeof(float); // Allocate the host input vector A float * h_A = (float * ) malloc(size); // Allocate the host output vector B float * h_B = (float * ) malloc(converted_size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vector for (int i = 0; i < numElements; ++i) { h_A[i] = i; } // Allocate the device input vector A float * d_A = NULL; err = cudaMalloc((void ** ) & d_A, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector B float * d_B = NULL; err = cudaMalloc((void ** ) & d_B, 
converted_size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vector A in host memory to the device input vectors in device memory err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } //grid and block dimensions //each block is a one-dimensional collection of threads dim3 block(width, 1, 1); dim3 grid(1, height, channel); //to record the time consumed for im2col operation cudaEvent_t start, stop; cudaEventCreate( & start); cudaEventCreate( & stop); cudaEventRecord(start); // Launch the Im2Col CUDA Kernel Im2Col_optimised << < grid, block >>> (d_A, d_B, height, width, channel, size_kernel); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime( & milliseconds, start, stop); printf("The elapsed time in gpu was %f ms", milliseconds); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch Im2Col_optimised kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device output vector in device memory to the host output vector in host memory. err = cudaMemcpy(h_B, d_B, converted_size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector B from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct float A[height][width][channel]; float check[size_kernel * size_kernel * channel][converted_h * converted_w]; //creating the actual 3-D image from the 1-D array for (int gid = 0; gid < numElements; gid++) { int k = gid / (height * width); int i = (gid % (height * width)) / width; int j = (gid % (height * width)) % width; A[i][j][k] = h_A[gid]; } clock_t cpu_start, cpu_end; double cpu_time_used; cpu_start = clock(); //creating the output matrix for (int i = 0; i + size_kernel <= height; i++) { for (int j = 0; j + size_kernel <= width; j++) { for (int k = 0; k < channel; k++) { int row = k * size_kernel * size_kernel; int col = i * (width - size_kernel + 1) + j; int cnt = 0; for (int l = i; l < i + size_kernel; l++) { for (int m = j; m < j + size_kernel; m++) { check[row + cnt][col] = A[l][m][k]; cnt++; } } } } } cpu_end = clock(); cpu_time_used = ((double)(cpu_end - cpu_start)) / CLOCKS_PER_SEC; printf("\nTime elapsed in serial execution:%f ms\n", cpu_time_used * 1000.00); //checking whether the output matrix cretaed in CPU and GPU are same int gid = 0, flag = 1; for (int i = 0; i < size_kernel * size_kernel * channel; i++) { for (int j = 0; j < converted_h * converted_w; j++) { if (check[i][j] != h_B[gid]) flag = 0; gid++; } } if (flag) //if the two matrix are same printf("Success!!\n"); else printf("Failure!!\n"); // Free device global memory err = cudaFree(d_A); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_B); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Reset the device and exit err = cudaDeviceReset(); if (err != cudaSuccess) { fprintf(stderr, "Failed to deinitialize the device! 
error=%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("\n"); // Free host memory free(h_A); free(h_B); return 0; }
84eca2d6d4eda066b514fc2aba3e63f2e4ef7a55.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>   // malloc/free, atoi, exit
#include <hip/hip_runtime.h>

#define SIZE (1024 * 1024)

#define CUDA_CHECK_RETURN(value) {\
    hipError_t _m_cudaStat = value;\
    if (_m_cudaStat != hipSuccess) {\
        fprintf(stderr, "Error %s at line %d in file %s\n",\
                hipGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
        exit(1);\
    } }

// Time a single transfer between pageable host memory and the device.
// flag == true measures host -> device, flag == false device -> host.
float cuda_pageable(int size, bool flag) {
    float *host_a, *dev_a;   // float throughout, matching the byte counts copied below
    float elapsedTime;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    host_a = (float*)malloc(size * sizeof(float));
    CUDA_CHECK_RETURN(hipMalloc((void**)&dev_a, size * sizeof(float)));
    hipEventRecord(start, 0);
    if (flag) CUDA_CHECK_RETURN(hipMemcpy(dev_a, host_a, size * sizeof(float), hipMemcpyHostToDevice));
    if (!flag) CUDA_CHECK_RETURN(hipMemcpy(host_a, dev_a, size * sizeof(float), hipMemcpyDeviceToHost));
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start, stop);
    free(host_a);
    hipFree(dev_a);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return elapsedTime;
}

// Same measurement with page-locked (pinned) host memory.
float cuda_page_locked(int size, bool flag) {
    float *host_a, *dev_a;
    float elapsedTime;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    CUDA_CHECK_RETURN(hipHostMalloc((void**)&host_a, size * sizeof(float), hipHostMallocDefault));
    CUDA_CHECK_RETURN(hipMalloc((void**)&dev_a, size * sizeof(float)));
    CUDA_CHECK_RETURN(hipEventRecord(start, 0));
    if (flag) CUDA_CHECK_RETURN(hipMemcpy(dev_a, host_a, size * sizeof(float), hipMemcpyHostToDevice));
    if (!flag) CUDA_CHECK_RETURN(hipMemcpy(host_a, dev_a, size * sizeof(float), hipMemcpyDeviceToHost));
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start, stop);
    hipHostFree(host_a);
    hipFree(dev_a);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return elapsedTime;
}

int main(int argc, char *argv[]) {
    if (argc < 2) {   // guard the argv[1] read below
        fprintf(stderr, "Usage: %s <size in MB>\n", argv[0]);
        return 1;
    }
    float elapsedTime;
    int numbMB = atoi(argv[1]);
    int size = SIZE * numbMB;
    elapsedTime = cuda_pageable(size, true);
    printf("\nPageable memory:\n\n[CPU -> GPU] = %f ms\n", elapsedTime);
    elapsedTime = cuda_pageable(size, false);
    printf("[GPU -> CPU] = %f ms\n", elapsedTime);
    elapsedTime = cuda_page_locked(size, true);
    printf("\nPage-locked memory:\n\n[CPU -> GPU] = %f ms\n", elapsedTime);
    elapsedTime = cuda_page_locked(size, false);
    printf("[GPU -> CPU] = %f ms\n\n", elapsedTime);
    return 0;
}
84eca2d6d4eda066b514fc2aba3e63f2e4ef7a55.cu
#include <stdio.h>
#include <stdlib.h>   // malloc/free, atoi, exit
#include <cuda.h>

#define SIZE (1024 * 1024)

#define CUDA_CHECK_RETURN(value) {\
    cudaError_t _m_cudaStat = value;\
    if (_m_cudaStat != cudaSuccess) {\
        fprintf(stderr, "Error %s at line %d in file %s\n",\
                cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
        exit(1);\
    } }

// Time a single transfer between pageable host memory and the device.
// flag == true measures host -> device, flag == false device -> host.
float cuda_pageable(int size, bool flag) {
    float *host_a, *dev_a;   // float throughout, matching the byte counts copied below
    float elapsedTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    host_a = (float*)malloc(size * sizeof(float));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_a, size * sizeof(float)));
    cudaEventRecord(start, 0);
    if (flag) CUDA_CHECK_RETURN(cudaMemcpy(dev_a, host_a, size * sizeof(float), cudaMemcpyHostToDevice));
    if (!flag) CUDA_CHECK_RETURN(cudaMemcpy(host_a, dev_a, size * sizeof(float), cudaMemcpyDeviceToHost));
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    free(host_a);
    cudaFree(dev_a);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return elapsedTime;
}

// Same measurement with page-locked (pinned) host memory.
float cuda_page_locked(int size, bool flag) {
    float *host_a, *dev_a;
    float elapsedTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    CUDA_CHECK_RETURN(cudaHostAlloc((void**)&host_a, size * sizeof(float), cudaHostAllocDefault));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_a, size * sizeof(float)));
    CUDA_CHECK_RETURN(cudaEventRecord(start, 0));
    if (flag) CUDA_CHECK_RETURN(cudaMemcpy(dev_a, host_a, size * sizeof(float), cudaMemcpyHostToDevice));
    if (!flag) CUDA_CHECK_RETURN(cudaMemcpy(host_a, dev_a, size * sizeof(float), cudaMemcpyDeviceToHost));
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaFreeHost(host_a);
    cudaFree(dev_a);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return elapsedTime;
}

int main(int argc, char *argv[]) {
    if (argc < 2) {   // guard the argv[1] read below
        fprintf(stderr, "Usage: %s <size in MB>\n", argv[0]);
        return 1;
    }
    float elapsedTime;
    int numbMB = atoi(argv[1]);
    int size = SIZE * numbMB;
    elapsedTime = cuda_pageable(size, true);
    printf("\nPageable memory:\n\n[CPU -> GPU] = %f ms\n", elapsedTime);
    elapsedTime = cuda_pageable(size, false);
    printf("[GPU -> CPU] = %f ms\n", elapsedTime);
    elapsedTime = cuda_page_locked(size, true);
    printf("\nPage-locked memory:\n\n[CPU -> GPU] = %f ms\n", elapsedTime);
    elapsedTime = cuda_page_locked(size, false);
    printf("[GPU -> CPU] = %f ms\n\n", elapsedTime);
    return 0;
}
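The benchmark above reports raw transfer times; converting them to effective bandwidth makes the pageable versus page-locked comparison easier to read. A minimal sketch, assuming the millisecond timings returned by the functions above and the byte counts they transfer:

// Sketch: effective bandwidth in GB/s for a transfer of `bytes` bytes
// that took `elapsedMs` milliseconds (as measured with events above).
static double effectiveBandwidthGBs(size_t bytes, float elapsedMs) {
    return (static_cast<double>(bytes) / 1.0e9) / (elapsedMs / 1000.0);
}
// Hypothetical usage:
//   float ms = cuda_page_locked(size, true);
//   printf("Pinned H->D: %.2f GB/s\n",
//          effectiveBandwidthGBs((size_t)size * sizeof(float), ms));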
4d291e6d29412b6dfd1b191559e7b68631a3eafa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // inspired by https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ // TILE_DIM=32, BLOCK_ROWS=8 // No bank-conflict transpose // Same as transposeCoalesced except the first tile dimension is padded // to avoid shared memory bank conflicts. // can be used to transpose non-square 2D arrays __global__ void transposeNoBankConflicts32x8(float *odata, const float *idata, int xdim, int ydim) { __shared__ float tile[32][32 + 1]; int tilex = blockIdx.x * 32; int tiley = blockIdx.y * 32; int x = tilex + threadIdx.x; int y = tiley + threadIdx.y; for (int j = 0; j < 32; j += 8) { int index = (y + j) * xdim + x; if (index < (xdim*ydim)) { tile[threadIdx.y + j][threadIdx.x] = idata[index]; } } __syncthreads(); x = tiley + threadIdx.x; // transpose tiles y = tilex + threadIdx.y; // transpose tiles if (x >= ydim) return; // output matrix has y columns int maxJ = min(32, xdim - y); // output matrix has x rows for (int j = 0; j < maxJ; j += 8) { int index = (y+j) * ydim + x; odata[index] = tile[threadIdx.x][threadIdx.y + j]; } } __global__ void iirConvolve2D_Cardinal_Bspline_3_MirrorOffBoundNew(float* input, float* output, int xDim, int yDim) { // assign column to thread int idx = blockIdx.x * blockDim.x + threadIdx.x; // we will process data line by line, but data are stored in // columns! if (idx >= yDim) return; // only threads with data should continue float* line = output + idx; // FIXME rename to sth reasonable // adjust gain float z = sqrtf(3.f) - 2.f; float z1 = 1.0 - z; float gain = -(z1 * z1) / z; // copy original data for (int i = 0; i < xDim; i++) { line[i * yDim] = input[(i * yDim) + idx] * gain; } // prepare some values float sum = (line[0] + powf(z, xDim) * line[(xDim - 1) * yDim]) * (1.f + z) / z; z1 = z; float z2 = powf(z, 2 * xDim - 2); float iz = 1.f / z; for (int j = 1; j < (xDim - 1); ++j) { sum += (z2 + z1) * line[j * yDim]; z1 *= z; z2 *= iz; } line[0] = sum * z / (1.f - powf(z, 2 * xDim)); for (int j = 1; j < xDim; ++j) { line[j * yDim] += z * line[(j - 1) * yDim]; } line[(xDim - 1) * yDim] *= z / (z - 1.f); for (int j = xDim - 2; 0 <= j; --j) { line[j * yDim] = z * (line[(j + 1) * yDim] - line[j * yDim]); } } __global__ void iirConvolve2D_Cardinal_Bspline_3_MirrorOffBound(float* input, float* output, size_t xDim, size_t yDim) { // assign line to thread int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idy >= yDim) return; // only threads with data should continue float* line = output + (idy * xDim); // adjust gain float z = sqrtf(3.f) - 2.f; float z1 = 1.0 - z; float gain = -(z1 * z1) / z; // copy original data for (int i = 0; i < xDim; i++) { line[i] = input[i + (idy * xDim)] * gain; } // prepare some values float sum = (line[0] + powf(z, xDim) * line[xDim - 1]) * (1.f + z) / z; z1 = z; float z2 = powf(z, 2 * xDim - 2); float iz = 1.f / z; for (int j = 1; j < (xDim - 1); ++j) { sum += (z2 + z1) * line[j]; z1 *= z; z2 *= iz; } line[0] = sum * z / (1.f - powf(z, 2 * xDim)); for (int j = 1; j < xDim; ++j) { line[j] += z * line[j - 1]; } line[xDim - 1] *= z / (z - 1.f); for (int j = xDim - 2; 0 <= j; --j) { line[j] = z * (line[j + 1] - line[j]); } }
4d291e6d29412b6dfd1b191559e7b68631a3eafa.cu
// inspired by https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ // TILE_DIM=32, BLOCK_ROWS=8 // No bank-conflict transpose // Same as transposeCoalesced except the first tile dimension is padded // to avoid shared memory bank conflicts. // can be used to transpose non-square 2D arrays __global__ void transposeNoBankConflicts32x8(float *odata, const float *idata, int xdim, int ydim) { __shared__ float tile[32][32 + 1]; int tilex = blockIdx.x * 32; int tiley = blockIdx.y * 32; int x = tilex + threadIdx.x; int y = tiley + threadIdx.y; for (int j = 0; j < 32; j += 8) { int index = (y + j) * xdim + x; if (index < (xdim*ydim)) { tile[threadIdx.y + j][threadIdx.x] = idata[index]; } } __syncthreads(); x = tiley + threadIdx.x; // transpose tiles y = tilex + threadIdx.y; // transpose tiles if (x >= ydim) return; // output matrix has y columns int maxJ = min(32, xdim - y); // output matrix has x rows for (int j = 0; j < maxJ; j += 8) { int index = (y+j) * ydim + x; odata[index] = tile[threadIdx.x][threadIdx.y + j]; } } __global__ void iirConvolve2D_Cardinal_Bspline_3_MirrorOffBoundNew(float* input, float* output, int xDim, int yDim) { // assign column to thread int idx = blockIdx.x * blockDim.x + threadIdx.x; // we will process data line by line, but data are stored in // columns! if (idx >= yDim) return; // only threads with data should continue float* line = output + idx; // FIXME rename to sth reasonable // adjust gain float z = sqrtf(3.f) - 2.f; float z1 = 1.0 - z; float gain = -(z1 * z1) / z; // copy original data for (int i = 0; i < xDim; i++) { line[i * yDim] = input[(i * yDim) + idx] * gain; } // prepare some values float sum = (line[0] + powf(z, xDim) * line[(xDim - 1) * yDim]) * (1.f + z) / z; z1 = z; float z2 = powf(z, 2 * xDim - 2); float iz = 1.f / z; for (int j = 1; j < (xDim - 1); ++j) { sum += (z2 + z1) * line[j * yDim]; z1 *= z; z2 *= iz; } line[0] = sum * z / (1.f - powf(z, 2 * xDim)); for (int j = 1; j < xDim; ++j) { line[j * yDim] += z * line[(j - 1) * yDim]; } line[(xDim - 1) * yDim] *= z / (z - 1.f); for (int j = xDim - 2; 0 <= j; --j) { line[j * yDim] = z * (line[(j + 1) * yDim] - line[j * yDim]); } } __global__ void iirConvolve2D_Cardinal_Bspline_3_MirrorOffBound(float* input, float* output, size_t xDim, size_t yDim) { // assign line to thread int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idy >= yDim) return; // only threads with data should continue float* line = output + (idy * xDim); // adjust gain float z = sqrtf(3.f) - 2.f; float z1 = 1.0 - z; float gain = -(z1 * z1) / z; // copy original data for (int i = 0; i < xDim; i++) { line[i] = input[i + (idy * xDim)] * gain; } // prepare some values float sum = (line[0] + powf(z, xDim) * line[xDim - 1]) * (1.f + z) / z; z1 = z; float z2 = powf(z, 2 * xDim - 2); float iz = 1.f / z; for (int j = 1; j < (xDim - 1); ++j) { sum += (z2 + z1) * line[j]; z1 *= z; z2 *= iz; } line[0] = sum * z / (1.f - powf(z, 2 * xDim)); for (int j = 1; j < xDim; ++j) { line[j] += z * line[j - 1]; } line[xDim - 1] *= z / (z - 1.f); for (int j = xDim - 2; 0 <= j; --j) { line[j] = z * (line[j + 1] - line[j]); } }
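transposeNoBankConflicts32x8 follows the TILE_DIM = 32, BLOCK_ROWS = 8 pattern from the linked post, so the matching launch configuration is implied rather than shown. The sketch below is one way to launch it, assuming idata is a row-major matrix with ydim rows and xdim columns; the ceiling division mirrors the bounds checks inside the kernel, and the helper name is mine.

// Sketch: host-side launch for the 32x8 tiled transpose above.
static void launchTranspose32x8(float* odata, const float* idata,
                                int xdim, int ydim, cudaStream_t stream) {
    dim3 block(32, 8);                    // TILE_DIM x BLOCK_ROWS
    dim3 grid((xdim + 31) / 32,           // tiles across the xdim columns
              (ydim + 31) / 32);          // tiles down the ydim rows
    transposeNoBankConflicts32x8<<<grid, block, 0, stream>>>(odata, idata, xdim, ydim);
}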
clover_invert.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <tune_quda.h> #include <clover_field_order.h> #include <complex_quda.h> #include <hipcub/hipcub.hpp> #include <launch_kernel.cuh> #include <face_quda.h> namespace quda { #ifdef GPU_CLOVER_DIRAC template <typename Clover> struct CloverInvertArg { const Clover clover; Clover inverse; bool computeTraceLog; double * const trlogA_h; double *trlogA_d; //extra attributes for twisted mass clover bool twist; double mu2; CloverInvertArg(Clover &inverse, const Clover &clover, bool computeTraceLog=0, double* const trlogA=0) : inverse(inverse), clover(clover), computeTraceLog(computeTraceLog), trlogA_h(trlogA), twist(clover.Twisted()), mu2(clover.Mu2()){ hipHostGetDevicePointer(&trlogA_d, trlogA_h, 0); // set the matching device pointer } }; static __inline__ __device__ double atomicAdd(double *addr, double val) { double old=*addr, assumed; do { assumed = old; old = __longlong_as_double( atomicCAS((unsigned long long int*)addr, __double_as_longlong(assumed), __double_as_longlong(val+assumed))); } while( __double_as_longlong(assumed)!=__double_as_longlong(old) ); return old; } /** Use a Cholesky decomposition to invert the clover matrix Here we use an inplace inversion which hopefully reduces register pressure */ template <int blockSize, typename Float, typename Clover> __device__ __host__ double cloverInvertCompute(CloverInvertArg<Clover> arg, int x, int parity) { Float A[72]; double trlogA = 0.0; // load the clover term into memory arg.clover.load(A, x, parity); for (int ch=0; ch<2; ch++) { Float diag[6]; Float tmp[6]; // temporary storage complex<Float> tri[15]; // hack into the right order as MILC just to copy algorithm directly // FIXME use native ordering in the Cholseky // factor of two is inherent to QUDA clover storage for (int i=0; i<6; i++) diag[i] = 2.0*A[ch*36+i]; const int idtab[15]={0,1,3,6,10,2,4,7,11,5,8,12,9,13,14}; for (int i=0; i<15; i++) tri[idtab[i]] = complex<Float>(2.0*A[ch*36+6+2*i], 2.0*A[ch*36+6+2*i+1]); //Compute (T^2 + mu2) first, then invert (not optimized!): if(arg.twist) { complex<Float> aux[15];//hmmm, better to reuse A-regs... 
//another solution just to define (but compiler may not be happy with this, swapping everything in //the global buffer): //complex<Float>* aux = (complex<Float>*)&A[ch*36]; //compute off-diagonal terms: // aux[ 0] = tri[0]*diag[0]+diag[1]*tri[0]+conj(tri[2])*tri[1]+conj(tri[4])*tri[3]+conj(tri[7])*tri[6]+conj(tri[11])*tri[10]; // aux[ 1] = tri[1]*diag[0]+diag[2]*tri[1]+tri[2]*tri[0]+conj(tri[5])*tri[3]+conj(tri[8])*tri[6]+conj(tri[12])*tri[10]; aux[ 2] = tri[2]*diag[1]+diag[2]*tri[2]+tri[1]*conj(tri[0])+conj(tri[5])*tri[4]+conj(tri[8])*tri[7]+conj(tri[12])*tri[11]; // aux[ 3] = tri[3]*diag[0]+diag[3]*tri[3]+tri[4]*tri[0]+tri[5]*tri[1]+conj(tri[9])*tri[6]+conj(tri[13])*tri[10]; aux[ 4] = tri[4]*diag[1]+diag[3]*tri[4]+tri[3]*conj(tri[0])+tri[5]*tri[2]+conj(tri[9])*tri[7]+conj(tri[13])*tri[11]; aux[ 5] = tri[5]*diag[2]+diag[3]*tri[5]+tri[3]*conj(tri[1])+tri[4]*conj(tri[2])+conj(tri[9])*tri[8]+conj(tri[13])*tri[12]; // aux[ 6] = tri[6]*diag[0]+diag[4]*tri[6]+tri[7]*tri[0]+tri[8]*tri[1]+tri[9]*tri[3]+conj(tri[14])*tri[10]; aux[ 7] = tri[7]*diag[1]+diag[4]*tri[7]+tri[6]*conj(tri[0])+tri[8]*tri[2]+tri[9]*tri[4]+conj(tri[14])*tri[11]; aux[ 8] = tri[8]*diag[2]+diag[4]*tri[8]+tri[6]*conj(tri[1])+tri[7]*conj(tri[2])+tri[9]*tri[5]+conj(tri[14])*tri[12]; aux[ 9] = tri[9]*diag[3]+diag[4]*tri[9]+tri[6]*conj(tri[3])+tri[7]*conj(tri[4])+tri[8]*conj(tri[5])+conj(tri[14])*tri[13]; // aux[10] = tri[10]*diag[0]+diag[5]*tri[10]+tri[11]*tri[0]+tri[12]*tri[1]+tri[13]*tri[3]+tri[14]*tri[6]; aux[11] = tri[11]*diag[1]+diag[5]*tri[11]+tri[10]*conj(tri[0])+tri[12]*tri[2]+tri[13]*tri[4]+tri[14]*tri[7]; aux[12] = tri[12]*diag[2]+diag[5]*tri[12]+tri[10]*conj(tri[1])+tri[11]*conj(tri[2])+tri[13]*tri[5]+tri[14]*tri[8]; aux[13] = tri[13]*diag[3]+diag[5]*tri[13]+tri[10]*conj(tri[3])+tri[11]*conj(tri[4])+tri[12]*conj(tri[5])+tri[14]*tri[9]; aux[14] = tri[14]*diag[4]+diag[5]*tri[14]+tri[10]*conj(tri[6])+tri[11]*conj(tri[7])+tri[12]*conj(tri[8])+tri[13]*conj(tri[9]); //update diagonal elements: diag[0] = (Float)arg.mu2+diag[0]*diag[0]+norm(tri[ 0])+norm(tri[ 1])+norm(tri[ 3])+norm(tri[ 6])+norm(tri[10]); diag[1] = (Float)arg.mu2+diag[1]*diag[1]+norm(tri[ 0])+norm(tri[ 2])+norm(tri[ 4])+norm(tri[ 7])+norm(tri[11]); diag[2] = (Float)arg.mu2+diag[2]*diag[2]+norm(tri[ 1])+norm(tri[ 2])+norm(tri[ 5])+norm(tri[ 8])+norm(tri[12]); diag[3] = (Float)arg.mu2+diag[3]*diag[3]+norm(tri[ 3])+norm(tri[ 4])+norm(tri[ 5])+norm(tri[ 9])+norm(tri[13]); diag[4] = (Float)arg.mu2+diag[4]*diag[4]+norm(tri[ 6])+norm(tri[ 7])+norm(tri[ 8])+norm(tri[ 9])+norm(tri[14]); diag[5] = (Float)arg.mu2+diag[5]*diag[5]+norm(tri[10])+norm(tri[11])+norm(tri[12])+norm(tri[13])+norm(tri[14]); //update off-diagonal elements: for(int i = 0; i < 15; i++) tri[i] = aux[i]; } // for (int j=0; j<6; j++) { diag[j] = sqrt(diag[j]); tmp[j] = 1.0 / diag[j]; for (int k=j+1; k<6; k++) { int kj = k*(k-1)/2+j; tri[kj] *= tmp[j]; } for(int k=j+1;k<6;k++){ int kj=k*(k-1)/2+j; diag[k] -= (tri[kj] * conj(tri[kj])).real(); for(int l=k+1;l<6;l++){ int lj=l*(l-1)/2+j; int lk=l*(l-1)/2+k; tri[lk] -= tri[lj] * conj(tri[kj]); } } } /* Accumulate trlogA */ for (int j=0;j<6;j++) trlogA += (double)2.0*log((double)(diag[j])); /* Now use forward and backward substitution to construct inverse */ complex<Float> v1[6]; for (int k=0;k<6;k++) { for(int l=0;l<k;l++) v1[l] = complex<Float>(0.0, 0.0); /* Forward substitute */ v1[k] = complex<Float>(tmp[k], 0.0); for(int l=k+1;l<6;l++){ complex<Float> sum = complex<Float>(0.0, 0.0); for(int j=k;j<l;j++){ int lj=l*(l-1)/2+j; sum -= tri[lj] * v1[j]; } v1[l] 
= sum * tmp[l]; } /* Backward substitute */ v1[5] = v1[5] * tmp[5]; for(int l=4;l>=k;l--){ complex<Float> sum = v1[l]; for(int j=l+1;j<6;j++){ int jl=j*(j-1)/2+l; sum -= conj(tri[jl]) * v1[j]; } v1[l] = sum * tmp[l]; } /* Overwrite column k */ diag[k] = v1[k].real(); for(int l=k+1;l<6;l++){ int lk=l*(l-1)/2+k; tri[lk] = v1[l]; } } for (int i=0; i<6; i++) A[ch*36+i] = 0.5 * diag[i]; for (int i=0; i<15; i++) { A[ch*36+6+2*i] = 0.5*tri[idtab[i]].real(); A[ch*36+6+2*i+1] = 0.5*tri[idtab[i]].imag(); } } // save the inverted matrix arg.inverse.save(A, x, parity); return trlogA; } template <int blockSize, typename Float, typename Clover> void cloverInvert(CloverInvertArg<Clover> arg) { for (int parity=0; parity<2; parity++) { for (int x=0; x<arg.clover.volumeCB; x++) { // should make this thread safe if we ever apply threads to cpu code double trlogA = cloverInvertCompute<blockSize, Float>(arg, x, parity); if (arg.computeTraceLog) arg.trlogA_h[parity] += trlogA; } } } template <int blockSize, typename Float, typename Clover> __global__ void cloverInvertKernel(CloverInvertArg<Clover> arg) { int idx = blockIdx.x*blockDim.x + threadIdx.x; //if (idx >= arg.clover.volumeCB) return; int parity = blockIdx.y; double trlogA = 0.0; if (idx < arg.clover.volumeCB) trlogA = cloverInvertCompute<blockSize, Float>(arg, idx, parity); if (arg.computeTraceLog) { typedef hipcub::BlockReduce<double, blockSize> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; double aggregate = BlockReduce(temp_storage).Sum(trlogA); if (threadIdx.x == 0) atomicAdd(arg.trlogA_d+parity, aggregate); } } template <typename Float, typename Clover> class CloverInvert : Tunable { CloverInvertArg<Clover> arg; const CloverField &meta; // used for meta data only const QudaFieldLocation location; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0 ;} bool tuneSharedBytes() const { return false; } // Don't tune the shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.clover.volumeCB; } public: CloverInvert(CloverInvertArg<Clover> &arg, const CloverField &meta, QudaFieldLocation location) : arg(arg), meta(meta), location(location) { writeAuxString("stride=%d,prec=%lu",arg.clover.stride,sizeof(Float)); } virtual ~CloverInvert() { ; } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); arg.trlogA_h[0] = 0.0; arg.trlogA_h[1] = 0.0; if (location == QUDA_CUDA_FIELD_LOCATION) { tp.grid.y = 2; // for parity LAUNCH_KERNEL(cloverInvertKernel, tp, stream, arg, Float, Clover); } else { cloverInvert<1, Float, Clover>(arg); } if (arg.computeTraceLog) { hipDeviceSynchronize(); reduceDoubleArray(arg.trlogA_h, 2); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } std::string paramString(const TuneParam &param) const { // Don't bother printing the grid dim. 
std::stringstream ps; ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), "; ps << "shared=" << param.shared_bytes; return ps.str(); } long long flops() const { return 0; } long long bytes() const { return 2*arg.clover.volumeCB*(arg.inverse.Bytes() + arg.clover.Bytes()); } }; template <typename Float, typename Clover> void cloverInvert(Clover inverse, const Clover clover, bool computeTraceLog, double* const trlog, const CloverField &meta, QudaFieldLocation location) { CloverInvertArg<Clover> arg(inverse, clover, computeTraceLog, trlog); CloverInvert<Float,Clover> invert(arg, meta, location); invert.apply(0); hipDeviceSynchronize(); } template <typename Float> void cloverInvert(const CloverField &clover, bool computeTraceLog, QudaFieldLocation location) { if (clover.Order() == QUDA_FLOAT2_CLOVER_ORDER) { cloverInvert<Float>(FloatNOrder<Float,72,2>(clover, 1), FloatNOrder<Float,72,2>(clover, 0), computeTraceLog, clover.TrLog(), clover, location); } else if (clover.Order() == QUDA_FLOAT4_CLOVER_ORDER) { cloverInvert<Float>(FloatNOrder<Float,72,4>(clover, 1), FloatNOrder<Float,72,4>(clover, 0), computeTraceLog, clover.TrLog(), clover, location); } else { errorQuda("Clover field %d order not supported", clover.Order()); } } #endif // this is the function that is actually called, from here on down we instantiate all required templates void cloverInvert(CloverField &clover, bool computeTraceLog, QudaFieldLocation location) { #ifdef GPU_CLOVER_DIRAC if (clover.Precision() == QUDA_HALF_PRECISION && clover.Order() > 4) errorQuda("Half precision not supported for order %d", clover.Order()); if (clover.Precision() == QUDA_DOUBLE_PRECISION) { cloverInvert<double>(clover, computeTraceLog, location); } else if (clover.Precision() == QUDA_SINGLE_PRECISION) { cloverInvert<float>(clover, computeTraceLog, location); } else { errorQuda("Precision %d not supported", clover.Precision()); } #else errorQuda("Clover has not been built"); #endif } } // namespace quda
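The Cholesky factorisation and the substitution loops above address the strictly lower triangle of each 6x6 Hermitian block through the flat index k*(k-1)/2 + j. The snippet below only spells out that packed-triangular mapping; the function name is mine, not QUDA's.

// Sketch: flat index of element (row k, column j), with j < k, in the
// packed strictly-lower-triangular storage used by the inversion above.
// Row 1 starts at offset 0, row 2 at 1, row 3 at 3, ..., row k at k*(k-1)/2.
static inline int lowerTriIndex(int k, int j) {
    return k * (k - 1) / 2 + j;
}
// For the 6x6 blocks the 15 valid offsets run from lowerTriIndex(1,0) == 0
// to lowerTriIndex(5,4) == 14, matching the tri[15] arrays above.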
clover_invert.cu
#include <tune_quda.h> #include <clover_field_order.h> #include <complex_quda.h> #include <cub/cub.cuh> #include <launch_kernel.cuh> #include <face_quda.h> namespace quda { #ifdef GPU_CLOVER_DIRAC template <typename Clover> struct CloverInvertArg { const Clover clover; Clover inverse; bool computeTraceLog; double * const trlogA_h; double *trlogA_d; //extra attributes for twisted mass clover bool twist; double mu2; CloverInvertArg(Clover &inverse, const Clover &clover, bool computeTraceLog=0, double* const trlogA=0) : inverse(inverse), clover(clover), computeTraceLog(computeTraceLog), trlogA_h(trlogA), twist(clover.Twisted()), mu2(clover.Mu2()){ cudaHostGetDevicePointer(&trlogA_d, trlogA_h, 0); // set the matching device pointer } }; static __inline__ __device__ double atomicAdd(double *addr, double val) { double old=*addr, assumed; do { assumed = old; old = __longlong_as_double( atomicCAS((unsigned long long int*)addr, __double_as_longlong(assumed), __double_as_longlong(val+assumed))); } while( __double_as_longlong(assumed)!=__double_as_longlong(old) ); return old; } /** Use a Cholesky decomposition to invert the clover matrix Here we use an inplace inversion which hopefully reduces register pressure */ template <int blockSize, typename Float, typename Clover> __device__ __host__ double cloverInvertCompute(CloverInvertArg<Clover> arg, int x, int parity) { Float A[72]; double trlogA = 0.0; // load the clover term into memory arg.clover.load(A, x, parity); for (int ch=0; ch<2; ch++) { Float diag[6]; Float tmp[6]; // temporary storage complex<Float> tri[15]; // hack into the right order as MILC just to copy algorithm directly // FIXME use native ordering in the Cholseky // factor of two is inherent to QUDA clover storage for (int i=0; i<6; i++) diag[i] = 2.0*A[ch*36+i]; const int idtab[15]={0,1,3,6,10,2,4,7,11,5,8,12,9,13,14}; for (int i=0; i<15; i++) tri[idtab[i]] = complex<Float>(2.0*A[ch*36+6+2*i], 2.0*A[ch*36+6+2*i+1]); //Compute (T^2 + mu2) first, then invert (not optimized!): if(arg.twist) { complex<Float> aux[15];//hmmm, better to reuse A-regs... 
//another solution just to define (but compiler may not be happy with this, swapping everything in //the global buffer): //complex<Float>* aux = (complex<Float>*)&A[ch*36]; //compute off-diagonal terms: // aux[ 0] = tri[0]*diag[0]+diag[1]*tri[0]+conj(tri[2])*tri[1]+conj(tri[4])*tri[3]+conj(tri[7])*tri[6]+conj(tri[11])*tri[10]; // aux[ 1] = tri[1]*diag[0]+diag[2]*tri[1]+tri[2]*tri[0]+conj(tri[5])*tri[3]+conj(tri[8])*tri[6]+conj(tri[12])*tri[10]; aux[ 2] = tri[2]*diag[1]+diag[2]*tri[2]+tri[1]*conj(tri[0])+conj(tri[5])*tri[4]+conj(tri[8])*tri[7]+conj(tri[12])*tri[11]; // aux[ 3] = tri[3]*diag[0]+diag[3]*tri[3]+tri[4]*tri[0]+tri[5]*tri[1]+conj(tri[9])*tri[6]+conj(tri[13])*tri[10]; aux[ 4] = tri[4]*diag[1]+diag[3]*tri[4]+tri[3]*conj(tri[0])+tri[5]*tri[2]+conj(tri[9])*tri[7]+conj(tri[13])*tri[11]; aux[ 5] = tri[5]*diag[2]+diag[3]*tri[5]+tri[3]*conj(tri[1])+tri[4]*conj(tri[2])+conj(tri[9])*tri[8]+conj(tri[13])*tri[12]; // aux[ 6] = tri[6]*diag[0]+diag[4]*tri[6]+tri[7]*tri[0]+tri[8]*tri[1]+tri[9]*tri[3]+conj(tri[14])*tri[10]; aux[ 7] = tri[7]*diag[1]+diag[4]*tri[7]+tri[6]*conj(tri[0])+tri[8]*tri[2]+tri[9]*tri[4]+conj(tri[14])*tri[11]; aux[ 8] = tri[8]*diag[2]+diag[4]*tri[8]+tri[6]*conj(tri[1])+tri[7]*conj(tri[2])+tri[9]*tri[5]+conj(tri[14])*tri[12]; aux[ 9] = tri[9]*diag[3]+diag[4]*tri[9]+tri[6]*conj(tri[3])+tri[7]*conj(tri[4])+tri[8]*conj(tri[5])+conj(tri[14])*tri[13]; // aux[10] = tri[10]*diag[0]+diag[5]*tri[10]+tri[11]*tri[0]+tri[12]*tri[1]+tri[13]*tri[3]+tri[14]*tri[6]; aux[11] = tri[11]*diag[1]+diag[5]*tri[11]+tri[10]*conj(tri[0])+tri[12]*tri[2]+tri[13]*tri[4]+tri[14]*tri[7]; aux[12] = tri[12]*diag[2]+diag[5]*tri[12]+tri[10]*conj(tri[1])+tri[11]*conj(tri[2])+tri[13]*tri[5]+tri[14]*tri[8]; aux[13] = tri[13]*diag[3]+diag[5]*tri[13]+tri[10]*conj(tri[3])+tri[11]*conj(tri[4])+tri[12]*conj(tri[5])+tri[14]*tri[9]; aux[14] = tri[14]*diag[4]+diag[5]*tri[14]+tri[10]*conj(tri[6])+tri[11]*conj(tri[7])+tri[12]*conj(tri[8])+tri[13]*conj(tri[9]); //update diagonal elements: diag[0] = (Float)arg.mu2+diag[0]*diag[0]+norm(tri[ 0])+norm(tri[ 1])+norm(tri[ 3])+norm(tri[ 6])+norm(tri[10]); diag[1] = (Float)arg.mu2+diag[1]*diag[1]+norm(tri[ 0])+norm(tri[ 2])+norm(tri[ 4])+norm(tri[ 7])+norm(tri[11]); diag[2] = (Float)arg.mu2+diag[2]*diag[2]+norm(tri[ 1])+norm(tri[ 2])+norm(tri[ 5])+norm(tri[ 8])+norm(tri[12]); diag[3] = (Float)arg.mu2+diag[3]*diag[3]+norm(tri[ 3])+norm(tri[ 4])+norm(tri[ 5])+norm(tri[ 9])+norm(tri[13]); diag[4] = (Float)arg.mu2+diag[4]*diag[4]+norm(tri[ 6])+norm(tri[ 7])+norm(tri[ 8])+norm(tri[ 9])+norm(tri[14]); diag[5] = (Float)arg.mu2+diag[5]*diag[5]+norm(tri[10])+norm(tri[11])+norm(tri[12])+norm(tri[13])+norm(tri[14]); //update off-diagonal elements: for(int i = 0; i < 15; i++) tri[i] = aux[i]; } // for (int j=0; j<6; j++) { diag[j] = sqrt(diag[j]); tmp[j] = 1.0 / diag[j]; for (int k=j+1; k<6; k++) { int kj = k*(k-1)/2+j; tri[kj] *= tmp[j]; } for(int k=j+1;k<6;k++){ int kj=k*(k-1)/2+j; diag[k] -= (tri[kj] * conj(tri[kj])).real(); for(int l=k+1;l<6;l++){ int lj=l*(l-1)/2+j; int lk=l*(l-1)/2+k; tri[lk] -= tri[lj] * conj(tri[kj]); } } } /* Accumulate trlogA */ for (int j=0;j<6;j++) trlogA += (double)2.0*log((double)(diag[j])); /* Now use forward and backward substitution to construct inverse */ complex<Float> v1[6]; for (int k=0;k<6;k++) { for(int l=0;l<k;l++) v1[l] = complex<Float>(0.0, 0.0); /* Forward substitute */ v1[k] = complex<Float>(tmp[k], 0.0); for(int l=k+1;l<6;l++){ complex<Float> sum = complex<Float>(0.0, 0.0); for(int j=k;j<l;j++){ int lj=l*(l-1)/2+j; sum -= tri[lj] * v1[j]; } v1[l] 
= sum * tmp[l]; } /* Backward substitute */ v1[5] = v1[5] * tmp[5]; for(int l=4;l>=k;l--){ complex<Float> sum = v1[l]; for(int j=l+1;j<6;j++){ int jl=j*(j-1)/2+l; sum -= conj(tri[jl]) * v1[j]; } v1[l] = sum * tmp[l]; } /* Overwrite column k */ diag[k] = v1[k].real(); for(int l=k+1;l<6;l++){ int lk=l*(l-1)/2+k; tri[lk] = v1[l]; } } for (int i=0; i<6; i++) A[ch*36+i] = 0.5 * diag[i]; for (int i=0; i<15; i++) { A[ch*36+6+2*i] = 0.5*tri[idtab[i]].real(); A[ch*36+6+2*i+1] = 0.5*tri[idtab[i]].imag(); } } // save the inverted matrix arg.inverse.save(A, x, parity); return trlogA; } template <int blockSize, typename Float, typename Clover> void cloverInvert(CloverInvertArg<Clover> arg) { for (int parity=0; parity<2; parity++) { for (int x=0; x<arg.clover.volumeCB; x++) { // should make this thread safe if we ever apply threads to cpu code double trlogA = cloverInvertCompute<blockSize, Float>(arg, x, parity); if (arg.computeTraceLog) arg.trlogA_h[parity] += trlogA; } } } template <int blockSize, typename Float, typename Clover> __global__ void cloverInvertKernel(CloverInvertArg<Clover> arg) { int idx = blockIdx.x*blockDim.x + threadIdx.x; //if (idx >= arg.clover.volumeCB) return; int parity = blockIdx.y; double trlogA = 0.0; if (idx < arg.clover.volumeCB) trlogA = cloverInvertCompute<blockSize, Float>(arg, idx, parity); if (arg.computeTraceLog) { typedef cub::BlockReduce<double, blockSize> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; double aggregate = BlockReduce(temp_storage).Sum(trlogA); if (threadIdx.x == 0) atomicAdd(arg.trlogA_d+parity, aggregate); } } template <typename Float, typename Clover> class CloverInvert : Tunable { CloverInvertArg<Clover> arg; const CloverField &meta; // used for meta data only const QudaFieldLocation location; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0 ;} bool tuneSharedBytes() const { return false; } // Don't tune the shared memory bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. unsigned int minThreads() const { return arg.clover.volumeCB; } public: CloverInvert(CloverInvertArg<Clover> &arg, const CloverField &meta, QudaFieldLocation location) : arg(arg), meta(meta), location(location) { writeAuxString("stride=%d,prec=%lu",arg.clover.stride,sizeof(Float)); } virtual ~CloverInvert() { ; } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); arg.trlogA_h[0] = 0.0; arg.trlogA_h[1] = 0.0; if (location == QUDA_CUDA_FIELD_LOCATION) { tp.grid.y = 2; // for parity LAUNCH_KERNEL(cloverInvertKernel, tp, stream, arg, Float, Clover); } else { cloverInvert<1, Float, Clover>(arg); } if (arg.computeTraceLog) { cudaDeviceSynchronize(); reduceDoubleArray(arg.trlogA_h, 2); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } std::string paramString(const TuneParam &param) const { // Don't bother printing the grid dim. 
std::stringstream ps; ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), "; ps << "shared=" << param.shared_bytes; return ps.str(); } long long flops() const { return 0; } long long bytes() const { return 2*arg.clover.volumeCB*(arg.inverse.Bytes() + arg.clover.Bytes()); } }; template <typename Float, typename Clover> void cloverInvert(Clover inverse, const Clover clover, bool computeTraceLog, double* const trlog, const CloverField &meta, QudaFieldLocation location) { CloverInvertArg<Clover> arg(inverse, clover, computeTraceLog, trlog); CloverInvert<Float,Clover> invert(arg, meta, location); invert.apply(0); cudaDeviceSynchronize(); } template <typename Float> void cloverInvert(const CloverField &clover, bool computeTraceLog, QudaFieldLocation location) { if (clover.Order() == QUDA_FLOAT2_CLOVER_ORDER) { cloverInvert<Float>(FloatNOrder<Float,72,2>(clover, 1), FloatNOrder<Float,72,2>(clover, 0), computeTraceLog, clover.TrLog(), clover, location); } else if (clover.Order() == QUDA_FLOAT4_CLOVER_ORDER) { cloverInvert<Float>(FloatNOrder<Float,72,4>(clover, 1), FloatNOrder<Float,72,4>(clover, 0), computeTraceLog, clover.TrLog(), clover, location); } else { errorQuda("Clover field %d order not supported", clover.Order()); } } #endif // this is the function that is actually called, from here on down we instantiate all required templates void cloverInvert(CloverField &clover, bool computeTraceLog, QudaFieldLocation location) { #ifdef GPU_CLOVER_DIRAC if (clover.Precision() == QUDA_HALF_PRECISION && clover.Order() > 4) errorQuda("Half precision not supported for order %d", clover.Order()); if (clover.Precision() == QUDA_DOUBLE_PRECISION) { cloverInvert<double>(clover, computeTraceLog, location); } else if (clover.Precision() == QUDA_SINGLE_PRECISION) { cloverInvert<float>(clover, computeTraceLog, location); } else { errorQuda("Precision %d not supported", clover.Precision()); } #else errorQuda("Clover has not been built"); #endif } } // namespace quda
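// Sketch of the dispatch idiom used at the bottom of the file above: precision (and clover
// order) arrive as runtime enums, and each switch case instantiates the templated worker with
// the matching element type, so the kernels themselves stay fully typed at compile time. The
// names and types below are simplified, hypothetical stand-ins, not QUDA's actual API.
#include <cstdio>

enum Precision { SINGLE_PREC, DOUBLE_PREC };

template <typename Float> void invertImpl(int volume) {
  // a real implementation would build the field accessor and launch the templated kernel here
  printf("invert %d sites using %zu-byte reals\n", volume, sizeof(Float));
}

void invert(Precision prec, int volume) {
  switch (prec) {
  case DOUBLE_PREC: invertImpl<double>(volume); break;
  case SINGLE_PREC: invertImpl<float>(volume); break;
  }
}

int main() {
  invert(SINGLE_PREC, 1024);
  invert(DOUBLE_PREC, 1024);
  return 0;
}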
ba4cc0b868875e47ca6393ab85e47c4b6d95b2bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //### cudaVDW functions ##### // Here and next fer_, fe_ and e_ functions must contfloatain the same code // prefexis fer_, fe_ and e_ means force and energy calculation by r also, force and energy and energy #include <stdio.h> #include "dataStruct.h" #include "vdw.h" #include "cuStruct.h" #include "cuVdW.h" /* struct cudaVdW { // all fields are constant during the simulation int type; float p0, p1, p2, p3, p4; float r2cut; //square of cutoff float (*eng)(float r2, cudaVdW* vdw); // function to calculate energy float (*feng)(float r2, cudaVdW* vdw, float& eng); // function to calculate force (return) & energy (save in eng) float (*feng_r)(float r2, float& r, cudaVdW* vdw, float& eng); // function to calculate force (return) & energy (save in eng) if r may be knonw float (*eng_r)(float r2, float r, cudaVdW* vdw); // return energy by r and r^2 }; */ __device__ float cu_fer_lj(float r2, float& r, cudaVdW* vdw, float& eng) // calculate force and energy by Lennard-Jones pair potential: U = 4e[(s/r)^12 - (s/r)^6] // r2 - square of distance, vdw - parameters (the same for next PP functions) { float r2i = 1.f / r2; float sr2 = vdw->p1 * r2i; float sr6 = sr2 * sr2 * sr2; eng += vdw->p0 * sr6 * (sr6 - 1.f); return vdw->p2 * r2i * sr6 * (2.f * sr6 - 1.f); } __device__ float cu_fe_lj(float r2, cudaVdW* vdw, float& eng) // calculate force and energy by Lennard-Jones pair pfloatotential: U = 4e[(s/r)^12 - (s/r)^6] // r2 - square of distance, vdw - parameters (the same for next PP functions) { float r2i = 1.f / r2; float sr2 = vdw->p1 * r2i; float sr6 = sr2 * sr2 * sr2; eng += vdw->p0 * sr6 * (sr6 - 1.f); return vdw->p2 * r2i * sr6 * (2.f * sr6 - 1.f); } __device__ float cu_e_lj(float r2, cudaVdW* vdw) // calculate energy by Lennard-Jones pair potentialfloat { float r2i = 1.f / r2; float sr2 = vdw->p1 * r2i; float sr6 = sr2 * sr2 * sr2; return vdw->p0 * sr6 * (sr6 - 1.f); } __device__ float cu_er_lj(float r2, float &r, cudaVdW* vdw) // calculate energy by Lennard-Jones pair potential { float r2i = 1.f / r2; float sr2 = vdw->p1 * r2i; float sr6 = sr2 * sr2 * sr2; return vdw->p0 * sr6 * (sr6 - 1.f); } __device__ float cu_fer_buck(float r2, float& r, cudaVdW* vdw, float& eng) // calculate force and energy (with r) by Buckingham pair potential: U = A exp(-r/ro) - C/r^6 { //printf("begin cu_fer_buck\n"); float r2i = 1.f / r2; float r4i = r2i * r2i; if (r == 0.f) // calculate r if unkonwn (zero) and use it otherwise r = sqrt(r2); eng += vdw->p0 * exp(-r / vdw->p1) - vdw->p2 * r4i * r2i; return vdw->p0 * exp(-r / vdw->p1) / r / vdw->p1 - 6.f * vdw->p2 * r4i * r4i; } __device__ float cu_fe_buck(float r2, cudaVdW* vdw, float& eng) // calculate force and energy by Buckingham pair potential: U = A exp(-r/ro) - C/r^6 { float r2i = 1.f / r2; float r = sqrt(r2); float r4i = r2i * r2i; eng += vdw->p0 * exp(-r / vdw->p1) - vdw->p2 * r4i * r2i; return vdw->p0 * exp(-r / vdw->p1) / r / vdw->p1 - 6.0 * vdw->p2 * r4i * r4i; } __device__ float cu_e_buck(float r2, cudaVdW* vdw) // calculate energy by Buckingham pair potential: U = A exp(-r/ro) - C/r^6 { float r2i = 1.f / r2; float r = sqrt(r2); float r4i = r2i * r2i; return vdw->p0 * exp(-r / vdw->p1) - vdw->p2 * r4i * r2i; } __device__ float cu_er_buck(float r2, float &r, cudaVdW* vdw) // calculate energy by Buckingham pair potential: U = A exp(-r/ro) - C/r^6 { float r2i = 1.f / r2; float r4i = r2i * r2i; if (r == 0.f) // calculate r if unkonwn (zero) and use it otherwise r = sqrt(r2); 
return vdw->p0 * exp(-r / vdw->p1) - vdw->p2 * r4i * r2i; } __device__ float cu_fer_bmh(float r2, float& r, cudaVdW* vdw, float& eng) // calculate force and energy (with r) by BornMayerHuggins pair potential: U = Aexp[B(s-r)] - C/r^6 - D/r^8 { float r2i = 1.f / r2; float r4i = r2i * r2i; if (r == 0.f) // calculate if unkonwn (zero) and use otherwise r = sqrt(r2); //printf("r(bmh)=%f\n", r); eng += vdw->p0 * exp(vdw->p1 * (vdw->p2 - r)) - vdw->p3 * r4i * r2i - vdw->p4 * r4i * r4i; return vdw->p0 * vdw->p1 * exp(vdw->p1 * (vdw->p2 - r)) / r - 6.f * vdw->p3 * r4i * r4i - 8.f * vdw->p4 * r4i * r4i * r2i; } __device__ float cu_fe_bmh(float r2, cudaVdW* vdw, float& eng) // calculate force and energy by BornMayerHuggins pair potential: U = Aexp[B(s-r)] - C/r^6 - D/r^8 { float r2i = 1.f / r2; float r = sqrt(r2); float r4i = r2i * r2i; eng += vdw->p0 * exp(vdw->p1 * (vdw->p2 - r)) - vdw->p3 * r4i * r2i - vdw->p4 * r4i * r4i; return vdw->p0 * vdw->p1 * exp(vdw->p1 * (vdw->p2 - r)) / r - 6.f * vdw->p3 * r4i * r4i - 8.f * vdw->p4 * r4i * r4i * r2i; } __device__ float cu_e_bmh(float r2, cudaVdW* vdw) // calculate energy by BornMayerHuggins pair potential: U = Aexp[B(s-r)] - C/r^6 - D/r^8 { float r2i = 1.f / r2; float r = sqrt(r2); float r4i = r2i * r2i; return vdw->p0 * exp(vdw->p1 * (vdw->p2 - r)) - vdw->p3 * r4i * r2i - vdw->p4 * r4i * r4i; } __device__ float cu_er_bmh(float r2, float &r, cudaVdW* vdw) // calculate energy by BornMayerHuggins pair potential: U = Aexp[B(s-r)] - C/r^6 - D/r^8 { float r2i = 1.f / r2; float r4i = r2i * r2i; if (r == 0.f) // calculate r if unkonwn (zero) and use it otherwise r = sqrt(r2); return vdw->p0 * exp(vdw->p1 * (vdw->p2 - r)) - vdw->p3 * r4i * r2i - vdw->p4 * r4i * r4i; } __device__ float cu_fer_elin(float r2, float& r, cudaVdW* vdw, float& eng) // calculate force and energy by "elin" potential: U = A * exp(-x/ro) + C*x // r2 - square of distance, vdw - parameters (the same for next PP functions) { if (r == 0.f) // calculate r if unkonwn (zero) and use it otherwise r = sqrt(r2); eng += vdw->p0 * exp(-r / vdw->p1) + vdw->p2 * r; return vdw->p0 * exp(-r / vdw->p1) / r / vdw->p1 - vdw->p2 / r; } __device__ float cu_fe_elin(float r2, cudaVdW* vdw, float& eng) // calculate force and energy by "elin" potential: U = A * exp(-x/ro) + C*x // r2 - square of distance, vdw - parameters (the same for next PP functions) { float r = sqrt(r2); eng += vdw->p0 * exp(-r / vdw->p1) + vdw->p2 * r; return vdw->p0 * exp(-r / vdw->p1) / r / vdw->p1 - vdw->p2 / r; } __device__ float cu_e_elin(float r2, cudaVdW* vdw) // calculate energy by "elin" potential: U = A * exp(-x/ro) + C*x { float r = sqrt(r2); return vdw->p0 * exp(-r / vdw->p1) + vdw->p2 * r; } __device__ float cu_er_elin(float r2, float &r, cudaVdW* vdw) // calculate energy by "elin" potential: U = A * exp(-x/ro) + C*x { if (r == 0.f) // calculate r if unkonwn (zero) and use it otherwise r = sqrt(r2); return vdw->p0 * exp(-r / vdw->p1) + vdw->p2 * r; } __device__ float cu_fer_einv(float r2, float& r, cudaVdW* vdw, float& eng) // calculate force and energy by "einv" potential: U = A * exp(-x/ro) - C/x // r2 - square of distance, vdw - parameters (the same for next PP functions) { if (r == 0.f) // calculate r if unkonwn (zero) and use it otherwise r = sqrt(r2); eng += vdw->p0 * exp(-r / vdw->p1) - vdw->p2 / r; return vdw->p0 * exp(-r / vdw->p1) / r / vdw->p1 - vdw->p2 / r / r2; } __device__ float cu_fe_einv(float r2, cudaVdW* vdw, float& eng) // calculate force and energy by "einv" potential: U = A * exp(-x/ro) - C/x // r2 - square 
of distance, vdw - parameters (the same for next PP functions) { float r = sqrt(r2); eng += vdw->p0 * exp(-r / vdw->p1) - vdw->p2 / r; return vdw->p0 * exp(-r / vdw->p1) / r / vdw->p1 - vdw->p2 / r / r2; } __device__ float cu_e_einv(float r2, cudaVdW* vdw) // calculate energy by "einv" potential: U = A * exp(-x/ro) - C/x { float r = sqrt(r2); return vdw->p0 * exp(-r / vdw->p1) - vdw->p2 / r; } __device__ float cu_er_einv(float r2, float &r, cudaVdW* vdw) // calculate energy by "einv" potential: U = A * exp(-x/ro) - C/x { if (r == 0.f) // calculate r if unknown (zero) and use it otherwise r = sqrt(r2); return vdw->p0 * exp(-r / vdw->p1) - vdw->p2 / r; } __device__ float surk_pot(float r2, float rad1, float rad2, cudaVdW* vdw, float& eng) // potential derived by Platon Surkov: // U = ri*rj*(C1 ri^2 rj^2 / rij^7 - C2 / (ki*ri + kj * rj) / r^6 // p0 = C1, p1 = C2, p2 = ki, p3 = kj { //rad1 = 0.577; //rad2 = 0.577; float C2ir_sum = vdw->p1 / (vdw->p2 * rad1 + vdw->p3 * rad2); // C2 / (ka + lb) float r_prod = rad1 * rad2; float C1ab2 = r_prod * r_prod * vdw->p0; // C1 * a^2 b^2 float r6 = r2 * r2 * r2; float r = sqrt(r2); float ir6 = 1.f / r6; float ir = 1.f / r; float val = r_prod * ir6 * (C1ab2 * ir - C2ir_sum); //printf("U=%f: ra=%f rb=%f (%f %f %f %f)\n", val, rad1, rad2, vdw->p0, vdw->p1, vdw->p2, vdw->p3); eng += val; return r_prod * ir6 / r2 * (7.f * C1ab2 * ir - 6.f * C2ir_sum); } __global__ void define_vdw_func(cudaMD* md) { cudaVdW* vdw; vdw = &(md->pairpots[threadIdx.x]); switch (vdw->type) { case lj_type: vdw->eng = &cu_e_lj; vdw->eng_r = &cu_er_lj; vdw->feng = &cu_fe_lj; vdw->feng_r = &cu_fer_lj; break; case bh_type: vdw->eng = &cu_e_buck; vdw->eng_r = &cu_er_buck; vdw->feng = &cu_fe_buck; vdw->feng_r = &cu_fer_buck; break; case BHM_type: vdw->eng = &cu_e_bmh; vdw->eng_r = &cu_er_bmh; vdw->feng = &cu_fe_bmh; vdw->feng_r = &cu_fer_bmh; break; case elin_type: vdw->eng = &cu_e_elin; vdw->eng_r = &cu_er_elin; vdw->feng = &cu_fe_elin; vdw->feng_r = &cu_fer_elin; break; case einv_type: vdw->eng = &cu_e_einv; vdw->eng_r = &cu_er_einv; vdw->feng = &cu_fe_einv; vdw->feng_r = &cu_fer_einv; break; case surk_type: vdw->radi_func = &surk_pot; break; } }
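// Sketch relating the precomputed Lennard-Jones parameters used above to the textbook form
// U(r) = 4*eps*((sigma/r)^12 - (sigma/r)^6). Assuming the host fills p0 = 4*eps, p1 = sigma^2
// and p2 = 24*eps (an assumption -- the host-side setup is not shown in this file), the value
// returned by cu_fe_lj is (-dU/dr)/r, i.e. the scalar that multiplies the displacement vector
// to give the force. The hypothetical standalone check below compares that expression against
// a central finite difference of the energy.
#include <cstdio>
#include <cmath>

static double lj_energy(double r2, double p0, double p1) {
  double sr2 = p1 / r2, sr6 = sr2 * sr2 * sr2;
  return p0 * sr6 * (sr6 - 1.0);
}

static double lj_force_over_r(double r2, double p1, double p2) {
  double r2i = 1.0 / r2, sr2 = p1 * r2i, sr6 = sr2 * sr2 * sr2;
  return p2 * r2i * sr6 * (2.0 * sr6 - 1.0);
}

int main() {
  const double eps = 0.65, sigma = 3.17;           // arbitrary test values
  const double p0 = 4.0 * eps, p1 = sigma * sigma, p2 = 24.0 * eps;
  const double r = 3.5, h = 1e-6;
  double dUdr = (lj_energy((r + h) * (r + h), p0, p1) -
                 lj_energy((r - h) * (r - h), p0, p1)) / (2.0 * h);
  printf("analytic  (-dU/dr)/r = %.8f\n", lj_force_over_r(r * r, p1, p2));
  printf("numerical (-dU/dr)/r = %.8f\n", -dUdr / r);
  return 0;
}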
ba4cc0b868875e47ca6393ab85e47c4b6d95b2bb.cu
//### cudaVDW functions ##### // Here and next fer_, fe_ and e_ functions must contfloatain the same code // prefexis fer_, fe_ and e_ means force and energy calculation by r also, force and energy and energy #include <stdio.h> #include "dataStruct.h" #include "vdw.h" #include "cuStruct.h" #include "cuVdW.h" /* struct cudaVdW { // all fields are constant during the simulation int type; float p0, p1, p2, p3, p4; float r2cut; //square of cutoff float (*eng)(float r2, cudaVdW* vdw); // function to calculate energy float (*feng)(float r2, cudaVdW* vdw, float& eng); // function to calculate force (return) & energy (save in eng) float (*feng_r)(float r2, float& r, cudaVdW* vdw, float& eng); // function to calculate force (return) & energy (save in eng) if r may be knonw float (*eng_r)(float r2, float r, cudaVdW* vdw); // return energy by r and r^2 }; */ __device__ float cu_fer_lj(float r2, float& r, cudaVdW* vdw, float& eng) // calculate force and energy by Lennard-Jones pair potential: U = 4e[(s/r)^12 - (s/r)^6] // r2 - square of distance, vdw - parameters (the same for next PP functions) { float r2i = 1.f / r2; float sr2 = vdw->p1 * r2i; float sr6 = sr2 * sr2 * sr2; eng += vdw->p0 * sr6 * (sr6 - 1.f); return vdw->p2 * r2i * sr6 * (2.f * sr6 - 1.f); } __device__ float cu_fe_lj(float r2, cudaVdW* vdw, float& eng) // calculate force and energy by Lennard-Jones pair pfloatotential: U = 4e[(s/r)^12 - (s/r)^6] // r2 - square of distance, vdw - parameters (the same for next PP functions) { float r2i = 1.f / r2; float sr2 = vdw->p1 * r2i; float sr6 = sr2 * sr2 * sr2; eng += vdw->p0 * sr6 * (sr6 - 1.f); return vdw->p2 * r2i * sr6 * (2.f * sr6 - 1.f); } __device__ float cu_e_lj(float r2, cudaVdW* vdw) // calculate energy by Lennard-Jones pair potentialfloat { float r2i = 1.f / r2; float sr2 = vdw->p1 * r2i; float sr6 = sr2 * sr2 * sr2; return vdw->p0 * sr6 * (sr6 - 1.f); } __device__ float cu_er_lj(float r2, float &r, cudaVdW* vdw) // calculate energy by Lennard-Jones pair potential { float r2i = 1.f / r2; float sr2 = vdw->p1 * r2i; float sr6 = sr2 * sr2 * sr2; return vdw->p0 * sr6 * (sr6 - 1.f); } __device__ float cu_fer_buck(float r2, float& r, cudaVdW* vdw, float& eng) // calculate force and energy (with r) by Buckingham pair potential: U = A exp(-r/ro) - C/r^6 { //printf("begin cu_fer_buck\n"); float r2i = 1.f / r2; float r4i = r2i * r2i; if (r == 0.f) // calculate r if unkonwn (zero) and use it otherwise r = sqrt(r2); eng += vdw->p0 * exp(-r / vdw->p1) - vdw->p2 * r4i * r2i; return vdw->p0 * exp(-r / vdw->p1) / r / vdw->p1 - 6.f * vdw->p2 * r4i * r4i; } __device__ float cu_fe_buck(float r2, cudaVdW* vdw, float& eng) // calculate force and energy by Buckingham pair potential: U = A exp(-r/ro) - C/r^6 { float r2i = 1.f / r2; float r = sqrt(r2); float r4i = r2i * r2i; eng += vdw->p0 * exp(-r / vdw->p1) - vdw->p2 * r4i * r2i; return vdw->p0 * exp(-r / vdw->p1) / r / vdw->p1 - 6.0 * vdw->p2 * r4i * r4i; } __device__ float cu_e_buck(float r2, cudaVdW* vdw) // calculate energy by Buckingham pair potential: U = A exp(-r/ro) - C/r^6 { float r2i = 1.f / r2; float r = sqrt(r2); float r4i = r2i * r2i; return vdw->p0 * exp(-r / vdw->p1) - vdw->p2 * r4i * r2i; } __device__ float cu_er_buck(float r2, float &r, cudaVdW* vdw) // calculate energy by Buckingham pair potential: U = A exp(-r/ro) - C/r^6 { float r2i = 1.f / r2; float r4i = r2i * r2i; if (r == 0.f) // calculate r if unkonwn (zero) and use it otherwise r = sqrt(r2); return vdw->p0 * exp(-r / vdw->p1) - vdw->p2 * r4i * r2i; } __device__ float 
cu_fer_bmh(float r2, float& r, cudaVdW* vdw, float& eng) // calculate force and energy (with r) by Born–Mayer–Huggins pair potential: U = Aexp[B(s-r)] - C/r^6 - D/r^8 { float r2i = 1.f / r2; float r4i = r2i * r2i; if (r == 0.f) // calculate if unkonwn (zero) and use otherwise r = sqrt(r2); //printf("r(bmh)=%f\n", r); eng += vdw->p0 * exp(vdw->p1 * (vdw->p2 - r)) - vdw->p3 * r4i * r2i - vdw->p4 * r4i * r4i; return vdw->p0 * vdw->p1 * exp(vdw->p1 * (vdw->p2 - r)) / r - 6.f * vdw->p3 * r4i * r4i - 8.f * vdw->p4 * r4i * r4i * r2i; } __device__ float cu_fe_bmh(float r2, cudaVdW* vdw, float& eng) // calculate force and energy by Born–Mayer–Huggins pair potential: U = Aexp[B(s-r)] - C/r^6 - D/r^8 { float r2i = 1.f / r2; float r = sqrt(r2); float r4i = r2i * r2i; eng += vdw->p0 * exp(vdw->p1 * (vdw->p2 - r)) - vdw->p3 * r4i * r2i - vdw->p4 * r4i * r4i; return vdw->p0 * vdw->p1 * exp(vdw->p1 * (vdw->p2 - r)) / r - 6.f * vdw->p3 * r4i * r4i - 8.f * vdw->p4 * r4i * r4i * r2i; } __device__ float cu_e_bmh(float r2, cudaVdW* vdw) // calculate energy by Born–Mayer–Huggins pair potential: U = Aexp[B(s-r)] - C/r^6 - D/r^8 { float r2i = 1.f / r2; float r = sqrt(r2); float r4i = r2i * r2i; return vdw->p0 * exp(vdw->p1 * (vdw->p2 - r)) - vdw->p3 * r4i * r2i - vdw->p4 * r4i * r4i; } __device__ float cu_er_bmh(float r2, float &r, cudaVdW* vdw) // calculate energy by Born–Mayer–Huggins pair potential: U = Aexp[B(s-r)] - C/r^6 - D/r^8 { float r2i = 1.f / r2; float r4i = r2i * r2i; if (r == 0.f) // calculate r if unkonwn (zero) and use it otherwise r = sqrt(r2); return vdw->p0 * exp(vdw->p1 * (vdw->p2 - r)) - vdw->p3 * r4i * r2i - vdw->p4 * r4i * r4i; } __device__ float cu_fer_elin(float r2, float& r, cudaVdW* vdw, float& eng) // calculate force and energy by "elin" potential: U = A * exp(-x/ro) + C*x // r2 - square of distance, vdw - parameters (the same for next PP functions) { if (r == 0.f) // calculate r if unkonwn (zero) and use it otherwise r = sqrt(r2); eng += vdw->p0 * exp(-r / vdw->p1) + vdw->p2 * r; return vdw->p0 * exp(-r / vdw->p1) / r / vdw->p1 - vdw->p2 / r; } __device__ float cu_fe_elin(float r2, cudaVdW* vdw, float& eng) // calculate force and energy by "elin" potential: U = A * exp(-x/ro) + C*x // r2 - square of distance, vdw - parameters (the same for next PP functions) { float r = sqrt(r2); eng += vdw->p0 * exp(-r / vdw->p1) + vdw->p2 * r; return vdw->p0 * exp(-r / vdw->p1) / r / vdw->p1 - vdw->p2 / r; } __device__ float cu_e_elin(float r2, cudaVdW* vdw) // calculate energy by "elin" potential: U = A * exp(-x/ro) + C*x { float r = sqrt(r2); return vdw->p0 * exp(-r / vdw->p1) + vdw->p2 * r; } __device__ float cu_er_elin(float r2, float &r, cudaVdW* vdw) // calculate energy by "elin" potential: U = A * exp(-x/ro) + C*x { if (r == 0.f) // calculate r if unkonwn (zero) and use it otherwise r = sqrt(r2); return vdw->p0 * exp(-r / vdw->p1) + vdw->p2 * r; } __device__ float cu_fer_einv(float r2, float& r, cudaVdW* vdw, float& eng) // calculate force and energy by "einv" potential: U = A * exp(-x/ro) - C/x // r2 - square of distance, vdw - parameters (the same for next PP functions) { if (r == 0.f) // calculate r if unkonwn (zero) and use it otherwise r = sqrt(r2); eng += vdw->p0 * exp(-r / vdw->p1) - vdw->p2 / r; return vdw->p0 * exp(-r / vdw->p1) / r / vdw->p1 - vdw->p2 / r / r2; } __device__ float cu_fe_einv(float r2, cudaVdW* vdw, float& eng) // calculate force and energy by "einv" potential: U = A * exp(-x/ro) - C/x // r2 - square of distance, vdw - parameters (the same for next PP functions) { 
float r = sqrt(r2); eng += vdw->p0 * exp(-r / vdw->p1) - vdw->p2 / r; return vdw->p0 * exp(-r / vdw->p1) / r / vdw->p1 - vdw->p2 / r / r2; } __device__ float cu_e_einv(float r2, cudaVdW* vdw) // calculate energy by "einv" potential: U = A * exp(-x/ro) - C/x { float r = sqrt(r2); return vdw->p0 * exp(-r / vdw->p1) - vdw->p2 / r; } __device__ float cu_er_einv(float r2, float &r, cudaVdW* vdw) // calculate energy by "einv" potential: U = A * exp(-x/ro) - C/x { if (r == 0.f) // calculate r if unknown (zero) and use it otherwise r = sqrt(r2); return vdw->p0 * exp(-r / vdw->p1) - vdw->p2 / r; } __device__ float surk_pot(float r2, float rad1, float rad2, cudaVdW* vdw, float& eng) // potential derived by Platon Surkov: // U = ri*rj*(C1 ri^2 rj^2 / rij^7 - C2 / (ki*ri + kj * rj) / r^6 // p0 = C1, p1 = C2, p2 = ki, p3 = kj { //rad1 = 0.577; //rad2 = 0.577; float C2ir_sum = vdw->p1 / (vdw->p2 * rad1 + vdw->p3 * rad2); // C2 / (ka + lb) float r_prod = rad1 * rad2; float C1ab2 = r_prod * r_prod * vdw->p0; // C1 * a^2 b^2 float r6 = r2 * r2 * r2; float r = sqrt(r2); float ir6 = 1.f / r6; float ir = 1.f / r; float val = r_prod * ir6 * (C1ab2 * ir - C2ir_sum); //printf("U=%f: ra=%f rb=%f (%f %f %f %f)\n", val, rad1, rad2, vdw->p0, vdw->p1, vdw->p2, vdw->p3); eng += val; return r_prod * ir6 / r2 * (7.f * C1ab2 * ir - 6.f * C2ir_sum); } __global__ void define_vdw_func(cudaMD* md) { cudaVdW* vdw; vdw = &(md->pairpots[threadIdx.x]); switch (vdw->type) { case lj_type: vdw->eng = &cu_e_lj; vdw->eng_r = &cu_er_lj; vdw->feng = &cu_fe_lj; vdw->feng_r = &cu_fer_lj; break; case bh_type: vdw->eng = &cu_e_buck; vdw->eng_r = &cu_er_buck; vdw->feng = &cu_fe_buck; vdw->feng_r = &cu_fer_buck; break; case BHM_type: vdw->eng = &cu_e_bmh; vdw->eng_r = &cu_er_bmh; vdw->feng = &cu_fe_bmh; vdw->feng_r = &cu_fer_bmh; break; case elin_type: vdw->eng = &cu_e_elin; vdw->eng_r = &cu_er_elin; vdw->feng = &cu_fe_elin; vdw->feng_r = &cu_fer_elin; break; case einv_type: vdw->eng = &cu_e_einv; vdw->eng_r = &cu_er_einv; vdw->feng = &cu_fe_einv; vdw->feng_r = &cu_fer_einv; break; case surk_type: vdw->radi_func = &surk_pot; break; } }
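// Sketch of why define_vdw_func above is itself a __global__ kernel: the address of a
// __device__ function is only meaningful in device code, so the per-potential function
// pointers have to be filled in from a kernel rather than copied from the host. Minimal
// standalone illustration (hypothetical names, single translation unit, CUDA runtime):
#include <cstdio>
#include <cuda_runtime.h>

struct Pot { float (*eng)(float r2); };

__device__ float lj_like(float r2)   { return 1.0f / (r2 * r2 * r2); }
__device__ float coul_like(float r2) { return rsqrtf(r2); }

__global__ void bindFuncs(Pot *p) {              // one thread per potential entry
  if (threadIdx.x == 0) p[0].eng = &lj_like;
  if (threadIdx.x == 1) p[1].eng = &coul_like;
}

__global__ void evalFuncs(const Pot *p, float r2, float *out) {
  out[threadIdx.x] = p[threadIdx.x].eng(r2);     // indirect call through the device pointer
}

int main() {
  Pot *p;
  float *out;
  cudaMalloc((void **)&p, 2 * sizeof(Pot));
  cudaMallocManaged((void **)&out, 2 * sizeof(float));
  bindFuncs<<<1, 2>>>(p);
  evalFuncs<<<1, 2>>>(p, 2.0f, out);
  cudaDeviceSynchronize();
  printf("lj_like(2) = %f, coul_like(2) = %f\n", out[0], out[1]);
  cudaFree(p);
  cudaFree(out);
  return 0;
}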
5a88746f45d7786836d1410d903c80b8c57a8df5.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <cmath> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <driver_functions.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "CycleTimer.h" using namespace std; void printCudaInfo(); extern float toBW(int bytes, float sec); struct GlobalConstants { int nx; int ny; int Mt; int nts; int ictype; float G; float R; float delta; float k; float c_infm; float Dl; float d0; float W0; float lT; float lamd; float tau0; float c_infty; float R_tilde; float Dl_tilde; float lT_tilde; float eps; float alpha0; float dx; float dt; float asp_ratio; float lxd; float lx; float lyd; float eta; float U0; // parameters that are not in the input file float hi; float cosa; float sina; float sqrt2; float a_s; float epsilon; float a_12; }; __constant__ GlobalConstants cP; // Device codes // boundary condition // only use this function to access the boundary points, // other functions return at the boundary __global__ void set_BC(float* ps, float* ph, float* U, float* dpsi, int fnx, int fny){ // find the location of boundary: int index = blockIdx.x * blockDim.x + threadIdx.x; // z=0, lx if (index<fnx) { int b_in = index+2*fnx; int t_out = index+(fny-1)*fnx; int t_in = index+(fny-3)*fnx; ps[index] = ps[b_in]; ph[index] = ph[b_in]; U[index] = U[b_in]; dpsi[index] = dpsi[b_in]; ps[t_out] = ps[t_in]; ph[t_out] = ph[t_in]; U[t_out] = U[t_in]; dpsi[t_out] = dpsi[t_in]; } if (index<fny){ int l_out = index*fnx; int l_in = index*fnx + 2; int r_out = index*fnx + fnx -1; int r_in = index*fnx + fnx -3; ps[l_out] = ps[l_in]; ph[l_out] = ph[l_in]; U[l_out] = U[l_in]; dpsi[l_out] = dpsi[l_in]; ps[r_out] = ps[r_in]; ph[r_out] = ph[r_in]; U[r_out] = U[r_in]; dpsi[r_out] = dpsi[r_in]; } } // initialization __global__ void initialize(float* ps_old, float* ph_old, float* U_old, float* ps_new, float* ph_new, float* U_new , float* x, float* y, int fnx, int fny){ int C = blockIdx.x * blockDim.x + threadIdx.x; int j=C/fnx; int i=C-j*fnx; // when initialize, you need to consider C/F layout // if F layout, the 1D array has peroidicity of nx // all the variables should be functions of x and y // size (nx+2)*(ny+2), x:nx, y:ny if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) { float xc = x[i]; float yc = y[j]; int cent = fnx/2; ps_old[C] = 5.625f - sqrtf( (xc-x[cent])*(xc-x[cent]) + yc*yc )/cP.W0 ; //if (C<1000){printf("ps %f\n",ps_old[C]);} ps_new[C] = ps_old[C]; U_old[C] = cP.U0; U_new[C] = cP.U0; ph_old[C] = tanhf(ps_old[C]/cP.sqrt2); ph_new[C] = tanhf(ps_new[C]/cP.sqrt2); // if (C<1000){printf("phi %f\n",ph_old[C]);} } } __global__ void initialize_many(float* ps_old, float* ph_old, float* U_old, float* ps_new, float* ph_new, float* U_new , float* x, float* y, int fnx, int fny){ int C = blockIdx.x * blockDim.x + threadIdx.x; int j=C/fnx; int i=C-j*fnx; // when initialize, you need to consider C/F layout // if F layout, the 1D array has peroidicity of nx // all the variables should be functions of x and y // size (nx+2)*(ny+2), x:nx, y:ny if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) { float xc = x[i]; float yc = y[j]; int cent = fnx/2; int num_cells = 24; float per_len = cP.lxd/num_cells; int devision = (int) xc/per_len; //np.asarray(xx/(lx/24),dtype=int) float loc = per_len/2.0f+devision*per_len; ps_old[C] = 5.625f - sqrtf( (xc-loc)*(xc-loc) + yc*yc )/cP.W0 ; //if (C<1000){printf("ps %f\n",ps_old[C]);} ps_new[C] = ps_old[C]; U_old[C] = cP.U0; U_new[C] = cP.U0; ph_old[C] = tanhf(ps_old[C]/cP.sqrt2); ph_new[C] = 
tanhf(ps_new[C]/cP.sqrt2); // if (C<1000){printf("phi %f\n",ph_old[C]);} } } // anisotropy functions __device__ float atheta(float ux, float uz){ float ux2 = cP.cosa*ux + cP.sina*uz; ux2 = ux2*ux2; float uz2 = -cP.sina*ux + cP.cosa*uz; uz2 = uz2*uz2; float MAG_sq = (ux2 + uz2); float MAG_sq2= MAG_sq*MAG_sq; if (MAG_sq > cP.eps){ return cP.a_s*( 1.0f + cP.epsilon*(ux2*ux2 + uz2*uz2) / MAG_sq2);} else {return 1.0f;} } __device__ float aptheta(float ux, float uz){ float uxr = cP.cosa*ux + cP.sina*uz; float ux2 = uxr*uxr; float uzr = -cP.sina*ux + cP.cosa*uz; float uz2 = uzr*uzr; float MAG_sq = (ux2 + uz2); float MAG_sq2= MAG_sq*MAG_sq; if (MAG_sq > cP.eps){ return -cP.a_12*uxr*uzr*(ux2 - uz2) / MAG_sq2;} else {return 0.0f;} } // psi equation __global__ void rhs_psi(float* ps, float* ph, float* U, float* ps_new, float* ph_new, \ float* y, float* dpsi, int fnx, int fny, int nt ){ int C = blockIdx.x * blockDim.x + threadIdx.x; int j=C/fnx; int i=C-j*fnx; // if the points are at boundary, return if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) { // find the indices of the 8 neighbors for center //if (C==1000){printf("find");} int R=C+1; int L=C-1; int T=C+fnx; int B=C-fnx; // ============================================================= // 1. ANISOTROPIC DIFFUSION // ============================================================= // these ps's are defined on cell centers float psipjp=( ps[C] + ps[R] + ps[T] + ps[T+1] ) * 0.25f; float psipjm=( ps[C] + ps[R] + ps[B] + ps[B+1] ) * 0.25f; float psimjp=( ps[C] + ps[L] + ps[T-1] + ps[T] ) * 0.25f; float psimjm=( ps[C] + ps[L] + ps[B-1] + ps[B] ) * 0.25f; float phipjp=( ph[C] + ph[R] + ph[T] + ph[T+1] ) * 0.25f; float phipjm=( ph[C] + ph[R] + ph[B] + ph[B+1] ) * 0.25f; float phimjp=( ph[C] + ph[L] + ph[T-1] + ph[T] ) * 0.25f; float phimjm=( ph[C] + ph[L] + ph[B-1] + ph[B] ) * 0.25f; // ============================ // right edge flux // ============================ float psx = ps[R]-ps[C]; float psz = psipjp - psipjm; float phx = ph[R]-ph[C]; float phz = phipjp - phipjm; float A = atheta( phx,phz); float Ap = aptheta(phx,phz); float JR = A * ( A*psx - Ap*psz ); // ============================ // left edge flux // ============================ psx = ps[C]-ps[L]; psz = psimjp - psimjm; phx = ph[C]-ph[L]; phz = phimjp - phimjm; A = atheta( phx,phz); Ap = aptheta(phx,phz); float JL = A * ( A*psx - Ap*psz ); // ============================ // top edge flux // ============================ psx = psipjp - psimjp; psz = ps[T]-ps[C]; phx = phipjp - phimjp; phz = ph[T]-ph[C]; A = atheta( phx,phz); Ap = aptheta(phx,phz); float JT = A * ( A*psz + Ap*psx ); // ============================ // bottom edge flux // ============================ psx = psipjm - psimjm; psz = ps[C]-ps[B]; phx = phipjm - phimjm; phz = ph[C]-ph[B]; A = atheta( phx,phz); Ap = aptheta(phx,phz); float JB = A * ( A*psz + Ap*psx ); /*# ============================================================= # # 2. EXTRA TERM: sqrt2 * atheta**2 * phi * |grad psi|^2 # # ============================================================= # d(phi)/dx d(psi)/dx d(phi)/dz d(psi)/dz at nodes (i,j)*/ float phxn = ( ph[R] - ph[L] ) * 0.5f; float phzn = ( ph[T] - ph[B] ) * 0.5f; float psxn = ( ps[R] - ps[L] ) * 0.5f; float pszn = ( ps[T] - ps[B] ) * 0.5f; float A2 = atheta(phxn,phzn); A2 = A2*A2; float gradps2 = (psxn)*(psxn) + (pszn)*(pszn); float extra = -cP.sqrt2 * A2 * ph[C] * gradps2; /*# ============================================================= # # 3. 
double well (transformed): sqrt2 * phi + nonlinear terms # # =============================================================*/ float Up = (y[j]/cP.W0 - cP.R_tilde * (nt*cP.dt) )/cP.lT_tilde; float rhs_psi = ((JR-JL) + (JT-JB) + extra) * cP.hi*cP.hi + \ cP.sqrt2*ph[C] - cP.lamd*(1.0f-ph[C]*ph[C])*cP.sqrt2*(U[C] + Up); /*# ============================================================= # # 4. dpsi/dt term # # =============================================================*/ float tp = (1.0f-(1.0f-cP.k)*Up); float tau_psi; if (tp >= cP.k){tau_psi = tp*A2;} else {tau_psi = cP.k*A2;} dpsi[C] = rhs_psi / tau_psi; ps_new[C] = ps[C] + cP.dt * dpsi[C]; ph_new[C] = tanhf(ps_new[C]/cP.sqrt2); //if (C==1000){printf("%f ",ph_new[C]);} } } // U equation __global__ void rhs_U(float* U, float* U_new, float* ph, float* dpsi, int fnx, int fny ){ int C = blockIdx.x * blockDim.x + threadIdx.x; int j=C/fnx; int i=C-j*fnx; // if the points are at boundary, return if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) { // find the indices of the 8 neighbors for center int R=C+1; int L=C-1; int T=C+fnx; int B=C-fnx; float hi = cP.hi; float Dl_tilde = cP.Dl_tilde; float k = cP.k; float nx,nz; float eps = cP.eps; // ============================================================= // 1. ANISOTROPIC DIFFUSION // ============================================================= // these ps's are defined on cell centers float phipjp=( ph[C] + ph[R] + ph[T] + ph[T+1] ) * 0.25f; float phipjm=( ph[C] + ph[R] + ph[B] + ph[B+1] ) * 0.25f; float phimjp=( ph[C] + ph[L] + ph[T-1] + ph[T] ) * 0.25f; float phimjm=( ph[C] + ph[L] + ph[B-1] + ph[B] ) * 0.25f; float jat = 0.5f*(1.0f+(1.0f-k)*U[C])*(1.0f-ph[C]*ph[C])*dpsi[C]; /*# ============================ # right edge flux (i+1/2, j) # ============================*/ float phx = ph[R]-ph[C]; float phz = phipjp - phipjm; float phn2 = phx*phx + phz*phz; if (phn2 > eps) {nx = phx / sqrtf(phn2);} else {nx = 0.0f;} float jat_ip = 0.5f*(1.0f+(1.0f-k)*U[R])*(1.0f-ph[R]*ph[R])*dpsi[R]; float UR = hi*Dl_tilde*0.5f*(2.0f - ph[C] - ph[R])*(U[R]-U[C]) + 0.5f*(jat + jat_ip)*nx; /* ============================ # left edge flux (i-1/2, j) # ============================*/ phx = ph[C]-ph[L]; phz = phimjp - phimjm; phn2 = phx*phx + phz*phz; if (phn2 > eps) {nx = phx / sqrtf(phn2);} else {nx = 0.0f;} float jat_im = 0.5f*(1.0f+(1.0f-k)*U[L])*(1.0f-ph[L]*ph[L])*dpsi[L]; float UL = hi*Dl_tilde*0.5f*(2.0f - ph[C] - ph[L])*(U[C]-U[L]) + 0.5f*(jat + jat_im)*nx; /*# ============================ # top edge flux (i, j+1/2) # ============================*/ phx = phipjp - phimjp; phz = ph[T]-ph[C]; phn2 = phx*phx + phz*phz; if (phn2 > eps) {nz = phz / sqrtf(phn2);} else {nz = 0.0f;} float jat_jp = 0.5f*(1.0f+(1.0f-k)*U[T])*(1.0f-ph[T]*ph[T])*dpsi[T]; float UT = hi*Dl_tilde*0.5f*(2.0f - ph[C] - ph[T])*(U[T]-U[C]) + 0.5f*(jat + jat_jp)*nz; /*# ============================ # bottom edge flux (i, j-1/2) # ============================*/ phx = phipjm - phimjm; phz = ph[C]-ph[B]; phn2 = phx*phx + phz*phz; if (phn2 > eps) {nz = phz / sqrtf(phn2);} else {nz = 0.0f;} float jat_jm = 0.5f*(1.0f+(1.0f-k)*U[B])*(1.0f-ph[B]*ph[B])*dpsi[B]; float UB = hi*Dl_tilde*0.5f*(2.0f - ph[C] - ph[B])*(U[C]-U[B]) + 0.5f*(jat + jat_jm)*nz; float rhs_U = ( (UR-UL) + (UT-UB) ) * hi + cP.sqrt2 * jat; float tau_U = (1.0f+cP.k) - (1.0f-cP.k)*ph[C]; U_new[C] = U[C] + cP.dt * ( rhs_U / tau_U ); } } void setup(GlobalConstants params, int fnx, int fny, float* x, float* y, float* phi, float* psi,float* U){ // we should have already pass all the data structure in by 
this time // move those data onto device printCudaInfo(); float* x_device;// = NULL; float* y_device;// = NULL; float* psi_old;// = NULL; float* psi_new;// = NULL; float* U_old;// = NULL; float* U_new;// = NULL; float* phi_old;// = NULL; float* phi_new;// = NULL; float* dpsi;// = NULL; // allocate x, y, phi, psi, U related params int length = fnx*fny; hipMalloc((void **)&x_device, sizeof(float) * fnx); hipMalloc((void **)&y_device, sizeof(float) * fny); hipMalloc((void **)&phi_old, sizeof(float) * length); hipMalloc((void **)&psi_old, sizeof(float) * length); hipMalloc((void **)&U_old, sizeof(float) * length); hipMalloc((void **)&phi_new, sizeof(float) * length); hipMalloc((void **)&psi_new, sizeof(float) * length); hipMalloc((void **)&U_new, sizeof(float) * length); hipMalloc((void **)&dpsi, sizeof(float) * length); hipMemcpy(x_device, x, sizeof(float) * fnx, hipMemcpyHostToDevice); hipMemcpy(y_device, y, sizeof(float) * fny, hipMemcpyHostToDevice); hipMemcpy(psi_old, psi, sizeof(float) * length, hipMemcpyHostToDevice); hipMemcpy(phi_old, phi, sizeof(float) * length, hipMemcpyHostToDevice); hipMemcpy(U_old, U, sizeof(float) * length, hipMemcpyHostToDevice); // pass all the read-only params into global constant hipMemcpyToSymbol(cP, &params, sizeof(GlobalConstants) ); int blocksize_1d = 128; int blocksize_2d = 128; // seems reduce the block size makes it a little faster, but around 128 is okay. int num_block_2d = (fnx*fny+blocksize_2d-1)/blocksize_2d; int num_block_1d = (fnx+fny+blocksize_1d-1)/blocksize_1d; printf("block size %d, # blocks %d\n", blocksize_2d, num_block_2d); hipLaunchKernelGGL(( initialize), dim3(num_block_2d), dim3(blocksize_2d) , 0, 0, psi_old, phi_old, U_old, psi_new, phi_new, U_new, x_device, y_device, fnx, fny); hipLaunchKernelGGL(( set_BC), dim3(num_block_1d), dim3(blocksize_1d) , 0, 0, psi_new, phi_new, U_new, dpsi, fnx, fny); hipLaunchKernelGGL(( set_BC), dim3(num_block_1d), dim3(blocksize_1d) , 0, 0, psi_old, phi_old, U_old, dpsi, fnx, fny); hipDeviceSynchronize(); double startTime = CycleTimer::currentSeconds(); for (int kt=0; kt<params.Mt/2; kt++){ // printf("time step %d\n",kt); hipLaunchKernelGGL(( rhs_psi), dim3(num_block_2d), dim3(blocksize_2d) , 0, 0, psi_old, phi_old, U_old, psi_new, phi_new, y_device, dpsi, fnx, fny, 2*kt ); //hipDeviceSynchronize(); hipLaunchKernelGGL(( set_BC), dim3(num_block_1d), dim3(blocksize_1d) , 0, 0, psi_new, phi_new, U_old, dpsi, fnx, fny); //hipDeviceSynchronize(); hipLaunchKernelGGL(( rhs_U), dim3(num_block_2d), dim3(blocksize_2d) , 0, 0, U_old, U_new, phi_new, dpsi, fnx, fny); //hipDeviceSynchronize(); hipLaunchKernelGGL(( rhs_psi), dim3(num_block_2d), dim3(blocksize_2d) , 0, 0, psi_new, phi_new, U_new, psi_old, phi_old, y_device, dpsi, fnx, fny, 2*kt+1 ); //hipDeviceSynchronize(); hipLaunchKernelGGL(( set_BC), dim3(num_block_1d), dim3(blocksize_1d) , 0, 0, psi_old, phi_old, U_new, dpsi, fnx, fny); //hipDeviceSynchronize(); hipLaunchKernelGGL(( rhs_U), dim3(num_block_2d), dim3(blocksize_2d) , 0, 0, U_new, U_old, phi_old, dpsi, fnx, fny); //hipDeviceSynchronize(); } hipDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); printf("time for %d iterations: %f s\n", params.Mt, endTime-startTime); hipMemcpy(psi, psi_old, length * sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(phi, phi_old, length * sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(U, U_old, length * sizeof(float),hipMemcpyDeviceToHost); hipFree(x_device); hipFree(y_device); hipFree(psi_old); hipFree(psi_new); hipFree(phi_old); hipFree(phi_new); 
hipFree(U_old); hipFree(U_new); hipFree(dpsi); } /* void time_marching(GlobalConstants params, int fnx, int fny){ // initialize or load int blocksize_1d = 256; int blocksize_2d = 512; int num_block_2d = (fnx*fny+blocksize_2d-1)/blocksize_2d; int num_block_1d = (fnx+fny+blocksize_1d-1)/blocksize_1d; hipLaunchKernelGGL(( initialize), dim3(num_block_2d), dim3(blocksize_2d) , 0, 0, ps_old, ph_old, U_old, ps_new, ph_new, U_new, x_device, y_device, fnx, fny); for (int kt=0; kt<params.Mt/2; kt++){ hipLaunchKernelGGL(( rhs_psi), dim3(num_block_2d), dim3(blocksize_2d) , 0, 0, psi_old, phi_old, U_old, psi_new, phi_new, y_device, dpsi, fnx, fny, 2*kt ); hipLaunchKernelGGL(( set_BC), dim3(num_block_1d), dim3(blocksize_1d) , 0, 0, psi_new, phi_new, U_old, dpsi, fnx, fny); hipLaunchKernelGGL(( rhs_U), dim3(num_block_2d), dim3(blocksize_2d) , 0, 0, U_old, U_new, phi_new, dpsi); hipLaunchKernelGGL(( rhs_psi), dim3(num_block_2d), dim3(blocksize_2d) , 0, 0, psi_new, phi_new, U_new, psi_old, phi_old, y_device, dpsi, fnx, fny, 2*kt+1 ); hipLaunchKernelGGL(( set_BC), dim3(num_block_1d), dim3(blocksize_1d) , 0, 0, psi_old, phi_old, U_new, dpsi, fnx, fny); hipLaunchKernelGGL(( rhs_U), dim3(num_block_2d), dim3(blocksize_2d) , 0, 0, U_new, U_old, phi_old, dpsi); } }*/ void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; hipError_t err = hipGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { hipDeviceProp_t deviceProps; hipGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
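// Sketch of the ping-pong time stepping used in the loop above: rather than copying the "new"
// fields back into the "old" ones, the update kernels are launched twice per iteration with the
// two buffers exchanged (which is why the loop runs Mt/2 times). The standalone example below
// shows the same pattern with a single hypothetical smoothing kernel; it is written against the
// CUDA runtime, and the hipified form differs only in the cuda* -> hip* names and the
// hipLaunchKernelGGL launch macro.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void relax(const float *in, float *out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i > 0 && i < n - 1) out[i] = 0.5f * in[i] + 0.25f * (in[i - 1] + in[i + 1]);
}

int main() {
  const int n = 1 << 10, steps = 100, bs = 128, nb = (n + bs - 1) / bs;
  float *a, *b;
  cudaMallocManaged((void **)&a, n * sizeof(float));
  cudaMallocManaged((void **)&b, n * sizeof(float));
  for (int i = 0; i < n; i++) { a[i] = (i == n / 2) ? 1.0f : 0.0f; b[i] = 0.0f; }
  for (int t = 0; t < steps / 2; t++) {  // two half-steps per iteration, like Mt/2 above
    relax<<<nb, bs>>>(a, b, n);          // old -> new
    relax<<<nb, bs>>>(b, a, n);          // new -> old, buffers exchanged
  }
  cudaDeviceSynchronize();
  printf("centre value after %d steps: %f\n", steps, a[n / 2]);
  cudaFree(a);
  cudaFree(b);
  return 0;
}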
5a88746f45d7786836d1410d903c80b8c57a8df5.cu
#include <stdio.h> #include <cmath> #include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #include <curand.h> #include <curand_kernel.h> #include "CycleTimer.h" using namespace std; void printCudaInfo(); extern float toBW(int bytes, float sec); struct GlobalConstants { int nx; int ny; int Mt; int nts; int ictype; float G; float R; float delta; float k; float c_infm; float Dl; float d0; float W0; float lT; float lamd; float tau0; float c_infty; float R_tilde; float Dl_tilde; float lT_tilde; float eps; float alpha0; float dx; float dt; float asp_ratio; float lxd; float lx; float lyd; float eta; float U0; // parameters that are not in the input file float hi; float cosa; float sina; float sqrt2; float a_s; float epsilon; float a_12; }; __constant__ GlobalConstants cP; // Device codes // boundary condition // only use this function to access the boundary points, // other functions return at the boundary __global__ void set_BC(float* ps, float* ph, float* U, float* dpsi, int fnx, int fny){ // find the location of boundary: int index = blockIdx.x * blockDim.x + threadIdx.x; // z=0, lx if (index<fnx) { int b_in = index+2*fnx; int t_out = index+(fny-1)*fnx; int t_in = index+(fny-3)*fnx; ps[index] = ps[b_in]; ph[index] = ph[b_in]; U[index] = U[b_in]; dpsi[index] = dpsi[b_in]; ps[t_out] = ps[t_in]; ph[t_out] = ph[t_in]; U[t_out] = U[t_in]; dpsi[t_out] = dpsi[t_in]; } if (index<fny){ int l_out = index*fnx; int l_in = index*fnx + 2; int r_out = index*fnx + fnx -1; int r_in = index*fnx + fnx -3; ps[l_out] = ps[l_in]; ph[l_out] = ph[l_in]; U[l_out] = U[l_in]; dpsi[l_out] = dpsi[l_in]; ps[r_out] = ps[r_in]; ph[r_out] = ph[r_in]; U[r_out] = U[r_in]; dpsi[r_out] = dpsi[r_in]; } } // initialization __global__ void initialize(float* ps_old, float* ph_old, float* U_old, float* ps_new, float* ph_new, float* U_new , float* x, float* y, int fnx, int fny){ int C = blockIdx.x * blockDim.x + threadIdx.x; int j=C/fnx; int i=C-j*fnx; // when initialize, you need to consider C/F layout // if F layout, the 1D array has peroidicity of nx // all the variables should be functions of x and y // size (nx+2)*(ny+2), x:nx, y:ny if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) { float xc = x[i]; float yc = y[j]; int cent = fnx/2; ps_old[C] = 5.625f - sqrtf( (xc-x[cent])*(xc-x[cent]) + yc*yc )/cP.W0 ; //if (C<1000){printf("ps %f\n",ps_old[C]);} ps_new[C] = ps_old[C]; U_old[C] = cP.U0; U_new[C] = cP.U0; ph_old[C] = tanhf(ps_old[C]/cP.sqrt2); ph_new[C] = tanhf(ps_new[C]/cP.sqrt2); // if (C<1000){printf("phi %f\n",ph_old[C]);} } } __global__ void initialize_many(float* ps_old, float* ph_old, float* U_old, float* ps_new, float* ph_new, float* U_new , float* x, float* y, int fnx, int fny){ int C = blockIdx.x * blockDim.x + threadIdx.x; int j=C/fnx; int i=C-j*fnx; // when initialize, you need to consider C/F layout // if F layout, the 1D array has peroidicity of nx // all the variables should be functions of x and y // size (nx+2)*(ny+2), x:nx, y:ny if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) { float xc = x[i]; float yc = y[j]; int cent = fnx/2; int num_cells = 24; float per_len = cP.lxd/num_cells; int devision = (int) xc/per_len; //np.asarray(xx/(lx/24),dtype=int) float loc = per_len/2.0f+devision*per_len; ps_old[C] = 5.625f - sqrtf( (xc-loc)*(xc-loc) + yc*yc )/cP.W0 ; //if (C<1000){printf("ps %f\n",ps_old[C]);} ps_new[C] = ps_old[C]; U_old[C] = cP.U0; U_new[C] = cP.U0; ph_old[C] = tanhf(ps_old[C]/cP.sqrt2); ph_new[C] = tanhf(ps_new[C]/cP.sqrt2); // if (C<1000){printf("phi %f\n",ph_old[C]);} } } // anisotropy 
functions __device__ float atheta(float ux, float uz){ float ux2 = cP.cosa*ux + cP.sina*uz; ux2 = ux2*ux2; float uz2 = -cP.sina*ux + cP.cosa*uz; uz2 = uz2*uz2; float MAG_sq = (ux2 + uz2); float MAG_sq2= MAG_sq*MAG_sq; if (MAG_sq > cP.eps){ return cP.a_s*( 1.0f + cP.epsilon*(ux2*ux2 + uz2*uz2) / MAG_sq2);} else {return 1.0f;} } __device__ float aptheta(float ux, float uz){ float uxr = cP.cosa*ux + cP.sina*uz; float ux2 = uxr*uxr; float uzr = -cP.sina*ux + cP.cosa*uz; float uz2 = uzr*uzr; float MAG_sq = (ux2 + uz2); float MAG_sq2= MAG_sq*MAG_sq; if (MAG_sq > cP.eps){ return -cP.a_12*uxr*uzr*(ux2 - uz2) / MAG_sq2;} else {return 0.0f;} } // psi equation __global__ void rhs_psi(float* ps, float* ph, float* U, float* ps_new, float* ph_new, \ float* y, float* dpsi, int fnx, int fny, int nt ){ int C = blockIdx.x * blockDim.x + threadIdx.x; int j=C/fnx; int i=C-j*fnx; // if the points are at boundary, return if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) { // find the indices of the 8 neighbors for center //if (C==1000){printf("find");} int R=C+1; int L=C-1; int T=C+fnx; int B=C-fnx; // ============================================================= // 1. ANISOTROPIC DIFFUSION // ============================================================= // these ps's are defined on cell centers float psipjp=( ps[C] + ps[R] + ps[T] + ps[T+1] ) * 0.25f; float psipjm=( ps[C] + ps[R] + ps[B] + ps[B+1] ) * 0.25f; float psimjp=( ps[C] + ps[L] + ps[T-1] + ps[T] ) * 0.25f; float psimjm=( ps[C] + ps[L] + ps[B-1] + ps[B] ) * 0.25f; float phipjp=( ph[C] + ph[R] + ph[T] + ph[T+1] ) * 0.25f; float phipjm=( ph[C] + ph[R] + ph[B] + ph[B+1] ) * 0.25f; float phimjp=( ph[C] + ph[L] + ph[T-1] + ph[T] ) * 0.25f; float phimjm=( ph[C] + ph[L] + ph[B-1] + ph[B] ) * 0.25f; // ============================ // right edge flux // ============================ float psx = ps[R]-ps[C]; float psz = psipjp - psipjm; float phx = ph[R]-ph[C]; float phz = phipjp - phipjm; float A = atheta( phx,phz); float Ap = aptheta(phx,phz); float JR = A * ( A*psx - Ap*psz ); // ============================ // left edge flux // ============================ psx = ps[C]-ps[L]; psz = psimjp - psimjm; phx = ph[C]-ph[L]; phz = phimjp - phimjm; A = atheta( phx,phz); Ap = aptheta(phx,phz); float JL = A * ( A*psx - Ap*psz ); // ============================ // top edge flux // ============================ psx = psipjp - psimjp; psz = ps[T]-ps[C]; phx = phipjp - phimjp; phz = ph[T]-ph[C]; A = atheta( phx,phz); Ap = aptheta(phx,phz); float JT = A * ( A*psz + Ap*psx ); // ============================ // bottom edge flux // ============================ psx = psipjm - psimjm; psz = ps[C]-ps[B]; phx = phipjm - phimjm; phz = ph[C]-ph[B]; A = atheta( phx,phz); Ap = aptheta(phx,phz); float JB = A * ( A*psz + Ap*psx ); /*# ============================================================= # # 2. EXTRA TERM: sqrt2 * atheta**2 * phi * |grad psi|^2 # # ============================================================= # d(phi)/dx d(psi)/dx d(phi)/dz d(psi)/dz at nodes (i,j)*/ float phxn = ( ph[R] - ph[L] ) * 0.5f; float phzn = ( ph[T] - ph[B] ) * 0.5f; float psxn = ( ps[R] - ps[L] ) * 0.5f; float pszn = ( ps[T] - ps[B] ) * 0.5f; float A2 = atheta(phxn,phzn); A2 = A2*A2; float gradps2 = (psxn)*(psxn) + (pszn)*(pszn); float extra = -cP.sqrt2 * A2 * ph[C] * gradps2; /*# ============================================================= # # 3. 
double well (transformed): sqrt2 * phi + nonlinear terms # # =============================================================*/ float Up = (y[j]/cP.W0 - cP.R_tilde * (nt*cP.dt) )/cP.lT_tilde; float rhs_psi = ((JR-JL) + (JT-JB) + extra) * cP.hi*cP.hi + \ cP.sqrt2*ph[C] - cP.lamd*(1.0f-ph[C]*ph[C])*cP.sqrt2*(U[C] + Up); /*# ============================================================= # # 4. dpsi/dt term # # =============================================================*/ float tp = (1.0f-(1.0f-cP.k)*Up); float tau_psi; if (tp >= cP.k){tau_psi = tp*A2;} else {tau_psi = cP.k*A2;} dpsi[C] = rhs_psi / tau_psi; ps_new[C] = ps[C] + cP.dt * dpsi[C]; ph_new[C] = tanhf(ps_new[C]/cP.sqrt2); //if (C==1000){printf("%f ",ph_new[C]);} } } // U equation __global__ void rhs_U(float* U, float* U_new, float* ph, float* dpsi, int fnx, int fny ){ int C = blockIdx.x * blockDim.x + threadIdx.x; int j=C/fnx; int i=C-j*fnx; // if the points are at boundary, return if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) { // find the indices of the 8 neighbors for center int R=C+1; int L=C-1; int T=C+fnx; int B=C-fnx; float hi = cP.hi; float Dl_tilde = cP.Dl_tilde; float k = cP.k; float nx,nz; float eps = cP.eps; // ============================================================= // 1. ANISOTROPIC DIFFUSION // ============================================================= // these ps's are defined on cell centers float phipjp=( ph[C] + ph[R] + ph[T] + ph[T+1] ) * 0.25f; float phipjm=( ph[C] + ph[R] + ph[B] + ph[B+1] ) * 0.25f; float phimjp=( ph[C] + ph[L] + ph[T-1] + ph[T] ) * 0.25f; float phimjm=( ph[C] + ph[L] + ph[B-1] + ph[B] ) * 0.25f; float jat = 0.5f*(1.0f+(1.0f-k)*U[C])*(1.0f-ph[C]*ph[C])*dpsi[C]; /*# ============================ # right edge flux (i+1/2, j) # ============================*/ float phx = ph[R]-ph[C]; float phz = phipjp - phipjm; float phn2 = phx*phx + phz*phz; if (phn2 > eps) {nx = phx / sqrtf(phn2);} else {nx = 0.0f;} float jat_ip = 0.5f*(1.0f+(1.0f-k)*U[R])*(1.0f-ph[R]*ph[R])*dpsi[R]; float UR = hi*Dl_tilde*0.5f*(2.0f - ph[C] - ph[R])*(U[R]-U[C]) + 0.5f*(jat + jat_ip)*nx; /* ============================ # left edge flux (i-1/2, j) # ============================*/ phx = ph[C]-ph[L]; phz = phimjp - phimjm; phn2 = phx*phx + phz*phz; if (phn2 > eps) {nx = phx / sqrtf(phn2);} else {nx = 0.0f;} float jat_im = 0.5f*(1.0f+(1.0f-k)*U[L])*(1.0f-ph[L]*ph[L])*dpsi[L]; float UL = hi*Dl_tilde*0.5f*(2.0f - ph[C] - ph[L])*(U[C]-U[L]) + 0.5f*(jat + jat_im)*nx; /*# ============================ # top edge flux (i, j+1/2) # ============================*/ phx = phipjp - phimjp; phz = ph[T]-ph[C]; phn2 = phx*phx + phz*phz; if (phn2 > eps) {nz = phz / sqrtf(phn2);} else {nz = 0.0f;} float jat_jp = 0.5f*(1.0f+(1.0f-k)*U[T])*(1.0f-ph[T]*ph[T])*dpsi[T]; float UT = hi*Dl_tilde*0.5f*(2.0f - ph[C] - ph[T])*(U[T]-U[C]) + 0.5f*(jat + jat_jp)*nz; /*# ============================ # bottom edge flux (i, j-1/2) # ============================*/ phx = phipjm - phimjm; phz = ph[C]-ph[B]; phn2 = phx*phx + phz*phz; if (phn2 > eps) {nz = phz / sqrtf(phn2);} else {nz = 0.0f;} float jat_jm = 0.5f*(1.0f+(1.0f-k)*U[B])*(1.0f-ph[B]*ph[B])*dpsi[B]; float UB = hi*Dl_tilde*0.5f*(2.0f - ph[C] - ph[B])*(U[C]-U[B]) + 0.5f*(jat + jat_jm)*nz; float rhs_U = ( (UR-UL) + (UT-UB) ) * hi + cP.sqrt2 * jat; float tau_U = (1.0f+cP.k) - (1.0f-cP.k)*ph[C]; U_new[C] = U[C] + cP.dt * ( rhs_U / tau_U ); } } void setup(GlobalConstants params, int fnx, int fny, float* x, float* y, float* phi, float* psi,float* U){ // we should have already pass all the data structure in by 
this time // move those data onto device printCudaInfo(); float* x_device;// = NULL; float* y_device;// = NULL; float* psi_old;// = NULL; float* psi_new;// = NULL; float* U_old;// = NULL; float* U_new;// = NULL; float* phi_old;// = NULL; float* phi_new;// = NULL; float* dpsi;// = NULL; // allocate x, y, phi, psi, U related params int length = fnx*fny; cudaMalloc((void **)&x_device, sizeof(float) * fnx); cudaMalloc((void **)&y_device, sizeof(float) * fny); cudaMalloc((void **)&phi_old, sizeof(float) * length); cudaMalloc((void **)&psi_old, sizeof(float) * length); cudaMalloc((void **)&U_old, sizeof(float) * length); cudaMalloc((void **)&phi_new, sizeof(float) * length); cudaMalloc((void **)&psi_new, sizeof(float) * length); cudaMalloc((void **)&U_new, sizeof(float) * length); cudaMalloc((void **)&dpsi, sizeof(float) * length); cudaMemcpy(x_device, x, sizeof(float) * fnx, cudaMemcpyHostToDevice); cudaMemcpy(y_device, y, sizeof(float) * fny, cudaMemcpyHostToDevice); cudaMemcpy(psi_old, psi, sizeof(float) * length, cudaMemcpyHostToDevice); cudaMemcpy(phi_old, phi, sizeof(float) * length, cudaMemcpyHostToDevice); cudaMemcpy(U_old, U, sizeof(float) * length, cudaMemcpyHostToDevice); // pass all the read-only params into global constant cudaMemcpyToSymbol(cP, &params, sizeof(GlobalConstants) ); int blocksize_1d = 128; int blocksize_2d = 128; // seems reduce the block size makes it a little faster, but around 128 is okay. int num_block_2d = (fnx*fny+blocksize_2d-1)/blocksize_2d; int num_block_1d = (fnx+fny+blocksize_1d-1)/blocksize_1d; printf("block size %d, # blocks %d\n", blocksize_2d, num_block_2d); initialize<<< num_block_2d, blocksize_2d >>>(psi_old, phi_old, U_old, psi_new, phi_new, U_new, x_device, y_device, fnx, fny); set_BC<<< num_block_1d, blocksize_1d >>>(psi_new, phi_new, U_new, dpsi, fnx, fny); set_BC<<< num_block_1d, blocksize_1d >>>(psi_old, phi_old, U_old, dpsi, fnx, fny); cudaDeviceSynchronize(); double startTime = CycleTimer::currentSeconds(); for (int kt=0; kt<params.Mt/2; kt++){ // printf("time step %d\n",kt); rhs_psi<<< num_block_2d, blocksize_2d >>>(psi_old, phi_old, U_old, psi_new, phi_new, y_device, dpsi, fnx, fny, 2*kt ); //cudaDeviceSynchronize(); set_BC<<< num_block_1d, blocksize_1d >>>(psi_new, phi_new, U_old, dpsi, fnx, fny); //cudaDeviceSynchronize(); rhs_U<<< num_block_2d, blocksize_2d >>>(U_old, U_new, phi_new, dpsi, fnx, fny); //cudaDeviceSynchronize(); rhs_psi<<< num_block_2d, blocksize_2d >>>(psi_new, phi_new, U_new, psi_old, phi_old, y_device, dpsi, fnx, fny, 2*kt+1 ); //cudaDeviceSynchronize(); set_BC<<< num_block_1d, blocksize_1d >>>(psi_old, phi_old, U_new, dpsi, fnx, fny); //cudaDeviceSynchronize(); rhs_U<<< num_block_2d, blocksize_2d >>>(U_new, U_old, phi_old, dpsi, fnx, fny); //cudaDeviceSynchronize(); } cudaDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); printf("time for %d iterations: %f s\n", params.Mt, endTime-startTime); cudaMemcpy(psi, psi_old, length * sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(phi, phi_old, length * sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(U, U_old, length * sizeof(float),cudaMemcpyDeviceToHost); cudaFree(x_device); cudaFree(y_device); cudaFree(psi_old); cudaFree(psi_new); cudaFree(phi_old); cudaFree(phi_new); cudaFree(U_old); cudaFree(U_new); cudaFree(dpsi); } /* void time_marching(GlobalConstants params, int fnx, int fny){ // initialize or load int blocksize_1d = 256; int blocksize_2d = 512; int num_block_2d = (fnx*fny+blocksize_2d-1)/blocksize_2d; int num_block_1d = 
(fnx+fny+blocksize_1d-1)/blocksize_1d; initialize<<< num_block_2d, blocksize_2d >>>(ps_old, ph_old, U_old, ps_new, ph_new, U_new, x_device, y_device, fnx, fny); for (int kt=0; kt<params.Mt/2; kt++){ rhs_psi<<< num_block_2d, blocksize_2d >>>(psi_old, phi_old, U_old, psi_new, phi_new, y_device, dpsi, fnx, fny, 2*kt ); set_BC<<< num_block_1d, blocksize_1d >>>(psi_new, phi_new, U_old, dpsi, fnx, fny); rhs_U<<< num_block_2d, blocksize_2d >>>(U_old, U_new, phi_new, dpsi); rhs_psi<<< num_block_2d, blocksize_2d >>>(psi_new, phi_new, U_new, psi_old, phi_old, y_device, dpsi, fnx, fny, 2*kt+1 ); set_BC<<< num_block_1d, blocksize_1d >>>(psi_old, phi_old, U_new, dpsi, fnx, fny); rhs_U<<< num_block_2d, blocksize_2d >>>(U_new, U_old, phi_old, dpsi); } }*/ void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; cudaError_t err = cudaGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { cudaDeviceProp deviceProps; cudaGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
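The setup() routine above sizes every launch with a ceiling division so the whole fnx*fny grid is covered, and it advances two half steps per loop iteration by swapping the roles of the *_old and *_new buffers instead of copying between them. Below is a minimal, self-contained sketch of those two patterns with a placeholder kernel standing in for rhs_psi/rhs_U; all names and sizes here are illustrative, not part of the original code.

#include <cstdio>
#include <cuda_runtime.h>

// Placeholder kernel: one thread per grid point, standing in for rhs_psi / rhs_U.
__global__ void step(const float* in, float* out, int n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) out[idx] = in[idx] + 1.0f;
}

int main() {
    const int fnx = 64, fny = 64, length = fnx * fny;
    const int blocksize_2d = 128;
    // ceiling division, as in setup(): enough blocks to cover every grid point
    const int num_block_2d = (length + blocksize_2d - 1) / blocksize_2d;

    float *buf_old, *buf_new;
    cudaMalloc(&buf_old, length * sizeof(float));
    cudaMalloc(&buf_new, length * sizeof(float));
    cudaMemset(buf_old, 0, length * sizeof(float));

    // two half steps per iteration; old/new swap roles, so no device-to-device copy is needed
    for (int kt = 0; kt < 10; kt++) {
        step<<<num_block_2d, blocksize_2d>>>(buf_old, buf_new, length);
        step<<<num_block_2d, blocksize_2d>>>(buf_new, buf_old, length);
    }
    cudaDeviceSynchronize();
    printf("done after %d full steps\n", 10);

    cudaFree(buf_old);
    cudaFree(buf_new);
    return 0;
}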
bf233cfc1307a92d0231a648a6371ae86517779c.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////// //Ho Thien Luan -> History Tracking! // 1. Ver_0: Approximate string matching with k-mismatches // 2. Ver_1: Optimize by using sharing_memory for storing pattern // // // //////////////////////////////////////////////////////////// #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <hip/hip_runtime.h> #include <math.h> #include <hip/hip_runtime.h> #include "cuPrintf.hip" #include <time.h> #define FILENAME_MAXLEN 256 #define THREAD_BLOCK_EXP (7) #define THREAD_BLOCK_SIZE (1 << THREAD_BLOCK_EXP) __global__ void ASM_kernel(char *g_input_string, int input_size, int *g_pattern_decode, int real_pattern_size, int mask, int maskplus, int b, int *g_matched_result) { int tid = threadIdx.x ; int gbid = blockIdx.y * gridDim.x + blockIdx.x ; int start = gbid*THREAD_BLOCK_SIZE; int start_tid = start + tid; int pow_2b = 1 << b; unsigned long long int bit_vector=0; int t_shift; __shared__ char sub_string_shared [256] ; __shared__ int pattern_decode_shared [4] ; sub_string_shared[tid] = g_input_string[start+tid]; if ( tid < (real_pattern_size - 1) ){ sub_string_shared[THREAD_BLOCK_SIZE + tid] = g_input_string[start+THREAD_BLOCK_SIZE+tid]; } if ( (real_pattern_size <= tid) && (tid < real_pattern_size + 4) ){ pattern_decode_shared[tid-real_pattern_size] = g_pattern_decode[tid-real_pattern_size]; } __syncthreads(); //////////////////////////////////////////////////////////////// if (start_tid < input_size-real_pattern_size+1) { for (int i = 0; i < real_pattern_size; i++) { t_shift = i%real_pattern_size; if (sub_string_shared[ tid+i ] == 'A') { bit_vector = bit_vector + (((pattern_decode_shared[0] << t_shift*b) & mask) | (pattern_decode_shared[0] >> (real_pattern_size - t_shift)*b)); } else if (sub_string_shared[ tid+i ] == 'C'){ bit_vector = bit_vector + (((pattern_decode_shared[1] << t_shift*b) & mask) | (pattern_decode_shared[1] >> (real_pattern_size - t_shift)*b)); } else if (sub_string_shared[ tid+i ] == 'T'){ bit_vector = bit_vector + (((pattern_decode_shared[2] << t_shift*b) & mask) | (pattern_decode_shared[2] >> (real_pattern_size - t_shift)*b)); } else if (sub_string_shared[ tid+i ] == 'G'){ //case of G bit_vector = bit_vector + (((pattern_decode_shared[3] << t_shift*b) & mask) | (pattern_decode_shared[3] >> (real_pattern_size - t_shift)*b)); } else { // can be char "\n" bit_vector = bit_vector + maskplus; } } //Get results for (int j = 0; j < real_pattern_size ; j++) { //circular patterns //h_matched_result[(i-real_pattern_size+1)*real_pattern_size+(real_pattern_size-1-j)] += ((bit_vector >> (k*real_pattern_size+j)) & 1); g_matched_result[start_tid*real_pattern_size+j] = bit_vector % pow_2b; bit_vector = bit_vector >> b; } //cuPrintf("threadIdx.x = %d \t ,start = %d, matrix_M = %d, matrix_B = %d, string = %s, pattern = %s, pattern_size = %d\n", tid, start_tid, g_matrix_M[(real_pattern_size+1)*(start_tid+i) + i],g_matrix_B[(real_pattern_size+1)*(start_tid+i) + i], g_input_string[i-1], g_pattern_circular[i-1], real_pattern_size); } } //////////////////////////////// void ASM_process_top (char *g_input_string, size_t input_size, int *g_pattern_decode, int real_pattern_size, int mask, int maskplus, int b, int *g_matched_result) { // num_blocks = # of thread blocks to cover input stream int num_blocks = (input_size-real_pattern_size+1)/THREAD_BLOCK_SIZE + 1 ; dim3 dimBlock( THREAD_BLOCK_SIZE, 1 ) ; dim3 dimGrid ; int p = num_blocks >> 15 ; dimGrid.x = 
num_blocks ; if ( p ){ dimGrid.x = 1<<15 ; dimGrid.y = p+1 ; } cudaPrintfInit();////for cuPrintf hipLaunchKernelGGL(( ASM_kernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, g_input_string, input_size, g_pattern_decode, real_pattern_size, mask, maskplus, b, g_matched_result); cudaPrintfDisplay();////for cuPrintf cudaPrintfEnd(); ////for cuPrintf } //////////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { char inputFile[FILENAME_MAXLEN]; char patternFile[FILENAME_MAXLEN]; strcpy( inputFile, argv[2]) ; strcpy( patternFile, argv[1]) ; int k_par; k_par = strtol(argv[3], NULL, 10); //////////////////////////////////////////////////////////////////////////////////// //Process input patterns int input_size; int pattern_size; int real_pattern_size; char *h_input_string = NULL ; char *h_pattern = NULL ; int *h_matched_result = NULL ; int *h_pattern_decode = (int*) malloc( sizeof(int)*4 ) ; // step 1: read patterns and dump transition table // int deviceID = 0 ; // hipDeviceProp_t deviceProp; // hipGetDeviceProperties(&deviceProp, deviceID); //readPatternFromFile( patternFile) ; //step 2: prepare input stream FILE* fpin = fopen( inputFile, "rb"); assert ( NULL != fpin ) ; // obtain file size fseek (fpin , 0 , SEEK_END); input_size = ftell (fpin); rewind (fpin); //step2: prepare input pattern FILE* fpattern = fopen( patternFile, "rb"); assert ( NULL != fpattern ) ; // obtain file size fseek (fpattern , 0 , SEEK_END); pattern_size = ftell (fpattern); rewind (fpattern); // allocate memory to contain the whole file h_input_string = (char *) malloc (sizeof(char)*input_size); assert( NULL != h_input_string ); h_pattern = (char *) malloc (sizeof(char)*pattern_size); assert( NULL != h_pattern ); real_pattern_size = pattern_size-1; h_matched_result = (int *) malloc (sizeof(int)*(input_size-real_pattern_size+1)*real_pattern_size); assert( NULL != h_matched_result ); memset( h_matched_result, 0, sizeof(int)*(input_size-real_pattern_size+1)*real_pattern_size) ; // copy the file into the buffer input_size = fread (h_input_string, 1, input_size, fpin); fclose(fpin); pattern_size = fread (h_pattern, 1, pattern_size, fpattern); fclose(fpattern); //printf("Cir string = %s, length = %d\n", h_pattern, real_pattern_size); //ACSM Preprocess - Define table T[] unsigned long long int T_A = 0; int cal_A = 0; unsigned long long int T_C = 0; int cal_C = 0; unsigned long long int T_T = 0; int cal_T = 0; unsigned long long int T_G = 0; int cal_G = 0; float sub_real_pattern_size = real_pattern_size+1; float float_b = log2(sub_real_pattern_size); int b = float_b; if(b != float_b) { b=b+1; } //int b = log2(sub_real_pattern_size)+1; //int pow_2b = 1 << b; //printf("#-pattern = %d, b=%d, float_b = %f, opw_2b = %d-#\n",real_pattern_size,b,float_b,pow_2b); //for ( int i = real_pattern_size-1; i>=0; i--) { for ( int i = 0; h_pattern[ i ]; i++) { if(h_pattern[ i ] != '\n') { //printf("Process for char: %c\n", h_pattern[ i ]); if (h_pattern[ i ] == 'A') { cal_A = 0; cal_C = 1; cal_T = 1; cal_G = 1; } else if (h_pattern[ i ] == 'C'){ cal_A = 1; cal_C = 0; cal_T = 1; cal_G = 1; } else if (h_pattern[ i ] == 'T'){ cal_A = 1; cal_C = 1; cal_T = 0; cal_G = 1; } else if (h_pattern[ i ] == 'G'){ cal_A = 1; cal_C = 1; cal_T = 1; cal_G = 0; } T_A = (T_A << b) + cal_A; T_C = (T_C << b) + cal_C; T_T = (T_T << b) + cal_T; T_G = (T_G << b) + cal_G; } } h_pattern_decode[0] = T_A; h_pattern_decode[1] = T_C; h_pattern_decode[2] = T_T; h_pattern_decode[3] = T_G; //printf("\nT_A: %d\n", T_A); 
//printf("\nT_C: %d\n", T_C); //printf("\nT_T: %d\n", T_T); //printf("\nT_G: %d\n", T_G); //shift-add bit-vector. //unsigned long long int bit_vector=0; //int t_shift = 0; #define BIT(x) (1<<(x)) unsigned long long int mask = 0; for (int i = 0; i < real_pattern_size*b ; i++) { mask = (mask << 1) | 1; } unsigned long long int maskplus = 0; for (int i = 0; i < real_pattern_size ; i++) { maskplus = (maskplus << b) | 1; } /* //ACSM process struct timespec t_start, t_end; double elapsedTime; clock_gettime (CLOCK_REALTIME, &t_start); //for (int i = 0; h_input_string [ i ] ; i++) { for (int i = 0; i<input_size-(real_pattern_size-1) ; i++) { bit_vector = 0 ; for(int k=0; k< real_pattern_size; k++) { t_shift = k%real_pattern_size; //printf("Process for char: %c, T_A = %u, t_shift = %d\n", h_input_string[ i ], T_A, t_shift); if (h_input_string[ i+k ] == 'A') { bit_vector = bit_vector + ((T_A >> t_shift*b) & mask); } else if (h_input_string[ i+k ] == 'C'){ bit_vector = bit_vector + ((T_C >> t_shift*b) & mask); } else if (h_input_string[ i+k ] == 'T'){ bit_vector = bit_vector + ((T_T >> t_shift*b) & mask); } else if (h_input_string[ i+k ] == 'G'){ //case of G bit_vector = bit_vector + ((T_G >> t_shift*b) & mask); } else { // can be char "\n" bit_vector = bit_vector + maskplus; } //Get results // for (int j = no_of_patterns-1; j >= 0; j--) { // h_matched_result[i*no_of_patterns+j] = vector % pow_2b; // vector = vector >> b; // } //printf("bit_vector: %u\n", bit_vector); } for (int j = 0; j < real_pattern_size ; j++) { //circular patterns //h_matched_result[(i-real_pattern_size+1)*real_pattern_size+(real_pattern_size-1-j)] += ((bit_vector >> (k*real_pattern_size+j)) & 1); h_matched_result[i*real_pattern_size+j] = bit_vector % pow_2b; bit_vector = bit_vector >> b; } }// for h_input_string clock_gettime(CLOCK_REALTIME, &t_end); elapsedTime = (t_end.tv_sec*1000+t_end.tv_nsec/1000000)-(t_start.tv_sec*1000+t_start.tv_nsec/1000000); */ //Process in GPU char *g_input_string; int *g_matched_result; int *g_pattern_decode; hipMalloc (&g_input_string, sizeof(char)*input_size); hipMalloc (&g_matched_result, sizeof(int)*(input_size-real_pattern_size+1)*real_pattern_size); hipMalloc (&g_pattern_decode, sizeof(int)*4); hipMemcpy (g_input_string, h_input_string, sizeof(char)*input_size, hipMemcpyHostToDevice ); hipMemcpy (g_pattern_decode, h_pattern_decode, sizeof(int)*4, hipMemcpyHostToDevice); // record time setting hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); // step 3: run ASM on GPU ASM_process_top ( g_input_string, input_size, g_pattern_decode, real_pattern_size, mask, maskplus, b, g_matched_result) ; // record time setting hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); hipMemcpy (h_matched_result, g_matched_result, sizeof(int)*(input_size-real_pattern_size+1)*real_pattern_size, hipMemcpyDeviceToHost ); // step 4: output matched result int total_result = 0; for (int i = 0; i < input_size-(real_pattern_size-1); i++) { for (int j = 0; j < real_pattern_size; j++) { //printf("At position %4d, circular pattern %4d : match pattern %d\n", i, j, h_matched_result[i*real_pattern_size + j]); if(h_matched_result[i*real_pattern_size + j] <= k_par) {total_result++;} } } printf("\n\n"); printf("############################################################\n"); printf("#--Approximate Circular String Matching with k-Mismatches--#\n"); printf("#----------------------------------------------------------#\n"); 
printf("#----------Modified shared-mem PCVM Alg. in GPU------------#\n"); printf("############################################################\n"); printf("#--Pattern Length |\t\t %10d \t #\n",real_pattern_size); printf("#----------------------------------------------------------#\n"); printf("#--Input Size (bytes) |\t\t %10d \t #\n", input_size ); printf("#----------------------------------------------------------#\n"); printf("#--Total matched with k = %d |\t\t %10d \t #\n", k_par, total_result); printf("#----------------------------------------------------------#\n"); //printf("#--Total elapsed time (ms) |\t\t %10f \t #\n", elapsedTime); printf("#--Total elapsed time (ms) |\t\t %10f \t #\n", time); printf("#----------------------------------------------------------#\n"); //printf("#--Throughput Result (Gbps) |\t\t %10f \t #\n", (float)(input_size*8)/(elapsedTime*1000000) ); //printf("#--Throughput Result (Mbps) |\t\t %10f \t #\n", (float)(input_size*8)/(elapsedTime*1000) ); printf("#--Throughput Result (Gbps) |\t\t %10f \t #\n", (float)(input_size*8)/(time*1000000) ); printf("#--Throughput Result (Mbps) |\t\t %10f \t #\n", (float)(input_size*8)/(time*1000) ); printf("############################################################\n"); free(h_input_string); free(h_pattern); free(h_matched_result); hipFree(g_input_string); hipFree(g_pattern_decode); hipFree(g_matched_result); return 0; }
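ASM_process_top above folds num_blocks into a two-dimensional grid once it exceeds 1<<15, presumably to stay within per-dimension grid limits on older hardware, and ASM_kernel recovers the flat block id as blockIdx.y * gridDim.x + blockIdx.x. The host-only sketch below reproduces that decomposition and checks that it covers every block id; the block count and variable names are illustrative.

#include <cstdio>

int main() {
    int num_blocks = 200000;                  // hypothetical block count
    unsigned int gx = num_blocks, gy = 1;
    int p = num_blocks >> 15;                 // same test as ASM_process_top
    if (p) { gx = 1 << 15; gy = p + 1; }
    // every flat id in [0, num_blocks) equals y * gx + x for some (x, y) in the grid
    long long covered = (long long)gx * gy;
    printf("dimGrid = (%u, %u) covers %lld block ids (need %d)\n",
           gx, gy, covered, num_blocks);
    return 0;
}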
bf233cfc1307a92d0231a648a6371ae86517779c.cu
//////////////////////////////////////////////////////////// //Ho Thien Luan -> History Tracking! // 1. Ver_0: Approximate string matching with k-mismatches // 2. Ver_1: Optimize by using sharing_memory for storing pattern // // // //////////////////////////////////////////////////////////// #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <cuda.h> #include <math.h> #include <cuda_runtime.h> #include "cuPrintf.cu" #include <time.h> #define FILENAME_MAXLEN 256 #define THREAD_BLOCK_EXP (7) #define THREAD_BLOCK_SIZE (1 << THREAD_BLOCK_EXP) __global__ void ASM_kernel(char *g_input_string, int input_size, int *g_pattern_decode, int real_pattern_size, int mask, int maskplus, int b, int *g_matched_result) { int tid = threadIdx.x ; int gbid = blockIdx.y * gridDim.x + blockIdx.x ; int start = gbid*THREAD_BLOCK_SIZE; int start_tid = start + tid; int pow_2b = 1 << b; unsigned long long int bit_vector=0; int t_shift; __shared__ char sub_string_shared [256] ; __shared__ int pattern_decode_shared [4] ; sub_string_shared[tid] = g_input_string[start+tid]; if ( tid < (real_pattern_size - 1) ){ sub_string_shared[THREAD_BLOCK_SIZE + tid] = g_input_string[start+THREAD_BLOCK_SIZE+tid]; } if ( (real_pattern_size <= tid) && (tid < real_pattern_size + 4) ){ pattern_decode_shared[tid-real_pattern_size] = g_pattern_decode[tid-real_pattern_size]; } __syncthreads(); //////////////////////////////////////////////////////////////// if (start_tid < input_size-real_pattern_size+1) { for (int i = 0; i < real_pattern_size; i++) { t_shift = i%real_pattern_size; if (sub_string_shared[ tid+i ] == 'A') { bit_vector = bit_vector + (((pattern_decode_shared[0] << t_shift*b) & mask) | (pattern_decode_shared[0] >> (real_pattern_size - t_shift)*b)); } else if (sub_string_shared[ tid+i ] == 'C'){ bit_vector = bit_vector + (((pattern_decode_shared[1] << t_shift*b) & mask) | (pattern_decode_shared[1] >> (real_pattern_size - t_shift)*b)); } else if (sub_string_shared[ tid+i ] == 'T'){ bit_vector = bit_vector + (((pattern_decode_shared[2] << t_shift*b) & mask) | (pattern_decode_shared[2] >> (real_pattern_size - t_shift)*b)); } else if (sub_string_shared[ tid+i ] == 'G'){ //case of G bit_vector = bit_vector + (((pattern_decode_shared[3] << t_shift*b) & mask) | (pattern_decode_shared[3] >> (real_pattern_size - t_shift)*b)); } else { // can be char "\n" bit_vector = bit_vector + maskplus; } } //Get results for (int j = 0; j < real_pattern_size ; j++) { //circular patterns //h_matched_result[(i-real_pattern_size+1)*real_pattern_size+(real_pattern_size-1-j)] += ((bit_vector >> (k*real_pattern_size+j)) & 1); g_matched_result[start_tid*real_pattern_size+j] = bit_vector % pow_2b; bit_vector = bit_vector >> b; } //cuPrintf("threadIdx.x = %d \t ,start = %d, matrix_M = %d, matrix_B = %d, string = %s, pattern = %s, pattern_size = %d\n", tid, start_tid, g_matrix_M[(real_pattern_size+1)*(start_tid+i) + i],g_matrix_B[(real_pattern_size+1)*(start_tid+i) + i], g_input_string[i-1], g_pattern_circular[i-1], real_pattern_size); } } //////////////////////////////// void ASM_process_top (char *g_input_string, size_t input_size, int *g_pattern_decode, int real_pattern_size, int mask, int maskplus, int b, int *g_matched_result) { // num_blocks = # of thread blocks to cover input stream int num_blocks = (input_size-real_pattern_size+1)/THREAD_BLOCK_SIZE + 1 ; dim3 dimBlock( THREAD_BLOCK_SIZE, 1 ) ; dim3 dimGrid ; int p = num_blocks >> 15 ; dimGrid.x = num_blocks ; if ( p ){ dimGrid.x = 1<<15 ; dimGrid.y = p+1 ; } 
cudaPrintfInit();////for cuPrintf ASM_kernel <<< dimGrid, dimBlock >>>(g_input_string, input_size, g_pattern_decode, real_pattern_size, mask, maskplus, b, g_matched_result); cudaPrintfDisplay();////for cuPrintf cudaPrintfEnd(); ////for cuPrintf } //////////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { char inputFile[FILENAME_MAXLEN]; char patternFile[FILENAME_MAXLEN]; strcpy( inputFile, argv[2]) ; strcpy( patternFile, argv[1]) ; int k_par; k_par = strtol(argv[3], NULL, 10); //////////////////////////////////////////////////////////////////////////////////// //Process input patterns int input_size; int pattern_size; int real_pattern_size; char *h_input_string = NULL ; char *h_pattern = NULL ; int *h_matched_result = NULL ; int *h_pattern_decode = (int*) malloc( sizeof(int)*4 ) ; // step 1: read patterns and dump transition table // int deviceID = 0 ; // cudaDeviceProp deviceProp; // cudaGetDeviceProperties(&deviceProp, deviceID); //readPatternFromFile( patternFile) ; //step 2: prepare input stream FILE* fpin = fopen( inputFile, "rb"); assert ( NULL != fpin ) ; // obtain file size fseek (fpin , 0 , SEEK_END); input_size = ftell (fpin); rewind (fpin); //step2: prepare input pattern FILE* fpattern = fopen( patternFile, "rb"); assert ( NULL != fpattern ) ; // obtain file size fseek (fpattern , 0 , SEEK_END); pattern_size = ftell (fpattern); rewind (fpattern); // allocate memory to contain the whole file h_input_string = (char *) malloc (sizeof(char)*input_size); assert( NULL != h_input_string ); h_pattern = (char *) malloc (sizeof(char)*pattern_size); assert( NULL != h_pattern ); real_pattern_size = pattern_size-1; h_matched_result = (int *) malloc (sizeof(int)*(input_size-real_pattern_size+1)*real_pattern_size); assert( NULL != h_matched_result ); memset( h_matched_result, 0, sizeof(int)*(input_size-real_pattern_size+1)*real_pattern_size) ; // copy the file into the buffer input_size = fread (h_input_string, 1, input_size, fpin); fclose(fpin); pattern_size = fread (h_pattern, 1, pattern_size, fpattern); fclose(fpattern); //printf("Cir string = %s, length = %d\n", h_pattern, real_pattern_size); //ACSM Preprocess - Define table T[] unsigned long long int T_A = 0; int cal_A = 0; unsigned long long int T_C = 0; int cal_C = 0; unsigned long long int T_T = 0; int cal_T = 0; unsigned long long int T_G = 0; int cal_G = 0; float sub_real_pattern_size = real_pattern_size+1; float float_b = log2(sub_real_pattern_size); int b = float_b; if(b != float_b) { b=b+1; } //int b = log2(sub_real_pattern_size)+1; //int pow_2b = 1 << b; //printf("#-pattern = %d, b=%d, float_b = %f, opw_2b = %d-#\n",real_pattern_size,b,float_b,pow_2b); //for ( int i = real_pattern_size-1; i>=0; i--) { for ( int i = 0; h_pattern[ i ]; i++) { if(h_pattern[ i ] != '\n') { //printf("Process for char: %c\n", h_pattern[ i ]); if (h_pattern[ i ] == 'A') { cal_A = 0; cal_C = 1; cal_T = 1; cal_G = 1; } else if (h_pattern[ i ] == 'C'){ cal_A = 1; cal_C = 0; cal_T = 1; cal_G = 1; } else if (h_pattern[ i ] == 'T'){ cal_A = 1; cal_C = 1; cal_T = 0; cal_G = 1; } else if (h_pattern[ i ] == 'G'){ cal_A = 1; cal_C = 1; cal_T = 1; cal_G = 0; } T_A = (T_A << b) + cal_A; T_C = (T_C << b) + cal_C; T_T = (T_T << b) + cal_T; T_G = (T_G << b) + cal_G; } } h_pattern_decode[0] = T_A; h_pattern_decode[1] = T_C; h_pattern_decode[2] = T_T; h_pattern_decode[3] = T_G; //printf("\nT_A: %d\n", T_A); //printf("\nT_C: %d\n", T_C); //printf("\nT_T: %d\n", T_T); //printf("\nT_G: %d\n", T_G); //shift-add 
bit-vector. //unsigned long long int bit_vector=0; //int t_shift = 0; #define BIT(x) (1<<(x)) unsigned long long int mask = 0; for (int i = 0; i < real_pattern_size*b ; i++) { mask = (mask << 1) | 1; } unsigned long long int maskplus = 0; for (int i = 0; i < real_pattern_size ; i++) { maskplus = (maskplus << b) | 1; } /* //ACSM process struct timespec t_start, t_end; double elapsedTime; clock_gettime (CLOCK_REALTIME, &t_start); //for (int i = 0; h_input_string [ i ] ; i++) { for (int i = 0; i<input_size-(real_pattern_size-1) ; i++) { bit_vector = 0 ; for(int k=0; k< real_pattern_size; k++) { t_shift = k%real_pattern_size; //printf("Process for char: %c, T_A = %u, t_shift = %d\n", h_input_string[ i ], T_A, t_shift); if (h_input_string[ i+k ] == 'A') { bit_vector = bit_vector + ((T_A >> t_shift*b) & mask); } else if (h_input_string[ i+k ] == 'C'){ bit_vector = bit_vector + ((T_C >> t_shift*b) & mask); } else if (h_input_string[ i+k ] == 'T'){ bit_vector = bit_vector + ((T_T >> t_shift*b) & mask); } else if (h_input_string[ i+k ] == 'G'){ //case of G bit_vector = bit_vector + ((T_G >> t_shift*b) & mask); } else { // can be char "\n" bit_vector = bit_vector + maskplus; } //Get results // for (int j = no_of_patterns-1; j >= 0; j--) { // h_matched_result[i*no_of_patterns+j] = vector % pow_2b; // vector = vector >> b; // } //printf("bit_vector: %u\n", bit_vector); } for (int j = 0; j < real_pattern_size ; j++) { //circular patterns //h_matched_result[(i-real_pattern_size+1)*real_pattern_size+(real_pattern_size-1-j)] += ((bit_vector >> (k*real_pattern_size+j)) & 1); h_matched_result[i*real_pattern_size+j] = bit_vector % pow_2b; bit_vector = bit_vector >> b; } }// for h_input_string clock_gettime(CLOCK_REALTIME, &t_end); elapsedTime = (t_end.tv_sec*1000+t_end.tv_nsec/1000000)-(t_start.tv_sec*1000+t_start.tv_nsec/1000000); */ //Process in GPU char *g_input_string; int *g_matched_result; int *g_pattern_decode; cudaMalloc (&g_input_string, sizeof(char)*input_size); cudaMalloc (&g_matched_result, sizeof(int)*(input_size-real_pattern_size+1)*real_pattern_size); cudaMalloc (&g_pattern_decode, sizeof(int)*4); cudaMemcpy (g_input_string, h_input_string, sizeof(char)*input_size, cudaMemcpyHostToDevice ); cudaMemcpy (g_pattern_decode, h_pattern_decode, sizeof(int)*4, cudaMemcpyHostToDevice); // record time setting cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // step 3: run ASM on GPU ASM_process_top ( g_input_string, input_size, g_pattern_decode, real_pattern_size, mask, maskplus, b, g_matched_result) ; // record time setting cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); cudaMemcpy (h_matched_result, g_matched_result, sizeof(int)*(input_size-real_pattern_size+1)*real_pattern_size, cudaMemcpyDeviceToHost ); // step 4: output matched result int total_result = 0; for (int i = 0; i < input_size-(real_pattern_size-1); i++) { for (int j = 0; j < real_pattern_size; j++) { //printf("At position %4d, circular pattern %4d : match pattern %d\n", i, j, h_matched_result[i*real_pattern_size + j]); if(h_matched_result[i*real_pattern_size + j] <= k_par) {total_result++;} } } printf("\n\n"); printf("############################################################\n"); printf("#--Approximate Circular String Matching with k-Mismatches--#\n"); printf("#----------------------------------------------------------#\n"); printf("#----------Modified shared-mem PCVM Alg. 
in GPU------------#\n"); printf("############################################################\n"); printf("#--Pattern Length |\t\t %10d \t #\n",real_pattern_size); printf("#----------------------------------------------------------#\n"); printf("#--Input Size (bytes) |\t\t %10d \t #\n", input_size ); printf("#----------------------------------------------------------#\n"); printf("#--Total matched with k = %d |\t\t %10d \t #\n", k_par, total_result); printf("#----------------------------------------------------------#\n"); //printf("#--Total elapsed time (ms) |\t\t %10f \t #\n", elapsedTime); printf("#--Total elapsed time (ms) |\t\t %10f \t #\n", time); printf("#----------------------------------------------------------#\n"); //printf("#--Throughput Result (Gbps) |\t\t %10f \t #\n", (float)(input_size*8)/(elapsedTime*1000000) ); //printf("#--Throughput Result (Mbps) |\t\t %10f \t #\n", (float)(input_size*8)/(elapsedTime*1000) ); printf("#--Throughput Result (Gbps) |\t\t %10f \t #\n", (float)(input_size*8)/(time*1000000) ); printf("#--Throughput Result (Mbps) |\t\t %10f \t #\n", (float)(input_size*8)/(time*1000) ); printf("############################################################\n"); free(h_input_string); free(h_pattern); free(h_matched_result); cudaFree(g_input_string); cudaFree(g_pattern_decode); cudaFree(g_matched_result); return 0; }
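The preprocessing in main() packs one mismatch counter per pattern position into a single integer: b = ceil(log2(m+1)) bits per field, mask keeps the low m*b bits, and maskplus carries a 1 in the lowest bit of every field so that one addition bumps every counter at once. The standalone sketch below reruns that arithmetic for a hypothetical pattern length m = 5 so the constants can be inspected; it is an illustration, not part of the matcher.

#include <cstdio>
#include <cmath>

int main() {
    const int m = 5;                                             // hypothetical pattern length
    const int b = (int)std::ceil(std::log2((double)(m + 1)));    // 3 bits: 2^b > m
    unsigned long long mask = 0, maskplus = 0;
    for (int i = 0; i < m * b; i++) mask = (mask << 1) | 1ULL;       // low m*b bits set
    for (int i = 0; i < m; i++) maskplus = (maskplus << b) | 1ULL;   // one 1 per field
    printf("b = %d, mask = 0x%llx, maskplus = 0x%llx\n", b, mask, maskplus);
    // prints b = 3, mask = 0x7fff, maskplus = 0x1249 (binary 001 001 001 001 001)

    // adding maskplus once increments all m counters; each can reach 2^b - 1 = 7 >= m
    unsigned long long bit_vector = 0;
    bit_vector += maskplus;
    bit_vector += maskplus;
    for (int j = 0; j < m; j++) {                  // unpack, as the result loop does
        printf("field %d holds %llu\n", j, bit_vector % (1ULL << b));
        bit_vector >>= b;
    }
    return 0;
}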
5199819c1cd6954d49c46c6edaaa235741954e04.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Essentially, arrays of any dimensionality are really one-dimensional arrays; only the presentation changes. 2-D array addition is not very interesting,
// since it is just 1-D array addition; 2-D matrix multiplication is a bit more interesting.
// This version is not yet highly parallel: a highly parallel version would also parallelize the summation for-loop, probably with device-side code. The current degree of parallelism is 4.
#include<iostream>
#include<cuda.h>
using namespace std;
const int N=2;
__global__ void mul(int *a,int *b,int *c){// matrix multiplication with a degree of parallelism of 4
    int row=blockIdx.x;
    int col=threadIdx.x;
    int temp_sum=0;
    for(int i=0;i<blockDim.x;i++){
        //temp_sum+=a[row][i]*b[i][row];
        temp_sum+=a[row*blockDim.x+i]*b[i*blockDim.x+col];
        // temp_sum+=row+col;
        // temp_sum=a[row*blockDim.x+col];
    }
    // c[row][col]=temp_sum;
    c[row*blockDim.x+col]=temp_sum;
}
__global__ void mul_8(int *a,int *b,int *c){// 2*2*2 threads, each computing one partial product; degree of parallelism is 8
    int row=blockIdx.x/N;
    int col=blockIdx.x%N;
    __shared__ int mul[N];
    //mul[0]=a[row][0]*b[0][col]
    mul[threadIdx.x]=a[row*N+threadIdx.x]*b[threadIdx.x*N+col];
    //mul[1]=a[row][1]*b[1][col]
    __syncthreads();
    int sum=0;
    for(int i=0;i<blockDim.x;i++)
        sum+=mul[i];
    c[blockIdx.x]=sum;
}
int main(){
    int *a,*b,*dev_a,*dev_b,*dev_c,*c;
    a=new int[N*N];
    b=new int [N*N];
    c=new int[N*N];
    for(int i=0;i<N;i++){
        for(int j=0;j<N;j++){
            a[i*N+j]=i*N+j;
            b[i*N+j]=i*N+j;
            cout<<a[i*N+j]<<endl;
        }
    }
    hipMalloc(&dev_a,N*N*sizeof(int));
    hipMalloc(&dev_b,N*N*sizeof(int));
    hipMalloc(&dev_c,N*N*sizeof(int));
    hipMemcpy(dev_a,a,N*N*sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(dev_b,b,N*N*sizeof(int),hipMemcpyHostToDevice);
    // mul<<<N,N>>>(dev_a,dev_b,dev_c);
    hipEvent_t start,stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start,0);
    hipLaunchKernelGGL(( mul_8), dim3(N*N),dim3(N), 0, 0, dev_a,dev_b,dev_c);
    hipEventRecord(stop,0);
    hipEventSynchronize(stop);
    float elapsedTime;
    hipEventElapsedTime(&elapsedTime,start,stop);
    cout<<"Time: "<<elapsedTime<<endl;
    hipMemcpy(c,dev_c,N*N*sizeof(int),hipMemcpyDeviceToHost);
    for(int i=0;i<N;i++){
        for(int j=0;j<N;j++){
            cout<<c[i*N+j]<<" ";
        }
        cout<<endl;
    }
    return 0;
}
5199819c1cd6954d49c46c6edaaa235741954e04.cu
// Essentially, arrays of any dimensionality are really one-dimensional arrays; only the presentation changes. 2-D array addition is not very interesting,
// since it is just 1-D array addition; 2-D matrix multiplication is a bit more interesting.
// This version is not yet highly parallel: a highly parallel version would also parallelize the summation for-loop, probably with device-side code. The current degree of parallelism is 4.
#include<iostream>
#include<cuda.h>
using namespace std;
const int N=2;
__global__ void mul(int *a,int *b,int *c){// matrix multiplication with a degree of parallelism of 4
    int row=blockIdx.x;
    int col=threadIdx.x;
    int temp_sum=0;
    for(int i=0;i<blockDim.x;i++){
        //temp_sum+=a[row][i]*b[i][row];
        temp_sum+=a[row*blockDim.x+i]*b[i*blockDim.x+col];
        // temp_sum+=row+col;
        // temp_sum=a[row*blockDim.x+col];
    }
    // c[row][col]=temp_sum;
    c[row*blockDim.x+col]=temp_sum;
}
__global__ void mul_8(int *a,int *b,int *c){// 2*2*2 threads, each computing one partial product; degree of parallelism is 8
    int row=blockIdx.x/N;
    int col=blockIdx.x%N;
    __shared__ int mul[N];
    //mul[0]=a[row][0]*b[0][col]
    mul[threadIdx.x]=a[row*N+threadIdx.x]*b[threadIdx.x*N+col];
    //mul[1]=a[row][1]*b[1][col]
    __syncthreads();
    int sum=0;
    for(int i=0;i<blockDim.x;i++)
        sum+=mul[i];
    c[blockIdx.x]=sum;
}
int main(){
    int *a,*b,*dev_a,*dev_b,*dev_c,*c;
    a=new int[N*N];
    b=new int [N*N];
    c=new int[N*N];
    for(int i=0;i<N;i++){
        for(int j=0;j<N;j++){
            a[i*N+j]=i*N+j;
            b[i*N+j]=i*N+j;
            cout<<a[i*N+j]<<endl;
        }
    }
    cudaMalloc(&dev_a,N*N*sizeof(int));
    cudaMalloc(&dev_b,N*N*sizeof(int));
    cudaMalloc(&dev_c,N*N*sizeof(int));
    cudaMemcpy(dev_a,a,N*N*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b,b,N*N*sizeof(int),cudaMemcpyHostToDevice);
    // mul<<<N,N>>>(dev_a,dev_b,dev_c);
    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    mul_8<<<N*N,N>>>(dev_a,dev_b,dev_c);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime,start,stop);
    cout<<"Time: "<<elapsedTime<<endl;
    cudaMemcpy(c,dev_c,N*N*sizeof(int),cudaMemcpyDeviceToHost);
    for(int i=0;i<N;i++){
        for(int j=0;j<N;j++){
            cout<<c[i*N+j]<<" ";
        }
        cout<<endl;
    }
    return 0;
}
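The header comment above notes that a fully parallel version would also parallelize the summation loop. A minimal sketch of that direction follows: one block per output element, N threads each computing one partial product, and a shared-memory tree reduction replacing the serial sum. The kernel name and the power-of-two assumption on N are mine, not the author's; it can be launched from the existing main() in place of mul_8.

// One block per c[row][col]; blockDim.x = n threads each form one partial product,
// then a tree reduction in shared memory replaces the serial for-loop sum.
__global__ void mul_reduce(const int *a, const int *b, int *c, int n) {
    int row = blockIdx.x / n;
    int col = blockIdx.x % n;
    extern __shared__ int prod[];
    int k = threadIdx.x;
    prod[k] = a[row * n + k] * b[k * n + col];
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {  // assumes n is a power of two
        if (k < stride) prod[k] += prod[k + stride];
        __syncthreads();
    }
    if (k == 0) c[blockIdx.x] = prod[0];
}
// possible launch, mirroring mul_8: mul_reduce<<<N*N, N, N*sizeof(int)>>>(dev_a, dev_b, dev_c, N);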
2669ea510ea7bcd7d69f34b2af48c0e876502eb3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <vector> #include <cstring> #include <iostream> #include "NvInferPlugin.h" #include "ssdOpt.h" #include "ssdOptMacros.h" //#include "ssd_internal.h" #include "nmsPluginOpt.h" #include "fast_divmod.h" namespace nvinfer1 { namespace plugin { template <typename T_BBOX, int NUM_LAYERS> struct DecodeBBoxesOptData { const T_BBOX * loc_data[NUM_LAYERS]; int feature_size_pow2[NUM_LAYERS]; int end_layer_prior[NUM_LAYERS]; int num_anchors[NUM_LAYERS]; uint32_t num_anchors_mul[NUM_LAYERS]; uint32_t num_anchors_shr[NUM_LAYERS]; bool reshape_before_permute; bool packed32_nchw; }; /* This function maps the input index to the corresponding loc_data offset. The input "loc_data" is composed of "num_layers" loc tensors from the CONV layers in SSD. These tensors are in NCHW layout. The input index is broken down to 4 components: i, c, d, n i - box coordinate (max 4) c - class (max num_loc_classes) d - prior (max num_priors) n - batch size For SSD-MobileNet: The transformed loc_data is generated by: 1. loc_data[id_layer] -> NCHW 2. permute(1,2,0) -> NHWC 3. reshape -> (N, H*W*C/num_loc_classes/4, num_loc_classes, 4) or (N, num_priors_layer, num_loc_classes, 4) 4. concat(axis=1, num_layers) -> (N, num_priors, num_loc_classes, 4) 5. flatten -> (N, num_priors * num_loc_classes *4, 1, 1) Therefore, before concat, each loc_data[id_layer] effectively has: num_anchors_layer = C/num_loc_classes/4 num_priors_layer = H*W*num_anchors_layer = H*W*C/num_loc_classes/4 num_box_coors = 4 After concat, the num_priors_layer (id_layer=0...5) are concatenated together, so num_priors = sum(num_priors_layer) Correspondingly, giving an index from the result of step 5 above, the mapping process will a. Compute its i (box corrdinate), c (class), d (prior), n (batch size) accordingly b. Using d, find the corresponding id_layer using num_priors_layer and num_priors c. Compute prior_in_layer which is prior in id_layer d. Using prior_in_layer to get the original value of hw. hw = priors_in_layer / num_anchors_layer. And correspondingly anchor = priors_in_layer % num_anchors_layer e. C = num_anchors_layer * num_loc_classes * 4, the original value of ch can be computed using anchor, c (class), and i (box coordinate). f. With n, hw and ch, the original index in an id_layer can be computed according to NCHW layerout e.g. for ssd-mobilenet-v1, layer 0: C = 12 HW = 19x19 num_anchors_layer = 3 num_loc_classes = 1 (shared) num_priors_layer = 1083 For SSD-ResNet34, it is a bit different since reshape happens before permute 1. loc_data[id_layer] -> NCHW 2. reshape -> (N, num_loc_classes, 4, C/num_loc_classes/4*H*W) or (N, num_loc_classes, 4, num_priors_layer) 2. permute(0,2,1) -> (N, num_loc_classes, num_priors_layer, 4) 4. concat(axis=2, num_layers) -> (N, num_loc_classes, num_priors, 4) 5. 
flatten -> (N, num_loc_classes*num_priors*4, 1, 1) num_anchors_layer = C/num_loc_classes/4 num_priors_layer = num_anchors_layer*H*W = C/num_loc_classes/4*H*W num_box_coors = 4 After concat, the num_priors_layer (id_layer=0...5) are concatenated together, so num_priors = sum(num_priors_layer) Correspondingly, giving an index from the result of step 5 above, the mapping process will a. Compute its i (box corrdinate), d (prior), c (class), n (batch size) accordingly b. Using d, find the corresponding id_layer using num_priors_layer and num_priors c. Compute prior_in_layer which is prior in id_layer d. Using prior_in_layer to get the original value of anchor. anchor = priors_in_layer / HW. And correspondingly hw = priors_in_layer % HW e. C = num_loc_classes*4*num_anchors_layer, the original value of ch can be computed using c (class), i (box coordinate) and anchor. f. With n, hw and ch, the original index in an id_layer can be computed according to NCHW layerout (g). An alternative to step d), e), and f) is that since CHW = num_loc_classes * 4 * C/num_loc_classes/4*HW = num_loc_classes*4*num_priors_layer, the original index in NCHW layout can be computed using prior_in_layer, i (box coordinate) and c (class). Dealing with inputs with NC/32HW32 packed layout: This means the loc_data[id_layer] would be in NC/32HW32 format instead of NCHW. As a result, after n, ch, hw are computed, extra steps are needed to map them back to the original index: h. packed_C = (C+31)/32 i. packed_ch = ch/32 j. packed_ch_offset = ch%32 k. Get the packed index for NC/32HW32 using h),i) and j) */ template <typename T_BBOX, int NUM_LAYERS > __device__ inline void mapLocData(const int c, int d, const int n, const T_BBOX* const* loc_data, const int * feature_size_pow2, const int * end_layer_prior, const int * num_anchors_data, const uint32_t * num_anchors_mul_data, const uint32_t * num_anchors_shr_data, const int num_loc_classes, T_BBOX mapped_loc_data[4], const bool reshape_before_permute, const bool packed32_nchw ) { //find layer_id int start_layer_prior = 0; int prior_in_layer = 0; int num_anchors, num_anchors_mul, num_anchors_shr; int num_hw; const T_BBOX* loc_data_layer; #pragma unroll for(int layer = 0; layer < NUM_LAYERS; layer++) { if(d < end_layer_prior[layer]) { num_anchors = num_anchors_data[layer]; num_anchors_mul = num_anchors_mul_data[layer]; num_anchors_shr = num_anchors_shr_data[layer]; num_hw = feature_size_pow2[layer]; prior_in_layer = d - start_layer_prior; loc_data_layer = loc_data[layer]; // there should be a break here - but this would make all access to the arrays dynamic // due to compiler deoptimizations d = INT_MAX; } start_layer_prior = end_layer_prior[layer]; } //Transform id_in_layer from HWC -> CHW int num_ch = num_anchors * num_loc_classes * 4; if(reshape_before_permute) { int hw = prior_in_layer % num_hw; int anchor = prior_in_layer / num_hw; #pragma unroll for (int i = 0;i < 4;++i) { int ch = (c*4 + i)*num_anchors + anchor; int mappedIndex; if(packed32_nchw) { int packed_num_ch = (num_ch+31)/32; int packed_ch = ch >> 5; // ch/32; int packed_ch_offset = ch & 31; // ch%32; mappedIndex = ((n * packed_num_ch + packed_ch)*num_hw + hw)*32 + packed_ch_offset; } else { mappedIndex = (n * num_ch + ch)*num_hw + hw; } mapped_loc_data[i] = loc_data_layer[mappedIndex]; } } else { //anchor = prior_in_layer % num_anchors; //hw = prior_in_layer / num_anchors; int anchor, hw; fast_divmod(hw, anchor, prior_in_layer, num_anchors, num_anchors_mul, num_anchors_shr); int ch_base = (anchor*num_loc_classes+c)*4; 
#pragma unroll for (int i = 0;i < 4;++i) { int ch = ch_base + i; int mappedIndex; if(packed32_nchw) { int packed_num_ch = (num_ch+31)/32; int packed_ch = ch >> 5; // ch/32; int packed_ch_offset = ch & 31; // ch%32; mappedIndex = ((n * packed_num_ch + packed_ch)*num_hw + hw)*32 + packed_ch_offset; } else { mappedIndex = (n * num_ch + ch)*num_hw + hw; } mapped_loc_data[i] = loc_data_layer[mappedIndex]; } } } template <typename T_BBOX, unsigned nthds_per_cta, int NUM_LAYERS> __launch_bounds__(nthds_per_cta) __global__ void decodeBBoxesOpt_kernel( const int nthreads, const CodeTypeSSD code_type, const bool variance_encoded_in_target, const int num_priors, const uint32_t num_priors_mul, const uint32_t num_priors_shr, const bool share_location, const int num_loc_classes, const uint32_t num_loc_classes_mul, const uint32_t num_loc_classes_shr, const int background_label_id, const bool clip_bbox, const T_BBOX* prior_data, T_BBOX* bbox_data, const DecodeBBoxesOptData<T_BBOX, NUM_LAYERS> decodeBBoxesOptData) { for (int index = blockIdx.x * nthds_per_cta + threadIdx.x; index < nthreads; index += nthds_per_cta * gridDim.x) { const T_BBOX* const* loc_data = &decodeBBoxesOptData.loc_data[0]; const int* feature_size_pow2 = &decodeBBoxesOptData.feature_size_pow2[0]; const int* end_layer_prior = &decodeBBoxesOptData.end_layer_prior[0]; const int* num_anchors = &decodeBBoxesOptData.num_anchors[0]; const uint32_t* num_anchors_mul = &decodeBBoxesOptData.num_anchors_mul[0]; const uint32_t* num_anchors_shr = &decodeBBoxesOptData.num_anchors_shr[0]; const bool reshape_before_permute = decodeBBoxesOptData.reshape_before_permute; const bool packed32_nchw = decodeBBoxesOptData.packed32_nchw; // index has been already been divided by 4 (num_box_coors) before passed to this function //c = (index) % num_loc_classes; //d = (index / num_loc_classes) % num_priors; //n = (index / num_loc_classes / num_priors); int c, c_div, d, n; fast_divmod(c_div, c, index, num_loc_classes, num_loc_classes_mul, num_loc_classes_shr); fast_divmod(n, d, c_div, num_priors, num_priors_mul, num_priors_shr); if (!share_location && c == background_label_id) { // Ignore background class if not share_location. return; } const int pi = d * 4; const int vi = pi + num_priors * 4; T_BBOX locData[4]; mapLocData<T_BBOX, NUM_LAYERS>(c, d, n, loc_data, feature_size_pow2, end_layer_prior, num_anchors, num_anchors_mul, num_anchors_shr, num_loc_classes, locData, reshape_before_permute, packed32_nchw); if (code_type == CodeTypeSSD::CORNER) { #pragma unroll for (int i = 0;i < 4;++i) { //mapping index to original input loc_data[NUM_LAYERS] T_BBOX mapped_loc_data = locData[i]; if (variance_encoded_in_target) { // variance is encoded in target, we simply need to add the offset // predictions. bbox_data[index] = prior_data[pi + i] + mapped_loc_data; } else { // variance is encoded in bbox, we need to scale the offset accordingly. 
bbox_data[index] = prior_data[pi + i] + mapped_loc_data * prior_data[vi + i]; } } } else if (code_type == CodeTypeSSD::CENTER_SIZE) { const T_BBOX p_xmin = prior_data[pi]; const T_BBOX p_ymin = prior_data[pi + 1]; const T_BBOX p_xmax = prior_data[pi + 2]; const T_BBOX p_ymax = prior_data[pi + 3]; const T_BBOX prior_width = p_xmax - p_xmin; const T_BBOX prior_height = p_ymax - p_ymin; const T_BBOX prior_center_x = (p_xmin + p_xmax) / T_BBOX(2); const T_BBOX prior_center_y = (p_ymin + p_ymax) / T_BBOX(2); //mapping index to original input loc_data[NUM_LAYERS] const T_BBOX xmin = locData[0]; const T_BBOX ymin = locData[1]; const T_BBOX xmax = locData[2]; const T_BBOX ymax = locData[3]; T_BBOX decode_bbox_center_x, decode_bbox_center_y; T_BBOX decode_bbox_width, decode_bbox_height; if (variance_encoded_in_target) { // variance is encoded in target, we simply need to retore the offset // predictions. decode_bbox_center_x = xmin * prior_width + prior_center_x; decode_bbox_center_y = ymin * prior_height + prior_center_y; decode_bbox_width = __expf(xmax) * prior_width; decode_bbox_height = __expf(ymax) * prior_height; } else { // variance is encoded in bbox, we need to scale the offset accordingly. decode_bbox_center_x = prior_data[vi] * xmin * prior_width + prior_center_x; decode_bbox_center_y = prior_data[vi + 1] * ymin * prior_height + prior_center_y; decode_bbox_width = __expf(prior_data[vi + 2] * xmax) * prior_width; decode_bbox_height = __expf(prior_data[vi + 3] * ymax) * prior_height; } bbox_data[4 * index + 0] = decode_bbox_center_x - decode_bbox_width / T_BBOX(2); bbox_data[4 * index + 1] = decode_bbox_center_y - decode_bbox_height / T_BBOX(2); bbox_data[4 * index + 2] = decode_bbox_center_x + decode_bbox_width / T_BBOX(2); bbox_data[4 * index + 3] = decode_bbox_center_y + decode_bbox_height / T_BBOX(2); } else if (code_type == CodeTypeSSD::CORNER_SIZE) { //mapping index to original input loc_data[NUM_LAYERS] const T_BBOX p_xmin = prior_data[pi]; const T_BBOX p_ymin = prior_data[pi + 1]; const T_BBOX p_xmax = prior_data[pi + 2]; const T_BBOX p_ymax = prior_data[pi + 3]; const T_BBOX prior_width = p_xmax - p_xmin; const T_BBOX prior_height = p_ymax - p_ymin; T_BBOX p_size; for (int i = 0;i < 4;++i) { T_BBOX mapped_loc_data = locData[i]; if (i == 0 || i == 2) { p_size = prior_width; } else { p_size = prior_height; } if (variance_encoded_in_target) { // variance is encoded in target, we simply need to add the offset // predictions. bbox_data[4 * index + i] = prior_data[pi + i] + mapped_loc_data * p_size; } else { // variance is encoded in bbox, we need to scale the offset accordingly. 
bbox_data[4 * index + i] = prior_data[pi + i] + mapped_loc_data * prior_data[vi + i] * p_size; } } } else if (code_type == CodeTypeSSD::TF_CENTER) { float4 prior_data_p = *((float4*)(prior_data + pi)); const T_BBOX pXmin = prior_data_p.x; const T_BBOX pYmin = prior_data_p.y; const T_BBOX pXmax = prior_data_p.z; const T_BBOX pYmax = prior_data_p.w; const T_BBOX priorWidth = pXmax - pXmin; const T_BBOX priorHeight = pYmax - pYmin; const T_BBOX priorCenterX = (pXmin + pXmax) / T_BBOX(2); const T_BBOX priorCenterY = (pYmin + pYmax) / T_BBOX(2); const T_BBOX ymin = locData[0]; const T_BBOX xmin = locData[1]; const T_BBOX ymax = locData[2]; const T_BBOX xmax = locData[3]; T_BBOX bboxCenterX, bboxCenterY; T_BBOX bboxWidth, bboxHeight; float4 prior_data_v = *((float4*)(prior_data + vi)); bboxCenterX = prior_data_v.x * xmin * priorWidth + priorCenterX; bboxCenterY = prior_data_v.y * ymin * priorHeight + priorCenterY; bboxWidth = __expf(prior_data_v.z * xmax) * priorWidth; bboxHeight = __expf(prior_data_v.w * ymax) * priorHeight; *((float4*)(bbox_data + 4 * index)) = make_float4( bboxCenterX - bboxWidth / T_BBOX(2), bboxCenterY - bboxHeight / T_BBOX(2), bboxCenterX + bboxWidth / T_BBOX(2), bboxCenterY + bboxHeight / T_BBOX(2) ); } else { // Unknown code type. assert("Unknown Box decode code type"); } if (clip_bbox) { bbox_data[index] = max(min(bbox_data[index], T_BBOX(1.)), T_BBOX(0.)); } } } template <typename T_BBOX> ssdStatus_t decodeBBoxesOpt_gpu( hipStream_t stream, const int nthreads, const CodeTypeSSD code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, const void* const* loc_data, const void* prior_data, void* bbox_data, const int num_layers, const int* feature_size, const int * num_anchors, const bool reshape_before_permute, const bool packed32_nchw) { const int BS = 512; const int GS = (nthreads / 4 + BS - 1) / BS; if(num_layers == 6) { // handle a special case DecodeBBoxesOptData<T_BBOX, 6> decodeBBoxesOptData; decodeBBoxesOptData.reshape_before_permute = reshape_before_permute; decodeBBoxesOptData.packed32_nchw = packed32_nchw; int end_layer_prior = 0; for (int layer = 0;layer < num_layers;++layer) { end_layer_prior = end_layer_prior + num_anchors[layer] * feature_size[layer] * feature_size[layer]; decodeBBoxesOptData.end_layer_prior[layer] = end_layer_prior; decodeBBoxesOptData.feature_size_pow2[layer] = feature_size[layer] * feature_size[layer]; find_divisor(decodeBBoxesOptData.num_anchors_mul[layer], decodeBBoxesOptData.num_anchors_shr[layer], num_anchors[layer]); } std::memcpy(decodeBBoxesOptData.num_anchors, num_anchors, 6 * sizeof(int)); std::memcpy(decodeBBoxesOptData.loc_data, loc_data, 6 * sizeof(void*)); // determine constants for efficient integer division uint32_t num_loc_classes_mul, num_loc_classes_shr; uint32_t num_priors_mul, num_priors_shr; find_divisor(num_loc_classes_mul, num_loc_classes_shr, num_loc_classes); find_divisor(num_priors_mul, num_priors_shr, num_priors); hipLaunchKernelGGL(( decodeBBoxesOpt_kernel<T_BBOX, BS, 6>), dim3(GS), dim3(BS), 0, stream, nthreads/4, code_type, variance_encoded_in_target, num_priors, num_priors_mul, num_priors_shr, share_location, num_loc_classes, num_loc_classes_mul, num_loc_classes_shr, background_label_id, clip_bbox, (const T_BBOX*) prior_data, (T_BBOX*) bbox_data, decodeBBoxesOptData); } else{ std::cerr<< "Only support numLayers == 6" << std::endl; return STATUS_FAILURE; } CSC(hipGetLastError(), 
STATUS_FAILURE); return STATUS_SUCCESS; } // decodeBBoxesOpt LAUNCH CONFIG{{{ typedef ssdStatus_t (*dbbFunc)(hipStream_t, const int, const CodeTypeSSD, const bool, const int, const bool, const int, const int, const bool, const void* const*, const void*, void*, const int, const int*, const int*, const bool, const bool); struct dbbLaunchConfig { DType_t t_bbox; dbbFunc function; dbbLaunchConfig(DType_t t_bbox) : t_bbox(t_bbox) { } dbbLaunchConfig(DType_t t_bbox, dbbFunc function) : t_bbox(t_bbox) , function(function) { } bool operator==(const dbbLaunchConfig& other) { return t_bbox == other.t_bbox; } }; static std::vector<dbbLaunchConfig> dbbFuncVec; bool decodeBBoxesOptInit() { dbbFuncVec.push_back(dbbLaunchConfig(DataType::kFLOAT, decodeBBoxesOpt_gpu<float>)); return true; } static bool initialized = decodeBBoxesOptInit(); //}}} ssdStatus_t decodeBBoxesOpt( hipStream_t stream, const int nthreads, const CodeTypeSSD code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, const DType_t DT_BBOX, const void* const* loc_data, const void* prior_data, void* bbox_data, const int num_layers, const int * feature_size, const int * num_anchors, const bool packed32_nchw, const bool reshape_before_permute) { dbbLaunchConfig lc = dbbLaunchConfig(DT_BBOX); for (unsigned i = 0; i < dbbFuncVec.size(); ++i) { if (lc == dbbFuncVec[i]) { DEBUG_PRINTF("decodeBBox kernel %d\n", i); return dbbFuncVec[i].function(stream, nthreads, code_type, variance_encoded_in_target, num_priors, share_location, num_loc_classes, background_label_id, clip_bbox, loc_data, prior_data, bbox_data, num_layers, feature_size, num_anchors, reshape_before_permute, packed32_nchw); } } return STATUS_BAD_PARAM; } } // namespace plugin } // namespace nvinfer1
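The long comment at the top of this file describes how a flat prior index d is mapped back to (layer, cell, anchor, channel). The host-only reference below walks that mapping with plain division in place of fast_divmod, using the SSD-MobileNet layer-0 figures quoted in the comment (19x19 cells, 3 anchors, shared location predictions) plus assumed sizes for the remaining layers; it illustrates the arithmetic only and is not part of the plugin.

#include <cstdio>

int main() {
    const int numLayers = 6;
    // side length of each feature map and anchors per cell; layer 0 matches the
    // ssd-mobilenet-v1 example in the comment, the rest are assumed typical values
    const int featureSize[numLayers] = {19, 10, 5, 3, 2, 1};
    const int numAnchors[numLayers]  = {3, 6, 6, 6, 6, 6};
    const int numLocClasses = 1;                 // shared location predictions
    const int d = 1500;                          // some flat prior index
    const int coord = 0;                         // box coordinate i in [0, 4)

    int start = 0;
    for (int layer = 0; layer < numLayers; ++layer) {
        int hw = featureSize[layer] * featureSize[layer];
        int layerPriors = numAnchors[layer] * hw;
        if (d < start + layerPriors) {
            int priorInLayer = d - start;
            // permute-then-reshape layout: the anchor index varies fastest
            int anchor = priorInLayer % numAnchors[layer];
            int cell   = priorInLayer / numAnchors[layer];
            int ch     = (anchor * numLocClasses + 0) * 4 + coord;   // class c = 0
            printf("prior %d -> layer %d, cell %d, anchor %d, channel %d\n",
                   d, layer, cell, anchor, ch);
            break;
        }
        start += layerPriors;
    }
    return 0;
}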
2669ea510ea7bcd7d69f34b2af48c0e876502eb3.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <vector> #include <cstring> #include <iostream> #include "NvInferPlugin.h" #include "ssdOpt.h" #include "ssdOptMacros.h" //#include "ssd_internal.h" #include "nmsPluginOpt.h" #include "fast_divmod.h" namespace nvinfer1 { namespace plugin { template <typename T_BBOX, int NUM_LAYERS> struct DecodeBBoxesOptData { const T_BBOX * loc_data[NUM_LAYERS]; int feature_size_pow2[NUM_LAYERS]; int end_layer_prior[NUM_LAYERS]; int num_anchors[NUM_LAYERS]; uint32_t num_anchors_mul[NUM_LAYERS]; uint32_t num_anchors_shr[NUM_LAYERS]; bool reshape_before_permute; bool packed32_nchw; }; /* This function maps the input index to the corresponding loc_data offset. The input "loc_data" is composed of "num_layers" loc tensors from the CONV layers in SSD. These tensors are in NCHW layout. The input index is broken down to 4 components: i, c, d, n i - box coordinate (max 4) c - class (max num_loc_classes) d - prior (max num_priors) n - batch size For SSD-MobileNet: The transformed loc_data is generated by: 1. loc_data[id_layer] -> NCHW 2. permute(1,2,0) -> NHWC 3. reshape -> (N, H*W*C/num_loc_classes/4, num_loc_classes, 4) or (N, num_priors_layer, num_loc_classes, 4) 4. concat(axis=1, num_layers) -> (N, num_priors, num_loc_classes, 4) 5. flatten -> (N, num_priors * num_loc_classes *4, 1, 1) Therefore, before concat, each loc_data[id_layer] effectively has: num_anchors_layer = C/num_loc_classes/4 num_priors_layer = H*W*num_anchors_layer = H*W*C/num_loc_classes/4 num_box_coors = 4 After concat, the num_priors_layer (id_layer=0...5) are concatenated together, so num_priors = sum(num_priors_layer) Correspondingly, giving an index from the result of step 5 above, the mapping process will a. Compute its i (box corrdinate), c (class), d (prior), n (batch size) accordingly b. Using d, find the corresponding id_layer using num_priors_layer and num_priors c. Compute prior_in_layer which is prior in id_layer d. Using prior_in_layer to get the original value of hw. hw = priors_in_layer / num_anchors_layer. And correspondingly anchor = priors_in_layer % num_anchors_layer e. C = num_anchors_layer * num_loc_classes * 4, the original value of ch can be computed using anchor, c (class), and i (box coordinate). f. With n, hw and ch, the original index in an id_layer can be computed according to NCHW layerout e.g. for ssd-mobilenet-v1, layer 0: C = 12 HW = 19x19 num_anchors_layer = 3 num_loc_classes = 1 (shared) num_priors_layer = 1083 For SSD-ResNet34, it is a bit different since reshape happens before permute 1. loc_data[id_layer] -> NCHW 2. reshape -> (N, num_loc_classes, 4, C/num_loc_classes/4*H*W) or (N, num_loc_classes, 4, num_priors_layer) 2. permute(0,2,1) -> (N, num_loc_classes, num_priors_layer, 4) 4. concat(axis=2, num_layers) -> (N, num_loc_classes, num_priors, 4) 5. 
flatten -> (N, num_loc_classes*num_priors*4, 1, 1) num_anchors_layer = C/num_loc_classes/4 num_priors_layer = num_anchors_layer*H*W = C/num_loc_classes/4*H*W num_box_coors = 4 After concat, the num_priors_layer (id_layer=0...5) are concatenated together, so num_priors = sum(num_priors_layer) Correspondingly, giving an index from the result of step 5 above, the mapping process will a. Compute its i (box corrdinate), d (prior), c (class), n (batch size) accordingly b. Using d, find the corresponding id_layer using num_priors_layer and num_priors c. Compute prior_in_layer which is prior in id_layer d. Using prior_in_layer to get the original value of anchor. anchor = priors_in_layer / HW. And correspondingly hw = priors_in_layer % HW e. C = num_loc_classes*4*num_anchors_layer, the original value of ch can be computed using c (class), i (box coordinate) and anchor. f. With n, hw and ch, the original index in an id_layer can be computed according to NCHW layerout (g). An alternative to step d), e), and f) is that since CHW = num_loc_classes * 4 * C/num_loc_classes/4*HW = num_loc_classes*4*num_priors_layer, the original index in NCHW layout can be computed using prior_in_layer, i (box coordinate) and c (class). Dealing with inputs with NC/32HW32 packed layout: This means the loc_data[id_layer] would be in NC/32HW32 format instead of NCHW. As a result, after n, ch, hw are computed, extra steps are needed to map them back to the original index: h. packed_C = (C+31)/32 i. packed_ch = ch/32 j. packed_ch_offset = ch%32 k. Get the packed index for NC/32HW32 using h),i) and j) */ template <typename T_BBOX, int NUM_LAYERS > __device__ inline void mapLocData(const int c, int d, const int n, const T_BBOX* const* loc_data, const int * feature_size_pow2, const int * end_layer_prior, const int * num_anchors_data, const uint32_t * num_anchors_mul_data, const uint32_t * num_anchors_shr_data, const int num_loc_classes, T_BBOX mapped_loc_data[4], const bool reshape_before_permute, const bool packed32_nchw ) { //find layer_id int start_layer_prior = 0; int prior_in_layer = 0; int num_anchors, num_anchors_mul, num_anchors_shr; int num_hw; const T_BBOX* loc_data_layer; #pragma unroll for(int layer = 0; layer < NUM_LAYERS; layer++) { if(d < end_layer_prior[layer]) { num_anchors = num_anchors_data[layer]; num_anchors_mul = num_anchors_mul_data[layer]; num_anchors_shr = num_anchors_shr_data[layer]; num_hw = feature_size_pow2[layer]; prior_in_layer = d - start_layer_prior; loc_data_layer = loc_data[layer]; // there should be a break here - but this would make all access to the arrays dynamic // due to compiler deoptimizations d = INT_MAX; } start_layer_prior = end_layer_prior[layer]; } //Transform id_in_layer from HWC -> CHW int num_ch = num_anchors * num_loc_classes * 4; if(reshape_before_permute) { int hw = prior_in_layer % num_hw; int anchor = prior_in_layer / num_hw; #pragma unroll for (int i = 0;i < 4;++i) { int ch = (c*4 + i)*num_anchors + anchor; int mappedIndex; if(packed32_nchw) { int packed_num_ch = (num_ch+31)/32; int packed_ch = ch >> 5; // ch/32; int packed_ch_offset = ch & 31; // ch%32; mappedIndex = ((n * packed_num_ch + packed_ch)*num_hw + hw)*32 + packed_ch_offset; } else { mappedIndex = (n * num_ch + ch)*num_hw + hw; } mapped_loc_data[i] = loc_data_layer[mappedIndex]; } } else { //anchor = prior_in_layer % num_anchors; //hw = prior_in_layer / num_anchors; int anchor, hw; fast_divmod(hw, anchor, prior_in_layer, num_anchors, num_anchors_mul, num_anchors_shr); int ch_base = (anchor*num_loc_classes+c)*4; 
#pragma unroll for (int i = 0;i < 4;++i) { int ch = ch_base + i; int mappedIndex; if(packed32_nchw) { int packed_num_ch = (num_ch+31)/32; int packed_ch = ch >> 5; // ch/32; int packed_ch_offset = ch & 31; // ch%32; mappedIndex = ((n * packed_num_ch + packed_ch)*num_hw + hw)*32 + packed_ch_offset; } else { mappedIndex = (n * num_ch + ch)*num_hw + hw; } mapped_loc_data[i] = loc_data_layer[mappedIndex]; } } } template <typename T_BBOX, unsigned nthds_per_cta, int NUM_LAYERS> __launch_bounds__(nthds_per_cta) __global__ void decodeBBoxesOpt_kernel( const int nthreads, const CodeTypeSSD code_type, const bool variance_encoded_in_target, const int num_priors, const uint32_t num_priors_mul, const uint32_t num_priors_shr, const bool share_location, const int num_loc_classes, const uint32_t num_loc_classes_mul, const uint32_t num_loc_classes_shr, const int background_label_id, const bool clip_bbox, const T_BBOX* prior_data, T_BBOX* bbox_data, const DecodeBBoxesOptData<T_BBOX, NUM_LAYERS> decodeBBoxesOptData) { for (int index = blockIdx.x * nthds_per_cta + threadIdx.x; index < nthreads; index += nthds_per_cta * gridDim.x) { const T_BBOX* const* loc_data = &decodeBBoxesOptData.loc_data[0]; const int* feature_size_pow2 = &decodeBBoxesOptData.feature_size_pow2[0]; const int* end_layer_prior = &decodeBBoxesOptData.end_layer_prior[0]; const int* num_anchors = &decodeBBoxesOptData.num_anchors[0]; const uint32_t* num_anchors_mul = &decodeBBoxesOptData.num_anchors_mul[0]; const uint32_t* num_anchors_shr = &decodeBBoxesOptData.num_anchors_shr[0]; const bool reshape_before_permute = decodeBBoxesOptData.reshape_before_permute; const bool packed32_nchw = decodeBBoxesOptData.packed32_nchw; // index has been already been divided by 4 (num_box_coors) before passed to this function //c = (index) % num_loc_classes; //d = (index / num_loc_classes) % num_priors; //n = (index / num_loc_classes / num_priors); int c, c_div, d, n; fast_divmod(c_div, c, index, num_loc_classes, num_loc_classes_mul, num_loc_classes_shr); fast_divmod(n, d, c_div, num_priors, num_priors_mul, num_priors_shr); if (!share_location && c == background_label_id) { // Ignore background class if not share_location. return; } const int pi = d * 4; const int vi = pi + num_priors * 4; T_BBOX locData[4]; mapLocData<T_BBOX, NUM_LAYERS>(c, d, n, loc_data, feature_size_pow2, end_layer_prior, num_anchors, num_anchors_mul, num_anchors_shr, num_loc_classes, locData, reshape_before_permute, packed32_nchw); if (code_type == CodeTypeSSD::CORNER) { #pragma unroll for (int i = 0;i < 4;++i) { //mapping index to original input loc_data[NUM_LAYERS] T_BBOX mapped_loc_data = locData[i]; if (variance_encoded_in_target) { // variance is encoded in target, we simply need to add the offset // predictions. bbox_data[index] = prior_data[pi + i] + mapped_loc_data; } else { // variance is encoded in bbox, we need to scale the offset accordingly. 
bbox_data[index] = prior_data[pi + i] + mapped_loc_data * prior_data[vi + i]; } } } else if (code_type == CodeTypeSSD::CENTER_SIZE) { const T_BBOX p_xmin = prior_data[pi]; const T_BBOX p_ymin = prior_data[pi + 1]; const T_BBOX p_xmax = prior_data[pi + 2]; const T_BBOX p_ymax = prior_data[pi + 3]; const T_BBOX prior_width = p_xmax - p_xmin; const T_BBOX prior_height = p_ymax - p_ymin; const T_BBOX prior_center_x = (p_xmin + p_xmax) / T_BBOX(2); const T_BBOX prior_center_y = (p_ymin + p_ymax) / T_BBOX(2); //mapping index to original input loc_data[NUM_LAYERS] const T_BBOX xmin = locData[0]; const T_BBOX ymin = locData[1]; const T_BBOX xmax = locData[2]; const T_BBOX ymax = locData[3]; T_BBOX decode_bbox_center_x, decode_bbox_center_y; T_BBOX decode_bbox_width, decode_bbox_height; if (variance_encoded_in_target) { // variance is encoded in target, we simply need to retore the offset // predictions. decode_bbox_center_x = xmin * prior_width + prior_center_x; decode_bbox_center_y = ymin * prior_height + prior_center_y; decode_bbox_width = __expf(xmax) * prior_width; decode_bbox_height = __expf(ymax) * prior_height; } else { // variance is encoded in bbox, we need to scale the offset accordingly. decode_bbox_center_x = prior_data[vi] * xmin * prior_width + prior_center_x; decode_bbox_center_y = prior_data[vi + 1] * ymin * prior_height + prior_center_y; decode_bbox_width = __expf(prior_data[vi + 2] * xmax) * prior_width; decode_bbox_height = __expf(prior_data[vi + 3] * ymax) * prior_height; } bbox_data[4 * index + 0] = decode_bbox_center_x - decode_bbox_width / T_BBOX(2); bbox_data[4 * index + 1] = decode_bbox_center_y - decode_bbox_height / T_BBOX(2); bbox_data[4 * index + 2] = decode_bbox_center_x + decode_bbox_width / T_BBOX(2); bbox_data[4 * index + 3] = decode_bbox_center_y + decode_bbox_height / T_BBOX(2); } else if (code_type == CodeTypeSSD::CORNER_SIZE) { //mapping index to original input loc_data[NUM_LAYERS] const T_BBOX p_xmin = prior_data[pi]; const T_BBOX p_ymin = prior_data[pi + 1]; const T_BBOX p_xmax = prior_data[pi + 2]; const T_BBOX p_ymax = prior_data[pi + 3]; const T_BBOX prior_width = p_xmax - p_xmin; const T_BBOX prior_height = p_ymax - p_ymin; T_BBOX p_size; for (int i = 0;i < 4;++i) { T_BBOX mapped_loc_data = locData[i]; if (i == 0 || i == 2) { p_size = prior_width; } else { p_size = prior_height; } if (variance_encoded_in_target) { // variance is encoded in target, we simply need to add the offset // predictions. bbox_data[4 * index + i] = prior_data[pi + i] + mapped_loc_data * p_size; } else { // variance is encoded in bbox, we need to scale the offset accordingly. 
bbox_data[4 * index + i] = prior_data[pi + i] + mapped_loc_data * prior_data[vi + i] * p_size; } } } else if (code_type == CodeTypeSSD::TF_CENTER) { float4 prior_data_p = *((float4*)(prior_data + pi)); const T_BBOX pXmin = prior_data_p.x; const T_BBOX pYmin = prior_data_p.y; const T_BBOX pXmax = prior_data_p.z; const T_BBOX pYmax = prior_data_p.w; const T_BBOX priorWidth = pXmax - pXmin; const T_BBOX priorHeight = pYmax - pYmin; const T_BBOX priorCenterX = (pXmin + pXmax) / T_BBOX(2); const T_BBOX priorCenterY = (pYmin + pYmax) / T_BBOX(2); const T_BBOX ymin = locData[0]; const T_BBOX xmin = locData[1]; const T_BBOX ymax = locData[2]; const T_BBOX xmax = locData[3]; T_BBOX bboxCenterX, bboxCenterY; T_BBOX bboxWidth, bboxHeight; float4 prior_data_v = *((float4*)(prior_data + vi)); bboxCenterX = prior_data_v.x * xmin * priorWidth + priorCenterX; bboxCenterY = prior_data_v.y * ymin * priorHeight + priorCenterY; bboxWidth = __expf(prior_data_v.z * xmax) * priorWidth; bboxHeight = __expf(prior_data_v.w * ymax) * priorHeight; *((float4*)(bbox_data + 4 * index)) = make_float4( bboxCenterX - bboxWidth / T_BBOX(2), bboxCenterY - bboxHeight / T_BBOX(2), bboxCenterX + bboxWidth / T_BBOX(2), bboxCenterY + bboxHeight / T_BBOX(2) ); } else { // Unknown code type. assert("Unknown Box decode code type"); } if (clip_bbox) { bbox_data[index] = max(min(bbox_data[index], T_BBOX(1.)), T_BBOX(0.)); } } } template <typename T_BBOX> ssdStatus_t decodeBBoxesOpt_gpu( cudaStream_t stream, const int nthreads, const CodeTypeSSD code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, const void* const* loc_data, const void* prior_data, void* bbox_data, const int num_layers, const int* feature_size, const int * num_anchors, const bool reshape_before_permute, const bool packed32_nchw) { const int BS = 512; const int GS = (nthreads / 4 + BS - 1) / BS; if(num_layers == 6) { // handle a special case DecodeBBoxesOptData<T_BBOX, 6> decodeBBoxesOptData; decodeBBoxesOptData.reshape_before_permute = reshape_before_permute; decodeBBoxesOptData.packed32_nchw = packed32_nchw; int end_layer_prior = 0; for (int layer = 0;layer < num_layers;++layer) { end_layer_prior = end_layer_prior + num_anchors[layer] * feature_size[layer] * feature_size[layer]; decodeBBoxesOptData.end_layer_prior[layer] = end_layer_prior; decodeBBoxesOptData.feature_size_pow2[layer] = feature_size[layer] * feature_size[layer]; find_divisor(decodeBBoxesOptData.num_anchors_mul[layer], decodeBBoxesOptData.num_anchors_shr[layer], num_anchors[layer]); } std::memcpy(decodeBBoxesOptData.num_anchors, num_anchors, 6 * sizeof(int)); std::memcpy(decodeBBoxesOptData.loc_data, loc_data, 6 * sizeof(void*)); // determine constants for efficient integer division uint32_t num_loc_classes_mul, num_loc_classes_shr; uint32_t num_priors_mul, num_priors_shr; find_divisor(num_loc_classes_mul, num_loc_classes_shr, num_loc_classes); find_divisor(num_priors_mul, num_priors_shr, num_priors); decodeBBoxesOpt_kernel<T_BBOX, BS, 6><<<GS, BS, 0, stream>>>(nthreads/4, code_type, variance_encoded_in_target, num_priors, num_priors_mul, num_priors_shr, share_location, num_loc_classes, num_loc_classes_mul, num_loc_classes_shr, background_label_id, clip_bbox, (const T_BBOX*) prior_data, (T_BBOX*) bbox_data, decodeBBoxesOptData); } else{ std::cerr<< "Only support numLayers == 6" << std::endl; return STATUS_FAILURE; } CSC(cudaGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; 
} // decodeBBoxesOpt LAUNCH CONFIG{{{ typedef ssdStatus_t (*dbbFunc)(cudaStream_t, const int, const CodeTypeSSD, const bool, const int, const bool, const int, const int, const bool, const void* const*, const void*, void*, const int, const int*, const int*, const bool, const bool); struct dbbLaunchConfig { DType_t t_bbox; dbbFunc function; dbbLaunchConfig(DType_t t_bbox) : t_bbox(t_bbox) { } dbbLaunchConfig(DType_t t_bbox, dbbFunc function) : t_bbox(t_bbox) , function(function) { } bool operator==(const dbbLaunchConfig& other) { return t_bbox == other.t_bbox; } }; static std::vector<dbbLaunchConfig> dbbFuncVec; bool decodeBBoxesOptInit() { dbbFuncVec.push_back(dbbLaunchConfig(DataType::kFLOAT, decodeBBoxesOpt_gpu<float>)); return true; } static bool initialized = decodeBBoxesOptInit(); //}}} ssdStatus_t decodeBBoxesOpt( cudaStream_t stream, const int nthreads, const CodeTypeSSD code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, const DType_t DT_BBOX, const void* const* loc_data, const void* prior_data, void* bbox_data, const int num_layers, const int * feature_size, const int * num_anchors, const bool packed32_nchw, const bool reshape_before_permute) { dbbLaunchConfig lc = dbbLaunchConfig(DT_BBOX); for (unsigned i = 0; i < dbbFuncVec.size(); ++i) { if (lc == dbbFuncVec[i]) { DEBUG_PRINTF("decodeBBox kernel %d\n", i); return dbbFuncVec[i].function(stream, nthreads, code_type, variance_encoded_in_target, num_priors, share_location, num_loc_classes, background_label_id, clip_bbox, loc_data, prior_data, bbox_data, num_layers, feature_size, num_anchors, reshape_before_permute, packed32_nchw); } } return STATUS_BAD_PARAM; } } // namespace plugin } // namespace nvinfer1
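The long comment in the plugin above describes how a flattened (n, c, d, i) index is mapped back into the per-layer NCHW or NC/32HW32 tensors. A minimal host-side sketch of that index arithmetic (standalone, assumed names; not part of the plugin) may make the packed-layout step easier to follow:

// Host-side sketch of the two index computations used in mapLocData.
// n, ch, hw, num_ch and num_hw mirror the kernel's local variables.
#include <cstdio>

// Linear index into a plain NCHW tensor.
static int nchw_index(int n, int ch, int hw, int num_ch, int num_hw) {
    return (n * num_ch + ch) * num_hw + hw;
}

// Linear index into the NC/32HW32 packed layout: channels are grouped into
// blocks of 32, and within a block the 32 channels of one (h, w) position
// are stored contiguously.
static int packed32_index(int n, int ch, int hw, int num_ch, int num_hw) {
    int packed_num_ch    = (num_ch + 31) / 32;  // number of 32-channel groups
    int packed_ch        = ch >> 5;             // ch / 32
    int packed_ch_offset = ch & 31;             // ch % 32
    return ((n * packed_num_ch + packed_ch) * num_hw + hw) * 32 + packed_ch_offset;
}

int main() {
    int num_ch = 24, num_hw = 19 * 19;  // e.g. 6 anchors * 1 class * 4 coords on a 19x19 map
    printf("NCHW index      : %d\n", nchw_index(1, 5, 100, num_ch, num_hw));
    printf("NC/32HW32 index : %d\n", packed32_index(1, 5, 100, num_ch, num_hw));
    return 0;
}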
73b556a8341845e168759f6e8bf55120fa9c9dff.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime_api.h> #include <unistd.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <string> #define GPUJOULE_DIR "" #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 196608 //#define GLOBAL_MEM_ELEMENTS 131072 //#define GLOBAL_MEM_ELEMENTS 196608 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; __global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { int block_id; int warp_id; int i; int index; int tid = blockDim.x * blockIdx.x + threadIdx.x; void **ptr_array = (void **)my_ptr_array; unsigned long long *array = (unsigned long long *)my_array; if (tid == 0) { // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; //int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; // for (block_id = 0; block_id < num_blocks_k; block_id++) { for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { for (i = 0; i < elements_per_warp; i++) { //index = (block_id * elements_per_block) + (warp_id * elements_per_warp); index = (warp_id * elements_per_warp); ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))]; } } /* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; } */ for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; array[i] = (unsigned long long)ptr_array[i]; } } __syncthreads(); } __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k, unsigned long long ** my_end_ptr_array) { unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int block_id = blockIdx.x; int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x % 32; // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; // int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; //int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; int index1 = (warp_id * elements_per_warp) + warp_thread_id; void **ptr_array = (void **)my_ptr_array; unsigned long long int *array = (unsigned long long int *)my_array; void **tmp_ptr; //tmp_ptr = (void *)sdata; //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[index1])); tmp_ptr = (void **)(&(array[index1])); //#define ONCE tmp_ptr = *(void**)tmp_ptr; #define ONCE tmp_ptr = (void**)(*tmp_ptr); #define REPEAT_FOUR_TIMES ONCE ONCE ONCE ONCE #define REPEAT_SIXTEEN_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES #define REPEAT_SIXTYFOUR_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES if ((threadIdx.x % 32) < divergence) { for(k = 0; k <= iterations; k++) { // tmp_ptr = (void**)(*tmp_ptr); if (k == 0) { sum_time = 0; 
} start_time = clock(); // ONCE REPEAT_SIXTYFOUR_TIMES; REPEAT_SIXTYFOUR_TIMES; REPEAT_SIXTYFOUR_TIMES; REPEAT_FOUR_TIMES; REPEAT_FOUR_TIMES; end_time = clock(); sum_time += (end_time - start_time); } } my_end_ptr_array[tid] = (unsigned long long*)(*tmp_ptr); duration[tid] = sum_time; } // Shared memory array size is N-2. Last two elements are used as dummy variables. void parametric_measure_shared(int N, int iterations, int stride) { hipProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long ** h_end_ptr_a; unsigned long long ** d_end_ptr_a; unsigned long long * duration; unsigned long long * latency; hipError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); h_end_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); hipMalloc ((void **) &d_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 1 is %s\n", hipGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice); hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice); hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice); hipMemcpy((void *)d_end_ptr_a, (void *)h_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 2 is %s\n", hipGetErrorString(error_id)); } hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); hipDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; for (int i = 0; i < 1; i++) { hipEvent_t start, stop; float time; std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l2_cache/fadd_l2d_0_100_64p_asm_power.txt &"; std::system(cmd.c_str()); std::system("sleep 5"); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipProfilerStart(); hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); hipLaunchKernelGGL(( 
shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block, d_end_ptr_a); hipDeviceSynchronize(); ///hipDeviceSynchronize (); hipProfilerStop(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); std::system("killall power_monitor"); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 3 is %s\n", hipGetErrorString(error_id)); } /* copy results from GPU to CPU */ hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost); hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost); hipMemcpy((void *)h_end_ptr_a, (void *)d_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost); hipDeviceSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { if (latency[i] > 0) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } } printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * (divergence / 32.0) * num_blocks * 200.0 *num_iterations)), (double)(min_dur/(200.0 * num_iterations)), (double)(max_dur/(200.0 * num_iterations)), time); //printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 200.0 *num_iterations)), (double)(min_dur/(200.0 * num_iterations)), (double)(max_dur/(200.0 * num_iterations)), time); //printf("%f\n", time); } /* free memory on GPU */ hipFree(d_a); hipFree(d_ptr_a); hipFree(d_end_ptr_a); hipFree(duration); hipDeviceSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(h_end_ptr_a); free(latency); } void usage() { printf("Usage ./binary <num_blocks> <num_threads_per_block> <iterations> <threads active per warp> <stride>\n"); } int main(int argc, char **argv) { int N, stride; // initialize upper bounds here // int stride_upper_bound = 1; if(argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); stride = atoi(argv[5]); // printf("Shared memory latency for varying stride.\n"); // printf("stride (bytes), latency (clocks)\n"); // N = SHARED_MEM_ELEMENTS; N = GLOBAL_MEM_ELEMENTS; // N = num_threads_per_block; // stride_upper_bound = 1; // for (stride = 1; stride <= stride_upper_bound; stride += 1) { parametric_measure_shared(N, 10, stride); // } return 0; }
73b556a8341845e168759f6e8bf55120fa9c9dff.cu
#include <stdio.h> #include <cuda_profiler_api.h> #include <unistd.h> #include <curand.h> #include <curand_kernel.h> #include <string> #define GPUJOULE_DIR "" #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 196608 //#define GLOBAL_MEM_ELEMENTS 131072 //#define GLOBAL_MEM_ELEMENTS 196608 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; __global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { int block_id; int warp_id; int i; int index; int tid = blockDim.x * blockIdx.x + threadIdx.x; void **ptr_array = (void **)my_ptr_array; unsigned long long *array = (unsigned long long *)my_array; if (tid == 0) { // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; //int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; // for (block_id = 0; block_id < num_blocks_k; block_id++) { for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { for (i = 0; i < elements_per_warp; i++) { //index = (block_id * elements_per_block) + (warp_id * elements_per_warp); index = (warp_id * elements_per_warp); ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))]; } } /* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; } */ for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; array[i] = (unsigned long long)ptr_array[i]; } } __syncthreads(); } __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k, unsigned long long ** my_end_ptr_array) { unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int block_id = blockIdx.x; int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x % 32; // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; // int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; //int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; int index1 = (warp_id * elements_per_warp) + warp_thread_id; void **ptr_array = (void **)my_ptr_array; unsigned long long int *array = (unsigned long long int *)my_array; void **tmp_ptr; //tmp_ptr = (void *)sdata; //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[index1])); tmp_ptr = (void **)(&(array[index1])); //#define ONCE tmp_ptr = *(void**)tmp_ptr; #define ONCE tmp_ptr = (void**)(*tmp_ptr); #define REPEAT_FOUR_TIMES ONCE ONCE ONCE ONCE #define REPEAT_SIXTEEN_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES REPEAT_FOUR_TIMES #define REPEAT_SIXTYFOUR_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES REPEAT_SIXTEEN_TIMES if ((threadIdx.x % 32) < divergence) { for(k = 0; k <= iterations; k++) { // tmp_ptr = (void**)(*tmp_ptr); if (k == 0) { sum_time = 0; } start_time = clock(); // ONCE REPEAT_SIXTYFOUR_TIMES; REPEAT_SIXTYFOUR_TIMES; 
REPEAT_SIXTYFOUR_TIMES; REPEAT_FOUR_TIMES; REPEAT_FOUR_TIMES; end_time = clock(); sum_time += (end_time - start_time); } } my_end_ptr_array[tid] = (unsigned long long*)(*tmp_ptr); duration[tid] = sum_time; } // Shared memory array size is N-2. Last two elements are used as dummy variables. void parametric_measure_shared(int N, int iterations, int stride) { cudaProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long ** h_end_ptr_a; unsigned long long ** d_end_ptr_a; unsigned long long * duration; unsigned long long * latency; cudaError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); h_end_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); cudaMalloc ((void **) &d_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 1 is %s\n", cudaGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice); cudaMemcpy((void *)d_end_ptr_a, (void *)h_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 2 is %s\n", cudaGetErrorString(error_id)); } init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); cudaDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; for (int i = 0; i < 1; i++) { cudaEvent_t start, stop; float time; std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l2_cache/fadd_l2d_0_100_64p_asm_power.txt &"; std::system(cmd.c_str()); std::system("sleep 5"); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cudaProfilerStart(); cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, 
num_blocks, num_threads_per_block, d_end_ptr_a); cudaDeviceSynchronize(); ///cudaThreadSynchronize (); cudaProfilerStop(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); std::system("killall power_monitor"); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 3 is %s\n", cudaGetErrorString(error_id)); } /* copy results from GPU to CPU */ cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost); cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost); cudaMemcpy((void *)h_end_ptr_a, (void *)d_end_ptr_a, sizeof(unsigned long long *) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost); cudaThreadSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { if (latency[i] > 0) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } } printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * (divergence / 32.0) * num_blocks * 200.0 *num_iterations)), (double)(min_dur/(200.0 * num_iterations)), (double)(max_dur/(200.0 * num_iterations)), time); //printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 200.0 *num_iterations)), (double)(min_dur/(200.0 * num_iterations)), (double)(max_dur/(200.0 * num_iterations)), time); //printf("%f\n", time); } /* free memory on GPU */ cudaFree(d_a); cudaFree(d_ptr_a); cudaFree(d_end_ptr_a); cudaFree(duration); cudaThreadSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(h_end_ptr_a); free(latency); } void usage() { printf("Usage ./binary <num_blocks> <num_threads_per_block> <iterations> <threads active per warp> <stride>\n"); } int main(int argc, char **argv) { int N, stride; // initialize upper bounds here // int stride_upper_bound = 1; if(argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); stride = atoi(argv[5]); // printf("Shared memory latency for varying stride.\n"); // printf("stride (bytes), latency (clocks)\n"); // N = SHARED_MEM_ELEMENTS; N = GLOBAL_MEM_ELEMENTS; // N = num_threads_per_block; // stride_upper_bound = 1; // for (stride = 1; stride <= stride_upper_bound; stride += 1) { parametric_measure_shared(N, 10, stride); // } return 0; }
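Both versions of the benchmark above time a chain of dependent loads (the ONCE / REPEAT_* macros) so that each access must wait for the previous one to complete. A stripped-down sketch of the same pointer-chasing idea (hypothetical standalone code, not part of the benchmark) looks like this:

#include <cstdio>
#include <cuda_runtime.h>

#define N 4096

// Each element of the array holds the address of the next element to visit,
// so every load depends on the previous one and the measured clock ticks are
// dominated by memory latency rather than bandwidth.
__global__ void chase(unsigned long long *a, int iters, unsigned long long *cycles) {
    void **p = (void **)a;                       // start of the chain
    unsigned long long t0 = clock64();
    for (int i = 0; i < iters; ++i)
        p = (void **)(*p);                       // dependent load
    unsigned long long t1 = clock64();
    *cycles = t1 - t0;
    if (p == NULL) *cycles = 0;                  // keep the chase from being optimized away
}

int main() {
    unsigned long long h[N], *d, *d_cycles, cycles;
    cudaMalloc((void **)&d, N * sizeof(unsigned long long));
    cudaMalloc((void **)&d_cycles, sizeof(unsigned long long));
    // Build the chain on the host: element i points at element (i + 48) % N,
    // mirroring the 48-element stride used by init_memory in the benchmark.
    for (int i = 0; i < N; i++)
        h[i] = (unsigned long long)(d + (i + 48) % N);
    cudaMemcpy(d, h, sizeof(h), cudaMemcpyHostToDevice);
    chase<<<1, 1>>>(d, 1000, d_cycles);
    cudaMemcpy(&cycles, d_cycles, sizeof(cycles), cudaMemcpyDeviceToHost);
    printf("~%.1f cycles per dependent load\n", cycles / 1000.0);
    cudaFree(d); cudaFree(d_cycles);
    return 0;
}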
1d06dcfc6b0bc71873b8619974058aebf56fe0b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <assert.h> // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } #endif return result; } template <typename T> __global__ void offset(T* a, int s) { int i = blockDim.x * blockIdx.x + threadIdx.x + s; a[i] = a[i] + 1; } template <typename T> __global__ void stride(T* a, int s) { int i = (blockDim.x * blockIdx.x + threadIdx.x) * s; a[i] = a[i] + 1; } template <typename T> void runTest(int deviceId, int nMB) { int blockSize = 256; float ms; T *d_a; hipEvent_t startEvent, stopEvent; int n = nMB*1024*1024/sizeof(T); // NB: d_a(33*nMB) for stride case checkCuda( hipMalloc(&d_a, n * 33 * sizeof(T)) ); checkCuda( hipEventCreate(&startEvent) ); checkCuda( hipEventCreate(&stopEvent) ); printf("Offset, Bandwidth (GB/s), Time (ms):\n"); hipLaunchKernelGGL(( offset), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, 0); // warm up for (int i = 0; i <= 32; i++) { checkCuda( hipMemset(d_a, 0, n * sizeof(T)) ); checkCuda( hipEventRecord(startEvent,0) ); hipLaunchKernelGGL(( offset), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, i); checkCuda( hipEventRecord(stopEvent,0) ); checkCuda( hipEventSynchronize(stopEvent) ); checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) ); printf("%d, %f, %f\n", i, 2*nMB/ms, ms); } printf("\n"); printf("Stride, Bandwidth (GB/s), Time (ms):\n"); hipLaunchKernelGGL(( stride), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, 1); // warm up for (int i = 1; i <= 32; i++) { checkCuda( hipMemset(d_a, 0, n * sizeof(T)) ); checkCuda( hipEventRecord(startEvent,0) ); hipLaunchKernelGGL(( stride), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, i); checkCuda( hipEventRecord(stopEvent,0) ); checkCuda( hipEventSynchronize(stopEvent) ); checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) ); printf("%d, %f, %f\n", i, 2*nMB/ms, ms); } checkCuda( hipEventDestroy(startEvent) ); checkCuda( hipEventDestroy(stopEvent) ); hipFree(d_a); } int main(int argc, char **argv) { int nMB = 4; int deviceId = 0; bool bFp64 = false; for (int i = 1; i < argc; i++) { if (!strncmp(argv[i], "dev=", 4)) deviceId = atoi((char*)(&argv[i][4])); else if (!strcmp(argv[i], "fp64")) bFp64 = true; } hipDeviceProp_t prop; checkCuda( hipSetDevice(deviceId) ) ; checkCuda( hipGetDeviceProperties(&prop, deviceId) ); printf("Device: %s\n", prop.name); printf("Transfer size (MB): %d\n", nMB); printf("%s Precision\n", bFp64 ? "Double" : "Single"); if (bFp64) runTest<double>(deviceId, nMB); else runTest<float>(deviceId, nMB); }
1d06dcfc6b0bc71873b8619974058aebf56fe0b8.cu
#include <stdio.h> #include <assert.h> // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } #endif return result; } template <typename T> __global__ void offset(T* a, int s) { int i = blockDim.x * blockIdx.x + threadIdx.x + s; a[i] = a[i] + 1; } template <typename T> __global__ void stride(T* a, int s) { int i = (blockDim.x * blockIdx.x + threadIdx.x) * s; a[i] = a[i] + 1; } template <typename T> void runTest(int deviceId, int nMB) { int blockSize = 256; float ms; T *d_a; cudaEvent_t startEvent, stopEvent; int n = nMB*1024*1024/sizeof(T); // NB: d_a(33*nMB) for stride case checkCuda( cudaMalloc(&d_a, n * 33 * sizeof(T)) ); checkCuda( cudaEventCreate(&startEvent) ); checkCuda( cudaEventCreate(&stopEvent) ); printf("Offset, Bandwidth (GB/s), Time (ms):\n"); offset<<<n/blockSize, blockSize>>>(d_a, 0); // warm up for (int i = 0; i <= 32; i++) { checkCuda( cudaMemset(d_a, 0, n * sizeof(T)) ); checkCuda( cudaEventRecord(startEvent,0) ); offset<<<n/blockSize, blockSize>>>(d_a, i); checkCuda( cudaEventRecord(stopEvent,0) ); checkCuda( cudaEventSynchronize(stopEvent) ); checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) ); printf("%d, %f, %f\n", i, 2*nMB/ms, ms); } printf("\n"); printf("Stride, Bandwidth (GB/s), Time (ms):\n"); stride<<<n/blockSize, blockSize>>>(d_a, 1); // warm up for (int i = 1; i <= 32; i++) { checkCuda( cudaMemset(d_a, 0, n * sizeof(T)) ); checkCuda( cudaEventRecord(startEvent,0) ); stride<<<n/blockSize, blockSize>>>(d_a, i); checkCuda( cudaEventRecord(stopEvent,0) ); checkCuda( cudaEventSynchronize(stopEvent) ); checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) ); printf("%d, %f, %f\n", i, 2*nMB/ms, ms); } checkCuda( cudaEventDestroy(startEvent) ); checkCuda( cudaEventDestroy(stopEvent) ); cudaFree(d_a); } int main(int argc, char **argv) { int nMB = 4; int deviceId = 0; bool bFp64 = false; for (int i = 1; i < argc; i++) { if (!strncmp(argv[i], "dev=", 4)) deviceId = atoi((char*)(&argv[i][4])); else if (!strcmp(argv[i], "fp64")) bFp64 = true; } cudaDeviceProp prop; checkCuda( cudaSetDevice(deviceId) ) ; checkCuda( cudaGetDeviceProperties(&prop, deviceId) ); printf("Device: %s\n", prop.name); printf("Transfer size (MB): %d\n", nMB); printf("%s Precision\n", bFp64 ? "Double" : "Single"); if (bFp64) runTest<double>(deviceId, nMB); else runTest<float>(deviceId, nMB); }
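The bandwidth figure printed above, 2*nMB/ms, follows from each kernel reading and writing every element once (2 * nMB megabytes moved per launch) and from megabytes per millisecond being numerically equal to gigabytes per second. A tiny illustration with a made-up timing value:

// Sketch of the effective-bandwidth arithmetic only; the 0.05 ms value is hypothetical.
#include <cstdio>

int main() {
    int   nMB = 4;       // transfer size used by the sample
    float ms  = 0.05f;   // hypothetical elapsed time for one launch
    float gbps = 2.0f * nMB / ms;   // (2 * 4 MB) / 0.05 ms = 160 MB/ms = 160 GB/s
    printf("effective bandwidth: %.1f GB/s\n", gbps);
    return 0;
}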
dba8dd76913a92741ea60336e0a3f6bf730d24fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gpu_runtime.h" // -label * log(prediction) - (1 - label) * log(1 - prediction) __global__ void binary_cross_entropy_kernel(int nrow, const float *prediction, const float *label, float *loss) { // Two dimensional thread blocks. size_t id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= nrow) return; loss[id] = - label[id] * log(prediction[id]) - (1 - label[id]) * log(1 - prediction[id]); } int DLGpuBinaryCrossEntropy(const DLArrayHandle prediction, const DLArrayHandle label, DLArrayHandle loss, DLStreamHandle stream_handle = NULL) { size_t indim = prediction->ndim; assert (indim == label->ndim && indim == loss->ndim); int nrow = 1; for (int i = 0; i < indim-1; ++i) { nrow *= prediction->shape[i]; } const float *prediction_data = (const float *)prediction->data; const float *label_data = (const float *)label->data; float *output_data = (float *)loss->data; dim3 blocks; dim3 threads; if (nrow <= 1024) { threads.x = nrow; blocks.x = 1; } else { threads.x = 1024; blocks.x = (nrow + 1023) / 1024; } // 1 block if (stream_handle) { hipLaunchKernelGGL(( binary_cross_entropy_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, nrow, prediction_data, label_data, output_data); } else { hipLaunchKernelGGL(( binary_cross_entropy_kernel), dim3(blocks), dim3(threads), 0, 0, nrow, prediction_data, label_data, output_data); } return 0; } __global__ void binary_cross_entropy_gradient_kernel(int nrow, const float *prediction, const float *label, const float *output_grad, float *output) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= nrow) return; output[id] = output_grad[id] * (-label[id]/prediction[id] + (1 - label[id])/(1-prediction[id])); } int DLGpuBinaryCrossEntropy_Gradient(const DLArrayHandle prediction, const DLArrayHandle label, const DLArrayHandle output_grad, DLArrayHandle output, DLStreamHandle stream_handle = NULL) { size_t indim = prediction->ndim; assert (indim >= 2 && indim == label->ndim && indim == output_grad->ndim && indim == output->ndim); int nrow = 1; for (int i = 0; i < indim-1; ++i) { nrow *= prediction->shape[i]; } const float *prediction_data = (const float *)prediction->data; const float *label_data = (const float *)label->data; const float *output_grad_data = (const float *)output_grad->data; float *output_data = (float *)output->data; dim3 blocks; dim3 threads; if (nrow <= 1024) { threads.x = nrow; blocks.x = 1; } else { threads.x = 1024; blocks.x = (nrow + 1023) / 1024; } if (stream_handle) { hipLaunchKernelGGL(( binary_cross_entropy_gradient_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, nrow, prediction_data, label_data, output_grad_data, output_data); } else { hipLaunchKernelGGL(( binary_cross_entropy_gradient_kernel), dim3(blocks), dim3(threads), 0, 0, nrow, prediction_data, label_data, output_grad_data, output_data); } return 0; }
dba8dd76913a92741ea60336e0a3f6bf730d24fb.cu
#include "gpu_runtime.h" // -label * log(prediction) - (1 - label) * log(1 - prediction) __global__ void binary_cross_entropy_kernel(int nrow, const float *prediction, const float *label, float *loss) { // Two dimensional thread blocks. size_t id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= nrow) return; loss[id] = - label[id] * log(prediction[id]) - (1 - label[id]) * log(1 - prediction[id]); } int DLGpuBinaryCrossEntropy(const DLArrayHandle prediction, const DLArrayHandle label, DLArrayHandle loss, DLStreamHandle stream_handle = NULL) { size_t indim = prediction->ndim; assert (indim == label->ndim && indim == loss->ndim); int nrow = 1; for (int i = 0; i < indim-1; ++i) { nrow *= prediction->shape[i]; } const float *prediction_data = (const float *)prediction->data; const float *label_data = (const float *)label->data; float *output_data = (float *)loss->data; dim3 blocks; dim3 threads; if (nrow <= 1024) { threads.x = nrow; blocks.x = 1; } else { threads.x = 1024; blocks.x = (nrow + 1023) / 1024; } // 1 block if (stream_handle) { binary_cross_entropy_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>( nrow, prediction_data, label_data, output_data); } else { binary_cross_entropy_kernel<<<blocks, threads>>>( nrow, prediction_data, label_data, output_data); } return 0; } __global__ void binary_cross_entropy_gradient_kernel(int nrow, const float *prediction, const float *label, const float *output_grad, float *output) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= nrow) return; output[id] = output_grad[id] * (-label[id]/prediction[id] + (1 - label[id])/(1-prediction[id])); } int DLGpuBinaryCrossEntropy_Gradient(const DLArrayHandle prediction, const DLArrayHandle label, const DLArrayHandle output_grad, DLArrayHandle output, DLStreamHandle stream_handle = NULL) { size_t indim = prediction->ndim; assert (indim >= 2 && indim == label->ndim && indim == output_grad->ndim && indim == output->ndim); int nrow = 1; for (int i = 0; i < indim-1; ++i) { nrow *= prediction->shape[i]; } const float *prediction_data = (const float *)prediction->data; const float *label_data = (const float *)label->data; const float *output_grad_data = (const float *)output_grad->data; float *output_data = (float *)output->data; dim3 blocks; dim3 threads; if (nrow <= 1024) { threads.x = nrow; blocks.x = 1; } else { threads.x = 1024; blocks.x = (nrow + 1023) / 1024; } if (stream_handle) { binary_cross_entropy_gradient_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>( nrow, prediction_data, label_data, output_grad_data, output_data); } else { binary_cross_entropy_gradient_kernel<<<blocks, threads>>>( nrow, prediction_data, label_data, output_grad_data, output_data); } return 0; }
4ef332e028bf8b79edd5e180016c125a2777f691.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // // This sample demonstrates how HyperQ allows supporting devices to avoid false // dependencies between kernels in different streams. // // - Devices without HyperQ will run a maximum of two kernels at a time (one // kernel_A and one kernel_B). // - Devices with HyperQ will run up to 32 kernels simultaneously. #include <stdio.h> #include <helper_functions.h> #include <helper_cuda.h> const char *sSDKsample = "hyperQ"; // This subroutine does no real work but runs for at least the specified number // of clock ticks. __device__ void clock_block(clock_t *d_o, clock_t clock_count) { clock_t start_clock = clock(); clock_t clock_offset = 0; while (clock_offset < clock_count) { clock_offset = clock() - start_clock; } d_o[0] = clock_offset; } // We create two identical kernels calling clock_block(), we create two so that // we can identify dependencies in the profile timeline ("kernel_B" is always // dependent on "kernel_A" in the same stream). __global__ void kernel_A(clock_t *d_o, clock_t clock_count) { clock_block(d_o, clock_count); } __global__ void kernel_B(clock_t *d_o, clock_t clock_count) { clock_block(d_o, clock_count); } // Single-warp reduction kernel (note: this is not optimized for simplicity) __global__ void sum(clock_t *d_clocks, int N) { __shared__ clock_t s_clocks[32]; clock_t my_sum = 0; for (int i = threadIdx.x ; i < N ; i += blockDim.x) { my_sum += d_clocks[i]; } s_clocks[threadIdx.x] = my_sum; __syncthreads(); for (int i = warpSize / 2 ; i > 0 ; i /= 2) { if (threadIdx.x < i) { s_clocks[threadIdx.x] += s_clocks[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { d_clocks[0] = s_clocks[0]; } } int main(int argc, char **argv) { int nstreams = 32; // One stream for each pair of kernels float kernel_time = 10; // Time each kernel should run in ms float elapsed_time; int cuda_device = 0; printf("starting %s...\n", sSDKsample); // Get number of streams (if overridden on the command line) if (checkCmdLineFlag(argc, (const char **)argv, "nstreams")) { nstreams = getCmdLineArgumentInt(argc, (const char **)argv, "nstreams"); } // Use command-line specified CUDA device, otherwise use device with // highest Gflops/s cuda_device = findCudaDevice(argc, (const char **)argv); // Get device properties hipDeviceProp_t deviceProp; checkCudaErrors(hipGetDevice(&cuda_device)); checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device)); // HyperQ is available in devices of Compute Capability 3.5 and higher if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5)) { if (deviceProp.concurrentKernels == 0) { printf("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n"); printf(" CUDA kernel runs will be serialized\n"); } else { printf("> GPU does not support HyperQ\n"); printf(" CUDA kernel runs will have limited concurrency\n"); } } printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); // Allocate host memory for the output (reduced to a single value) clock_t *a = 0; 
checkCudaErrors(hipHostMalloc((void **)&a, sizeof(clock_t))); // Allocate device memory for the output (one value for each kernel) clock_t *d_a = 0; checkCudaErrors(hipMalloc((void **)&d_a, 2 * nstreams * sizeof(clock_t))); // Allocate and initialize an array of stream handles hipStream_t *streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t)); for (int i = 0 ; i < nstreams ; i++) { checkCudaErrors(hipStreamCreate(&(streams[i]))); } // Create CUDA event handles hipEvent_t start_event, stop_event; checkCudaErrors(hipEventCreate(&start_event)); checkCudaErrors(hipEventCreate(&stop_event)); // Target time per kernel is kernel_time ms, clockRate is in KHz // Target number of clocks = target time * clock frequency clock_t time_clocks = (clock_t)(kernel_time * deviceProp.clockRate); clock_t total_clocks = 0; // Start the clock checkCudaErrors(hipEventRecord(start_event, 0)); // Queue pairs of {kernel_A, kernel_B} in separate streams for (int i = 0 ; i < nstreams ; ++i) { hipLaunchKernelGGL(( kernel_A), dim3(1),dim3(1),0,streams[i], &d_a[2*i], time_clocks); total_clocks += time_clocks; hipLaunchKernelGGL(( kernel_B), dim3(1),dim3(1),0,streams[i], &d_a[2*i+1], time_clocks); total_clocks += time_clocks; } // Stop the clock in stream 0 (i.e. all previous kernels will be complete) checkCudaErrors(hipEventRecord(stop_event, 0)); // At this point the CPU has dispatched all work for the GPU and can // continue processing other tasks in parallel. In this sample we just want // to wait until all work is done so we use a blocking hipMemcpy below. // Run the sum kernel and copy the result back to host hipLaunchKernelGGL(( sum), dim3(1),dim3(32), 0, 0, d_a, 2 * nstreams); checkCudaErrors(hipMemcpy(a, d_a, sizeof(clock_t), hipMemcpyDeviceToHost)); // stop_event will have been recorded but including the synchronize here to // prevent copy/paste errors! checkCudaErrors(hipEventSynchronize(stop_event)); checkCudaErrors(hipEventElapsedTime(&elapsed_time, start_event, stop_event)); printf("Expected time for serial execution of %d sets of kernels is between approx. %.3fs and %.3fs\n", nstreams, (nstreams + 1) * kernel_time / 1000.0f, 2 * nstreams *kernel_time / 1000.0f); printf("Expected time for fully concurrent execution of %d sets of kernels is approx. %.3fs\n", nstreams, 2 * kernel_time / 1000.0f); printf("Measured time for sample = %.3fs\n", elapsed_time / 1000.0f); bool bTestResult = (a[0] >= total_clocks); // Release resources for (int i = 0 ; i < nstreams ; i++) { hipStreamDestroy(streams[i]); } free(streams); hipEventDestroy(start_event); hipEventDestroy(stop_event); hipHostFree(a); hipFree(d_a); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE); }
4ef332e028bf8b79edd5e180016c125a2777f691.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // // This sample demonstrates how HyperQ allows supporting devices to avoid false // dependencies between kernels in different streams. // // - Devices without HyperQ will run a maximum of two kernels at a time (one // kernel_A and one kernel_B). // - Devices with HyperQ will run up to 32 kernels simultaneously. #include <stdio.h> #include <helper_functions.h> #include <helper_cuda.h> const char *sSDKsample = "hyperQ"; // This subroutine does no real work but runs for at least the specified number // of clock ticks. __device__ void clock_block(clock_t *d_o, clock_t clock_count) { clock_t start_clock = clock(); clock_t clock_offset = 0; while (clock_offset < clock_count) { clock_offset = clock() - start_clock; } d_o[0] = clock_offset; } // We create two identical kernels calling clock_block(), we create two so that // we can identify dependencies in the profile timeline ("kernel_B" is always // dependent on "kernel_A" in the same stream). __global__ void kernel_A(clock_t *d_o, clock_t clock_count) { clock_block(d_o, clock_count); } __global__ void kernel_B(clock_t *d_o, clock_t clock_count) { clock_block(d_o, clock_count); } // Single-warp reduction kernel (note: this is not optimized for simplicity) __global__ void sum(clock_t *d_clocks, int N) { __shared__ clock_t s_clocks[32]; clock_t my_sum = 0; for (int i = threadIdx.x ; i < N ; i += blockDim.x) { my_sum += d_clocks[i]; } s_clocks[threadIdx.x] = my_sum; __syncthreads(); for (int i = warpSize / 2 ; i > 0 ; i /= 2) { if (threadIdx.x < i) { s_clocks[threadIdx.x] += s_clocks[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) { d_clocks[0] = s_clocks[0]; } } int main(int argc, char **argv) { int nstreams = 32; // One stream for each pair of kernels float kernel_time = 10; // Time each kernel should run in ms float elapsed_time; int cuda_device = 0; printf("starting %s...\n", sSDKsample); // Get number of streams (if overridden on the command line) if (checkCmdLineFlag(argc, (const char **)argv, "nstreams")) { nstreams = getCmdLineArgumentInt(argc, (const char **)argv, "nstreams"); } // Use command-line specified CUDA device, otherwise use device with // highest Gflops/s cuda_device = findCudaDevice(argc, (const char **)argv); // Get device properties cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDevice(&cuda_device)); checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device)); // HyperQ is available in devices of Compute Capability 3.5 and higher if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5)) { if (deviceProp.concurrentKernels == 0) { printf("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n"); printf(" CUDA kernel runs will be serialized\n"); } else { printf("> GPU does not support HyperQ\n"); printf(" CUDA kernel runs will have limited concurrency\n"); } } printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); // Allocate host memory for the output (reduced to a single value) clock_t *a = 0; checkCudaErrors(cudaMallocHost((void **)&a, sizeof(clock_t))); // Allocate device memory for the 
output (one value for each kernel) clock_t *d_a = 0; checkCudaErrors(cudaMalloc((void **)&d_a, 2 * nstreams * sizeof(clock_t))); // Allocate and initialize an array of stream handles cudaStream_t *streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); for (int i = 0 ; i < nstreams ; i++) { checkCudaErrors(cudaStreamCreate(&(streams[i]))); } // Create CUDA event handles cudaEvent_t start_event, stop_event; checkCudaErrors(cudaEventCreate(&start_event)); checkCudaErrors(cudaEventCreate(&stop_event)); // Target time per kernel is kernel_time ms, clockRate is in KHz // Target number of clocks = target time * clock frequency clock_t time_clocks = (clock_t)(kernel_time * deviceProp.clockRate); clock_t total_clocks = 0; // Start the clock checkCudaErrors(cudaEventRecord(start_event, 0)); // Queue pairs of {kernel_A, kernel_B} in separate streams for (int i = 0 ; i < nstreams ; ++i) { kernel_A<<<1,1,0,streams[i]>>>(&d_a[2*i], time_clocks); total_clocks += time_clocks; kernel_B<<<1,1,0,streams[i]>>>(&d_a[2*i+1], time_clocks); total_clocks += time_clocks; } // Stop the clock in stream 0 (i.e. all previous kernels will be complete) checkCudaErrors(cudaEventRecord(stop_event, 0)); // At this point the CPU has dispatched all work for the GPU and can // continue processing other tasks in parallel. In this sample we just want // to wait until all work is done so we use a blocking cudaMemcpy below. // Run the sum kernel and copy the result back to host sum<<<1,32>>>(d_a, 2 * nstreams); checkCudaErrors(cudaMemcpy(a, d_a, sizeof(clock_t), cudaMemcpyDeviceToHost)); // stop_event will have been recorded but including the synchronize here to // prevent copy/paste errors! checkCudaErrors(cudaEventSynchronize(stop_event)); checkCudaErrors(cudaEventElapsedTime(&elapsed_time, start_event, stop_event)); printf("Expected time for serial execution of %d sets of kernels is between approx. %.3fs and %.3fs\n", nstreams, (nstreams + 1) * kernel_time / 1000.0f, 2 * nstreams *kernel_time / 1000.0f); printf("Expected time for fully concurrent execution of %d sets of kernels is approx. %.3fs\n", nstreams, 2 * kernel_time / 1000.0f); printf("Measured time for sample = %.3fs\n", elapsed_time / 1000.0f); bool bTestResult = (a[0] >= total_clocks); // Release resources for (int i = 0 ; i < nstreams ; i++) { cudaStreamDestroy(streams[i]); } free(streams); cudaEventDestroy(start_event); cudaEventDestroy(stop_event); cudaFreeHost(a); cudaFree(d_a); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE); }
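A note on the timing arithmetic in this sample: deviceProp.clockRate is reported in kHz, i.e. clock ticks per millisecond, so kernel_time (ms) * clockRate gives the tick count each clock_block call spins for. With 32 streams of {kernel_A, kernel_B} pairs, fully serial execution takes up to about 2 * 32 * 10 ms, while fully concurrent execution takes only about 2 * 10 ms, since kernel_B still waits for kernel_A within each stream. A quick sketch with an assumed 1.5 GHz device clock:

// Illustrative arithmetic only; the clock rate below is an assumption.
#include <cstdio>

int main() {
    float kernel_time   = 10.0f;       // ms per kernel, as in the sample
    int   clockRate_kHz = 1500000;     // hypothetical 1.5 GHz device clock, in kHz
    int   nstreams      = 32;
    long long time_clocks = (long long)(kernel_time * clockRate_kHz);
    printf("ticks per clock_block call : %lld\n", time_clocks);
    printf("serial estimate (s)        : %.3f\n", 2 * nstreams * kernel_time / 1000.0f);
    printf("concurrent estimate (s)    : %.3f\n", 2 * kernel_time / 1000.0f);
    return 0;
}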
7af80db7ac1901dff5f4f15bf7e2ff72a0f6121e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kernel_prefix_sum_efficient.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *g_idata = NULL; hipMalloc(&g_idata, XSIZE*YSIZE); double *g_odata = NULL; hipMalloc(&g_odata, XSIZE*YSIZE); int l = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kernel_prefix_sum_efficient), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,l); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernel_prefix_sum_efficient), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,l); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kernel_prefix_sum_efficient), dim3(gridBlock),dim3(threadBlock), 0, 0, g_idata,g_odata,l); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
7af80db7ac1901dff5f4f15bf7e2ff72a0f6121e.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kernel_prefix_sum_efficient.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *g_idata = NULL; cudaMalloc(&g_idata, XSIZE*YSIZE); double *g_odata = NULL; cudaMalloc(&g_odata, XSIZE*YSIZE); int l = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kernel_prefix_sum_efficient<<<gridBlock,threadBlock>>>(g_idata,g_odata,l); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kernel_prefix_sum_efficient<<<gridBlock,threadBlock>>>(g_idata,g_odata,l); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kernel_prefix_sum_efficient<<<gridBlock,threadBlock>>>(g_idata,g_odata,l); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
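The harness above rounds iXSIZE and iYSIZE up to multiples of the block dimensions with a while loop before dividing. The usual ceil-division idiom gives the same grid without the loop; shown here purely as an illustration, not as a change to the benchmark:

#include <cstdio>

int main() {
    int XSIZE = 1016, YSIZE = 1016, BLOCKX = 24, BLOCKY = 24;
    int gridX = (XSIZE + BLOCKX - 1) / BLOCKX;   // ceil(XSIZE / BLOCKX)
    int gridY = (YSIZE + BLOCKY - 1) / BLOCKY;   // ceil(YSIZE / BLOCKY)
    printf("grid = (%d, %d)\n", gridX, gridY);
    return 0;
}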
dae8743ce0101bd13081a853eca880515bdcbffb.hip
// !!! This is a file automatically generated by hipify!!! #include <thrust/transform.h> #include <thrust/execution_policy.h> #include "caffe/util/half_util.hpp" #include <hip/hip_fp16.h> namespace caffe { template <typename Dtype> struct __half2floatOp { __device__ Dtype operator()(half v) { return (Dtype)__half2float(v); } }; template <typename Dtype> struct __float2halfOp { __device__ half operator()(Dtype v) { return __float2half((float)v); } }; template <typename Dtype> void THCFloat2Half(/*THCState *state,*/ half *out, Dtype *in, long len) { thrust::transform( #if TORCH_HIP_VERSION >= 7000 //thrust::hip::par.on(THCState_getCurrentStream(state)), thrust::hip::par, #else thrust::device, #endif in, in + len, out, __float2halfOp<Dtype>()); } template <typename Dtype> void THCHalf2Float(/*THCState *state,*/ Dtype *out, half *in, long len) { thrust::transform( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par,//.on(THCState_getCurrentStream(state)), #else thrust::device, #endif in, in + len, out, __half2floatOp<Dtype>()); } template struct __half2floatOp<double>; template struct __half2floatOp<float>; template struct __float2halfOp<double>; template struct __float2halfOp<float>; template void THCFloat2Half<double>(half *out, double *in, long len); template void THCFloat2Half<float>(half *out, float *in, long len); template void THCHalf2Float<double>(double *out, half *in, long len); template void THCHalf2Float<float>(float *out, half *in, long len); float THC_half2float(half a) { unsigned int bits = a.x & 0x7fff; unsigned int sign = a.x & 0x8000; unsigned int exp = a.x & 0x7c00; bits <<= 13; sign <<= 16; bits += 0x38000000U; // flush denormals to 0 bits = (exp == 0 ? 0 : bits) | sign; union { float f; unsigned int v; } conv; conv.v = bits; return conv.f; } /* Copyright (c) 2015, Norbert Juffa All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ half THC_float2half(float a) { uint32_t ia; uint16_t ir; memcpy(&ia, &a, sizeof(float)); ir = (ia >> 16) & 0x8000; if ((ia & 0x7f800000) == 0x7f800000) { if ((ia & 0x7fffffff) == 0x7f800000) { ir |= 0x7c00; /* infinity */ } else { ir = 0x7fff; /* canonical NaN */ } } else if ((ia & 0x7f800000) >= 0x33000000) { int shift = (int)((ia >> 23) & 0xff) - 127; if (shift > 15) { ir |= 0x7c00; /* infinity */ } else { ia = (ia & 0x007fffff) | 0x00800000; /* extract mantissa */ if (shift < -14) { /* denormal */ ir |= ia >> (-1 - shift); ia = ia << (32 - (-1 - shift)); } else { /* normal */ ir |= ia >> (24 - 11); ia = ia << (32 - (24 - 11)); ir = ir + ((14 + shift) << 10); } /* IEEE-754 round to nearest of even */ if ((ia > 0x80000000) || ((ia == 0x80000000) && (ir & 1))) { ir++; } } } half ret; memcpy(&ret, &ir, sizeof(half)); return ret; } //int THC_nativeHalfInstructions(THCState *state) { // hipDeviceProp_t* prop = // THCState_getCurrentDeviceProperties(state); // // // CC 5.3+ // return (prop->major > 5 || // (prop->major == 5 && prop->minor == 3)); //} }
dae8743ce0101bd13081a853eca880515bdcbffb.cu
#include <thrust/transform.h> #include <thrust/execution_policy.h> #include "caffe/util/half_util.hpp" #include <cuda_fp16.h> namespace caffe { template <typename Dtype> struct __half2floatOp { __device__ Dtype operator()(half v) { return (Dtype)__half2float(v); } }; template <typename Dtype> struct __float2halfOp { __device__ half operator()(Dtype v) { return __float2half((float)v); } }; template <typename Dtype> void THCFloat2Half(/*THCState *state,*/ half *out, Dtype *in, long len) { thrust::transform( #if CUDA_VERSION >= 7000 //thrust::cuda::par.on(THCState_getCurrentStream(state)), thrust::cuda::par, #else thrust::device, #endif in, in + len, out, __float2halfOp<Dtype>()); } template <typename Dtype> void THCHalf2Float(/*THCState *state,*/ Dtype *out, half *in, long len) { thrust::transform( #if CUDA_VERSION >= 7000 thrust::cuda::par,//.on(THCState_getCurrentStream(state)), #else thrust::device, #endif in, in + len, out, __half2floatOp<Dtype>()); } template struct __half2floatOp<double>; template struct __half2floatOp<float>; template struct __float2halfOp<double>; template struct __float2halfOp<float>; template void THCFloat2Half<double>(half *out, double *in, long len); template void THCFloat2Half<float>(half *out, float *in, long len); template void THCHalf2Float<double>(double *out, half *in, long len); template void THCHalf2Float<float>(float *out, half *in, long len); float THC_half2float(half a) { unsigned int bits = a.x & 0x7fff; unsigned int sign = a.x & 0x8000; unsigned int exp = a.x & 0x7c00; bits <<= 13; sign <<= 16; bits += 0x38000000U; // flush denormals to 0 bits = (exp == 0 ? 0 : bits) | sign; union { float f; unsigned int v; } conv; conv.v = bits; return conv.f; } /* Copyright (c) 2015, Norbert Juffa All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ half THC_float2half(float a) { uint32_t ia; uint16_t ir; memcpy(&ia, &a, sizeof(float)); ir = (ia >> 16) & 0x8000; if ((ia & 0x7f800000) == 0x7f800000) { if ((ia & 0x7fffffff) == 0x7f800000) { ir |= 0x7c00; /* infinity */ } else { ir = 0x7fff; /* canonical NaN */ } } else if ((ia & 0x7f800000) >= 0x33000000) { int shift = (int)((ia >> 23) & 0xff) - 127; if (shift > 15) { ir |= 0x7c00; /* infinity */ } else { ia = (ia & 0x007fffff) | 0x00800000; /* extract mantissa */ if (shift < -14) { /* denormal */ ir |= ia >> (-1 - shift); ia = ia << (32 - (-1 - shift)); } else { /* normal */ ir |= ia >> (24 - 11); ia = ia << (32 - (24 - 11)); ir = ir + ((14 + shift) << 10); } /* IEEE-754 round to nearest of even */ if ((ia > 0x80000000) || ((ia == 0x80000000) && (ir & 1))) { ir++; } } } half ret; memcpy(&ret, &ir, sizeof(half)); return ret; } //int THC_nativeHalfInstructions(THCState *state) { // cudaDeviceProp* prop = // THCState_getCurrentDeviceProperties(state); // // // CC 5.3+ // return (prop->major > 5 || // (prop->major == 5 && prop->minor == 3)); //} }
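THC_float2half above implements a float-to-binary16 conversion with IEEE round-to-nearest-even, and THC_half2float the denormal-flushing inverse. As an illustration of the rounding path, here is a hedged, self-contained host-side copy of the same bit manipulation that returns a raw uint16_t instead of a half, so it can be checked without cuda_fp16.h; the spot-check values (1.0f -> 0x3C00, 0.5f -> 0x3800, 65504.0f -> 0x7BFF) are the standard binary16 encodings.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Hedged sketch: same conversion logic as THC_float2half, returning the raw bit pattern.
static uint16_t float2half_bits(float a) {
    uint32_t ia;
    uint16_t ir;
    memcpy(&ia, &a, sizeof(float));
    ir = (ia >> 16) & 0x8000;                       // sign bit
    if ((ia & 0x7f800000) == 0x7f800000) {          // Inf or NaN
        ir = ((ia & 0x7fffffff) == 0x7f800000) ? (ir | 0x7c00) : 0x7fff;
    } else if ((ia & 0x7f800000) >= 0x33000000) {   // large enough to be representable
        int shift = (int)((ia >> 23) & 0xff) - 127;
        if (shift > 15) {
            ir |= 0x7c00;                           // overflow -> infinity
        } else {
            ia = (ia & 0x007fffff) | 0x00800000;    // restore the implicit leading 1
            if (shift < -14) {                      // denormal half result
                ir |= ia >> (-1 - shift);
                ia = ia << (32 - (-1 - shift));
            } else {                                // normal half result
                ir |= ia >> (24 - 11);
                ia = ia << (32 - (24 - 11));
                ir = ir + ((14 + shift) << 10);
            }
            // round to nearest, ties to even
            if ((ia > 0x80000000u) || ((ia == 0x80000000u) && (ir & 1))) ir++;
        }
    }
    return ir;                                       // too-small magnitudes fall through as signed zero
}

int main() {
    assert(float2half_bits(1.0f)     == 0x3C00);
    assert(float2half_bits(0.5f)     == 0x3800);
    assert(float2half_bits(65504.0f) == 0x7BFF);     // largest finite binary16
    printf("float2half_bits spot checks passed\n");
    return 0;
}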
9ad423684781728a4e14f7c1a2110d2987294fe7.hip
// !!! This is a file automatically generated by hipify!!! #include <wb.h> #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #define HISTOGRAM_LENGTH 256 #define TILE_WIDTH 16 #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while (0) __global__ void castToUChar(float * inputImage, unsigned char * ucharImage, int imageSize) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index < imageSize) { ucharImage[index] = (unsigned char)(255 * inputImage[index]); } } __global__ void convertToGreyscale(unsigned char * ucharImage, unsigned char * greyImage, int imageWidth, int imageHeight) { int col = blockDim.x*blockIdx.x + threadIdx.x; int row = blockDim.y*blockIdx.y + threadIdx.y; if (col < imageWidth && row < imageHeight) { int index = imageWidth*row + col; unsigned char r = ucharImage[3 * index]; unsigned char g = ucharImage[3 * index + 1]; unsigned char b = ucharImage[3 * index + 2]; greyImage[index] = (unsigned char)(0.21*r + 0.71*g + 0.07*b); } } __global__ void calcHistogram(unsigned char * greyImage, unsigned int * histogram, long imageSize) { __shared__ unsigned int localHistogram[HISTOGRAM_LENGTH]; // Initalize to zero int i = threadIdx.x + blockIdx.x * blockDim.x; if (threadIdx.x < HISTOGRAM_LENGTH) { localHistogram[threadIdx.x] = 0; } __syncthreads(); int stride = blockDim.x * gridDim.x; while (i < imageSize) { atomicAdd(&(localHistogram[greyImage[i]]), 1); i += stride; } // wait for all other threads in the block to finish __syncthreads(); if (threadIdx.x < HISTOGRAM_LENGTH) { atomicAdd(&(histogram[threadIdx.x]), localHistogram[threadIdx.x]); } } __device__ float normalize(int x, float normConstant) { return normConstant*x; } //Cumulative Distribution Function of histogram ////Block size must be HISTOGRAM_LENGTH and grid size must be 1 __global__ void calcCDF(unsigned int * histogram, float * cdf, float normConstant) { __shared__ float localHistogram[HISTOGRAM_LENGTH]; localHistogram[threadIdx.x] = histogram[threadIdx.x]; __syncthreads(); int sum = 0; for (int i = 0; i <= threadIdx.x; ++i) { sum += localHistogram[i]; } cdf[threadIdx.x] = normalize(sum, normConstant); } //Block size must be HISTOGRAM_LENGTH/2 and grid size must be 1 __global__ void minimum(float * cdf, float * result) { __shared__ float partialMin[HISTOGRAM_LENGTH]; int loadIndex; for (int i = 0; i < 2; ++i) { loadIndex = 2 * blockIdx.x*blockDim.x + i*blockDim.x + threadIdx.x; if (loadIndex < HISTOGRAM_LENGTH) { partialMin[i*blockDim.x + threadIdx.x] = cdf[loadIndex]; } else { partialMin[i*blockDim.x + threadIdx.x] = cdf[0]; } } //Traverse the reduction tree int t = threadIdx.x; for (unsigned int stride = blockDim.x; stride > 0; stride /= 2) { __syncthreads(); if (t < stride) { partialMin[t] = min(partialMin[t], partialMin[t + stride]); } } __syncthreads(); if (t == 0) { *result = partialMin[0]; } } __device__ float clamp(float x, float start, float end) { return min(max(x, start), end); } __device__ unsigned char correct_colour(int val, float * cdf, float * cdfmin) { return (unsigned char)clamp(255 * (cdf[val] - cdfmin[0]) / (1 - cdfmin[0]), 0, 255); } __global__ void equalizeImage(unsigned char * ucharImage, float * cdf, float * cdfmin, int imageSize) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index < imageSize) { ucharImage[index] = correct_colour(ucharImage[index], cdf, cdfmin); } } __global__ 
void castToFloat(float * inputImage, unsigned char * ucharImage, int imageSize) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index < imageSize) { inputImage[index] = (float)(ucharImage[index] / 255.0); } } int main(int argc, char ** argv) { wbArg_t args; int imageWidth; int imageHeight; int imageChannels; wbImage_t inputImage; wbImage_t outputImage; float * hostInputImageData; float * hostOutputImageData; const char * inputImageFile; float * deviceInputImageData; unsigned char * deviceGreyImage; unsigned char * deviceUCharImage; unsigned int * deviceHistogram; float * deviceCDF; float * deviceCDFMin; args = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(args, 0); wbTime_start(Generic, "Importing data and creating memory on host"); inputImage = wbImport(inputImageFile); imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = wbImage_getData(outputImage); wbTime_stop(Generic, "Importing data and creating memory on host"); wbTime_start(GPU, "Doing GPU memory allocation"); hipMalloc((void **)&deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); hipMalloc((void **)&deviceUCharImage, imageWidth * imageHeight * imageChannels * sizeof(unsigned char)); hipMalloc((void **)&deviceGreyImage, imageWidth * imageHeight * sizeof(unsigned char)); hipMalloc((void **)&deviceHistogram, HISTOGRAM_LENGTH * sizeof(unsigned int)); hipMemset(deviceHistogram, 0, HISTOGRAM_LENGTH * sizeof(unsigned int)); hipMalloc((void **)&deviceCDF, HISTOGRAM_LENGTH * sizeof(float)); hipMalloc((void **)&deviceCDFMin, sizeof(float)); wbTime_stop(GPU, "Doing GPU memory allocation"); wbTime_start(GPU, "Copying input memory to the GPU."); wbCheck(hipMemcpy( deviceInputImageData, hostInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyHostToDevice )); wbTime_stop(GPU, "Copying input memory to the GPU."); dim3 dimBlock(TILE_WIDTH*TILE_WIDTH, 1, 1); dim3 dimGrid((imageWidth*imageHeight*imageChannels - 1) / (TILE_WIDTH*TILE_WIDTH) + 1, 1, 1); wbTime_start(Compute, "Converting image input to uchar"); hipLaunchKernelGGL(( castToUChar), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceInputImageData, deviceUCharImage, imageWidth*imageHeight*imageChannels); hipDeviceSynchronize(); wbCheck(hipGetLastError()); wbTime_stop(Compute, "Converting image input to uchar"); dimBlock = dim3(TILE_WIDTH, TILE_WIDTH, 1); dimGrid = dim3((imageWidth - 1) / TILE_WIDTH + 1, (imageHeight - 1) / TILE_WIDTH + 1, 1); wbTime_start(Compute, "Converting to greyscale"); hipLaunchKernelGGL(( convertToGreyscale), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceUCharImage, deviceGreyImage, imageWidth, imageHeight); hipDeviceSynchronize(); wbCheck(hipGetLastError()); wbTime_stop(Compute, "Converting to greyscale"); dimBlock = dim3(HISTOGRAM_LENGTH, 1, 1); dimGrid = dim3(6, 1, 1); wbTime_start(Compute, "Calculating histogram"); calcHistogram << <dimGrid, dimBlock >> >(deviceGreyImage, deviceHistogram, imageWidth*imageHeight); hipDeviceSynchronize(); wbCheck(hipGetLastError()); wbTime_stop(Compute, "Calculating histogram"); dimBlock = dim3(HISTOGRAM_LENGTH, 1, 1); dimGrid = dim3(1, 1, 1); wbTime_start(Compute, "Calculating CDF"); hipLaunchKernelGGL(( calcCDF), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceHistogram, deviceCDF, 
(float)(1.0/(imageWidth*imageHeight))); hipDeviceSynchronize(); wbCheck(hipGetLastError()); wbTime_stop(Compute, "Calculating CDF"); dimBlock = dim3(HISTOGRAM_LENGTH/2, 1, 1); dimGrid = dim3(1, 1, 1); wbTime_start(Compute, "Calculating CDF min"); hipLaunchKernelGGL(( minimum), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceCDF, deviceCDFMin); hipDeviceSynchronize(); wbCheck(hipGetLastError()); wbTime_stop(Compute, "Calculating CDF min"); dimBlock = dim3(TILE_WIDTH*TILE_WIDTH, 1, 1); dimGrid = dim3((imageWidth*imageHeight*imageChannels - 1) / (TILE_WIDTH*TILE_WIDTH) + 1, 1, 1); wbTime_start(Compute, "equalize uchar image"); hipLaunchKernelGGL(( equalizeImage), dim3(dimGrid), dim3(dimBlock) , 0, 0, deviceUCharImage, deviceCDF, deviceCDFMin, imageWidth*imageHeight*imageChannels); hipDeviceSynchronize(); wbCheck(hipGetLastError()); wbTime_stop(Compute, "equalize uchar image"); dimBlock = dim3(TILE_WIDTH*TILE_WIDTH, 1, 1); dimGrid = dim3((imageWidth*imageHeight*imageChannels - 1) / (TILE_WIDTH*TILE_WIDTH) + 1, 1, 1); wbTime_start(Compute, "uchar image to float"); castToFloat<< <dimGrid, dimBlock >> >(deviceInputImageData, deviceUCharImage, imageWidth*imageHeight*imageChannels); hipDeviceSynchronize(); wbCheck(hipGetLastError()); wbTime_stop(Compute, "uchar image to float"); wbTime_start(Copy, "Copying data from the GPU"); hipMemcpy( hostOutputImageData, deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying data from the GPU"); wbSolution(args, outputImage); hipFree(deviceInputImageData); hipFree(deviceGreyImage); hipFree(deviceUCharImage); hipFree(deviceHistogram); hipFree(deviceCDF); hipFree(deviceCDFMin); wbImage_delete(outputImage); wbImage_delete(inputImage); return 0; }
9ad423684781728a4e14f7c1a2110d2987294fe7.cu
#include <wb.h> #include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #define HISTOGRAM_LENGTH 256 #define TILE_WIDTH 16 #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while (0) __global__ void castToUChar(float * inputImage, unsigned char * ucharImage, int imageSize) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index < imageSize) { ucharImage[index] = (unsigned char)(255 * inputImage[index]); } } __global__ void convertToGreyscale(unsigned char * ucharImage, unsigned char * greyImage, int imageWidth, int imageHeight) { int col = blockDim.x*blockIdx.x + threadIdx.x; int row = blockDim.y*blockIdx.y + threadIdx.y; if (col < imageWidth && row < imageHeight) { int index = imageWidth*row + col; unsigned char r = ucharImage[3 * index]; unsigned char g = ucharImage[3 * index + 1]; unsigned char b = ucharImage[3 * index + 2]; greyImage[index] = (unsigned char)(0.21*r + 0.71*g + 0.07*b); } } __global__ void calcHistogram(unsigned char * greyImage, unsigned int * histogram, long imageSize) { __shared__ unsigned int localHistogram[HISTOGRAM_LENGTH]; // Initalize to zero int i = threadIdx.x + blockIdx.x * blockDim.x; if (threadIdx.x < HISTOGRAM_LENGTH) { localHistogram[threadIdx.x] = 0; } __syncthreads(); int stride = blockDim.x * gridDim.x; while (i < imageSize) { atomicAdd(&(localHistogram[greyImage[i]]), 1); i += stride; } // wait for all other threads in the block to finish __syncthreads(); if (threadIdx.x < HISTOGRAM_LENGTH) { atomicAdd(&(histogram[threadIdx.x]), localHistogram[threadIdx.x]); } } __device__ float normalize(int x, float normConstant) { return normConstant*x; } //Cumulative Distribution Function of histogram ////Block size must be HISTOGRAM_LENGTH and grid size must be 1 __global__ void calcCDF(unsigned int * histogram, float * cdf, float normConstant) { __shared__ float localHistogram[HISTOGRAM_LENGTH]; localHistogram[threadIdx.x] = histogram[threadIdx.x]; __syncthreads(); int sum = 0; for (int i = 0; i <= threadIdx.x; ++i) { sum += localHistogram[i]; } cdf[threadIdx.x] = normalize(sum, normConstant); } //Block size must be HISTOGRAM_LENGTH/2 and grid size must be 1 __global__ void minimum(float * cdf, float * result) { __shared__ float partialMin[HISTOGRAM_LENGTH]; int loadIndex; for (int i = 0; i < 2; ++i) { loadIndex = 2 * blockIdx.x*blockDim.x + i*blockDim.x + threadIdx.x; if (loadIndex < HISTOGRAM_LENGTH) { partialMin[i*blockDim.x + threadIdx.x] = cdf[loadIndex]; } else { partialMin[i*blockDim.x + threadIdx.x] = cdf[0]; } } //Traverse the reduction tree int t = threadIdx.x; for (unsigned int stride = blockDim.x; stride > 0; stride /= 2) { __syncthreads(); if (t < stride) { partialMin[t] = min(partialMin[t], partialMin[t + stride]); } } __syncthreads(); if (t == 0) { *result = partialMin[0]; } } __device__ float clamp(float x, float start, float end) { return min(max(x, start), end); } __device__ unsigned char correct_colour(int val, float * cdf, float * cdfmin) { return (unsigned char)clamp(255 * (cdf[val] - cdfmin[0]) / (1 - cdfmin[0]), 0, 255); } __global__ void equalizeImage(unsigned char * ucharImage, float * cdf, float * cdfmin, int imageSize) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index < imageSize) { ucharImage[index] = correct_colour(ucharImage[index], cdf, cdfmin); } } __global__ void castToFloat(float * inputImage, unsigned char * ucharImage, int 
imageSize) { int index = blockDim.x*blockIdx.x + threadIdx.x; if (index < imageSize) { inputImage[index] = (float)(ucharImage[index] / 255.0); } } int main(int argc, char ** argv) { wbArg_t args; int imageWidth; int imageHeight; int imageChannels; wbImage_t inputImage; wbImage_t outputImage; float * hostInputImageData; float * hostOutputImageData; const char * inputImageFile; float * deviceInputImageData; unsigned char * deviceGreyImage; unsigned char * deviceUCharImage; unsigned int * deviceHistogram; float * deviceCDF; float * deviceCDFMin; args = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(args, 0); wbTime_start(Generic, "Importing data and creating memory on host"); inputImage = wbImport(inputImageFile); imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = wbImage_getData(outputImage); wbTime_stop(Generic, "Importing data and creating memory on host"); wbTime_start(GPU, "Doing GPU memory allocation"); cudaMalloc((void **)&deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); cudaMalloc((void **)&deviceUCharImage, imageWidth * imageHeight * imageChannels * sizeof(unsigned char)); cudaMalloc((void **)&deviceGreyImage, imageWidth * imageHeight * sizeof(unsigned char)); cudaMalloc((void **)&deviceHistogram, HISTOGRAM_LENGTH * sizeof(unsigned int)); cudaMemset(deviceHistogram, 0, HISTOGRAM_LENGTH * sizeof(unsigned int)); cudaMalloc((void **)&deviceCDF, HISTOGRAM_LENGTH * sizeof(float)); cudaMalloc((void **)&deviceCDFMin, sizeof(float)); wbTime_stop(GPU, "Doing GPU memory allocation"); wbTime_start(GPU, "Copying input memory to the GPU."); wbCheck(cudaMemcpy( deviceInputImageData, hostInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyHostToDevice )); wbTime_stop(GPU, "Copying input memory to the GPU."); dim3 dimBlock(TILE_WIDTH*TILE_WIDTH, 1, 1); dim3 dimGrid((imageWidth*imageHeight*imageChannels - 1) / (TILE_WIDTH*TILE_WIDTH) + 1, 1, 1); wbTime_start(Compute, "Converting image input to uchar"); castToUChar<<<dimGrid, dimBlock>>>(deviceInputImageData, deviceUCharImage, imageWidth*imageHeight*imageChannels); cudaDeviceSynchronize(); wbCheck(cudaGetLastError()); wbTime_stop(Compute, "Converting image input to uchar"); dimBlock = dim3(TILE_WIDTH, TILE_WIDTH, 1); dimGrid = dim3((imageWidth - 1) / TILE_WIDTH + 1, (imageHeight - 1) / TILE_WIDTH + 1, 1); wbTime_start(Compute, "Converting to greyscale"); convertToGreyscale<<<dimGrid, dimBlock>>>(deviceUCharImage, deviceGreyImage, imageWidth, imageHeight); cudaDeviceSynchronize(); wbCheck(cudaGetLastError()); wbTime_stop(Compute, "Converting to greyscale"); dimBlock = dim3(HISTOGRAM_LENGTH, 1, 1); dimGrid = dim3(6, 1, 1); wbTime_start(Compute, "Calculating histogram"); calcHistogram << <dimGrid, dimBlock >> >(deviceGreyImage, deviceHistogram, imageWidth*imageHeight); cudaDeviceSynchronize(); wbCheck(cudaGetLastError()); wbTime_stop(Compute, "Calculating histogram"); dimBlock = dim3(HISTOGRAM_LENGTH, 1, 1); dimGrid = dim3(1, 1, 1); wbTime_start(Compute, "Calculating CDF"); calcCDF<<<dimGrid, dimBlock>>>(deviceHistogram, deviceCDF, (float)(1.0/(imageWidth*imageHeight))); cudaDeviceSynchronize(); wbCheck(cudaGetLastError()); wbTime_stop(Compute, "Calculating CDF"); dimBlock = dim3(HISTOGRAM_LENGTH/2, 1, 1); dimGrid = dim3(1, 1, 
1); wbTime_start(Compute, "Calculating CDF min"); minimum<<<dimGrid, dimBlock>>>(deviceCDF, deviceCDFMin); cudaDeviceSynchronize(); wbCheck(cudaGetLastError()); wbTime_stop(Compute, "Calculating CDF min"); dimBlock = dim3(TILE_WIDTH*TILE_WIDTH, 1, 1); dimGrid = dim3((imageWidth*imageHeight*imageChannels - 1) / (TILE_WIDTH*TILE_WIDTH) + 1, 1, 1); wbTime_start(Compute, "equalize uchar image"); equalizeImage<<<dimGrid, dimBlock >>>(deviceUCharImage, deviceCDF, deviceCDFMin, imageWidth*imageHeight*imageChannels); cudaDeviceSynchronize(); wbCheck(cudaGetLastError()); wbTime_stop(Compute, "equalize uchar image"); dimBlock = dim3(TILE_WIDTH*TILE_WIDTH, 1, 1); dimGrid = dim3((imageWidth*imageHeight*imageChannels - 1) / (TILE_WIDTH*TILE_WIDTH) + 1, 1, 1); wbTime_start(Compute, "uchar image to float"); castToFloat<< <dimGrid, dimBlock >> >(deviceInputImageData, deviceUCharImage, imageWidth*imageHeight*imageChannels); cudaDeviceSynchronize(); wbCheck(cudaGetLastError()); wbTime_stop(Compute, "uchar image to float"); wbTime_start(Copy, "Copying data from the GPU"); cudaMemcpy( hostOutputImageData, deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying data from the GPU"); wbSolution(args, outputImage); cudaFree(deviceInputImageData); cudaFree(deviceGreyImage); cudaFree(deviceUCharImage); cudaFree(deviceHistogram); cudaFree(deviceCDF); cudaFree(deviceCDFMin); wbImage_delete(outputImage); wbImage_delete(inputImage); return 0; }
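The program above is the classic histogram-equalization chain: cast to uchar, greyscale with 0.21/0.71/0.07 weights, 256-bin histogram, normalized CDF, then the per-channel correction 255*(cdf[v]-cdfmin)/(1-cdfmin) clamped to [0,255]. The sketch below is a hedged host-side reference of the same math, useful for cross-checking the GPU output on a small image; it is not part of the original program. Note that because the CDF is non-decreasing, cdf[0] is already the minimum that the GPU `minimum` kernel computes by reduction.

#include <algorithm>
#include <cstdio>
#include <vector>

// Hedged sketch: CPU reference for the correction applied by equalizeImage above.
// `gray` is the greyscale image; `channels` holds the interleaved uchar RGB data.
void equalize_reference(const std::vector<unsigned char> &gray,
                        std::vector<unsigned char> &channels) {
    const int bins = 256;
    std::vector<int> hist(bins, 0);
    for (unsigned char g : gray) hist[g]++;               // 256-bin histogram

    std::vector<float> cdf(bins, 0.0f);
    float scale = 1.0f / gray.size();                     // same normConstant as calcCDF
    int running = 0;
    for (int i = 0; i < bins; ++i) {
        running += hist[i];
        cdf[i] = running * scale;
    }
    float cdfmin = cdf[0];                                 // minimum of a non-decreasing CDF

    for (unsigned char &v : channels) {                    // same formula as correct_colour
        float corrected = 255.0f * (cdf[v] - cdfmin) / (1.0f - cdfmin);
        v = (unsigned char)std::min(std::max(corrected, 0.0f), 255.0f);
    }
}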
cb1a34a22ccbfad87e8f5f94c6109a3fdff3170a.hip
// !!! This is a file automatically generated by hipify!!! /* * This CUDA-Cusparse code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double] * Sparse/Dense matrix-sparse/dense vector multiplication Z=CuMatlab_solve(Sparse/Dense(A),Sparse/Dense(Y)). * AZ=Y -->Z=A\Y * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #include <cusparse_v2.h> #include <cusolverSp.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "SPARSEHELPER.h" #include "ERRORCHK.h" #include <omp.h> // Input Arguments #define INPUTDENSEA prhs[0] #define INPUTDENSEB prhs[1] // Output Arguments #define OUTPUTMATRIX plhs[0] extern "C" static void mexCuMatlab_sparseDDR(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; hipError_t errCode =hipGetDeviceCount(&nDevices); //int nDevices; //hipGetDeviceCount(&nDevices); if (errCode != hipSuccess){ printf("Error! No CUDA devices found! \n"); return; } char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be two."; char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be one."; if ((nrhs!=2)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg); } if ((nlhs!=1)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg); } char *input_buf0; input_buf0 = mxArrayToString(INPUTDENSEA); if ((mxIsChar(INPUTDENSEA))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0); } char *input_buf1; input_buf1 = mxArrayToString(INPUTDENSEB); if ((mxIsChar(INPUTDENSEB))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(SECOND ARGUMENT) must be array, or gpuArray object not %s\n",input_buf1); } if (mxIsGPUArray(INPUTDENSEA) && mxIsGPUArray(INPUTDENSEB)) { mxGPUArray const *INPUTDENSEGPUA; mxGPUArray const *INPUTDENSEGPUB; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); INPUTDENSEGPUA = mxGPUCreateFromMxArray(INPUTDENSEA); INPUTDENSEGPUB = mxGPUCreateFromMxArray(INPUTDENSEB); if((!mxGPUIsSparse(INPUTDENSEGPUA))&& (!mxGPUIsSparse(INPUTDENSEGPUB)) ){ const mwSize *dimsGPUSA; dimsGPUSA=mxGPUGetDimensions(INPUTDENSEGPUA); int numARows, numAColumns; numARows = (int)dimsGPUSA[0]; /* gets number of rows of A */ numAColumns = (int)dimsGPUSA[1]; /* gets number of columns of A */ const mwSize *dimsGPUSB; dimsGPUSB=mxGPUGetDimensions(INPUTDENSEGPUB); int numBRows, numBColumns; numBRows = (int)dimsGPUSB[0]; /* gets number of rows of B */ numBColumns = (int)dimsGPUSB[1]; /* gets number of columns of B */ if ( numARows < numAColumns ) { mxGPUDestroyGPUArray(INPUTDENSEGPUB); mxGPUDestroyGPUArray(INPUTDENSEGPUA); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file,first argument must be a sparse/dense tall (numARows > numAColumns) or square matrix."); } if ( (numBColumns!= 1) ) { mxGPUDestroyGPUArray(INPUTDENSEGPUB); mxGPUDestroyGPUArray(INPUTDENSEGPUA); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, second argument must be a dense/sparse column vector."); } if ( (numBRows!= numARows) ) { mxGPUDestroyGPUArray(INPUTDENSEGPUB); mxGPUDestroyGPUArray(INPUTDENSEGPUA); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, array (matrix-vector) dimensions must agree."); } double const *d_A_dense; d_A_dense = (double const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUA)); mxGPUDestroyGPUArray(INPUTDENSEGPUA); hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle)); hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA)); hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE); int nnzA = 0; // --- Number of nonzero elements in dense matrix A const int lda = numARows; //int *d_nnzPerVectorA; //gpuErrchk(hipMalloc(&d_nnzPerVectorA, numARows * sizeof(*d_nnzPerVectorA))); size_t pivot_pervect1[1] = {numARows}; mxGPUArray *PerVect1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect1, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect1); //double *d_A_dense; gpuErrchk(hipMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense))); //gpuErrchk(hipMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), hipMemcpyHostToDevice)); cusparseSafeCall(hipsparseDnnz(handle, HIPSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA)); //double *d_A; //gpuErrchk(hipMalloc(&d_A, nnzA * sizeof(*d_A))); //int *d_A_RowIndices; //gpuErrchk(hipMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices))); //int *d_A_ColIndices; //gpuErrchk(hipMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices))); size_t pivot_dimensA[1] = {nnzA}; size_t pivot_dimensROW_A[1] = {numARows+1}; size_t pivot_dimensCOL_A[1] = {nnzA}; mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); double *d_A = (double *)mxGPUGetData(A); mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A); mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_A_ColIndices = (int *)mxGPUGetData(COL_A); cusparseSafeCall(hipsparseDdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, 
d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices)); mxGPUDestroyGPUArray(PerVect1); double const *d_B_dense; d_B_dense = (double const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUB)); mxGPUDestroyGPUArray(INPUTDENSEGPUB); const int batchSize = 1; cusolverSpHandle_t handle_cusolver; cusolverSpCreate(&handle_cusolver); csrqrInfo_t info = NULL; size_t internalDataInBytes = 0; size_t workspaceInBytes = 0; void *pBuffer = NULL; cusolverSafeCall(cusolverSpCreateCsrqrInfo(&info)); cusolverSafeCall(cusolverSpXcsrqrAnalysisBatched( handle_cusolver, numARows, numAColumns, nnzA, descrA, d_A_RowIndices, d_A_ColIndices, info)); cusolverSafeCall(cusolverSpDcsrqrBufferInfoBatched( handle_cusolver, numARows, numAColumns, nnzA, descrA, d_A, d_A_RowIndices, d_A_ColIndices, batchSize, info, &internalDataInBytes, &workspaceInBytes)); gpuErrchk(hipMalloc(&pBuffer, workspaceInBytes)); size_t pivot_dimensionsvalueV[1] = {numAColumns}; mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES); double *VALOUT = (double *)mxGPUGetData(VAL); cusolverSafeCall(cusolverSpDcsrqrsvBatched( handle_cusolver, numARows, numAColumns, nnzA, descrA, d_A, d_A_RowIndices, d_A_ColIndices, d_B_dense, VALOUT, batchSize, info, pBuffer)); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(ROW_A); mxGPUDestroyGPUArray(COL_A); gpuErrchk(hipFree(pBuffer)); OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL); mxGPUDestroyGPUArray(VAL); cusolverSpDestroyCsrqrInfo(info); hipsparseDestroyMatDescr(descrA); cusolverSpDestroy(handle_cusolver); hipsparseDestroy(handle); } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } //////////////////////////////////////////////////////////////////////////////////// else if (!(mxIsGPUArray(INPUTDENSEA)) && !(mxIsGPUArray(INPUTDENSEB))){ // if ((mxGetClassID(INPUTSPARSEA) != mxDOUBLE_CLASS) || (mxGetClassID(INPUTSPARSEB) != mxDOUBLE_CLASS)) { // mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", // "Invalid input to MEX file, input(FIRST and SECOND ARGUMENTS) must be double precision."); // } if((!mxIsSparse(INPUTDENSEA))&& (!mxIsSparse(INPUTDENSEB)) ){ mxInitGPU(); const mwSize *dimsCPUA; dimsCPUA=mxGetDimensions(INPUTDENSEA); int numARows = (int)dimsCPUA[0]; /* gets number of rows of A */ int numAColumns = (int)dimsCPUA[1]; /* gets number of columns of A */ const mwSize *dimsCPUB; dimsCPUB=mxGetDimensions(INPUTDENSEB); int numBRows = (int)dimsCPUB[0]; /* gets number of rows of B */ int numBColumns = (int)dimsCPUB[1]; /* gets number of columns of B */ if ( numARows < numAColumns ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file,first argument must be a sparse/dense tall (numARows > numAColumns) or square matrix."); } if ( (numBColumns!= 1) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, second argument must be a dense/sparse column vector."); } if ( (numBRows!= numARows) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, array (matrix-vector) dimensions must agree."); } double *h_A_dense1; h_A_dense1 = (double *)mxGetDoubles(INPUTDENSEA); hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle)); hipsparseMatDescr_t descrA; cusparseSafeCall(hipsparseCreateMatDescr(&descrA)); hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ONE); int nnzA = 0; // --- Number of nonzero elements in dense matrix A const int lda = numARows; //int 
*d_nnzPerVectorA; gpuErrchk(hipMalloc(&d_nnzPerVectorA, numARows * sizeof(*d_nnzPerVectorA))); size_t pivot_pervect1[1] = {numARows}; mxGPUArray *PerVect1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect1, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect1); //double *d_A_dense; gpuErrchk(hipMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense))); size_t pivot_dimensionsvalueDA[2] = {numARows, numAColumns}; mxGPUArray *OUTMA = mxGPUCreateGPUArray(2, (mwSize*) pivot_dimensionsvalueDA, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); double *d_A_dense = (double *)mxGPUGetData(OUTMA); gpuErrchk(hipMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), hipMemcpyHostToDevice)); cusparseSafeCall(hipsparseDnnz(handle, HIPSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA)); //double *d_A; gpuErrchk(hipMalloc(&d_A, nnzA * sizeof(*d_A))); //int *d_A_RowIndices; gpuErrchk(hipMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices))); //int *d_A_ColIndices; gpuErrchk(hipMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices))); size_t pivot_dimensA[1] = {nnzA}; size_t pivot_dimensROW_A[1] = {numARows+1}; size_t pivot_dimensCOL_A[1] = {nnzA}; mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); double *d_A = (double *)mxGPUGetData(A); mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A); mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_A_ColIndices = (int *)mxGPUGetData(COL_A); cusparseSafeCall(hipsparseDdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices)); mxGPUDestroyGPUArray(OUTMA); mxGPUDestroyGPUArray(PerVect1); double *h_B_dense1; h_B_dense1 = (double *)mxGetDoubles(INPUTDENSEB); size_t pivot_dimensionsvalueDB[1] = {numBRows}; mxGPUArray *OUTMB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueDB, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); double *d_B_dense = (double *)mxGPUGetData(OUTMB); gpuErrchk(hipMemcpy(d_B_dense, h_B_dense1, numBRows * sizeof(*d_B_dense), hipMemcpyHostToDevice)); const int batchSize = 1; cusolverSpHandle_t handle_cusolver; cusolverSpCreate(&handle_cusolver); csrqrInfo_t info = NULL; size_t internalDataInBytes = 0; size_t workspaceInBytes = 0; void *pBuffer = NULL; cusolverSafeCall(cusolverSpCreateCsrqrInfo(&info)); cusolverSafeCall(cusolverSpXcsrqrAnalysisBatched( handle_cusolver, numARows, numAColumns, nnzA, descrA, d_A_RowIndices, d_A_ColIndices, info)); cusolverSafeCall(cusolverSpDcsrqrBufferInfoBatched( handle_cusolver, numARows, numAColumns, nnzA, descrA, d_A, d_A_RowIndices, d_A_ColIndices, batchSize, info, &internalDataInBytes, &workspaceInBytes)); gpuErrchk(hipMalloc(&pBuffer, workspaceInBytes)); size_t pivot_dimensionsvalueV[1] = {numAColumns}; mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES); double *VALOUT = (double *)mxGPUGetData(VAL); cusolverSafeCall(cusolverSpDcsrqrsvBatched( handle_cusolver, numARows, numAColumns, nnzA, descrA, d_A, d_A_RowIndices, d_A_ColIndices, d_B_dense, VALOUT, batchSize, info, pBuffer)); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(ROW_A); mxGPUDestroyGPUArray(COL_A); 
gpuErrchk(hipFree(pBuffer)); mxGPUDestroyGPUArray(OUTMB); OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL); mxGPUDestroyGPUArray(VAL); cusolverSpDestroyCsrqrInfo(info); hipsparseDestroyMatDescr(descrA); cusolverSpDestroy(handle_cusolver); hipsparseDestroy(handle); } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } // else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } }
cb1a34a22ccbfad87e8f5f94c6109a3fdff3170a.cu
/* * This CUDA-Cusparse code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double] * Sparse/Dense matrix-sparse/dense vector multiplication Z=CuMatlab_solve(Sparse/Dense(A),Sparse/Dense(Y)). * AZ=Y -->Z=A\Y * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #include <cusparse_v2.h> #include <cusolverSp.h> #include <cuda_runtime_api.h> #include <cuda.h> #include <cuda_runtime.h> #include "SPARSEHELPER.h" #include "ERRORCHK.h" #include <omp.h> // Input Arguments #define INPUTDENSEA prhs[0] #define INPUTDENSEB prhs[1] // Output Arguments #define OUTPUTMATRIX plhs[0] extern "C" static void mexCuMatlab_sparseDDR(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; cudaError_t errCode =cudaGetDeviceCount(&nDevices); //int nDevices; //cudaGetDeviceCount(&nDevices); if (errCode != cudaSuccess){ printf("Error! No CUDA devices found! \n"); return; } char const * const InputErrMsg = "Invalid input to MEX file, number of input arguments must be two."; char const * const OutputErrMsg = "Invalid output to MEX file, number of output arguments must be one."; if ((nrhs!=2)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", InputErrMsg); } if ((nlhs!=1)) { mexErrMsgIdAndTxt("MATLAB:mexatexit:invalidInput", OutputErrMsg); } char *input_buf0; input_buf0 = mxArrayToString(INPUTDENSEA); if ((mxIsChar(INPUTDENSEA))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(FIRST ARGUMENT) must be array, or gpuArray object not %s\n",input_buf0); } char *input_buf1; input_buf1 = mxArrayToString(INPUTDENSEB); if ((mxIsChar(INPUTDENSEB))){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Input(SECOND ARGUMENT) must be array, or gpuArray object not %s\n",input_buf1); } if (mxIsGPUArray(INPUTDENSEA) && mxIsGPUArray(INPUTDENSEB)) { mxGPUArray const *INPUTDENSEGPUA; mxGPUArray const *INPUTDENSEGPUB; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); INPUTDENSEGPUA = mxGPUCreateFromMxArray(INPUTDENSEA); INPUTDENSEGPUB = mxGPUCreateFromMxArray(INPUTDENSEB); if((!mxGPUIsSparse(INPUTDENSEGPUA))&& (!mxGPUIsSparse(INPUTDENSEGPUB)) ){ const mwSize *dimsGPUSA; dimsGPUSA=mxGPUGetDimensions(INPUTDENSEGPUA); int numARows, numAColumns; numARows = (int)dimsGPUSA[0]; /* gets number of rows of A */ numAColumns = (int)dimsGPUSA[1]; /* gets number of columns of A */ const mwSize *dimsGPUSB; dimsGPUSB=mxGPUGetDimensions(INPUTDENSEGPUB); int numBRows, numBColumns; numBRows = (int)dimsGPUSB[0]; /* gets number of rows of B */ numBColumns = (int)dimsGPUSB[1]; /* gets number of columns of B */ if ( numARows < numAColumns ) { mxGPUDestroyGPUArray(INPUTDENSEGPUB); mxGPUDestroyGPUArray(INPUTDENSEGPUA); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file,first argument must be a sparse/dense tall (numARows > numAColumns) or square matrix."); } if ( (numBColumns!= 1) ) { mxGPUDestroyGPUArray(INPUTDENSEGPUB); mxGPUDestroyGPUArray(INPUTDENSEGPUA); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, second argument must be a dense/sparse column vector."); } if ( (numBRows!= numARows) ) { mxGPUDestroyGPUArray(INPUTDENSEGPUB); mxGPUDestroyGPUArray(INPUTDENSEGPUA); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, array (matrix-vector) dimensions must agree."); } double const *d_A_dense; d_A_dense = (double const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUA)); mxGPUDestroyGPUArray(INPUTDENSEGPUA); cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle)); cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA)); cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE); int nnzA = 0; // --- Number of nonzero elements in dense matrix A const int lda = numARows; //int *d_nnzPerVectorA; //gpuErrchk(cudaMalloc(&d_nnzPerVectorA, numARows * sizeof(*d_nnzPerVectorA))); size_t pivot_pervect1[1] = {numARows}; mxGPUArray *PerVect1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect1, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect1); //double *d_A_dense; gpuErrchk(cudaMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense))); //gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), cudaMemcpyHostToDevice)); cusparseSafeCall(cusparseDnnz(handle, CUSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA)); //double *d_A; //gpuErrchk(cudaMalloc(&d_A, nnzA * sizeof(*d_A))); //int *d_A_RowIndices; //gpuErrchk(cudaMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices))); //int *d_A_ColIndices; //gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices))); size_t pivot_dimensA[1] = {nnzA}; size_t pivot_dimensROW_A[1] = {numARows+1}; size_t pivot_dimensCOL_A[1] = {nnzA}; mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); double *d_A = (double *)mxGPUGetData(A); mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A); mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_A_ColIndices = (int *)mxGPUGetData(COL_A); cusparseSafeCall(cusparseDdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, 
d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices)); mxGPUDestroyGPUArray(PerVect1); double const *d_B_dense; d_B_dense = (double const *)(mxGPUGetDataReadOnly(INPUTDENSEGPUB)); mxGPUDestroyGPUArray(INPUTDENSEGPUB); const int batchSize = 1; cusolverSpHandle_t handle_cusolver; cusolverSpCreate(&handle_cusolver); csrqrInfo_t info = NULL; size_t internalDataInBytes = 0; size_t workspaceInBytes = 0; void *pBuffer = NULL; cusolverSafeCall(cusolverSpCreateCsrqrInfo(&info)); cusolverSafeCall(cusolverSpXcsrqrAnalysisBatched( handle_cusolver, numARows, numAColumns, nnzA, descrA, d_A_RowIndices, d_A_ColIndices, info)); cusolverSafeCall(cusolverSpDcsrqrBufferInfoBatched( handle_cusolver, numARows, numAColumns, nnzA, descrA, d_A, d_A_RowIndices, d_A_ColIndices, batchSize, info, &internalDataInBytes, &workspaceInBytes)); gpuErrchk(cudaMalloc(&pBuffer, workspaceInBytes)); size_t pivot_dimensionsvalueV[1] = {numAColumns}; mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES); double *VALOUT = (double *)mxGPUGetData(VAL); cusolverSafeCall(cusolverSpDcsrqrsvBatched( handle_cusolver, numARows, numAColumns, nnzA, descrA, d_A, d_A_RowIndices, d_A_ColIndices, d_B_dense, VALOUT, batchSize, info, pBuffer)); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(ROW_A); mxGPUDestroyGPUArray(COL_A); gpuErrchk(cudaFree(pBuffer)); OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL); mxGPUDestroyGPUArray(VAL); cusolverSpDestroyCsrqrInfo(info); cusparseDestroyMatDescr(descrA); cusolverSpDestroy(handle_cusolver); cusparseDestroy(handle); } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } //////////////////////////////////////////////////////////////////////////////////// else if (!(mxIsGPUArray(INPUTDENSEA)) && !(mxIsGPUArray(INPUTDENSEB))){ // if ((mxGetClassID(INPUTSPARSEA) != mxDOUBLE_CLASS) || (mxGetClassID(INPUTSPARSEB) != mxDOUBLE_CLASS)) { // mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", // "Invalid input to MEX file, input(FIRST and SECOND ARGUMENTS) must be double precision."); // } if((!mxIsSparse(INPUTDENSEA))&& (!mxIsSparse(INPUTDENSEB)) ){ mxInitGPU(); const mwSize *dimsCPUA; dimsCPUA=mxGetDimensions(INPUTDENSEA); int numARows = (int)dimsCPUA[0]; /* gets number of rows of A */ int numAColumns = (int)dimsCPUA[1]; /* gets number of columns of A */ const mwSize *dimsCPUB; dimsCPUB=mxGetDimensions(INPUTDENSEB); int numBRows = (int)dimsCPUB[0]; /* gets number of rows of B */ int numBColumns = (int)dimsCPUB[1]; /* gets number of columns of B */ if ( numARows < numAColumns ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file,first argument must be a sparse/dense tall (numARows > numAColumns) or square matrix."); } if ( (numBColumns!= 1) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, second argument must be a dense/sparse column vector."); } if ( (numBRows!= numARows) ) { mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Invalid input to MEX file, array (matrix-vector) dimensions must agree."); } double *h_A_dense1; h_A_dense1 = (double *)mxGetDoubles(INPUTDENSEA); cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle)); cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA)); cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE); int nnzA = 0; // --- Number of nonzero elements in dense matrix A const int lda = numARows; //int *d_nnzPerVectorA; 
gpuErrchk(cudaMalloc(&d_nnzPerVectorA, numARows * sizeof(*d_nnzPerVectorA))); size_t pivot_pervect1[1] = {numARows}; mxGPUArray *PerVect1 = mxGPUCreateGPUArray(1, (mwSize*) pivot_pervect1, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_nnzPerVectorA = (int*)mxGPUGetData(PerVect1); //double *d_A_dense; gpuErrchk(cudaMalloc(&d_A_dense, numARows * numAColumns * sizeof(*d_A_dense))); size_t pivot_dimensionsvalueDA[2] = {numARows, numAColumns}; mxGPUArray *OUTMA = mxGPUCreateGPUArray(2, (mwSize*) pivot_dimensionsvalueDA, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); double *d_A_dense = (double *)mxGPUGetData(OUTMA); gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense1, numARows * numAColumns * sizeof(*d_A_dense), cudaMemcpyHostToDevice)); cusparseSafeCall(cusparseDnnz(handle, CUSPARSE_DIRECTION_ROW, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA)); //double *d_A; gpuErrchk(cudaMalloc(&d_A, nnzA * sizeof(*d_A))); //int *d_A_RowIndices; gpuErrchk(cudaMalloc(&d_A_RowIndices, (numARows + 1) * sizeof(*d_A_RowIndices))); //int *d_A_ColIndices; gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices))); size_t pivot_dimensA[1] = {nnzA}; size_t pivot_dimensROW_A[1] = {numARows+1}; size_t pivot_dimensCOL_A[1] = {nnzA}; mxGPUArray *A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensA, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); double *d_A = (double *)mxGPUGetData(A); mxGPUArray * ROW_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensROW_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_A_RowIndices = (int *)mxGPUGetData(ROW_A); mxGPUArray * COL_A = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensCOL_A, mxINT32_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); int *d_A_ColIndices = (int *)mxGPUGetData(COL_A); cusparseSafeCall(cusparseDdense2csr(handle, numARows, numAColumns, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices)); mxGPUDestroyGPUArray(OUTMA); mxGPUDestroyGPUArray(PerVect1); double *h_B_dense1; h_B_dense1 = (double *)mxGetDoubles(INPUTDENSEB); size_t pivot_dimensionsvalueDB[1] = {numBRows}; mxGPUArray *OUTMB = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueDB, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); double *d_B_dense = (double *)mxGPUGetData(OUTMB); gpuErrchk(cudaMemcpy(d_B_dense, h_B_dense1, numBRows * sizeof(*d_B_dense), cudaMemcpyHostToDevice)); const int batchSize = 1; cusolverSpHandle_t handle_cusolver; cusolverSpCreate(&handle_cusolver); csrqrInfo_t info = NULL; size_t internalDataInBytes = 0; size_t workspaceInBytes = 0; void *pBuffer = NULL; cusolverSafeCall(cusolverSpCreateCsrqrInfo(&info)); cusolverSafeCall(cusolverSpXcsrqrAnalysisBatched( handle_cusolver, numARows, numAColumns, nnzA, descrA, d_A_RowIndices, d_A_ColIndices, info)); cusolverSafeCall(cusolverSpDcsrqrBufferInfoBatched( handle_cusolver, numARows, numAColumns, nnzA, descrA, d_A, d_A_RowIndices, d_A_ColIndices, batchSize, info, &internalDataInBytes, &workspaceInBytes)); gpuErrchk(cudaMalloc(&pBuffer, workspaceInBytes)); size_t pivot_dimensionsvalueV[1] = {numAColumns}; mxGPUArray *VAL = mxGPUCreateGPUArray(1, (mwSize*) pivot_dimensionsvalueV, mxDOUBLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES); double *VALOUT = (double *)mxGPUGetData(VAL); cusolverSafeCall(cusolverSpDcsrqrsvBatched( handle_cusolver, numARows, numAColumns, nnzA, descrA, d_A, d_A_RowIndices, d_A_ColIndices, d_B_dense, VALOUT, batchSize, info, pBuffer)); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(ROW_A); mxGPUDestroyGPUArray(COL_A); gpuErrchk(cudaFree(pBuffer)); 
mxGPUDestroyGPUArray(OUTMB); OUTPUTMATRIX = mxGPUCreateMxArrayOnGPU(VAL); mxGPUDestroyGPUArray(VAL); cusolverSpDestroyCsrqrInfo(info); cusparseDestroyMatDescr(descrA); cusolverSpDestroy(handle_cusolver); cusparseDestroy(handle); } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } // else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } }
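Both input paths of the MEX routine above funnel into the same cuSOLVER sequence: CSR-QR analysis, buffer sizing, then cusolverSpDcsrqrsvBatched to solve the (least-squares) system. The following is a minimal, hedged standalone sketch of that call sequence on a trivial 3x3 identity system; unlike the MEX code it builds the CSR matrix on the host with 0-based indexing and omits the error-checking wrappers. Link with -lcusolver -lcusparse.

#include <cstdio>
#include <cuda_runtime.h>
#include <cusparse_v2.h>
#include <cusolverSp.h>

// Hedged sketch: solve A*x = b for a 3x3 identity A stored in 0-based CSR,
// mirroring the batched CSR-QR calls used by mexCuMatlab_sparseDDR above.
int main() {
    const int m = 3, n = 3, nnz = 3, batchSize = 1;
    int hRowPtr[4] = {0, 1, 2, 3};
    int hColInd[3] = {0, 1, 2};
    double hVal[3] = {1.0, 1.0, 1.0};
    double hB[3] = {1.0, 2.0, 3.0};
    double hX[3] = {0.0, 0.0, 0.0};

    int *dRowPtr, *dColInd;
    double *dVal, *dB, *dX;
    cudaMalloc(&dRowPtr, sizeof(hRowPtr));
    cudaMalloc(&dColInd, sizeof(hColInd));
    cudaMalloc(&dVal, sizeof(hVal));
    cudaMalloc(&dB, sizeof(hB));
    cudaMalloc(&dX, sizeof(hX));
    cudaMemcpy(dRowPtr, hRowPtr, sizeof(hRowPtr), cudaMemcpyHostToDevice);
    cudaMemcpy(dColInd, hColInd, sizeof(hColInd), cudaMemcpyHostToDevice);
    cudaMemcpy(dVal, hVal, sizeof(hVal), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice);

    cusolverSpHandle_t solver;
    cusolverSpCreate(&solver);
    cusparseMatDescr_t descrA;
    cusparseCreateMatDescr(&descrA);
    cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
    cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO);   // 0-based here, unlike the MEX code

    csrqrInfo_t info = NULL;
    cusolverSpCreateCsrqrInfo(&info);
    size_t internalBytes = 0, workspaceBytes = 0;
    void *pBuffer = NULL;
    cusolverSpXcsrqrAnalysisBatched(solver, m, n, nnz, descrA, dRowPtr, dColInd, info);
    cusolverSpDcsrqrBufferInfoBatched(solver, m, n, nnz, descrA, dVal, dRowPtr, dColInd,
                                      batchSize, info, &internalBytes, &workspaceBytes);
    cudaMalloc(&pBuffer, workspaceBytes);
    cusolverSpDcsrqrsvBatched(solver, m, n, nnz, descrA, dVal, dRowPtr, dColInd,
                              dB, dX, batchSize, info, pBuffer);

    cudaMemcpy(hX, dX, sizeof(hX), cudaMemcpyDeviceToHost);
    printf("x = [%f %f %f], expected [1 2 3]\n", hX[0], hX[1], hX[2]);

    cudaFree(pBuffer); cudaFree(dRowPtr); cudaFree(dColInd);
    cudaFree(dVal); cudaFree(dB); cudaFree(dX);
    cusolverSpDestroyCsrqrInfo(info);
    cusparseDestroyMatDescr(descrA);
    cusolverSpDestroy(solver);
    return 0;
}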
627a1cddd8dfefa593492e04e56b91c2bb0081a6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "symbols/NaN.cuh" __inline__ __device__ float sigmoid (float x) { return 1.0 / (1.0 + expf (-x)); } __global__ void sigmoidKernel ( int batchSize, int numberRows, int numberEntriesPerInstance, int numberIterations, float* source, float* destination) { int indexInstance = blockIdx.x; int indexColumn = blockIdx.y; int startInstanceWithinBatch = indexInstance * numberEntriesPerInstance; int startColumnWithinInstance = indexColumn * numberRows; int startRowWithinColumn = threadIdx.x * numberIterations; int firstEntryWithinBatch = startInstanceWithinBatch + startColumnWithinInstance + startRowWithinColumn; int startNextColumn = startInstanceWithinBatch + startColumnWithinInstance + numberRows; if(firstEntryWithinBatch < startNextColumn) { int lastEntryWithinBatch = min(firstEntryWithinBatch + numberIterations, startNextColumn); if(indexInstance < batchSize) { for(int indexEntry = firstEntryWithinBatch; indexEntry < lastEntryWithinBatch; indexEntry++) { destination[indexEntry] = sigmoid(source[indexEntry]); } } else { setToNaN(destination, firstEntryWithinBatch, lastEntryWithinBatch); } } }
627a1cddd8dfefa593492e04e56b91c2bb0081a6.cu
#include "symbols/NaN.cuh" __inline__ __device__ float sigmoid (float x) { return 1.0 / (1.0 + expf (-x)); } __global__ void sigmoidKernel ( int batchSize, int numberRows, int numberEntriesPerInstance, int numberIterations, float* source, float* destination) { int indexInstance = blockIdx.x; int indexColumn = blockIdx.y; int startInstanceWithinBatch = indexInstance * numberEntriesPerInstance; int startColumnWithinInstance = indexColumn * numberRows; int startRowWithinColumn = threadIdx.x * numberIterations; int firstEntryWithinBatch = startInstanceWithinBatch + startColumnWithinInstance + startRowWithinColumn; int startNextColumn = startInstanceWithinBatch + startColumnWithinInstance + numberRows; if(firstEntryWithinBatch < startNextColumn) { int lastEntryWithinBatch = min(firstEntryWithinBatch + numberIterations, startNextColumn); if(indexInstance < batchSize) { for(int indexEntry = firstEntryWithinBatch; indexEntry < lastEntryWithinBatch; indexEntry++) { destination[indexEntry] = sigmoid(source[indexEntry]); } } else { setToNaN(destination, firstEntryWithinBatch, lastEntryWithinBatch); } } }
456e6c894baa4f1384ac76dcfdd51600239cffa5.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reduceInterleavedFloat.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Allocate XSIZE*YSIZE floats (the original passed a byte count of XSIZE*YSIZE,
            // which under-allocates the float buffers by a factor of sizeof(float)).
            float *g_idata = NULL;
            hipMalloc(&g_idata, XSIZE * YSIZE * sizeof(float));
            float *g_odata = NULL;
            hipMalloc(&g_odata, XSIZE * YSIZE * sizeof(float));
            unsigned int n = 1;
            // Round the problem size up to a multiple of the block dimensions.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            // One launch to establish the context, then a short warm-up loop.
            hipLaunchKernelGGL((reduceInterleavedFloat), dim3(gridBlock), dim3(threadBlock), 0, 0, g_idata, g_odata, n);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((reduceInterleavedFloat), dim3(gridBlock), dim3(threadBlock), 0, 0, g_idata, g_odata, n);
            }
            // Timed loop: without a synchronize before `end`, this mostly measures the
            // enqueue overhead of 1000 asynchronous kernel launches.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((reduceInterleavedFloat), dim3(gridBlock), dim3(threadBlock), 0, 0, g_idata, g_odata, n);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // Free the per-configuration buffers so repeated iterations do not leak device memory.
            hipFree(g_idata);
            hipFree(g_odata);
        }
    }
}
456e6c894baa4f1384ac76dcfdd51600239cffa5.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reduceInterleavedFloat.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Allocate XSIZE*YSIZE floats (the original passed a byte count of XSIZE*YSIZE,
            // which under-allocates the float buffers by a factor of sizeof(float)).
            float *g_idata = NULL;
            cudaMalloc(&g_idata, XSIZE * YSIZE * sizeof(float));
            float *g_odata = NULL;
            cudaMalloc(&g_odata, XSIZE * YSIZE * sizeof(float));
            unsigned int n = 1;
            // Round the problem size up to a multiple of the block dimensions.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            // One launch to establish the context, then a short warm-up loop.
            reduceInterleavedFloat<<<gridBlock, threadBlock>>>(g_idata, g_odata, n);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                reduceInterleavedFloat<<<gridBlock, threadBlock>>>(g_idata, g_odata, n);
            }
            // Timed loop: without a synchronize before `end`, this mostly measures the
            // enqueue overhead of 1000 asynchronous kernel launches.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                reduceInterleavedFloat<<<gridBlock, threadBlock>>>(g_idata, g_odata, n);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // Free the per-configuration buffers so repeated iterations do not leak device memory.
            cudaFree(g_idata);
            cudaFree(g_odata);
        }
    }
}
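The kernel under test here, reduceInterleavedFloat, lives in the included reduceInterleavedFloat.cu and its body is not shown in this dump. For reference, a textbook interleaved-pair block reduction over floats looks roughly like the hedged sketch below (the stride starts at half the block size and halves each step, and each block writes one partial sum); the real kernel may differ, and this version assumes n is a multiple of blockDim.x.

// Hedged reference sketch of an interleaved-pair reduction, not the included kernel.
__global__ void reduceInterleavedRef(float *g_idata, float *g_odata, unsigned int n) {
    unsigned int tid = threadIdx.x;
    // Each block reduces its own contiguous slice of the input in place.
    float *idata = g_idata + blockIdx.x * blockDim.x;
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) {
            idata[tid] += idata[tid + stride];   // pair element tid with tid + stride
        }
        __syncthreads();                          // all partial sums for this stride must land first
    }
    if (tid == 0) {
        g_odata[blockIdx.x] = idata[0];           // one partial sum per block
    }
}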
00df383297b68ab8751df3790d75d29745c621dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernels/gptKernels.h" #include "kernels/transformerKernels.h" #include "model/gpt_encoder.h" /** @file GPT encoder, composed by gemm lib and custom cuda kernel function */ // #define DEBUG_RESULT namespace lightseq { namespace cuda { template <OperationType OpType_> GptEncoder<OpType_>::GptEncoder(int max_batch_size, const int *p_d_token_id, float *p_d_ppl, int *p_d_sample_id, const GptWeight<OpType_> &tw, hipStream_t stream, hipStream_t cache_stream, hipblasHandle_t hd) : _max_batch_size(max_batch_size), _p_d_token_id(p_d_token_id), _p_d_ppl(p_d_ppl), _p_d_sample_id(p_d_sample_id), _tw(tw), _stream(stream), _cache_stream(cache_stream), _hd(hd), _p_d_src_emb_wei(tw.get_src_emb_wei()), _p_d_enc_wei(tw.get_enc_wei()), _fone((_DataType)1.f), _fzero((_DataType)0.f), _atten_scaler((_DataType)sqrt(1.f / tw._dim_per_head)), _max_batch_dim(max_batch_size * tw._max_step * tw._hidden_size), _max_thread_per_block(1024), _h_real_seq_len(max_batch_size, 0), _h_ppl(max_batch_size, 0.f), _h_sample_id(max_batch_size * tw._max_step, 0), _h_unfinished(1) {} /** Compute GPU memory size needed by gpt encoder, to see how these memory is used, checkout init_buffer() for detail */ template <OperationType OpType_> size_t GptEncoder<OpType_>::compute_buffer_bytesize() { int si = _max_batch_size; size_t sz0 = (size_t)_max_batch_dim; sz0 += 2 * (size_t)_max_batch_dim * (size_t)_tw._n_enc_layer; long long sz1 = (size_t)_max_batch_dim * 6 + (size_t)_max_batch_size * (size_t)_tw._head_num * (size_t)_tw._max_step * (size_t)_tw._max_step; long long sz2 = (size_t)_max_batch_dim + (size_t)_max_batch_size * (size_t)_tw._max_step * (size_t)_tw._inner_size; long long sz3 = (size_t)_max_batch_size * (size_t)_tw._max_step * (size_t)_tw._src_vocab_size; return (sz0 + max(max(sz1, sz2), sz3)) * sizeof(_DataType) + si * sizeof(int); } /** Init the GPU memory pointer which point to the memory buffer needed by encoder. 
These buffer are used during custom cuda kernel function, find the corresponding function to see how these buffer are used */ template <OperationType OpType_> void GptEncoder<OpType_>::init_buffer(void *pbuf) { // int buffer int *p_d_int = reinterpret_cast<int *>(pbuf); _p_d_real_seq_len = p_d_int; p_d_int += _max_batch_size; // datatype buffer _DataType *p_d_datatype = reinterpret_cast<_DataType *>(p_d_int); _p_d_query = p_d_datatype; _p_d_k_cache = _p_d_query + _max_batch_dim; _p_d_v_cache = _p_d_k_cache + _max_batch_dim * _tw._n_enc_layer; p_d_datatype = _p_d_v_cache + _max_batch_dim * _tw._n_enc_layer; // reuse 1 --------------------- _p_d_qkv_projected = p_d_datatype; _p_d_q = _p_d_qkv_projected + _max_batch_dim * 3; _p_d_k = _p_d_q + _max_batch_dim; _p_d_v = _p_d_k + _max_batch_dim; // _max_batch_size * _tw._head_num * // _tw._max_step * _tw._max_step _p_d_c = _p_d_v + _max_batch_dim; // reuse 2 --------------------- _p_d_ffn_buf1 = p_d_datatype; // _max_batch_size * _tw._max_step * _tw._inner_size _p_d_ffn_buf2 = _p_d_ffn_buf1 + _max_batch_dim; // reuse 3 --------------------- // _max_batch_size * _tw._max_step * _tw._src_vocab_size _p_d_logit = p_d_datatype; CHECK_GPU_ERROR(hipMalloc((void **)&_p_d_curandstate, _max_batch_size * sizeof(hiprandState_t))); CHECK_GPU_ERROR(hipMalloc((void **)&_p_d_sample_id_buf, _max_batch_size * _tw._max_step * sizeof(int))); CHECK_GPU_ERROR(hipMalloc((void **)&_p_d_unfinished, sizeof(int))); hipLaunchKernelGGL(( ker_curand_setup), dim3(_max_batch_size), dim3(1), 0, _stream, _p_d_curandstate); return; } /** Some requirements needed by custom cuda kernel function */ template <OperationType OpType_> std::string GptEncoder<OpType_>::check() { // if (_max_thread_per_block < _tw._hidden_size) { // return "violate hidden_size <= max_thread_per_block"; // } if (_tw._inner_size & 1) { return "violate inner_size % 2 = 0"; } if (_tw._dim_per_head & 1) { return "violate dim_per_head % 2 = 0"; } if (_p_d_src_emb_wei.size() != 4) { return "violate p_d_src_emb_wei.size() = 4"; } if (_p_d_enc_wei.size() != _tw._weight_per_enc_layer * _tw._n_enc_layer) { return "violate p_d_enc_wei.size() = weight_per_enc_layer * n_enc_layer"; } return ""; } template <OperationType OpType_> void GptEncoder<OpType_>::run_one_infer(int batch_size, int batch_seq_len) { _batch_size = batch_size; _batch_seq_len = batch_seq_len; _batch_token_num = batch_size * batch_seq_len; CHECK_GPU_ERROR(hipMemcpyAsync(_p_d_real_seq_len, _h_real_seq_len.data(), sizeof(int) * _batch_size, hipMemcpyHostToDevice, _stream)); CHECK_GPU_ERROR(hipMemcpyAsync(_p_d_ppl, _h_ppl.data(), sizeof(float) * _batch_size, hipMemcpyHostToDevice, _stream)); #ifdef DEBUG_RESULT std::cout << "batch_size-" << batch_size << " batch_seq_len-" << batch_seq_len << std::endl; print_vec(_p_d_token_id, "batch_token_ids", batch_size * batch_seq_len); #endif // token embedding, add position embedding and layer_norm ker_gpt_embedding_launcher<_DataType>( batch_size, batch_seq_len, _tw._hidden_size, _stream, _p_d_src_emb_wei[0], _p_d_src_emb_wei[1], _p_d_token_id, _p_d_query, _p_d_real_seq_len, _tw._padding_id, 0); #ifdef DEBUG_RESULT print_vec(_p_d_query, "input embeddings", _batch_token_num * _tw._hidden_size - 5, _batch_token_num * _tw._hidden_size); #endif for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) { _weight_offset = _layer_id * _tw._weight_per_enc_layer; self_attention(); ffn_add_norm(); } // last layer norm ker_norm_layer_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_query, 
_p_d_src_emb_wei[2], _p_d_src_emb_wei[3], _max_thread_per_block); compute_ppl(); return; } template <OperationType OpType_> int GptEncoder<OpType_>::run_one_sample(int batch_size, int batch_seq_len) { _batch_size = batch_size; _batch_seq_len = batch_seq_len; _batch_token_num = batch_size * batch_seq_len; if (_batch_seq_len >= _tw._max_step) { return _batch_seq_len; } CHECK_GPU_ERROR(hipMemcpyAsync(_p_d_real_seq_len, _h_real_seq_len.data(), sizeof(int) * _batch_size, hipMemcpyHostToDevice, _stream)); CHECK_GPU_ERROR(hipMemcpyAsync(_p_d_ppl, _h_ppl.data(), sizeof(float) * _batch_size, hipMemcpyHostToDevice, _stream)); CHECK_GPU_ERROR(hipMemcpyAsync(_p_d_sample_id, _p_d_token_id, sizeof(int) * _batch_size * _tw._max_step, hipMemcpyDeviceToDevice, _stream)); #ifdef DEBUG_RESULT std::cout << "batch_size-" << batch_size << " batch_seq_len-" << batch_seq_len << std::endl; std::cout << "Sample with " << _tw._sampling_method << std::endl; std::cout << "padding_id: " << _tw._padding_id << std::endl; std::cout << "vocab_size: " << _tw._src_vocab_size << std::endl; print_vec(_p_d_sample_id, "batch_token_ids", batch_size * batch_seq_len); #endif // token embedding, add position embedding and layer_norm ker_gpt_embedding_launcher<_DataType>( _batch_size, _batch_seq_len, _tw._hidden_size, _stream, _p_d_src_emb_wei[0], _p_d_src_emb_wei[1], _p_d_sample_id, _p_d_query, _p_d_real_seq_len, _tw._padding_id, 0); #ifdef DEBUG_RESULT print_vec(_p_d_query, "embedding", _batch_token_num * _tw._hidden_size - 10, _batch_token_num * _tw._hidden_size); #endif for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) { _weight_offset = _layer_id * _tw._weight_per_enc_layer; self_attention(true); ffn_add_norm(); } // last layer norm ker_norm_layer_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_query, _p_d_src_emb_wei[2], _p_d_src_emb_wei[3], _max_thread_per_block); if (sample_one_token() == 0 || _batch_seq_len >= _tw._max_step) { CHECK_GPU_ERROR(hipMemcpyAsync(_p_d_sample_id_buf, _p_d_sample_id, _batch_token_num * sizeof(int), hipMemcpyDeviceToDevice, _stream)); CHECK_GPU_ERROR(hipStreamSynchronize(_stream)); return _batch_seq_len; } while (1) { #ifdef DEBUG_RESULT std::cout << "before sample:batch_size-" << _batch_size << " batch_seq_len-" << _batch_seq_len << std::endl; print_vec(_p_d_sample_id, "batch_token_ids", _batch_token_num); #endif // token embedding, add position embedding and layer_norm ker_gpt_embedding_launcher<_DataType>( _batch_size, 1, _tw._hidden_size, _stream, _p_d_src_emb_wei[0], _p_d_src_emb_wei[1], _p_d_last_sample_id, _p_d_query, _p_d_real_seq_len, _tw._padding_id, _batch_seq_len - 1); #ifdef DEBUG_RESULT print_vec(_p_d_query, "embedding", _batch_size * _tw._hidden_size - 10, _batch_size * _tw._hidden_size); #endif for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) { _weight_offset = _layer_id * _tw._weight_per_enc_layer; self_attention_with_cache(); ffn_add_norm_with_cache(); } // last layer norm ker_norm_layer_launcher<_DataType>( _batch_size, _tw._hidden_size, _stream, _p_d_query, _p_d_src_emb_wei[2], _p_d_src_emb_wei[3], _max_thread_per_block); #ifdef DEBUG_RESULT print_vec(_p_d_query, "_p_d_query before logits", _batch_size * _tw._hidden_size - 10, _batch_size * _tw._hidden_size); if (sample_one_token_with_cache() == 0 || _batch_seq_len >= _tw._max_step) break; #else if (sample_one_token_with_cache() == 0 || _batch_seq_len >= _tw._max_step) break; #endif } CHECK_GPU_ERROR(hipMemcpyAsync(_p_d_sample_id_buf, _p_d_sample_id, _batch_token_num * sizeof(int), 
hipMemcpyDeviceToDevice, _stream)); CHECK_GPU_ERROR(hipStreamSynchronize(_stream)); return _batch_seq_len; } template <OperationType OpType_> int GptEncoder<OpType_>::sample_one_token() { /* ---step 1. project hidden states to vocab logits--- */ CHECK_GPU_ERROR(hipblasGemmEx( _hd, HIPBLAS_OP_T, HIPBLAS_OP_N, _tw._src_vocab_size, _batch_token_num, _tw._hidden_size, &_fone, _p_d_src_emb_wei[0], _AType, _tw._hidden_size, _p_d_query, _BType, _tw._hidden_size, &_fzero, _p_d_logit, _CType, _tw._src_vocab_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT print_vec(_p_d_logit, "logits", _batch_token_num * _tw._src_vocab_size - 10, _batch_token_num * _tw._src_vocab_size); #endif CHECK_GPU_ERROR(hipMemsetAsync(_p_d_unfinished, 0, sizeof(int), _stream)); /* ---step 2. sample new tokens from logits */ if (_tw._sampling_method == "topk") { #ifdef DEBUG_RESULT std::cout << "sampling using topk\n"; #endif ker_topk_sample_launcher<_DataType>( _batch_size, _batch_seq_len, _batch_seq_len, _max_thread_per_block, _stream, _p_d_logit, _p_d_sample_id, _p_d_sample_id_buf, _p_d_real_seq_len, _tw._src_vocab_size, _tw._topk, _p_d_unfinished, _p_d_curandstate, _tw._eos_id); } else { #ifdef DEBUG_RESULT std::cout << "sampling using topp\n"; #endif ker_topp_sample_launcher<_DataType>( _batch_size, _batch_seq_len, _batch_seq_len, _max_thread_per_block, _stream, _p_d_logit, _p_d_sample_id, _p_d_sample_id_buf, _p_d_real_seq_len, _tw._src_vocab_size, _tw._topp, _p_d_unfinished, _p_d_curandstate, _tw._eos_id); } int *temp = _p_d_sample_id; _p_d_sample_id = _p_d_sample_id_buf; _p_d_sample_id_buf = temp; CHECK_GPU_ERROR(hipMemcpyAsync(&_h_unfinished, _p_d_unfinished, sizeof(int), hipMemcpyDeviceToHost, _stream)); CHECK_GPU_ERROR(hipStreamSynchronize(_stream)); _p_d_last_sample_id = _p_d_sample_id_buf + _batch_token_num; _batch_seq_len++; _batch_token_num += _batch_size; return _h_unfinished; } template <OperationType OpType_> int GptEncoder<OpType_>::sample_one_token_with_cache() { /* ---step 1. project hidden states to vocab logits--- */ CHECK_GPU_ERROR(hipblasGemmEx( _hd, HIPBLAS_OP_T, HIPBLAS_OP_N, _tw._src_vocab_size, _batch_size, _tw._hidden_size, &_fone, _p_d_src_emb_wei[0], _AType, _tw._hidden_size, _p_d_query, _BType, _tw._hidden_size, &_fzero, _p_d_logit, _CType, _tw._src_vocab_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT print_vec(_p_d_logit, "sampling-logits", _batch_size * _tw._src_vocab_size - 5, _batch_size * _tw._src_vocab_size); #endif CHECK_GPU_ERROR(hipMemsetAsync(_p_d_unfinished, 0, sizeof(int), _stream)); // /* ---step 2. 
sample new tokens from logits */ if (_tw._sampling_method == "topk") { #ifdef DEBUG_RESULT std::cout << "sampling using topk\n"; #endif ker_topk_sample_launcher<_DataType>( _batch_size, _batch_seq_len, 1, _max_thread_per_block, _stream, _p_d_logit, _p_d_sample_id, _p_d_sample_id_buf, _p_d_real_seq_len, _tw._src_vocab_size, _tw._topk, _p_d_unfinished, _p_d_curandstate, _tw._eos_id); } else { #ifdef DEBUG_RESULT std::cout << "sampling using topp\n"; #endif ker_topp_sample_launcher<_DataType>( _batch_size, _batch_seq_len, 1, _max_thread_per_block, _stream, _p_d_logit, _p_d_sample_id, _p_d_sample_id_buf, _p_d_real_seq_len, _tw._src_vocab_size, _tw._topp, _p_d_unfinished, _p_d_curandstate, _tw._eos_id); } int *temp = _p_d_sample_id; _p_d_sample_id = _p_d_sample_id_buf; _p_d_sample_id_buf = temp; CHECK_GPU_ERROR(hipMemcpyAsync(&_h_unfinished, _p_d_unfinished, sizeof(int), hipMemcpyDeviceToHost, _stream)); CHECK_GPU_ERROR(hipStreamSynchronize(_stream)); _p_d_last_sample_id = _p_d_sample_id_buf + _batch_token_num; _batch_seq_len++; _batch_token_num += _batch_size; return _h_unfinished; } template <OperationType OpType_> void GptEncoder<OpType_>::self_attention(bool cache) { /* ---step 0. layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_query, _p_d_q, _p_d_enc_wei[_weight_offset], _p_d_enc_wei[_weight_offset + 1], _p_d_enc_wei[_weight_offset + 5], _max_thread_per_block); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_query, "input with bias", _batch_token_num * _tw._hidden_size - 5, _batch_token_num * _tw._hidden_size); print_vec(_p_d_q, "first ln output", _batch_token_num * _tw._hidden_size - 5, _batch_token_num * _tw._hidden_size); } #endif /* ---step 1. qkv = ori_q * qkv_wei + bias, and reshape qkv for multi-head * gemm--- */ CHECK_GPU_ERROR(hipblasGemmEx( _hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._hidden_size * 3, _batch_token_num, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 2], _AType, _tw._hidden_size * 3, _p_d_q, _BType, _tw._hidden_size, &_fzero, _p_d_qkv_projected, _CType, _tw._hidden_size * 3, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_enc_wei[_weight_offset + 2], "qkv_weight_mat", _tw._hidden_size * _tw._hidden_size * 3 - 5, _tw._hidden_size * _tw._hidden_size * 3); print_vec(_p_d_qkv_projected, "_p_d_qkv_projected", _batch_token_num * _tw._hidden_size * 3 - 5, _batch_token_num * _tw._hidden_size * 3); } #endif // get q, k, v by split and reshape qkv ker_arrange_encself_qkv_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_qkv_projected, _p_d_enc_wei[_weight_offset + 3], _p_d_q, _max_batch_dim, _batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block); if (cache) { hipStream_t stream; if (_batch_token_num > 360) { stream = _cache_stream; CHECK_GPU_ERROR(hipStreamSynchronize(_stream)); } else { stream = _stream; } CHECK_GPU_ERROR( hipMemcpyAsync(_p_d_k_cache + _layer_id * _max_batch_dim, _p_d_k, _batch_token_num * _tw._hidden_size * sizeof(_DataType), hipMemcpyDeviceToDevice, stream)); CHECK_GPU_ERROR( hipMemcpyAsync(_p_d_v_cache + _layer_id * _max_batch_dim, _p_d_v, _batch_token_num * _tw._hidden_size * sizeof(_DataType), hipMemcpyDeviceToDevice, stream)); } #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_q, "_p_d_q", _batch_token_num * _tw._hidden_size - 5, _batch_token_num * _tw._hidden_size); print_vec(_p_d_k, "_p_d_k", _batch_token_num * _tw._hidden_size - 5, _batch_token_num 
* _tw._hidden_size); print_vec(_p_d_v, "_p_d_v", _batch_token_num * _tw._hidden_size - 5, _batch_token_num * _tw._hidden_size); } #endif /* ---step 2. correlation = q * k, perform softmax on correlation--- */ CHECK_GPU_ERROR(hipblasGemmStridedBatchedEx( _hd, HIPBLAS_OP_T, HIPBLAS_OP_N, _batch_seq_len, _batch_seq_len, _tw._dim_per_head, &_atten_scaler, _p_d_k, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_q, _BType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, &_fzero, _p_d_c, _CType, _batch_seq_len, _batch_seq_len * _batch_seq_len, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_c, "q*k", _batch_token_num * _batch_seq_len * _tw._head_num - 5, _batch_token_num * _batch_seq_len * _tw._head_num); } #endif ker_correlation_softmax_gpt_launcher<_DataType>(_batch_size, _batch_seq_len, _tw._head_num, _stream, _p_d_c, _p_d_real_seq_len); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_c, "mask weights", _batch_token_num * _batch_seq_len * _tw._head_num - 5, _batch_token_num * _batch_seq_len * _tw._head_num); } #endif /* ---step 3. new_q = correlation * v--- */ CHECK_GPU_ERROR(hipblasGemmStridedBatchedEx( _hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._dim_per_head, _batch_seq_len, _batch_seq_len, &_fone, _p_d_v, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_c, _BType, _batch_seq_len, _batch_seq_len * _batch_seq_len, &_fzero, _p_d_q, _CType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_q, "value after attention", _batch_token_num * _tw._hidden_size - 5, _batch_token_num * _tw._hidden_size); } #endif // use v to save reshaped q, since they are in same size and v // will not be use again before the next multi-head-attention ker_arrange_atten_output_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_q, _p_d_v, _batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_v, "reshaped value after attention", 0, 5); print_vec(_p_d_query, "attention input with output bias", 0, 5); } #endif /* ---step 4. new_q = ori_q + new_q * output_wei--- */ CHECK_GPU_ERROR(hipblasGemmEx( _hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._hidden_size, _batch_token_num, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 4], _AType, _tw._hidden_size, _p_d_v, _BType, _tw._hidden_size, &_fone, _p_d_query, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_enc_wei[_weight_offset + 4], "attn out kernel", 0, 5); print_vec(_p_d_query, "attention output", 0, 5); } #endif return; } template <OperationType OpType_> void GptEncoder<OpType_>::self_attention_with_cache() { _DataType *_p_d_k_cache_cur_layer = _p_d_k_cache + _layer_id * _max_batch_dim; _DataType *_p_d_v_cache_cur_layer = _p_d_v_cache + _layer_id * _max_batch_dim; #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_k_cache_cur_layer, "_p_d_k_cache_cur_layer", _batch_size * (_batch_seq_len - 1) * _tw._hidden_size - 5, _batch_size * (_batch_seq_len - 1) * _tw._hidden_size); print_vec(_p_d_v_cache_cur_layer, "_p_d_v_cache_cur_layer", _batch_size * (_batch_seq_len - 1) * _tw._hidden_size - 5, _batch_size * (_batch_seq_len - 1) * _tw._hidden_size); } #endif /* ---step 0. 
layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _batch_size, _tw._hidden_size, _stream, _p_d_query, _p_d_q, _p_d_enc_wei[_weight_offset], _p_d_enc_wei[_weight_offset + 1], _p_d_enc_wei[_weight_offset + 5], _max_thread_per_block); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_query, "input with bias", _batch_size * _tw._hidden_size - 5, _batch_size * _tw._hidden_size); print_vec(_p_d_q, "first ln output", _batch_size * _tw._hidden_size - 5, _batch_size * _tw._hidden_size); } #endif /* ---step 1. qkv = ori_q * qkv_wei + bias, and reshape qkv for multi-head * gemm--- */ CHECK_GPU_ERROR(hipblasGemmEx( _hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._hidden_size * 3, _batch_size, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 2], _AType, _tw._hidden_size * 3, _p_d_q, _BType, _tw._hidden_size, &_fzero, _p_d_qkv_projected, _CType, _tw._hidden_size * 3, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_qkv_projected, "_p_d_qkv_projected", _batch_size * _tw._hidden_size * 3 - 5, _batch_size * _tw._hidden_size * 3); } #endif // get q, k, v by split and reshape qkv ker_arrange_qkv_with_cache_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_qkv_projected, _p_d_enc_wei[_weight_offset + 3], _p_d_q, _p_d_k, _p_d_k_cache_cur_layer, _p_d_v, _p_d_v_cache_cur_layer, _max_batch_dim, _batch_seq_len, _tw._dim_per_head, _tw._head_num); // copy new k and v to cache hipStream_t stream; if (_batch_token_num > 360) { stream = _cache_stream; CHECK_GPU_ERROR(hipStreamSynchronize(_stream)); } else { stream = _stream; } CHECK_GPU_ERROR( hipMemcpyAsync(_p_d_k_cache_cur_layer, _p_d_k, _batch_token_num * _tw._hidden_size * sizeof(_DataType), hipMemcpyDeviceToDevice, stream)); CHECK_GPU_ERROR( hipMemcpyAsync(_p_d_v_cache_cur_layer, _p_d_v, _batch_token_num * _tw._hidden_size * sizeof(_DataType), hipMemcpyDeviceToDevice, stream)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_q, "_p_d_q", _batch_size * _tw._hidden_size - 5, _batch_size * _tw._hidden_size); print_vec(_p_d_k, "_p_d_k", _batch_token_num * _tw._hidden_size - 5, _batch_token_num * _tw._hidden_size); print_vec(_p_d_v, "_p_d_v", _batch_token_num * _tw._hidden_size - 5, _batch_token_num * _tw._hidden_size); } #endif /* ---step 2. correlation = q * k, perform softmax on correlation correlation: [batch_size, heads_num, 1, batch_seq_len]--- */ CHECK_GPU_ERROR(hipblasGemmStridedBatchedEx( _hd, HIPBLAS_OP_T, HIPBLAS_OP_N, _batch_seq_len, 1, _tw._dim_per_head, &_atten_scaler, _p_d_k, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_q, _BType, _tw._dim_per_head, _tw._dim_per_head, &_fzero, _p_d_c, _CType, _batch_seq_len, _batch_seq_len, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_c, "q*k", _batch_size * _batch_seq_len * _tw._head_num - 5, _batch_size * _batch_seq_len * _tw._head_num); } #endif ker_attention_mask_weights_launcher<_DataType>(_batch_size, 1, _batch_seq_len, _tw._head_num, _stream, _p_d_c, _p_d_real_seq_len); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_c, "mask weights", _batch_size * _batch_seq_len * _tw._head_num - 5, _batch_size * _batch_seq_len * _tw._head_num); } #endif /* ---step 3. 
new_q = correlation * v--- */ CHECK_GPU_ERROR(hipblasGemmStridedBatchedEx( _hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._dim_per_head, 1, _batch_seq_len, &_fone, _p_d_v, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_c, _BType, _batch_seq_len, _batch_seq_len, &_fzero, _p_d_q, _CType, _tw._dim_per_head, _tw._dim_per_head, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_q, "value after attention", _batch_size * _tw._hidden_size - 5, _batch_size * _tw._hidden_size); } #endif // use v to save reshaped q, since they are in same size and v // will not be use again before the next multi-head-attention ker_arrange_atten_output_launcher<_DataType>( _batch_size, _tw._hidden_size, _stream, _p_d_q, _p_d_v, 1, _tw._dim_per_head, _tw._head_num, _max_thread_per_block); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_v, "reshaped value after attention", 0, 5); print_vec(_p_d_query, "attention input with output bias", 0, 5); } #endif /* ---step 4. new_q = ori_q + new_q * output_wei--- */ CHECK_GPU_ERROR(hipblasGemmEx( _hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._hidden_size, _batch_size, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 4], _AType, _tw._hidden_size, _p_d_v, _BType, _tw._hidden_size, &_fone, _p_d_query, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_enc_wei[_weight_offset + 4], "attn out kernel", 0, 5); print_vec(_p_d_query, "attention output", 0, 5); } #endif return; } template <OperationType OpType_> void GptEncoder<OpType_>::ffn_add_norm() { /* ---step 0. layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_query, _p_d_ffn_buf1, _p_d_enc_wei[_weight_offset + 6], _p_d_enc_wei[_weight_offset + 7], _p_d_enc_wei[_weight_offset + 11], _max_thread_per_block); /* ---step 1. first ffn layer--- */ CHECK_GPU_ERROR(hipblasGemmEx( _hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._inner_size, _batch_token_num, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 8], _AType, _tw._inner_size, _p_d_ffn_buf1, _BType, _tw._hidden_size, &_fzero, _p_d_ffn_buf2, _CType, _tw._inner_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); ker_bias_gelu_launcher<_DataType>( _batch_token_num, _max_thread_per_block, _stream, _p_d_ffn_buf2, _p_d_enc_wei[_weight_offset + 9], _tw._inner_size); /* ---step 2. second ffn layer--- */ CHECK_GPU_ERROR(hipblasGemmEx( _hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._hidden_size, _batch_token_num, _tw._inner_size, &_fone, _p_d_enc_wei[_weight_offset + 10], _AType, _tw._hidden_size, _p_d_ffn_buf2, _BType, _tw._inner_size, &_fone, _p_d_query, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); return; } template <OperationType OpType_> void GptEncoder<OpType_>::ffn_add_norm_with_cache() { /* ---step 0. layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _batch_size, _tw._hidden_size, _stream, _p_d_query, _p_d_ffn_buf1, _p_d_enc_wei[_weight_offset + 6], _p_d_enc_wei[_weight_offset + 7], _p_d_enc_wei[_weight_offset + 11], _max_thread_per_block); /* ---step 1. 
first ffn layer--- */ CHECK_GPU_ERROR(hipblasGemmEx( _hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._inner_size, _batch_size, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 8], _AType, _tw._inner_size, _p_d_ffn_buf1, _BType, _tw._hidden_size, &_fzero, _p_d_ffn_buf2, _CType, _tw._inner_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); ker_bias_gelu_launcher<_DataType>( _batch_size, _max_thread_per_block, _stream, _p_d_ffn_buf2, _p_d_enc_wei[_weight_offset + 9], _tw._inner_size); /* ---step 2. second ffn layer--- */ CHECK_GPU_ERROR(hipblasGemmEx( _hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._hidden_size, _batch_size, _tw._inner_size, &_fone, _p_d_enc_wei[_weight_offset + 10], _AType, _tw._hidden_size, _p_d_ffn_buf2, _BType, _tw._inner_size, &_fone, _p_d_query, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); return; } /** Compute ppl from encoder output */ template <OperationType OpType_> void GptEncoder<OpType_>::compute_ppl() { /* ---step 1. project hidden states to vocab logits--- */ CHECK_GPU_ERROR(hipblasGemmEx( _hd, HIPBLAS_OP_T, HIPBLAS_OP_N, _tw._src_vocab_size, _batch_token_num, _tw._hidden_size, &_fone, _p_d_src_emb_wei[0], _AType, _tw._hidden_size, _p_d_query, _BType, _tw._hidden_size, &_fzero, _p_d_logit, _CType, _tw._src_vocab_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT print_vec(_p_d_logit, "logits", _batch_token_num * _tw._src_vocab_size - 5, _batch_token_num * _tw._src_vocab_size); #endif /* ---step 2. compute language model ppl--- */ ker_ppl_launcher<_DataType>( _batch_size, _batch_seq_len, _max_thread_per_block, _stream, _p_d_logit, _p_d_token_id, _p_d_real_seq_len, _p_d_ppl, _tw._src_vocab_size); } template class GptEncoder<OperationType::FP16>; template class GptEncoder<OperationType::FP32>; } // namespace cuda } // namespace lightseq
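The encoder above moves K/V-cache copies onto a second stream once the batch is large enough (the `_batch_token_num > 360` heuristic), synchronizing the compute stream first so the copy reads finished data. A minimal sketch of that pattern, with hypothetical names standing in for the encoder's `_p_d_k` / `_p_d_k_cache` buffers and streams:

// Sketch only; k_src, k_cache, nbytes, compute_stream, copy_stream are hypothetical stand-ins.
void copy_kv_cache(const float *k_src, float *k_cache, size_t nbytes,
                   cudaStream_t compute_stream, cudaStream_t copy_stream,
                   bool large_batch) {
    cudaStream_t s = compute_stream;
    if (large_batch) {
        // Ensure the kernels that produced k_src have finished before the
        // independent copy stream starts reading from it.
        cudaStreamSynchronize(compute_stream);
        s = copy_stream;
    }
    cudaMemcpyAsync(k_cache, k_src, nbytes, cudaMemcpyDeviceToDevice, s);
}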
00df383297b68ab8751df3790d75d29745c621dd.cu
#include "kernels/gptKernels.h" #include "kernels/transformerKernels.h" #include "model/gpt_encoder.h" /** @file GPT encoder, composed by gemm lib and custom cuda kernel function */ // #define DEBUG_RESULT namespace lightseq { namespace cuda { template <OperationType OpType_> GptEncoder<OpType_>::GptEncoder(int max_batch_size, const int *p_d_token_id, float *p_d_ppl, int *p_d_sample_id, const GptWeight<OpType_> &tw, cudaStream_t stream, cudaStream_t cache_stream, cublasHandle_t hd) : _max_batch_size(max_batch_size), _p_d_token_id(p_d_token_id), _p_d_ppl(p_d_ppl), _p_d_sample_id(p_d_sample_id), _tw(tw), _stream(stream), _cache_stream(cache_stream), _hd(hd), _p_d_src_emb_wei(tw.get_src_emb_wei()), _p_d_enc_wei(tw.get_enc_wei()), _fone((_DataType)1.f), _fzero((_DataType)0.f), _atten_scaler((_DataType)sqrt(1.f / tw._dim_per_head)), _max_batch_dim(max_batch_size * tw._max_step * tw._hidden_size), _max_thread_per_block(1024), _h_real_seq_len(max_batch_size, 0), _h_ppl(max_batch_size, 0.f), _h_sample_id(max_batch_size * tw._max_step, 0), _h_unfinished(1) {} /** Compute GPU memory size needed by gpt encoder, to see how these memory is used, checkout init_buffer() for detail */ template <OperationType OpType_> size_t GptEncoder<OpType_>::compute_buffer_bytesize() { int si = _max_batch_size; size_t sz0 = (size_t)_max_batch_dim; sz0 += 2 * (size_t)_max_batch_dim * (size_t)_tw._n_enc_layer; long long sz1 = (size_t)_max_batch_dim * 6 + (size_t)_max_batch_size * (size_t)_tw._head_num * (size_t)_tw._max_step * (size_t)_tw._max_step; long long sz2 = (size_t)_max_batch_dim + (size_t)_max_batch_size * (size_t)_tw._max_step * (size_t)_tw._inner_size; long long sz3 = (size_t)_max_batch_size * (size_t)_tw._max_step * (size_t)_tw._src_vocab_size; return (sz0 + max(max(sz1, sz2), sz3)) * sizeof(_DataType) + si * sizeof(int); } /** Init the GPU memory pointer which point to the memory buffer needed by encoder. 
These buffer are used during custom cuda kernel function, find the corresponding function to see how these buffer are used */ template <OperationType OpType_> void GptEncoder<OpType_>::init_buffer(void *pbuf) { // int buffer int *p_d_int = reinterpret_cast<int *>(pbuf); _p_d_real_seq_len = p_d_int; p_d_int += _max_batch_size; // datatype buffer _DataType *p_d_datatype = reinterpret_cast<_DataType *>(p_d_int); _p_d_query = p_d_datatype; _p_d_k_cache = _p_d_query + _max_batch_dim; _p_d_v_cache = _p_d_k_cache + _max_batch_dim * _tw._n_enc_layer; p_d_datatype = _p_d_v_cache + _max_batch_dim * _tw._n_enc_layer; // reuse 1 --------------------- _p_d_qkv_projected = p_d_datatype; _p_d_q = _p_d_qkv_projected + _max_batch_dim * 3; _p_d_k = _p_d_q + _max_batch_dim; _p_d_v = _p_d_k + _max_batch_dim; // _max_batch_size * _tw._head_num * // _tw._max_step * _tw._max_step _p_d_c = _p_d_v + _max_batch_dim; // reuse 2 --------------------- _p_d_ffn_buf1 = p_d_datatype; // _max_batch_size * _tw._max_step * _tw._inner_size _p_d_ffn_buf2 = _p_d_ffn_buf1 + _max_batch_dim; // reuse 3 --------------------- // _max_batch_size * _tw._max_step * _tw._src_vocab_size _p_d_logit = p_d_datatype; CHECK_GPU_ERROR(cudaMalloc((void **)&_p_d_curandstate, _max_batch_size * sizeof(curandState))); CHECK_GPU_ERROR(cudaMalloc((void **)&_p_d_sample_id_buf, _max_batch_size * _tw._max_step * sizeof(int))); CHECK_GPU_ERROR(cudaMalloc((void **)&_p_d_unfinished, sizeof(int))); ker_curand_setup<<<_max_batch_size, 1, 0, _stream>>>(_p_d_curandstate); return; } /** Some requirements needed by custom cuda kernel function */ template <OperationType OpType_> std::string GptEncoder<OpType_>::check() { // if (_max_thread_per_block < _tw._hidden_size) { // return "violate hidden_size <= max_thread_per_block"; // } if (_tw._inner_size & 1) { return "violate inner_size % 2 = 0"; } if (_tw._dim_per_head & 1) { return "violate dim_per_head % 2 = 0"; } if (_p_d_src_emb_wei.size() != 4) { return "violate p_d_src_emb_wei.size() = 4"; } if (_p_d_enc_wei.size() != _tw._weight_per_enc_layer * _tw._n_enc_layer) { return "violate p_d_enc_wei.size() = weight_per_enc_layer * n_enc_layer"; } return ""; } template <OperationType OpType_> void GptEncoder<OpType_>::run_one_infer(int batch_size, int batch_seq_len) { _batch_size = batch_size; _batch_seq_len = batch_seq_len; _batch_token_num = batch_size * batch_seq_len; CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_real_seq_len, _h_real_seq_len.data(), sizeof(int) * _batch_size, cudaMemcpyHostToDevice, _stream)); CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_ppl, _h_ppl.data(), sizeof(float) * _batch_size, cudaMemcpyHostToDevice, _stream)); #ifdef DEBUG_RESULT std::cout << "batch_size-" << batch_size << " batch_seq_len-" << batch_seq_len << std::endl; print_vec(_p_d_token_id, "batch_token_ids", batch_size * batch_seq_len); #endif // token embedding, add position embedding and layer_norm ker_gpt_embedding_launcher<_DataType>( batch_size, batch_seq_len, _tw._hidden_size, _stream, _p_d_src_emb_wei[0], _p_d_src_emb_wei[1], _p_d_token_id, _p_d_query, _p_d_real_seq_len, _tw._padding_id, 0); #ifdef DEBUG_RESULT print_vec(_p_d_query, "input embeddings", _batch_token_num * _tw._hidden_size - 5, _batch_token_num * _tw._hidden_size); #endif for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) { _weight_offset = _layer_id * _tw._weight_per_enc_layer; self_attention(); ffn_add_norm(); } // last layer norm ker_norm_layer_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_query, _p_d_src_emb_wei[2], 
_p_d_src_emb_wei[3], _max_thread_per_block); compute_ppl(); return; } template <OperationType OpType_> int GptEncoder<OpType_>::run_one_sample(int batch_size, int batch_seq_len) { _batch_size = batch_size; _batch_seq_len = batch_seq_len; _batch_token_num = batch_size * batch_seq_len; if (_batch_seq_len >= _tw._max_step) { return _batch_seq_len; } CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_real_seq_len, _h_real_seq_len.data(), sizeof(int) * _batch_size, cudaMemcpyHostToDevice, _stream)); CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_ppl, _h_ppl.data(), sizeof(float) * _batch_size, cudaMemcpyHostToDevice, _stream)); CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_sample_id, _p_d_token_id, sizeof(int) * _batch_size * _tw._max_step, cudaMemcpyDeviceToDevice, _stream)); #ifdef DEBUG_RESULT std::cout << "batch_size-" << batch_size << " batch_seq_len-" << batch_seq_len << std::endl; std::cout << "Sample with " << _tw._sampling_method << std::endl; std::cout << "padding_id: " << _tw._padding_id << std::endl; std::cout << "vocab_size: " << _tw._src_vocab_size << std::endl; print_vec(_p_d_sample_id, "batch_token_ids", batch_size * batch_seq_len); #endif // token embedding, add position embedding and layer_norm ker_gpt_embedding_launcher<_DataType>( _batch_size, _batch_seq_len, _tw._hidden_size, _stream, _p_d_src_emb_wei[0], _p_d_src_emb_wei[1], _p_d_sample_id, _p_d_query, _p_d_real_seq_len, _tw._padding_id, 0); #ifdef DEBUG_RESULT print_vec(_p_d_query, "embedding", _batch_token_num * _tw._hidden_size - 10, _batch_token_num * _tw._hidden_size); #endif for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) { _weight_offset = _layer_id * _tw._weight_per_enc_layer; self_attention(true); ffn_add_norm(); } // last layer norm ker_norm_layer_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_query, _p_d_src_emb_wei[2], _p_d_src_emb_wei[3], _max_thread_per_block); if (sample_one_token() == 0 || _batch_seq_len >= _tw._max_step) { CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_sample_id_buf, _p_d_sample_id, _batch_token_num * sizeof(int), cudaMemcpyDeviceToDevice, _stream)); CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); return _batch_seq_len; } while (1) { #ifdef DEBUG_RESULT std::cout << "before sample:batch_size-" << _batch_size << " batch_seq_len-" << _batch_seq_len << std::endl; print_vec(_p_d_sample_id, "batch_token_ids", _batch_token_num); #endif // token embedding, add position embedding and layer_norm ker_gpt_embedding_launcher<_DataType>( _batch_size, 1, _tw._hidden_size, _stream, _p_d_src_emb_wei[0], _p_d_src_emb_wei[1], _p_d_last_sample_id, _p_d_query, _p_d_real_seq_len, _tw._padding_id, _batch_seq_len - 1); #ifdef DEBUG_RESULT print_vec(_p_d_query, "embedding", _batch_size * _tw._hidden_size - 10, _batch_size * _tw._hidden_size); #endif for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) { _weight_offset = _layer_id * _tw._weight_per_enc_layer; self_attention_with_cache(); ffn_add_norm_with_cache(); } // last layer norm ker_norm_layer_launcher<_DataType>( _batch_size, _tw._hidden_size, _stream, _p_d_query, _p_d_src_emb_wei[2], _p_d_src_emb_wei[3], _max_thread_per_block); #ifdef DEBUG_RESULT print_vec(_p_d_query, "_p_d_query before logits", _batch_size * _tw._hidden_size - 10, _batch_size * _tw._hidden_size); if (sample_one_token_with_cache() == 0 || _batch_seq_len >= _tw._max_step) break; #else if (sample_one_token_with_cache() == 0 || _batch_seq_len >= _tw._max_step) break; #endif } CHECK_GPU_ERROR(cudaMemcpyAsync(_p_d_sample_id_buf, _p_d_sample_id, _batch_token_num * sizeof(int), 
cudaMemcpyDeviceToDevice, _stream)); CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); return _batch_seq_len; } template <OperationType OpType_> int GptEncoder<OpType_>::sample_one_token() { /* ---step 1. project hidden states to vocab logits--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_T, CUBLAS_OP_N, _tw._src_vocab_size, _batch_token_num, _tw._hidden_size, &_fone, _p_d_src_emb_wei[0], _AType, _tw._hidden_size, _p_d_query, _BType, _tw._hidden_size, &_fzero, _p_d_logit, _CType, _tw._src_vocab_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT print_vec(_p_d_logit, "logits", _batch_token_num * _tw._src_vocab_size - 10, _batch_token_num * _tw._src_vocab_size); #endif CHECK_GPU_ERROR(cudaMemsetAsync(_p_d_unfinished, 0, sizeof(int), _stream)); /* ---step 2. sample new tokens from logits */ if (_tw._sampling_method == "topk") { #ifdef DEBUG_RESULT std::cout << "sampling using topk\n"; #endif ker_topk_sample_launcher<_DataType>( _batch_size, _batch_seq_len, _batch_seq_len, _max_thread_per_block, _stream, _p_d_logit, _p_d_sample_id, _p_d_sample_id_buf, _p_d_real_seq_len, _tw._src_vocab_size, _tw._topk, _p_d_unfinished, _p_d_curandstate, _tw._eos_id); } else { #ifdef DEBUG_RESULT std::cout << "sampling using topp\n"; #endif ker_topp_sample_launcher<_DataType>( _batch_size, _batch_seq_len, _batch_seq_len, _max_thread_per_block, _stream, _p_d_logit, _p_d_sample_id, _p_d_sample_id_buf, _p_d_real_seq_len, _tw._src_vocab_size, _tw._topp, _p_d_unfinished, _p_d_curandstate, _tw._eos_id); } int *temp = _p_d_sample_id; _p_d_sample_id = _p_d_sample_id_buf; _p_d_sample_id_buf = temp; CHECK_GPU_ERROR(cudaMemcpyAsync(&_h_unfinished, _p_d_unfinished, sizeof(int), cudaMemcpyDeviceToHost, _stream)); CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); _p_d_last_sample_id = _p_d_sample_id_buf + _batch_token_num; _batch_seq_len++; _batch_token_num += _batch_size; return _h_unfinished; } template <OperationType OpType_> int GptEncoder<OpType_>::sample_one_token_with_cache() { /* ---step 1. project hidden states to vocab logits--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_T, CUBLAS_OP_N, _tw._src_vocab_size, _batch_size, _tw._hidden_size, &_fone, _p_d_src_emb_wei[0], _AType, _tw._hidden_size, _p_d_query, _BType, _tw._hidden_size, &_fzero, _p_d_logit, _CType, _tw._src_vocab_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT print_vec(_p_d_logit, "sampling-logits", _batch_size * _tw._src_vocab_size - 5, _batch_size * _tw._src_vocab_size); #endif CHECK_GPU_ERROR(cudaMemsetAsync(_p_d_unfinished, 0, sizeof(int), _stream)); // /* ---step 2. 
sample new tokens from logits */ if (_tw._sampling_method == "topk") { #ifdef DEBUG_RESULT std::cout << "sampling using topk\n"; #endif ker_topk_sample_launcher<_DataType>( _batch_size, _batch_seq_len, 1, _max_thread_per_block, _stream, _p_d_logit, _p_d_sample_id, _p_d_sample_id_buf, _p_d_real_seq_len, _tw._src_vocab_size, _tw._topk, _p_d_unfinished, _p_d_curandstate, _tw._eos_id); } else { #ifdef DEBUG_RESULT std::cout << "sampling using topp\n"; #endif ker_topp_sample_launcher<_DataType>( _batch_size, _batch_seq_len, 1, _max_thread_per_block, _stream, _p_d_logit, _p_d_sample_id, _p_d_sample_id_buf, _p_d_real_seq_len, _tw._src_vocab_size, _tw._topp, _p_d_unfinished, _p_d_curandstate, _tw._eos_id); } int *temp = _p_d_sample_id; _p_d_sample_id = _p_d_sample_id_buf; _p_d_sample_id_buf = temp; CHECK_GPU_ERROR(cudaMemcpyAsync(&_h_unfinished, _p_d_unfinished, sizeof(int), cudaMemcpyDeviceToHost, _stream)); CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); _p_d_last_sample_id = _p_d_sample_id_buf + _batch_token_num; _batch_seq_len++; _batch_token_num += _batch_size; return _h_unfinished; } template <OperationType OpType_> void GptEncoder<OpType_>::self_attention(bool cache) { /* ---step 0. layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_query, _p_d_q, _p_d_enc_wei[_weight_offset], _p_d_enc_wei[_weight_offset + 1], _p_d_enc_wei[_weight_offset + 5], _max_thread_per_block); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_query, "input with bias", _batch_token_num * _tw._hidden_size - 5, _batch_token_num * _tw._hidden_size); print_vec(_p_d_q, "first ln output", _batch_token_num * _tw._hidden_size - 5, _batch_token_num * _tw._hidden_size); } #endif /* ---step 1. qkv = ori_q * qkv_wei + bias, and reshape qkv for multi-head * gemm--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size * 3, _batch_token_num, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 2], _AType, _tw._hidden_size * 3, _p_d_q, _BType, _tw._hidden_size, &_fzero, _p_d_qkv_projected, _CType, _tw._hidden_size * 3, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_enc_wei[_weight_offset + 2], "qkv_weight_mat", _tw._hidden_size * _tw._hidden_size * 3 - 5, _tw._hidden_size * _tw._hidden_size * 3); print_vec(_p_d_qkv_projected, "_p_d_qkv_projected", _batch_token_num * _tw._hidden_size * 3 - 5, _batch_token_num * _tw._hidden_size * 3); } #endif // get q, k, v by split and reshape qkv ker_arrange_encself_qkv_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_qkv_projected, _p_d_enc_wei[_weight_offset + 3], _p_d_q, _max_batch_dim, _batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block); if (cache) { cudaStream_t stream; if (_batch_token_num > 360) { stream = _cache_stream; CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); } else { stream = _stream; } CHECK_GPU_ERROR( cudaMemcpyAsync(_p_d_k_cache + _layer_id * _max_batch_dim, _p_d_k, _batch_token_num * _tw._hidden_size * sizeof(_DataType), cudaMemcpyDeviceToDevice, stream)); CHECK_GPU_ERROR( cudaMemcpyAsync(_p_d_v_cache + _layer_id * _max_batch_dim, _p_d_v, _batch_token_num * _tw._hidden_size * sizeof(_DataType), cudaMemcpyDeviceToDevice, stream)); } #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_q, "_p_d_q", _batch_token_num * _tw._hidden_size - 5, _batch_token_num * _tw._hidden_size); print_vec(_p_d_k, "_p_d_k", _batch_token_num * _tw._hidden_size - 5, 
_batch_token_num * _tw._hidden_size); print_vec(_p_d_v, "_p_d_v", _batch_token_num * _tw._hidden_size - 5, _batch_token_num * _tw._hidden_size); } #endif /* ---step 2. correlation = q * k, perform softmax on correlation--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_T, CUBLAS_OP_N, _batch_seq_len, _batch_seq_len, _tw._dim_per_head, &_atten_scaler, _p_d_k, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_q, _BType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, &_fzero, _p_d_c, _CType, _batch_seq_len, _batch_seq_len * _batch_seq_len, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_c, "q*k", _batch_token_num * _batch_seq_len * _tw._head_num - 5, _batch_token_num * _batch_seq_len * _tw._head_num); } #endif ker_correlation_softmax_gpt_launcher<_DataType>(_batch_size, _batch_seq_len, _tw._head_num, _stream, _p_d_c, _p_d_real_seq_len); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_c, "mask weights", _batch_token_num * _batch_seq_len * _tw._head_num - 5, _batch_token_num * _batch_seq_len * _tw._head_num); } #endif /* ---step 3. new_q = correlation * v--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._dim_per_head, _batch_seq_len, _batch_seq_len, &_fone, _p_d_v, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_c, _BType, _batch_seq_len, _batch_seq_len * _batch_seq_len, &_fzero, _p_d_q, _CType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_q, "value after attention", _batch_token_num * _tw._hidden_size - 5, _batch_token_num * _tw._hidden_size); } #endif // use v to save reshaped q, since they are in same size and v // will not be use again before the next multi-head-attention ker_arrange_atten_output_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_q, _p_d_v, _batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_v, "reshaped value after attention", 0, 5); print_vec(_p_d_query, "attention input with output bias", 0, 5); } #endif /* ---step 4. new_q = ori_q + new_q * output_wei--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _batch_token_num, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 4], _AType, _tw._hidden_size, _p_d_v, _BType, _tw._hidden_size, &_fone, _p_d_query, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_enc_wei[_weight_offset + 4], "attn out kernel", 0, 5); print_vec(_p_d_query, "attention output", 0, 5); } #endif return; } template <OperationType OpType_> void GptEncoder<OpType_>::self_attention_with_cache() { _DataType *_p_d_k_cache_cur_layer = _p_d_k_cache + _layer_id * _max_batch_dim; _DataType *_p_d_v_cache_cur_layer = _p_d_v_cache + _layer_id * _max_batch_dim; #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_k_cache_cur_layer, "_p_d_k_cache_cur_layer", _batch_size * (_batch_seq_len - 1) * _tw._hidden_size - 5, _batch_size * (_batch_seq_len - 1) * _tw._hidden_size); print_vec(_p_d_v_cache_cur_layer, "_p_d_v_cache_cur_layer", _batch_size * (_batch_seq_len - 1) * _tw._hidden_size - 5, _batch_size * (_batch_seq_len - 1) * _tw._hidden_size); } #endif /* ---step 0. 
layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _batch_size, _tw._hidden_size, _stream, _p_d_query, _p_d_q, _p_d_enc_wei[_weight_offset], _p_d_enc_wei[_weight_offset + 1], _p_d_enc_wei[_weight_offset + 5], _max_thread_per_block); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_query, "input with bias", _batch_size * _tw._hidden_size - 5, _batch_size * _tw._hidden_size); print_vec(_p_d_q, "first ln output", _batch_size * _tw._hidden_size - 5, _batch_size * _tw._hidden_size); } #endif /* ---step 1. qkv = ori_q * qkv_wei + bias, and reshape qkv for multi-head * gemm--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size * 3, _batch_size, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 2], _AType, _tw._hidden_size * 3, _p_d_q, _BType, _tw._hidden_size, &_fzero, _p_d_qkv_projected, _CType, _tw._hidden_size * 3, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_qkv_projected, "_p_d_qkv_projected", _batch_size * _tw._hidden_size * 3 - 5, _batch_size * _tw._hidden_size * 3); } #endif // get q, k, v by split and reshape qkv ker_arrange_qkv_with_cache_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_qkv_projected, _p_d_enc_wei[_weight_offset + 3], _p_d_q, _p_d_k, _p_d_k_cache_cur_layer, _p_d_v, _p_d_v_cache_cur_layer, _max_batch_dim, _batch_seq_len, _tw._dim_per_head, _tw._head_num); // copy new k and v to cache cudaStream_t stream; if (_batch_token_num > 360) { stream = _cache_stream; CHECK_GPU_ERROR(cudaStreamSynchronize(_stream)); } else { stream = _stream; } CHECK_GPU_ERROR( cudaMemcpyAsync(_p_d_k_cache_cur_layer, _p_d_k, _batch_token_num * _tw._hidden_size * sizeof(_DataType), cudaMemcpyDeviceToDevice, stream)); CHECK_GPU_ERROR( cudaMemcpyAsync(_p_d_v_cache_cur_layer, _p_d_v, _batch_token_num * _tw._hidden_size * sizeof(_DataType), cudaMemcpyDeviceToDevice, stream)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_q, "_p_d_q", _batch_size * _tw._hidden_size - 5, _batch_size * _tw._hidden_size); print_vec(_p_d_k, "_p_d_k", _batch_token_num * _tw._hidden_size - 5, _batch_token_num * _tw._hidden_size); print_vec(_p_d_v, "_p_d_v", _batch_token_num * _tw._hidden_size - 5, _batch_token_num * _tw._hidden_size); } #endif /* ---step 2. correlation = q * k, perform softmax on correlation correlation: [batch_size, heads_num, 1, batch_seq_len]--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_T, CUBLAS_OP_N, _batch_seq_len, 1, _tw._dim_per_head, &_atten_scaler, _p_d_k, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_q, _BType, _tw._dim_per_head, _tw._dim_per_head, &_fzero, _p_d_c, _CType, _batch_seq_len, _batch_seq_len, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_c, "q*k", _batch_size * _batch_seq_len * _tw._head_num - 5, _batch_size * _batch_seq_len * _tw._head_num); } #endif ker_attention_mask_weights_launcher<_DataType>(_batch_size, 1, _batch_seq_len, _tw._head_num, _stream, _p_d_c, _p_d_real_seq_len); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_c, "mask weights", _batch_size * _batch_seq_len * _tw._head_num - 5, _batch_size * _batch_seq_len * _tw._head_num); } #endif /* ---step 3. 
new_q = correlation * v--- */ CHECK_GPU_ERROR(cublasGemmStridedBatchedEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._dim_per_head, 1, _batch_seq_len, &_fone, _p_d_v, _AType, _tw._dim_per_head, _batch_seq_len * _tw._dim_per_head, _p_d_c, _BType, _batch_seq_len, _batch_seq_len, &_fzero, _p_d_q, _CType, _tw._dim_per_head, _tw._dim_per_head, _batch_size * _tw._head_num, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_q, "value after attention", _batch_size * _tw._hidden_size - 5, _batch_size * _tw._hidden_size); } #endif // use v to save reshaped q, since they are in same size and v // will not be use again before the next multi-head-attention ker_arrange_atten_output_launcher<_DataType>( _batch_size, _tw._hidden_size, _stream, _p_d_q, _p_d_v, 1, _tw._dim_per_head, _tw._head_num, _max_thread_per_block); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_v, "reshaped value after attention", 0, 5); print_vec(_p_d_query, "attention input with output bias", 0, 5); } #endif /* ---step 4. new_q = ori_q + new_q * output_wei--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _batch_size, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 4], _AType, _tw._hidden_size, _p_d_v, _BType, _tw._hidden_size, &_fone, _p_d_query, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT if (_layer_id == 0) { print_vec(_p_d_enc_wei[_weight_offset + 4], "attn out kernel", 0, 5); print_vec(_p_d_query, "attention output", 0, 5); } #endif return; } template <OperationType OpType_> void GptEncoder<OpType_>::ffn_add_norm() { /* ---step 0. layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _batch_token_num, _tw._hidden_size, _stream, _p_d_query, _p_d_ffn_buf1, _p_d_enc_wei[_weight_offset + 6], _p_d_enc_wei[_weight_offset + 7], _p_d_enc_wei[_weight_offset + 11], _max_thread_per_block); /* ---step 1. first ffn layer--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._inner_size, _batch_token_num, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 8], _AType, _tw._inner_size, _p_d_ffn_buf1, _BType, _tw._hidden_size, &_fzero, _p_d_ffn_buf2, _CType, _tw._inner_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); ker_bias_gelu_launcher<_DataType>( _batch_token_num, _max_thread_per_block, _stream, _p_d_ffn_buf2, _p_d_enc_wei[_weight_offset + 9], _tw._inner_size); /* ---step 2. second ffn layer--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _batch_token_num, _tw._inner_size, &_fone, _p_d_enc_wei[_weight_offset + 10], _AType, _tw._hidden_size, _p_d_ffn_buf2, _BType, _tw._inner_size, &_fone, _p_d_query, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); return; } template <OperationType OpType_> void GptEncoder<OpType_>::ffn_add_norm_with_cache() { /* ---step 0. layer_norm, add output_bias to "query"--- */ ker_norm_layer_resual_launcher<_DataType>( _batch_size, _tw._hidden_size, _stream, _p_d_query, _p_d_ffn_buf1, _p_d_enc_wei[_weight_offset + 6], _p_d_enc_wei[_weight_offset + 7], _p_d_enc_wei[_weight_offset + 11], _max_thread_per_block); /* ---step 1. 
first ffn layer--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._inner_size, _batch_size, _tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 8], _AType, _tw._inner_size, _p_d_ffn_buf1, _BType, _tw._hidden_size, &_fzero, _p_d_ffn_buf2, _CType, _tw._inner_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); ker_bias_gelu_launcher<_DataType>( _batch_size, _max_thread_per_block, _stream, _p_d_ffn_buf2, _p_d_enc_wei[_weight_offset + 9], _tw._inner_size); /* ---step 2. second ffn layer--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _batch_size, _tw._inner_size, &_fone, _p_d_enc_wei[_weight_offset + 10], _AType, _tw._hidden_size, _p_d_ffn_buf2, _BType, _tw._inner_size, &_fone, _p_d_query, _CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); return; } /** Compute ppl from encoder output */ template <OperationType OpType_> void GptEncoder<OpType_>::compute_ppl() { /* ---step 1. project hidden states to vocab logits--- */ CHECK_GPU_ERROR(cublasGemmEx( _hd, CUBLAS_OP_T, CUBLAS_OP_N, _tw._src_vocab_size, _batch_token_num, _tw._hidden_size, &_fone, _p_d_src_emb_wei[0], _AType, _tw._hidden_size, _p_d_query, _BType, _tw._hidden_size, &_fzero, _p_d_logit, _CType, _tw._src_vocab_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); #ifdef DEBUG_RESULT print_vec(_p_d_logit, "logits", _batch_token_num * _tw._src_vocab_size - 5, _batch_token_num * _tw._src_vocab_size); #endif /* ---step 2. compute language model ppl--- */ ker_ppl_launcher<_DataType>( _batch_size, _batch_seq_len, _max_thread_per_block, _stream, _p_d_logit, _p_d_token_id, _p_d_real_seq_len, _p_d_ppl, _tw._src_vocab_size); } template class GptEncoder<OperationType::FP16>; template class GptEncoder<OperationType::FP32>; } // namespace cuda } // namespace lightseq
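Comparing the .hip and .cu versions of this encoder pair shows that hipify's changes are purely mechanical API renames; the GEMM shapes, launch parameters, and control flow are identical. A condensed view of the substitutions visible in this pair (note that the cuBLAS algorithm enum CUBLAS_GEMM_DEFAULT_TENSOR_OP is left untouched in the .hip file):

// Runtime API: cudaStream_t -> hipStream_t, cudaMemcpyAsync -> hipMemcpyAsync,
//              cudaStreamSynchronize -> hipStreamSynchronize, cudaMalloc -> hipMalloc
// BLAS:        cublasHandle_t -> hipblasHandle_t, cublasGemmEx -> hipblasGemmEx,
//              CUBLAS_OP_T / CUBLAS_OP_N -> HIPBLAS_OP_T / HIPBLAS_OP_N
// RNG state:   curandState -> hiprandState_t
// Kernel launch, as seen for the curand setup kernel in both files:
//   CUDA: ker_curand_setup<<<_max_batch_size, 1, 0, _stream>>>(_p_d_curandstate);
//   HIP:  hipLaunchKernelGGL((ker_curand_setup), dim3(_max_batch_size), dim3(1), 0, _stream, _p_d_curandstate);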
60d14faa80f5a2f16c0cc30c74cc351cc282f8dd.hip
// !!! This is a file automatically generated by hipify!!!
#include "profiler.h"
#include <stdio.h>
#include <string>
#include <stdexcept>

//call once to initialize the
void CudaProfiler::initStack(const char * profile_filename)
{
    fProfileLog = fopen(profile_filename, "w");
    fprintf(fProfileLog, "day,stack_depth,function_name,problem_size,inclusive_time_milliseconds,exclusive_time_milliseconds\n");

    fFunctionCalls = NULL;

    stack_depth = -1;
    for(int i = 0; i < MAX_STACK_DEPTH; i++)
    {
        hipEventCreate(&profile_eventStack_start[i]);
        hipEventCreate(&profile_eventStack_end[i]);
        profile_timeInChildren[i] = 0.0;
    }
}

//call when you begin a function - pushes a new timer event onto stack
void CudaProfiler::beginFunction(int current_day, const char * function_name)
{
    //check whether the stack is full
    if(stack_depth == MAX_STACK_DEPTH - 1)
    {
        fprintf(fProfileLog,"Error: profiler stack overflowed!\n");
        fflush(fProfileLog);
        exit(1);
    }

    stack_depth++;
    profile_timeInChildren[stack_depth] = 0.0;
    profile_functionName[stack_depth] = function_name;

    if(fFunctionCalls != NULL){
        fprintf(fFunctionCalls, "%d,%d,beginning %s\n",current_day, stack_depth, function_name);
        fflush(fFunctionCalls);
    }

    hipEventRecord(profile_eventStack_start[stack_depth]);
}

//call when you end a function - pops a timer event and pushes the inclusive time upwards
void CudaProfiler::endFunction(int current_day, int problem_size)
{
    //record the event and synchronize
    hipEventRecord(profile_eventStack_end[stack_depth]);
    hipEventSynchronize(profile_eventStack_end[stack_depth]);

    //get the elapsed time between the events
    float inclusive_milliseconds;
    hipEventElapsedTime(
        &inclusive_milliseconds,
        profile_eventStack_start[stack_depth],
        profile_eventStack_end[stack_depth]);

    //if this is the child of another function, add this time to the parent's child counter
    if(stack_depth > 0)
        profile_timeInChildren[stack_depth - 1] += inclusive_milliseconds;

    //if this function called any children, their time will be subtracted from this function's total time
    double exclusive_milliseconds = inclusive_milliseconds - profile_timeInChildren[stack_depth];

    //write log
    fprintf(fProfileLog, "%d, %d, %s, %d, %f, %lf\n",
        current_day, stack_depth, profile_functionName[stack_depth],
        problem_size, inclusive_milliseconds, exclusive_milliseconds);
    fflush(fProfileLog);

    if(fFunctionCalls != NULL)
    {
        fprintf(fFunctionCalls, "%d,%d,ending %s\n",
            current_day, stack_depth, profile_functionName[stack_depth]);
        fflush(fFunctionCalls);
    }

    //rotate to parent
    stack_depth--;
}

void CudaProfiler::done()
{
    //close streams
    fclose(fProfileLog);
    if(fFunctionCalls != NULL)
        fclose(fFunctionCalls);

    //destroy event objects
    for(int i = 0; i < MAX_STACK_DEPTH; i++)
    {
        hipEventDestroy(profile_eventStack_start[i]);
        hipEventDestroy(profile_eventStack_end[i]);
    }
}

void CudaProfiler::dailyFlush()
{
    fflush(fProfileLog);

    if(stack_depth > 0)
    {
        printf("ERROR: missed profile_end_function call somewhere\n");
        exit(1);
    }
    if(stack_depth < 0)
    {
        printf("ERROR: too many profile_end_function calls somewhere\n");
        exit(1);
    }
}

const char * CudaProfiler::getCurrentFuncName()
{
    if(stack_depth >= 0 && stack_depth < MAX_STACK_DEPTH)
        return profile_functionName[stack_depth];
    else
        throw std::runtime_error(std::string("No function on stack"));
}
60d14faa80f5a2f16c0cc30c74cc351cc282f8dd.cu
#include "profiler.h"
#include <stdio.h>
#include <string>
#include <stdexcept>

//call once to initialize the
void CudaProfiler::initStack(const char * profile_filename)
{
    fProfileLog = fopen(profile_filename, "w");
    fprintf(fProfileLog, "day,stack_depth,function_name,problem_size,inclusive_time_milliseconds,exclusive_time_milliseconds\n");

    fFunctionCalls = NULL;

    stack_depth = -1;
    for(int i = 0; i < MAX_STACK_DEPTH; i++)
    {
        cudaEventCreate(&profile_eventStack_start[i]);
        cudaEventCreate(&profile_eventStack_end[i]);
        profile_timeInChildren[i] = 0.0;
    }
}

//call when you begin a function - pushes a new timer event onto stack
void CudaProfiler::beginFunction(int current_day, const char * function_name)
{
    //check whether the stack is full
    if(stack_depth == MAX_STACK_DEPTH - 1)
    {
        fprintf(fProfileLog,"Error: profiler stack overflowed!\n");
        fflush(fProfileLog);
        exit(1);
    }

    stack_depth++;
    profile_timeInChildren[stack_depth] = 0.0;
    profile_functionName[stack_depth] = function_name;

    if(fFunctionCalls != NULL){
        fprintf(fFunctionCalls, "%d,%d,beginning %s\n",current_day, stack_depth, function_name);
        fflush(fFunctionCalls);
    }

    cudaEventRecord(profile_eventStack_start[stack_depth]);
}

//call when you end a function - pops a timer event and pushes the inclusive time upwards
void CudaProfiler::endFunction(int current_day, int problem_size)
{
    //record the event and synchronize
    cudaEventRecord(profile_eventStack_end[stack_depth]);
    cudaEventSynchronize(profile_eventStack_end[stack_depth]);

    //get the elapsed time between the events
    float inclusive_milliseconds;
    cudaEventElapsedTime(
        &inclusive_milliseconds,
        profile_eventStack_start[stack_depth],
        profile_eventStack_end[stack_depth]);

    //if this is the child of another function, add this time to the parent's child counter
    if(stack_depth > 0)
        profile_timeInChildren[stack_depth - 1] += inclusive_milliseconds;

    //if this function called any children, their time will be subtracted from this function's total time
    double exclusive_milliseconds = inclusive_milliseconds - profile_timeInChildren[stack_depth];

    //write log
    fprintf(fProfileLog, "%d, %d, %s, %d, %f, %lf\n",
        current_day, stack_depth, profile_functionName[stack_depth],
        problem_size, inclusive_milliseconds, exclusive_milliseconds);
    fflush(fProfileLog);

    if(fFunctionCalls != NULL)
    {
        fprintf(fFunctionCalls, "%d,%d,ending %s\n",
            current_day, stack_depth, profile_functionName[stack_depth]);
        fflush(fFunctionCalls);
    }

    //rotate to parent
    stack_depth--;
}

void CudaProfiler::done()
{
    //close streams
    fclose(fProfileLog);
    if(fFunctionCalls != NULL)
        fclose(fFunctionCalls);

    //destroy event objects
    for(int i = 0; i < MAX_STACK_DEPTH; i++)
    {
        cudaEventDestroy(profile_eventStack_start[i]);
        cudaEventDestroy(profile_eventStack_end[i]);
    }
}

void CudaProfiler::dailyFlush()
{
    fflush(fProfileLog);

    if(stack_depth > 0)
    {
        printf("ERROR: missed profile_end_function call somewhere\n");
        exit(1);
    }
    if(stack_depth < 0)
    {
        printf("ERROR: too many profile_end_function calls somewhere\n");
        exit(1);
    }
}

const char * CudaProfiler::getCurrentFuncName()
{
    if(stack_depth >= 0 && stack_depth < MAX_STACK_DEPTH)
        return profile_functionName[stack_depth];
    else
        throw std::runtime_error(std::string("No function on stack"));
}
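Neither file shows a call site, so the following is a hedged usage sketch for this profiler. It assumes profiler.h declares the CudaProfiler members used above; the kernel, day loop, and problem size are placeholders. Note that dailyFlush() expects exactly one open scope (stack_depth == 0), so it is called while the day-level scope is still open:

#include "profiler.h"   // assumption: declares CudaProfiler, MAX_STACK_DEPTH, and the event arrays

__global__ void dummy_kernel() {}   // placeholder work to be timed

int main() {
    CudaProfiler profiler;
    profiler.initStack("profile.csv");
    for (int day = 0; day < 3; day++) {
        profiler.beginFunction(day, "simulate_day");   // day-level scope
        profiler.beginFunction(day, "dummy_kernel");   // nested scope
        dummy_kernel<<<1, 32>>>();
        profiler.endFunction(day, /*problem_size=*/32);  // logs the kernel's inclusive time
        profiler.dailyFlush();   // valid while only the day-level scope remains open
        profiler.endFunction(day, 32);   // logs simulate_day's exclusive time
    }
    profiler.done();   // closes the CSV and destroys the CUDA events
    return 0;
}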
146be6d44bf8fcb8877caf519dacf8d4acbce4e4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void sum(int *a, int *b, int *c)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    c[i] = a[i] + b[i];
}
146be6d44bf8fcb8877caf519dacf8d4acbce4e4.cu
#include "includes.h"

__global__ void sum(int *a, int *b, int *c)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    c[i] = a[i] + b[i];
}
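A host-side driver for the sum kernel above might look like the following. The element count, block size, and use of managed memory are assumptions made to keep the sketch self-contained (includes.h is not available here), and the kernel is repeated so the snippet compiles on its own.

#include <cstdio>
#include <cuda_runtime.h>

// Kernel repeated from the file above.
__global__ void sum(int *a, int *b, int *c) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    c[i] = a[i] + b[i];
}

int main() {
    const int N = 1 << 10;                      // assumed size; a multiple of the block size,
                                                // since the kernel has no bounds check
    int *a, *b, *c;
    cudaMallocManaged(&a, N * sizeof(int));     // unified memory keeps the sketch short
    cudaMallocManaged(&b, N * sizeof(int));
    cudaMallocManaged(&c, N * sizeof(int));
    for (int i = 0; i < N; ++i) { a[i] = i; b[i] = 2 * i; }

    const int block = 256;
    sum<<<N / block, block>>>(a, b, c);         // one thread per element
    cudaDeviceSynchronize();

    printf("c[10] = %d\n", c[10]);              // expect 30
    cudaFree(a); cudaFree(b); cudaFree(c);
    return 0;
}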
fc5edf8a68ce4e501876e9eb2ec68d66891f898a.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************************************** * * (C) Copyright 2014 The Board of Trustees of the * Florida Institute of Technology * All Rights Reserved * * Lab Image Filters ******************************************************************************/ #include "ImageFilters.h" //CUDA #include <helper_cuda.h> #include <helper_functions.h> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <helper_string.h> #include <hip/hip_texture_types.h> #include <texture_fetch_functions.h> /*TEXTURES*/ texture<uchar4, 2, hipReadModeNormalizedFloat> texImage_rgb; hipChannelFormatDesc uchar4tex = hipCreateChannelDesc<uchar4>(); hipArray *cu_image; extern "C" void CUDA_CreateMemoryArray(int imageW,int imageH){ hipMallocArray(&cu_image, &uchar4tex, imageW, imageH); } extern "C" void CUDA_BindTextureToArray(){ hipBindTextureToArray(texImage_rgb,cu_image); } extern "C" void CUDA_FreeArrays(){ hipFreeArray(cu_image); } extern "C" void CUDA_MemcpyToArray(uchar4 *src,int imageW,int imageH){ hipMemcpyToArray( cu_image, 0, 0,src, imageW * imageH * sizeof(uchar4), hipMemcpyDeviceToDevice); } /*************************************** Box Filter *****************************************/ __constant__ float MeanKernel[9] = {1,1,1, 1,1,1, 1,1,1}; __constant__ float MeanKernel_55[25] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; __constant__ float MeanKernel_77[49] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; /*************************************** Mean Filter Kernel Function *****************************************/ __global__ void MeanFilter(uchar4 *Image_dev, int w, int h){ const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; const float x=(float)ix+0.5f; const float y=(float)iy+0.5f; int win_W=1; //int win_W = 2; //int win_W = 3; if(ix < w && iy < h){ float4 pixval; float3 sum; sum.x=0.0f; sum.y=0.0f; sum.z=0.0f; int k=0; for(int ii=-win_W;ii<=win_W;ii++){ for(int jj=-win_W;jj<=win_W;jj++){ pixval=tex2D(texImage_rgb,x+ii,y+jj); sum.x += pixval.x*MeanKernel[k]; sum.y += pixval.y*MeanKernel[k]; sum.z += pixval.z*MeanKernel[k]; //sum.x += pixval.x*MeanKernel_55[k]; //sum.y += pixval.y*MeanKernel_55[k]; //sum.z += pixval.z*MeanKernel_55[k]; //sum.x += pixval.x*MeanKernel_77[k]; //sum.y += pixval.y*MeanKernel_77[k]; //sum.z += pixval.z*MeanKernel_77[k]; k++; } } Image_dev[w*iy+ix].x=(unsigned char)((sum.x/9)*255); Image_dev[w*iy+ix].y=(unsigned char)((sum.y/9)*255); Image_dev[w*iy+ix].z=(unsigned char)((sum.z/9)*255); //Image_dev[w*iy + ix].x = (unsigned char)((sum.x / 25) * 255); //Image_dev[w*iy + ix].y = (unsigned char)((sum.y / 25) * 255); //Image_dev[w*iy + ix].z = (unsigned char)((sum.z / 25) * 255); //Image_dev[w*iy + ix].x = (unsigned char)((sum.x / 49) * 255); //Image_dev[w*iy + ix].y = (unsigned char)((sum.y / 49) * 255); //Image_dev[w*iy + ix].z = (unsigned char)((sum.z / 49) * 255); } } /*************************************** Mean Filter Calling Function *****************************************/ extern "C" void CUDA_MeanFilter(uchar4 *Image_dev,int imageW,int imageH,dim3 grid,dim3 threads){ hipLaunchKernelGGL(( MeanFilter), dim3(grid),dim3(threads), 0, 0, Image_dev,imageW,imageH); }
fc5edf8a68ce4e501876e9eb2ec68d66891f898a.cu
/****************************************************************************** * * (C) Copyright 2014 The Board of Trustees of the * Florida Institute of Technology * All Rights Reserved * * Lab Image Filters ******************************************************************************/ #include "ImageFilters.h" //CUDA #include <helper_cuda.h> #include <helper_functions.h> #include <device_launch_parameters.h> #include <cuda_runtime.h> #include <cuda.h> #include <helper_string.h> #include <cuda_texture_types.h> #include <texture_fetch_functions.h> /*TEXTURES*/ texture<uchar4, 2, cudaReadModeNormalizedFloat> texImage_rgb; cudaChannelFormatDesc uchar4tex = cudaCreateChannelDesc<uchar4>(); cudaArray *cu_image; extern "C" void CUDA_CreateMemoryArray(int imageW,int imageH){ cudaMallocArray(&cu_image, &uchar4tex, imageW, imageH); } extern "C" void CUDA_BindTextureToArray(){ cudaBindTextureToArray(texImage_rgb,cu_image); } extern "C" void CUDA_FreeArrays(){ cudaFreeArray(cu_image); } extern "C" void CUDA_MemcpyToArray(uchar4 *src,int imageW,int imageH){ cudaMemcpyToArray( cu_image, 0, 0,src, imageW * imageH * sizeof(uchar4), cudaMemcpyDeviceToDevice); } /*************************************** Box Filter *****************************************/ __constant__ float MeanKernel[9] = {1,1,1, 1,1,1, 1,1,1}; __constant__ float MeanKernel_55[25] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; __constant__ float MeanKernel_77[49] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; /*************************************** Mean Filter Kernel Function *****************************************/ __global__ void MeanFilter(uchar4 *Image_dev, int w, int h){ const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; const float x=(float)ix+0.5f; const float y=(float)iy+0.5f; int win_W=1; //int win_W = 2; //int win_W = 3; if(ix < w && iy < h){ float4 pixval; float3 sum; sum.x=0.0f; sum.y=0.0f; sum.z=0.0f; int k=0; for(int ii=-win_W;ii<=win_W;ii++){ for(int jj=-win_W;jj<=win_W;jj++){ pixval=tex2D(texImage_rgb,x+ii,y+jj); sum.x += pixval.x*MeanKernel[k]; sum.y += pixval.y*MeanKernel[k]; sum.z += pixval.z*MeanKernel[k]; //sum.x += pixval.x*MeanKernel_55[k]; //sum.y += pixval.y*MeanKernel_55[k]; //sum.z += pixval.z*MeanKernel_55[k]; //sum.x += pixval.x*MeanKernel_77[k]; //sum.y += pixval.y*MeanKernel_77[k]; //sum.z += pixval.z*MeanKernel_77[k]; k++; } } Image_dev[w*iy+ix].x=(unsigned char)((sum.x/9)*255); Image_dev[w*iy+ix].y=(unsigned char)((sum.y/9)*255); Image_dev[w*iy+ix].z=(unsigned char)((sum.z/9)*255); //Image_dev[w*iy + ix].x = (unsigned char)((sum.x / 25) * 255); //Image_dev[w*iy + ix].y = (unsigned char)((sum.y / 25) * 255); //Image_dev[w*iy + ix].z = (unsigned char)((sum.z / 25) * 255); //Image_dev[w*iy + ix].x = (unsigned char)((sum.x / 49) * 255); //Image_dev[w*iy + ix].y = (unsigned char)((sum.y / 49) * 255); //Image_dev[w*iy + ix].z = (unsigned char)((sum.z / 49) * 255); } } /*************************************** Mean Filter Calling Function *****************************************/ extern "C" void CUDA_MeanFilter(uchar4 *Image_dev,int imageW,int imageH,dim3 grid,dim3 threads){ MeanFilter<<<grid,threads>>>(Image_dev,imageW,imageH); }
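The extern "C" wrappers above imply a fixed call order on the host side: create the cudaArray, stage the frame, bind the texture, then launch the filter. A sketch of that sequence follows; the 16x16 block shape and the assumption that Image_dev already holds the frame on the device are illustrative, not taken from the original project.

#include <cuda_runtime.h>

extern "C" void CUDA_CreateMemoryArray(int imageW, int imageH);
extern "C" void CUDA_BindTextureToArray();
extern "C" void CUDA_FreeArrays();
extern "C" void CUDA_MemcpyToArray(uchar4 *src, int imageW, int imageH);
extern "C" void CUDA_MeanFilter(uchar4 *Image_dev, int imageW, int imageH, dim3 grid, dim3 threads);

void runMeanFilter(uchar4 *Image_dev, int imageW, int imageH) {
    CUDA_CreateMemoryArray(imageW, imageH);         // allocate the cudaArray backing texImage_rgb
    CUDA_MemcpyToArray(Image_dev, imageW, imageH);  // stage the current frame into the array
    CUDA_BindTextureToArray();                      // bind the texture so tex2D reads see the frame
    dim3 threads(16, 16);                           // assumed block shape
    dim3 grid((imageW + threads.x - 1) / threads.x,
              (imageH + threads.y - 1) / threads.y);
    CUDA_MeanFilter(Image_dev, imageW, imageH, grid, threads);  // 3x3 mean written back to Image_dev
    CUDA_FreeArrays();                              // release the cudaArray
}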
7888d7429c861dc500a3add62fd8f472f5784d76.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../bm_config.hpp"

__global__ void raw1f_copy_kernel(float* p_src, float* p_dst, int_t size) {
    for (int_t i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) {
        p_dst[i] = p_src[i];
    }
}

void bm_cuda_raw1f_for_copy(benchmark::State& state) {
    int ts_size = state.range(0);
    cuda::tensor<float, 1> cts_src(ts_size);
    cuda::tensor<float, 1> cts_dst(ts_size);

    cuda::for_index_execution_policy policy;
    policy.total_size(cts_src.size());
    cuda::configure_grid(policy, raw1f_copy_kernel);

    while (state.KeepRunning()) {
        hipLaunchKernelGGL(( raw1f_copy_kernel), dim3(policy.grid_dim()[0]), dim3(policy.block_dim()[0]),
                           policy.shared_mem_bytes(), policy.stream(),
                           cts_src.data(), cts_dst.data(), cts_src.size());
        hipDeviceSynchronize();
        benchmark::DoNotOptimize(cts_dst.data());
    }

    state.SetBytesProcessed(state.iterations() * static_cast<size_t>(cts_src.size()) * sizeof(cts_src[0]));
    state.SetItemsProcessed(state.iterations() * static_cast<size_t>(cts_src.size()));
}
BENCHMARK(bm_cuda_raw1f_for_copy)->Arg(1_G);

template <typename tensor_type>
inline void bm_tensor_for_array_index_copy(benchmark::State& state) {
    int ts_size = state.range(0);
    constexpr int_t rank = tensor_type::rank;
    pointi<rank> shape;
    fill(shape, ts_size);
    tensor_type ts_src(shape);
    tensor_type ts_dst(shape);

    while (state.KeepRunning()) {
        cuda::for_index(shape, [ts_src, ts_dst] MATAZURE_GENERAL(pointi<rank> idx) {
            ts_dst(idx) = ts_src(idx);
        });
        // cuda::copy(ts_src, ts_dst);
        benchmark::DoNotOptimize(ts_dst.data());
    }

    state.SetBytesProcessed(state.iterations() * static_cast<size_t>(ts_src.size()) * sizeof(ts_src[0]));
    state.SetItemsProcessed(state.iterations() * static_cast<size_t>(ts_src.size()));
}

auto bm_cuda_tensor1f_for_array_index_copy = bm_tensor_for_array_index_copy<cuda::tensor<float, 1>>;
auto bm_cuda_tensor2f_for_array_index_copy = bm_tensor_for_array_index_copy<cuda::tensor<float, 2>>;
BENCHMARK(bm_cuda_tensor1f_for_array_index_copy)->Arg(1_G);
// 2D indexing in CUDA costs more here, roughly a 2% penalty, suggesting the compiler
// cannot fully optimize the array-index form of the access.
BENCHMARK(bm_cuda_tensor2f_for_array_index_copy)->Arg(10_K);
7888d7429c861dc500a3add62fd8f472f5784d76.cu
#include "../bm_config.hpp"

__global__ void raw1f_copy_kernel(float* p_src, float* p_dst, int_t size) {
    for (int_t i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) {
        p_dst[i] = p_src[i];
    }
}

void bm_cuda_raw1f_for_copy(benchmark::State& state) {
    int ts_size = state.range(0);
    cuda::tensor<float, 1> cts_src(ts_size);
    cuda::tensor<float, 1> cts_dst(ts_size);

    cuda::for_index_execution_policy policy;
    policy.total_size(cts_src.size());
    cuda::configure_grid(policy, raw1f_copy_kernel);

    while (state.KeepRunning()) {
        raw1f_copy_kernel<<<policy.grid_dim()[0], policy.block_dim()[0],
                            policy.shared_mem_bytes(), policy.stream()>>>(
            cts_src.data(), cts_dst.data(), cts_src.size());
        cudaDeviceSynchronize();
        benchmark::DoNotOptimize(cts_dst.data());
    }

    state.SetBytesProcessed(state.iterations() * static_cast<size_t>(cts_src.size()) * sizeof(cts_src[0]));
    state.SetItemsProcessed(state.iterations() * static_cast<size_t>(cts_src.size()));
}
BENCHMARK(bm_cuda_raw1f_for_copy)->Arg(1_G);

template <typename tensor_type>
inline void bm_tensor_for_array_index_copy(benchmark::State& state) {
    int ts_size = state.range(0);
    constexpr int_t rank = tensor_type::rank;
    pointi<rank> shape;
    fill(shape, ts_size);
    tensor_type ts_src(shape);
    tensor_type ts_dst(shape);

    while (state.KeepRunning()) {
        cuda::for_index(shape, [ts_src, ts_dst] MATAZURE_GENERAL(pointi<rank> idx) {
            ts_dst(idx) = ts_src(idx);
        });
        // cuda::copy(ts_src, ts_dst);
        benchmark::DoNotOptimize(ts_dst.data());
    }

    state.SetBytesProcessed(state.iterations() * static_cast<size_t>(ts_src.size()) * sizeof(ts_src[0]));
    state.SetItemsProcessed(state.iterations() * static_cast<size_t>(ts_src.size()));
}

auto bm_cuda_tensor1f_for_array_index_copy = bm_tensor_for_array_index_copy<cuda::tensor<float, 1>>;
auto bm_cuda_tensor2f_for_array_index_copy = bm_tensor_for_array_index_copy<cuda::tensor<float, 2>>;
BENCHMARK(bm_cuda_tensor1f_for_array_index_copy)->Arg(1_G);
// 2D indexing in CUDA costs more time here, roughly a 2% penalty, which suggests the compiler
// cannot fully optimize the array-index form of the access.
BENCHMARK(bm_cuda_tensor2f_for_array_index_copy)->Arg(10_K);
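cuda::configure_grid and the execution policy above come from the matazure tensor library and are not shown here. A rough, runtime-API-only equivalent of that configuration step, built on the CUDA occupancy calculator, could look like the following; the capping heuristic is an assumption, not matazure's actual policy, and plain int stands in for the library's int_t.

#include <algorithm>
#include <cuda_runtime.h>

__global__ void grid_stride_copy(const float* p_src, float* p_dst, int size) {
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) {
        p_dst[i] = p_src[i];
    }
}

// Choose a launch configuration for a grid-stride kernel.
void configureGrid(int total_size, int& grid_dim, int& block_dim) {
    int min_grid = 0;
    // Block size that maximizes occupancy, and the minimum grid that reaches it.
    cudaOccupancyMaxPotentialBlockSize(&min_grid, &block_dim, grid_stride_copy, 0, 0);
    // A grid-stride loop only needs enough blocks to cover the work once;
    // blocks beyond the occupancy-saturating grid add no benefit.
    grid_dim = std::min(min_grid, (total_size + block_dim - 1) / block_dim);
}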
b7916a7e40fe6d25dfe4c159c46f067d1df46c62.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void sync_gdn_groups()
{
}
b7916a7e40fe6d25dfe4c159c46f067d1df46c62.cu
#include "includes.h"

__global__ void sync_gdn_groups()
{
}
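The two versions of this file differ only in the generated banner and runtime header, because the kernel is never launched here. For reference, the way hipify rewrites an actual launch elsewhere in this set can be reduced to the sketch below; the launch geometry is assumed, and the HIP form is shown in a comment so the snippet itself stays plain CUDA.

#include <cuda_runtime.h>

__global__ void sync_gdn_groups() {
}

int main() {
    // CUDA triple-chevron launch...
    sync_gdn_groups<<<1, 1>>>();
    // ...which hipify emits as roughly:
    //   hipLaunchKernelGGL(sync_gdn_groups, dim3(1), dim3(1), 0, 0);
    // (grid, block, dynamic shared memory bytes, stream, then any kernel arguments)
    cudaDeviceSynchronize();
    return 0;
}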
6a5292b37ce89efa3d579c9107f5a98c24416a97.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @File main.cu * * The main file of the project * * Parallel Computations on GPU (PCG 2020) * Assignment no. 1 (cuda) * Login: xstupi00 */ #include <sys/time.h> #include <cstdio> #include <cmath> #include <vector> #include "nbody.h" #include "h5Helper.h" #define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } /** * Main rotine * @param argc * @param argv * @return */ int main(int argc, char **argv) { // Time measurement struct timeval t1, t2; if (argc != 10) { printf("Usage: nbody <N> <dt> <steps> <threads/block> <write intesity> <reduction threads> <reduction threads/block> <input> <output>\n"); exit(1); } // Number of particles const int N = std::stoi(argv[1]); // Length of time step const float dt = std::stof(argv[2]); // Number of steps const int steps = std::stoi(argv[3]); // Number of thread blocks const int thr_blc = std::stoi(argv[4]); // Write frequency int writeFreq = std::stoi(argv[5]); // number of reduction threads const int red_thr = std::stoi(argv[6]); // Number of reduction threads/blocks const int red_thr_blc = std::stoi(argv[7]); // Size of the simulation CUDA gird - number of blocks const size_t simulationGrid = (N + thr_blc - 1) / thr_blc; // Size of the reduction CUDA grid - number of blocks const size_t reductionGrid = (red_thr + red_thr_blc - 1) / red_thr_blc; // Log benchmark setup printf("N: %d\n", N); printf("dt: %f\n", dt); printf("steps: %d\n", steps); printf("threads/block: %d\n", thr_blc); printf("blocks/grid: %lu\n", simulationGrid); printf("reduction threads/block: %d\n", red_thr_blc); printf("reduction blocks/grid: %lu\n", reductionGrid); // Number of records to continuous writing of partial results const size_t recordsNum = (writeFreq > 0) ? (steps + writeFreq - 1) / writeFreq : 0; writeFreq = (writeFreq > 0) ? writeFreq : 0; // CPU particles structures t_particles particles_cpu; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: CPU side memory allocation (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // The overall memory size of input particles size_t size = N * sizeof(float); // Allocates page-locked memory on the host. Maps the allocation into the CUDA address space checkCudaErrors(hipHostMalloc(&particles_cpu.pos_x, size, hipHostMallocMapped)); checkCudaErrors(hipHostMalloc(&particles_cpu.pos_y, size, hipHostMallocMapped)); checkCudaErrors(hipHostMalloc(&particles_cpu.pos_z, size, hipHostMallocMapped)); checkCudaErrors(hipHostMalloc(&particles_cpu.vel_x, size, hipHostMallocMapped)); checkCudaErrors(hipHostMalloc(&particles_cpu.vel_y, size, hipHostMallocMapped)); checkCudaErrors(hipHostMalloc(&particles_cpu.vel_z, size, hipHostMallocMapped)); checkCudaErrors(hipHostMalloc(&particles_cpu.weight, size, hipHostMallocMapped)); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory layout descriptor (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /* * Caution! 
Create only after CPU side allocation * parameters: * Stride of two Offset of the first * Data pointer consecutive elements element in floats, * in floats, not bytes not bytes */ MemDesc md( particles_cpu.pos_x, 1, 0, // Position in X particles_cpu.pos_y, 1, 0, // Position in Y particles_cpu.pos_z, 1, 0, // Position in Z particles_cpu.vel_x, 1, 0, // Velocity in X particles_cpu.vel_y, 1, 0, // Velocity in Y particles_cpu.vel_z, 1, 0, // Velocity in Z particles_cpu.weight, 1, 0, // Weight N, // Number of particles recordsNum // Number of records in output file ); // Initialisation of helper class and loading of input data H5Helper h5Helper(argv[8], argv[9], md); try { h5Helper.init(); h5Helper.readParticleData(); } catch (const std::exception &e) { std::cerr << e.what() << std::endl; return -1; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: GPU side memory allocation (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // GPU particles structure std::vector<t_particles> particles_gpu(2); // Allocate memory on the device for (auto &p_gpu : particles_gpu) { checkCudaErrors(hipMalloc(&p_gpu.pos_x, size)); checkCudaErrors(hipMalloc(&p_gpu.pos_y, size)); checkCudaErrors(hipMalloc(&p_gpu.pos_z, size)); checkCudaErrors(hipMalloc(&p_gpu.vel_x, size)); checkCudaErrors(hipMalloc(&p_gpu.vel_y, size)); checkCudaErrors(hipMalloc(&p_gpu.vel_z, size)); checkCudaErrors(hipMalloc(&p_gpu.weight, size)); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory transfers (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Copies particles data from host to device. 
checkCudaErrors(hipMemcpy(particles_gpu[0].pos_x, particles_cpu.pos_x, size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(particles_gpu[0].pos_y, particles_cpu.pos_y, size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(particles_gpu[0].pos_z, particles_cpu.pos_z, size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(particles_gpu[0].vel_x, particles_cpu.vel_x, size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(particles_gpu[0].vel_y, particles_cpu.vel_y, size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(particles_gpu[0].vel_z, particles_cpu.vel_z, size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(particles_gpu[0].weight, particles_cpu.weight, size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(particles_gpu[1].weight, particles_gpu[0].weight, size, hipMemcpyDeviceToDevice)); // CPU Center of Mass structure - x, y, z, w float4* comCPU; // GPU Center of Mass structure - x, y, z, w float4* comGPU; // Lock to ensure the mutual exclusion for reducing the result to the global memory int* lock; // Allocates page-locked memory on the host checkCudaErrors(hipHostMalloc(&comCPU, sizeof(float4), hipHostMallocMapped)); // Allocate memory on the device checkCudaErrors(hipMalloc(&comGPU, sizeof(float4))); checkCudaErrors(hipMalloc(&lock, sizeof(int))); // Initializes or sets device memory to a zero value checkCudaErrors(hipMemset(comCPU, 0, sizeof(float4))); checkCudaErrors(hipMemset(lock, 0, sizeof(int))); // Compute the size of the shared memory (for one grid block) size_t shm_mem_calc = thr_blc * sizeof(float) * 7; // Computes the size of the shared memory for reduction block size_t shm_mem_mass = (red_thr_blc / 32) * sizeof(float) * 4; // Create CUDA stream to perform effect concurrency hipStream_t cm_stream, cp_stream, mem_stream; // Create an asynchronous stream. checkCudaErrors(hipStreamCreate(&cm_stream)); // Compute Mass stream checkCudaErrors(hipStreamCreate(&cp_stream)); // Compute Particles stream checkCudaErrors(hipStreamCreate(&mem_stream)); // Write particles stream // Create CUDA events to synchronize individual CUDA stream during the computation hipEvent_t cm_event, cp_event; // Creates an event object. 
checkCudaErrors(hipEventCreate(&cm_event)); // Compute Mass event checkCudaErrors(hipEventCreate(&cp_event)); // Compute Particles event // Auxiliary variable to register a record number at continuous writing to the file size_t records = 0; gettimeofday(&t1, 0); for (int s = 0; s < steps; s++) { //////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: kernels invocation (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Run the kernel computing particles velocity in relevant Compute Particles stream hipLaunchKernelGGL(( calculate_velocity), dim3(simulationGrid), dim3(thr_blc), shm_mem_calc, cp_stream , particles_gpu[s & 1ul], particles_gpu[(s + 1) & 1ul], N, dt); // Records an event for finish the calculation of velocity checkCudaErrors(hipEventRecord(cp_event, cp_stream)); if (writeFreq > 0 && (s % writeFreq == 0)) { // Initializes or sets device memory to a value in the relevant Compute mass stream checkCudaErrors(hipMemsetAsync(comGPU, 0, sizeof(float4), cm_stream)); // Calls reduction kernel to compute the Center of Mass callCenterOfMass( particles_gpu[s & 1ul], &comGPU[0], &lock[0], N, reductionGrid, red_thr_blc, shm_mem_mass, cm_stream ); // Sets event when finished the computation of Center of Mass in relevant stream checkCudaErrors(hipEventRecord(cm_event, cm_stream)); // Copies particles data from device to host // Copies the particles data computed in the previous iteration to CPU for writing to the file checkCudaErrors(hipMemcpyAsync( particles_cpu.pos_x, particles_gpu[s & 1ul].pos_x, size, hipMemcpyDeviceToHost, mem_stream )); checkCudaErrors(hipMemcpyAsync( particles_cpu.pos_y, particles_gpu[s & 1ul].pos_y, size, hipMemcpyDeviceToHost, mem_stream )); checkCudaErrors(hipMemcpyAsync( particles_cpu.pos_z, particles_gpu[s & 1ul].pos_z, size, hipMemcpyDeviceToHost, mem_stream )); checkCudaErrors(hipMemcpyAsync( particles_cpu.vel_x, particles_gpu[s & 1ul].vel_x, size, hipMemcpyDeviceToHost, mem_stream )); checkCudaErrors(hipMemcpyAsync( particles_cpu.vel_y, particles_gpu[s & 1ul].vel_y, size, hipMemcpyDeviceToHost, mem_stream )); checkCudaErrors(hipMemcpyAsync( particles_cpu.vel_z, particles_gpu[s & 1ul].vel_z, size, hipMemcpyDeviceToHost, mem_stream )); //////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: synchronization (step 4) // //////////////////////////////////////////////////////////////////////////////////////////////////////////// // Waits for memory copy stream when finished the tasks to copy particles data from GPU to CPU. checkCudaErrors(hipStreamSynchronize(mem_stream)); // Waits for event which marks the finished of the Center of Mass computation checkCudaErrors(hipStreamWaitEvent(mem_stream, cm_event, 0)); // Copies Center of mass data from device to host in the relevant memory copy stream checkCudaErrors(hipMemcpyAsync(comCPU, comGPU, sizeof(float4), hipMemcpyDeviceToHost, mem_stream)); //////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: synchronization and file access logic (step 4) // //////////////////////////////////////////////////////////////////////////////////////////////////////////// // Writing final values to the file h5Helper.writeParticleData(records); // Waits for memory stream finished the copying of center of mass data from CPU to GPU. 
checkCudaErrors(hipStreamSynchronize(mem_stream)); // Writing center of mass data to the file h5Helper.writeCom( comCPU[0].x / comCPU[0].w, comCPU[0].y / comCPU[0].w, comCPU[0].z / comCPU[0].w, comCPU[0].w, records++ ); } // Make a compute stream wait on an event. // Before the next iteration have to be finished the calculation of velocities within relevant kernel checkCudaErrors(hipStreamWaitEvent(mem_stream, cp_event, 0)); checkCudaErrors(hipStreamWaitEvent(cm_stream, cp_event, 0)); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: invocation of center-of-mass kernel (step 3.1, step 3.2, step 4) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// hipDeviceSynchronize(); // Initializes or sets device memory to a zero value checkCudaErrors(hipMemset(comGPU, 0, sizeof(float4))); // Calls reduction kernel to compute the final Center of Mass results callCenterOfMass( particles_gpu[steps & 1], &comGPU[0], &lock[0], N, reductionGrid, red_thr_blc, shm_mem_mass, (hipStream_t) 0 ); gettimeofday(&t2, 0); // Approximate simulation wall time double t = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000000.0; printf("Time: %f s\n", t); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory transfers for particle data (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Copies particles data from device to host checkCudaErrors(hipMemcpy(particles_cpu.pos_x, particles_gpu[steps & 1].pos_x, size, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(particles_cpu.pos_y, particles_gpu[steps & 1].pos_y, size, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(particles_cpu.pos_z, particles_gpu[steps & 1].pos_z, size, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(particles_cpu.vel_x, particles_gpu[steps & 1].vel_x, size, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(particles_cpu.vel_y, particles_gpu[steps & 1].vel_y, size, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(particles_cpu.vel_z, particles_gpu[steps & 1].vel_z, size, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(particles_cpu.weight, particles_gpu[steps & 1].weight, size, hipMemcpyDeviceToHost)); // Copies Center of Mass data from device to host checkCudaErrors(hipMemcpy(comCPU, comGPU, sizeof(float4), hipMemcpyDeviceToHost)); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory transfers for center-of-mass (step 3.1, step 3.2) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// float4 comOnCPU = centerOfMassCPU(md); std::cout << "Center of mass on CPU:" << std::endl << comOnCPU.x << ", " << comOnCPU.y << ", " << comOnCPU.z << ", " << comOnCPU.w << std::endl; std::cout << "Center of mass on GPU:" << std::endl << comCPU[0].x / comCPU[0].w << ", " << comCPU[0].y / comCPU[0].w << ", " << comCPU[0].z / comCPU[0].w << ", " << comCPU[0].w << std::endl; // Writing final values to the file h5Helper.writeComFinal( comCPU[0].x / comCPU[0].w, comCPU[0].y / comCPU[0].w, comCPU[0].z / comCPU[0].w, comCPU[0].w / comCPU[0].w ); h5Helper.writeParticleDataFinal(); // Destroys and cleans up an asynchronous streams checkCudaErrors(hipStreamDestroy(cp_stream)); 
checkCudaErrors(hipStreamDestroy(cm_stream)); checkCudaErrors(hipStreamDestroy(mem_stream)); // Destroys an event objects checkCudaErrors(hipEventDestroy(cp_event)); checkCudaErrors(hipEventDestroy(cm_event)); // Free page-locked memory checkCudaErrors(hipHostFree(particles_cpu.pos_x)); checkCudaErrors(hipHostFree(particles_cpu.pos_y)); checkCudaErrors(hipHostFree(particles_cpu.pos_z)); checkCudaErrors(hipHostFree(particles_cpu.vel_x)); checkCudaErrors(hipHostFree(particles_cpu.vel_y)); checkCudaErrors(hipHostFree(particles_cpu.vel_z)); checkCudaErrors(hipHostFree(particles_cpu.weight)); checkCudaErrors(hipHostFree(comCPU)); // Free memory on the device. for (auto p_gpu : particles_gpu) { checkCudaErrors(hipFree(p_gpu.pos_x)); checkCudaErrors(hipFree(p_gpu.pos_y)); checkCudaErrors(hipFree(p_gpu.pos_z)); checkCudaErrors(hipFree(p_gpu.vel_x)); checkCudaErrors(hipFree(p_gpu.vel_y)); checkCudaErrors(hipFree(p_gpu.vel_z)); checkCudaErrors(hipFree(p_gpu.weight)); } checkCudaErrors(hipFree(comGPU)); checkCudaErrors(hipFree(lock)); return 0; }// end of main //----------------------------------------------------------------------------------------------------------------------
6a5292b37ce89efa3d579c9107f5a98c24416a97.cu
/** * @File main.cu * * The main file of the project * * Parallel Computations on GPU (PCG 2020) * Assignment no. 1 (cuda) * Login: xstupi00 */ #include <sys/time.h> #include <cstdio> #include <cmath> #include <vector> #include "nbody.h" #include "h5Helper.h" #define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } /** * Main rotine * @param argc * @param argv * @return */ int main(int argc, char **argv) { // Time measurement struct timeval t1, t2; if (argc != 10) { printf("Usage: nbody <N> <dt> <steps> <threads/block> <write intesity> <reduction threads> <reduction threads/block> <input> <output>\n"); exit(1); } // Number of particles const int N = std::stoi(argv[1]); // Length of time step const float dt = std::stof(argv[2]); // Number of steps const int steps = std::stoi(argv[3]); // Number of thread blocks const int thr_blc = std::stoi(argv[4]); // Write frequency int writeFreq = std::stoi(argv[5]); // number of reduction threads const int red_thr = std::stoi(argv[6]); // Number of reduction threads/blocks const int red_thr_blc = std::stoi(argv[7]); // Size of the simulation CUDA gird - number of blocks const size_t simulationGrid = (N + thr_blc - 1) / thr_blc; // Size of the reduction CUDA grid - number of blocks const size_t reductionGrid = (red_thr + red_thr_blc - 1) / red_thr_blc; // Log benchmark setup printf("N: %d\n", N); printf("dt: %f\n", dt); printf("steps: %d\n", steps); printf("threads/block: %d\n", thr_blc); printf("blocks/grid: %lu\n", simulationGrid); printf("reduction threads/block: %d\n", red_thr_blc); printf("reduction blocks/grid: %lu\n", reductionGrid); // Number of records to continuous writing of partial results const size_t recordsNum = (writeFreq > 0) ? (steps + writeFreq - 1) / writeFreq : 0; writeFreq = (writeFreq > 0) ? writeFreq : 0; // CPU particles structures t_particles particles_cpu; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: CPU side memory allocation (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // The overall memory size of input particles size_t size = N * sizeof(float); // Allocates page-locked memory on the host. Maps the allocation into the CUDA address space checkCudaErrors(cudaHostAlloc(&particles_cpu.pos_x, size, cudaHostAllocMapped)); checkCudaErrors(cudaHostAlloc(&particles_cpu.pos_y, size, cudaHostAllocMapped)); checkCudaErrors(cudaHostAlloc(&particles_cpu.pos_z, size, cudaHostAllocMapped)); checkCudaErrors(cudaHostAlloc(&particles_cpu.vel_x, size, cudaHostAllocMapped)); checkCudaErrors(cudaHostAlloc(&particles_cpu.vel_y, size, cudaHostAllocMapped)); checkCudaErrors(cudaHostAlloc(&particles_cpu.vel_z, size, cudaHostAllocMapped)); checkCudaErrors(cudaHostAlloc(&particles_cpu.weight, size, cudaHostAllocMapped)); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory layout descriptor (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /* * Caution! 
Create only after CPU side allocation * parameters: * Stride of two Offset of the first * Data pointer consecutive elements element in floats, * in floats, not bytes not bytes */ MemDesc md( particles_cpu.pos_x, 1, 0, // Position in X particles_cpu.pos_y, 1, 0, // Position in Y particles_cpu.pos_z, 1, 0, // Position in Z particles_cpu.vel_x, 1, 0, // Velocity in X particles_cpu.vel_y, 1, 0, // Velocity in Y particles_cpu.vel_z, 1, 0, // Velocity in Z particles_cpu.weight, 1, 0, // Weight N, // Number of particles recordsNum // Number of records in output file ); // Initialisation of helper class and loading of input data H5Helper h5Helper(argv[8], argv[9], md); try { h5Helper.init(); h5Helper.readParticleData(); } catch (const std::exception &e) { std::cerr << e.what() << std::endl; return -1; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: GPU side memory allocation (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // GPU particles structure std::vector<t_particles> particles_gpu(2); // Allocate memory on the device for (auto &p_gpu : particles_gpu) { checkCudaErrors(cudaMalloc(&p_gpu.pos_x, size)); checkCudaErrors(cudaMalloc(&p_gpu.pos_y, size)); checkCudaErrors(cudaMalloc(&p_gpu.pos_z, size)); checkCudaErrors(cudaMalloc(&p_gpu.vel_x, size)); checkCudaErrors(cudaMalloc(&p_gpu.vel_y, size)); checkCudaErrors(cudaMalloc(&p_gpu.vel_z, size)); checkCudaErrors(cudaMalloc(&p_gpu.weight, size)); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory transfers (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Copies particles data from host to device. 
checkCudaErrors(cudaMemcpy(particles_gpu[0].pos_x, particles_cpu.pos_x, size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(particles_gpu[0].pos_y, particles_cpu.pos_y, size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(particles_gpu[0].pos_z, particles_cpu.pos_z, size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(particles_gpu[0].vel_x, particles_cpu.vel_x, size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(particles_gpu[0].vel_y, particles_cpu.vel_y, size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(particles_gpu[0].vel_z, particles_cpu.vel_z, size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(particles_gpu[0].weight, particles_cpu.weight, size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(particles_gpu[1].weight, particles_gpu[0].weight, size, cudaMemcpyDeviceToDevice)); // CPU Center of Mass structure - x, y, z, w float4* comCPU; // GPU Center of Mass structure - x, y, z, w float4* comGPU; // Lock to ensure the mutual exclusion for reducing the result to the global memory int* lock; // Allocates page-locked memory on the host checkCudaErrors(cudaHostAlloc(&comCPU, sizeof(float4), cudaHostAllocMapped)); // Allocate memory on the device checkCudaErrors(cudaMalloc(&comGPU, sizeof(float4))); checkCudaErrors(cudaMalloc(&lock, sizeof(int))); // Initializes or sets device memory to a zero value checkCudaErrors(cudaMemset(comCPU, 0, sizeof(float4))); checkCudaErrors(cudaMemset(lock, 0, sizeof(int))); // Compute the size of the shared memory (for one grid block) size_t shm_mem_calc = thr_blc * sizeof(float) * 7; // Computes the size of the shared memory for reduction block size_t shm_mem_mass = (red_thr_blc / 32) * sizeof(float) * 4; // Create CUDA stream to perform effect concurrency cudaStream_t cm_stream, cp_stream, mem_stream; // Create an asynchronous stream. checkCudaErrors(cudaStreamCreate(&cm_stream)); // Compute Mass stream checkCudaErrors(cudaStreamCreate(&cp_stream)); // Compute Particles stream checkCudaErrors(cudaStreamCreate(&mem_stream)); // Write particles stream // Create CUDA events to synchronize individual CUDA stream during the computation cudaEvent_t cm_event, cp_event; // Creates an event object. 
checkCudaErrors(cudaEventCreate(&cm_event)); // Compute Mass event checkCudaErrors(cudaEventCreate(&cp_event)); // Compute Particles event // Auxiliary variable to register a record number at continuous writing to the file size_t records = 0; gettimeofday(&t1, 0); for (int s = 0; s < steps; s++) { //////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: kernels invocation (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Run the kernel computing particles velocity in relevant Compute Particles stream calculate_velocity<<< simulationGrid, thr_blc, shm_mem_calc, cp_stream >>> (particles_gpu[s & 1ul], particles_gpu[(s + 1) & 1ul], N, dt); // Records an event for finish the calculation of velocity checkCudaErrors(cudaEventRecord(cp_event, cp_stream)); if (writeFreq > 0 && (s % writeFreq == 0)) { // Initializes or sets device memory to a value in the relevant Compute mass stream checkCudaErrors(cudaMemsetAsync(comGPU, 0, sizeof(float4), cm_stream)); // Calls reduction kernel to compute the Center of Mass callCenterOfMass( particles_gpu[s & 1ul], &comGPU[0], &lock[0], N, reductionGrid, red_thr_blc, shm_mem_mass, cm_stream ); // Sets event when finished the computation of Center of Mass in relevant stream checkCudaErrors(cudaEventRecord(cm_event, cm_stream)); // Copies particles data from device to host // Copies the particles data computed in the previous iteration to CPU for writing to the file checkCudaErrors(cudaMemcpyAsync( particles_cpu.pos_x, particles_gpu[s & 1ul].pos_x, size, cudaMemcpyDeviceToHost, mem_stream )); checkCudaErrors(cudaMemcpyAsync( particles_cpu.pos_y, particles_gpu[s & 1ul].pos_y, size, cudaMemcpyDeviceToHost, mem_stream )); checkCudaErrors(cudaMemcpyAsync( particles_cpu.pos_z, particles_gpu[s & 1ul].pos_z, size, cudaMemcpyDeviceToHost, mem_stream )); checkCudaErrors(cudaMemcpyAsync( particles_cpu.vel_x, particles_gpu[s & 1ul].vel_x, size, cudaMemcpyDeviceToHost, mem_stream )); checkCudaErrors(cudaMemcpyAsync( particles_cpu.vel_y, particles_gpu[s & 1ul].vel_y, size, cudaMemcpyDeviceToHost, mem_stream )); checkCudaErrors(cudaMemcpyAsync( particles_cpu.vel_z, particles_gpu[s & 1ul].vel_z, size, cudaMemcpyDeviceToHost, mem_stream )); //////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: synchronization (step 4) // //////////////////////////////////////////////////////////////////////////////////////////////////////////// // Waits for memory copy stream when finished the tasks to copy particles data from GPU to CPU. checkCudaErrors(cudaStreamSynchronize(mem_stream)); // Waits for event which marks the finished of the Center of Mass computation checkCudaErrors(cudaStreamWaitEvent(mem_stream, cm_event, 0)); // Copies Center of mass data from device to host in the relevant memory copy stream checkCudaErrors(cudaMemcpyAsync(comCPU, comGPU, sizeof(float4), cudaMemcpyDeviceToHost, mem_stream)); //////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: synchronization and file access logic (step 4) // //////////////////////////////////////////////////////////////////////////////////////////////////////////// // Writing final values to the file h5Helper.writeParticleData(records); // Waits for memory stream finished the copying of center of mass data from CPU to GPU. 
checkCudaErrors(cudaStreamSynchronize(mem_stream)); // Writing center of mass data to the file h5Helper.writeCom( comCPU[0].x / comCPU[0].w, comCPU[0].y / comCPU[0].w, comCPU[0].z / comCPU[0].w, comCPU[0].w, records++ ); } // Make a compute stream wait on an event. // Before the next iteration have to be finished the calculation of velocities within relevant kernel checkCudaErrors(cudaStreamWaitEvent(mem_stream, cp_event, 0)); checkCudaErrors(cudaStreamWaitEvent(cm_stream, cp_event, 0)); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: invocation of center-of-mass kernel (step 3.1, step 3.2, step 4) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// cudaDeviceSynchronize(); // Initializes or sets device memory to a zero value checkCudaErrors(cudaMemset(comGPU, 0, sizeof(float4))); // Calls reduction kernel to compute the final Center of Mass results callCenterOfMass( particles_gpu[steps & 1], &comGPU[0], &lock[0], N, reductionGrid, red_thr_blc, shm_mem_mass, (cudaStream_t) 0 ); gettimeofday(&t2, 0); // Approximate simulation wall time double t = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000000.0; printf("Time: %f s\n", t); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory transfers for particle data (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Copies particles data from device to host checkCudaErrors(cudaMemcpy(particles_cpu.pos_x, particles_gpu[steps & 1].pos_x, size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(particles_cpu.pos_y, particles_gpu[steps & 1].pos_y, size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(particles_cpu.pos_z, particles_gpu[steps & 1].pos_z, size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(particles_cpu.vel_x, particles_gpu[steps & 1].vel_x, size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(particles_cpu.vel_y, particles_gpu[steps & 1].vel_y, size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(particles_cpu.vel_z, particles_gpu[steps & 1].vel_z, size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(particles_cpu.weight, particles_gpu[steps & 1].weight, size, cudaMemcpyDeviceToHost)); // Copies Center of Mass data from device to host checkCudaErrors(cudaMemcpy(comCPU, comGPU, sizeof(float4), cudaMemcpyDeviceToHost)); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory transfers for center-of-mass (step 3.1, step 3.2) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// float4 comOnCPU = centerOfMassCPU(md); std::cout << "Center of mass on CPU:" << std::endl << comOnCPU.x << ", " << comOnCPU.y << ", " << comOnCPU.z << ", " << comOnCPU.w << std::endl; std::cout << "Center of mass on GPU:" << std::endl << comCPU[0].x / comCPU[0].w << ", " << comCPU[0].y / comCPU[0].w << ", " << comCPU[0].z / comCPU[0].w << ", " << comCPU[0].w << std::endl; // Writing final values to the file h5Helper.writeComFinal( comCPU[0].x / comCPU[0].w, comCPU[0].y / comCPU[0].w, comCPU[0].z / comCPU[0].w, comCPU[0].w / comCPU[0].w ); h5Helper.writeParticleDataFinal(); // Destroys and cleans up an asynchronous streams 
checkCudaErrors(cudaStreamDestroy(cp_stream)); checkCudaErrors(cudaStreamDestroy(cm_stream)); checkCudaErrors(cudaStreamDestroy(mem_stream)); // Destroys an event objects checkCudaErrors(cudaEventDestroy(cp_event)); checkCudaErrors(cudaEventDestroy(cm_event)); // Free page-locked memory checkCudaErrors(cudaFreeHost(particles_cpu.pos_x)); checkCudaErrors(cudaFreeHost(particles_cpu.pos_y)); checkCudaErrors(cudaFreeHost(particles_cpu.pos_z)); checkCudaErrors(cudaFreeHost(particles_cpu.vel_x)); checkCudaErrors(cudaFreeHost(particles_cpu.vel_y)); checkCudaErrors(cudaFreeHost(particles_cpu.vel_z)); checkCudaErrors(cudaFreeHost(particles_cpu.weight)); checkCudaErrors(cudaFreeHost(comCPU)); // Free memory on the device. for (auto p_gpu : particles_gpu) { checkCudaErrors(cudaFree(p_gpu.pos_x)); checkCudaErrors(cudaFree(p_gpu.pos_y)); checkCudaErrors(cudaFree(p_gpu.pos_z)); checkCudaErrors(cudaFree(p_gpu.vel_x)); checkCudaErrors(cudaFree(p_gpu.vel_y)); checkCudaErrors(cudaFree(p_gpu.vel_z)); checkCudaErrors(cudaFree(p_gpu.weight)); } checkCudaErrors(cudaFree(comGPU)); checkCudaErrors(cudaFree(lock)); return 0; }// end of main //----------------------------------------------------------------------------------------------------------------------
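The simulation loop above combines double buffering (the s & 1ul index flips between the two particle buffers) with a compute stream, a reduction stream, and a copy stream that are ordered through events. Reduced to its skeleton, with placeholder kernels and buffers rather than the project's types, the pattern is roughly the following sketch.

#include <cuda_runtime.h>

__global__ void computeStep(const float* in, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = in[i];                 // placeholder body
}

// host is assumed to be pinned (cudaHostAlloc), as in the file above.
void runOverlapped(float* devA, float* devB, float* host, int n, int steps) {
    cudaStream_t compute, copy;
    cudaEvent_t stepDone;
    cudaStreamCreate(&compute);
    cudaStreamCreate(&copy);
    cudaEventCreate(&stepDone);

    float* bufs[2] = { devA, devB };
    for (int s = 0; s < steps; ++s) {
        float* src = bufs[s & 1];              // state produced by the previous step
        float* dst = bufs[(s + 1) & 1];        // state being produced now
        computeStep<<<(n + 255) / 256, 256, 0, compute>>>(src, dst, n);
        cudaEventRecord(stepDone, compute);

        // Safe to copy src while the kernel runs: the kernel only writes dst.
        cudaMemcpyAsync(host, src, n * sizeof(float), cudaMemcpyDeviceToHost, copy);

        // The next iteration's copy will read dst, so the copy stream must not
        // run ahead of the kernel that is still writing it.
        cudaStreamWaitEvent(copy, stepDone, 0);
    }
    cudaStreamSynchronize(compute);
    cudaStreamSynchronize(copy);
    cudaEventDestroy(stepDone);
    cudaStreamDestroy(compute);
    cudaStreamDestroy(copy);
}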
560748189963157f2c14e2445aa142edffc3d12b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <fstream> #include <sstream> #include <string> #include <cstring> #include <cstdlib> #include <cassert> using namespace std; typedef unsigned char byte; __global__ void count1itemset(byte *, int, int, int *); struct bitmap { int n, m; unsigned int size; unsigned int *bits; bitmap(int rows, int cols) { int bitcols = (cols + 31) / 32; n = rows; m = bitcols; size = n*m; bits = new unsigned int[size]; memset(bits, 0, size*sizeof(unsigned int)); } ~bitmap() { delete[] bits; } void set_bit(int row, int col) { bits[row*m + col / 32] |= (((unsigned int)1) << (31 - col % 32)); } bool get_bit(int row, int col) { int i = row*m + col / 32; unsigned int flag = 1; flag = flag << (31 - col % 32); if ((flag&bits[i]) == 0) return false; else return true; } }; //matrix[i][[j] = matrix[i*M+j] void parse_transactions(string &file,bitmap &t) { ifstream csv(file, ios::in); string line; int i = 0; while (getline(csv, line)) { stringstream ss(line); int value; while (ss >> value) { t.set_bit(i, value); if (ss.peek() == ',') ss.ignore(); } i++; } csv.close(); } void copy_transactions_to_device(bitmap &t, unsigned int **dev_matrix) { hipError_t status; unsigned int *temp; status = hipMalloc((void **)&temp, t.n * t.m * sizeof(unsigned int)); if (status != hipSuccess) { fprintf(stderr, "hipMalloc failed! dev_matrix"); return; } status = hipMemcpy(temp, t.bits, t.n * t.m * sizeof(unsigned int), hipMemcpyHostToDevice); if (status != hipSuccess) { fprintf(stderr, "hipMalloc failed! dev_matrix"); return; } *dev_matrix = temp; } __device__ __host__ unsigned int popcount(unsigned int value) { unsigned int count = 0; while (value > 0) { // until all bits are zero if ((value & 1) == 1) // check lower bit count++; value >>= 1; // shift bits, removing lower bit } return count; } __global__ void count1itemset(unsigned int *transactions, int N, int M, int *counter) { int idx = blockDim.x*blockIdx.x + threadIdx.x; for (int i = idx; i < N; i += blockDim.x*gridDim.x) { for (int j = 0; j < M; j++) { counter[i] += popcount(transactions[i*M + j]); } } } void get_1itemset(unsigned int *dev_transactions, int nitems, int M, int ntrans, float min_sup, bitmap &first_itemset, int &x) { int *counter = new int[nitems]; int *dev_counter; hipError_t status; status = hipMalloc((void **)&dev_counter, nitems*sizeof(int)); if (status != hipSuccess) { fprintf(stderr, "cudamalloc failed! dev_counter %s\n", hipGetErrorString(status)); goto error; } status = hipMemset(dev_counter, 0, nitems*sizeof(int)); if (status != hipSuccess) { fprintf(stderr, "cudamemset failed! dev_counter %s\n", hipGetErrorString(status)); goto error; } hipLaunchKernelGGL(( count1itemset) , dim3(5),dim3(15), 0, 0, dev_transactions, nitems, M, dev_counter); status = hipDeviceSynchronize(); if (status != hipSuccess) { fprintf(stderr, "cudadevicesynch failed! %s\n", hipGetErrorString(status)); goto error; } status = hipMemcpy(counter, dev_counter, nitems*sizeof(int), hipMemcpyDeviceToHost); if (status != hipSuccess) { fprintf(stderr, "hipMemcpy failed! 
counter"); goto error; } for (int i = 0; i < nitems; i++) { if (((float)counter[i]) / ntrans >= 0.05) { first_itemset.set_bit(0, i); x++; cout << i << " "; } } cout << endl; error: hipFree(dev_counter); delete[] counter; } __global__ void gen_candidates_and_count(unsigned int* dev_transactions, unsigned int* bm, int n, unsigned int *result) { extern __shared__ unsigned int next[]; int idx = blockDim.x*blockIdx.x + threadIdx.x; int count = 0; int nn = (n * (n - 1)) / 2; for (int i = idx; i < n; i += blockDim.x*gridDim.x) { for (int j = i + 1; j < n; j++) { for (int k = 0; k < n; k++) { next[(i+count)*n+k] = bm[i*n+k] | bm[j*n+k]; count++; } } } __syncthreads(); memcpy(result, next, nn*sizeof(unsigned int)); } void gen_candidates(unsigned int* d_t, unsigned int *bm, int n) { unsigned int *dev_bm; hipMalloc((void **)dev_bm, n*sizeof(unsigned int)); hipMemcpy(dev_bm, bm, n*sizeof(unsigned int), hipMemcpyHostToDevice); int dim = ((n * (n - 1)) / 2)*n; unsigned int*d_result; hipMalloc((void**)&d_result, dim*sizeof(unsigned int)); hipMemset(d_result, 0, dim*sizeof(unsigned int)); hipLaunchKernelGGL(( gen_candidates_and_count), dim3(1),dim3(n),dim, 0, d_t, dev_bm, n, d_result); unsigned int *result = new unsigned int[dim]; hipMemcpy(result,d_result, dim*sizeof(unsigned int), hipMemcpyDeviceToHost); } int main() { float min_sup = 0.01; ifstream input("input.txt", ios::in); string s; int n, ntrans, first_count = 0; input >> s >> n >> ntrans; cout << s << " " << n << " " << ntrans << endl; input.close(); ntrans++; bitmap transactions(n, ntrans); bitmap first_itemset(1, n); parse_transactions(s, transactions); unsigned int *dev_transactions; copy_transactions_to_device(transactions, &dev_transactions); get_1itemset(dev_transactions, transactions.n, transactions.m, ntrans, min_sup, first_itemset, first_count); /*bitmap f(first_count, first_count); for (int i = 0; i < first_count; i++) f.set_bit(i, i);*/ unsigned int *f = new unsigned int[first_count*first_count](); for (int i = 0; i < first_count; i++) { f[i*first_count+i] = 1; } gen_candidates(dev_transactions, f, first_count); cout << first_count << endl; hipFree(dev_transactions); }
560748189963157f2c14e2445aa142edffc3d12b.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <fstream> #include <sstream> #include <string> #include <cstring> #include <cstdlib> #include <cassert> using namespace std; typedef unsigned char byte; __global__ void count1itemset(byte *, int, int, int *); struct bitmap { int n, m; unsigned int size; unsigned int *bits; bitmap(int rows, int cols) { int bitcols = (cols + 31) / 32; n = rows; m = bitcols; size = n*m; bits = new unsigned int[size]; memset(bits, 0, size*sizeof(unsigned int)); } ~bitmap() { delete[] bits; } void set_bit(int row, int col) { bits[row*m + col / 32] |= (((unsigned int)1) << (31 - col % 32)); } bool get_bit(int row, int col) { int i = row*m + col / 32; unsigned int flag = 1; flag = flag << (31 - col % 32); if ((flag&bits[i]) == 0) return false; else return true; } }; //matrix[i][[j] = matrix[i*M+j] void parse_transactions(string &file,bitmap &t) { ifstream csv(file, ios::in); string line; int i = 0; while (getline(csv, line)) { stringstream ss(line); int value; while (ss >> value) { t.set_bit(i, value); if (ss.peek() == ',') ss.ignore(); } i++; } csv.close(); } void copy_transactions_to_device(bitmap &t, unsigned int **dev_matrix) { cudaError_t status; unsigned int *temp; status = cudaMalloc((void **)&temp, t.n * t.m * sizeof(unsigned int)); if (status != cudaSuccess) { fprintf(stderr, "cudaMalloc failed! dev_matrix"); return; } status = cudaMemcpy(temp, t.bits, t.n * t.m * sizeof(unsigned int), cudaMemcpyHostToDevice); if (status != cudaSuccess) { fprintf(stderr, "cudaMalloc failed! dev_matrix"); return; } *dev_matrix = temp; } __device__ __host__ unsigned int popcount(unsigned int value) { unsigned int count = 0; while (value > 0) { // until all bits are zero if ((value & 1) == 1) // check lower bit count++; value >>= 1; // shift bits, removing lower bit } return count; } __global__ void count1itemset(unsigned int *transactions, int N, int M, int *counter) { int idx = blockDim.x*blockIdx.x + threadIdx.x; for (int i = idx; i < N; i += blockDim.x*gridDim.x) { for (int j = 0; j < M; j++) { counter[i] += popcount(transactions[i*M + j]); } } } void get_1itemset(unsigned int *dev_transactions, int nitems, int M, int ntrans, float min_sup, bitmap &first_itemset, int &x) { int *counter = new int[nitems]; int *dev_counter; cudaError_t status; status = cudaMalloc((void **)&dev_counter, nitems*sizeof(int)); if (status != cudaSuccess) { fprintf(stderr, "cudamalloc failed! dev_counter %s\n", cudaGetErrorString(status)); goto error; } status = cudaMemset(dev_counter, 0, nitems*sizeof(int)); if (status != cudaSuccess) { fprintf(stderr, "cudamemset failed! dev_counter %s\n", cudaGetErrorString(status)); goto error; } count1itemset <<<5,15>>> (dev_transactions, nitems, M, dev_counter); status = cudaDeviceSynchronize(); if (status != cudaSuccess) { fprintf(stderr, "cudadevicesynch failed! %s\n", cudaGetErrorString(status)); goto error; } status = cudaMemcpy(counter, dev_counter, nitems*sizeof(int), cudaMemcpyDeviceToHost); if (status != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed! 
counter"); goto error; } for (int i = 0; i < nitems; i++) { if (((float)counter[i]) / ntrans >= 0.05) { first_itemset.set_bit(0, i); x++; cout << i << " "; } } cout << endl; error: cudaFree(dev_counter); delete[] counter; } __global__ void gen_candidates_and_count(unsigned int* dev_transactions, unsigned int* bm, int n, unsigned int *result) { extern __shared__ unsigned int next[]; int idx = blockDim.x*blockIdx.x + threadIdx.x; int count = 0; int nn = (n * (n - 1)) / 2; for (int i = idx; i < n; i += blockDim.x*gridDim.x) { for (int j = i + 1; j < n; j++) { for (int k = 0; k < n; k++) { next[(i+count)*n+k] = bm[i*n+k] | bm[j*n+k]; count++; } } } __syncthreads(); memcpy(result, next, nn*sizeof(unsigned int)); } void gen_candidates(unsigned int* d_t, unsigned int *bm, int n) { unsigned int *dev_bm; cudaMalloc((void **)dev_bm, n*sizeof(unsigned int)); cudaMemcpy(dev_bm, bm, n*sizeof(unsigned int), cudaMemcpyHostToDevice); int dim = ((n * (n - 1)) / 2)*n; unsigned int*d_result; cudaMalloc((void**)&d_result, dim*sizeof(unsigned int)); cudaMemset(d_result, 0, dim*sizeof(unsigned int)); gen_candidates_and_count<<<1,n,dim>>>(d_t, dev_bm, n, d_result); unsigned int *result = new unsigned int[dim]; cudaMemcpy(result,d_result, dim*sizeof(unsigned int), cudaMemcpyDeviceToHost); } int main() { float min_sup = 0.01; ifstream input("input.txt", ios::in); string s; int n, ntrans, first_count = 0; input >> s >> n >> ntrans; cout << s << " " << n << " " << ntrans << endl; input.close(); ntrans++; bitmap transactions(n, ntrans); bitmap first_itemset(1, n); parse_transactions(s, transactions); unsigned int *dev_transactions; copy_transactions_to_device(transactions, &dev_transactions); get_1itemset(dev_transactions, transactions.n, transactions.m, ntrans, min_sup, first_itemset, first_count); /*bitmap f(first_count, first_count); for (int i = 0; i < first_count; i++) f.set_bit(i, i);*/ unsigned int *f = new unsigned int[first_count*first_count](); for (int i = 0; i < first_count; i++) { f[i*first_count+i] = 1; } gen_candidates(dev_transactions, f, first_count); cout << first_count << endl; cudaFree(dev_transactions); }
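The popcount helper above is written by hand so that it can run on both host and device. On the device side, CUDA already exposes a population-count intrinsic; a sketch of the per-row counting kernel using __popc follows (same data layout as the kernel above, but not a drop-in replacement for the shared host/device helper).

__global__ void count1itemset_popc(const unsigned int *transactions, int N, int M, int *counter) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    for (int i = idx; i < N; i += blockDim.x * gridDim.x) {
        int bits = 0;
        for (int j = 0; j < M; j++) {
            bits += __popc(transactions[i * M + j]);   // hardware popcount of one 32-bit word
        }
        counter[i] += bits;
    }
}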
4ba112fe58721beda90523bb066f6f185cb42d58.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /********************************************************************************************/ /* ReconGPU.cu */ /* Copyright 2017, XinRay Inc., All rights reserved */ /********************************************************************************************/ #include "TomoRecon.h" /********************************************************************************************/ /* CUDA specific helper functions */ /********************************************************************************************/ TomoError cuda_assert(const hipError_t code, const char* const file, const int line) { if (code != hipSuccess) { std::cout << "Cuda failure " << file << ":" << line << ": " << hipGetErrorString(code) << "\n"; return Tomo_CUDA_err; } else return Tomo_OK; } TomoError cuda_assert_void(const char* const file, const int line) { hipError_t code = hipGetLastError(); if (code != hipSuccess) { std::cout << "Cuda failure " << file << ":" << line << ": " << hipGetErrorString(code) << "\n"; return Tomo_CUDA_err; } else return Tomo_OK; } union pxl_rgbx_24{ uint1 b32; struct { unsigned r : 8; unsigned g : 8; unsigned b : 8; unsigned na : 8; }; }; #define PXL_KERNEL_THREADS_PER_BLOCK 256 surface<void, cudaSurfaceType2D> displaySurface; texture<float, hipTextureType3D, hipReadModeElementType> textRecon; texture<float, hipTextureType3D, hipReadModeElementType> textDelta; texture<float, hipTextureType2D, hipReadModeElementType> textImage; texture<float, hipTextureType2D, hipReadModeElementType> textError; texture<float, hipTextureType2D, hipReadModeElementType> textSino; texture<float, hipTextureType2D, hipReadModeElementType> textWeight; /********************************************************************************************/ /* GPU Function specific functions */ /********************************************************************************************/ //Conversion Helpers __host__ __device__ float xP2MM(float p, float Px, float PitchPx) { return (p + 0.5f - Px / 2.0f) * PitchPx; } __host__ __device__ float yP2MM(float p, float Py, float PitchPy) { return (p + 0.5f - Py / 2.0f) * PitchPy; } __host__ __device__ float xR2MM(float r, float Rx, float PitchRx) { return (r + 0.5f - Rx / 2.0f) * PitchRx; } __host__ __device__ float yR2MM(float r, float Ry, float PitchRy) { return (r + 0.5f - Ry / 2.0f) * PitchRy; } __host__ __device__ float xMM2P(float m, float Px, float PitchPx) { return m / PitchPx - 0.5f + Px / 2.0f; } __host__ __device__ float yMM2P(float m, float Py, float PitchPy) { return m / PitchPy - 0.5f + Py / 2.0f; } __host__ __device__ float xMM2R(float m, float Rx, float PitchRx) { return m / PitchRx - 0.5f + Rx / 2.0f; } __host__ __device__ float yMM2R(float m, float Ry, float PitchRy) { return m / PitchRy - 0.5f + Ry / 2.0f; } //Loop unrolling templates, device functions are mapped to inlines 99% of the time template<int i> __device__ float convolutionRow(float x, float y, float kernel[KERNELSIZE]){ return tex2D(textImage, x + (float)(KERNELRADIUS - i), y) * kernel[i] + convolutionRow<i - 1>(x, y, kernel); } template<> __device__ float convolutionRow<-1>(float x, float y, float kernel[KERNELSIZE]){ return 0; } template<int i> __device__ float convolutionColumn(float x, float y, float kernel[KERNELSIZE]){ return tex2D(textImage, x, y + (float)(KERNELRADIUS - i)) * kernel[i] + convolutionColumn<i - 1>(x, y, kernel); } template<> __device__ float convolutionColumn<-1>(float x, float y, float 
kernel[KERNELSIZE]){ return 0; }

//Interpolation helper
__device__ float interpolateSino(float x, float y, int view, params consts) {
	float xWeight = x - floor(x);
	float yWeight = y - floor(y);
	float temp, value = 0, count = 0;
	temp = tex2D(textSino, x - xWeight + 0.5f, y - yWeight + 0.5f + view * consts.Py);
	value += (1 - xWeight) * (1 - yWeight) * (temp);
	if (temp != 0.0f) count += (1 - xWeight) * (1 - yWeight);
	temp = tex2D(textSino, x - xWeight + 1.5f, y - yWeight + 0.5f + view * consts.Py);
	value += xWeight * (1 - yWeight) * (temp);
	if (temp != 0.0f) count += xWeight * (1 - yWeight);
	temp = tex2D(textSino, x - xWeight + 0.5f, y - yWeight + 1.5f + view * consts.Py);
	value += (1 - xWeight) * yWeight * (temp);
	if (temp != 0.0f) count += (1 - xWeight) * yWeight;
	temp = tex2D(textSino, x - xWeight + 1.5f, y - yWeight + 1.5f + view * consts.Py);
	value += xWeight * yWeight * (temp);
	if (temp != 0.0f) count += xWeight * yWeight;
	//if (count > 0.0f) value /= count;
	return value;
}

//Image metric generators
__global__ void convolutionRowsKernel(float *d_Dst, float kernel[KERNELSIZE], params consts) {
	const int ix = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x);
	const int iy = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y);
	const float x = (float)ix + 0.5f;
	const float y = (float)iy + 0.5f;
	const int pitch = consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon ? consts.ReconPitchNum : consts.ProjPitchNum;

	if (consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon) {
		if (ix >= consts.Rx || iy >= consts.Ry) return;
		if (ix >= consts.Rx - KERNELRADIUS || ix < KERNELRADIUS ) {// || iy >= consts.Ry - KERNELRADIUS || iy < KERNELRADIUS
			d_Dst[MUL_ADD(iy, pitch, ix)] = 0.0f;
			return;
		}
	}
	else {
		if (ix >= consts.Px || iy >= consts.Py) return;
		if (ix >= consts.Px - KERNELRADIUS || ix < KERNELRADIUS) {// || iy >= consts.Py - KERNELRADIUS || iy < KERNELRADIUS
			d_Dst[MUL_ADD(iy, pitch, ix)] = 0.0f;
			return;
		}
	}

	d_Dst[MUL_ADD(iy, pitch, ix)] = convolutionRow<KERNELSIZE>(x, y, kernel);
}

__global__ void convolutionColumnsKernel(float *d_Dst, float kernel[KERNELSIZE], params consts){
	const int ix = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x);
	const int iy = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y);
	const float x = (float)ix + 0.5f;
	const float y = (float)iy + 0.5f;
	const int pitch = consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon ? consts.ReconPitchNum : consts.ProjPitchNum;

	if (consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon) {
		if (ix >= consts.Rx || iy >= consts.Ry) return;
		if (iy >= consts.Ry - KERNELRADIUS || iy < KERNELRADIUS) {//ix >= consts.Rx - KERNELRADIUS || || ix < KERNELRADIUS
			d_Dst[MUL_ADD(iy, pitch, ix)] = 0.0f;
			return;
		}
	}
	else {
		if (ix >= consts.Px || iy >= consts.Py) return;
		if (iy >= consts.Py - KERNELRADIUS || iy < KERNELRADIUS) {//ix >= consts.Px - KERNELRADIUS || || ix < KERNELRADIUS
			d_Dst[MUL_ADD(iy, pitch, ix)] = 0.0f;
			return;
		}
	}

	d_Dst[MUL_ADD(iy, pitch, ix)] = convolutionColumn<KERNELSIZE>(x, y, kernel);
}

__global__ void squareMag(float *d_Dst, float *src1, float *src2, params consts) {
	const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x);
	const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y);
	bool recon = consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon;
	int pitch = recon ?
consts.ReconPitchNum : consts.ProjPitchNum; if ((recon && x >= consts.Rx) || (recon && y >= consts.Ry) || (!recon && x >= consts.Px) || (!recon && y >= consts.Py) || x < 0 || y < 0) return; d_Dst[MUL_ADD(y, pitch, x)] = pow((double)src1[MUL_ADD(y, pitch, x)],2) + pow((double)src2[MUL_ADD(y, pitch, x)],2); } __global__ void mag(float *d_Dst, float *src1, float *src2, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); bool recon = consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon; int pitch = recon ? consts.ReconPitchNum : consts.ProjPitchNum; if ((recon && x >= consts.Rx) || (recon && y >= consts.Ry) || (!recon && x >= consts.Px) || (!recon && y >= consts.Py) || x < 0 || y < 0) return; //d_Dst[MUL_ADD(y, pitch, x)] = (float)sqrt(pow((double)src1[MUL_ADD(y, pitch, x)], 2) + pow((double)src2[MUL_ADD(y, pitch, x)], 2)); d_Dst[MUL_ADD(y, pitch, x)] = (abs(src1[MUL_ADD(y, pitch, x)]) + abs(src2[MUL_ADD(y, pitch, x)])) / 2.0f; } __global__ void abs(float *d_Dst, float *src, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); bool recon = consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon; int pitch = recon ? consts.ReconPitchNum : consts.ProjPitchNum; if ((recon && x >= consts.Rx) || (recon && y >= consts.Ry) || (!recon && x >= consts.Px) || (!recon && y >= consts.Py) || x < 0 || y < 0) return; //d_Dst[MUL_ADD(y, pitch, x)] = (float)sqrt(pow((double)src1[MUL_ADD(y, pitch, x)], 2) + pow((double)src2[MUL_ADD(y, pitch, x)], 2)); d_Dst[MUL_ADD(y, pitch, x)] = abs(src[MUL_ADD(y, pitch, x)]); } __global__ void pow(float *d_Dst, float *src, float exponent, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); bool recon = consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon; int pitch = recon ? consts.ReconPitchNum : consts.ProjPitchNum; if ((recon && x >= consts.Rx) || (recon && y >= consts.Ry) || (!recon && x >= consts.Px) || (!recon && y >= consts.Py) || x < 0 || y < 0) return; //d_Dst[MUL_ADD(y, pitch, x)] = (float)sqrt(pow((double)src1[MUL_ADD(y, pitch, x)], 2) + pow((double)src2[MUL_ADD(y, pitch, x)], 2)); d_Dst[MUL_ADD(y, pitch, x)] = pow(src[MUL_ADD(y, pitch, x)], exponent); } __global__ void squareDiff(float *d_Dst, int view, float xOff, float yOff, int pitchOut, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); if (x >= consts.Px || y >= consts.Py || x < 0 || y < 0) return; d_Dst[MUL_ADD(y, pitchOut, x)] = pow(tex2D(textError, x - xOff, y - yOff + view*consts.Py) - tex2D(textError, x, y + (NUMVIEWS / 2)*consts.Py), 2); } __global__ void add(float* src1, float* src2, float *d_Dst, bool useRatio, bool useAbs, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); bool recon = consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon; int pitch = recon ? consts.ReconPitchNum : consts.ProjPitchNum; if ((recon && x >= consts.Rx) || (recon && y >= consts.Ry) || (!recon && x >= consts.Px) || (!recon && y >= consts.Py) || x < 0 || y < 0) return; if (useRatio) { if (useAbs) { float val = consts.log ? 
abs(src2[MUL_ADD(y, pitch, x)]) : USHRT_MAX - abs(src2[MUL_ADD(y, pitch, x)]); d_Dst[MUL_ADD(y, pitch, x)] = src1[MUL_ADD(y, pitch, x)] * consts.ratio + val * (1 - consts.ratio); } else { float val = consts.log ? src2[MUL_ADD(y, pitch, x)] : USHRT_MAX - src2[MUL_ADD(y, pitch, x)]; d_Dst[MUL_ADD(y, pitch, x)] = src1[MUL_ADD(y, pitch, x)] * consts.ratio + val * (1 - consts.ratio); } } else d_Dst[MUL_ADD(y, pitch, x)] = (src1[MUL_ADD(y, pitch, x)] + src2[MUL_ADD(y, pitch, x)]) / 2; } __global__ void div(float* src1, float* src2, float *d_Dst, int pitch, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); if (x >= consts.Px || y >= consts.Py || x < 0 || y < 0) return; d_Dst[MUL_ADD(y, pitch, x)] = 100 * src1[MUL_ADD(y, pitch, x)] / (abs(src2[MUL_ADD(y, pitch, x)]) + 1); } __global__ void thresh(float* src1, float* src2, float *d_Dst, int pitch, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); if (x >= consts.Px || y >= consts.Py || x < 0 || y < 0) return; if(src1[MUL_ADD(y, pitch, x)] < 50.0f) d_Dst[MUL_ADD(y, pitch, x)] = src2[MUL_ADD(y, pitch, x)]; else d_Dst[MUL_ADD(y, pitch, x)] = 0.0f; } __global__ void sub(float* src1, float* src2, float *d_Dst, int pitch, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); if (x >= consts.Px || y >= consts.Py || x < 0 || y < 0) return; d_Dst[MUL_ADD(y, pitch, x)] = src1[MUL_ADD(y, pitch, x)] - src2[MUL_ADD(y, pitch, x)]; } __global__ void invert(float* image, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); if (x >= consts.Px || y >= consts.Py || x < 0 || y < 0) return; float val = image[MUL_ADD(y, consts.ProjPitchNum, x)]; float correctedMax = logf(USHRT_MAX); if (val <= 0.0f) image[MUL_ADD(y, consts.ProjPitchNum, x)] = 0.0f; else if (val >= USHRT_MAX) image[MUL_ADD(y, consts.ProjPitchNum, x)] = USHRT_MAX; #ifdef USELOGITER else image[MUL_ADD(y, consts.ProjPitchNum, x)] = (correctedMax - logf(val + 1)) / correctedMax * USHRT_MAX; #else else image[MUL_ADD(y, consts.ProjPitchNum, x)] = USHRT_MAX - val; #endif } __global__ void projectSliceZ(float * zBuff[KERNELSIZE], int index, int projIndex, float distance, params consts) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; float values[NUMVIEWS]; //Set a normalization and pixel value to 0 float error = 0.0f; float count = 0.0f; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; for (int view = 0; view < NUMVIEWS; view++) { if (projIndex >= 0 && projIndex != view) { count++; continue; } float dz = distance / consts.d_Beamz[view]; if (consts.orientation) dz = -dz;//z changes sign when flipped in the x direction float x = xMM2P((xR2MM(i, consts.Rx, consts.PitchRx) + consts.d_Beamx[consts.revGeo ? 
consts.Views - 1 - view : view] * dz), consts.Px, consts.PitchPx);// / (1 + dz) float y = yMM2P((yR2MM(j, consts.Ry, consts.PitchRy) + consts.d_Beamy[view] * dz), consts.Py, consts.PitchPy); //Update the value based on the error scaled and save the scale if (y > 0 && y < consts.Py && x > 0 && x < consts.Px) { values[view] = tex2D(textError, x, y + view*consts.Py); if (values[view] != 0) { error += values[view]; count++; } } if (projIndex >= 0) break; } if (count > 0) zBuff[index][j*consts.ReconPitchNum + i] = error / count; else zBuff[index][j*consts.ReconPitchNum + i] = 0; } __global__ void zConvolution(float *d_Dst, float * zSrc[KERNELSIZE], float kernel[KERNELSIZE], params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); if (x >= consts.Rx || y >= consts.Ry || x < 0 || y < 0) return; float out = 0.0f; for (int i = 0; i < KERNELSIZE; i++) out += zSrc[i][MUL_ADD(y, consts.ReconPitchNum, x)] * kernel[i]; //if(abs(out) > 10.0f) d_Dst[MUL_ADD(y, consts.ReconPitchNum, x)] = out; //else d_Dst[MUL_ADD(y, consts.ReconPitchNum, x)] = 0.0f; } //Display functions __global__ void resizeKernelTex(int wIn, int hIn, int wOut, int hOut, float scale, int xOff, int yOff, bool derDisplay, params consts) { // pixel coordinates const int idx = (blockDim.x * blockIdx.x) + threadIdx.x; const int x = idx % wOut; const int y = idx / wOut; bool negative = false; bool saturate = false; float sum = 0; float i = (x - (wOut - wIn / scale) / 2.0f)*scale + xOff; float j = (y - (hOut - hIn / scale) / 2.0f)*scale + yOff; if (consts.orientation) i = wIn - 1 - i; if (consts.flip) j = hIn - 1 - j; if (i > 0 && j > 0 && i < wIn && j < hIn) sum = tex2D(textImage, i + 0.5f, j + 0.5f); if (sum < 0) { negative = true; sum = abs(sum); } if (consts.log) { if (sum > 0.0f) { float correctedMax = logf(USHRT_MAX); sum = (correctedMax - logf(sum + 1)) / correctedMax * USHRT_MAX; } else sum = consts.minVal; } sum = (sum - consts.minVal) / (consts.maxVal - consts.minVal) * UCHAR_MAX; //if (!consts.log) sum = UCHAR_MAX - sum; saturate = sum > UCHAR_MAX; union pxl_rgbx_24 rgbx; if (saturate) { rgbx.na = UCHAR_MAX; rgbx.r = UCHAR_MAX;//flag errors with big red spots rgbx.g = UCHAR_MAX;//0 rgbx.b = UCHAR_MAX;//0 } else { rgbx.na = UCHAR_MAX; if (negative) { if (consts.showNegative) { rgbx.r = 0; rgbx.g = 0; rgbx.b = sum; } else { rgbx.r = 0; rgbx.g = 0; rgbx.b = 0; } } else { rgbx.r = sum; rgbx.g = sum; rgbx.b = sum; } } surf2Dwrite(rgbx.b32, displaySurface, x * sizeof(rgbx), y, hipBoundaryModeZero); // squelches out-of-bound writes } __global__ void drawSelectionBox(int UX, int UY, int LX, int LY, int wOut) { const int idx = (blockDim.x * blockIdx.x) + threadIdx.x; const int x = idx % wOut; const int y = idx / wOut; if ((x >= UX && x < UX + LINEWIDTH && y >= LY - LINEWIDTH && y < UY + LINEWIDTH) || (x >= LX - LINEWIDTH && x < LX && y >= LY - LINEWIDTH && y < UY + LINEWIDTH) || (y >= UY && y < UY + LINEWIDTH && x >= LX && x < UX) || (y >= LY - LINEWIDTH && y < LY && x >= LX && x < UX)) { union pxl_rgbx_24 rgbx; rgbx.na = 0xFF; rgbx.r = 255;//flag errors with big red spots rgbx.g = 0; rgbx.b = 0; surf2Dwrite(rgbx.b32, displaySurface, x * sizeof(rgbx), y, hipBoundaryModeZero); // squelches out-of-bound writes } } __global__ void drawSelectionBar(int X, int Y, int wOut, bool vertical) { const int idx = (blockDim.x * blockIdx.x) + threadIdx.x; const int x = idx % wOut; const int y = idx / wOut; if (!vertical && (x >= X && x < X + LINEWIDTH && y >= Y && y < Y + 
BARHEIGHT) || vertical && (y >= Y && y < Y + LINEWIDTH && x >= X - BARHEIGHT && x < X)) { union pxl_rgbx_24 rgbx; rgbx.na = 0xFF; rgbx.r = 255;//flag errors with big red spots rgbx.g = 0; rgbx.b = 0; surf2Dwrite(rgbx.b32, displaySurface, x * sizeof(rgbx), y, hipBoundaryModeZero); // squelches out-of-bound writes } } //Functions to do initial correction of raw data: log and scatter correction __global__ void LogCorrectProj(float * Sino, int view, unsigned short *Proj, unsigned short *Gain, params consts){ //Define pixel location in x and y int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //Check image boundaries if ((i < consts.Px) && (j < consts.Py)){ //Flip and invert while converting to float int x = i; int y = j; float val = Proj[j*consts.Px + i]; if (consts.useGain) { val /= (float)Gain[j*consts.Px + i] * (float)consts.exposure / (float)EXPOSUREBASE; if (val > HIGHTHRESH) val = 0.0f; val *= USHRT_MAX; } else val *= 32.0f;//conversion from 10 to 16 bit Sino[(y + view * consts.Py)*consts.ProjPitchNum + x] = val; //large noise correction if (consts.useMaxNoise) { //Get a second round to aviod gain correction issues __syncthreads(); if (x > 1 && x < consts.Px - 1) { float val1 = Sino[(y + view*consts.Py)*consts.ProjPitchNum + x - 1]; float val2 = Sino[(y + view*consts.Py)*consts.ProjPitchNum + x + 1]; float val3 = (val1 + val2) / 2; if (abs(val1 - val2) < 2 * consts.maxNoise && abs(val3 - val) > consts.maxNoise) val = val3; } if (y > 1 && y < consts.Py - 1) { float val1 = Sino[(y - 1 + view*consts.Py)*consts.ProjPitchNum + x]; float val2 = Sino[(y + 1 + view*consts.Py)*consts.ProjPitchNum + x]; float val3 = (val1 + val2) / 2; if (abs(val1 - val2) < 2 * consts.maxNoise && abs(val3 - val) > consts.maxNoise) val = val3; } Sino[(y + view*consts.Py)*consts.ProjPitchNum + x] = val; } } } __global__ void rescale(float * Sino, float * raw, int view, float * MaxVal, float * MinVal, float * colShifts, float * rowShifts, float scale, params consts) { //Define pixel location in x and y int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //Check image boundaries if ((i < consts.Px) && (j < consts.Py)) { float test = Sino[(j + view*consts.Py)*consts.ProjPitchNum + i] -*MinVal; if (test > 0) { test = (test - colShifts[i] - rowShifts[j]) / scale / (*MaxVal - *MinVal) * USHRT_MAX;//scale from 1 to max if (test > consts.metalThresh || !consts.useMetal) Sino[(j + view*consts.Py)*consts.ProjPitchNum + i] = test;// && test < ABSHIGHTHRESH else Sino[(j + view*consts.Py)*consts.ProjPitchNum + i] = 0.0f; raw[(j + view*consts.Py)*consts.ProjPitchNum + i] = test; } else { Sino[(j + view*consts.Py)*consts.ProjPitchNum + i] = 0.0f; raw[(j + view*consts.Py)*consts.ProjPitchNum + i] = 0.0f; } } } //Create the single slice projection image __global__ void projectSlice(float * IM, float distance, params consts) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; float value; //Set a normalization and pixel value to 0 float error = 0.0f; float count = 0.0f; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; for (int view = 0; view < NUMVIEWS; view++) { if (!consts.useBeams[view])continue; float dz = distance / consts.d_Beamz[view]; float x = xMM2P((xR2MM(i, consts.Rx, consts.PitchRx) + consts.d_Beamx[consts.revGeo ? 
consts.Views - 1 - view : view] * dz), consts.Px, consts.PitchPx);// * (1 - dz) float y = yMM2P((yR2MM(j, consts.Ry, consts.PitchRy) + consts.d_Beamy[view] * dz), consts.Py, consts.PitchPy); //Update the value based on the error scaled and save the scale if (y >= 0 && y < consts.Py && x >= 0 && x < consts.Px) { //value = interpolateSino(x, y, view, consts); value = tex2D(textSino, x, y + view * consts.Py); float increment = 1.0f; if (y < TAPERSIZE) increment *= y / TAPERSIZE; if (y > consts.Py - TAPERSIZE) increment *= (consts.Py - y) / TAPERSIZE; if (x < TAPERSIZE) increment *= x / TAPERSIZE; if (x > consts.Px - TAPERSIZE) increment *= (consts.Px - x) / TAPERSIZE; //Corner correction if (consts.Px - x + consts.Py - y < TRISIZE) increment = 0.0f; else if (consts.Px - x + consts.Py - y < TRISIZE + TAPERSIZE) increment *= (consts.Px - x + consts.Py - y - TRISIZE) / TAPERSIZE; if (consts.Px - x + y < TRISIZE) increment = 0.0f; else if (consts.Px - x + y < TRISIZE + TAPERSIZE) increment *= (consts.Px - x + y - TRISIZE) / TAPERSIZE; if (value != 0) { error += value * increment; count += increment; } } } if (count > 0) IM[j*consts.ReconPitchNum + i] = error / count; else IM[j*consts.ReconPitchNum + i] = 0.0f; } __global__ void normProjectSlice(float * IM, float distance, float alignStr, params consts) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; float value; //Set a normalization and pixel value to 0 float error = 0.0f; float sqInputs = 0.0f; float count = 0.0f; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; for (int view = 0; view < NUMVIEWS; view++) { if (!consts.useBeams[view])continue; float dz = distance / consts.d_Beamz[view]; float x = xMM2P((xR2MM(i, consts.Rx, consts.PitchRx) + consts.d_Beamx[consts.revGeo ? 
consts.Views - 1 - view : view] * dz), consts.Px, consts.PitchPx);// * (1 - dz) float y = yMM2P((yR2MM(j, consts.Ry, consts.PitchRy) + consts.d_Beamy[view] * dz), consts.Py, consts.PitchPy); //Update the value based on the error scaled and save the scale if (y >= 0 && y < consts.Py && x >= 0 && x < consts.Px) { //value = interpolateSino(x, y, view, consts); value = tex2D(textSino, x, y + view * consts.Py); float increment = 1.0f; if (y < TAPERSIZE) increment *= y / TAPERSIZE; if (y > consts.Py - TAPERSIZE) increment *= (consts.Py - y) / TAPERSIZE; if (x < TAPERSIZE) increment *= x / TAPERSIZE; if (x > consts.Px - TAPERSIZE) increment *= (consts.Px - x) / TAPERSIZE; //Corner correction if (consts.Px - x + consts.Py - y < TRISIZE) increment = 0.0f; else if (consts.Px - x + consts.Py - y < TRISIZE + TAPERSIZE) increment *= (consts.Px - x + consts.Py - y - TRISIZE) / TAPERSIZE; if (consts.Px - x + y < TRISIZE) increment = 0.0f; else if (consts.Px - x + y < TRISIZE + TAPERSIZE) increment *= (consts.Px - x + y - TRISIZE) / TAPERSIZE; if (value != 0) { error += value * increment; sqInputs += pow(value * increment, 2.0f); count += increment; } } } if (count > 0) { float factor = pow(error, 2.0f) / sqInputs; if (factor > alignStr) factor -= alignStr; else factor = 0.0f; IM[j*consts.ReconPitchNum + i] = error / count * factor / (consts.Views - alignStr); //IM[j*consts.ReconPitchNum + i] = error / count * sqrt(factor / (consts.Views - alignStr)); } else IM[j*consts.ReconPitchNum + i] = 0.0f; } __global__ void xIntegrate(float * output, float * derInput, float * input, int slice, params consts, hipSurfaceObject_t surfRecon = NULL) { //Define pixel location in x, y, and z int j = blockDim.y * blockIdx.y + threadIdx.y; if (j >= consts.Ry) return; if (blockDim.x * blockIdx.x + threadIdx.x != 0) return; float sum = 0.0; //float sum = input[j*consts.ReconPitchNum]; for (int i = 0; i < consts.Rx; i++) { sum += derInput[j*consts.ReconPitchNum + i]; if (surfRecon == NULL) output[j*consts.ReconPitchNum + i] = sum; else surf3Dwrite(sum, surfRecon, i * sizeof(float), j, slice); } } __global__ void xConvIntegrate(float * output, float * derInput, float * input, int slice, params consts, hipSurfaceObject_t surfRecon = NULL) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if ((i >= consts.Rx) || (j >= consts.Ry)) return; int width = min(200, min(abs(consts.Rx - 1 - i), i)); float sum = 0.0f; float count = 0.0f; for (int iter = i - width; iter <= i + width; iter++) {//min(consts.Rx, i + 1 + width) float val = input[j*consts.ReconPitchNum + max(0, iter)]; //float val = 20000; if (val > 0.0f) { if (iter > i) sum -= derInput[j*consts.ReconPitchNum + iter] * (width - abs(iter - i)); else sum += derInput[j*consts.ReconPitchNum + iter] * (width - abs(iter - i)); sum += val; count++; } } if(surfRecon == NULL) output[j*consts.ReconPitchNum + i] = sum / count; else surf3Dwrite(sum / count, surfRecon, i * sizeof(float), j, slice); } #ifdef SHOWERROR __global__ void projectIter(float * oldRecon, int slice, float iteration, bool skipTV, params consts, hipSurfaceObject_t surfRecon, hipSurfaceObject_t errorRecon) { #else __global__ void projectIter(float * proj, float * oldRecon, float * weights, int slice, float iteration, bool skipTV, float alpha, params consts, hipSurfaceObject_t surfRecon, hipSurfaceObject_t surfWeight, bool firstRun = false) { #endif //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int 
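	// projectIter: one additive update of a single reconstruction slice. For every source view the
	// voxel is mapped into that view's projection coordinates, the current reprojection error is
	// sampled from textError with edge/corner tapering (TAPERSIZE/TRISIZE), the contributions are
	// averaged and divided by the slice count, an optional neighborhood smoothing term (TVX/TVY
	// in-plane, TVZ across slices) is added, the result is scaled by |alpha|, and the sum is added
	// to the value already stored in surfRecon. The per-voxel weight read from surfWeight is
	// currently not applied (that scaling line is commented out below).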
j = blockDim.y * blockIdx.y + threadIdx.y; //Set a normalization and pixel value to 0 float count = 0.0f; float error = 0.0f; float sqInputs = 0.0f; //firstRun = true; //float maximum = 0.0f; //float minimum = FLT_MAX; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; for (int view = 0; view < NUMVIEWS; view++) { float dz = (consts.startDis + slice * consts.pitchZ) / consts.d_Beamz[view]; float x = xMM2P((xR2MM(i, consts.Rx, consts.PitchRx) + consts.d_Beamx[consts.revGeo ? consts.Views - 1 - view : view] * dz), consts.Px, consts.PitchPx);// / (1 + dz) float y = yMM2P((yR2MM(j, consts.Ry, consts.PitchRy) + consts.d_Beamy[view] * dz), consts.Py, consts.PitchPy); //Update the value based on the error scaled and save the scale if (y > 0 && y < consts.Py && x > 0 && x < consts.Px) { float value = tex2D(textError, x + 0.5f, y + 0.5f + view*consts.Py); float increment = 1.0f; if (y < TAPERSIZE) increment *= y / TAPERSIZE; if (y > consts.Py - TAPERSIZE) increment *= (consts.Py - y) / TAPERSIZE; if (x < TAPERSIZE) increment *= x / TAPERSIZE; if (x > consts.Px - TAPERSIZE) increment *= (consts.Px - x) / TAPERSIZE; //Corner correction if (consts.Px - x + consts.Py - y < TRISIZE) increment = 0.0f; else if (consts.Px - x + consts.Py - y < TRISIZE + TAPERSIZE) increment *= (consts.Px - x + consts.Py - y - TRISIZE) / TAPERSIZE; if (consts.Px - x + y < TRISIZE) increment = 0.0f; else if (consts.Px - x + y < TRISIZE + TAPERSIZE) increment *= (consts.Px - x + y - TRISIZE) / TAPERSIZE; if (abs(value) > 0.1f) { //float singleTemp = tex2D(textSino, x, y + view*consts.Py); count += increment; sqInputs += pow(value * increment, 2.0f); if (!firstRun) { //float weight = tex2D(textWeight, x, y + view*consts.Py); //if(weight > 0) // error += value * increment / weight; //else error += value * increment; } else error += value * increment; //if (singleTemp > maximum) maximum = singleTemp; //if (singleTemp < minimum) minimum = singleTemp; //singleVal += singleTemp * increment; } //float minTest = proj[(view * consts.Py + j)*consts.ProjPitchNum + i]; //if (minTest < minimum) minimum = minTest; } } if (count > 0) { /*float factor = pow(error, 2.0f) / sqInputs; if (DERWEIGHTSTR > count) { if (factor > DERWEIGHTSTR) factor -= DERWEIGHTSTR; else factor = 0.0f; error = error / count * factor / (ceil(count) - DERWEIGHTSTR) / (float)consts.slices; } else { error = error / count * factor / max(1.0f, count) / (float)consts.slices; }*/ error /= ((float)count * (float)consts.slices); } else error = 0.0f; float returnVal; surf3Dread(&returnVal, surfRecon, i * sizeof(float), j, slice); if (!skipTV && returnVal > 0.0f) { float AX = 0, BX = 0, temp; if (i > 0) { temp = oldRecon[i - 1 + j*consts.ReconPitchNum]; if (temp > 0.1f) BX += temp * TVX; AX += TVX; } if (i < consts.Rx - 1) { temp = oldRecon[i + 1 + j*consts.ReconPitchNum]; if (temp > 0.1f) BX += temp * TVX; AX += TVX; } if (j > 0) { temp = oldRecon[i + (j - 1)*consts.ReconPitchNum]; if (temp > 0.1f) BX += temp * TVY; AX += TVY; } if (j < consts.Ry - 1) { temp = oldRecon[i + (j + 1)*consts.ReconPitchNum]; if (temp > 0.1f) BX += temp * TVY; AX += TVY; } if (slice > 0) { surf3Dread(&returnVal, surfRecon, i * sizeof(float), j, slice - 1); BX += returnVal * TVZ; AX += TVZ; } if (slice < consts.slices - 1) { surf3Dread(&returnVal, surfRecon, i * sizeof(float), j, slice + 1); BX += returnVal * TVZ; AX += TVZ; } surf3Dread(&returnVal, surfRecon, i * sizeof(float), j, slice); if (AX > 0.0f) error += BX - AX*returnVal; } float weight; surf3Dread(&weight, 
surfWeight, i * sizeof(float), j, slice); error *= abs(alpha); //error *= weight / consts.weightMax; returnVal += error; //maximum /= (float)count; //minimum /= (float)count; //if (returnVal > maximum) returnVal = maximum; //if (returnVal < minimum) returnVal = minimum; /*if (returnVal > 0) { returnVal *= 0.97f; returnVal += 100.0f; }*/ //returnVal += (8000.0f - returnVal) * 0.1f; //if (returnVal > 10000) returnVal = 10000; //if (returnVal < 0.1f) returnVal = 0.1f; #ifdef SHOWERROR surf3Dwrite(error, errorRecon, i * sizeof(float), j, slice); #endif #ifdef RECONDERIVATIVE if (count == 0 || returnVal < 0.0f) surf3Dwrite(0.0f, surfRecon, i * sizeof(float), j, slice); else surf3Dwrite(returnVal, surfRecon, i * sizeof(float), j, slice); #else //surf3Dwrite(error, surfDelta, i * sizeof(float), j, slice); //surf3Dwrite(delta, surfDelta, i * sizeof(float), j, slice); surf3Dwrite(returnVal, surfRecon, i * sizeof(float), j, slice); #endif // RECONDERIVATIVE } __global__ void projectFinalIter(int slice, params consts, hipSurfaceObject_t surfRecon) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //Set a normalization and pixel value to 0 float count = 0.0f; float error = 0.0f; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; for (int view = 0; view < NUMVIEWS; view++) { //int view = 3; { float dz = (consts.startDis + slice * consts.pitchZ) / consts.d_Beamz[view]; float x = xMM2P((xR2MM(i, consts.Rx, consts.PitchRx) + consts.d_Beamx[consts.revGeo ? consts.Views - 1 - view : view] * dz), consts.Px, consts.PitchPx);// / (1 + dz) float y = yMM2P((yR2MM(j, consts.Ry, consts.PitchRy) + consts.d_Beamy[view] * dz), consts.Py, consts.PitchPy); //Update the value based on the error scaled and save the scale if (y > 0 && y < consts.Py && x > 0 && x < consts.Px) { float value = tex2D(textSino, x + 0.5f, y + 0.5f + view*consts.Py); float increment = 1.0f; if (y < TAPERSIZE) increment *= y / TAPERSIZE; if (y > consts.Py - TAPERSIZE) increment *= (consts.Py - y) / TAPERSIZE; if (x < TAPERSIZE) increment *= x / TAPERSIZE; if (x > consts.Px - TAPERSIZE) increment *= (consts.Px - x) / TAPERSIZE; //Corner correction if (consts.Px - x + consts.Py - y < TRISIZE) increment = 0.0f; else if (consts.Px - x + consts.Py - y < TRISIZE + TAPERSIZE) increment *= (consts.Px - x + consts.Py - y - TRISIZE) / TAPERSIZE; if (consts.Px - x + y < TRISIZE) increment = 0.0f; else if (consts.Px - x + y < TRISIZE + TAPERSIZE) increment *= (consts.Px - x + y - TRISIZE) / TAPERSIZE; if (abs(value) > 0.1f) { error += value * increment; count += increment; } } } if (count > 0) error /= (float)count; else error = 0.0f; float returnVal; surf3Dread(&returnVal, surfRecon, i * sizeof(float), j, slice); if(returnVal < 0.1f) surf3Dwrite(error, surfRecon, i * sizeof(float), j, slice); } __global__ void backProject(float * proj, float * error, float * weights, int view, float iteration, float totalIterations, hipSurfaceObject_t surfWeight, params consts) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; float value = 0; float deltaSum = 0.0f; int count = 0; //Check image boundaries if ((i >= consts.Px) || (j >= consts.Py)) return; for (int slice = 0; slice < consts.slices; slice++) { float dz = (consts.startDis + slice * consts.pitchZ) / consts.d_Beamz[view]; float x = xMM2R((xP2MM(i, consts.Px, consts.PitchPx) - consts.d_Beamx[consts.revGeo ? 
consts.Views - 1 - view : view] * dz), consts.Rx, consts.PitchRx); float y = yMM2R((yP2MM(j, consts.Py, consts.PitchPy) - consts.d_Beamy[view] * dz), consts.Ry, consts.PitchRy); //Update the value based on the error scaled and save the scale if (y >= 0 && y < consts.Ry && x >= 0 && x < consts.Rx) { //value += tex2D(textSino, x, y + slice*consts.Ry); float returnVal = 0.0f, delta; //surf3Dread(&returnVal, surfRecon, x * sizeof(float), y, slice); returnVal = tex3D(textRecon, x + 0.5f, y + 0.5f, slice + 0.5f); /*{ float tempVal; int tempCount = 0; tempVal = tex3D(textRecon, x, y, slice); if (tempVal > 0.0f) { returnVal += tempVal; tempCount++; } tempVal = tex3D(textRecon, x, y + 1.0f, slice); if (tempVal > 0.0f) { returnVal += tempVal; tempCount++; } tempVal = tex3D(textRecon, x + 1.0f, y + 1.0f, slice); if (tempVal > 0.0f) { returnVal += tempVal; tempCount++; } tempVal = tex3D(textRecon, x + 1.0f, y, slice); if (tempVal > 0.0f) { returnVal += tempVal; tempCount++; } if (tempCount > 0) returnVal /= tempCount; else returnVal = 0.0f; }*/ //surf3Dread(&delta, surfDelta, i * sizeof(float), j, slice); //deltaSum += abs(delta); deltaSum++; if (returnVal >= 0.1f) count++; value += returnVal; } } float projVal = proj[j*consts.ProjPitchNum + i]; #ifdef RECONDERIVATIVE if (projVal > 0.0f && abs(projVal) <= USHRT_MAX && count > 0) { error[j*consts.ProjPitchNum + i] = projVal - (value * (float)consts.Views / (float)count); } #else if (projVal > 0.0f && count > 0) { #ifdef USELOGITER float correctedMax = logf(USHRT_MAX); projVal = (correctedMax - logf(projVal + 1)) / correctedMax * USHRT_MAX; #else #ifdef INVERSEITER projVal = USHRT_MAX - projVal; #endif #endif error[j*consts.ProjPitchNum + i] = (projVal - (value * (float)consts.Views / (float)count)); } #endif // RECONDERIVATIVE else error[j*consts.ProjPitchNum + i] = 0.0f; weights[j*consts.ProjPitchNum + i] = deltaSum; } __global__ void synthetic2D(float * synth, int sliceIndex, params consts) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; float value = 0; int count = 0; //Check image boundaries if ((i >= consts.Px) || (j >= consts.Py)) return; /*const int buffer = 1; int min = sliceIndex - buffer; int max = sliceIndex + buffer + 1; if (min < 0) min = 0; if (max > consts.slices) max = consts.slices;*/ //for (int slice = min; slice < max; slice++) { for (int slice = 0; slice < consts.slices; slice++) { float dz = (consts.startDis + slice * consts.pitchZ) / consts.d_Beamz[0]; float x = xMM2R((xP2MM(i, consts.Px, consts.PitchPx) + consts.projectionAngle * dz), consts.Rx, consts.PitchRx); //int x = consts.projectionAngle * slice / consts.slices + i; int y = j; if ((x >= consts.Px) || (x < 0)) continue; float returnVal = 0.0f; returnVal = tex3D(textRecon, x + 0.5f, y + 0.5f, slice + 0.5f); if (returnVal >= 0.1f) count++; value += returnVal; } if (count > 0) synth[j*consts.ReconPitchNum + i] = value / count; else synth[j*consts.ReconPitchNum + i] = 0.0f; } __global__ void getSinogram(float * output, params consts) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; float value; //Check image boundaries if ((i >= consts.Px) || (j >= consts.Py)) return; int view = j * NUMVIEWS / consts.Py; output[j*consts.ProjPitchNum + i] = tex2D(textSino, i, consts.pixelLine + view * consts.Py); } __global__ void copySlice(float * image, int slice, params consts, hipSurfaceObject_t surfRecon, bool invertLogCorrect = false) { //Define pixel 
location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; float returnVal; surf3Dread(&returnVal, surfRecon, i * sizeof(float), j, slice); if (invertLogCorrect) { if (returnVal > 10) { float correctedMax = logf(USHRT_MAX); returnVal = (correctedMax - logf(USHRT_MAX - returnVal + 1)) / correctedMax * USHRT_MAX; } } image[j*consts.ReconPitchNum + i] = returnVal; } __global__ void invertRecon(int slice, params consts, hipSurfaceObject_t surfRecon) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; float test; surf3Dread(&test, surfRecon, i * sizeof(float), j, slice); surf3Dwrite(USHRT_MAX - test, surfRecon, i * sizeof(float), j, slice); } __global__ void scaleRecon(int slice, float * scales, float * offsets, params consts, hipSurfaceObject_t surfRecon) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; float test; surf3Dread(&test, surfRecon, i * sizeof(float), j, slice); if (test == 0.0f) return; unsigned int index = (unsigned short)test >> 8; test = test * scales[index] + offsets[index] * 256.0f; if (test > 1.0f) { surf3Dwrite(test, surfRecon, i * sizeof(float), j, slice); } else surf3Dwrite(1.0f, surfRecon, i * sizeof(float), j, slice); } __global__ void initArray(int slice, float value, params consts, hipSurfaceObject_t surfRecon) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; surf3Dwrite(value, surfRecon, i * sizeof(float), j, slice); } //Ruduction and histogram functions __global__ void sumReduction(float * Image, int pitch, float * sum, float lowX, float upX, float lowY, float upY) { //Define shared memory to read all the threads extern __shared__ float data[]; //define the thread and block location const int thread = threadIdx.x; const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); float val; if (x >= ceil(upX) || y >= ceil(upY) || x <= floor(lowX) || y <= floor(lowY)) { val = 0.0; data[thread] = 0.0; } else { val = Image[y*pitch + x]; if (x == floor(upX)) { val += Image[y*pitch + x + 1] * (upX - floor(upX)); Image[y*pitch + x + 1] = 0.0; } if (y == floor(upY)) { val += Image[(y + 1)*pitch + x] * (upY - floor(upY)); Image[(y + 1)*pitch + x] = 0.0; } if (x == ceil(lowX)) { val += Image[y*pitch + x - 1] * (ceil(lowX) - lowX); Image[y*pitch + x - 1] = 0.0; } if (y == ceil(lowY)) { val += Image[(y - 1)*pitch + x] * (ceil(lowY) - lowY); Image[(y - 1)*pitch + x] = 0.0; } data[thread] = val; Image[y*pitch + x] = 0.0;//test display } //Each thread puts its local sum into shared memory __syncthreads(); //Do reduction in shared memory if (thread < 512) data[thread] = val += data[thread + 512]; __syncthreads(); if (thread < 256) data[thread] = val += data[thread + 256]; __syncthreads(); if (thread < 128) data[thread] = val += data[thread + 128]; __syncthreads(); if (thread < 64) data[thread] = val += data[thread + 64]; __syncthreads(); if (thread < 32) { // Fetch final intermediate sum from 2nd warp data[thread] = val += 
data[thread + 32]; // Reduce final warp using shuffle for (int offset = warpSize / 2; offset > 0; offset /= 2) { val += __shfl_down(val, offset); } } //write the result for this block to global memory if (thread == 0 && val > 0) { atomicAdd(sum, val); } } __global__ void sumRowsOrCols(float * sum, bool cols, params consts) { //Define shared memory to read all the threads extern __shared__ float data[]; __shared__ int counts[1024]; //define the thread and block location const int thread = threadIdx.x; const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); float val = 0; int count = 0; int i = x; int limit; if (cols) limit = consts.Py; else limit = consts.Px; while(i < limit){ float temp; if(cols) temp = tex2D(textSino, y, i); else temp = tex2D(textSino, i, y); val += temp; if (temp > 0.0f) count++; i += blockDim.x; } data[thread] = val; counts[thread] = count; //Each thread puts its local sum into shared memory __syncthreads(); //Do reduction in shared memory if (thread < 512) { data[thread] = val += data[thread + 512]; counts[thread] = count += counts[thread + 512]; } __syncthreads(); if (thread < 256) { data[thread] = val += data[thread + 256]; counts[thread] = count += counts[thread + 256]; } __syncthreads(); if (thread < 128) { data[thread] = val += data[thread + 128]; counts[thread] = count += counts[thread + 128]; } __syncthreads(); if (thread < 64) { data[thread] = val += data[thread + 64]; counts[thread] = count += counts[thread + 64]; } __syncthreads(); if (thread < 32) { // Fetch final intermediate sum from 2nd warp data[thread] = val += data[thread + 32]; counts[thread] = count += counts[thread + 32]; // Reduce final warp using shuffle for (int offset = warpSize / 2; offset > 0; offset /= 2) { val += __shfl_down(val, offset); count += __shfl_down(count, offset); } } //write the result for this block to global memory if (thread == 0) { if (cols) val *= consts.Py; else val *= consts.Px; if (count > 0) sum[y] = val / (float)count; else sum[y] = 0.0f; } } template <typename T> __global__ void histogram256Kernel(unsigned int *d_Histogram, T *d_Data, unsigned int dataCount, params consts) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int minX = min(consts.baseXr, consts.currXr); int maxX = max(consts.baseXr, consts.currXr); int minY = min(consts.baseYr, consts.currYr); int maxY = max(consts.baseYr, consts.currYr); if (i < minX || i > maxX || j < minY || j > maxY) return; //if (consts.orientation) i = consts.Px - 1 - i; //if (consts.flip) j = consts.Py - 1 - j; float data; if (consts.dataDisplay == projections) { data = abs(d_Data[MUL_ADD(j, consts.ProjPitchNum, i)]); } else { data = abs(d_Data[MUL_ADD(j, consts.ReconPitchNum, i)]); } //whatever it currently is, cast it to ushort //if (data <= 0.0f) return; if (consts.log) { if (data > 0) { float correctedMax = logf(USHRT_MAX); data = (correctedMax - logf(data + 1)) / correctedMax * USHRT_MAX; } } if (data > USHRT_MAX) data = USHRT_MAX; if (data <= 0.0f) return;// data = 0.0f; atomicAdd(d_Histogram + ((unsigned short)data >> 8), 1);//bin by the upper 256 bits } __global__ void histogramReconKernel(unsigned int *d_Histogram, int slice, bool useLog, params consts, hipSurfaceObject_t surfRecon) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int minX = min(consts.baseXr, consts.currXr); int maxX = 
max(consts.baseXr, consts.currXr); int minY = min(consts.baseYr, consts.currYr); int maxY = max(consts.baseYr, consts.currYr); if (i < minX || i >= maxX || j < minY || j >= maxY) return; if (consts.orientation) i = consts.Rx - 1 - i; if (consts.flip) j = consts.Ry - 1 - j; float data = 0; surf3Dread(&data, surfRecon, i * sizeof(float), j, slice); if (consts.log && useLog) { if (data > 0) { float correctedMax = logf(USHRT_MAX); data = (correctedMax - logf(data + 1)) / correctedMax * USHRT_MAX; } } if (consts.isReconstructing) { if (data > 0) { float correctedMax = logf(USHRT_MAX); data = (correctedMax - logf(USHRT_MAX - data + 1)) / correctedMax * USHRT_MAX; } } if (data > USHRT_MAX) data = USHRT_MAX; if (data <= 0.0f) return;// data = 0.0f; atomicAdd(d_Histogram + ((unsigned short)data >> 8), 1);//bin by the upper 8 bits } // u= (1 - tu) * uold + tu .* ( f + 1/lambda*div(z) ); __global__ void updhgF_SoA(float *f, float *z1, float *z2, float *g, float tf, float invlambda, int nx, int ny){ int px = blockIdx.x * blockDim.x + threadIdx.x; int py = blockIdx.y * blockDim.y + threadIdx.y; int idx = px + py*nx; float DIVZ; if (px<nx && py<ny){ // compute the divergence DIVZ = 0; if ((px<(nx - 1))) DIVZ += z1[idx]; if ((px>0)) DIVZ -= z1[idx - 1]; if ((py<(ny - 1))) DIVZ += z2[idx]; if ((py>0)) DIVZ -= z2[idx - nx]; // update f float val = g[idx]; if(val > 0) f[idx] = (1 - tf) *f[idx] + tf * (val + invlambda*DIVZ); else f[idx] = 0; } } // z= zold + tz*lambda* grad(u); // and normalize z: //n=max(1,sqrt(z(:,:,1).*z(:,:,1) +z(:,:,2).*z(:,:,2) ) ); // z= z/n; __global__ void updhgZ_SoA(float *z1, float *z2, float *f, float tz, float lambda, int nx, int ny){ int px = blockIdx.x * blockDim.x + threadIdx.x; int py = blockIdx.y * blockDim.y + threadIdx.y; int idx = px + py*nx; if (px<nx && py<ny){ // compute the gradient float a = 0; float b = 0; float fc = f[idx]; if (px < (nx - 1)) { float val = f[idx + 1]; if(val > 0) a = val - fc; } if (py < (ny - 1)) { float val = f[idx + nx]; if(val > 0) b = val - fc; } // update z a = z1[idx] + tz*lambda*a; b = z2[idx] + tz*lambda*b; // project float t = 0; t = sqrtf(a*a + b*b); t = (t <= 1 ? 1. 
: t); z1[idx] = a / t; z2[idx] = b / t; } } /********************************************************************************************/ /* Function to interface the CPU with the GPU: */ /********************************************************************************************/ //Function to set up the memory on the GPU TomoError TomoRecon::initGPU(){ //init recon space float redFac = 1.0f; Sys.Recon.Pitch_x = Sys.Proj.Pitch_x * redFac; Sys.Recon.Pitch_y = Sys.Proj.Pitch_y * redFac; Sys.Recon.Nx = Sys.Proj.Nx / redFac; Sys.Recon.Ny = Sys.Proj.Ny / redFac; //Normalize Geometries Sys.Geo.IsoX = Sys.Geo.EmitX[NUMVIEWS / 2]; Sys.Geo.IsoY = Sys.Geo.EmitY[NUMVIEWS / 2]; Sys.Geo.IsoZ = Sys.Geo.EmitZ[NUMVIEWS / 2]; for (int i = 0; i < NUMVIEWS; i++) { Sys.Geo.EmitX[i] -= Sys.Geo.IsoX; Sys.Geo.EmitY[i] -= Sys.Geo.IsoY; } constants.pitchZ = Sys.Geo.ZPitch; //hipDeviceSynchronize(); #ifdef PRINTMEMORYUSAGE size_t avail_mem; size_t total_mem; hipMemGetInfo(&avail_mem, &total_mem); std::cout << "Init start available memory: " << avail_mem << "/" << total_mem << "\n"; #endif // PRINTMEMORYUSAGE //Get Device Number hipError_t cudaStatus; int deviceCount; int failedAttempts = 0; cuda(GetDeviceCount(&deviceCount)); for (int i = 0; i < deviceCount; i++) { if (hipSetDevice(i) == hipSuccess) break; failedAttempts++; } if (failedAttempts == deviceCount) return Tomo_CUDA_err; cuda(StreamCreateWithFlags(&stream, hipStreamDefault)); //Thread and block sizes for standard kernel calls (2d optimized) contThreads.x = WARPSIZE; contThreads.y = MAXTHREADS / WARPSIZE; //Thread and block sizes for reductions (1d optimized) reductionThreads.x = MAXTHREADS; reductionBlocks.x = (Sys.Proj.Nx + reductionThreads.x - 1) / reductionThreads.x; reductionBlocks.y = Sys.Proj.Ny; //Set up display and buffer regions //cuda(MallocPitch((void**)&d_Image, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny)); if (redFac <= 1.0f) { cuda(MallocPitch((void**)&d_Image, &displayPitch, Sys.Recon.Nx * sizeof(float), Sys.Recon.Ny)); cuda(MallocPitch((void**)&d_Image2, &displayPitch, Sys.Recon.Nx * sizeof(float), Sys.Recon.Ny)); contBlocks.x = (Sys.Recon.Nx + contThreads.x - 1) / contThreads.x; contBlocks.y = (Sys.Recon.Ny + contThreads.y - 1) / contThreads.y; } else { cuda(MallocPitch((void**)&d_Image, &displayPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny)); cuda(MallocPitch((void**)&d_Image2, &displayPitch, Sys.Recon.Nx * sizeof(float), Sys.Recon.Ny)); contBlocks.x = (Sys.Proj.Nx + contThreads.x - 1) / contThreads.x; contBlocks.y = (Sys.Proj.Ny + contThreads.y - 1) / contThreads.y; } constants.DisplayPitchNum = displayPitch / sizeof(float); cuda(MallocPitch((void**)&d_Sino, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); cuda(MallocPitch((void**)&d_Raw, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); cuda(MallocPitch((void**)&d_Weights, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); cuda(MallocPitch((void**)&inXBuff, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); cuda(MallocPitch((void**)&inYBuff, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); #ifdef USEITERATIVE cuda(MallocPitch((void**)&d_Error, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); #else cuda(MallocPitch((void**)&d_Error, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny)); #endif reconPitch = max(projPitch, displayPitch); reconPitchNum = (int)reconPitch / sizeof(float); constants.ReconPitchNum = 
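	// Note on pitches: the values returned by the MallocPitch calls above are in bytes; the
	// *PitchNum values cached here and in "constants" are converted to float-element counts
	// (pitch / sizeof(float)) so device kernels can index rows with MUL_ADD(y, pitch, x).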
reconPitchNum; //Define the size of each of the memory spaces on the gpu in number of bytes sizeProj = Sys.Proj.Nx * Sys.Proj.Ny * sizeof(unsigned short); sizeSino = projPitch * Sys.Proj.Ny * Sys.Proj.NumViews; sizeIM = projPitch * Sys.Proj.Ny; sizeError = projPitch * Sys.Proj.Ny; cuda(Malloc((void**)&constants.d_Beamx, Sys.Proj.NumViews * sizeof(float))); cuda(Malloc((void**)&constants.d_Beamy, Sys.Proj.NumViews * sizeof(float))); cuda(Malloc((void**)&constants.d_Beamz, Sys.Proj.NumViews * sizeof(float))); cuda(Malloc((void**)&constants.useBeams, Sys.Proj.NumViews * sizeof(bool))); cuda(Malloc((void**)&d_MaxVal, sizeof(float))); cuda(Malloc((void**)&d_MinVal, sizeof(float))); //Copy geometries cuda(MemcpyAsync(constants.d_Beamx, Sys.Geo.EmitX, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamy, Sys.Geo.EmitY, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamz, Sys.Geo.EmitZ, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(constants.useBeams, Sys.Proj.activeBeams, Sys.Proj.NumViews * sizeof(bool), hipMemcpyHostToDevice)); //Define the textures textImage.filterMode = hipFilterModeLinear; textImage.addressMode[0] = hipAddressModeClamp; textImage.addressMode[1] = hipAddressModeClamp; textError.filterMode = hipFilterModeLinear; textError.addressMode[0] = hipAddressModeClamp; textError.addressMode[1] = hipAddressModeClamp; textWeight.filterMode = hipFilterModeLinear; textWeight.addressMode[0] = hipAddressModeClamp; textWeight.addressMode[1] = hipAddressModeClamp; textSino.filterMode = hipFilterModeLinear; textSino.addressMode[0] = hipAddressModeClamp; textSino.addressMode[1] = hipAddressModeClamp; textRecon.filterMode = hipFilterModePoint; textRecon.addressMode[0] = hipAddressModeClamp; textRecon.addressMode[1] = hipAddressModeClamp; constants.Px = Sys.Proj.Nx; constants.Py = Sys.Proj.Ny; constants.Rx = Sys.Recon.Nx; constants.Ry = Sys.Recon.Ny; constants.PitchPx = Sys.Proj.Pitch_x; constants.PitchPy = Sys.Proj.Pitch_y; constants.PitchRx = Sys.Recon.Pitch_x; constants.PitchRy = Sys.Recon.Pitch_y; constants.Views = Sys.Proj.NumViews; constants.log = true; constants.orientation = false; constants.flip = false; int pitch = (int)projPitch / sizeof(float); constants.ProjPitchNum = pitch; //Setup derivative buffers cuda(Malloc(&buff1, sizeIM * sizeof(float))); cuda(Malloc(&buff2, sizeIM * sizeof(float))); #ifdef ENABLEZDER //Z buffer cuda(MallocPitch((void**)&inZBuff, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); cuda(MallocPitch((void**)&maxZVal, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); cuda(MallocPitch((void**)&maxZPos, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); float * tempZBuffs[KERNELSIZE]; cuda(Malloc(&zBuffs, KERNELSIZE * sizeof(float*))); for (int i = 0; i < KERNELSIZE; i++) { cuda(Malloc(&tempZBuffs[i], sizeIM * sizeof(float))); } cuda(MemcpyAsync(zBuffs, tempZBuffs, KERNELSIZE * sizeof(float*), hipMemcpyHostToDevice)); #endif // ENABLEZDER //Set up all kernels cuda(Malloc(&d_gauss, KERNELSIZE * sizeof(float))); cuda(Malloc(&d_gaussDer, KERNELSIZE * sizeof(float))); cuda(Malloc(&d_gaussDer2, KERNELSIZE * sizeof(float))); //cuda(Malloc(&d_gaussDer3, KERNELSIZE * sizeof(float))); float tempKernel[KERNELSIZE]; setGauss(tempKernel); cuda(MemcpyAsync(d_gauss, tempKernel, KERNELSIZE * sizeof(float), hipMemcpyHostToDevice)); float tempKernelDer[KERNELSIZE]; setGaussDer(tempKernelDer); 
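	// setGauss/setGaussDer/setGaussDer2 are defined elsewhere in TomoRecon; they fill the
	// KERNELSIZE-tap separable kernels that the row/column convolution kernels above apply
	// (via imageKernel) to form smoothed images and x/y derivatives. Illustrative sketch only,
	// assuming a centered kernel of radius KERNELRADIUS and a smoothing width "sigma" (a name
	// not taken from this file) -- not the actual setGaussDer implementation:
	//
	//   void gaussDerSketch(float k[KERNELSIZE], float sigma) {
	//       float norm = 0.0f;
	//       for (int t = -KERNELRADIUS; t <= KERNELRADIUS; t++) {
	//           // d/dx of exp(-x^2 / (2 sigma^2)) is proportional to -x * exp(-x^2 / (2 sigma^2))
	//           k[t + KERNELRADIUS] = -t * expf(-(float)(t * t) / (2.0f * sigma * sigma));
	//           norm += fabsf(k[t + KERNELRADIUS]);
	//       }
	//       for (int t = 0; t < KERNELSIZE; t++) k[t] /= norm; // keep responses comparable across widths
	//   }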
cuda(MemcpyAsync(d_gaussDer, tempKernelDer, KERNELSIZE * sizeof(float), hipMemcpyHostToDevice)); float tempKernelDer2[KERNELSIZE]; setGaussDer2(tempKernelDer2); cuda(MemcpyAsync(d_gaussDer2, tempKernelDer2, KERNELSIZE * sizeof(float), hipMemcpyHostToDevice)); /*float tempKernelDer3[KERNELSIZE]; setGaussDer3(tempKernelDer3); cuda(MemcpyAsync(d_gaussDer3, tempKernelDer3, KERNELSIZE * sizeof(float), hipMemcpyHostToDevice));*/ #ifdef PRINTMEMORYUSAGE hipMemGetInfo(&avail_mem, &total_mem); std::cout << "Init end available memory: " << avail_mem << "/" << total_mem << "\n"; #endif // PRINTMEMORYUSAGE return Tomo_OK; } TomoError TomoRecon::ReadProjections(unsigned short ** GainData, unsigned short ** RawData) { //Correct projections float * sumValsVert = new float[NumViews * Sys.Proj.Nx]; float * sumValsHor = new float[NumViews * Sys.Proj.Ny]; float * vertOff = new float[NumViews * Sys.Proj.Nx]; float * horOff = new float[NumViews * Sys.Proj.Ny]; float * d_SumValsVert; float * d_SumValsHor; #ifdef VERBOSEMEMORY size_t avail_mem; size_t total_mem; hipMemGetInfo(&avail_mem, &total_mem); std::cout << "Read start available memory: " << avail_mem << "/" << total_mem << "\n"; #endif // VERBOSEMEMORY cuda(Malloc((void**)&d_SumValsVert, Sys.Proj.Nx * sizeof(float))); cuda(Malloc((void**)&d_SumValsHor, Sys.Proj.Ny * sizeof(float))); //define the GPU kernel based on size of "ideal projection" dim3 dimBlockProj(32, 32); dim3 dimGridProj((Sys.Proj.Nx + 31) / 32, (Sys.Proj.Ny + 31) / 32); dim3 dimGridSum(1, 1); dim3 dimBlockSum(1024, 1); unsigned short * d_Proj; unsigned short * d_Gain; cuda(Malloc((void**)&d_Proj, sizeProj)); cuda(Malloc((void**)&d_Gain, sizeProj)); constants.baseXr = 0; constants.baseYr = 0; constants.currXr = Sys.Proj.Nx; constants.currYr = Sys.Proj.Ny; bool oldLog = constants.log; constants.log = false; sourceData oldData = constants.dataDisplay; constants.dataDisplay = projections; //setStep(1.0); //Read the rest of the blank images for given projection sample set for (int view = 0; view < NumViews; view++) { cuda(MemcpyAsync(d_Proj, RawData[view], sizeProj, hipMemcpyHostToDevice)); cuda(MemcpyAsync(d_Gain, GainData[view], sizeProj, hipMemcpyHostToDevice)); KERNELCALL2(LogCorrectProj, dimGridProj, dimBlockProj, d_Sino, view, d_Proj, d_Gain, constants); cuda(BindTexture2D(NULL, textSino, d_Sino + view*Sys.Proj.Ny*projPitch / sizeof(float), hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny, projPitch)); KERNELCALL3(sumRowsOrCols, dim3(1, Sys.Proj.Nx), reductionThreads, reductionSize, d_SumValsVert, true, constants); cuda(MemcpyAsync(sumValsVert + view * Sys.Proj.Nx, d_SumValsVert, Sys.Proj.Nx * sizeof(float), hipMemcpyDeviceToHost)); cuda(BindTexture2D(NULL, textSino, d_Sino + view*Sys.Proj.Ny*projPitch / sizeof(float), hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny, projPitch)); KERNELCALL3(sumRowsOrCols, dim3(1, Sys.Proj.Ny), reductionThreads, reductionSize, d_SumValsHor, false, constants); cuda(MemcpyAsync(sumValsHor + view * Sys.Proj.Ny, d_SumValsHor, Sys.Proj.Ny * sizeof(float), hipMemcpyDeviceToHost)); } for (int view = 0; view < NumViews; view++) { tomo_err_throw(scanLineDetect(view, d_SumValsVert, sumValsVert + view * Sys.Proj.Nx, vertOff + view * Sys.Proj.Nx, true, cConstants.scanVertEnable)); tomo_err_throw(scanLineDetect(view, d_SumValsHor, sumValsHor + view * Sys.Proj.Ny, horOff + view * Sys.Proj.Ny, false, cConstants.scanHorEnable)); } //Normalize projection image lighting float maxVal, minVal; unsigned int histogram[HIST_BIN_COUNT]; 
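	// The block below equalizes per-view brightness: getHistogram bins each projection into
	// HIST_BIN_COUNT buckets and autoLight (defined elsewhere in TomoRecon) derives intensity
	// bounds from those histograms. Purely as an illustration of pulling a percentile out of such
	// a histogram -- an assumed helper, not the autoLight implementation:
	//
	//   int percentileBin(const unsigned int h[HIST_BIN_COUNT], unsigned int total, float frac) {
	//       unsigned int target = (unsigned int)(frac * (float)total), running = 0;
	//       for (int b = 0; b < HIST_BIN_COUNT; b++) {
	//           running += h[b];
	//           if (running >= target) return b; // first bin where the running count crosses the target
	//       }
	//       return HIST_BIN_COUNT - 1;
	//   }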
tomo_err_throw(getHistogram(d_Sino + (NumViews / 2)*projPitch / sizeof(float)*Sys.Proj.Ny, projPitch*Sys.Proj.Ny, histogram)); tomo_err_throw(autoLight(histogram, 1, &minVal, &maxVal)); for (int view = 0; view < NumViews; view++) { if (view == (NumViews / 2) || !Sys.Proj.activeBeams[view]) continue; float thisMax, thisMin; unsigned int thisHistogram[HIST_BIN_COUNT]; tomo_err_throw(getHistogram(d_Sino + view*projPitch / sizeof(float)*Sys.Proj.Ny, projPitch*Sys.Proj.Ny, thisHistogram)); tomo_err_throw(autoLight(thisHistogram, 1, &thisMin, &thisMax)); if (thisMax > maxVal) maxVal = thisMax; if (thisMin < minVal) minVal = thisMin; } cuda(Memcpy(d_MinVal, &minVal, sizeof(float), hipMemcpyHostToDevice)); if (histogram[HIST_BIN_COUNT - 1] > SATURATIONLIMIT * Sys.Proj.Nx * Sys.Proj.Ny) Sys.Proj.saturated = true; float *g, *z1, *z2; size_t size; int j; int nx = projPitch / sizeof(float); int ny = Sys.Proj.Ny; size = nx*ny * sizeof(float); float tz = 2, tf = .2, beta = 0.0001; /* allocate device memory */ cuda(Malloc((void **)&g, size)); cuda(Malloc((void **)&z1, size)); cuda(Malloc((void **)&z2, size)); /* setup a 2D thread grid, with 16x16 blocks */ /* each block is will use nearby memory*/ dim3 block_size(16, 16); dim3 n_blocks((nx + block_size.x - 1) / block_size.x, (ny + block_size.y - 1) / block_size.y); int bailCount = 0; for (int view = 0; view < NumViews; view++) { if (!Sys.Proj.activeBeams[view]) continue; float bestScale = 1.0f; unsigned int histogram2[HIST_BIN_COUNT]; tomo_err_throw(getHistogram(d_Sino + view*projPitch / sizeof(float)*Sys.Proj.Ny, projPitch*Sys.Proj.Ny, histogram2)); int finalIndex = 255; while (histogram2[finalIndex] < 1000) finalIndex--; finalIndex -= 50; //manually check range of offset values int bestOffset = -100; if (view != NumViews / 2) { float step = 0.01f, scale = 1.0f, scaleError = FLT_MAX; while(abs(step) > 0.0005f){ float innerError = FLT_MAX; int innerOffset = -100; float offset = 0.0f, innerStep = 10.0f; while(abs(innerStep) > 0.5f){ //find average error float avgError = 0.0f; for (int test = 0; test < finalIndex; test++) { float index2 = test*scale + offset; if (index2 >= 0 && index2 < 256) { int lower = floor(index2); int upper = ceil(index2); float intopVal; if (upper == lower) intopVal = histogram2[lower]; else intopVal = ((float)upper - index2) * histogram2[lower] + (index2 - (float)lower) * histogram2[upper]; //avgError += pow((float)histogram[test] - intopVal, 2.0f) / (100 + test); avgError += abs((float)histogram[test] - intopVal); } else avgError += histogram[test]; } //MAX logic if (avgError < innerError) { innerError = avgError; innerOffset = offset; bailCount++; if (bailCount > 1000) { offset = 0; bailCount = 0; break; } } else { offset -= innerStep; innerStep *= -0.5f; } offset += innerStep; } if (innerError < scaleError) { scaleError = innerError; bestScale = scale; bestOffset = innerOffset; } else { scale -= step; step *= -0.5f; } scale += step; } for (int j = 0; j < Sys.Proj.Nx; j++) { vertOff[j + view * Sys.Proj.Nx] += bestOffset * 255;//offsets are subtracted } } cuda(MemcpyAsync(d_MaxVal, &maxVal, sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(d_SumValsVert, vertOff + view * Sys.Proj.Nx, Sys.Proj.Nx * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(d_SumValsHor, horOff + view * Sys.Proj.Ny, Sys.Proj.Ny * sizeof(float), hipMemcpyHostToDevice)); KERNELCALL2(rescale, dimGridProj, dimBlockProj, d_Sino, d_Raw, view, d_MaxVal, d_MinVal, d_SumValsVert, d_SumValsHor, bestScale, constants); if (useTV) { //chaTV(d_Sino + 
projPitch / sizeof(float) * Sys.Proj.Ny * view, iter, projPitch / sizeof(float), Sys.Proj.Ny, lambda); float * input = d_Sino + projPitch / sizeof(float) * Sys.Proj.Ny * view; /* Copy input to device*/ cuda(MemcpyAsync(g, input, size, hipMemcpyDeviceToDevice)); cuda(MemsetAsync(z1, 0, size)); cuda(MemsetAsync(z2, 0, size)); /* call the functions */ for (j = 0; j < iter; j++) { tz = 0.2 + 0.08*j; tf = (0.5 - 5. / (15 + j)) / tz; // z= zold + tauz.* grad(u); // and normalize z: n=max(1,sqrt(z(:,:,1).*z(:,:,1) +z(:,:,2).*z(:,:,2) + beta) ); z/=n; KERNELCALL2(updhgZ_SoA, n_blocks, block_size, z1, z2, input, tz, 1 / lambda, nx, ny); // u= (1 - tauu*lambda) * uold + tauu .* div(z) + tauu*lambda.*f; KERNELCALL2(updhgF_SoA, n_blocks, block_size, input, z1, z2, g, tf, lambda, nx, ny); } } //Get x and y derivatives and save to their own buffers cuda(Memcpy(d_Image, d_Sino + view * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); sourceData oldDisplay = constants.dataDisplay; constants.dataDisplay = projections; tomo_err_throw(imageKernel(d_gaussDer, d_gauss, inXBuff + view * projPitch / sizeof(float) * Sys.Proj.Ny, true)); tomo_err_throw(imageKernel(d_gauss, d_gaussDer, inYBuff + view * projPitch / sizeof(float) * Sys.Proj.Ny, true)); #ifdef SQUAREMAGINX cuda(Memcpy(buff1, inXBuff + view * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); cuda(Memcpy(d_Image, inYBuff + view * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); KERNELCALL2(mag, contBlocks, contThreads, inXBuff + view * projPitch / sizeof(float) * Sys.Proj.Ny, buff1, d_Image, constants); #endif //SQUAREMAGINX constants.dataDisplay = oldDisplay; #ifdef ENABLEZDER cuda(BindTexture2D(NULL, textError, d_Sino, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); for (int i = -KERNELRADIUS; i <= KERNELRADIUS; i++) { KERNELCALL2(projectSliceZ, contBlocks, contThreads, zBuffs, i + KERNELRADIUS, view, i*Sys.Geo.ZPitch, constants); } cuda(UnbindTexture(textError)); KERNELCALL2(zConvolution, contBlocks, contThreads, inZBuff + view * projPitch / sizeof(float) * Sys.Proj.Ny, zBuffs, d_gaussDer, constants); } /*for (float dis = 0.0f; dis < MAXDIS; dis += Sys.Geo.ZPitch) { //Find the normalized z derivative at every step cuda(BindTexture2D(NULL, textError, d_Sino, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); for (int i = -KERNELRADIUS; i <= KERNELRADIUS; i++) { KERNELCALL2(projectSliceZ, contBlocks, contThreads, zBuffs, i + KERNELRADIUS, -1, dis + i*Sys.Geo.ZPitch, constants); } cuda(UnbindTexture(textError)); KERNELCALL2(zConvolution, contBlocks, contThreads, buff1, zBuffs, d_gaussDer, constants); tomo_err_throw(project(inZBuff, buff2)); KERNELCALL2(sub, contBlocks, contThreads, buff2, buff1, d_Image, reconPitchNum, constants); //Check if the value is at a maximum, and make sure it was a contributor }*/ #else } #endif for (int i = 0; i < HIST_BIN_COUNT; i++) inputHistogram[i] = 0; for (int beam = 0; beam < 7; beam++) { unsigned int histogram2[HIST_BIN_COUNT]; tomo_err_throw(getHistogram(d_Sino + beam*projPitch / sizeof(float)*Sys.Proj.Ny, projPitch*Sys.Proj.Ny, histogram2)); if(Sys.Proj.activeBeams[beam]) for (int i = 0; i < HIST_BIN_COUNT; i++) inputHistogram[i] += histogram2[i]; #ifdef PRINTINTENSITIES std::ofstream outputFile; char outFilename[250]; sprintf(outFilename, "./histogramOut%d.txt", beam); outputFile.open(outFilename); for (int test = 1; test < HIST_BIN_COUNT; test++) outputFile << 
histogram2[test] << "\n"; outputFile.close(); #endif //PRINTINTENSITIES } /* free device memory */ cuda(Free(g)); cuda(Free(z1)); cuda(Free(z2)); constants.log = oldLog; constants.dataDisplay = oldData; constants.baseXr = -1; constants.baseYr = -1; constants.currXr = -1; constants.currYr = -1; delete[] sumValsHor; delete[] sumValsVert; delete[] vertOff; delete[] horOff; cuda(Free(d_Proj)); cuda(Free(d_Gain)); cuda(Free(d_SumValsHor)); cuda(Free(d_SumValsVert)); #ifdef ENABLESOLVER int numSlices = 20; int sqrtNumSl = ceil(sqrt(numSlices)); int matrixSize = Sys.Proj.Nx * Sys.Proj.Ny / pow(sqrtNumSl, 2) * numSlices * sizeof(float); cuda(Malloc(&d_Recon, matrixSize)); cuda(Memset(d_Recon, 0, matrixSize)); #endif // ENABLESOLVER #ifdef VERBOSEMEMORY hipMemGetInfo(&avail_mem, &total_mem); std::cout << "Read end available memory: " << avail_mem << "/" << total_mem << "\n"; #endif // VERBOSEMEMORY return Tomo_OK; } TomoError TomoRecon::exportRecon(unsigned short * exportData) { float * RawData = new float[reconPitch / sizeof(float)*Sys.Recon.Ny]; int oldProjection = getActiveProjection(); //Create the reconstruction volume around the current location float oldDistance = distance; distance -= constants.slices / 2 * Sys.Geo.ZPitch; for (int i = 0; i < constants.slices; i++) { setActiveProjection(i); singleFrame(); distance += Sys.Geo.ZPitch; cuda(Memcpy(RawData, d_Image, reconPitch*Sys.Recon.Ny, hipMemcpyDeviceToHost)); for (int j = 0; j < Sys.Recon.Ny; j++) { int y = j; if (constants.flip) y = Sys.Recon.Ny - 1 - j; for (int k = 0; k < Sys.Recon.Nx; k++) { float data = RawData[reconPitch / sizeof(float) * j + k]; if (data != 0.0) { //data *= 2.0f; if (constants.log) { if (data > USHRT_MAX) data = USHRT_MAX; data = (logf(USHRT_MAX) - logf(data)) / logf(USHRT_MAX) * USHRT_MAX; } if (data > USHRT_MAX) data = USHRT_MAX; if (data < 0.0f) data = 0.0f; } int x = k; if (constants.orientation) x = Sys.Recon.Nx - 1 - k; exportData[Sys.Recon.Nx * (y + Sys.Recon.Ny * i) + x] = (unsigned short)data; } } } setActiveProjection(oldProjection); distance = oldDistance; delete[] RawData; tomo_err_throw(singleFrame()); return Tomo_OK; } TomoError TomoRecon::scanLineDetect(int view, float * d_sum, float * sum, float * offset, bool vert, bool enable) { int vectorSize; if (vert) vectorSize = Sys.Proj.Nx; else vectorSize = Sys.Proj.Ny; float * sumCorr = new float[vectorSize]; #ifdef PRINTSCANCORRECTIONS { std::ofstream FILE; std::stringstream outputfile; outputfile << "C:\\Users\\jdean\\Downloads\\cudaTV\\cudaTV\\original" << view << ".txt"; FILE.open(outputfile.str()); for (int i = 0; i < vectorSize; i++) { sum[i] /= vectorSize; FILE << sum[i] << "\n"; sumCorr[i] = sum[i]; } FILE.close(); } #else for (int i = 0; i < vectorSize; i++) { sum[i] /= vectorSize; sumCorr[i] = sum[i]; } #endif float *di; size_t size; int i, j; int N = vectorSize; size = N * sizeof(float); di = (float*)malloc(size); float tau = vert ? 
cConstants.vertTau : cConstants.horTau; for (j = 0; j < cConstants.iterations; j++) { lapla(sumCorr, di, N); for (i = 0; i < N; i++) sumCorr[i] += di[i] * tau; } free(di); #ifdef PRINTSCANCORRECTIONS { std::ofstream FILE; std::stringstream outputfile; outputfile << "C:\\Users\\jdean\\Downloads\\cudaTV\\cudaTV\\corrected" << view << ".txt"; FILE.open(outputfile.str()); for (int i = 0; i < vectorSize; i++) { FILE << sumCorr[i] << "\n"; if(enable) offset[i] = sum[i] - sumCorr[i]; else offset[i] = 0.0; sum[i] = sumCorr[i]; } FILE.close(); } #else for (int i = 0; i < vectorSize; i++) { if (enable) offset[i] = sum[i] - sumCorr[i]; else offset[i] = 0.0; sum[i] = sumCorr[i]; } #endif delete[] sumCorr; return Tomo_OK; } //Fucntion to free the gpu memory after program finishes TomoError TomoRecon::FreeGPUMemory(void){ if (iterativeInitialized) { resetIterative(); } #ifdef PRINTMEMORYUSAGE size_t avail_mem; size_t total_mem; hipMemGetInfo(&avail_mem, &total_mem); std::cout << "Free start available memory: " << avail_mem << "/" << total_mem << "\n"; #endif // PRINTMEMORYUSAGE //Free memory allocated on the GPU cuda(Free(d_Image)); cuda(Free(d_Image2)); cuda(Free(d_Error)); cuda(Free(d_Sino)); cuda(Free(buff1)); cuda(Free(buff2)); cuda(Free(inXBuff)); cuda(Free(inYBuff)); cuda(Free(d_Raw)); cuda(Free(d_Weights)); cuda(Free(constants.d_Beamx)); cuda(Free(constants.d_Beamy)); cuda(Free(constants.d_Beamz)); cuda(Free(constants.useBeams)); cuda(Free(d_MaxVal)); cuda(Free(d_MinVal)); cuda(Free(d_gauss)); cuda(Free(d_gaussDer)); cuda(Free(d_gaussDer2)); //cuda(Free(d_gaussDer3)); #ifdef ENABLESOLVER cuda(Free(d_Recon)); #endif #ifdef ENABLEZDER cuda(Free(inZBuff)); cuda(Free(maxZVal)); cuda(Free(maxZPos)); cuda(Free(zBuffs)); #endif // ENABLEZDER #ifdef PRINTMEMORYUSAGE hipMemGetInfo(&avail_mem, &total_mem); std::cout << "Free end available memory: " << avail_mem << "/" << total_mem << "\n"; #endif // PRINTMEMORYUSAGE return Tomo_OK; } template <typename T> TomoError TomoRecon::getHistogram(T * image, unsigned int byteSize, unsigned int *histogram) { unsigned int * d_Histogram; cuda(Malloc((void **)&d_Histogram, HIST_BIN_COUNT * sizeof(unsigned int))); cuda(Memset(d_Histogram, 0, HIST_BIN_COUNT * sizeof(unsigned int))); KERNELCALL2(histogram256Kernel, contBlocks, contThreads, d_Histogram, image, byteSize / sizeof(T), constants); cuda(Memcpy(histogram, d_Histogram, HIST_BIN_COUNT * sizeof(unsigned int), hipMemcpyDeviceToHost)); cuda(Free(d_Histogram)); return Tomo_OK; } TomoError TomoRecon::getHistogramRecon(unsigned int *histogram, hipSurfaceObject_t volume, bool useall = false, bool useLog = true) { unsigned int * d_Histogram; cuda(Malloc((void **)&d_Histogram, HIST_BIN_COUNT * sizeof(unsigned int))); cuda(Memset(d_Histogram, 0, HIST_BIN_COUNT * sizeof(unsigned int))); //cuda(BindSurfaceToArray(surfRecon, d_Recon2)); if (useall) { for (int slice = 0; slice < Sys.Recon.Nz; slice++) { KERNELCALL2(histogramReconKernel, contBlocks, contThreads, d_Histogram, slice, useLog, constants, volume); } } else { KERNELCALL2(histogramReconKernel, contBlocks, contThreads, d_Histogram, sliceIndex, useLog, constants, volume); } cuda(Memcpy(histogram, d_Histogram, HIST_BIN_COUNT * sizeof(unsigned int), hipMemcpyDeviceToHost)); cuda(Free(d_Histogram)); return Tomo_OK; } TomoError TomoRecon::draw(int x, int y) { //interop update display(x, y); map(stream); if(constants.dataDisplay == projections){ scale = max((float)Sys.Proj.Nx / (float)width, (float)Sys.Proj.Ny / (float)height) * pow(ZOOMFACTOR, -zoom); 
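// Bind d_Image as the 2D source texture at projection resolution here (or at
// reconstruction resolution in the else branch below); resizeKernelTex then
// samples it into the interop display surface using the current scale and offsets.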
cuda(BindTexture2D(NULL, textImage, d_Image, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny, projPitch)); } else { scale = max((float)Sys.Recon.Nx / (float)width, (float)Sys.Recon.Ny / (float)height) * pow(ZOOMFACTOR, -zoom); cuda(BindTexture2D(NULL, textImage, d_Image, hipCreateChannelDesc<float>(), Sys.Recon.Nx, Sys.Recon.Ny, reconPitch)); } checkOffsets(&xOff, &yOff); cuda(BindSurfaceToArray(displaySurface, ca)); const int blocks = (width * height + PXL_KERNEL_THREADS_PER_BLOCK - 1) / PXL_KERNEL_THREADS_PER_BLOCK; if (blocks > 0) { if (constants.dataDisplay == projections) { KERNELCALL4(resizeKernelTex, blocks, PXL_KERNEL_THREADS_PER_BLOCK, 0, stream, Sys.Proj.Nx, Sys.Proj.Ny, width, height, scale, xOff, yOff, derDisplay != no_der, constants); if (constants.baseXr >= 0 && constants.currXr >= 0) KERNELCALL4(drawSelectionBox, blocks, PXL_KERNEL_THREADS_PER_BLOCK, 0, stream, max(I2D(baseX, true), I2D(currX, true)), max(I2D(baseY, false), I2D(currY, false)), min(I2D(baseX, true), I2D(currX, true)), min(I2D(baseY, false), I2D(currY, false)), width); if (lowXr >= 0) KERNELCALL4(drawSelectionBar, blocks, PXL_KERNEL_THREADS_PER_BLOCK, 0, stream, I2D(lowXr, true), I2D(lowYr, false), width, vertical); if (upXr >= 0) KERNELCALL4(drawSelectionBar, blocks, PXL_KERNEL_THREADS_PER_BLOCK, 0, stream, I2D(upXr, true), I2D(upYr, false), width, vertical); } else { KERNELCALL4(resizeKernelTex, blocks, PXL_KERNEL_THREADS_PER_BLOCK, 0, stream, Sys.Recon.Nx, Sys.Recon.Ny, width, height, scale, xOff, yOff, derDisplay != no_der, constants); if (constants.baseXr >= 0 && constants.currXr >= 0) KERNELCALL4(drawSelectionBox, blocks, PXL_KERNEL_THREADS_PER_BLOCK, 0, stream, max(I2D(constants.baseXr, true), I2D(constants.currXr, true)), max(I2D(constants.baseYr, false), I2D(constants.currYr, false)), min(I2D(constants.baseXr, true), I2D(constants.currXr, true)), min(I2D(constants.baseYr, false), I2D(constants.currYr, false)), width); if (lowXr >= 0) KERNELCALL4(drawSelectionBar, blocks, PXL_KERNEL_THREADS_PER_BLOCK, 0, stream, I2D(lowXr, true), I2D(lowYr, false), width, vertical); if (upXr >= 0) KERNELCALL4(drawSelectionBar, blocks, PXL_KERNEL_THREADS_PER_BLOCK, 0, stream, I2D(upXr, true), I2D(upYr, false), width, vertical); } } cuda(UnbindTexture(textImage)); //interop commands to ready buffer unmap(stream); blit(); return Tomo_OK; } TomoError TomoRecon::singleFrame(bool outputFrame, float** output, unsigned int * histogram) { //Initial projection switch (constants.dataDisplay) { case reconstruction: if (derDisplay != square_mag) {//only case frequently used that doesn't need this, leads to 3/2x speedup in autofocus //cuda(BindTexture2D(NULL, textError, d_Raw, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); cuda(BindTexture2D(NULL, textSino, d_Sino, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); KERNELCALL2(projectSlice, contBlocks, contThreads, d_Image, distance, constants); cuda(UnbindTexture(textSino)); //cuda(UnbindTexture(textError)); } break; case experimental: cuda(BindTexture2D(NULL, textSino, d_Raw, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); KERNELCALL2(projectSlice, contBlocks, contThreads, buff1, distance, constants); cuda(UnbindTexture(textSino)); break; case projections: cuda(Memcpy(d_Image, d_Sino + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); break; case iterRecon: #ifdef SHOWERROR KERNELCALL2(copySlice, contBlocks, 
contThreads, d_Image, sliceIndex, constants, surfErrorObj, constants.isReconstructing); #else //KERNELCALL2(copySlice, contBlocks, contThreads, d_Image, sliceIndex, constants, surfWeightObj, constants.isReconstructing); KERNELCALL2(copySlice, contBlocks, contThreads, d_Image, sliceIndex, constants, surfReconObj, constants.isReconstructing); #endif break; case synthetic2d: cuda(BindTextureToArray(textRecon, d_Recon2)); KERNELCALL2(synthetic2D, contBlocks, contThreads, d_Image, sliceIndex, constants); cuda(UnbindTexture(textRecon)); break; case sinogram: cuda(BindTexture2D(NULL, textSino, inXBuff, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); KERNELCALL2(getSinogram, contBlocks, contThreads, d_Image, constants); cuda(UnbindTexture(textSino)); break; case error: cuda(Memcpy2DAsync(d_Image, projPitch, d_Error + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny, hipMemcpyDeviceToDevice)); break; } switch (derDisplay) { case no_der: break; case x_mag_enhance: switch (constants.dataDisplay) { case reconstruction: tomo_err_throw(project(inXBuff, buff1)); break; case projections: cuda(Memcpy(buff1, inXBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); break; case iterRecon: tomo_err_throw(imageKernel(d_gaussDer, d_gauss, buff1, false)); break; case error: break; } KERNELCALL2(add, contBlocks, contThreads, d_Image, buff1, d_Image, true, true, constants); break; case y_mag_enhance: switch (constants.dataDisplay) { case reconstruction: tomo_err_throw(project(inYBuff, buff1)); break; case projections: cuda(Memcpy(buff1, inYBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); break; case iterRecon: tomo_err_throw(imageKernel(d_gauss, d_gaussDer, buff1, false)); break; case error: break; } KERNELCALL2(add, contBlocks, contThreads, d_Image, buff1, d_Image, true, true, constants); break; case mag_enhance: switch (constants.dataDisplay) { case reconstruction: tomo_err_throw(project(inXBuff, buff1)); tomo_err_throw(project(inYBuff, buff2)); break; case projections: cuda(Memcpy(buff1, inXBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); cuda(Memcpy(buff2, inYBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); break; case iterRecon: case sinogram: case error: tomo_err_throw(imageKernel(d_gaussDer, d_gauss, buff1, false)); tomo_err_throw(imageKernel(d_gauss, d_gaussDer, buff2, false)); break; } KERNELCALL2(mag, contBlocks, contThreads, buff1, buff2, buff1, constants); KERNELCALL2(add, contBlocks, contThreads, d_Image, buff1, d_Image, true, false, constants); break; case x_enhance: switch (constants.dataDisplay) { case reconstruction: tomo_err_throw(project(inXBuff, buff1)); break; case projections: cuda(Memcpy(buff1, inXBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); break; case iterRecon: tomo_err_throw(imageKernel(d_gaussDer, d_gauss, buff1, false)); break; case error: break; } KERNELCALL2(add, contBlocks, contThreads, d_Image, buff1, d_Image, true, false, constants); break; case y_enhance: switch (constants.dataDisplay) { case reconstruction: tomo_err_throw(project(inYBuff, buff1)); break; case projections: cuda(Memcpy(buff1, inYBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); break; case iterRecon: tomo_err_throw(imageKernel(d_gauss, 
d_gaussDer, buff1, false)); break; case error: break; } KERNELCALL2(add, contBlocks, contThreads, d_Image, buff1, d_Image, true, false, constants); break; case both_enhance: switch (constants.dataDisplay) { case reconstruction: tomo_err_throw(project(inXBuff, buff1)); tomo_err_throw(project(inYBuff, buff2)); break; case projections: cuda(Memcpy(buff1, inXBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); cuda(Memcpy(buff2, inYBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); break; case iterRecon: tomo_err_throw(imageKernel(d_gaussDer, d_gauss, buff1, false)); tomo_err_throw(imageKernel(d_gauss, d_gaussDer, buff2, false)); break; case error: break; } KERNELCALL2(add, contBlocks, contThreads, buff1, buff2, buff1, false, false, constants); KERNELCALL2(add, contBlocks, contThreads, d_Image, buff1, d_Image, true, false, constants); break; case der_x: if (constants.dataDisplay == projections) { cuda(Memcpy(d_Image, inXBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); } else { tomo_err_throw(project(inXBuff, d_Image)); } break; case der_y: if (constants.dataDisplay == projections) { cuda(Memcpy(d_Image, inYBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); } else { tomo_err_throw(project(inYBuff, d_Image)); } break; case square_mag: switch (constants.dataDisplay) { case reconstruction: tomo_err_throw(project(inXBuff, buff1)); tomo_err_throw(project(inYBuff, d_Image)); break; case projections: cuda(Memcpy(buff1, inXBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); cuda(Memcpy(d_Image, inYBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); break; case iterRecon: tomo_err_throw(imageKernel(d_gaussDer, d_gauss, buff1, false)); tomo_err_throw(imageKernel(d_gauss, d_gaussDer, d_Image, false)); break; case error: break; } KERNELCALL2(squareMag, contBlocks, contThreads, d_Image, buff1, d_Image, constants); break; case slice_diff: { float xOff = -Sys.Geo.EmitX[constants.revGeo ? 
constants.Views - 1 - diffSlice : diffSlice] * distance / Sys.Geo.EmitZ[diffSlice] / Sys.Recon.Pitch_x; float yOff = -Sys.Geo.EmitY[diffSlice] * distance / Sys.Geo.EmitZ[diffSlice] / Sys.Recon.Pitch_y; KERNELCALL2(squareDiff, contBlocks, contThreads, d_Image, diffSlice, xOff, yOff, reconPitchNum, constants); } break; case der2_x: tomo_err_throw(imageKernel(d_gaussDer2, d_gauss, d_Image, constants.dataDisplay == projections)); break; case der2_y: tomo_err_throw(imageKernel(d_gauss, d_gaussDer2, d_Image, constants.dataDisplay == projections)); break; case der3_x: //imageKernel(d_gaussDer3, d_gauss, d_Image); break; case der3_y: //imageKernel(d_gauss, d_gaussDer3, d_Image); break; case mag_der: if (constants.dataDisplay == projections) { cuda(Memcpy(buff1, inXBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); cuda(Memcpy(buff2, inYBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); } else { //tomo_err_throw(project(inXBuff, buff1)); //tomo_err_throw(project(inYBuff, buff2)); tomo_err_throw(imageKernel(d_gaussDer, d_gauss, buff1, false)); tomo_err_throw(imageKernel(d_gauss, d_gaussDer, buff2, false)); } KERNELCALL2(mag, contBlocks, contThreads, d_Image, buff1, buff1, constants); //KERNELCALL2(add, contBlocks, contThreads, d_Image, buff2, buff1, false, true, constants); //KERNELCALL2(abs, contBlocks, contThreads, d_Image, buff1, constants); /*if (constants.dataDisplay == projections) { cuda(Memcpy(d_Image, inZBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); } else { tomo_err_throw(project(inZBuff, d_Image)); }*/ //tomo_err_throw(project(inXBuff, d_Image)); /*cuda(BindTexture2D(NULL, textError, inXBuff, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); for (int i = -KERNELRADIUS; i <= KERNELRADIUS; i++) KERNELCALL2(projectSliceZ, contBlocks, contThreads, zBuffs, i + KERNELRADIUS, distance + i*Sys.Geo.ZPitch, constants); cuda(UnbindTexture(textError)); KERNELCALL2(zConvolution, contBlocks, contThreads, d_Image, zBuffs, d_gaussDer, constants);*/ break; case z_der_mag: { /*if (dataDisplay == projections) { cuda(Memcpy(buff1, inXBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); cuda(Memcpy(buff2, inYBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); } else { tomo_err_throw(project(inXBuff, buff1)); tomo_err_throw(project(inYBuff, buff2)); } KERNELCALL2(mag, contBlocks, contThreads, buff1, buff2, buff1, reconPitchNum, reconPitchNum, constants);*/ if (constants.dataDisplay == projections) { cuda(Memcpy(d_Image, inZBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, hipMemcpyDeviceToDevice)); } else { cuda(BindTexture2D(NULL, textError, d_Sino, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); for (int i = -KERNELRADIUS; i <= KERNELRADIUS; i++) { KERNELCALL2(projectSliceZ, contBlocks, contThreads, zBuffs, i + KERNELRADIUS, -1, distance + i*Sys.Geo.ZPitch, constants); } cuda(UnbindTexture(textError)); KERNELCALL2(zConvolution, contBlocks, contThreads, buff1, zBuffs, d_gaussDer, constants); tomo_err_throw(project(inZBuff, buff2)); KERNELCALL2(sub, contBlocks, contThreads, buff1, buff2, d_Image, reconPitchNum, constants); } //KERNELCALL2(thresh, contBlocks, contThreads, buff2, buff1, d_Image, reconPitchNum, constants); /*KERNELCALL2(zConvolution, contBlocks, contThreads, buff1, zBuffs, 
d_gaussDer2, constants); KERNELCALL2(zConvolution, contBlocks, contThreads, buff2, zBuffs, d_gaussDer, constants); KERNELCALL2(div, contBlocks, contThreads, buff1, buff2, d_Image, reconPitchNum, constants);*/ //imageKernel(d_gauss, d_gauss, d_Image); //KERNELCALL2(add, contBlocks, contThreads, d_Image, buff1, d_Image, reconPitchNum, true, false, constants); } break; case norm_der: { #ifdef PRINTLINEDER //output image line, derivative and normalized derivative for some height //memcpy to line float float imageLine[1915]; int lineNum = 930; cuda(Memcpy(imageLine, &buff1[lineNum * constants.ReconPitchNum], 1915 * sizeof(float), hipMemcpyDeviceToHost)); //output to csv std::ofstream outputFile; outputFile.open("imageLine.csv"); for (int test = 0; test < 1915; test++) outputFile << imageLine[test] << "\n"; outputFile.close(); tomo_err_throw(project(inXBuff, buff2)); cuda(Memcpy(imageLine, &buff2[lineNum * constants.ReconPitchNum], 1915 * sizeof(float), hipMemcpyDeviceToHost)); outputFile.open("derLine.csv"); for (int test = 0; test < 1915; test++) outputFile << imageLine[test] << "\n"; outputFile.close(); tomo_err_throw(normProject(inXBuff, buff2, DERWEIGHTSTR)); cuda(Memcpy(imageLine, &buff2[lineNum * constants.ReconPitchNum], 1915 * sizeof(float), hipMemcpyDeviceToHost)); outputFile.open("normDerLine.csv"); for (int test = 0; test < 1915; test++) outputFile << imageLine[test] << "\n"; outputFile.close(); KERNELCALL2(xConvIntegrate, contBlocks, contThreads, d_Image, buff2, buff1, 0, constants); //KERNELCALL2(xIntegrate, contBlocks, contThreads, d_Image, buff2, buff1, constants); cuda(Memcpy(imageLine, &d_Image[lineNum * constants.ReconPitchNum], 1915 * sizeof(float), hipMemcpyDeviceToHost)); outputFile.open("intLine.csv"); for (int test = 0; test < 1915; test++) outputFile << imageLine[test] << "\n"; outputFile.close(); #else tomo_err_throw(normProject(inXBuff, d_Image, DERWEIGHTSTR)); //tomo_err_throw(normProject(d_Sino, buff1, DERWEIGHTSTR)); //tomo_err_throw(project(d_Sino, buff1)); //tomo_err_throw(project(inXBuff, d_Image)); //KERNELCALL2(xIntegrate, contBlocks, contThreads, d_Image, buff2, buff1, 0, constants); /* //current best integration method tomo_err_throw(normProject(inXBuff, buff2, DERWEIGHTSTR)); tomo_err_throw(project(d_Sino, buff1)); KERNELCALL2(xConvIntegrate, contBlocks, contThreads, d_Image, buff2, buff1, 0, constants); */ #endif //PRINTLINEDER } break; case abs_norm_der: tomo_err_throw(normProject(inXBuff, buff2, 6.0f)); KERNELCALL2(pow, contBlocks, contThreads, d_Image, buff2, 2.0f, constants); break; case square_norm_der: //cuda(BindTexture2D(NULL, textSino, inXBuff, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); //KERNELCALL2(normProjectSlice, contBlocks, contThreads, d_Image, distance, alignStr, constants); //cuda(UnbindTexture(textSino)); tomo_err_throw(normProject(inXBuff, buff1, 1.0f)); tomo_err_throw(normProject(inXBuff, buff2, 1.0f)); //KERNELCALL2(mag, contBlocks, contThreads, d_Image, buff1, buff2, constants); KERNELCALL2(pow, contBlocks, contThreads, d_Image, buff2, 2.0f, constants); break; } if (outputFrame) { *output = new float[Sys.Proj.Nx * Sys.Proj.Ny]; constants.baseXr = 7 * Sys.Proj.Nx / 8; constants.baseYr = 7 * Sys.Proj.Ny / 8; constants.currXr = Sys.Proj.Nx / 8; constants.currYr = Sys.Proj.Ny / 8; cuda(Memcpy2D(*output, Sys.Proj.Nx * sizeof(float), d_Image, projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny, hipMemcpyDeviceToHost)); getHistogram(d_Image, reconPitch*Sys.Recon.Ny, histogram); constants.baseXr = 
-1; constants.baseYr = -1; constants.currXr = -1; constants.currYr = -1; } return Tomo_OK; } TomoError TomoRecon::findStartDistance() { derivative_t oldDisplay = derDisplay; derDisplay = abs_norm_der; float oldDis = distance; float currentVal; cuda(MemsetAsync(d_MaxVal, 0, sizeof(float))); singleFrame(); KERNELCALL3(sumReduction, reductionBlocks, reductionThreads, reductionSize, d_Image, reconPitchNum, d_MaxVal, 0.0f, constants.Rx, 0.0f, constants.Ry); cuda(Memcpy(&currentVal, d_MaxVal, sizeof(float), hipMemcpyDeviceToHost)); float focusVal = currentVal; do { distance -= constants.pitchZ; cuda(MemsetAsync(d_MaxVal, 0, sizeof(float))); singleFrame(); KERNELCALL3(sumReduction, reductionBlocks, reductionThreads, reductionSize, d_Image, reconPitchNum, d_MaxVal, 0.0f, constants.Rx, 0.0f, constants.Ry); cuda(Memcpy(&currentVal, d_MaxVal, sizeof(float), hipMemcpyDeviceToHost)); } while (currentVal > focusVal / 20.0f); constants.startDis = distance - 1.0f; if (constants.startDis < 0.0f) constants.startDis = 0.0f; distance = oldDis; do { distance += constants.pitchZ; cuda(MemsetAsync(d_MaxVal, 0, sizeof(float))); singleFrame(); KERNELCALL3(sumReduction, reductionBlocks, reductionThreads, reductionSize, d_Image, reconPitchNum, d_MaxVal, //constants.Rx / 8.0f, 7.0f * constants.Rx / 8.0f, constants.Ry / 8.0f, 7.0f * constants.Ry / 8.0f); 0.0f, constants.Rx, 0.0f, constants.Ry); cuda(Memcpy(&currentVal, d_MaxVal, sizeof(float), hipMemcpyDeviceToHost)); } while (currentVal > focusVal / 20.0f); constants.slices = (distance - constants.startDis + 1.0f) / constants.pitchZ; if (constants.slices > 50) constants.slices = 50; Sys.Recon.Nz = constants.slices; distance = oldDis; derDisplay = oldDisplay; singleFrame(); } float TomoRecon::binarySearch(float(TomoRecon::*getError)(), float ** var, float * startPos, int dimensions, float startStep, float resolution, float limit) { **var = *startPos; float bestVar = **var; float bestErr = (*this.*getError)(); for (**var -= limit; **var < *startPos + limit; **var += startStep) { float newErr = dimensions == 1 ? (*this.*getError)() : binarySearch(getError, var + 1, startPos + 1, dimensions - 1, startStep, resolution, limit); if (newErr > bestErr) { bestErr = newErr; bestVar = **var; } if (binFile.is_open() && dimensions == 1) { binFile << **(var - 1) << ", "; binFile << **var << ", "; binFile << std::setprecision(10) << newErr << "\n"; } } **var = bestVar; float thisStep = startStep / 2.0f; while (abs(thisStep) > resolution) { **var += thisStep; float newErr = dimensions == 1 ? 
(*this.*getError)() : binarySearch(getError, var + 1, startPos + 1, dimensions - 1, startStep, resolution, limit); if (binFile.is_open() && dimensions == 1) { binFile << **(var - 1) << ", "; binFile << **var << ", "; binFile << std::setprecision(10) << newErr << "\n"; } if (newErr > bestErr) { bestErr = newErr; bestVar = **var; } else { **var -= thisStep; thisStep /= -2.0f; } } **var = bestVar; //rerun to reset best value of lower dimensions //if(dimensions > 1) binarySearch(getError, var + 1, startPos + 1, dimensions - 1, startStep, resolution, limit); return bestErr; } TomoError TomoRecon::autoFocus2() { derivative_t oldDisplay = derDisplay; derDisplay = square_mag; distance = constants.startDis + constants.pitchZ*constants.slices / 2.0f; float startDis = distance; float * disPtr = &distance; binarySearch(&TomoRecon::focusHelper, &disPtr, &startDis, 1, constants.pitchZ, LASTSTEP, constants.pitchZ*constants.slices / 2.0f); derDisplay = oldDisplay; singleFrame(); return Tomo_OK; } TomoError TomoRecon::autoFocus(bool firstRun, bool checkFlip) { static float step; static float best; static bool linearRegion; static bool firstLin = true; static float bestDist; static int bestSlice; static derivative_t oldDisplay; if (firstRun) { step = constants.pitchZ; distance = constants.startDis; bestDist = constants.startDis; sliceIndex = 0; bestSlice = sliceIndex; best = 0; linearRegion = false; oldDisplay = derDisplay; derDisplay = square_mag; } float newVal = focusHelper(); if (checkFlip && firstRun) { constants.revGeo = !constants.revGeo; float testVal = focusHelper(); if (testVal < newVal) constants.revGeo = !constants.revGeo; } if (!linearRegion) { if (newVal > best) { best = newVal; bestDist = distance; bestSlice = sliceIndex; } distance += step; sliceIndex++; if (distance > constants.startDis + constants.pitchZ*constants.slices || sliceIndex >= constants.slices) { linearRegion = true; firstLin = true; distance = bestDist; sliceIndex = bestSlice; if (constants.dataDisplay == iterRecon) { derDisplay = oldDisplay; singleFrame(); return Tomo_Done; } } } else { //compare to current if (newVal > best) { best = newVal; bestDist = distance; distance += step; } else { if(!firstLin) distance -= step;//revert last move //find next step step = -step / 2; if (abs(step) < LASTSTEP) { derDisplay = oldDisplay; singleFrame(); return Tomo_Done; } else distance += step; } firstLin = false; } return Tomo_OK; } TomoError TomoRecon::autoGeo2(int beam, float & XVal, float & YVal) { derivative_t oldDisplay = derDisplay; //derDisplay = square_mag; derDisplay = square_norm_der; float * vars[2]; float start[2]; vars[0] = &Sys.Geo.EmitX[beam]; vars[1] = &Sys.Geo.EmitY[beam]; start[0] = *vars[0]; start[1] = *vars[1]; float oldX = *vars[0]; float oldY = *vars[1]; sliceIndex = beam; constants.geoTesting = true; bool activeBeams[NUMVIEWS] = {}; activeBeams[NUMVIEWS / 2] = true; activeBeams[beam] = true; cuda(MemcpyAsync(constants.useBeams, activeBeams, Sys.Proj.NumViews * sizeof(bool), hipMemcpyHostToDevice)); char filename[100]; sprintf(filename, "geoOutBeam%d.txt", beam); binFile.open(filename); binarySearch(&TomoRecon::geoHelper, vars, start, 2, 0.1f, 0.1f, 3.0f); //binarySearch(&TomoRecon::geoHelper, vars, start, 2, 0.3f, 0.01f, 3.0f); binFile.close(); constants.geoTesting = false; XVal = *vars[0]; YVal = *vars[1]; *vars[0] = oldX; *vars[1] = oldY; cuda(MemcpyAsync(constants.d_Beamx, Sys.Geo.EmitX, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamy, Sys.Geo.EmitY, 
Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(constants.useBeams, Sys.Proj.activeBeams, Sys.Proj.NumViews * sizeof(bool), hipMemcpyHostToDevice)); derDisplay = oldDisplay; singleFrame(); return Tomo_OK; } TomoError TomoRecon::autoGeo(bool firstRun, int beam, float &returnVal, int &yIter, float &maxXVal, float &maxYVal) { static float newXGeo; static float newYGeo; static float maxMag; static bool xLinear = false; static bool yLinear = false; float xLimit = 5.0f; static float xStep = 1.0f; float yLimit = 3.0f; static float yStep = 1.0f; static derivative_t oldDisplay; static int oldActiveSlice; static bool oldLog; static float xGeo[NUMVIEWS]; static float yGeo[NUMVIEWS]; bool activeBeams[NUMVIEWS] = {}; activeBeams[NUMVIEWS / 2] = true; activeBeams[beam] = true; constants.geoTesting = true; if (firstRun) { memcpy(xGeo, Sys.Geo.EmitX, sizeof(float)*NUMVIEWS); memcpy(yGeo, Sys.Geo.EmitY, sizeof(float)*NUMVIEWS); newXGeo = Sys.Geo.EmitX[beam] - xLimit; newYGeo = Sys.Geo.EmitY[beam] - yLimit; oldDisplay = derDisplay; oldActiveSlice = sliceIndex; oldLog = constants.log; constants.log = false; derDisplay = square_mag; //derDisplay = mag_der; yIter = 0; maxMag = 0.0f; xLinear = false; yLinear = false; xStep = 1.0f; yStep = 1.0f; } else { newXGeo += xStep; } Sys.Geo.EmitX[beam] = newXGeo; Sys.Geo.EmitY[beam] = newYGeo; cuda(MemcpyAsync(constants.d_Beamx, Sys.Geo.EmitX, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamy, Sys.Geo.EmitY, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(constants.useBeams, activeBeams, Sys.Proj.NumViews * sizeof(bool), hipMemcpyHostToDevice)); constants.dataDisplay = projections; sliceIndex = beam; setProjBox(beam); float normalVal = focusHelper() / 2; constants.dataDisplay = reconstruction; returnVal = focusHelper(); returnVal -= normalVal; if (returnVal > maxMag) { maxMag = returnVal; maxXVal = newXGeo; maxYVal = newYGeo; } else { //revert step if (xLinear) { newXGeo -= xStep; xStep = -xStep / 2; } else { if (newXGeo >= xGeo[beam] + xLimit) { xLinear = true; newXGeo = maxXVal; } } } if (abs(xStep) < 0.01f) { if (yLinear) { newYGeo -= yStep; yStep = -yStep / 2; if (abs(yStep) < 0.01f) { memcpy(Sys.Geo.EmitX, xGeo, sizeof(float)*NUMVIEWS); memcpy(Sys.Geo.EmitY, yGeo, sizeof(float)*NUMVIEWS); cuda(MemcpyAsync(constants.d_Beamx, Sys.Geo.EmitX, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamy, Sys.Geo.EmitY, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(constants.useBeams, Sys.Proj.activeBeams, Sys.Proj.NumViews * sizeof(bool), hipMemcpyHostToDevice)); constants.geoTesting = false; derDisplay = oldDisplay; sliceIndex = oldActiveSlice; constants.log = oldLog; constants.dataDisplay = reconstruction; yStep = 1.0f; return Tomo_Done; } } else { if (newYGeo >= yGeo[beam] + yLimit) { yLinear = true; newYGeo = maxYVal; } } xStep = 1.0f; newXGeo = xGeo[beam] - xLimit - xStep; newYGeo += yStep; xLinear = false; yIter++; } return Tomo_OK; } TomoError TomoRecon::autoLight(unsigned int histogram[HIST_BIN_COUNT], int threshold, float * minVal, float * maxVal) { int innerThresh = threshold; bool emptyHist = false; if (histogram == NULL) { emptyHist = true; histogram = new unsigned int[HIST_BIN_COUNT]; if (constants.dataDisplay == projections) { tomo_err_throw(getHistogram(d_Image, projPitch*Sys.Proj.Ny, histogram)); } else { tomo_err_throw(getHistogram(d_Image, reconPitch*Sys.Recon.Ny, histogram)); } 
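// No histogram was supplied by the caller: size the count threshold to the
// currently selected ROI area and write the resulting window into the global
// display min/max held in constants.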
innerThresh = abs(constants.baseXr - constants.currXr) * abs(constants.baseYr - constants.currYr) / AUTOTHRESHOLD; minVal = &constants.minVal; maxVal = &constants.maxVal; } /*std::ofstream outputFile; char outFilename[250]; sprintf(outFilename, "./histogramOutRecon.txt"); outputFile.open(outFilename); for (int test = 1; test < 255; test++) outputFile << histogram[test] << "\n"; outputFile.close();*/ int i; for (i = 0; i < HIST_BIN_COUNT; i++) { unsigned int count = histogram[i]; if (count > innerThresh) break; } if (i >= HIST_BIN_COUNT) i = 0; *minVal = i * UCHAR_MAX; //go from the reverse direction for maxval for (i = HIST_BIN_COUNT - 1; i >= 0; i--) { unsigned int count = histogram[i]; if (count > innerThresh) break; } if (i < 0) i = HIST_BIN_COUNT; *maxVal = i * UCHAR_MAX; if (*minVal == *maxVal) *maxVal += UCHAR_MAX; if (emptyHist) delete[] histogram; return Tomo_OK; } TomoError TomoRecon::readPhantom(float * resolution) { if (vertical) { float phanScale = (lowYr - upYr) / (1/LOWERBOUND - 1/UPPERBOUND); float * h_xDer2 = (float*)malloc(reconPitch*Sys.Recon.Ny); cuda(Memcpy(h_xDer2, d_Image, reconPitch*Sys.Recon.Ny, hipMemcpyDeviceToHost)); //Get x range from the bouding box int startX = min(constants.baseXr, constants.currXr); int endX = max(constants.baseXr, constants.currXr); int thisY = lowYr;//Get beginning y val from tick mark bool ascend = upYr > lowYr; int increment = ascend ? 1 : -1; while ((!ascend && thisY >= upYr) || (ascend && thisY <= upYr)) {//y counts down int thisX = startX; int negCross = 0; bool negativeSpace = false; float negAcc = 0; while (thisX < endX) { float val = h_xDer2[thisY * reconPitchNum + thisX]; h_xDer2[thisY * reconPitchNum + thisX] = val / 10.0f; if (negativeSpace) { if (val > 0) { negativeSpace = false; if (negAcc < -INTENSITYTHRESH) { negCross++; } } else { negAcc += val; } } else { if (val < 0) { negativeSpace = true; negAcc = val; } } thisX++; } if (negCross < LINEPAIRS) { thisY -= increment; break; } thisY += increment; } *resolution = phanScale / (thisY - lowYr + phanScale / LOWERBOUND); cuda(Memcpy(d_Image, h_xDer2, reconPitch*Sys.Recon.Ny, hipMemcpyHostToDevice)); free(h_xDer2); } else { float phanScale = (lowXr - upXr) / (1 / LOWERBOUND - 1 / UPPERBOUND); float * h_yDer2 = (float*)malloc(reconPitchNum*Sys.Recon.Ny * sizeof(float)); cuda(Memcpy(h_yDer2, d_Image, reconPitchNum*Sys.Recon.Ny * sizeof(float), hipMemcpyDeviceToHost)); //Get x range from the bouding box int startY = min(constants.baseYr, constants.currYr); int endY = max(constants.baseYr, constants.currYr); int thisX = lowXr;//Get beginning y val from tick mark bool ascend = upXr > lowXr; int increment = ascend ? 
1 : -1; while ((!ascend && thisX >= upXr) || (ascend && thisX <= upXr)) {//y counts down int thisY = startY; int negCross = 0; bool negativeSpace = false; float negAcc = 0; while (thisY < endY) { float val = h_yDer2[thisY * reconPitchNum + thisX]; h_yDer2[thisY * reconPitchNum + thisX] = val / 10.0f; if (negativeSpace) { if (val > 0) { negativeSpace = false; if (negAcc < -INTENSITYTHRESH) { negCross++; } } else { negAcc += val; } } else { if (val < 0) { negativeSpace = true; negAcc = val; } } thisY++; } if (negCross < LINEPAIRS) { thisX -= increment; break; } thisX += increment; } *resolution = phanScale / (thisX - lowXr + phanScale / LOWERBOUND); cuda(Memcpy(d_Image, h_yDer2, reconPitch*Sys.Recon.Ny, hipMemcpyHostToDevice)); free(h_yDer2); } return Tomo_OK; } TomoError TomoRecon::initTolerances(std::vector<toleranceData> &data, int numTests, std::vector<float> offsets) { //start with a control, but make sure toleranceData control; control.name += " none"; control.numViewsChanged = 0; control.viewsChanged = 0; control.offset = 0; data.push_back(control); //start set as just the combinations for (int i = 0; i < NUMVIEWS; i++) { int resultsLen = (int)data.size();//size will change every iteration, pre-record it int binRep = 1 << i; for (int j = 1; j < resultsLen; j++) { toleranceData newData = data[j]; newData.name += "+"; newData.name += std::to_string(i); newData.numViewsChanged++; newData.viewsChanged |= binRep; data.push_back(newData); } //add the base toleranceData newData; newData.name += std::to_string(i); newData.numViewsChanged = 1; newData.viewsChanged = binRep; data.push_back(newData); } //blow up with the diffent directions int combinations = (int)data.size();//again, changing sizes later on for (int i = 1; i < combinations; i++) { toleranceData baseline = data[i]; baseline.thisDir = dir_y; data.push_back(baseline); baseline.thisDir = dir_z; data.push_back(baseline); } //then fill in the set with all the view changes combinations = (int)data.size();//again, changing sizes later on for (int i = 1; i < combinations; i++) { toleranceData baseline = data[i]; for (int j = 0; j < offsets.size() - 1; j++) {//skip the last toleranceData newData = baseline; newData.offset = offsets[j]; data.push_back(newData); } //the last one is done in place data[i].offset = offsets[offsets.size() - 1]; } return Tomo_OK; } TomoError TomoRecon::testTolerances(std::vector<toleranceData> &data, bool firstRun) { static auto iter = data.begin(); if (firstRun) { if(vertical) derDisplay = der2_x; else derDisplay = der2_y; tomo_err_throw(singleFrame()); tomo_err_throw(autoLight()); iter = data.begin(); return Tomo_OK; } if (iter == data.end()) { derDisplay = no_der; return Tomo_Done; } float geo[NUMVIEWS]; switch (iter->thisDir) { case dir_x: memcpy(geo, Sys.Geo.EmitX, sizeof(float)*NUMVIEWS); break; case dir_y: memcpy(geo, Sys.Geo.EmitY, sizeof(float)*NUMVIEWS); break; case dir_z: memcpy(geo, Sys.Geo.EmitZ, sizeof(float)*NUMVIEWS); break; } for (int i = 0; i < NUMVIEWS; i++) { bool active = ((iter->viewsChanged >> i) & 1) > 0;//Shift, mask and check if (!active) continue; if (i < NUMVIEWS / 2) geo[i] -= iter->offset; else geo[i] += iter->offset; } //be safe, recopy values to overwrite previous iterations switch (iter->thisDir) { case dir_x: cuda(MemcpyAsync(constants.d_Beamx, geo, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamy, Sys.Geo.EmitY, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamz, Sys.Geo.EmitZ, 
Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); break; case dir_y: cuda(MemcpyAsync(constants.d_Beamx, Sys.Geo.EmitX, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamy, geo, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamz, Sys.Geo.EmitZ, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); break; case dir_z: cuda(MemcpyAsync(constants.d_Beamx, Sys.Geo.EmitX, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamy, Sys.Geo.EmitY, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamz, geo, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); break; } singleFrame(); float readVal; tomo_err_throw(readPhantom(&readVal)); iter->phantomData = readVal; ++iter; return Tomo_OK; } TomoError TomoRecon::initIterative() { #ifdef PRINTMEMORYUSAGE size_t avail_mem; size_t total_mem; hipMemGetInfo(&avail_mem, &total_mem); std::cout << "Iter start vailable memory: " << avail_mem << "/" << total_mem << "\n"; #endif // PRINTMEMORYUSAGE iteration = 0; decay = 1.0f; iterativeInitialized = true; constants.isReconstructing = true; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); hipExtent vol = { Sys.Recon.Nx, Sys.Recon.Ny, Sys.Recon.Nz }; cuda(Malloc3DArray(&d_Recon2, &channelDesc, vol, hipArraySurfaceLoadStore)); cuda(Malloc3DArray(&d_ReconWeight, &channelDesc, vol, hipArraySurfaceLoadStore)); #ifdef SHOWERROR cuda(Malloc3DArray(&d_ReconError, &channelDesc, vol, hipArraySurfaceLoadStore)); #endif cuda(MallocPitch((void**)&d_ReconOld, &reconPitch, Sys.Recon.Nx * sizeof(float), Sys.Recon.Ny)); reconPitchNum = (int)reconPitch / sizeof(float); constants.ReconPitchNum = reconPitchNum; struct hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypeArray; resDesc.res.array.array = d_Recon2; cuda(CreateSurfaceObject(&surfReconObj, &resDesc)); resDesc.res.array.array = d_ReconWeight; cuda(CreateSurfaceObject(&surfWeightObj, &resDesc)); #ifdef SHOWERROR resDesc.res.array.array = d_ReconError; cuda(CreateSurfaceObject(&surfErrorObj, &resDesc)); #endif #ifdef RECONDERIVATIVE cuda(Memcpy2DAsync(d_Error, projPitch, inXBuff, projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny*NUMVIEWS, hipMemcpyDeviceToDevice)); #else for (int view = 0; view < NumViews; view++) { cuda(Memcpy2DAsync(d_Error + view * projPitch / sizeof(float) * Sys.Proj.Ny, projPitch, d_Sino + view * projPitch / sizeof(float) * Sys.Proj.Ny, projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny, hipMemcpyDeviceToDevice)); //cuda(Memset2DAsync(d_Error + view * projPitch / sizeof(float) * Sys.Proj.Ny, projPitch, 0, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny)); #ifdef INVERSEITER KERNELCALL2(invert, contBlocks, contThreads, d_Error + view * projPitch / sizeof(float) * Sys.Proj.Ny, constants); #endif // INVERSEITER } #endif // RECONDERIVATIVE cuda(BindTexture2D(NULL, textError, d_Error, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); cuda(BindTexture2D(NULL, textWeight, d_Weights, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); for (int slice = 0; slice < Sys.Recon.Nz; slice++) { distance = constants.startDis + slice * constants.pitchZ; cuda(BindTexture2D(NULL, textSino, d_Raw, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); KERNELCALL2(projectSlice, contBlocks, contThreads, buff1, distance, 
constants); cuda(UnbindTexture(textSino)); tomo_err_throw(normProject(inXBuff, buff2, DERWEIGHTSTR)); //tomo_err_throw(project(inXBuff, buff2)); KERNELCALL2(xConvIntegrate, contBlocks, contThreads, NULL, buff2, buff1, slice, constants, surfWeightObj); //KERNELCALL2(initArray, contBlocks, contThreads, slice, slice * 1000.0f, constants, surfWeightObj); KERNELCALL2(initArray, contBlocks, contThreads, slice, 0.0f, constants, surfReconObj); KERNELCALL2(copySlice, contBlocks, contThreads, d_ReconOld, slice, constants, surfReconObj); #ifdef SHOWERROR KERNELCALL2(projectIter, contBlocks, contThreads, d_ReconOld, slice, 1.0f, true, constants, surfReconObj, surfErrorObj); #else KERNELCALL2(projectIter, contBlocks, contThreads, d_Sino, d_ReconOld, d_Weights, slice, iteration, true, decay, constants, surfReconObj, surfWeightObj, true); #endif } cuda(UnbindTexture(textError)); cuda(UnbindTexture(textWeight)); //Get normalization factor for weight volume constants.baseXr = 0; constants.baseYr = 0; constants.currXr = Sys.Recon.Nx; constants.currYr = Sys.Recon.Ny; float minVal; unsigned int histogram[HIST_BIN_COUNT]; tomo_err_throw(getHistogramRecon(histogram, surfWeightObj, true, false)); tomo_err_throw(autoLight(histogram, 20, &minVal, &constants.weightMax)); constants.baseXr = -1; constants.baseYr = -1; constants.currXr = -1; constants.currYr = -1; #ifdef PRINTMEMORYUSAGE hipMemGetInfo(&avail_mem, &total_mem); std::cout << "Iter end available memory: " << avail_mem << "/" << total_mem << "\n"; #endif // PRINTMEMORYUSAGE return Tomo_OK; } TomoError TomoRecon::resetIterative() { #ifdef PRINTMEMORYUSAGE size_t avail_mem; size_t total_mem; hipMemGetInfo(&avail_mem, &total_mem); std::cout << "Iter free start vailable memory: " << avail_mem << "/" << total_mem << "\n"; #endif // PRINTMEMORYUSAGE cuda(DestroySurfaceObject(surfReconObj)); cuda(Free(d_ReconOld)); cuda(FreeArray(d_Recon2)); #ifdef PRINTMEMORYUSAGE hipMemGetInfo(&avail_mem, &total_mem); std::cout << "Iter free end vailable memory: " << avail_mem << "/" << total_mem << "\n"; #endif // PRINTMEMORYUSAGE return Tomo_OK; } TomoError TomoRecon::iterStep() { iteration = 1.0f; decay *= ALPHA; cuda(BindTextureToArray(textRecon, d_Recon2)); #ifdef RECONDERIVATIVE for (int view = 0; view < NumViews; view++) { KERNELCALL2(backProject, contBlocks, contThreads, inXBuff + view * projPitch / sizeof(float) * Sys.Proj.Ny, d_Error + view * projPitch / sizeof(float) * Sys.Proj.Ny, view, constants); } #else for (int view = 0; view < NumViews; view++) { KERNELCALL2(backProject, contBlocks, contThreads, d_Sino + view * projPitch / sizeof(float) * Sys.Proj.Ny, d_Error + view * projPitch / sizeof(float) * Sys.Proj.Ny, d_Weights + view * projPitch / sizeof(float) * Sys.Proj.Ny, view, iteration, ITERATIONS, surfWeightObj, constants); } #endif // RECONDERIVATIVE cuda(UnbindTexture(textRecon)); cuda(BindTexture2D(NULL, textSino, d_Sino, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); cuda(BindTexture2D(NULL, textError, d_Error, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); cuda(BindTexture2D(NULL, textWeight, d_Weights, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); for (int slice = 0; slice < Sys.Recon.Nz; slice++) { KERNELCALL2(copySlice, contBlocks, contThreads, d_ReconOld, slice, constants, surfReconObj); #ifdef SHOWERROR KERNELCALL2(projectIter, contBlocks, contThreads, d_ReconOld, slice, iteration, SKIPITERTV, constants, surfReconObj, 
surfErrorObj); #else KERNELCALL2(projectIter, contBlocks, contThreads, d_Sino, d_ReconOld, d_Weights, slice, iteration, SKIPITERTV, decay, constants, surfReconObj, surfWeightObj); #endif /*for (int i = 0; i < TVITERATIONS; i++) { KERNELCALL2(copySlice, contBlocks, contThreads, d_ReconOld, slice, constants); KERNELCALL2(projectIter, contBlocks, contThreads, d_ReconOld, slice, iteration++, true, constants); }*/ } cuda(UnbindTexture(textError)); cuda(UnbindTexture(textWeight)); //cuda(UnbindTexture(textSino)); iteration++; return Tomo_OK; } TomoError TomoRecon::finalizeIter() { //no longer need gradient records //cuda(FreeArray(d_ReconDelta)); //cuda(DestroySurfaceObject(surfDeltaObj)); #ifdef VERBOSEMEMORY size_t avail_mem, total_mem; hipMemGetInfo(&avail_mem, &total_mem); std::cout << "Iter final start available memory: " << avail_mem << "/" << total_mem << "\n"; #endif // VERBOSEMEMORY constants.isReconstructing = false; #ifdef INVERSEITER for (int slice = 0; slice < Sys.Recon.Nz; slice++) KERNELCALL2(invertRecon, contBlocks, contThreads, slice, constants, surfReconObj); #endif //INVERSEITER constants.baseXr = 0; constants.baseYr = 0; constants.currXr = Sys.Recon.Nx; constants.currYr = Sys.Recon.Ny; float maxVal, minVal; unsigned int histogram[HIST_BIN_COUNT]; tomo_err_throw(getHistogramRecon(histogram, surfReconObj, true, false)); tomo_err_throw(autoLight(histogram, 20, &minVal, &maxVal)); //histogram equalization approximation by width and offset float scales[HIST_BIN_COUNT]; float offsets[HIST_BIN_COUNT]; int yIndex = 0; int activeViews = 0; float reconRatio = 0, sumRecon = 0; for (int i = 0; i < HIST_BIN_COUNT; i++) { reconRatio += inputHistogram[i]; sumRecon += histogram[i]; } reconRatio /= sumRecon; for (int i = 0; i < NumViews; i++) if (Sys.Proj.activeBeams[i]) activeViews++; float y1 = 0.0f, y2, h2 = inputHistogram[yIndex]; for (int i = 0; i < HIST_BIN_COUNT; i++) { float h1 = histogram[i] * reconRatio; while (h1 > h2) { h1 -= h2; if (++yIndex >= HIST_BIN_COUNT) break; h2 = inputHistogram[yIndex]; } if (yIndex >= HIST_BIN_COUNT) { //Overflow logic scales[i] = scales[i - 1]; offsets[i] = offsets[i - 1]; continue; } h2 -= h1; float maxH2 = inputHistogram[yIndex]; if(maxH2 > 0) y2 = yIndex + (maxH2 - h2) / maxH2; else y2 = yIndex; scales[i] = y2 - y1;//scale * (x2 - x1) = (y2 - y1) offsets[i] = (y1 + y2) / 2.0f - scales[i] * (float)(2 * i + 1) / 2.0f;//offset + scale * (x1 + x2) / 2 = (y1 + y2) / 2 y1 = y2; } float * d_scales, * d_offsets; hipMalloc(&d_scales, HIST_BIN_COUNT * sizeof(float)); hipMalloc(&d_offsets, HIST_BIN_COUNT * sizeof(float)); hipMemcpy(d_scales, scales, HIST_BIN_COUNT * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_offsets, offsets, HIST_BIN_COUNT * sizeof(float), hipMemcpyHostToDevice); //cuda(BindSurfaceToArray(surfRecon, d_Recon2)); if(!Sys.Proj.saturated) for (int slice = 0; slice < Sys.Recon.Nz; slice++) KERNELCALL2(scaleRecon, contBlocks, contThreads, slice, d_scales, d_offsets, constants, surfReconObj); cuda(Free(d_scales)); cuda(Free(d_offsets)); cuda(UnbindTexture(textSino)); cuda(BindTexture2D(NULL, textSino, d_Raw, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); for (int slice = 0; slice < Sys.Recon.Nz; slice++) { KERNELCALL2(projectFinalIter, contBlocks, contThreads, slice, constants, surfReconObj); } cuda(UnbindTexture(textSino)); #ifdef PRINTINTENSITIES tomo_err_throw(getHistogramRecon(histogram, surfReconObj, true, false)); std::ofstream outputFile; char outFilename[250]; sprintf(outFilename, 
"./histogramOutRecon.txt"); outputFile.open(outFilename); float scaleFactor = (float)Sys.Proj.Nx / (float)Sys.Recon.Nx * (float)Sys.Proj.Ny / (float)Sys.Recon.Ny / (float)Sys.Recon.Nz; for (int test = 1; test < HIST_BIN_COUNT; test++) outputFile << histogram[test] * scaleFactor << "\n";// / Sys.Recon.Nz outputFile.close(); #endif //PRINTINTENSITIES constants.baseXr = -1; constants.baseYr = -1; constants.currXr = -1; constants.currYr = -1; #ifdef VERBOSEMEMORY hipMemGetInfo(&avail_mem, &total_mem); std::cout << "Iter final end available memory: " << avail_mem << "/" << total_mem << "\n"; #endif // VERBOSEMEMORY //cuda(DestroySurfaceObject(surfWeightObj)); //cuda(FreeArray(d_ReconWeight)); return Tomo_OK; } /****************************************************************************/ /* Kernel launch helpers */ /****************************************************************************/ inline float TomoRecon::geoHelper() { cuda(MemcpyAsync(constants.d_Beamx, Sys.Geo.EmitX, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamy, Sys.Geo.EmitY, Sys.Proj.NumViews * sizeof(float), hipMemcpyHostToDevice)); float returnVal; //constants.dataDisplay = projections; //setProjBox(sliceIndex); //float normalVal = focusHelper() / 2; constants.dataDisplay = reconstruction; returnVal = focusHelper(); //returnVal -= normalVal; return returnVal; } inline float TomoRecon::focusHelper() { //Render new frame singleFrame(); //get the focus metric float currentBest; cuda(MemsetAsync(d_MaxVal, 0, sizeof(float))); //TODO: check boundary conditions if (constants.dataDisplay == projections) { KERNELCALL3(sumReduction, reductionBlocks, reductionThreads, reductionSize, d_Image, projPitch / sizeof(float), d_MaxVal, min(baseX, currX), max(baseX, currX), min(baseY, currY), max(baseY, currY)); } else { KERNELCALL3(sumReduction, reductionBlocks, reductionThreads, reductionSize, d_Image, reconPitchNum, d_MaxVal, min(constants.baseXr, constants.currXr), max(constants.baseXr, constants.currXr), min(constants.baseYr, constants.currYr), max(constants.baseYr, constants.currYr)); } cuda(Memcpy(&currentBest, d_MaxVal, sizeof(float), hipMemcpyDeviceToHost)); return currentBest; } inline TomoError TomoRecon::imageKernel(float xK[KERNELSIZE], float yK[KERNELSIZE], float * output, bool projs) { if (projs) { cuda(BindTexture2D(NULL, textImage, d_Image, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny, projPitch)); KERNELCALL2(convolutionRowsKernel, contBlocks, contThreads, d_Image2, xK, constants); cuda(UnbindTexture(textImage)); cuda(BindTexture2D(NULL, textImage, d_Image2, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny, projPitch)); KERNELCALL2(convolutionColumnsKernel, contBlocks, contThreads, output, yK, constants); cuda(UnbindTexture(textImage)); } else { cuda(BindTexture2D(NULL, textImage, d_Image, hipCreateChannelDesc<float>(), Sys.Recon.Nx, Sys.Recon.Ny, reconPitch)); KERNELCALL2(convolutionRowsKernel, contBlocks, contThreads, d_Image2, xK, constants); cuda(UnbindTexture(textImage)); cuda(BindTexture2D(NULL, textImage, d_Image2, hipCreateChannelDesc<float>(), Sys.Recon.Nx, Sys.Recon.Ny, reconPitch)); KERNELCALL2(convolutionColumnsKernel, contBlocks, contThreads, output, yK, constants); cuda(UnbindTexture(textImage)); } return Tomo_OK; } inline TomoError TomoRecon::project(float * projections, float * reconstruction) { //cuda(BindTexture2D(NULL, textError, projections, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); 
cuda(BindTexture2D(NULL, textSino, projections, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); KERNELCALL2(projectSlice, contBlocks, contThreads, reconstruction, distance, constants); cuda(UnbindTexture(textSino)); //cuda(UnbindTexture(textError)); } inline TomoError TomoRecon::normProject(float * projections, float * reconstruction, float alignStr) { cuda(BindTexture2D(NULL, textSino, projections, hipCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); KERNELCALL2(normProjectSlice, contBlocks, contThreads, reconstruction, distance, alignStr, constants); cuda(UnbindTexture(textSino)); } TomoError TomoRecon::resetLight() { if (constants.dataDisplay == projections) { constants.baseXr = 7 * Sys.Proj.Nx / 8; constants.baseYr = 7 * Sys.Proj.Ny / 8; constants.currXr = Sys.Proj.Nx / 8; constants.currYr = Sys.Proj.Ny / 8; } else { constants.baseXr = 7 * Sys.Recon.Nx / 8; constants.baseYr = 7 * Sys.Recon.Ny / 8; constants.currXr = Sys.Recon.Nx / 8; constants.currYr = Sys.Recon.Ny / 8; } tomo_err_throw(autoLight()); constants.baseXr = -1; constants.baseYr = -1; constants.currXr = -1; constants.currYr = -1; return Tomo_OK; } TomoError TomoRecon::resetFocus(bool checkFlip) { if (constants.dataDisplay == projections) { constants.baseXr = 3 * Sys.Proj.Nx / 4; constants.baseYr = 3 * Sys.Proj.Ny / 4; constants.currXr = Sys.Proj.Nx / 4; constants.currYr = Sys.Proj.Ny / 4; } else { constants.baseXr = 3 * Sys.Recon.Nx / 4; constants.baseYr = 3 * Sys.Recon.Ny / 4; constants.currXr = Sys.Recon.Nx / 4; constants.currYr = Sys.Recon.Ny / 4; } tomo_err_throw(autoFocus(true, checkFlip)); while (autoFocus(false, checkFlip) == Tomo_OK); constants.baseXr = -1; constants.baseYr = -1; constants.currXr = -1; constants.currYr = -1; return Tomo_OK; } float TomoRecon::getMax(float * d_Im) { constants.baseXr = 3 * Sys.Recon.Nx / 4; constants.baseYr = 3 * Sys.Recon.Ny / 4; constants.currXr = Sys.Recon.Nx / 4; constants.currYr = Sys.Recon.Ny / 4; unsigned int histogram[HIST_BIN_COUNT]; int threshold = Sys.Recon.Nx * Sys.Recon.Ny / AUTOTHRESHOLD; getHistogram(d_Im, reconPitch*Sys.Recon.Ny, histogram); int i; for (i = HIST_BIN_COUNT - 1; i >= 0; i--) { unsigned int count = histogram[i]; if (count > threshold) break; } if (i < 0) i = HIST_BIN_COUNT; constants.baseXr = -1; constants.baseYr = -1; constants.currXr = -1; constants.currYr = -1; return i * UCHAR_MAX; } /****************************************************************************/ /* Conversions */ /****************************************************************************/ //Projection space to recon space TomoError TomoRecon::P2R(int* rX, int* rY, int pX, int pY, int view) { float dz = distance / Sys.Geo.EmitZ[view]; *rX = xMM2R(xP2MM(pX, constants.Px, constants.PitchPx) * (1 + dz) - Sys.Geo.EmitX[constants.revGeo ? constants.Views - 1 - view : view] * dz, constants.Rx, constants.PitchRx); *rY = yMM2R(yP2MM(pY, constants.Py, constants.PitchPy) * (1 + dz) - Sys.Geo.EmitY[view] * dz, constants.Ry, constants.PitchRy); return Tomo_OK; } //Recon space to projection space TomoError TomoRecon::R2P(float* pX, float* pY, int rX, int rY, int view) { float dz = distance / Sys.Geo.EmitZ[view]; *pX = xMM2P((xR2MM(rX, constants.Rx, constants.PitchRx) + Sys.Geo.EmitX[constants.revGeo ? 
constants.Views - 1 - view : view] * dz), constants.Px, constants.PitchPx); *pY = yMM2P((yR2MM(rY, constants.Ry, constants.PitchRy) + Sys.Geo.EmitY[view] * dz), constants.Py, constants.PitchPy); return Tomo_OK; } //Image space to on-screen display TomoError TomoRecon::I2D(int* dX, int* dY, int iX, int iY) { float innerOffx = (width - Sys.Proj.Nx / scale) / 2; float innerOffy = (height - Sys.Proj.Ny / scale) / 2; *dX = (int)((iX - xOff) / scale + innerOffx); *dY = (int)((iY - yOff) / scale + innerOffy); return Tomo_OK; } //Projection space to recon space int TomoRecon::P2R(int p, int view, bool xDir) { float dz = distance / Sys.Geo.EmitZ[view]; if (xDir) return (int)(xMM2R(xP2MM(p, constants.Px, constants.PitchPx) * (1 + dz) - Sys.Geo.EmitX[constants.revGeo ? constants.Views - 1 - view : view] * dz, constants.Rx, constants.PitchRx)); //else return (int)(yMM2R(yP2MM(p, constants.Py, constants.PitchPy) * (1 + dz) - Sys.Geo.EmitY[view] * dz, constants.Ry, constants.PitchRy)); } //Recon space to projection space int TomoRecon::R2P(int r, int view, bool xDir) { float dz = distance / Sys.Geo.EmitZ[view]; if (xDir) return (int)(xMM2P((xR2MM(r, constants.Rx, constants.PitchRx) + Sys.Geo.EmitX[constants.revGeo ? constants.Views - 1 - view : view] * dz) / (1.0f + dz), constants.Px, constants.PitchPx)); //else return (int)(yMM2P((yR2MM(r, constants.Ry, constants.PitchRy) + Sys.Geo.EmitY[view] * dz) / (1.0f + dz), constants.Py, constants.PitchPy)); } //Image space to on-screen display int TomoRecon::I2D(int i, bool xDir) { if (xDir) { int sysWidth; if (constants.dataDisplay == projections) sysWidth = Sys.Proj.Nx; else sysWidth = Sys.Recon.Nx; float innerOffx = (width - sysWidth / scale) / 2.0f; return constants.orientation ? (int)((sysWidth - 1 - i - xOff) / scale + innerOffx) : (int)((i - xOff) / scale + innerOffx); } //else int sysHeight; if (constants.dataDisplay == projections) sysHeight = Sys.Proj.Ny; else sysHeight = Sys.Recon.Ny; float innerOffy = (height - sysHeight / scale) / 2.0f; return constants.flip ? (int)((sysHeight - 1 - i - yOff) / scale + innerOffy) : (int)((i - yOff) / scale + innerOffy); } //On-screen coordinates to image space int TomoRecon::D2I(int d, bool xDir) { if (xDir) { int sysWidth; if (constants.dataDisplay == projections) sysWidth = Sys.Proj.Nx; else sysWidth = Sys.Recon.Nx; float innerOffx = (width - sysWidth / scale) / 2.0f; return (int)((d - innerOffx) * scale + xOff); } //else int sysHeight; if (constants.dataDisplay == projections) sysHeight = Sys.Proj.Ny; else sysHeight = Sys.Recon.Ny; float innerOffy = (height - sysHeight / scale) / 2.0f; return (int)((d - innerOffy) * scale + yOff); }
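// --------------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the P2R/R2P conversions above
// use the shift-and-magnify model
//   x_mm(recon) = x_mm(proj) * (1 + dz) - EmitX[view] * dz,   dz = distance / EmitZ[view].
// The standalone helper below restates that mapping with hypothetical names
// (emitterX/emitterZ, pitch, count) so the geometry can be checked in isolation;
// the numbers in main() are examples only, not values taken from the system.
#include <cstdio>

// Self-contained stand-ins for the xP2MM / xMM2R helpers defined in this file.
static float pixelToMM(int p, int count, float pitch) { return (p + 0.5f - count / 2.0f) * pitch; }
static int   mmToPixel(float mm, int count, float pitch) { return (int)(mm / pitch - 0.5f + count / 2.0f); }

// Map one projection-space x index to a reconstruction-space x index for a slice
// at height `distance` below an emitter located at (emitterX, emitterZ).
static int projToReconX(int px, int projCount, float projPitch,
                        int reconCount, float reconPitch,
                        float distance, float emitterX, float emitterZ) {
    float dz = distance / emitterZ;                                        // relative slice height
    float mm = pixelToMM(px, projCount, projPitch) * (1.0f + dz) - emitterX * dz;
    return mmToPixel(mm, reconCount, reconPitch);
}

int main() {
    // Example numbers only: a 1915-pixel detector row with 0.0185 mm pitch,
    // emitter 50 mm off-axis and 400 mm above the detector, slice 10 mm up.
    int r = projToReconX(1000, 1915, 0.0185f, 1915, 0.0185f, 10.0f, 50.0f, 400.0f);
    printf("projection x=1000 -> reconstruction x=%d\n", r);
    return 0;
}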
4ba112fe58721beda90523bb066f6f185cb42d58.cu
/********************************************************************************************/ /* ReconGPU.cu */ /* Copyright 2017, XinRay Inc., All rights reserved */ /********************************************************************************************/ #include "TomoRecon.h" /********************************************************************************************/ /* CUDA specific helper functions */ /********************************************************************************************/ TomoError cuda_assert(const cudaError_t code, const char* const file, const int line) { if (code != cudaSuccess) { std::cout << "Cuda failure " << file << ":" << line << ": " << cudaGetErrorString(code) << "\n"; return Tomo_CUDA_err; } else return Tomo_OK; } TomoError cuda_assert_void(const char* const file, const int line) { cudaError_t code = cudaGetLastError(); if (code != cudaSuccess) { std::cout << "Cuda failure " << file << ":" << line << ": " << cudaGetErrorString(code) << "\n"; return Tomo_CUDA_err; } else return Tomo_OK; } union pxl_rgbx_24{ uint1 b32; struct { unsigned r : 8; unsigned g : 8; unsigned b : 8; unsigned na : 8; }; }; #define PXL_KERNEL_THREADS_PER_BLOCK 256 surface<void, cudaSurfaceType2D> displaySurface; texture<float, cudaTextureType3D, cudaReadModeElementType> textRecon; texture<float, cudaTextureType3D, cudaReadModeElementType> textDelta; texture<float, cudaTextureType2D, cudaReadModeElementType> textImage; texture<float, cudaTextureType2D, cudaReadModeElementType> textError; texture<float, cudaTextureType2D, cudaReadModeElementType> textSino; texture<float, cudaTextureType2D, cudaReadModeElementType> textWeight; /********************************************************************************************/ /* GPU Function specific functions */ /********************************************************************************************/ //Conversion Helpers __host__ __device__ float xP2MM(float p, float Px, float PitchPx) { return (p + 0.5f - Px / 2.0f) * PitchPx; } __host__ __device__ float yP2MM(float p, float Py, float PitchPy) { return (p + 0.5f - Py / 2.0f) * PitchPy; } __host__ __device__ float xR2MM(float r, float Rx, float PitchRx) { return (r + 0.5f - Rx / 2.0f) * PitchRx; } __host__ __device__ float yR2MM(float r, float Ry, float PitchRy) { return (r + 0.5f - Ry / 2.0f) * PitchRy; } __host__ __device__ float xMM2P(float m, float Px, float PitchPx) { return m / PitchPx - 0.5f + Px / 2.0f; } __host__ __device__ float yMM2P(float m, float Py, float PitchPy) { return m / PitchPy - 0.5f + Py / 2.0f; } __host__ __device__ float xMM2R(float m, float Rx, float PitchRx) { return m / PitchRx - 0.5f + Rx / 2.0f; } __host__ __device__ float yMM2R(float m, float Ry, float PitchRy) { return m / PitchRy - 0.5f + Ry / 2.0f; } //Loop unrolling templates, device functions are mapped to inlines 99% of the time template<int i> __device__ float convolutionRow(float x, float y, float kernel[KERNELSIZE]){ return tex2D(textImage, x + (float)(KERNELRADIUS - i), y) * kernel[i] + convolutionRow<i - 1>(x, y, kernel); } template<> __device__ float convolutionRow<-1>(float x, float y, float kernel[KERNELSIZE]){ return 0; } template<int i> __device__ float convolutionColumn(float x, float y, float kernel[KERNELSIZE]){ return tex2D(textImage, x, y + (float)(KERNELRADIUS - i)) * kernel[i] + convolutionColumn<i - 1>(x, y, kernel); } template<> __device__ float convolutionColumn<-1>(float x, float y, float kernel[KERNELSIZE]){ return 0; } //Interploation helper __device__ float 
interpolateSino(float x, float y, int view, params consts) { float xWeight = x - floor(x); float yWeight = y - floor(y); float temp, value = 0, count = 0; temp = tex2D(textSino, x - xWeight + 0.5f, y - yWeight + 0.5f + view * consts.Py); value += (1 - xWeight) * (1 - yWeight) * (temp); if (temp != 0.0f) count += (1 - xWeight) * (1 - yWeight); temp = tex2D(textSino, x - xWeight + 1.5f, y - yWeight + 0.5f + view * consts.Py); value += xWeight * (1 - yWeight) * (temp); if (temp != 0.0f) count += xWeight * (1 - yWeight); temp = tex2D(textSino, x - xWeight + 0.5f, y - yWeight + 1.5f + view * consts.Py); value += (1 - xWeight) * yWeight * (temp); if (temp != 0.0f) count += (1 - xWeight) * yWeight; temp = tex2D(textSino, x - xWeight + 1.5f, y - yWeight + 1.5f + view * consts.Py); value += xWeight * yWeight * (temp); if (temp != 0.0f) count += xWeight * yWeight; //if (count > 0.0f) value /= count; return value; } //Image metric generators __global__ void convolutionRowsKernel(float *d_Dst, float kernel[KERNELSIZE], params consts) { const int ix = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int iy = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); const float x = (float)ix + 0.5f; const float y = (float)iy + 0.5f; const int pitch = consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon ? consts.ReconPitchNum : consts.ProjPitchNum; if (consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon) { if (ix >= consts.Rx || iy >= consts.Ry) return; if (ix >= consts.Rx - KERNELRADIUS || ix < KERNELRADIUS ) {// || iy >= consts.Ry - KERNELRADIUS || iy < KERNELRADIUS d_Dst[MUL_ADD(iy, pitch, ix)] = 0.0f; return; } } else { if (ix >= consts.Px || iy >= consts.Py) return; if (ix >= consts.Px - KERNELRADIUS || ix < KERNELRADIUS) {// || iy >= consts.Py - KERNELRADIUS || iy < KERNELRADIUS d_Dst[MUL_ADD(iy, pitch, ix)] = 0.0f; return; } } d_Dst[MUL_ADD(iy, pitch, ix)] = convolutionRow<KERNELSIZE>(x, y, kernel); } __global__ void convolutionColumnsKernel(float *d_Dst, float kernel[KERNELSIZE], params consts){ const int ix = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int iy = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); const float x = (float)ix + 0.5f; const float y = (float)iy + 0.5f; const int pitch = consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon ? consts.ReconPitchNum : consts.ProjPitchNum; if (consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon) { if (ix >= consts.Rx || iy >= consts.Ry) return; if (iy >= consts.Ry - KERNELRADIUS || iy < KERNELRADIUS) {//ix >= consts.Rx - KERNELRADIUS || || ix < KERNELRADIUS d_Dst[MUL_ADD(iy, pitch, ix)] = 0.0f; return; } } else { if (ix >= consts.Px || iy >= consts.Py) return; if (iy >= consts.Py - KERNELRADIUS || iy < KERNELRADIUS) {//ix >= consts.Px - KERNELRADIUS || || ix < KERNELRADIUS d_Dst[MUL_ADD(iy, pitch, ix)] = 0.0f; return; } } d_Dst[MUL_ADD(iy, pitch, ix)] = convolutionColumn<KERNELSIZE>(x, y, kernel); } __global__ void squareMag(float *d_Dst, float *src1, float *src2, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); bool recon = consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon; int pitch = recon ? 
consts.ReconPitchNum : consts.ProjPitchNum; if ((recon && x >= consts.Rx) || (recon && y >= consts.Ry) || (!recon && x >= consts.Px) || (!recon && y >= consts.Py) || x < 0 || y < 0) return; d_Dst[MUL_ADD(y, pitch, x)] = pow((double)src1[MUL_ADD(y, pitch, x)],2) + pow((double)src2[MUL_ADD(y, pitch, x)],2); } __global__ void mag(float *d_Dst, float *src1, float *src2, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); bool recon = consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon; int pitch = recon ? consts.ReconPitchNum : consts.ProjPitchNum; if ((recon && x >= consts.Rx) || (recon && y >= consts.Ry) || (!recon && x >= consts.Px) || (!recon && y >= consts.Py) || x < 0 || y < 0) return; //d_Dst[MUL_ADD(y, pitch, x)] = (float)sqrt(pow((double)src1[MUL_ADD(y, pitch, x)], 2) + pow((double)src2[MUL_ADD(y, pitch, x)], 2)); d_Dst[MUL_ADD(y, pitch, x)] = (abs(src1[MUL_ADD(y, pitch, x)]) + abs(src2[MUL_ADD(y, pitch, x)])) / 2.0f; } __global__ void abs(float *d_Dst, float *src, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); bool recon = consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon; int pitch = recon ? consts.ReconPitchNum : consts.ProjPitchNum; if ((recon && x >= consts.Rx) || (recon && y >= consts.Ry) || (!recon && x >= consts.Px) || (!recon && y >= consts.Py) || x < 0 || y < 0) return; //d_Dst[MUL_ADD(y, pitch, x)] = (float)sqrt(pow((double)src1[MUL_ADD(y, pitch, x)], 2) + pow((double)src2[MUL_ADD(y, pitch, x)], 2)); d_Dst[MUL_ADD(y, pitch, x)] = abs(src[MUL_ADD(y, pitch, x)]); } __global__ void pow(float *d_Dst, float *src, float exponent, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); bool recon = consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon; int pitch = recon ? consts.ReconPitchNum : consts.ProjPitchNum; if ((recon && x >= consts.Rx) || (recon && y >= consts.Ry) || (!recon && x >= consts.Px) || (!recon && y >= consts.Py) || x < 0 || y < 0) return; //d_Dst[MUL_ADD(y, pitch, x)] = (float)sqrt(pow((double)src1[MUL_ADD(y, pitch, x)], 2) + pow((double)src2[MUL_ADD(y, pitch, x)], 2)); d_Dst[MUL_ADD(y, pitch, x)] = pow(src[MUL_ADD(y, pitch, x)], exponent); } __global__ void squareDiff(float *d_Dst, int view, float xOff, float yOff, int pitchOut, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); if (x >= consts.Px || y >= consts.Py || x < 0 || y < 0) return; d_Dst[MUL_ADD(y, pitchOut, x)] = pow(tex2D(textError, x - xOff, y - yOff + view*consts.Py) - tex2D(textError, x, y + (NUMVIEWS / 2)*consts.Py), 2); } __global__ void add(float* src1, float* src2, float *d_Dst, bool useRatio, bool useAbs, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); bool recon = consts.dataDisplay == reconstruction || consts.dataDisplay == iterRecon; int pitch = recon ? consts.ReconPitchNum : consts.ProjPitchNum; if ((recon && x >= consts.Rx) || (recon && y >= consts.Ry) || (!recon && x >= consts.Px) || (!recon && y >= consts.Py) || x < 0 || y < 0) return; if (useRatio) { if (useAbs) { float val = consts.log ? 
abs(src2[MUL_ADD(y, pitch, x)]) : USHRT_MAX - abs(src2[MUL_ADD(y, pitch, x)]); d_Dst[MUL_ADD(y, pitch, x)] = src1[MUL_ADD(y, pitch, x)] * consts.ratio + val * (1 - consts.ratio); } else { float val = consts.log ? src2[MUL_ADD(y, pitch, x)] : USHRT_MAX - src2[MUL_ADD(y, pitch, x)]; d_Dst[MUL_ADD(y, pitch, x)] = src1[MUL_ADD(y, pitch, x)] * consts.ratio + val * (1 - consts.ratio); } } else d_Dst[MUL_ADD(y, pitch, x)] = (src1[MUL_ADD(y, pitch, x)] + src2[MUL_ADD(y, pitch, x)]) / 2; } __global__ void div(float* src1, float* src2, float *d_Dst, int pitch, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); if (x >= consts.Px || y >= consts.Py || x < 0 || y < 0) return; d_Dst[MUL_ADD(y, pitch, x)] = 100 * src1[MUL_ADD(y, pitch, x)] / (abs(src2[MUL_ADD(y, pitch, x)]) + 1); } __global__ void thresh(float* src1, float* src2, float *d_Dst, int pitch, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); if (x >= consts.Px || y >= consts.Py || x < 0 || y < 0) return; if(src1[MUL_ADD(y, pitch, x)] < 50.0f) d_Dst[MUL_ADD(y, pitch, x)] = src2[MUL_ADD(y, pitch, x)]; else d_Dst[MUL_ADD(y, pitch, x)] = 0.0f; } __global__ void sub(float* src1, float* src2, float *d_Dst, int pitch, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); if (x >= consts.Px || y >= consts.Py || x < 0 || y < 0) return; d_Dst[MUL_ADD(y, pitch, x)] = src1[MUL_ADD(y, pitch, x)] - src2[MUL_ADD(y, pitch, x)]; } __global__ void invert(float* image, params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); if (x >= consts.Px || y >= consts.Py || x < 0 || y < 0) return; float val = image[MUL_ADD(y, consts.ProjPitchNum, x)]; float correctedMax = logf(USHRT_MAX); if (val <= 0.0f) image[MUL_ADD(y, consts.ProjPitchNum, x)] = 0.0f; else if (val >= USHRT_MAX) image[MUL_ADD(y, consts.ProjPitchNum, x)] = USHRT_MAX; #ifdef USELOGITER else image[MUL_ADD(y, consts.ProjPitchNum, x)] = (correctedMax - logf(val + 1)) / correctedMax * USHRT_MAX; #else else image[MUL_ADD(y, consts.ProjPitchNum, x)] = USHRT_MAX - val; #endif } __global__ void projectSliceZ(float * zBuff[KERNELSIZE], int index, int projIndex, float distance, params consts) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; float values[NUMVIEWS]; //Set a normalization and pixel value to 0 float error = 0.0f; float count = 0.0f; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; for (int view = 0; view < NUMVIEWS; view++) { if (projIndex >= 0 && projIndex != view) { count++; continue; } float dz = distance / consts.d_Beamz[view]; if (consts.orientation) dz = -dz;//z changes sign when flipped in the x direction float x = xMM2P((xR2MM(i, consts.Rx, consts.PitchRx) + consts.d_Beamx[consts.revGeo ? 
consts.Views - 1 - view : view] * dz), consts.Px, consts.PitchPx);// / (1 + dz) float y = yMM2P((yR2MM(j, consts.Ry, consts.PitchRy) + consts.d_Beamy[view] * dz), consts.Py, consts.PitchPy); //Update the value based on the error scaled and save the scale if (y > 0 && y < consts.Py && x > 0 && x < consts.Px) { values[view] = tex2D(textError, x, y + view*consts.Py); if (values[view] != 0) { error += values[view]; count++; } } if (projIndex >= 0) break; } if (count > 0) zBuff[index][j*consts.ReconPitchNum + i] = error / count; else zBuff[index][j*consts.ReconPitchNum + i] = 0; } __global__ void zConvolution(float *d_Dst, float * zSrc[KERNELSIZE], float kernel[KERNELSIZE], params consts) { const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); if (x >= consts.Rx || y >= consts.Ry || x < 0 || y < 0) return; float out = 0.0f; for (int i = 0; i < KERNELSIZE; i++) out += zSrc[i][MUL_ADD(y, consts.ReconPitchNum, x)] * kernel[i]; //if(abs(out) > 10.0f) d_Dst[MUL_ADD(y, consts.ReconPitchNum, x)] = out; //else d_Dst[MUL_ADD(y, consts.ReconPitchNum, x)] = 0.0f; } //Display functions __global__ void resizeKernelTex(int wIn, int hIn, int wOut, int hOut, float scale, int xOff, int yOff, bool derDisplay, params consts) { // pixel coordinates const int idx = (blockDim.x * blockIdx.x) + threadIdx.x; const int x = idx % wOut; const int y = idx / wOut; bool negative = false; bool saturate = false; float sum = 0; float i = (x - (wOut - wIn / scale) / 2.0f)*scale + xOff; float j = (y - (hOut - hIn / scale) / 2.0f)*scale + yOff; if (consts.orientation) i = wIn - 1 - i; if (consts.flip) j = hIn - 1 - j; if (i > 0 && j > 0 && i < wIn && j < hIn) sum = tex2D(textImage, i + 0.5f, j + 0.5f); if (sum < 0) { negative = true; sum = abs(sum); } if (consts.log) { if (sum > 0.0f) { float correctedMax = logf(USHRT_MAX); sum = (correctedMax - logf(sum + 1)) / correctedMax * USHRT_MAX; } else sum = consts.minVal; } sum = (sum - consts.minVal) / (consts.maxVal - consts.minVal) * UCHAR_MAX; //if (!consts.log) sum = UCHAR_MAX - sum; saturate = sum > UCHAR_MAX; union pxl_rgbx_24 rgbx; if (saturate) { rgbx.na = UCHAR_MAX; rgbx.r = UCHAR_MAX;//flag errors with big red spots rgbx.g = UCHAR_MAX;//0 rgbx.b = UCHAR_MAX;//0 } else { rgbx.na = UCHAR_MAX; if (negative) { if (consts.showNegative) { rgbx.r = 0; rgbx.g = 0; rgbx.b = sum; } else { rgbx.r = 0; rgbx.g = 0; rgbx.b = 0; } } else { rgbx.r = sum; rgbx.g = sum; rgbx.b = sum; } } surf2Dwrite(rgbx.b32, displaySurface, x * sizeof(rgbx), y, cudaBoundaryModeZero); // squelches out-of-bound writes } __global__ void drawSelectionBox(int UX, int UY, int LX, int LY, int wOut) { const int idx = (blockDim.x * blockIdx.x) + threadIdx.x; const int x = idx % wOut; const int y = idx / wOut; if ((x >= UX && x < UX + LINEWIDTH && y >= LY - LINEWIDTH && y < UY + LINEWIDTH) || (x >= LX - LINEWIDTH && x < LX && y >= LY - LINEWIDTH && y < UY + LINEWIDTH) || (y >= UY && y < UY + LINEWIDTH && x >= LX && x < UX) || (y >= LY - LINEWIDTH && y < LY && x >= LX && x < UX)) { union pxl_rgbx_24 rgbx; rgbx.na = 0xFF; rgbx.r = 255;//flag errors with big red spots rgbx.g = 0; rgbx.b = 0; surf2Dwrite(rgbx.b32, displaySurface, x * sizeof(rgbx), y, cudaBoundaryModeZero); // squelches out-of-bound writes } } __global__ void drawSelectionBar(int X, int Y, int wOut, bool vertical) { const int idx = (blockDim.x * blockIdx.x) + threadIdx.x; const int x = idx % wOut; const int y = idx / wOut; if (!vertical && (x >= X && x < X + LINEWIDTH && y >= Y && y < Y + 
BARHEIGHT) || vertical && (y >= Y && y < Y + LINEWIDTH && x >= X - BARHEIGHT && x < X)) { union pxl_rgbx_24 rgbx; rgbx.na = 0xFF; rgbx.r = 255;//flag errors with big red spots rgbx.g = 0; rgbx.b = 0; surf2Dwrite(rgbx.b32, displaySurface, x * sizeof(rgbx), y, cudaBoundaryModeZero); // squelches out-of-bound writes } } //Functions to do initial correction of raw data: log and scatter correction __global__ void LogCorrectProj(float * Sino, int view, unsigned short *Proj, unsigned short *Gain, params consts){ //Define pixel location in x and y int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //Check image boundaries if ((i < consts.Px) && (j < consts.Py)){ //Flip and invert while converting to float int x = i; int y = j; float val = Proj[j*consts.Px + i]; if (consts.useGain) { val /= (float)Gain[j*consts.Px + i] * (float)consts.exposure / (float)EXPOSUREBASE; if (val > HIGHTHRESH) val = 0.0f; val *= USHRT_MAX; } else val *= 32.0f;//conversion from 10 to 16 bit Sino[(y + view * consts.Py)*consts.ProjPitchNum + x] = val; //large noise correction if (consts.useMaxNoise) { //Get a second round to aviod gain correction issues __syncthreads(); if (x > 1 && x < consts.Px - 1) { float val1 = Sino[(y + view*consts.Py)*consts.ProjPitchNum + x - 1]; float val2 = Sino[(y + view*consts.Py)*consts.ProjPitchNum + x + 1]; float val3 = (val1 + val2) / 2; if (abs(val1 - val2) < 2 * consts.maxNoise && abs(val3 - val) > consts.maxNoise) val = val3; } if (y > 1 && y < consts.Py - 1) { float val1 = Sino[(y - 1 + view*consts.Py)*consts.ProjPitchNum + x]; float val2 = Sino[(y + 1 + view*consts.Py)*consts.ProjPitchNum + x]; float val3 = (val1 + val2) / 2; if (abs(val1 - val2) < 2 * consts.maxNoise && abs(val3 - val) > consts.maxNoise) val = val3; } Sino[(y + view*consts.Py)*consts.ProjPitchNum + x] = val; } } } __global__ void rescale(float * Sino, float * raw, int view, float * MaxVal, float * MinVal, float * colShifts, float * rowShifts, float scale, params consts) { //Define pixel location in x and y int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //Check image boundaries if ((i < consts.Px) && (j < consts.Py)) { float test = Sino[(j + view*consts.Py)*consts.ProjPitchNum + i] -*MinVal; if (test > 0) { test = (test - colShifts[i] - rowShifts[j]) / scale / (*MaxVal - *MinVal) * USHRT_MAX;//scale from 1 to max if (test > consts.metalThresh || !consts.useMetal) Sino[(j + view*consts.Py)*consts.ProjPitchNum + i] = test;// && test < ABSHIGHTHRESH else Sino[(j + view*consts.Py)*consts.ProjPitchNum + i] = 0.0f; raw[(j + view*consts.Py)*consts.ProjPitchNum + i] = test; } else { Sino[(j + view*consts.Py)*consts.ProjPitchNum + i] = 0.0f; raw[(j + view*consts.Py)*consts.ProjPitchNum + i] = 0.0f; } } } //Create the single slice projection image __global__ void projectSlice(float * IM, float distance, params consts) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; float value; //Set a normalization and pixel value to 0 float error = 0.0f; float count = 0.0f; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; for (int view = 0; view < NUMVIEWS; view++) { if (!consts.useBeams[view])continue; float dz = distance / consts.d_Beamz[view]; float x = xMM2P((xR2MM(i, consts.Rx, consts.PitchRx) + consts.d_Beamx[consts.revGeo ? 
consts.Views - 1 - view : view] * dz), consts.Px, consts.PitchPx);// * (1 - dz) float y = yMM2P((yR2MM(j, consts.Ry, consts.PitchRy) + consts.d_Beamy[view] * dz), consts.Py, consts.PitchPy); //Update the value based on the error scaled and save the scale if (y >= 0 && y < consts.Py && x >= 0 && x < consts.Px) { //value = interpolateSino(x, y, view, consts); value = tex2D(textSino, x, y + view * consts.Py); float increment = 1.0f; if (y < TAPERSIZE) increment *= y / TAPERSIZE; if (y > consts.Py - TAPERSIZE) increment *= (consts.Py - y) / TAPERSIZE; if (x < TAPERSIZE) increment *= x / TAPERSIZE; if (x > consts.Px - TAPERSIZE) increment *= (consts.Px - x) / TAPERSIZE; //Corner correction if (consts.Px - x + consts.Py - y < TRISIZE) increment = 0.0f; else if (consts.Px - x + consts.Py - y < TRISIZE + TAPERSIZE) increment *= (consts.Px - x + consts.Py - y - TRISIZE) / TAPERSIZE; if (consts.Px - x + y < TRISIZE) increment = 0.0f; else if (consts.Px - x + y < TRISIZE + TAPERSIZE) increment *= (consts.Px - x + y - TRISIZE) / TAPERSIZE; if (value != 0) { error += value * increment; count += increment; } } } if (count > 0) IM[j*consts.ReconPitchNum + i] = error / count; else IM[j*consts.ReconPitchNum + i] = 0.0f; } __global__ void normProjectSlice(float * IM, float distance, float alignStr, params consts) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; float value; //Set a normalization and pixel value to 0 float error = 0.0f; float sqInputs = 0.0f; float count = 0.0f; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; for (int view = 0; view < NUMVIEWS; view++) { if (!consts.useBeams[view])continue; float dz = distance / consts.d_Beamz[view]; float x = xMM2P((xR2MM(i, consts.Rx, consts.PitchRx) + consts.d_Beamx[consts.revGeo ? 
consts.Views - 1 - view : view] * dz), consts.Px, consts.PitchPx);// * (1 - dz) float y = yMM2P((yR2MM(j, consts.Ry, consts.PitchRy) + consts.d_Beamy[view] * dz), consts.Py, consts.PitchPy); //Update the value based on the error scaled and save the scale if (y >= 0 && y < consts.Py && x >= 0 && x < consts.Px) { //value = interpolateSino(x, y, view, consts); value = tex2D(textSino, x, y + view * consts.Py); float increment = 1.0f; if (y < TAPERSIZE) increment *= y / TAPERSIZE; if (y > consts.Py - TAPERSIZE) increment *= (consts.Py - y) / TAPERSIZE; if (x < TAPERSIZE) increment *= x / TAPERSIZE; if (x > consts.Px - TAPERSIZE) increment *= (consts.Px - x) / TAPERSIZE; //Corner correction if (consts.Px - x + consts.Py - y < TRISIZE) increment = 0.0f; else if (consts.Px - x + consts.Py - y < TRISIZE + TAPERSIZE) increment *= (consts.Px - x + consts.Py - y - TRISIZE) / TAPERSIZE; if (consts.Px - x + y < TRISIZE) increment = 0.0f; else if (consts.Px - x + y < TRISIZE + TAPERSIZE) increment *= (consts.Px - x + y - TRISIZE) / TAPERSIZE; if (value != 0) { error += value * increment; sqInputs += pow(value * increment, 2.0f); count += increment; } } } if (count > 0) { float factor = pow(error, 2.0f) / sqInputs; if (factor > alignStr) factor -= alignStr; else factor = 0.0f; IM[j*consts.ReconPitchNum + i] = error / count * factor / (consts.Views - alignStr); //IM[j*consts.ReconPitchNum + i] = error / count * sqrt(factor / (consts.Views - alignStr)); } else IM[j*consts.ReconPitchNum + i] = 0.0f; } __global__ void xIntegrate(float * output, float * derInput, float * input, int slice, params consts, cudaSurfaceObject_t surfRecon = NULL) { //Define pixel location in x, y, and z int j = blockDim.y * blockIdx.y + threadIdx.y; if (j >= consts.Ry) return; if (blockDim.x * blockIdx.x + threadIdx.x != 0) return; float sum = 0.0; //float sum = input[j*consts.ReconPitchNum]; for (int i = 0; i < consts.Rx; i++) { sum += derInput[j*consts.ReconPitchNum + i]; if (surfRecon == NULL) output[j*consts.ReconPitchNum + i] = sum; else surf3Dwrite(sum, surfRecon, i * sizeof(float), j, slice); } } __global__ void xConvIntegrate(float * output, float * derInput, float * input, int slice, params consts, cudaSurfaceObject_t surfRecon = NULL) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if ((i >= consts.Rx) || (j >= consts.Ry)) return; int width = min(200, min(abs(consts.Rx - 1 - i), i)); float sum = 0.0f; float count = 0.0f; for (int iter = i - width; iter <= i + width; iter++) {//min(consts.Rx, i + 1 + width) float val = input[j*consts.ReconPitchNum + max(0, iter)]; //float val = 20000; if (val > 0.0f) { if (iter > i) sum -= derInput[j*consts.ReconPitchNum + iter] * (width - abs(iter - i)); else sum += derInput[j*consts.ReconPitchNum + iter] * (width - abs(iter - i)); sum += val; count++; } } if(surfRecon == NULL) output[j*consts.ReconPitchNum + i] = sum / count; else surf3Dwrite(sum / count, surfRecon, i * sizeof(float), j, slice); } #ifdef SHOWERROR __global__ void projectIter(float * oldRecon, int slice, float iteration, bool skipTV, params consts, cudaSurfaceObject_t surfRecon, cudaSurfaceObject_t errorRecon) { #else __global__ void projectIter(float * proj, float * oldRecon, float * weights, int slice, float iteration, bool skipTV, float alpha, params consts, cudaSurfaceObject_t surfRecon, cudaSurfaceObject_t surfWeight, bool firstRun = false) { #endif //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + 
threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //Set a normalization and pixel value to 0 float count = 0.0f; float error = 0.0f; float sqInputs = 0.0f; //firstRun = true; //float maximum = 0.0f; //float minimum = FLT_MAX; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; for (int view = 0; view < NUMVIEWS; view++) { float dz = (consts.startDis + slice * consts.pitchZ) / consts.d_Beamz[view]; float x = xMM2P((xR2MM(i, consts.Rx, consts.PitchRx) + consts.d_Beamx[consts.revGeo ? consts.Views - 1 - view : view] * dz), consts.Px, consts.PitchPx);// / (1 + dz) float y = yMM2P((yR2MM(j, consts.Ry, consts.PitchRy) + consts.d_Beamy[view] * dz), consts.Py, consts.PitchPy); //Update the value based on the error scaled and save the scale if (y > 0 && y < consts.Py && x > 0 && x < consts.Px) { float value = tex2D(textError, x + 0.5f, y + 0.5f + view*consts.Py); float increment = 1.0f; if (y < TAPERSIZE) increment *= y / TAPERSIZE; if (y > consts.Py - TAPERSIZE) increment *= (consts.Py - y) / TAPERSIZE; if (x < TAPERSIZE) increment *= x / TAPERSIZE; if (x > consts.Px - TAPERSIZE) increment *= (consts.Px - x) / TAPERSIZE; //Corner correction if (consts.Px - x + consts.Py - y < TRISIZE) increment = 0.0f; else if (consts.Px - x + consts.Py - y < TRISIZE + TAPERSIZE) increment *= (consts.Px - x + consts.Py - y - TRISIZE) / TAPERSIZE; if (consts.Px - x + y < TRISIZE) increment = 0.0f; else if (consts.Px - x + y < TRISIZE + TAPERSIZE) increment *= (consts.Px - x + y - TRISIZE) / TAPERSIZE; if (abs(value) > 0.1f) { //float singleTemp = tex2D(textSino, x, y + view*consts.Py); count += increment; sqInputs += pow(value * increment, 2.0f); if (!firstRun) { //float weight = tex2D(textWeight, x, y + view*consts.Py); //if(weight > 0) // error += value * increment / weight; //else error += value * increment; } else error += value * increment; //if (singleTemp > maximum) maximum = singleTemp; //if (singleTemp < minimum) minimum = singleTemp; //singleVal += singleTemp * increment; } //float minTest = proj[(view * consts.Py + j)*consts.ProjPitchNum + i]; //if (minTest < minimum) minimum = minTest; } } if (count > 0) { /*float factor = pow(error, 2.0f) / sqInputs; if (DERWEIGHTSTR > count) { if (factor > DERWEIGHTSTR) factor -= DERWEIGHTSTR; else factor = 0.0f; error = error / count * factor / (ceil(count) - DERWEIGHTSTR) / (float)consts.slices; } else { error = error / count * factor / max(1.0f, count) / (float)consts.slices; }*/ error /= ((float)count * (float)consts.slices); } else error = 0.0f; float returnVal; surf3Dread(&returnVal, surfRecon, i * sizeof(float), j, slice); if (!skipTV && returnVal > 0.0f) { float AX = 0, BX = 0, temp; if (i > 0) { temp = oldRecon[i - 1 + j*consts.ReconPitchNum]; if (temp > 0.1f) BX += temp * TVX; AX += TVX; } if (i < consts.Rx - 1) { temp = oldRecon[i + 1 + j*consts.ReconPitchNum]; if (temp > 0.1f) BX += temp * TVX; AX += TVX; } if (j > 0) { temp = oldRecon[i + (j - 1)*consts.ReconPitchNum]; if (temp > 0.1f) BX += temp * TVY; AX += TVY; } if (j < consts.Ry - 1) { temp = oldRecon[i + (j + 1)*consts.ReconPitchNum]; if (temp > 0.1f) BX += temp * TVY; AX += TVY; } if (slice > 0) { surf3Dread(&returnVal, surfRecon, i * sizeof(float), j, slice - 1); BX += returnVal * TVZ; AX += TVZ; } if (slice < consts.slices - 1) { surf3Dread(&returnVal, surfRecon, i * sizeof(float), j, slice + 1); BX += returnVal * TVZ; AX += TVZ; } surf3Dread(&returnVal, surfRecon, i * sizeof(float), j, slice); if (AX > 0.0f) error += BX - AX*returnVal; } float weight; 
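// Editor's note (descriptive comment, added for clarity): at this point `error`
// holds the back-projected residual for this voxel, averaged over the
// contributing views and divided by the slice count, plus the optional
// TV-style neighborhood term (BX - AX*returnVal) computed above. The next
// statements read the per-voxel weight surface (its scaling of the residual is
// currently commented out), scale the residual by the relaxation factor
// |alpha|, and apply the additive update
//     voxel_new = voxel_old + |alpha| * error
// before the result is written back to surfRecon below.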
surf3Dread(&weight, surfWeight, i * sizeof(float), j, slice); error *= abs(alpha); //error *= weight / consts.weightMax; returnVal += error; //maximum /= (float)count; //minimum /= (float)count; //if (returnVal > maximum) returnVal = maximum; //if (returnVal < minimum) returnVal = minimum; /*if (returnVal > 0) { returnVal *= 0.97f; returnVal += 100.0f; }*/ //returnVal += (8000.0f - returnVal) * 0.1f; //if (returnVal > 10000) returnVal = 10000; //if (returnVal < 0.1f) returnVal = 0.1f; #ifdef SHOWERROR surf3Dwrite(error, errorRecon, i * sizeof(float), j, slice); #endif #ifdef RECONDERIVATIVE if (count == 0 || returnVal < 0.0f) surf3Dwrite(0.0f, surfRecon, i * sizeof(float), j, slice); else surf3Dwrite(returnVal, surfRecon, i * sizeof(float), j, slice); #else //surf3Dwrite(error, surfDelta, i * sizeof(float), j, slice); //surf3Dwrite(delta, surfDelta, i * sizeof(float), j, slice); surf3Dwrite(returnVal, surfRecon, i * sizeof(float), j, slice); #endif // RECONDERIVATIVE } __global__ void projectFinalIter(int slice, params consts, cudaSurfaceObject_t surfRecon) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //Set a normalization and pixel value to 0 float count = 0.0f; float error = 0.0f; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; for (int view = 0; view < NUMVIEWS; view++) { //int view = 3; { float dz = (consts.startDis + slice * consts.pitchZ) / consts.d_Beamz[view]; float x = xMM2P((xR2MM(i, consts.Rx, consts.PitchRx) + consts.d_Beamx[consts.revGeo ? consts.Views - 1 - view : view] * dz), consts.Px, consts.PitchPx);// / (1 + dz) float y = yMM2P((yR2MM(j, consts.Ry, consts.PitchRy) + consts.d_Beamy[view] * dz), consts.Py, consts.PitchPy); //Update the value based on the error scaled and save the scale if (y > 0 && y < consts.Py && x > 0 && x < consts.Px) { float value = tex2D(textSino, x + 0.5f, y + 0.5f + view*consts.Py); float increment = 1.0f; if (y < TAPERSIZE) increment *= y / TAPERSIZE; if (y > consts.Py - TAPERSIZE) increment *= (consts.Py - y) / TAPERSIZE; if (x < TAPERSIZE) increment *= x / TAPERSIZE; if (x > consts.Px - TAPERSIZE) increment *= (consts.Px - x) / TAPERSIZE; //Corner correction if (consts.Px - x + consts.Py - y < TRISIZE) increment = 0.0f; else if (consts.Px - x + consts.Py - y < TRISIZE + TAPERSIZE) increment *= (consts.Px - x + consts.Py - y - TRISIZE) / TAPERSIZE; if (consts.Px - x + y < TRISIZE) increment = 0.0f; else if (consts.Px - x + y < TRISIZE + TAPERSIZE) increment *= (consts.Px - x + y - TRISIZE) / TAPERSIZE; if (abs(value) > 0.1f) { error += value * increment; count += increment; } } } if (count > 0) error /= (float)count; else error = 0.0f; float returnVal; surf3Dread(&returnVal, surfRecon, i * sizeof(float), j, slice); if(returnVal < 0.1f) surf3Dwrite(error, surfRecon, i * sizeof(float), j, slice); } __global__ void backProject(float * proj, float * error, float * weights, int view, float iteration, float totalIterations, cudaSurfaceObject_t surfWeight, params consts) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; float value = 0; float deltaSum = 0.0f; int count = 0; //Check image boundaries if ((i >= consts.Px) || (j >= consts.Py)) return; for (int slice = 0; slice < consts.slices; slice++) { float dz = (consts.startDis + slice * consts.pitchZ) / consts.d_Beamz[view]; float x = xMM2R((xP2MM(i, consts.Px, consts.PitchPx) - 
consts.d_Beamx[consts.revGeo ? consts.Views - 1 - view : view] * dz), consts.Rx, consts.PitchRx); float y = yMM2R((yP2MM(j, consts.Py, consts.PitchPy) - consts.d_Beamy[view] * dz), consts.Ry, consts.PitchRy); //Update the value based on the error scaled and save the scale if (y >= 0 && y < consts.Ry && x >= 0 && x < consts.Rx) { //value += tex2D(textSino, x, y + slice*consts.Ry); float returnVal = 0.0f, delta; //surf3Dread(&returnVal, surfRecon, x * sizeof(float), y, slice); returnVal = tex3D(textRecon, x + 0.5f, y + 0.5f, slice + 0.5f); /*{ float tempVal; int tempCount = 0; tempVal = tex3D(textRecon, x, y, slice); if (tempVal > 0.0f) { returnVal += tempVal; tempCount++; } tempVal = tex3D(textRecon, x, y + 1.0f, slice); if (tempVal > 0.0f) { returnVal += tempVal; tempCount++; } tempVal = tex3D(textRecon, x + 1.0f, y + 1.0f, slice); if (tempVal > 0.0f) { returnVal += tempVal; tempCount++; } tempVal = tex3D(textRecon, x + 1.0f, y, slice); if (tempVal > 0.0f) { returnVal += tempVal; tempCount++; } if (tempCount > 0) returnVal /= tempCount; else returnVal = 0.0f; }*/ //surf3Dread(&delta, surfDelta, i * sizeof(float), j, slice); //deltaSum += abs(delta); deltaSum++; if (returnVal >= 0.1f) count++; value += returnVal; } } float projVal = proj[j*consts.ProjPitchNum + i]; #ifdef RECONDERIVATIVE if (projVal > 0.0f && abs(projVal) <= USHRT_MAX && count > 0) { error[j*consts.ProjPitchNum + i] = projVal - (value * (float)consts.Views / (float)count); } #else if (projVal > 0.0f && count > 0) { #ifdef USELOGITER float correctedMax = logf(USHRT_MAX); projVal = (correctedMax - logf(projVal + 1)) / correctedMax * USHRT_MAX; #else #ifdef INVERSEITER projVal = USHRT_MAX - projVal; #endif #endif error[j*consts.ProjPitchNum + i] = (projVal - (value * (float)consts.Views / (float)count)); } #endif // RECONDERIVATIVE else error[j*consts.ProjPitchNum + i] = 0.0f; weights[j*consts.ProjPitchNum + i] = deltaSum; } __global__ void synthetic2D(float * synth, int sliceIndex, params consts) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; float value = 0; int count = 0; //Check image boundaries if ((i >= consts.Px) || (j >= consts.Py)) return; /*const int buffer = 1; int min = sliceIndex - buffer; int max = sliceIndex + buffer + 1; if (min < 0) min = 0; if (max > consts.slices) max = consts.slices;*/ //for (int slice = min; slice < max; slice++) { for (int slice = 0; slice < consts.slices; slice++) { float dz = (consts.startDis + slice * consts.pitchZ) / consts.d_Beamz[0]; float x = xMM2R((xP2MM(i, consts.Px, consts.PitchPx) + consts.projectionAngle * dz), consts.Rx, consts.PitchRx); //int x = consts.projectionAngle * slice / consts.slices + i; int y = j; if ((x >= consts.Px) || (x < 0)) continue; float returnVal = 0.0f; returnVal = tex3D(textRecon, x + 0.5f, y + 0.5f, slice + 0.5f); if (returnVal >= 0.1f) count++; value += returnVal; } if (count > 0) synth[j*consts.ReconPitchNum + i] = value / count; else synth[j*consts.ReconPitchNum + i] = 0.0f; } __global__ void getSinogram(float * output, params consts) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; float value; //Check image boundaries if ((i >= consts.Px) || (j >= consts.Py)) return; int view = j * NUMVIEWS / consts.Py; output[j*consts.ProjPitchNum + i] = tex2D(textSino, i, consts.pixelLine + view * consts.Py); } __global__ void copySlice(float * image, int slice, params consts, cudaSurfaceObject_t surfRecon, bool invertLogCorrect 
= false) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; float returnVal; surf3Dread(&returnVal, surfRecon, i * sizeof(float), j, slice); if (invertLogCorrect) { if (returnVal > 10) { float correctedMax = logf(USHRT_MAX); returnVal = (correctedMax - logf(USHRT_MAX - returnVal + 1)) / correctedMax * USHRT_MAX; } } image[j*consts.ReconPitchNum + i] = returnVal; } __global__ void invertRecon(int slice, params consts, cudaSurfaceObject_t surfRecon) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; float test; surf3Dread(&test, surfRecon, i * sizeof(float), j, slice); surf3Dwrite(USHRT_MAX - test, surfRecon, i * sizeof(float), j, slice); } __global__ void scaleRecon(int slice, float * scales, float * offsets, params consts, cudaSurfaceObject_t surfRecon) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; float test; surf3Dread(&test, surfRecon, i * sizeof(float), j, slice); if (test == 0.0f) return; unsigned int index = (unsigned short)test >> 8; test = test * scales[index] + offsets[index] * 256.0f; if (test > 1.0f) { surf3Dwrite(test, surfRecon, i * sizeof(float), j, slice); } else surf3Dwrite(1.0f, surfRecon, i * sizeof(float), j, slice); } __global__ void initArray(int slice, float value, params consts, cudaSurfaceObject_t surfRecon) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; //Check image boundaries if ((i >= consts.Rx) || (j >= consts.Ry)) return; surf3Dwrite(value, surfRecon, i * sizeof(float), j, slice); } //Ruduction and histogram functions __global__ void sumReduction(float * Image, int pitch, float * sum, float lowX, float upX, float lowY, float upY) { //Define shared memory to read all the threads extern __shared__ float data[]; //define the thread and block location const int thread = threadIdx.x; const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); float val; if (x >= ceil(upX) || y >= ceil(upY) || x <= floor(lowX) || y <= floor(lowY)) { val = 0.0; data[thread] = 0.0; } else { val = Image[y*pitch + x]; if (x == floor(upX)) { val += Image[y*pitch + x + 1] * (upX - floor(upX)); Image[y*pitch + x + 1] = 0.0; } if (y == floor(upY)) { val += Image[(y + 1)*pitch + x] * (upY - floor(upY)); Image[(y + 1)*pitch + x] = 0.0; } if (x == ceil(lowX)) { val += Image[y*pitch + x - 1] * (ceil(lowX) - lowX); Image[y*pitch + x - 1] = 0.0; } if (y == ceil(lowY)) { val += Image[(y - 1)*pitch + x] * (ceil(lowY) - lowY); Image[(y - 1)*pitch + x] = 0.0; } data[thread] = val; Image[y*pitch + x] = 0.0;//test display } //Each thread puts its local sum into shared memory __syncthreads(); //Do reduction in shared memory if (thread < 512) data[thread] = val += data[thread + 512]; __syncthreads(); if (thread < 256) data[thread] = val += data[thread + 256]; __syncthreads(); if (thread < 128) data[thread] = val += data[thread + 128]; __syncthreads(); if (thread < 64) data[thread] = val += data[thread + 64]; __syncthreads(); if (thread < 32) { // Fetch final intermediate sum from 2nd 
warp data[thread] = val += data[thread + 32]; // Reduce final warp using shuffle for (int offset = warpSize / 2; offset > 0; offset /= 2) { val += __shfl_down(val, offset); } } //write the result for this block to global memory if (thread == 0 && val > 0) { atomicAdd(sum, val); } } __global__ void sumRowsOrCols(float * sum, bool cols, params consts) { //Define shared memory to read all the threads extern __shared__ float data[]; __shared__ int counts[1024]; //define the thread and block location const int thread = threadIdx.x; const int x = MUL_ADD(blockDim.x, blockIdx.x, threadIdx.x); const int y = MUL_ADD(blockDim.y, blockIdx.y, threadIdx.y); float val = 0; int count = 0; int i = x; int limit; if (cols) limit = consts.Py; else limit = consts.Px; while(i < limit){ float temp; if(cols) temp = tex2D(textSino, y, i); else temp = tex2D(textSino, i, y); val += temp; if (temp > 0.0f) count++; i += blockDim.x; } data[thread] = val; counts[thread] = count; //Each thread puts its local sum into shared memory __syncthreads(); //Do reduction in shared memory if (thread < 512) { data[thread] = val += data[thread + 512]; counts[thread] = count += counts[thread + 512]; } __syncthreads(); if (thread < 256) { data[thread] = val += data[thread + 256]; counts[thread] = count += counts[thread + 256]; } __syncthreads(); if (thread < 128) { data[thread] = val += data[thread + 128]; counts[thread] = count += counts[thread + 128]; } __syncthreads(); if (thread < 64) { data[thread] = val += data[thread + 64]; counts[thread] = count += counts[thread + 64]; } __syncthreads(); if (thread < 32) { // Fetch final intermediate sum from 2nd warp data[thread] = val += data[thread + 32]; counts[thread] = count += counts[thread + 32]; // Reduce final warp using shuffle for (int offset = warpSize / 2; offset > 0; offset /= 2) { val += __shfl_down(val, offset); count += __shfl_down(count, offset); } } //write the result for this block to global memory if (thread == 0) { if (cols) val *= consts.Py; else val *= consts.Px; if (count > 0) sum[y] = val / (float)count; else sum[y] = 0.0f; } } template <typename T> __global__ void histogram256Kernel(unsigned int *d_Histogram, T *d_Data, unsigned int dataCount, params consts) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int minX = min(consts.baseXr, consts.currXr); int maxX = max(consts.baseXr, consts.currXr); int minY = min(consts.baseYr, consts.currYr); int maxY = max(consts.baseYr, consts.currYr); if (i < minX || i > maxX || j < minY || j > maxY) return; //if (consts.orientation) i = consts.Px - 1 - i; //if (consts.flip) j = consts.Py - 1 - j; float data; if (consts.dataDisplay == projections) { data = abs(d_Data[MUL_ADD(j, consts.ProjPitchNum, i)]); } else { data = abs(d_Data[MUL_ADD(j, consts.ReconPitchNum, i)]); } //whatever it currently is, cast it to ushort //if (data <= 0.0f) return; if (consts.log) { if (data > 0) { float correctedMax = logf(USHRT_MAX); data = (correctedMax - logf(data + 1)) / correctedMax * USHRT_MAX; } } if (data > USHRT_MAX) data = USHRT_MAX; if (data <= 0.0f) return;// data = 0.0f; atomicAdd(d_Histogram + ((unsigned short)data >> 8), 1);//bin by the upper 256 bits } __global__ void histogramReconKernel(unsigned int *d_Histogram, int slice, bool useLog, params consts, cudaSurfaceObject_t surfRecon) { //Define pixel location in x, y, and z int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int minX = min(consts.baseXr, 
consts.currXr); int maxX = max(consts.baseXr, consts.currXr); int minY = min(consts.baseYr, consts.currYr); int maxY = max(consts.baseYr, consts.currYr); if (i < minX || i >= maxX || j < minY || j >= maxY) return; if (consts.orientation) i = consts.Rx - 1 - i; if (consts.flip) j = consts.Ry - 1 - j; float data = 0; surf3Dread(&data, surfRecon, i * sizeof(float), j, slice); if (consts.log && useLog) { if (data > 0) { float correctedMax = logf(USHRT_MAX); data = (correctedMax - logf(data + 1)) / correctedMax * USHRT_MAX; } } if (consts.isReconstructing) { if (data > 0) { float correctedMax = logf(USHRT_MAX); data = (correctedMax - logf(USHRT_MAX - data + 1)) / correctedMax * USHRT_MAX; } } if (data > USHRT_MAX) data = USHRT_MAX; if (data <= 0.0f) return;// data = 0.0f; atomicAdd(d_Histogram + ((unsigned short)data >> 8), 1);//bin by the upper 8 bits } // u= (1 - tu) * uold + tu .* ( f + 1/lambda*div(z) ); __global__ void updhgF_SoA(float *f, float *z1, float *z2, float *g, float tf, float invlambda, int nx, int ny){ int px = blockIdx.x * blockDim.x + threadIdx.x; int py = blockIdx.y * blockDim.y + threadIdx.y; int idx = px + py*nx; float DIVZ; if (px<nx && py<ny){ // compute the divergence DIVZ = 0; if ((px<(nx - 1))) DIVZ += z1[idx]; if ((px>0)) DIVZ -= z1[idx - 1]; if ((py<(ny - 1))) DIVZ += z2[idx]; if ((py>0)) DIVZ -= z2[idx - nx]; // update f float val = g[idx]; if(val > 0) f[idx] = (1 - tf) *f[idx] + tf * (val + invlambda*DIVZ); else f[idx] = 0; } } // z= zold + tz*lambda* grad(u); // and normalize z: //n=max(1,sqrt(z(:,:,1).*z(:,:,1) +z(:,:,2).*z(:,:,2) ) ); // z= z/n; __global__ void updhgZ_SoA(float *z1, float *z2, float *f, float tz, float lambda, int nx, int ny){ int px = blockIdx.x * blockDim.x + threadIdx.x; int py = blockIdx.y * blockDim.y + threadIdx.y; int idx = px + py*nx; if (px<nx && py<ny){ // compute the gradient float a = 0; float b = 0; float fc = f[idx]; if (px < (nx - 1)) { float val = f[idx + 1]; if(val > 0) a = val - fc; } if (py < (ny - 1)) { float val = f[idx + nx]; if(val > 0) b = val - fc; } // update z a = z1[idx] + tz*lambda*a; b = z2[idx] + tz*lambda*b; // project float t = 0; t = sqrtf(a*a + b*b); t = (t <= 1 ? 1. 
: t); z1[idx] = a / t; z2[idx] = b / t; } } /********************************************************************************************/ /* Function to interface the CPU with the GPU: */ /********************************************************************************************/ //Function to set up the memory on the GPU TomoError TomoRecon::initGPU(){ //init recon space float redFac = 1.0f; Sys.Recon.Pitch_x = Sys.Proj.Pitch_x * redFac; Sys.Recon.Pitch_y = Sys.Proj.Pitch_y * redFac; Sys.Recon.Nx = Sys.Proj.Nx / redFac; Sys.Recon.Ny = Sys.Proj.Ny / redFac; //Normalize Geometries Sys.Geo.IsoX = Sys.Geo.EmitX[NUMVIEWS / 2]; Sys.Geo.IsoY = Sys.Geo.EmitY[NUMVIEWS / 2]; Sys.Geo.IsoZ = Sys.Geo.EmitZ[NUMVIEWS / 2]; for (int i = 0; i < NUMVIEWS; i++) { Sys.Geo.EmitX[i] -= Sys.Geo.IsoX; Sys.Geo.EmitY[i] -= Sys.Geo.IsoY; } constants.pitchZ = Sys.Geo.ZPitch; //cudaDeviceSynchronize(); #ifdef PRINTMEMORYUSAGE size_t avail_mem; size_t total_mem; cudaMemGetInfo(&avail_mem, &total_mem); std::cout << "Init start available memory: " << avail_mem << "/" << total_mem << "\n"; #endif // PRINTMEMORYUSAGE //Get Device Number cudaError_t cudaStatus; int deviceCount; int failedAttempts = 0; cuda(GetDeviceCount(&deviceCount)); for (int i = 0; i < deviceCount; i++) { if (cudaSetDevice(i) == cudaSuccess) break; failedAttempts++; } if (failedAttempts == deviceCount) return Tomo_CUDA_err; cuda(StreamCreateWithFlags(&stream, cudaStreamDefault)); //Thread and block sizes for standard kernel calls (2d optimized) contThreads.x = WARPSIZE; contThreads.y = MAXTHREADS / WARPSIZE; //Thread and block sizes for reductions (1d optimized) reductionThreads.x = MAXTHREADS; reductionBlocks.x = (Sys.Proj.Nx + reductionThreads.x - 1) / reductionThreads.x; reductionBlocks.y = Sys.Proj.Ny; //Set up display and buffer regions //cuda(MallocPitch((void**)&d_Image, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny)); if (redFac <= 1.0f) { cuda(MallocPitch((void**)&d_Image, &displayPitch, Sys.Recon.Nx * sizeof(float), Sys.Recon.Ny)); cuda(MallocPitch((void**)&d_Image2, &displayPitch, Sys.Recon.Nx * sizeof(float), Sys.Recon.Ny)); contBlocks.x = (Sys.Recon.Nx + contThreads.x - 1) / contThreads.x; contBlocks.y = (Sys.Recon.Ny + contThreads.y - 1) / contThreads.y; } else { cuda(MallocPitch((void**)&d_Image, &displayPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny)); cuda(MallocPitch((void**)&d_Image2, &displayPitch, Sys.Recon.Nx * sizeof(float), Sys.Recon.Ny)); contBlocks.x = (Sys.Proj.Nx + contThreads.x - 1) / contThreads.x; contBlocks.y = (Sys.Proj.Ny + contThreads.y - 1) / contThreads.y; } constants.DisplayPitchNum = displayPitch / sizeof(float); cuda(MallocPitch((void**)&d_Sino, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); cuda(MallocPitch((void**)&d_Raw, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); cuda(MallocPitch((void**)&d_Weights, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); cuda(MallocPitch((void**)&inXBuff, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); cuda(MallocPitch((void**)&inYBuff, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); #ifdef USEITERATIVE cuda(MallocPitch((void**)&d_Error, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); #else cuda(MallocPitch((void**)&d_Error, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny)); #endif reconPitch = max(projPitch, displayPitch); reconPitchNum = (int)reconPitch / sizeof(float); constants.ReconPitchNum = 
reconPitchNum; //Define the size of each of the memory spaces on the gpu in number of bytes sizeProj = Sys.Proj.Nx * Sys.Proj.Ny * sizeof(unsigned short); sizeSino = projPitch * Sys.Proj.Ny * Sys.Proj.NumViews; sizeIM = projPitch * Sys.Proj.Ny; sizeError = projPitch * Sys.Proj.Ny; cuda(Malloc((void**)&constants.d_Beamx, Sys.Proj.NumViews * sizeof(float))); cuda(Malloc((void**)&constants.d_Beamy, Sys.Proj.NumViews * sizeof(float))); cuda(Malloc((void**)&constants.d_Beamz, Sys.Proj.NumViews * sizeof(float))); cuda(Malloc((void**)&constants.useBeams, Sys.Proj.NumViews * sizeof(bool))); cuda(Malloc((void**)&d_MaxVal, sizeof(float))); cuda(Malloc((void**)&d_MinVal, sizeof(float))); //Copy geometries cuda(MemcpyAsync(constants.d_Beamx, Sys.Geo.EmitX, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamy, Sys.Geo.EmitY, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamz, Sys.Geo.EmitZ, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(constants.useBeams, Sys.Proj.activeBeams, Sys.Proj.NumViews * sizeof(bool), cudaMemcpyHostToDevice)); //Define the textures textImage.filterMode = cudaFilterModeLinear; textImage.addressMode[0] = cudaAddressModeClamp; textImage.addressMode[1] = cudaAddressModeClamp; textError.filterMode = cudaFilterModeLinear; textError.addressMode[0] = cudaAddressModeClamp; textError.addressMode[1] = cudaAddressModeClamp; textWeight.filterMode = cudaFilterModeLinear; textWeight.addressMode[0] = cudaAddressModeClamp; textWeight.addressMode[1] = cudaAddressModeClamp; textSino.filterMode = cudaFilterModeLinear; textSino.addressMode[0] = cudaAddressModeClamp; textSino.addressMode[1] = cudaAddressModeClamp; textRecon.filterMode = cudaFilterModePoint; textRecon.addressMode[0] = cudaAddressModeClamp; textRecon.addressMode[1] = cudaAddressModeClamp; constants.Px = Sys.Proj.Nx; constants.Py = Sys.Proj.Ny; constants.Rx = Sys.Recon.Nx; constants.Ry = Sys.Recon.Ny; constants.PitchPx = Sys.Proj.Pitch_x; constants.PitchPy = Sys.Proj.Pitch_y; constants.PitchRx = Sys.Recon.Pitch_x; constants.PitchRy = Sys.Recon.Pitch_y; constants.Views = Sys.Proj.NumViews; constants.log = true; constants.orientation = false; constants.flip = false; int pitch = (int)projPitch / sizeof(float); constants.ProjPitchNum = pitch; //Setup derivative buffers cuda(Malloc(&buff1, sizeIM * sizeof(float))); cuda(Malloc(&buff2, sizeIM * sizeof(float))); #ifdef ENABLEZDER //Z buffer cuda(MallocPitch((void**)&inZBuff, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); cuda(MallocPitch((void**)&maxZVal, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); cuda(MallocPitch((void**)&maxZPos, &projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny * Sys.Proj.NumViews)); float * tempZBuffs[KERNELSIZE]; cuda(Malloc(&zBuffs, KERNELSIZE * sizeof(float*))); for (int i = 0; i < KERNELSIZE; i++) { cuda(Malloc(&tempZBuffs[i], sizeIM * sizeof(float))); } cuda(MemcpyAsync(zBuffs, tempZBuffs, KERNELSIZE * sizeof(float*), cudaMemcpyHostToDevice)); #endif // ENABLEZDER //Set up all kernels cuda(Malloc(&d_gauss, KERNELSIZE * sizeof(float))); cuda(Malloc(&d_gaussDer, KERNELSIZE * sizeof(float))); cuda(Malloc(&d_gaussDer2, KERNELSIZE * sizeof(float))); //cuda(Malloc(&d_gaussDer3, KERNELSIZE * sizeof(float))); float tempKernel[KERNELSIZE]; setGauss(tempKernel); cuda(MemcpyAsync(d_gauss, tempKernel, KERNELSIZE * sizeof(float), cudaMemcpyHostToDevice)); float tempKernelDer[KERNELSIZE]; 
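// Editor's note (descriptive comment, added for clarity): three 1-D
// KERNELSIZE-tap filters are staged on the host and copied to the device:
// d_gauss (smoothing), d_gaussDer (first derivative), and d_gaussDer2
// (presumably a second-derivative kernel, per its name). They feed the
// separable convolutionRowsKernel/convolutionColumnsKernel passes; for
// example, ReadProjections later calls imageKernel(d_gaussDer, d_gauss, ...)
// and imageKernel(d_gauss, d_gaussDer, ...) to build the x- and y-gradient
// buffers inXBuff/inYBuff. A common choice for such a derivative-of-Gaussian
// tap (an assumption here; setGaussDer itself is defined elsewhere) is
//     g'(x) = -x / sigma^2 * exp(-x^2 / (2 * sigma^2)).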
setGaussDer(tempKernelDer); cuda(MemcpyAsync(d_gaussDer, tempKernelDer, KERNELSIZE * sizeof(float), cudaMemcpyHostToDevice)); float tempKernelDer2[KERNELSIZE]; setGaussDer2(tempKernelDer2); cuda(MemcpyAsync(d_gaussDer2, tempKernelDer2, KERNELSIZE * sizeof(float), cudaMemcpyHostToDevice)); /*float tempKernelDer3[KERNELSIZE]; setGaussDer3(tempKernelDer3); cuda(MemcpyAsync(d_gaussDer3, tempKernelDer3, KERNELSIZE * sizeof(float), cudaMemcpyHostToDevice));*/ #ifdef PRINTMEMORYUSAGE cudaMemGetInfo(&avail_mem, &total_mem); std::cout << "Init end available memory: " << avail_mem << "/" << total_mem << "\n"; #endif // PRINTMEMORYUSAGE return Tomo_OK; } TomoError TomoRecon::ReadProjections(unsigned short ** GainData, unsigned short ** RawData) { //Correct projections float * sumValsVert = new float[NumViews * Sys.Proj.Nx]; float * sumValsHor = new float[NumViews * Sys.Proj.Ny]; float * vertOff = new float[NumViews * Sys.Proj.Nx]; float * horOff = new float[NumViews * Sys.Proj.Ny]; float * d_SumValsVert; float * d_SumValsHor; #ifdef VERBOSEMEMORY size_t avail_mem; size_t total_mem; cudaMemGetInfo(&avail_mem, &total_mem); std::cout << "Read start available memory: " << avail_mem << "/" << total_mem << "\n"; #endif // VERBOSEMEMORY cuda(Malloc((void**)&d_SumValsVert, Sys.Proj.Nx * sizeof(float))); cuda(Malloc((void**)&d_SumValsHor, Sys.Proj.Ny * sizeof(float))); //define the GPU kernel based on size of "ideal projection" dim3 dimBlockProj(32, 32); dim3 dimGridProj((Sys.Proj.Nx + 31) / 32, (Sys.Proj.Ny + 31) / 32); dim3 dimGridSum(1, 1); dim3 dimBlockSum(1024, 1); unsigned short * d_Proj; unsigned short * d_Gain; cuda(Malloc((void**)&d_Proj, sizeProj)); cuda(Malloc((void**)&d_Gain, sizeProj)); constants.baseXr = 0; constants.baseYr = 0; constants.currXr = Sys.Proj.Nx; constants.currYr = Sys.Proj.Ny; bool oldLog = constants.log; constants.log = false; sourceData oldData = constants.dataDisplay; constants.dataDisplay = projections; //setStep(1.0); //Read the rest of the blank images for given projection sample set for (int view = 0; view < NumViews; view++) { cuda(MemcpyAsync(d_Proj, RawData[view], sizeProj, cudaMemcpyHostToDevice)); cuda(MemcpyAsync(d_Gain, GainData[view], sizeProj, cudaMemcpyHostToDevice)); KERNELCALL2(LogCorrectProj, dimGridProj, dimBlockProj, d_Sino, view, d_Proj, d_Gain, constants); cuda(BindTexture2D(NULL, textSino, d_Sino + view*Sys.Proj.Ny*projPitch / sizeof(float), cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny, projPitch)); KERNELCALL3(sumRowsOrCols, dim3(1, Sys.Proj.Nx), reductionThreads, reductionSize, d_SumValsVert, true, constants); cuda(MemcpyAsync(sumValsVert + view * Sys.Proj.Nx, d_SumValsVert, Sys.Proj.Nx * sizeof(float), cudaMemcpyDeviceToHost)); cuda(BindTexture2D(NULL, textSino, d_Sino + view*Sys.Proj.Ny*projPitch / sizeof(float), cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny, projPitch)); KERNELCALL3(sumRowsOrCols, dim3(1, Sys.Proj.Ny), reductionThreads, reductionSize, d_SumValsHor, false, constants); cuda(MemcpyAsync(sumValsHor + view * Sys.Proj.Ny, d_SumValsHor, Sys.Proj.Ny * sizeof(float), cudaMemcpyDeviceToHost)); } for (int view = 0; view < NumViews; view++) { tomo_err_throw(scanLineDetect(view, d_SumValsVert, sumValsVert + view * Sys.Proj.Nx, vertOff + view * Sys.Proj.Nx, true, cConstants.scanVertEnable)); tomo_err_throw(scanLineDetect(view, d_SumValsHor, sumValsHor + view * Sys.Proj.Ny, horOff + view * Sys.Proj.Ny, false, cConstants.scanHorEnable)); } //Normalize projection image lighting float maxVal, minVal; unsigned int 
histogram[HIST_BIN_COUNT]; tomo_err_throw(getHistogram(d_Sino + (NumViews / 2)*projPitch / sizeof(float)*Sys.Proj.Ny, projPitch*Sys.Proj.Ny, histogram)); tomo_err_throw(autoLight(histogram, 1, &minVal, &maxVal)); for (int view = 0; view < NumViews; view++) { if (view == (NumViews / 2) || !Sys.Proj.activeBeams[view]) continue; float thisMax, thisMin; unsigned int thisHistogram[HIST_BIN_COUNT]; tomo_err_throw(getHistogram(d_Sino + view*projPitch / sizeof(float)*Sys.Proj.Ny, projPitch*Sys.Proj.Ny, thisHistogram)); tomo_err_throw(autoLight(thisHistogram, 1, &thisMin, &thisMax)); if (thisMax > maxVal) maxVal = thisMax; if (thisMin < minVal) minVal = thisMin; } cuda(Memcpy(d_MinVal, &minVal, sizeof(float), cudaMemcpyHostToDevice)); if (histogram[HIST_BIN_COUNT - 1] > SATURATIONLIMIT * Sys.Proj.Nx * Sys.Proj.Ny) Sys.Proj.saturated = true; float *g, *z1, *z2; size_t size; int j; int nx = projPitch / sizeof(float); int ny = Sys.Proj.Ny; size = nx*ny * sizeof(float); float tz = 2, tf = .2, beta = 0.0001; /* allocate device memory */ cuda(Malloc((void **)&g, size)); cuda(Malloc((void **)&z1, size)); cuda(Malloc((void **)&z2, size)); /* setup a 2D thread grid, with 16x16 blocks */ /* each block is will use nearby memory*/ dim3 block_size(16, 16); dim3 n_blocks((nx + block_size.x - 1) / block_size.x, (ny + block_size.y - 1) / block_size.y); int bailCount = 0; for (int view = 0; view < NumViews; view++) { if (!Sys.Proj.activeBeams[view]) continue; float bestScale = 1.0f; unsigned int histogram2[HIST_BIN_COUNT]; tomo_err_throw(getHistogram(d_Sino + view*projPitch / sizeof(float)*Sys.Proj.Ny, projPitch*Sys.Proj.Ny, histogram2)); int finalIndex = 255; while (histogram2[finalIndex] < 1000) finalIndex--; finalIndex -= 50; //manually check range of offset values int bestOffset = -100; if (view != NumViews / 2) { float step = 0.01f, scale = 1.0f, scaleError = FLT_MAX; while(abs(step) > 0.0005f){ float innerError = FLT_MAX; int innerOffset = -100; float offset = 0.0f, innerStep = 10.0f; while(abs(innerStep) > 0.5f){ //find average error float avgError = 0.0f; for (int test = 0; test < finalIndex; test++) { float index2 = test*scale + offset; if (index2 >= 0 && index2 < 256) { int lower = floor(index2); int upper = ceil(index2); float intopVal; if (upper == lower) intopVal = histogram2[lower]; else intopVal = ((float)upper - index2) * histogram2[lower] + (index2 - (float)lower) * histogram2[upper]; //avgError += pow((float)histogram[test] - intopVal, 2.0f) / (100 + test); avgError += abs((float)histogram[test] - intopVal); } else avgError += histogram[test]; } //MAX logic if (avgError < innerError) { innerError = avgError; innerOffset = offset; bailCount++; if (bailCount > 1000) { offset = 0; bailCount = 0; break; } } else { offset -= innerStep; innerStep *= -0.5f; } offset += innerStep; } if (innerError < scaleError) { scaleError = innerError; bestScale = scale; bestOffset = innerOffset; } else { scale -= step; step *= -0.5f; } scale += step; } for (int j = 0; j < Sys.Proj.Nx; j++) { vertOff[j + view * Sys.Proj.Nx] += bestOffset * 255;//offsets are subtracted } } cuda(MemcpyAsync(d_MaxVal, &maxVal, sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(d_SumValsVert, vertOff + view * Sys.Proj.Nx, Sys.Proj.Nx * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(d_SumValsHor, horOff + view * Sys.Proj.Ny, Sys.Proj.Ny * sizeof(float), cudaMemcpyHostToDevice)); KERNELCALL2(rescale, dimGridProj, dimBlockProj, d_Sino, d_Raw, view, d_MaxVal, d_MinVal, d_SumValsVert, d_SumValsHor, bestScale, constants); if 
(useTV) { //chaTV(d_Sino + projPitch / sizeof(float) * Sys.Proj.Ny * view, iter, projPitch / sizeof(float), Sys.Proj.Ny, lambda); float * input = d_Sino + projPitch / sizeof(float) * Sys.Proj.Ny * view; /* Copy input to device*/ cuda(MemcpyAsync(g, input, size, cudaMemcpyDeviceToDevice)); cuda(MemsetAsync(z1, 0, size)); cuda(MemsetAsync(z2, 0, size)); /* call the functions */ for (j = 0; j < iter; j++) { tz = 0.2 + 0.08*j; tf = (0.5 - 5. / (15 + j)) / tz; // z= zold + tauz.* grad(u); // and normalize z: n=max(1,sqrt(z(:,:,1).*z(:,:,1) +z(:,:,2).*z(:,:,2) + beta) ); z/=n; KERNELCALL2(updhgZ_SoA, n_blocks, block_size, z1, z2, input, tz, 1 / lambda, nx, ny); // u= (1 - tauu*lambda) * uold + tauu .* div(z) + tauu*lambda.*f; KERNELCALL2(updhgF_SoA, n_blocks, block_size, input, z1, z2, g, tf, lambda, nx, ny); } } //Get x and y derivatives and save to their own buffers cuda(Memcpy(d_Image, d_Sino + view * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); sourceData oldDisplay = constants.dataDisplay; constants.dataDisplay = projections; tomo_err_throw(imageKernel(d_gaussDer, d_gauss, inXBuff + view * projPitch / sizeof(float) * Sys.Proj.Ny, true)); tomo_err_throw(imageKernel(d_gauss, d_gaussDer, inYBuff + view * projPitch / sizeof(float) * Sys.Proj.Ny, true)); #ifdef SQUAREMAGINX cuda(Memcpy(buff1, inXBuff + view * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); cuda(Memcpy(d_Image, inYBuff + view * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); KERNELCALL2(mag, contBlocks, contThreads, inXBuff + view * projPitch / sizeof(float) * Sys.Proj.Ny, buff1, d_Image, constants); #endif //SQUAREMAGINX constants.dataDisplay = oldDisplay; #ifdef ENABLEZDER cuda(BindTexture2D(NULL, textError, d_Sino, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); for (int i = -KERNELRADIUS; i <= KERNELRADIUS; i++) { KERNELCALL2(projectSliceZ, contBlocks, contThreads, zBuffs, i + KERNELRADIUS, view, i*Sys.Geo.ZPitch, constants); } cuda(UnbindTexture(textError)); KERNELCALL2(zConvolution, contBlocks, contThreads, inZBuff + view * projPitch / sizeof(float) * Sys.Proj.Ny, zBuffs, d_gaussDer, constants); } /*for (float dis = 0.0f; dis < MAXDIS; dis += Sys.Geo.ZPitch) { //Find the normalized z derivative at every step cuda(BindTexture2D(NULL, textError, d_Sino, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); for (int i = -KERNELRADIUS; i <= KERNELRADIUS; i++) { KERNELCALL2(projectSliceZ, contBlocks, contThreads, zBuffs, i + KERNELRADIUS, -1, dis + i*Sys.Geo.ZPitch, constants); } cuda(UnbindTexture(textError)); KERNELCALL2(zConvolution, contBlocks, contThreads, buff1, zBuffs, d_gaussDer, constants); tomo_err_throw(project(inZBuff, buff2)); KERNELCALL2(sub, contBlocks, contThreads, buff2, buff1, d_Image, reconPitchNum, constants); //Check if the value is at a maximum, and make sure it was a contributor }*/ #else } #endif for (int i = 0; i < HIST_BIN_COUNT; i++) inputHistogram[i] = 0; for (int beam = 0; beam < 7; beam++) { unsigned int histogram2[HIST_BIN_COUNT]; tomo_err_throw(getHistogram(d_Sino + beam*projPitch / sizeof(float)*Sys.Proj.Ny, projPitch*Sys.Proj.Ny, histogram2)); if(Sys.Proj.activeBeams[beam]) for (int i = 0; i < HIST_BIN_COUNT; i++) inputHistogram[i] += histogram2[i]; #ifdef PRINTINTENSITIES std::ofstream outputFile; char outFilename[250]; sprintf(outFilename, "./histogramOut%d.txt", beam); outputFile.open(outFilename); for (int test = 1; test < 
HIST_BIN_COUNT; test++) outputFile << histogram2[test] << "\n"; outputFile.close(); #endif //PRINTINTENSITIES } /* free device memory */ cuda(Free(g)); cuda(Free(z1)); cuda(Free(z2)); constants.log = oldLog; constants.dataDisplay = oldData; constants.baseXr = -1; constants.baseYr = -1; constants.currXr = -1; constants.currYr = -1; delete[] sumValsHor; delete[] sumValsVert; delete[] vertOff; delete[] horOff; cuda(Free(d_Proj)); cuda(Free(d_Gain)); cuda(Free(d_SumValsHor)); cuda(Free(d_SumValsVert)); #ifdef ENABLESOLVER int numSlices = 20; int sqrtNumSl = ceil(sqrt(numSlices)); int matrixSize = Sys.Proj.Nx * Sys.Proj.Ny / pow(sqrtNumSl, 2) * numSlices * sizeof(float); cuda(Malloc(&d_Recon, matrixSize)); cuda(Memset(d_Recon, 0, matrixSize)); #endif // ENABLESOLVER #ifdef VERBOSEMEMORY cudaMemGetInfo(&avail_mem, &total_mem); std::cout << "Read end available memory: " << avail_mem << "/" << total_mem << "\n"; #endif // VERBOSEMEMORY return Tomo_OK; } TomoError TomoRecon::exportRecon(unsigned short * exportData) { float * RawData = new float[reconPitch / sizeof(float)*Sys.Recon.Ny]; int oldProjection = getActiveProjection(); //Create the reconstruction volume around the current location float oldDistance = distance; distance -= constants.slices / 2 * Sys.Geo.ZPitch; for (int i = 0; i < constants.slices; i++) { setActiveProjection(i); singleFrame(); distance += Sys.Geo.ZPitch; cuda(Memcpy(RawData, d_Image, reconPitch*Sys.Recon.Ny, cudaMemcpyDeviceToHost)); for (int j = 0; j < Sys.Recon.Ny; j++) { int y = j; if (constants.flip) y = Sys.Recon.Ny - 1 - j; for (int k = 0; k < Sys.Recon.Nx; k++) { float data = RawData[reconPitch / sizeof(float) * j + k]; if (data != 0.0) { //data *= 2.0f; if (constants.log) { if (data > USHRT_MAX) data = USHRT_MAX; data = (logf(USHRT_MAX) - logf(data)) / logf(USHRT_MAX) * USHRT_MAX; } if (data > USHRT_MAX) data = USHRT_MAX; if (data < 0.0f) data = 0.0f; } int x = k; if (constants.orientation) x = Sys.Recon.Nx - 1 - k; exportData[Sys.Recon.Nx * (y + Sys.Recon.Ny * i) + x] = (unsigned short)data; } } } setActiveProjection(oldProjection); distance = oldDistance; delete[] RawData; tomo_err_throw(singleFrame()); return Tomo_OK; } TomoError TomoRecon::scanLineDetect(int view, float * d_sum, float * sum, float * offset, bool vert, bool enable) { int vectorSize; if (vert) vectorSize = Sys.Proj.Nx; else vectorSize = Sys.Proj.Ny; float * sumCorr = new float[vectorSize]; #ifdef PRINTSCANCORRECTIONS { std::ofstream FILE; std::stringstream outputfile; outputfile << "C:\\Users\\jdean\\Downloads\\cudaTV\\cudaTV\\original" << view << ".txt"; FILE.open(outputfile.str()); for (int i = 0; i < vectorSize; i++) { sum[i] /= vectorSize; FILE << sum[i] << "\n"; sumCorr[i] = sum[i]; } FILE.close(); } #else for (int i = 0; i < vectorSize; i++) { sum[i] /= vectorSize; sumCorr[i] = sum[i]; } #endif float *di; size_t size; int i, j; int N = vectorSize; size = N * sizeof(float); di = (float*)malloc(size); float tau = vert ? 
cConstants.vertTau : cConstants.horTau;
	for (j = 0; j < cConstants.iterations; j++) {
		lapla(sumCorr, di, N);
		for (i = 0; i < N; i++) sumCorr[i] += di[i] * tau;
	}
	free(di);

#ifdef PRINTSCANCORRECTIONS
	{
		std::ofstream FILE;
		std::stringstream outputfile;
		outputfile << "C:\\Users\\jdean\\Downloads\\cudaTV\\cudaTV\\corrected" << view << ".txt";
		FILE.open(outputfile.str());
		for (int i = 0; i < vectorSize; i++) {
			FILE << sumCorr[i] << "\n";
			if(enable) offset[i] = sum[i] - sumCorr[i];
			else offset[i] = 0.0;
			sum[i] = sumCorr[i];
		}
		FILE.close();
	}
#else
	for (int i = 0; i < vectorSize; i++) {
		if (enable) offset[i] = sum[i] - sumCorr[i];
		else offset[i] = 0.0;
		sum[i] = sumCorr[i];
	}
#endif
	delete[] sumCorr;

	return Tomo_OK;
}

//Function to free the gpu memory after program finishes
TomoError TomoRecon::FreeGPUMemory(void){
	if (iterativeInitialized) {
		resetIterative();
	}

#ifdef PRINTMEMORYUSAGE
	size_t avail_mem;
	size_t total_mem;
	cudaMemGetInfo(&avail_mem, &total_mem);
	std::cout << "Free start available memory: " << avail_mem << "/" << total_mem << "\n";
#endif // PRINTMEMORYUSAGE

	//Free memory allocated on the GPU
	cuda(Free(d_Image));
	cuda(Free(d_Image2));
	cuda(Free(d_Error));
	cuda(Free(d_Sino));
	cuda(Free(buff1));
	cuda(Free(buff2));
	cuda(Free(inXBuff));
	cuda(Free(inYBuff));
	cuda(Free(d_Raw));
	cuda(Free(d_Weights));
	cuda(Free(constants.d_Beamx));
	cuda(Free(constants.d_Beamy));
	cuda(Free(constants.d_Beamz));
	cuda(Free(constants.useBeams));
	cuda(Free(d_MaxVal));
	cuda(Free(d_MinVal));
	cuda(Free(d_gauss));
	cuda(Free(d_gaussDer));
	cuda(Free(d_gaussDer2));
	//cuda(Free(d_gaussDer3));

#ifdef ENABLESOLVER
	cuda(Free(d_Recon));
#endif

#ifdef ENABLEZDER
	cuda(Free(inZBuff));
	cuda(Free(maxZVal));
	cuda(Free(maxZPos));
	cuda(Free(zBuffs));
#endif // ENABLEZDER

#ifdef PRINTMEMORYUSAGE
	cudaMemGetInfo(&avail_mem, &total_mem);
	std::cout << "Free end available memory: " << avail_mem << "/" << total_mem << "\n";
#endif // PRINTMEMORYUSAGE

	return Tomo_OK;
}

template <typename T>
TomoError TomoRecon::getHistogram(T * image, unsigned int byteSize, unsigned int *histogram) {
	unsigned int * d_Histogram;

	cuda(Malloc((void **)&d_Histogram, HIST_BIN_COUNT * sizeof(unsigned int)));
	cuda(Memset(d_Histogram, 0, HIST_BIN_COUNT * sizeof(unsigned int)));

	KERNELCALL2(histogram256Kernel, contBlocks, contThreads, d_Histogram, image, byteSize / sizeof(T), constants);

	cuda(Memcpy(histogram, d_Histogram, HIST_BIN_COUNT * sizeof(unsigned int), cudaMemcpyDeviceToHost));

	cuda(Free(d_Histogram));

	return Tomo_OK;
}

TomoError TomoRecon::getHistogramRecon(unsigned int *histogram, cudaSurfaceObject_t volume, bool useall = false, bool useLog = true) {
	unsigned int * d_Histogram;

	cuda(Malloc((void **)&d_Histogram, HIST_BIN_COUNT * sizeof(unsigned int)));
	cuda(Memset(d_Histogram, 0, HIST_BIN_COUNT * sizeof(unsigned int)));

	//cuda(BindSurfaceToArray(surfRecon, d_Recon2));
	if (useall) {
		for (int slice = 0; slice < Sys.Recon.Nz; slice++) {
			KERNELCALL2(histogramReconKernel, contBlocks, contThreads, d_Histogram, slice, useLog, constants, volume);
		}
	}
	else {
		KERNELCALL2(histogramReconKernel, contBlocks, contThreads, d_Histogram, sliceIndex, useLog, constants, volume);
	}

	cuda(Memcpy(histogram, d_Histogram, HIST_BIN_COUNT * sizeof(unsigned int), cudaMemcpyDeviceToHost));

	cuda(Free(d_Histogram));

	return Tomo_OK;
}

TomoError TomoRecon::draw(int x, int y) {
	//interop update
	display(x, y);
	map(stream);

	if(constants.dataDisplay == projections){
		scale = max((float)Sys.Proj.Nx / (float)width, (float)Sys.Proj.Ny / (float)height) * pow(ZOOMFACTOR, -zoom);
cuda(BindTexture2D(NULL, textImage, d_Image, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny, projPitch)); } else { scale = max((float)Sys.Recon.Nx / (float)width, (float)Sys.Recon.Ny / (float)height) * pow(ZOOMFACTOR, -zoom); cuda(BindTexture2D(NULL, textImage, d_Image, cudaCreateChannelDesc<float>(), Sys.Recon.Nx, Sys.Recon.Ny, reconPitch)); } checkOffsets(&xOff, &yOff); cuda(BindSurfaceToArray(displaySurface, ca)); const int blocks = (width * height + PXL_KERNEL_THREADS_PER_BLOCK - 1) / PXL_KERNEL_THREADS_PER_BLOCK; if (blocks > 0) { if (constants.dataDisplay == projections) { KERNELCALL4(resizeKernelTex, blocks, PXL_KERNEL_THREADS_PER_BLOCK, 0, stream, Sys.Proj.Nx, Sys.Proj.Ny, width, height, scale, xOff, yOff, derDisplay != no_der, constants); if (constants.baseXr >= 0 && constants.currXr >= 0) KERNELCALL4(drawSelectionBox, blocks, PXL_KERNEL_THREADS_PER_BLOCK, 0, stream, max(I2D(baseX, true), I2D(currX, true)), max(I2D(baseY, false), I2D(currY, false)), min(I2D(baseX, true), I2D(currX, true)), min(I2D(baseY, false), I2D(currY, false)), width); if (lowXr >= 0) KERNELCALL4(drawSelectionBar, blocks, PXL_KERNEL_THREADS_PER_BLOCK, 0, stream, I2D(lowXr, true), I2D(lowYr, false), width, vertical); if (upXr >= 0) KERNELCALL4(drawSelectionBar, blocks, PXL_KERNEL_THREADS_PER_BLOCK, 0, stream, I2D(upXr, true), I2D(upYr, false), width, vertical); } else { KERNELCALL4(resizeKernelTex, blocks, PXL_KERNEL_THREADS_PER_BLOCK, 0, stream, Sys.Recon.Nx, Sys.Recon.Ny, width, height, scale, xOff, yOff, derDisplay != no_der, constants); if (constants.baseXr >= 0 && constants.currXr >= 0) KERNELCALL4(drawSelectionBox, blocks, PXL_KERNEL_THREADS_PER_BLOCK, 0, stream, max(I2D(constants.baseXr, true), I2D(constants.currXr, true)), max(I2D(constants.baseYr, false), I2D(constants.currYr, false)), min(I2D(constants.baseXr, true), I2D(constants.currXr, true)), min(I2D(constants.baseYr, false), I2D(constants.currYr, false)), width); if (lowXr >= 0) KERNELCALL4(drawSelectionBar, blocks, PXL_KERNEL_THREADS_PER_BLOCK, 0, stream, I2D(lowXr, true), I2D(lowYr, false), width, vertical); if (upXr >= 0) KERNELCALL4(drawSelectionBar, blocks, PXL_KERNEL_THREADS_PER_BLOCK, 0, stream, I2D(upXr, true), I2D(upYr, false), width, vertical); } } cuda(UnbindTexture(textImage)); //interop commands to ready buffer unmap(stream); blit(); return Tomo_OK; } TomoError TomoRecon::singleFrame(bool outputFrame, float** output, unsigned int * histogram) { //Initial projection switch (constants.dataDisplay) { case reconstruction: if (derDisplay != square_mag) {//only case frequently used that doesn't need this, leads to 3/2x speedup in autofocus //cuda(BindTexture2D(NULL, textError, d_Raw, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); cuda(BindTexture2D(NULL, textSino, d_Sino, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); KERNELCALL2(projectSlice, contBlocks, contThreads, d_Image, distance, constants); cuda(UnbindTexture(textSino)); //cuda(UnbindTexture(textError)); } break; case experimental: cuda(BindTexture2D(NULL, textSino, d_Raw, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); KERNELCALL2(projectSlice, contBlocks, contThreads, buff1, distance, constants); cuda(UnbindTexture(textSino)); break; case projections: cuda(Memcpy(d_Image, d_Sino + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); break; case iterRecon: #ifdef SHOWERROR KERNELCALL2(copySlice, contBlocks, 
contThreads, d_Image, sliceIndex, constants, surfErrorObj, constants.isReconstructing); #else //KERNELCALL2(copySlice, contBlocks, contThreads, d_Image, sliceIndex, constants, surfWeightObj, constants.isReconstructing); KERNELCALL2(copySlice, contBlocks, contThreads, d_Image, sliceIndex, constants, surfReconObj, constants.isReconstructing); #endif break; case synthetic2d: cuda(BindTextureToArray(textRecon, d_Recon2)); KERNELCALL2(synthetic2D, contBlocks, contThreads, d_Image, sliceIndex, constants); cuda(UnbindTexture(textRecon)); break; case sinogram: cuda(BindTexture2D(NULL, textSino, inXBuff, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); KERNELCALL2(getSinogram, contBlocks, contThreads, d_Image, constants); cuda(UnbindTexture(textSino)); break; case error: cuda(Memcpy2DAsync(d_Image, projPitch, d_Error + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny, cudaMemcpyDeviceToDevice)); break; } switch (derDisplay) { case no_der: break; case x_mag_enhance: switch (constants.dataDisplay) { case reconstruction: tomo_err_throw(project(inXBuff, buff1)); break; case projections: cuda(Memcpy(buff1, inXBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); break; case iterRecon: tomo_err_throw(imageKernel(d_gaussDer, d_gauss, buff1, false)); break; case error: break; } KERNELCALL2(add, contBlocks, contThreads, d_Image, buff1, d_Image, true, true, constants); break; case y_mag_enhance: switch (constants.dataDisplay) { case reconstruction: tomo_err_throw(project(inYBuff, buff1)); break; case projections: cuda(Memcpy(buff1, inYBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); break; case iterRecon: tomo_err_throw(imageKernel(d_gauss, d_gaussDer, buff1, false)); break; case error: break; } KERNELCALL2(add, contBlocks, contThreads, d_Image, buff1, d_Image, true, true, constants); break; case mag_enhance: switch (constants.dataDisplay) { case reconstruction: tomo_err_throw(project(inXBuff, buff1)); tomo_err_throw(project(inYBuff, buff2)); break; case projections: cuda(Memcpy(buff1, inXBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); cuda(Memcpy(buff2, inYBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); break; case iterRecon: case sinogram: case error: tomo_err_throw(imageKernel(d_gaussDer, d_gauss, buff1, false)); tomo_err_throw(imageKernel(d_gauss, d_gaussDer, buff2, false)); break; } KERNELCALL2(mag, contBlocks, contThreads, buff1, buff2, buff1, constants); KERNELCALL2(add, contBlocks, contThreads, d_Image, buff1, d_Image, true, false, constants); break; case x_enhance: switch (constants.dataDisplay) { case reconstruction: tomo_err_throw(project(inXBuff, buff1)); break; case projections: cuda(Memcpy(buff1, inXBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); break; case iterRecon: tomo_err_throw(imageKernel(d_gaussDer, d_gauss, buff1, false)); break; case error: break; } KERNELCALL2(add, contBlocks, contThreads, d_Image, buff1, d_Image, true, false, constants); break; case y_enhance: switch (constants.dataDisplay) { case reconstruction: tomo_err_throw(project(inYBuff, buff1)); break; case projections: cuda(Memcpy(buff1, inYBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); break; case iterRecon: tomo_err_throw(imageKernel(d_gauss, 
d_gaussDer, buff1, false)); break; case error: break; } KERNELCALL2(add, contBlocks, contThreads, d_Image, buff1, d_Image, true, false, constants); break; case both_enhance: switch (constants.dataDisplay) { case reconstruction: tomo_err_throw(project(inXBuff, buff1)); tomo_err_throw(project(inYBuff, buff2)); break; case projections: cuda(Memcpy(buff1, inXBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); cuda(Memcpy(buff2, inYBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); break; case iterRecon: tomo_err_throw(imageKernel(d_gaussDer, d_gauss, buff1, false)); tomo_err_throw(imageKernel(d_gauss, d_gaussDer, buff2, false)); break; case error: break; } KERNELCALL2(add, contBlocks, contThreads, buff1, buff2, buff1, false, false, constants); KERNELCALL2(add, contBlocks, contThreads, d_Image, buff1, d_Image, true, false, constants); break; case der_x: if (constants.dataDisplay == projections) { cuda(Memcpy(d_Image, inXBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); } else { tomo_err_throw(project(inXBuff, d_Image)); } break; case der_y: if (constants.dataDisplay == projections) { cuda(Memcpy(d_Image, inYBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); } else { tomo_err_throw(project(inYBuff, d_Image)); } break; case square_mag: switch (constants.dataDisplay) { case reconstruction: tomo_err_throw(project(inXBuff, buff1)); tomo_err_throw(project(inYBuff, d_Image)); break; case projections: cuda(Memcpy(buff1, inXBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); cuda(Memcpy(d_Image, inYBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); break; case iterRecon: tomo_err_throw(imageKernel(d_gaussDer, d_gauss, buff1, false)); tomo_err_throw(imageKernel(d_gauss, d_gaussDer, d_Image, false)); break; case error: break; } KERNELCALL2(squareMag, contBlocks, contThreads, d_Image, buff1, d_Image, constants); break; case slice_diff: { float xOff = -Sys.Geo.EmitX[constants.revGeo ? 
constants.Views - 1 - diffSlice : diffSlice] * distance / Sys.Geo.EmitZ[diffSlice] / Sys.Recon.Pitch_x; float yOff = -Sys.Geo.EmitY[diffSlice] * distance / Sys.Geo.EmitZ[diffSlice] / Sys.Recon.Pitch_y; KERNELCALL2(squareDiff, contBlocks, contThreads, d_Image, diffSlice, xOff, yOff, reconPitchNum, constants); } break; case der2_x: tomo_err_throw(imageKernel(d_gaussDer2, d_gauss, d_Image, constants.dataDisplay == projections)); break; case der2_y: tomo_err_throw(imageKernel(d_gauss, d_gaussDer2, d_Image, constants.dataDisplay == projections)); break; case der3_x: //imageKernel(d_gaussDer3, d_gauss, d_Image); break; case der3_y: //imageKernel(d_gauss, d_gaussDer3, d_Image); break; case mag_der: if (constants.dataDisplay == projections) { cuda(Memcpy(buff1, inXBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); cuda(Memcpy(buff2, inYBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); } else { //tomo_err_throw(project(inXBuff, buff1)); //tomo_err_throw(project(inYBuff, buff2)); tomo_err_throw(imageKernel(d_gaussDer, d_gauss, buff1, false)); tomo_err_throw(imageKernel(d_gauss, d_gaussDer, buff2, false)); } KERNELCALL2(mag, contBlocks, contThreads, d_Image, buff1, buff1, constants); //KERNELCALL2(add, contBlocks, contThreads, d_Image, buff2, buff1, false, true, constants); //KERNELCALL2(abs, contBlocks, contThreads, d_Image, buff1, constants); /*if (constants.dataDisplay == projections) { cuda(Memcpy(d_Image, inZBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); } else { tomo_err_throw(project(inZBuff, d_Image)); }*/ //tomo_err_throw(project(inXBuff, d_Image)); /*cuda(BindTexture2D(NULL, textError, inXBuff, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); for (int i = -KERNELRADIUS; i <= KERNELRADIUS; i++) KERNELCALL2(projectSliceZ, contBlocks, contThreads, zBuffs, i + KERNELRADIUS, distance + i*Sys.Geo.ZPitch, constants); cuda(UnbindTexture(textError)); KERNELCALL2(zConvolution, contBlocks, contThreads, d_Image, zBuffs, d_gaussDer, constants);*/ break; case z_der_mag: { /*if (dataDisplay == projections) { cuda(Memcpy(buff1, inXBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); cuda(Memcpy(buff2, inYBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); } else { tomo_err_throw(project(inXBuff, buff1)); tomo_err_throw(project(inYBuff, buff2)); } KERNELCALL2(mag, contBlocks, contThreads, buff1, buff2, buff1, reconPitchNum, reconPitchNum, constants);*/ if (constants.dataDisplay == projections) { cuda(Memcpy(d_Image, inZBuff + sliceIndex * projPitch / sizeof(float) * Sys.Proj.Ny, sizeIM, cudaMemcpyDeviceToDevice)); } else { cuda(BindTexture2D(NULL, textError, d_Sino, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); for (int i = -KERNELRADIUS; i <= KERNELRADIUS; i++) { KERNELCALL2(projectSliceZ, contBlocks, contThreads, zBuffs, i + KERNELRADIUS, -1, distance + i*Sys.Geo.ZPitch, constants); } cuda(UnbindTexture(textError)); KERNELCALL2(zConvolution, contBlocks, contThreads, buff1, zBuffs, d_gaussDer, constants); tomo_err_throw(project(inZBuff, buff2)); KERNELCALL2(sub, contBlocks, contThreads, buff1, buff2, d_Image, reconPitchNum, constants); } //KERNELCALL2(thresh, contBlocks, contThreads, buff2, buff1, d_Image, reconPitchNum, constants); /*KERNELCALL2(zConvolution, contBlocks, contThreads, buff1, 
zBuffs, d_gaussDer2, constants); KERNELCALL2(zConvolution, contBlocks, contThreads, buff2, zBuffs, d_gaussDer, constants); KERNELCALL2(div, contBlocks, contThreads, buff1, buff2, d_Image, reconPitchNum, constants);*/ //imageKernel(d_gauss, d_gauss, d_Image); //KERNELCALL2(add, contBlocks, contThreads, d_Image, buff1, d_Image, reconPitchNum, true, false, constants); } break; case norm_der: { #ifdef PRINTLINEDER //output image line, derivative and normalized derivative for some height //memcpy to line float float imageLine[1915]; int lineNum = 930; cuda(Memcpy(imageLine, &buff1[lineNum * constants.ReconPitchNum], 1915 * sizeof(float), cudaMemcpyDeviceToHost)); //output to csv std::ofstream outputFile; outputFile.open("imageLine.csv"); for (int test = 0; test < 1915; test++) outputFile << imageLine[test] << "\n"; outputFile.close(); tomo_err_throw(project(inXBuff, buff2)); cuda(Memcpy(imageLine, &buff2[lineNum * constants.ReconPitchNum], 1915 * sizeof(float), cudaMemcpyDeviceToHost)); outputFile.open("derLine.csv"); for (int test = 0; test < 1915; test++) outputFile << imageLine[test] << "\n"; outputFile.close(); tomo_err_throw(normProject(inXBuff, buff2, DERWEIGHTSTR)); cuda(Memcpy(imageLine, &buff2[lineNum * constants.ReconPitchNum], 1915 * sizeof(float), cudaMemcpyDeviceToHost)); outputFile.open("normDerLine.csv"); for (int test = 0; test < 1915; test++) outputFile << imageLine[test] << "\n"; outputFile.close(); KERNELCALL2(xConvIntegrate, contBlocks, contThreads, d_Image, buff2, buff1, 0, constants); //KERNELCALL2(xIntegrate, contBlocks, contThreads, d_Image, buff2, buff1, constants); cuda(Memcpy(imageLine, &d_Image[lineNum * constants.ReconPitchNum], 1915 * sizeof(float), cudaMemcpyDeviceToHost)); outputFile.open("intLine.csv"); for (int test = 0; test < 1915; test++) outputFile << imageLine[test] << "\n"; outputFile.close(); #else tomo_err_throw(normProject(inXBuff, d_Image, DERWEIGHTSTR)); //tomo_err_throw(normProject(d_Sino, buff1, DERWEIGHTSTR)); //tomo_err_throw(project(d_Sino, buff1)); //tomo_err_throw(project(inXBuff, d_Image)); //KERNELCALL2(xIntegrate, contBlocks, contThreads, d_Image, buff2, buff1, 0, constants); /* //current best integration method tomo_err_throw(normProject(inXBuff, buff2, DERWEIGHTSTR)); tomo_err_throw(project(d_Sino, buff1)); KERNELCALL2(xConvIntegrate, contBlocks, contThreads, d_Image, buff2, buff1, 0, constants); */ #endif //PRINTLINEDER } break; case abs_norm_der: tomo_err_throw(normProject(inXBuff, buff2, 6.0f)); KERNELCALL2(pow, contBlocks, contThreads, d_Image, buff2, 2.0f, constants); break; case square_norm_der: //cuda(BindTexture2D(NULL, textSino, inXBuff, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); //KERNELCALL2(normProjectSlice, contBlocks, contThreads, d_Image, distance, alignStr, constants); //cuda(UnbindTexture(textSino)); tomo_err_throw(normProject(inXBuff, buff1, 1.0f)); tomo_err_throw(normProject(inXBuff, buff2, 1.0f)); //KERNELCALL2(mag, contBlocks, contThreads, d_Image, buff1, buff2, constants); KERNELCALL2(pow, contBlocks, contThreads, d_Image, buff2, 2.0f, constants); break; } if (outputFrame) { *output = new float[Sys.Proj.Nx * Sys.Proj.Ny]; constants.baseXr = 7 * Sys.Proj.Nx / 8; constants.baseYr = 7 * Sys.Proj.Ny / 8; constants.currXr = Sys.Proj.Nx / 8; constants.currYr = Sys.Proj.Ny / 8; cuda(Memcpy2D(*output, Sys.Proj.Nx * sizeof(float), d_Image, projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny, cudaMemcpyDeviceToHost)); getHistogram(d_Image, reconPitch*Sys.Recon.Ny, histogram); 
constants.baseXr = -1;
		constants.baseYr = -1;
		constants.currXr = -1;
		constants.currYr = -1;
	}

	return Tomo_OK;
}

TomoError TomoRecon::findStartDistance() {
	derivative_t oldDisplay = derDisplay;
	derDisplay = abs_norm_der;
	float oldDis = distance;
	float currentVal;

	cuda(MemsetAsync(d_MaxVal, 0, sizeof(float)));
	singleFrame();
	KERNELCALL3(sumReduction, reductionBlocks, reductionThreads, reductionSize, d_Image, reconPitchNum, d_MaxVal, 0.0f, constants.Rx, 0.0f, constants.Ry);
	cuda(Memcpy(&currentVal, d_MaxVal, sizeof(float), cudaMemcpyDeviceToHost));
	float focusVal = currentVal;

	do {
		distance -= constants.pitchZ;
		cuda(MemsetAsync(d_MaxVal, 0, sizeof(float)));
		singleFrame();
		KERNELCALL3(sumReduction, reductionBlocks, reductionThreads, reductionSize, d_Image, reconPitchNum, d_MaxVal, 0.0f, constants.Rx, 0.0f, constants.Ry);
		cuda(Memcpy(&currentVal, d_MaxVal, sizeof(float), cudaMemcpyDeviceToHost));
	} while (currentVal > focusVal / 20.0f);
	constants.startDis = distance - 1.0f;
	if (constants.startDis < 0.0f) constants.startDis = 0.0f;

	distance = oldDis;
	do {
		distance += constants.pitchZ;
		cuda(MemsetAsync(d_MaxVal, 0, sizeof(float)));
		singleFrame();
		KERNELCALL3(sumReduction, reductionBlocks, reductionThreads, reductionSize, d_Image, reconPitchNum, d_MaxVal,
			//constants.Rx / 8.0f, 7.0f * constants.Rx / 8.0f, constants.Ry / 8.0f, 7.0f * constants.Ry / 8.0f);
			0.0f, constants.Rx, 0.0f, constants.Ry);
		cuda(Memcpy(&currentVal, d_MaxVal, sizeof(float), cudaMemcpyDeviceToHost));
	} while (currentVal > focusVal / 20.0f);
	constants.slices = (distance - constants.startDis + 1.0f) / constants.pitchZ;
	if (constants.slices > 50) constants.slices = 50;
	Sys.Recon.Nz = constants.slices;

	distance = oldDis;
	derDisplay = oldDisplay;
	singleFrame();

	return Tomo_OK;
}

float TomoRecon::binarySearch(float(TomoRecon::*getError)(), float ** var, float * startPos, int dimensions, float startStep, float resolution, float limit) {
	**var = *startPos;
	float bestVar = **var;
	float bestErr = (*this.*getError)();
	for (**var -= limit; **var < *startPos + limit; **var += startStep) {
		float newErr = dimensions == 1 ? (*this.*getError)() : binarySearch(getError, var + 1, startPos + 1, dimensions - 1, startStep, resolution, limit);
		if (newErr > bestErr) {
			bestErr = newErr;
			bestVar = **var;
		}
		if (binFile.is_open() && dimensions == 1) {
			binFile << **(var - 1) << ", ";
			binFile << **var << ", ";
			binFile << std::setprecision(10) << newErr << "\n";
		}
	}
	**var = bestVar;

	float thisStep = startStep / 2.0f;
	while (abs(thisStep) > resolution) {
		**var += thisStep;
		float newErr = dimensions == 1 ?
(*this.*getError)() : binarySearch(getError, var + 1, startPos + 1, dimensions - 1, startStep, resolution, limit); if (binFile.is_open() && dimensions == 1) { binFile << **(var - 1) << ", "; binFile << **var << ", "; binFile << std::setprecision(10) << newErr << "\n"; } if (newErr > bestErr) { bestErr = newErr; bestVar = **var; } else { **var -= thisStep; thisStep /= -2.0f; } } **var = bestVar; //rerun to reset best value of lower dimensions //if(dimensions > 1) binarySearch(getError, var + 1, startPos + 1, dimensions - 1, startStep, resolution, limit); return bestErr; } TomoError TomoRecon::autoFocus2() { derivative_t oldDisplay = derDisplay; derDisplay = square_mag; distance = constants.startDis + constants.pitchZ*constants.slices / 2.0f; float startDis = distance; float * disPtr = &distance; binarySearch(&TomoRecon::focusHelper, &disPtr, &startDis, 1, constants.pitchZ, LASTSTEP, constants.pitchZ*constants.slices / 2.0f); derDisplay = oldDisplay; singleFrame(); return Tomo_OK; } TomoError TomoRecon::autoFocus(bool firstRun, bool checkFlip) { static float step; static float best; static bool linearRegion; static bool firstLin = true; static float bestDist; static int bestSlice; static derivative_t oldDisplay; if (firstRun) { step = constants.pitchZ; distance = constants.startDis; bestDist = constants.startDis; sliceIndex = 0; bestSlice = sliceIndex; best = 0; linearRegion = false; oldDisplay = derDisplay; derDisplay = square_mag; } float newVal = focusHelper(); if (checkFlip && firstRun) { constants.revGeo = !constants.revGeo; float testVal = focusHelper(); if (testVal < newVal) constants.revGeo = !constants.revGeo; } if (!linearRegion) { if (newVal > best) { best = newVal; bestDist = distance; bestSlice = sliceIndex; } distance += step; sliceIndex++; if (distance > constants.startDis + constants.pitchZ*constants.slices || sliceIndex >= constants.slices) { linearRegion = true; firstLin = true; distance = bestDist; sliceIndex = bestSlice; if (constants.dataDisplay == iterRecon) { derDisplay = oldDisplay; singleFrame(); return Tomo_Done; } } } else { //compare to current if (newVal > best) { best = newVal; bestDist = distance; distance += step; } else { if(!firstLin) distance -= step;//revert last move //find next step step = -step / 2; if (abs(step) < LASTSTEP) { derDisplay = oldDisplay; singleFrame(); return Tomo_Done; } else distance += step; } firstLin = false; } return Tomo_OK; } TomoError TomoRecon::autoGeo2(int beam, float & XVal, float & YVal) { derivative_t oldDisplay = derDisplay; //derDisplay = square_mag; derDisplay = square_norm_der; float * vars[2]; float start[2]; vars[0] = &Sys.Geo.EmitX[beam]; vars[1] = &Sys.Geo.EmitY[beam]; start[0] = *vars[0]; start[1] = *vars[1]; float oldX = *vars[0]; float oldY = *vars[1]; sliceIndex = beam; constants.geoTesting = true; bool activeBeams[NUMVIEWS] = {}; activeBeams[NUMVIEWS / 2] = true; activeBeams[beam] = true; cuda(MemcpyAsync(constants.useBeams, activeBeams, Sys.Proj.NumViews * sizeof(bool), cudaMemcpyHostToDevice)); char filename[100]; sprintf(filename, "geoOutBeam%d.txt", beam); binFile.open(filename); binarySearch(&TomoRecon::geoHelper, vars, start, 2, 0.1f, 0.1f, 3.0f); //binarySearch(&TomoRecon::geoHelper, vars, start, 2, 0.3f, 0.01f, 3.0f); binFile.close(); constants.geoTesting = false; XVal = *vars[0]; YVal = *vars[1]; *vars[0] = oldX; *vars[1] = oldY; cuda(MemcpyAsync(constants.d_Beamx, Sys.Geo.EmitX, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamy, Sys.Geo.EmitY, 
Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(constants.useBeams, Sys.Proj.activeBeams, Sys.Proj.NumViews * sizeof(bool), cudaMemcpyHostToDevice)); derDisplay = oldDisplay; singleFrame(); return Tomo_OK; } TomoError TomoRecon::autoGeo(bool firstRun, int beam, float &returnVal, int &yIter, float &maxXVal, float &maxYVal) { static float newXGeo; static float newYGeo; static float maxMag; static bool xLinear = false; static bool yLinear = false; float xLimit = 5.0f; static float xStep = 1.0f; float yLimit = 3.0f; static float yStep = 1.0f; static derivative_t oldDisplay; static int oldActiveSlice; static bool oldLog; static float xGeo[NUMVIEWS]; static float yGeo[NUMVIEWS]; bool activeBeams[NUMVIEWS] = {}; activeBeams[NUMVIEWS / 2] = true; activeBeams[beam] = true; constants.geoTesting = true; if (firstRun) { memcpy(xGeo, Sys.Geo.EmitX, sizeof(float)*NUMVIEWS); memcpy(yGeo, Sys.Geo.EmitY, sizeof(float)*NUMVIEWS); newXGeo = Sys.Geo.EmitX[beam] - xLimit; newYGeo = Sys.Geo.EmitY[beam] - yLimit; oldDisplay = derDisplay; oldActiveSlice = sliceIndex; oldLog = constants.log; constants.log = false; derDisplay = square_mag; //derDisplay = mag_der; yIter = 0; maxMag = 0.0f; xLinear = false; yLinear = false; xStep = 1.0f; yStep = 1.0f; } else { newXGeo += xStep; } Sys.Geo.EmitX[beam] = newXGeo; Sys.Geo.EmitY[beam] = newYGeo; cuda(MemcpyAsync(constants.d_Beamx, Sys.Geo.EmitX, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamy, Sys.Geo.EmitY, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(constants.useBeams, activeBeams, Sys.Proj.NumViews * sizeof(bool), cudaMemcpyHostToDevice)); constants.dataDisplay = projections; sliceIndex = beam; setProjBox(beam); float normalVal = focusHelper() / 2; constants.dataDisplay = reconstruction; returnVal = focusHelper(); returnVal -= normalVal; if (returnVal > maxMag) { maxMag = returnVal; maxXVal = newXGeo; maxYVal = newYGeo; } else { //revert step if (xLinear) { newXGeo -= xStep; xStep = -xStep / 2; } else { if (newXGeo >= xGeo[beam] + xLimit) { xLinear = true; newXGeo = maxXVal; } } } if (abs(xStep) < 0.01f) { if (yLinear) { newYGeo -= yStep; yStep = -yStep / 2; if (abs(yStep) < 0.01f) { memcpy(Sys.Geo.EmitX, xGeo, sizeof(float)*NUMVIEWS); memcpy(Sys.Geo.EmitY, yGeo, sizeof(float)*NUMVIEWS); cuda(MemcpyAsync(constants.d_Beamx, Sys.Geo.EmitX, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamy, Sys.Geo.EmitY, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(constants.useBeams, Sys.Proj.activeBeams, Sys.Proj.NumViews * sizeof(bool), cudaMemcpyHostToDevice)); constants.geoTesting = false; derDisplay = oldDisplay; sliceIndex = oldActiveSlice; constants.log = oldLog; constants.dataDisplay = reconstruction; yStep = 1.0f; return Tomo_Done; } } else { if (newYGeo >= yGeo[beam] + yLimit) { yLinear = true; newYGeo = maxYVal; } } xStep = 1.0f; newXGeo = xGeo[beam] - xLimit - xStep; newYGeo += yStep; xLinear = false; yIter++; } return Tomo_OK; } TomoError TomoRecon::autoLight(unsigned int histogram[HIST_BIN_COUNT], int threshold, float * minVal, float * maxVal) { int innerThresh = threshold; bool emptyHist = false; if (histogram == NULL) { emptyHist = true; histogram = new unsigned int[HIST_BIN_COUNT]; if (constants.dataDisplay == projections) { tomo_err_throw(getHistogram(d_Image, projPitch*Sys.Proj.Ny, histogram)); } else { tomo_err_throw(getHistogram(d_Image, reconPitch*Sys.Recon.Ny, histogram)); } 
innerThresh = abs(constants.baseXr - constants.currXr) * abs(constants.baseYr - constants.currYr) / AUTOTHRESHOLD;
		minVal = &constants.minVal;
		maxVal = &constants.maxVal;
	}

	/*std::ofstream outputFile;
	char outFilename[250];
	sprintf(outFilename, "./histogramOutRecon.txt");
	outputFile.open(outFilename);
	for (int test = 1; test < 255; test++) outputFile << histogram[test] << "\n";
	outputFile.close();*/

	int i;
	for (i = 0; i < HIST_BIN_COUNT; i++) {
		unsigned int count = histogram[i];
		if (count > innerThresh) break;
	}
	if (i >= HIST_BIN_COUNT) i = 0;
	*minVal = i * UCHAR_MAX;

	//go from the reverse direction for maxval
	for (i = HIST_BIN_COUNT - 1; i >= 0; i--) {
		unsigned int count = histogram[i];
		if (count > innerThresh) break;
	}
	if (i < 0) i = HIST_BIN_COUNT;
	*maxVal = i * UCHAR_MAX;

	if (*minVal == *maxVal) *maxVal += UCHAR_MAX;

	if (emptyHist) delete[] histogram;

	return Tomo_OK;
}

TomoError TomoRecon::readPhantom(float * resolution) {
	if (vertical) {
		float phanScale = (lowYr - upYr) / (1/LOWERBOUND - 1/UPPERBOUND);
		float * h_xDer2 = (float*)malloc(reconPitch*Sys.Recon.Ny);
		cuda(Memcpy(h_xDer2, d_Image, reconPitch*Sys.Recon.Ny, cudaMemcpyDeviceToHost));
		//Get x range from the bounding box
		int startX = min(constants.baseXr, constants.currXr);
		int endX = max(constants.baseXr, constants.currXr);
		int thisY = lowYr;//Get beginning y val from tick mark
		bool ascend = upYr > lowYr;
		int increment = ascend ? 1 : -1;
		while ((!ascend && thisY >= upYr) || (ascend && thisY <= upYr)) {//y counts down
			int thisX = startX;
			int negCross = 0;
			bool negativeSpace = false;
			float negAcc = 0;
			while (thisX < endX) {
				float val = h_xDer2[thisY * reconPitchNum + thisX];
				h_xDer2[thisY * reconPitchNum + thisX] = val / 10.0f;
				if (negativeSpace) {
					if (val > 0) {
						negativeSpace = false;
						if (negAcc < -INTENSITYTHRESH) {
							negCross++;
						}
					}
					else {
						negAcc += val;
					}
				}
				else {
					if (val < 0) {
						negativeSpace = true;
						negAcc = val;
					}
				}
				thisX++;
			}

			if (negCross < LINEPAIRS) {
				thisY -= increment;
				break;
			}
			thisY += increment;
		}

		*resolution = phanScale / (thisY - lowYr + phanScale / LOWERBOUND);

		cuda(Memcpy(d_Image, h_xDer2, reconPitch*Sys.Recon.Ny, cudaMemcpyHostToDevice));

		free(h_xDer2);
	}
	else {
		float phanScale = (lowXr - upXr) / (1 / LOWERBOUND - 1 / UPPERBOUND);
		float * h_yDer2 = (float*)malloc(reconPitchNum*Sys.Recon.Ny * sizeof(float));
		cuda(Memcpy(h_yDer2, d_Image, reconPitchNum*Sys.Recon.Ny * sizeof(float), cudaMemcpyDeviceToHost));
		//Get y range from the bounding box
		int startY = min(constants.baseYr, constants.currYr);
		int endY = max(constants.baseYr, constants.currYr);
		int thisX = lowXr;//Get beginning x val from tick mark
		bool ascend = upXr > lowXr;
		int increment = ascend ?
1 : -1; while ((!ascend && thisX >= upXr) || (ascend && thisX <= upXr)) {//y counts down int thisY = startY; int negCross = 0; bool negativeSpace = false; float negAcc = 0; while (thisY < endY) { float val = h_yDer2[thisY * reconPitchNum + thisX]; h_yDer2[thisY * reconPitchNum + thisX] = val / 10.0f; if (negativeSpace) { if (val > 0) { negativeSpace = false; if (negAcc < -INTENSITYTHRESH) { negCross++; } } else { negAcc += val; } } else { if (val < 0) { negativeSpace = true; negAcc = val; } } thisY++; } if (negCross < LINEPAIRS) { thisX -= increment; break; } thisX += increment; } *resolution = phanScale / (thisX - lowXr + phanScale / LOWERBOUND); cuda(Memcpy(d_Image, h_yDer2, reconPitch*Sys.Recon.Ny, cudaMemcpyHostToDevice)); free(h_yDer2); } return Tomo_OK; } TomoError TomoRecon::initTolerances(std::vector<toleranceData> &data, int numTests, std::vector<float> offsets) { //start with a control, but make sure toleranceData control; control.name += " none"; control.numViewsChanged = 0; control.viewsChanged = 0; control.offset = 0; data.push_back(control); //start set as just the combinations for (int i = 0; i < NUMVIEWS; i++) { int resultsLen = (int)data.size();//size will change every iteration, pre-record it int binRep = 1 << i; for (int j = 1; j < resultsLen; j++) { toleranceData newData = data[j]; newData.name += "+"; newData.name += std::to_string(i); newData.numViewsChanged++; newData.viewsChanged |= binRep; data.push_back(newData); } //add the base toleranceData newData; newData.name += std::to_string(i); newData.numViewsChanged = 1; newData.viewsChanged = binRep; data.push_back(newData); } //blow up with the diffent directions int combinations = (int)data.size();//again, changing sizes later on for (int i = 1; i < combinations; i++) { toleranceData baseline = data[i]; baseline.thisDir = dir_y; data.push_back(baseline); baseline.thisDir = dir_z; data.push_back(baseline); } //then fill in the set with all the view changes combinations = (int)data.size();//again, changing sizes later on for (int i = 1; i < combinations; i++) { toleranceData baseline = data[i]; for (int j = 0; j < offsets.size() - 1; j++) {//skip the last toleranceData newData = baseline; newData.offset = offsets[j]; data.push_back(newData); } //the last one is done in place data[i].offset = offsets[offsets.size() - 1]; } return Tomo_OK; } TomoError TomoRecon::testTolerances(std::vector<toleranceData> &data, bool firstRun) { static auto iter = data.begin(); if (firstRun) { if(vertical) derDisplay = der2_x; else derDisplay = der2_y; tomo_err_throw(singleFrame()); tomo_err_throw(autoLight()); iter = data.begin(); return Tomo_OK; } if (iter == data.end()) { derDisplay = no_der; return Tomo_Done; } float geo[NUMVIEWS]; switch (iter->thisDir) { case dir_x: memcpy(geo, Sys.Geo.EmitX, sizeof(float)*NUMVIEWS); break; case dir_y: memcpy(geo, Sys.Geo.EmitY, sizeof(float)*NUMVIEWS); break; case dir_z: memcpy(geo, Sys.Geo.EmitZ, sizeof(float)*NUMVIEWS); break; } for (int i = 0; i < NUMVIEWS; i++) { bool active = ((iter->viewsChanged >> i) & 1) > 0;//Shift, mask and check if (!active) continue; if (i < NUMVIEWS / 2) geo[i] -= iter->offset; else geo[i] += iter->offset; } //be safe, recopy values to overwrite previous iterations switch (iter->thisDir) { case dir_x: cuda(MemcpyAsync(constants.d_Beamx, geo, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamy, Sys.Geo.EmitY, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamz, Sys.Geo.EmitZ, 
Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); break; case dir_y: cuda(MemcpyAsync(constants.d_Beamx, Sys.Geo.EmitX, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamy, geo, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamz, Sys.Geo.EmitZ, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); break; case dir_z: cuda(MemcpyAsync(constants.d_Beamx, Sys.Geo.EmitX, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamy, Sys.Geo.EmitY, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamz, geo, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); break; } singleFrame(); float readVal; tomo_err_throw(readPhantom(&readVal)); iter->phantomData = readVal; ++iter; return Tomo_OK; } TomoError TomoRecon::initIterative() { #ifdef PRINTMEMORYUSAGE size_t avail_mem; size_t total_mem; cudaMemGetInfo(&avail_mem, &total_mem); std::cout << "Iter start vailable memory: " << avail_mem << "/" << total_mem << "\n"; #endif // PRINTMEMORYUSAGE iteration = 0; decay = 1.0f; iterativeInitialized = true; constants.isReconstructing = true; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); cudaExtent vol = { Sys.Recon.Nx, Sys.Recon.Ny, Sys.Recon.Nz }; cuda(Malloc3DArray(&d_Recon2, &channelDesc, vol, cudaArraySurfaceLoadStore)); cuda(Malloc3DArray(&d_ReconWeight, &channelDesc, vol, cudaArraySurfaceLoadStore)); #ifdef SHOWERROR cuda(Malloc3DArray(&d_ReconError, &channelDesc, vol, cudaArraySurfaceLoadStore)); #endif cuda(MallocPitch((void**)&d_ReconOld, &reconPitch, Sys.Recon.Nx * sizeof(float), Sys.Recon.Ny)); reconPitchNum = (int)reconPitch / sizeof(float); constants.ReconPitchNum = reconPitchNum; struct cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeArray; resDesc.res.array.array = d_Recon2; cuda(CreateSurfaceObject(&surfReconObj, &resDesc)); resDesc.res.array.array = d_ReconWeight; cuda(CreateSurfaceObject(&surfWeightObj, &resDesc)); #ifdef SHOWERROR resDesc.res.array.array = d_ReconError; cuda(CreateSurfaceObject(&surfErrorObj, &resDesc)); #endif #ifdef RECONDERIVATIVE cuda(Memcpy2DAsync(d_Error, projPitch, inXBuff, projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny*NUMVIEWS, cudaMemcpyDeviceToDevice)); #else for (int view = 0; view < NumViews; view++) { cuda(Memcpy2DAsync(d_Error + view * projPitch / sizeof(float) * Sys.Proj.Ny, projPitch, d_Sino + view * projPitch / sizeof(float) * Sys.Proj.Ny, projPitch, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny, cudaMemcpyDeviceToDevice)); //cuda(Memset2DAsync(d_Error + view * projPitch / sizeof(float) * Sys.Proj.Ny, projPitch, 0, Sys.Proj.Nx * sizeof(float), Sys.Proj.Ny)); #ifdef INVERSEITER KERNELCALL2(invert, contBlocks, contThreads, d_Error + view * projPitch / sizeof(float) * Sys.Proj.Ny, constants); #endif // INVERSEITER } #endif // RECONDERIVATIVE cuda(BindTexture2D(NULL, textError, d_Error, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); cuda(BindTexture2D(NULL, textWeight, d_Weights, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); for (int slice = 0; slice < Sys.Recon.Nz; slice++) { distance = constants.startDis + slice * constants.pitchZ; cuda(BindTexture2D(NULL, textSino, d_Raw, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); KERNELCALL2(projectSlice, contBlocks, contThreads, buff1, 
			distance, constants);
		cuda(UnbindTexture(textSino));

		tomo_err_throw(normProject(inXBuff, buff2, DERWEIGHTSTR));
		//tomo_err_throw(project(inXBuff, buff2));
		KERNELCALL2(xConvIntegrate, contBlocks, contThreads, NULL, buff2, buff1, slice, constants, surfWeightObj);
		//KERNELCALL2(initArray, contBlocks, contThreads, slice, slice * 1000.0f, constants, surfWeightObj);
		KERNELCALL2(initArray, contBlocks, contThreads, slice, 0.0f, constants, surfReconObj);
		KERNELCALL2(copySlice, contBlocks, contThreads, d_ReconOld, slice, constants, surfReconObj);
#ifdef SHOWERROR
		KERNELCALL2(projectIter, contBlocks, contThreads, d_ReconOld, slice, 1.0f, true, constants, surfReconObj, surfErrorObj);
#else
		KERNELCALL2(projectIter, contBlocks, contThreads, d_Sino, d_ReconOld, d_Weights, slice, iteration, true, decay, constants, surfReconObj, surfWeightObj, true);
#endif
	}
	cuda(UnbindTexture(textError));
	cuda(UnbindTexture(textWeight));

	//Get normalization factor for weight volume
	constants.baseXr = 0;
	constants.baseYr = 0;
	constants.currXr = Sys.Recon.Nx;
	constants.currYr = Sys.Recon.Ny;

	float minVal;
	unsigned int histogram[HIST_BIN_COUNT];
	tomo_err_throw(getHistogramRecon(histogram, surfWeightObj, true, false));
	tomo_err_throw(autoLight(histogram, 20, &minVal, &constants.weightMax));

	constants.baseXr = -1;
	constants.baseYr = -1;
	constants.currXr = -1;
	constants.currYr = -1;

#ifdef PRINTMEMORYUSAGE
	cudaMemGetInfo(&avail_mem, &total_mem);
	std::cout << "Iter end available memory: " << avail_mem << "/" << total_mem << "\n";
#endif // PRINTMEMORYUSAGE

	return Tomo_OK;
}

TomoError TomoRecon::resetIterative() {
#ifdef PRINTMEMORYUSAGE
	size_t avail_mem;
	size_t total_mem;
	cudaMemGetInfo(&avail_mem, &total_mem);
	std::cout << "Iter free start available memory: " << avail_mem << "/" << total_mem << "\n";
#endif // PRINTMEMORYUSAGE

	cuda(DestroySurfaceObject(surfReconObj));
	cuda(Free(d_ReconOld));
	cuda(FreeArray(d_Recon2));

#ifdef PRINTMEMORYUSAGE
	cudaMemGetInfo(&avail_mem, &total_mem);
	std::cout << "Iter free end available memory: " << avail_mem << "/" << total_mem << "\n";
#endif // PRINTMEMORYUSAGE

	return Tomo_OK;
}

TomoError TomoRecon::iterStep() {
	iteration = 1.0f;
	decay *= ALPHA;

	cuda(BindTextureToArray(textRecon, d_Recon2));
#ifdef RECONDERIVATIVE
	for (int view = 0; view < NumViews; view++) {
		KERNELCALL2(backProject, contBlocks, contThreads, inXBuff + view * projPitch / sizeof(float) * Sys.Proj.Ny, d_Error + view * projPitch / sizeof(float) * Sys.Proj.Ny, view, constants);
	}
#else
	for (int view = 0; view < NumViews; view++) {
		KERNELCALL2(backProject, contBlocks, contThreads, d_Sino + view * projPitch / sizeof(float) * Sys.Proj.Ny, d_Error + view * projPitch / sizeof(float) * Sys.Proj.Ny, d_Weights + view * projPitch / sizeof(float) * Sys.Proj.Ny, view, iteration, ITERATIONS, surfWeightObj, constants);
	}
#endif // RECONDERIVATIVE
	cuda(UnbindTexture(textRecon));

	cuda(BindTexture2D(NULL, textSino, d_Sino, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch));
	cuda(BindTexture2D(NULL, textError, d_Error, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch));
	cuda(BindTexture2D(NULL, textWeight, d_Weights, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch));
	for (int slice = 0; slice < Sys.Recon.Nz; slice++) {
		KERNELCALL2(copySlice, contBlocks, contThreads, d_ReconOld, slice, constants, surfReconObj);
#ifdef SHOWERROR
		KERNELCALL2(projectIter, contBlocks, contThreads, d_ReconOld, slice, iteration, SKIPITERTV, constants,
surfReconObj, surfErrorObj); #else KERNELCALL2(projectIter, contBlocks, contThreads, d_Sino, d_ReconOld, d_Weights, slice, iteration, SKIPITERTV, decay, constants, surfReconObj, surfWeightObj); #endif /*for (int i = 0; i < TVITERATIONS; i++) { KERNELCALL2(copySlice, contBlocks, contThreads, d_ReconOld, slice, constants); KERNELCALL2(projectIter, contBlocks, contThreads, d_ReconOld, slice, iteration++, true, constants); }*/ } cuda(UnbindTexture(textError)); cuda(UnbindTexture(textWeight)); //cuda(UnbindTexture(textSino)); iteration++; return Tomo_OK; } TomoError TomoRecon::finalizeIter() { //no longer need gradient records //cuda(FreeArray(d_ReconDelta)); //cuda(DestroySurfaceObject(surfDeltaObj)); #ifdef VERBOSEMEMORY size_t avail_mem, total_mem; cudaMemGetInfo(&avail_mem, &total_mem); std::cout << "Iter final start available memory: " << avail_mem << "/" << total_mem << "\n"; #endif // VERBOSEMEMORY constants.isReconstructing = false; #ifdef INVERSEITER for (int slice = 0; slice < Sys.Recon.Nz; slice++) KERNELCALL2(invertRecon, contBlocks, contThreads, slice, constants, surfReconObj); #endif //INVERSEITER constants.baseXr = 0; constants.baseYr = 0; constants.currXr = Sys.Recon.Nx; constants.currYr = Sys.Recon.Ny; float maxVal, minVal; unsigned int histogram[HIST_BIN_COUNT]; tomo_err_throw(getHistogramRecon(histogram, surfReconObj, true, false)); tomo_err_throw(autoLight(histogram, 20, &minVal, &maxVal)); //histogram equalization approximation by width and offset float scales[HIST_BIN_COUNT]; float offsets[HIST_BIN_COUNT]; int yIndex = 0; int activeViews = 0; float reconRatio = 0, sumRecon = 0; for (int i = 0; i < HIST_BIN_COUNT; i++) { reconRatio += inputHistogram[i]; sumRecon += histogram[i]; } reconRatio /= sumRecon; for (int i = 0; i < NumViews; i++) if (Sys.Proj.activeBeams[i]) activeViews++; float y1 = 0.0f, y2, h2 = inputHistogram[yIndex]; for (int i = 0; i < HIST_BIN_COUNT; i++) { float h1 = histogram[i] * reconRatio; while (h1 > h2) { h1 -= h2; if (++yIndex >= HIST_BIN_COUNT) break; h2 = inputHistogram[yIndex]; } if (yIndex >= HIST_BIN_COUNT) { //Overflow logic scales[i] = scales[i - 1]; offsets[i] = offsets[i - 1]; continue; } h2 -= h1; float maxH2 = inputHistogram[yIndex]; if(maxH2 > 0) y2 = yIndex + (maxH2 - h2) / maxH2; else y2 = yIndex; scales[i] = y2 - y1;//scale * (x2 - x1) = (y2 - y1) offsets[i] = (y1 + y2) / 2.0f - scales[i] * (float)(2 * i + 1) / 2.0f;//offset + scale * (x1 + x2) / 2 = (y1 + y2) / 2 y1 = y2; } float * d_scales, * d_offsets; cudaMalloc(&d_scales, HIST_BIN_COUNT * sizeof(float)); cudaMalloc(&d_offsets, HIST_BIN_COUNT * sizeof(float)); cudaMemcpy(d_scales, scales, HIST_BIN_COUNT * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_offsets, offsets, HIST_BIN_COUNT * sizeof(float), cudaMemcpyHostToDevice); //cuda(BindSurfaceToArray(surfRecon, d_Recon2)); if(!Sys.Proj.saturated) for (int slice = 0; slice < Sys.Recon.Nz; slice++) KERNELCALL2(scaleRecon, contBlocks, contThreads, slice, d_scales, d_offsets, constants, surfReconObj); cuda(Free(d_scales)); cuda(Free(d_offsets)); cuda(UnbindTexture(textSino)); cuda(BindTexture2D(NULL, textSino, d_Raw, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch)); for (int slice = 0; slice < Sys.Recon.Nz; slice++) { KERNELCALL2(projectFinalIter, contBlocks, contThreads, slice, constants, surfReconObj); } cuda(UnbindTexture(textSino)); #ifdef PRINTINTENSITIES tomo_err_throw(getHistogramRecon(histogram, surfReconObj, true, false)); std::ofstream outputFile; char outFilename[250]; 
sprintf(outFilename, "./histogramOutRecon.txt"); outputFile.open(outFilename); float scaleFactor = (float)Sys.Proj.Nx / (float)Sys.Recon.Nx * (float)Sys.Proj.Ny / (float)Sys.Recon.Ny / (float)Sys.Recon.Nz; for (int test = 1; test < HIST_BIN_COUNT; test++) outputFile << histogram[test] * scaleFactor << "\n";// / Sys.Recon.Nz outputFile.close(); #endif //PRINTINTENSITIES constants.baseXr = -1; constants.baseYr = -1; constants.currXr = -1; constants.currYr = -1; #ifdef VERBOSEMEMORY cudaMemGetInfo(&avail_mem, &total_mem); std::cout << "Iter final end available memory: " << avail_mem << "/" << total_mem << "\n"; #endif // VERBOSEMEMORY //cuda(DestroySurfaceObject(surfWeightObj)); //cuda(FreeArray(d_ReconWeight)); return Tomo_OK; } /****************************************************************************/ /* Kernel launch helpers */ /****************************************************************************/ inline float TomoRecon::geoHelper() { cuda(MemcpyAsync(constants.d_Beamx, Sys.Geo.EmitX, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); cuda(MemcpyAsync(constants.d_Beamy, Sys.Geo.EmitY, Sys.Proj.NumViews * sizeof(float), cudaMemcpyHostToDevice)); float returnVal; //constants.dataDisplay = projections; //setProjBox(sliceIndex); //float normalVal = focusHelper() / 2; constants.dataDisplay = reconstruction; returnVal = focusHelper(); //returnVal -= normalVal; return returnVal; } inline float TomoRecon::focusHelper() { //Render new frame singleFrame(); //get the focus metric float currentBest; cuda(MemsetAsync(d_MaxVal, 0, sizeof(float))); //TODO: check boundary conditions if (constants.dataDisplay == projections) { KERNELCALL3(sumReduction, reductionBlocks, reductionThreads, reductionSize, d_Image, projPitch / sizeof(float), d_MaxVal, min(baseX, currX), max(baseX, currX), min(baseY, currY), max(baseY, currY)); } else { KERNELCALL3(sumReduction, reductionBlocks, reductionThreads, reductionSize, d_Image, reconPitchNum, d_MaxVal, min(constants.baseXr, constants.currXr), max(constants.baseXr, constants.currXr), min(constants.baseYr, constants.currYr), max(constants.baseYr, constants.currYr)); } cuda(Memcpy(&currentBest, d_MaxVal, sizeof(float), cudaMemcpyDeviceToHost)); return currentBest; } inline TomoError TomoRecon::imageKernel(float xK[KERNELSIZE], float yK[KERNELSIZE], float * output, bool projs) { if (projs) { cuda(BindTexture2D(NULL, textImage, d_Image, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny, projPitch)); KERNELCALL2(convolutionRowsKernel, contBlocks, contThreads, d_Image2, xK, constants); cuda(UnbindTexture(textImage)); cuda(BindTexture2D(NULL, textImage, d_Image2, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny, projPitch)); KERNELCALL2(convolutionColumnsKernel, contBlocks, contThreads, output, yK, constants); cuda(UnbindTexture(textImage)); } else { cuda(BindTexture2D(NULL, textImage, d_Image, cudaCreateChannelDesc<float>(), Sys.Recon.Nx, Sys.Recon.Ny, reconPitch)); KERNELCALL2(convolutionRowsKernel, contBlocks, contThreads, d_Image2, xK, constants); cuda(UnbindTexture(textImage)); cuda(BindTexture2D(NULL, textImage, d_Image2, cudaCreateChannelDesc<float>(), Sys.Recon.Nx, Sys.Recon.Ny, reconPitch)); KERNELCALL2(convolutionColumnsKernel, contBlocks, contThreads, output, yK, constants); cuda(UnbindTexture(textImage)); } return Tomo_OK; } inline TomoError TomoRecon::project(float * projections, float * reconstruction) { //cuda(BindTexture2D(NULL, textError, projections, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, 
Sys.Proj.Ny*Sys.Proj.NumViews, projPitch));
	cuda(BindTexture2D(NULL, textSino, projections, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch));
	KERNELCALL2(projectSlice, contBlocks, contThreads, reconstruction, distance, constants);
	cuda(UnbindTexture(textSino));
	//cuda(UnbindTexture(textError));

	return Tomo_OK;
}

inline TomoError TomoRecon::normProject(float * projections, float * reconstruction, float alignStr) {
	cuda(BindTexture2D(NULL, textSino, projections, cudaCreateChannelDesc<float>(), Sys.Proj.Nx, Sys.Proj.Ny*Sys.Proj.NumViews, projPitch));
	KERNELCALL2(normProjectSlice, contBlocks, contThreads, reconstruction, distance, alignStr, constants);
	cuda(UnbindTexture(textSino));

	return Tomo_OK;
}

TomoError TomoRecon::resetLight() {
	if (constants.dataDisplay == projections) {
		constants.baseXr = 7 * Sys.Proj.Nx / 8;
		constants.baseYr = 7 * Sys.Proj.Ny / 8;
		constants.currXr = Sys.Proj.Nx / 8;
		constants.currYr = Sys.Proj.Ny / 8;
	}
	else {
		constants.baseXr = 7 * Sys.Recon.Nx / 8;
		constants.baseYr = 7 * Sys.Recon.Ny / 8;
		constants.currXr = Sys.Recon.Nx / 8;
		constants.currYr = Sys.Recon.Ny / 8;
	}

	tomo_err_throw(autoLight());

	constants.baseXr = -1;
	constants.baseYr = -1;
	constants.currXr = -1;
	constants.currYr = -1;

	return Tomo_OK;
}

TomoError TomoRecon::resetFocus(bool checkFlip) {
	if (constants.dataDisplay == projections) {
		constants.baseXr = 3 * Sys.Proj.Nx / 4;
		constants.baseYr = 3 * Sys.Proj.Ny / 4;
		constants.currXr = Sys.Proj.Nx / 4;
		constants.currYr = Sys.Proj.Ny / 4;
	}
	else {
		constants.baseXr = 3 * Sys.Recon.Nx / 4;
		constants.baseYr = 3 * Sys.Recon.Ny / 4;
		constants.currXr = Sys.Recon.Nx / 4;
		constants.currYr = Sys.Recon.Ny / 4;
	}

	tomo_err_throw(autoFocus(true, checkFlip));
	while (autoFocus(false, checkFlip) == Tomo_OK);

	constants.baseXr = -1;
	constants.baseYr = -1;
	constants.currXr = -1;
	constants.currYr = -1;

	return Tomo_OK;
}

float TomoRecon::getMax(float * d_Im) {
	constants.baseXr = 3 * Sys.Recon.Nx / 4;
	constants.baseYr = 3 * Sys.Recon.Ny / 4;
	constants.currXr = Sys.Recon.Nx / 4;
	constants.currYr = Sys.Recon.Ny / 4;

	unsigned int histogram[HIST_BIN_COUNT];
	int threshold = Sys.Recon.Nx * Sys.Recon.Ny / AUTOTHRESHOLD;
	getHistogram(d_Im, reconPitch*Sys.Recon.Ny, histogram);

	int i;
	for (i = HIST_BIN_COUNT - 1; i >= 0; i--) {
		unsigned int count = histogram[i];
		if (count > threshold) break;
	}
	if (i < 0) i = HIST_BIN_COUNT;

	constants.baseXr = -1;
	constants.baseYr = -1;
	constants.currXr = -1;
	constants.currYr = -1;

	return i * UCHAR_MAX;
}

/****************************************************************************/
/*								Conversions									*/
/****************************************************************************/

//Projection space to recon space
TomoError TomoRecon::P2R(int* rX, int* rY, int pX, int pY, int view) {
	float dz = distance / Sys.Geo.EmitZ[view];
	*rX = xMM2R(xP2MM(pX, constants.Px, constants.PitchPx) * (1 + dz) - Sys.Geo.EmitX[constants.revGeo ? constants.Views - 1 - view : view] * dz, constants.Rx, constants.PitchRx);
	*rY = yMM2R(yP2MM(pY, constants.Py, constants.PitchPy) * (1 + dz) - Sys.Geo.EmitY[view] * dz, constants.Ry, constants.PitchRy);

	return Tomo_OK;
}

//Recon space to projection space
TomoError TomoRecon::R2P(float* pX, float* pY, int rX, int rY, int view) {
	float dz = distance / Sys.Geo.EmitZ[view];
	*pX = xMM2P((xR2MM(rX, constants.Rx, constants.PitchRx) + Sys.Geo.EmitX[constants.revGeo ?
constants.Views - 1 - view : view] * dz), constants.Px, constants.PitchPx); *pY = yMM2P((yR2MM(rY, constants.Ry, constants.PitchRy) + Sys.Geo.EmitY[view] * dz), constants.Py, constants.PitchPy); return Tomo_OK; } //Image space to on-screen display TomoError TomoRecon::I2D(int* dX, int* dY, int iX, int iY) { float innerOffx = (width - Sys.Proj.Nx / scale) / 2; float innerOffy = (height - Sys.Proj.Ny / scale) / 2; *dX = (int)((iX - xOff) / scale + innerOffx); *dY = (int)((iY - yOff) / scale + innerOffy); return Tomo_OK; } //Projection space to recon space int TomoRecon::P2R(int p, int view, bool xDir) { float dz = distance / Sys.Geo.EmitZ[view]; if (xDir) return (int)(xMM2R(xP2MM(p, constants.Px, constants.PitchPx) * (1 + dz) - Sys.Geo.EmitX[constants.revGeo ? constants.Views - 1 - view : view] * dz, constants.Rx, constants.PitchRx)); //else return (int)(yMM2R(yP2MM(p, constants.Py, constants.PitchPy) * (1 + dz) - Sys.Geo.EmitY[view] * dz, constants.Ry, constants.PitchRy)); } //Recon space to projection space int TomoRecon::R2P(int r, int view, bool xDir) { float dz = distance / Sys.Geo.EmitZ[view]; if (xDir) return (int)(xMM2P((xR2MM(r, constants.Rx, constants.PitchRx) + Sys.Geo.EmitX[constants.revGeo ? constants.Views - 1 - view : view] * dz) / (1.0f + dz), constants.Px, constants.PitchPx)); //else return (int)(yMM2P((yR2MM(r, constants.Ry, constants.PitchRy) + Sys.Geo.EmitY[view] * dz) / (1.0f + dz), constants.Py, constants.PitchPy)); } //Image space to on-screen display int TomoRecon::I2D(int i, bool xDir) { if (xDir) { int sysWidth; if (constants.dataDisplay == projections) sysWidth = Sys.Proj.Nx; else sysWidth = Sys.Recon.Nx; float innerOffx = (width - sysWidth / scale) / 2.0f; return constants.orientation ? (int)((sysWidth - 1 - i - xOff) / scale + innerOffx) : (int)((i - xOff) / scale + innerOffx); } //else int sysHeight; if (constants.dataDisplay == projections) sysHeight = Sys.Proj.Ny; else sysHeight = Sys.Recon.Ny; float innerOffy = (height - sysHeight / scale) / 2.0f; return constants.flip ? (int)((sysHeight - 1 - i - yOff) / scale + innerOffy) : (int)((i - yOff) / scale + innerOffy); } //On-screen coordinates to image space int TomoRecon::D2I(int d, bool xDir) { if (xDir) { int sysWidth; if (constants.dataDisplay == projections) sysWidth = Sys.Proj.Nx; else sysWidth = Sys.Recon.Nx; float innerOffx = (width - sysWidth / scale) / 2.0f; return (int)((d - innerOffx) * scale + xOff); } //else int sysHeight; if (constants.dataDisplay == projections) sysHeight = Sys.Proj.Ny; else sysHeight = Sys.Recon.Ny; float innerOffy = (height - sysHeight / scale) / 2.0f; return (int)((d - innerOffy) * scale + yOff); }
8d4a030fa6c0f1a019b6d024afc7458d679a7d36.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <math.h> #include <assert.h> #include <time.h> #include "key-p2p.h" #include "key_api.h" #include "gpukv/timer.h" #include <algorithm> #ifndef CUDA_SAFE_CALL #define CUDA_SAFE_CALL(err) __checkCudaErrors (err, __FILE__, __LINE__) #endif inline void __checkCudaErrors(hipError_t err, const char *file, const int line) { if (hipSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int) err, hipGetErrorString(err)); exit(-1); } } int main(int argc, char** argv){ srand(time(NULL)); int device = 0; int gpu_memory_size = 192*1024*1024; CUDA_SAFE_CALL(hipSetDeviceFlags(hipDeviceMapHost)); CUDA_SAFE_CALL(hipSetDevice(device)); hipDeviceProp_t deviceProp; CUDA_SAFE_CALL(hipGetDeviceProperties(&deviceProp, device)); void *dummy; ASSERTRT(hipMalloc(&dummy,0)); hipDeviceptr_t gpu_memory; ASSERTDRV(cuMemAlloc(&gpu_memory,gpu_memory_size)); ASSERTDRV(cuMemsetD8(gpu_memory, 0x00, gpu_memory_size)); unsigned int flag = 1; ASSERTDRV(hipPointerSetAttribute(&flag, hipPointerAttributeSyncMemops, gpu_memory)); struct I_KEY_PIN_PARAM pin; struct I_KEY_UNPIN_PARAM unpin; int pin_fd; pin_fd=open(D,O_RDWR); pin.addr=gpu_memory; pin.size=gpu_memory_size; pin.id=0; int ret=ioctl(pin_fd,I_KEY_PIN_BUF,&pin); if(ret!=0){printf("gpu pin error!\n"); return 0;} int key_ret=key_open(F_NAME); int *request_list; int start_key = 0; int request_num = 10; int i; request_list=(int*)malloc(sizeof(int)*request_num); for(i=0;i<request_num;i++){ request_list[i]=i+start_key; request_list[i] += 10000*i; } random_shuffle(request_list,request_list+request_num); printf("aaaa\n"); // KEY_PAGE_SIZE: 4096 int buffer_size=KEY_PAGE_SIZE*1024; int request_size = 1024*512; char *buffer[10]; for(i = 0; i < request_num; i++){ char num = rand() % 10 + '0'; if(posix_memalign((void**)&buffer[i],KEY_PAGE_SIZE,buffer_size)){ printf("can not allocate io payload buffer!\n"); return 0; } memset(buffer[i], num, buffer_size); } printf("setup data at ssd start\n"); // setup data at ssd for pinned memory char tmp[524288] = {0}; for(i = 0; i < request_num; ++i){ // mode, key, buf, length ret = key_op(KEY_PUT, request_list[i], buffer[request_list[i] % 10], request_size); printf("key - %d PUT:\n", request_list[i]); if(ret != request_size) printf("error! 
ret=%d\n", ret); for(int j = 0; j < 10; ++j) printf("%c", buffer[request_list[i] % 10][j]); printf("\n"); /*printf("GET:\n"); ret = key_op(KEY_GET, request_list[i], tmp, request_size); for(int j = 0; j < 10; ++j) printf("%c", buffer[i][j]); printf("\n\n");*/ } printf("put complete\n"); getchar(); // load data at pinned memory /*for(i = 0; i < request_num; ++i) memset(buffer[i], 0, buffer_size); printf("aaa\n"); for(i = 0; i < request_num; ++i){ ret = key_p2p_op(KEY_GET, request_list[i], i*request_size, request_size); //request_list[i] += start_key; printf("%d\n", i); } printf("p2p ready complete\n");*/ /*cuMemcpyDtoH(buffer, gpu_memory, request_size*request_num); printf("p2p check\n"); for(i = 0; i < request_num; ++i){ for(int j = 0; j < 10; ++j){ printf("%d ", buffer[i*request_size/buffer_size][i*request_size % buffer_size + j]); } printf("\n"); }*/ printf("gpu pinned memory data setup complete\n"); getchar(); // p2p put test //for(i = 0; i < request_num; ++i){ // ret = key_p2p_op(KEY_PUT, request_list[i], i*request_size, request_size); // printf("put p2p ret: %d\n", ret); // } for(i = 0; i < request_num; ++i) { memset(buffer[i], 0, buffer_size); for(int j = 0; j< 10; ++j) printf("%02X", (unsigned char)buffer[i][j]); printf("\n"); } for(i = 0; i < request_num; ++i){ ret = key_op(KEY_GET, request_list[i], buffer[i], request_size); printf("key - %d get ret: %d\n", request_list[i], ret); for(int j = 0; j < 10; ++j) printf("%c", buffer[i][j]); printf("\n"); } printf("good\n"); //key_close(F_NAME); //close(pin_fd); /*ASSERT_EQ(ioctl(pin_fd,I_KEY_UNPIN_BUF,&unpin),0); ASSERTDRV(hipFree(gpu_memory)); key_close(F_NAME); close(pin_fd); hipDeviceReset(); free(request_list); for(i = 0; i < request_num; ++i){ free(buffer[i]); }*/ return 0; }
8d4a030fa6c0f1a019b6d024afc7458d679a7d36.cu
#include <stdio.h> #include <stdlib.h> #include <errno.h> #include <math.h> #include <assert.h> #include <time.h> #include "key-p2p.h" #include "key_api.h" #include "gpukv/timer.h" #include <algorithm> #ifndef CUDA_SAFE_CALL #define CUDA_SAFE_CALL(err) __checkCudaErrors (err, __FILE__, __LINE__) #endif inline void __checkCudaErrors(cudaError err, const char *file, const int line) { if (cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int) err, cudaGetErrorString(err)); exit(-1); } } int main(int argc, char** argv){ srand(time(NULL)); int device = 0; int gpu_memory_size = 192*1024*1024; CUDA_SAFE_CALL(cudaSetDeviceFlags(cudaDeviceMapHost)); CUDA_SAFE_CALL(cudaSetDevice(device)); cudaDeviceProp deviceProp; CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, device)); void *dummy; ASSERTRT(cudaMalloc(&dummy,0)); CUdeviceptr gpu_memory; ASSERTDRV(cuMemAlloc(&gpu_memory,gpu_memory_size)); ASSERTDRV(cuMemsetD8(gpu_memory, 0x00, gpu_memory_size)); unsigned int flag = 1; ASSERTDRV(cuPointerSetAttribute(&flag, CU_POINTER_ATTRIBUTE_SYNC_MEMOPS, gpu_memory)); struct I_KEY_PIN_PARAM pin; struct I_KEY_UNPIN_PARAM unpin; int pin_fd; pin_fd=open(D,O_RDWR); pin.addr=gpu_memory; pin.size=gpu_memory_size; pin.id=0; int ret=ioctl(pin_fd,I_KEY_PIN_BUF,&pin); if(ret!=0){printf("gpu pin error!\n"); return 0;} int key_ret=key_open(F_NAME); int *request_list; int start_key = 0; int request_num = 10; int i; request_list=(int*)malloc(sizeof(int)*request_num); for(i=0;i<request_num;i++){ request_list[i]=i+start_key; request_list[i] += 10000*i; } random_shuffle(request_list,request_list+request_num); printf("aaaa\n"); // KEY_PAGE_SIZE: 4096 int buffer_size=KEY_PAGE_SIZE*1024; int request_size = 1024*512; char *buffer[10]; for(i = 0; i < request_num; i++){ char num = rand() % 10 + '0'; if(posix_memalign((void**)&buffer[i],KEY_PAGE_SIZE,buffer_size)){ printf("can not allocate io payload buffer!\n"); return 0; } memset(buffer[i], num, buffer_size); } printf("setup data at ssd start\n"); // setup data at ssd for pinned memory char tmp[524288] = {0}; for(i = 0; i < request_num; ++i){ // mode, key, buf, length ret = key_op(KEY_PUT, request_list[i], buffer[request_list[i] % 10], request_size); printf("key - %d PUT:\n", request_list[i]); if(ret != request_size) printf("error! 
ret=%d\n", ret); for(int j = 0; j < 10; ++j) printf("%c", buffer[request_list[i] % 10][j]); printf("\n"); /*printf("GET:\n"); ret = key_op(KEY_GET, request_list[i], tmp, request_size); for(int j = 0; j < 10; ++j) printf("%c", buffer[i][j]); printf("\n\n");*/ } printf("put complete\n"); getchar(); // load data at pinned memory /*for(i = 0; i < request_num; ++i) memset(buffer[i], 0, buffer_size); printf("aaa\n"); for(i = 0; i < request_num; ++i){ ret = key_p2p_op(KEY_GET, request_list[i], i*request_size, request_size); //request_list[i] += start_key; printf("%d\n", i); } printf("p2p ready complete\n");*/ /*cuMemcpyDtoH(buffer, gpu_memory, request_size*request_num); printf("p2p check\n"); for(i = 0; i < request_num; ++i){ for(int j = 0; j < 10; ++j){ printf("%d ", buffer[i*request_size/buffer_size][i*request_size % buffer_size + j]); } printf("\n"); }*/ printf("gpu pinned memory data setup complete\n"); getchar(); // p2p put test //for(i = 0; i < request_num; ++i){ // ret = key_p2p_op(KEY_PUT, request_list[i], i*request_size, request_size); // printf("put p2p ret: %d\n", ret); // } for(i = 0; i < request_num; ++i) { memset(buffer[i], 0, buffer_size); for(int j = 0; j< 10; ++j) printf("%02X", (unsigned char)buffer[i][j]); printf("\n"); } for(i = 0; i < request_num; ++i){ ret = key_op(KEY_GET, request_list[i], buffer[i], request_size); printf("key - %d get ret: %d\n", request_list[i], ret); for(int j = 0; j < 10; ++j) printf("%c", buffer[i][j]); printf("\n"); } printf("good\n"); //key_close(F_NAME); //close(pin_fd); /*ASSERT_EQ(ioctl(pin_fd,I_KEY_UNPIN_BUF,&unpin),0); ASSERTDRV(cuMemFree(gpu_memory)); key_close(F_NAME); close(pin_fd); cudaDeviceReset(); free(request_list); for(i = 0; i < request_num; ++i){ free(buffer[i]); }*/ return 0; }
2fad44e1653f760857fb61f143f44321224bacfa.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>

__global__ void print_threadIDs()
{
    printf("threadIdx_x : %d, threadIdx_y : %d, threadIdx_z : %d \n", threadIdx.x, threadIdx.y, threadIdx.z);
}

int main()
{
    int nx, ny;
    nx = 16;
    ny = 16;

    dim3 block(8, 8);
    dim3 grid(nx / block.x, ny / block.y);

    print_threadIDs<<<grid, block>>>();
    hipDeviceSynchronize();
    hipDeviceReset();
    return 0;
}
2fad44e1653f760857fb61f143f44321224bacfa.cu
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>

__global__ void print_threadIDs()
{
    printf("threadIdx_x : %d, threadIdx_y : %d, threadIdx_z : %d \n", threadIdx.x, threadIdx.y, threadIdx.z);
}

int main()
{
    int nx, ny;
    nx = 16;
    ny = 16;

    dim3 block(8, 8);
    dim3 grid(nx / block.x, ny / block.y);

    print_threadIDs<<<grid, block>>>();
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
0c51d1545dd0a4ddc40344cfe4308bee6bd3ec74.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zlascl2.cu normal z -> d, Tue Sep 2 12:38:16 2014 @author Theo Mary */ #include "common_magma.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right. __global__ void dlascl2_full(int m, int n, const double* D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; double mul = D[ind]; A += ind; if (ind < m) { for(int j=0; j < n; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void dlascl2_lower(int m, int n, const double* D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; int break_d = (ind < n) ? ind : n-1; double mul = D[ind]; A += ind; if (ind < m) { for(int j=0; j <= break_d; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void dlascl2_upper(int m, int n, const double *D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; double mul = D[ind]; A += ind; if (ind < m) { for(int j=n-1; j >= ind; j--) A[j*lda] *= mul; } } /** Purpose ------- DLASCL2 scales the M by N real matrix A by the real diagonal matrix dD. TYPE specifies that A may be full, upper triangular, lower triangular. Arguments --------- \param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaFull: full matrix. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. \param[in] m INTEGER The number of rows of the matrix A. M >= 0. \param[in] n INTEGER The number of columns of the matrix A. N >= 0. \param[in] dD DOUBLE PRECISION vector, dimension (M) The diagonal matrix containing the scalar factors. Stored as a vector. \param[in,out] dA DOUBLE PRECISION array, dimension (LDDA,N) The matrix to be scaled by dD. See TYPE for the storage type. \param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). \param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlascl2_q( magma_type_t type, magma_int_t m, magma_int_t n, const double *dD, double *dA, magma_int_t ldda, magma_int_t *info, magma_queue_t queue ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull ) *info = -1; else if ( m < 0 ) *info = -2; else if ( n < 0 ) *info = -3; else if ( ldda < max(1,m) ) *info = -5; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 grid( (m + NB - 1)/NB ); dim3 threads( NB ); if (type == MagmaLower) { hipLaunchKernelGGL(( dlascl2_lower) , dim3(grid), dim3(threads), 0, queue , m, n, dD, dA, ldda); } else if (type == MagmaUpper) { hipLaunchKernelGGL(( dlascl2_upper) , dim3(grid), dim3(threads), 0, queue , m, n, dD, dA, ldda); } else if (type == MagmaFull) { hipLaunchKernelGGL(( dlascl2_full) , dim3(grid), dim3(threads), 0, queue , m, n, dD, dA, ldda); } } /** @see magmablas_dlascl2_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlascl2( magma_type_t type, magma_int_t m, magma_int_t n, const double *dD, double *dA, magma_int_t ldda, magma_int_t *info ) { magmablas_dlascl2_q( type, m, n, dD, dA, ldda, info, magma_stream ); }
0c51d1545dd0a4ddc40344cfe4308bee6bd3ec74.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zlascl2.cu normal z -> d, Tue Sep 2 12:38:16 2014 @author Theo Mary */ #include "common_magma.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right. __global__ void dlascl2_full(int m, int n, const double* D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; double mul = D[ind]; A += ind; if (ind < m) { for(int j=0; j < n; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void dlascl2_lower(int m, int n, const double* D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; int break_d = (ind < n) ? ind : n-1; double mul = D[ind]; A += ind; if (ind < m) { for(int j=0; j <= break_d; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void dlascl2_upper(int m, int n, const double *D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; double mul = D[ind]; A += ind; if (ind < m) { for(int j=n-1; j >= ind; j--) A[j*lda] *= mul; } } /** Purpose ------- DLASCL2 scales the M by N real matrix A by the real diagonal matrix dD. TYPE specifies that A may be full, upper triangular, lower triangular. Arguments --------- \param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaFull: full matrix. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. \param[in] m INTEGER The number of rows of the matrix A. M >= 0. \param[in] n INTEGER The number of columns of the matrix A. N >= 0. \param[in] dD DOUBLE PRECISION vector, dimension (M) The diagonal matrix containing the scalar factors. Stored as a vector. \param[in,out] dA DOUBLE PRECISION array, dimension (LDDA,N) The matrix to be scaled by dD. See TYPE for the storage type. \param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). \param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlascl2_q( magma_type_t type, magma_int_t m, magma_int_t n, const double *dD, double *dA, magma_int_t ldda, magma_int_t *info, magma_queue_t queue ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull ) *info = -1; else if ( m < 0 ) *info = -2; else if ( n < 0 ) *info = -3; else if ( ldda < max(1,m) ) *info = -5; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 grid( (m + NB - 1)/NB ); dim3 threads( NB ); if (type == MagmaLower) { dlascl2_lower <<< grid, threads, 0, queue >>> (m, n, dD, dA, ldda); } else if (type == MagmaUpper) { dlascl2_upper <<< grid, threads, 0, queue >>> (m, n, dD, dA, ldda); } else if (type == MagmaFull) { dlascl2_full <<< grid, threads, 0, queue >>> (m, n, dD, dA, ldda); } } /** @see magmablas_dlascl2_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlascl2( magma_type_t type, magma_int_t m, magma_int_t n, const double *dD, double *dA, magma_int_t ldda, magma_int_t *info ) { magmablas_dlascl2_q( type, m, n, dD, dA, ldda, info, magma_stream ); }
8e1187473ea440aebd5f02e554a71664d24b3655.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////// //Ho Thien Luan -> History Tracking! // 1. Ver_0: Approximate string matching with k-mismatches // 2. Ver_1: Optimize by using sharing_memory for storing pattern // // // //////////////////////////////////////////////////////////// #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <hip/hip_runtime.h> #include <math.h> #include <hip/hip_runtime.h> #include "cuPrintf.hip" #include <time.h> #define FILENAME_MAXLEN 256 #define THREAD_BLOCK_EXP (7) #define THREAD_BLOCK_SIZE (1 << THREAD_BLOCK_EXP) __global__ void ASM_kernel(char *g_input_string, int input_size, char *g_pattern, int real_pattern_size, int l_par, int mask, int *g_matrix_M, int *g_matrix_B) { int tid = threadIdx.x ; int gbid = blockIdx.y * gridDim.x + blockIdx.x ; int start = gbid*THREAD_BLOCK_SIZE; int start_1st_tid = start + tid; int start_2nd_tid = start + tid - (real_pattern_size - l_par); //unsigned int bit_vector=0; int match; int sub_sum; int sub_init; //__shared__ char sub_string_shared [THREAD_BLOCK_SIZE + pattern_length - 1] ; __shared__ char pattern_shared [32] ; // int pow_2b = 1 << b; // unsigned int bit_vector = 0; // sub_string_shared[tid] = g_input_string[start+tid]; // if ( tid < (pattern_length - 1) ){ // sub_string_shared[THREAD_BLOCK_SIZE + tid] = g_input_string[start+THREAD_BLOCK_SIZE+tid]; // } if ( tid < real_pattern_size ){ pattern_shared[tid] = g_pattern[tid]; } __syncthreads(); if(start_1st_tid < real_pattern_size - l_par) { //initialization sub_sum = 0; sub_init = 0; for (int i=1; i<=start_1st_tid+1; i++) { sub_init = ((sub_init << 1) & mask) | 1; sub_sum++; } //g_matrix_B[start_1st_tid+1] = sub_init; //g_matrix_M[start_1st_tid+1] = sub_sum; //Processing for(int i = 1; i<=real_pattern_size-start_1st_tid; i++) { //if (g_input_string[i-1] == g_pattern[start_1st_tid+i]) {match = 0;} if (g_input_string[i-1] == pattern_shared[start_1st_tid+i]) {match = 0;} else {match = 1;} g_matrix_B[(real_pattern_size+1)*(i) + start_1st_tid+i+1] = ((g_matrix_B[(real_pattern_size+1)*(i-1) + (start_1st_tid+i)] << 1) & mask) | match; sub_sum = 0; for (int k = 0; k < l_par; k++) { //g_matrix_M[(real_pattern_size+1)*(start_1st_tid+i) + i] += (g_matrix_B[(real_pattern_size+1)*(start_1st_tid+i) + i] >> k) & 1; sub_sum += (g_matrix_B[(real_pattern_size+1)*(i) + start_1st_tid+i+1] >> k) & 1; g_matrix_M[(real_pattern_size+1)*(i) + start_1st_tid+i+1] = sub_sum; } } } //////////////////////////////////////////////////////////////// if (start_2nd_tid <= input_size-l_par+1) { //initialization //g_matrix_B[(real_pattern_size+1)*(start_2nd_tid+0) + 0] = 0; //g_matrix_M[(real_pattern_size+1)*(start_2nd_tid+0) + 0] = 0; for (int i = 1; i <= real_pattern_size; i++) { //if (g_input_string[start_2nd_tid+i-1] == g_pattern[i-1]) {match = 0;} if (g_input_string[start_2nd_tid+i-1] == pattern_shared[i-1]) {match = 0;} else {match = 1;} g_matrix_B[(real_pattern_size+1)*(start_2nd_tid+i) + i] = ((g_matrix_B[(real_pattern_size+1)*(start_2nd_tid+i-1) + (i-1)] << 1) & mask) | match; sub_sum = 0; for (int k = 0; k < l_par; k++) { //g_matrix_M[(real_pattern_size+1)*(start_2nd_tid+i) + i] += (g_matrix_B[(real_pattern_size+1)*(start_2nd_tid+i) + i] >> k) & 1; sub_sum += (g_matrix_B[(real_pattern_size+1)*(start_2nd_tid+i) + i] >> k) & 1; g_matrix_M[(real_pattern_size+1)*(start_2nd_tid+i) + i] = sub_sum; } //cuPrintf("threadIdx.x = %d \t ,start = %d, matrix_M = %d, matrix_B = %d, string = %s, pattern = 
%s, pattern_size = %d\n", tid, start_2nd_tid, g_matrix_M[(real_pattern_size+1)*(start_2nd_tid+i) + i],g_matrix_B[(real_pattern_size+1)*(start_2nd_tid+i) + i], g_input_string[i-1], g_pattern[i-1], l_par); } } } //////////////////////////////// void ASM_process_top (char *g_input_string, size_t input_size, char *g_pattern, int real_pattern_size, int l_par, int mask, int *g_matrix_M, int *g_matrix_B) { // num_blocks = # of thread blocks to cover input stream int num_blocks = (input_size+real_pattern_size-2*l_par+1)/THREAD_BLOCK_SIZE + 1 ; //total thread = (m-l) + (n-l+1) dim3 dimBlock( THREAD_BLOCK_SIZE, 1 ) ; dim3 dimGrid ; int p = num_blocks >> 15 ; dimGrid.x = num_blocks ; if ( p ){ dimGrid.x = 1<<15 ; dimGrid.y = p+1 ; } cudaPrintfInit();////for cuPrintf hipLaunchKernelGGL(( ASM_kernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, g_input_string, input_size, g_pattern, real_pattern_size, l_par, mask, g_matrix_M, g_matrix_B); cudaPrintfDisplay();////for cuPrintf cudaPrintfEnd(); ////for cuPrintf } ////////////////////////////////////////////// int main(int argc, char **argv) { char inputFile[FILENAME_MAXLEN]; char patternFile[FILENAME_MAXLEN]; strcpy( inputFile, argv[2]) ; strcpy( patternFile, argv[1]) ; int l_par; int k_par; l_par = strtol(argv[3], NULL, 10); k_par = strtol(argv[4], NULL, 10); //////////////////////////////////////////////////////////////////////////////////// //Process input patterns int input_size; int pattern_size; int real_pattern_size; char *h_input_string = NULL ; char *h_pattern = NULL ; //int *h_matched_result = NULL ; int *h_matrix_M = NULL ; int *h_matrix_B = NULL ; // step 1: read patterns and dump transition table // int deviceID = 0 ; // hipDeviceProp_t deviceProp; // hipGetDeviceProperties(&deviceProp, deviceID); //readPatternFromFile( patternFile) ; //step 2: prepare input stream FILE* fpin = fopen( inputFile, "rb"); assert ( NULL != fpin ) ; // obtain file size fseek (fpin , 0 , SEEK_END); input_size = ftell (fpin); rewind (fpin); //step2: prepare input pattern FILE* fpattern = fopen( patternFile, "rb"); assert ( NULL != fpattern ) ; // obtain file size fseek (fpattern , 0 , SEEK_END); pattern_size = ftell (fpattern); rewind (fpattern); // allocate memory to contain the whole file h_input_string = (char *) malloc (sizeof(char)*input_size); assert( NULL != h_input_string ); h_pattern = (char *) malloc (sizeof(char)*pattern_size); assert( NULL != h_pattern ); real_pattern_size = pattern_size-1; // del char "\n" h_matrix_M = (int *) malloc (sizeof(int)*(input_size+1)*(2*pattern_size+1)); assert( NULL != h_matrix_M ); memset( h_matrix_M, 0, sizeof(int)*(input_size+1)*(2*pattern_size+1)) ; h_matrix_B = (int *) malloc (sizeof(int)*(input_size+1)*(2*pattern_size+1)); assert( NULL != h_matrix_B ); memset( h_matrix_B, 0, sizeof(int)*(input_size+1)*(2*pattern_size+1)) ; //h_matched_result = (int *) malloc (sizeof(int)*(input_size-l_par+1)*l_par); //assert( NULL != h_matched_result ); //memset( h_matched_result, 0, sizeof(int)*(input_size-l_par+1)*l_par ) ; // copy the file into the buffer input_size = fread (h_input_string, 1, input_size, fpin); fclose(fpin); pattern_size = fread (h_pattern, 1, pattern_size, fpattern); fclose(fpattern); //printf("Cir string = %s, length = %d\n", h_pattern, real_pattern_size); //Parallel Bit-vector-mismaeches alg. 
#define BIT(x) (1<<(x)) unsigned int mask = 0; for (int i = 0; i < l_par ; i++) { mask = (mask << 1) | 1; } /* //printf("Mask = %d\n", mask); unsigned int match = 0; unsigned int bit_vector=0; //Bit-vector process struct timespec t_start, t_end; double elapsedTime; clock_gettime (CLOCK_REALTIME, &t_start); for (int i = 0; i <= input_size ; i++) { h_matrix_B[i*real_pattern_size] = 0; h_matrix_M[i*real_pattern_size] = 0; } for (int i = 1; i <=real_pattern_size; i++) { bit_vector = ((bit_vector << 1) & mask) | 1; h_matrix_B[i] = bit_vector; for (int j = 0; j < l_par; j++) { h_matrix_M[i] += (bit_vector >> j) & 1; } //printf("position %d -> h_matrix_B = %u, h_matrix_M = %u\n",i, h_matrix_B[i], h_matrix_M[i]); } for (int i = 1; i <= real_pattern_size; i++) { for (int j = 1; j <= input_size ; j++) { //circular patterns if (h_input_string[j-1] == h_pattern[i-1]) {match = 0;} else {match = 1;} h_matrix_B[(real_pattern_size+1)*j + i] = ((h_matrix_B[(real_pattern_size+1)*(j-1) + (i-1)] << 1) & mask) | match; for (int k = 0; k < l_par; k++) { h_matrix_M[(real_pattern_size+1)*j + i] += (h_matrix_B[(real_pattern_size+1)*j + i] >> k) & 1; } } } clock_gettime(CLOCK_REALTIME, &t_end); elapsedTime = (t_end.tv_sec*1000+t_end.tv_nsec/1000000)-(t_start.tv_sec*1000+t_start.tv_nsec/1000000); */ // Process in GPU char *g_input_string; char *g_pattern; int *g_matrix_M; int *g_matrix_B; hipMalloc (&g_input_string, sizeof(char)*input_size); hipMalloc (&g_pattern, sizeof(char)*pattern_size); hipMalloc (&g_matrix_M, sizeof(int)*(input_size+1)*(2*pattern_size+1)); hipMalloc (&g_matrix_B, sizeof(int)*(input_size+1)*(2*pattern_size+1)); hipMemcpy (g_input_string, h_input_string, sizeof(char)*input_size, hipMemcpyHostToDevice ); hipMemcpy (g_pattern, h_pattern, sizeof(char)*pattern_size, hipMemcpyHostToDevice ); // record time setting hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); /* unsigned int bit_vector=0; for (int i = 0; i <= input_size ; i++) { h_matrix_B[i*real_pattern_size] = 0; h_matrix_M[i*real_pattern_size] = 0; } for (int i = 1; i <=real_pattern_size; i++) { bit_vector = ((bit_vector << 1) & mask) | 1; h_matrix_B[i] = bit_vector; for (int j = 0; j < l_par; j++) { h_matrix_M[i] += (bit_vector >> j) & 1; } //printf("position %d -> h_matrix_B = %u, h_matrix_M = %u\n",i, h_matrix_B[i], h_matrix_M[i]); } hipMemcpy (g_matrix_M, h_matrix_M, sizeof(int)*(input_size+1)*(2*pattern_size+1), hipMemcpyHostToDevice ); hipMemcpy (g_matrix_B, h_matrix_B, sizeof(int)*(input_size+1)*(2*pattern_size+1), hipMemcpyHostToDevice ); */ // step 3: run ASM on GPU ASM_process_top ( g_input_string, input_size, g_pattern, real_pattern_size, l_par, mask, g_matrix_M, g_matrix_B) ; //With circular string matching l_par = l; // record time setting hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); hipMemcpy (h_matrix_M, g_matrix_M, sizeof(int)*(input_size+1)*(2*pattern_size+1), hipMemcpyDeviceToHost ); hipMemcpy (h_matrix_B, g_matrix_B, sizeof(int)*(input_size+1)*(2*pattern_size+1), hipMemcpyDeviceToHost ); // step 4: output matched result int total_result = 0; //for (int i = l_par; i<= input_size; i++) { for (int i = l_par; i <= input_size; i++) { for (int j = l_par; j <= real_pattern_size; j++) { //printf("At position %4d, circular pattern %4d : match_M %d, match_B = %d\n", i, j, h_matrix_M[i*(real_pattern_size+1) + j], h_matrix_B[i*(real_pattern_size+1) + j]); if(h_matrix_M[i*(real_pattern_size+1) + j] <= k_par) {total_result++;} } } /* 
//Print out Matrix M for (int j = 0; j <= real_pattern_size; j++) { for (int i = 0; i<= input_size; i++) { printf("%d\t", h_matrix_M[i*(real_pattern_size+1) + j]); } printf("\n"); } */ printf("\n\n"); printf("############################################################\n"); printf("#--Approximate Circular String Matching with k-Mismatches--#\n"); printf("#----------------------------------------------------------#\n"); printf("#---------------Parallel BVM Alg. in GPU-------------------#\n"); printf("############################################################\n"); printf("#--Pattern Length |\t\t %10d \t #\n",real_pattern_size); printf("#----------------------------------------------------------#\n"); printf("#--Input integer l |\t\t %10d \t #\n",l_par); printf("#----------------------------------------------------------#\n"); printf("#--Input Size (bytes) |\t\t %10d \t #\n", input_size ); printf("#----------------------------------------------------------#\n"); printf("#--Total matched with k = %d |\t\t %10d \t #\n", k_par, total_result); printf("#----------------------------------------------------------#\n"); //printf("#--Total elapsed time (ms) |\t\t %10f \t #\n", elapsedTime); printf("#--Total elapsed time (ms) |\t\t %10f \t #\n", time); printf("#----------------------------------------------------------#\n"); //printf("#--Throughput Result (Gbps) |\t\t %10f \t #\n", (float)(input_size*8)/(elapsedTime*1000000) ); //printf("#--Throughput Result (Mbps) |\t\t %10f \t #\n", (float)(input_size*8)/(elapsedTime*1000) ); printf("#--Throughput Result (Gbps) |\t\t %10f \t #\n", (float)(input_size*8)/(time*1000000) ); printf("#--Throughput Result (Mbps) |\t\t %10f \t #\n", (float)(input_size*8)/(time*1000) ); printf("############################################################\n"); free(h_input_string); free(h_pattern); //free(h_matched_result); free(h_matrix_M); free(h_matrix_B); hipFree(g_input_string); hipFree(g_pattern); hipFree(g_matrix_M); hipFree(g_matrix_B); return 0; }
8e1187473ea440aebd5f02e554a71664d24b3655.cu
//////////////////////////////////////////////////////////// //Ho Thien Luan -> History Tracking! // 1. Ver_0: Approximate string matching with k-mismatches // 2. Ver_1: Optimize by using sharing_memory for storing pattern // // // //////////////////////////////////////////////////////////// #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <cuda.h> #include <math.h> #include <cuda_runtime.h> #include "cuPrintf.cu" #include <time.h> #define FILENAME_MAXLEN 256 #define THREAD_BLOCK_EXP (7) #define THREAD_BLOCK_SIZE (1 << THREAD_BLOCK_EXP) __global__ void ASM_kernel(char *g_input_string, int input_size, char *g_pattern, int real_pattern_size, int l_par, int mask, int *g_matrix_M, int *g_matrix_B) { int tid = threadIdx.x ; int gbid = blockIdx.y * gridDim.x + blockIdx.x ; int start = gbid*THREAD_BLOCK_SIZE; int start_1st_tid = start + tid; int start_2nd_tid = start + tid - (real_pattern_size - l_par); //unsigned int bit_vector=0; int match; int sub_sum; int sub_init; //__shared__ char sub_string_shared [THREAD_BLOCK_SIZE + pattern_length - 1] ; __shared__ char pattern_shared [32] ; // int pow_2b = 1 << b; // unsigned int bit_vector = 0; // sub_string_shared[tid] = g_input_string[start+tid]; // if ( tid < (pattern_length - 1) ){ // sub_string_shared[THREAD_BLOCK_SIZE + tid] = g_input_string[start+THREAD_BLOCK_SIZE+tid]; // } if ( tid < real_pattern_size ){ pattern_shared[tid] = g_pattern[tid]; } __syncthreads(); if(start_1st_tid < real_pattern_size - l_par) { //initialization sub_sum = 0; sub_init = 0; for (int i=1; i<=start_1st_tid+1; i++) { sub_init = ((sub_init << 1) & mask) | 1; sub_sum++; } //g_matrix_B[start_1st_tid+1] = sub_init; //g_matrix_M[start_1st_tid+1] = sub_sum; //Processing for(int i = 1; i<=real_pattern_size-start_1st_tid; i++) { //if (g_input_string[i-1] == g_pattern[start_1st_tid+i]) {match = 0;} if (g_input_string[i-1] == pattern_shared[start_1st_tid+i]) {match = 0;} else {match = 1;} g_matrix_B[(real_pattern_size+1)*(i) + start_1st_tid+i+1] = ((g_matrix_B[(real_pattern_size+1)*(i-1) + (start_1st_tid+i)] << 1) & mask) | match; sub_sum = 0; for (int k = 0; k < l_par; k++) { //g_matrix_M[(real_pattern_size+1)*(start_1st_tid+i) + i] += (g_matrix_B[(real_pattern_size+1)*(start_1st_tid+i) + i] >> k) & 1; sub_sum += (g_matrix_B[(real_pattern_size+1)*(i) + start_1st_tid+i+1] >> k) & 1; g_matrix_M[(real_pattern_size+1)*(i) + start_1st_tid+i+1] = sub_sum; } } } //////////////////////////////////////////////////////////////// if (start_2nd_tid <= input_size-l_par+1) { //initialization //g_matrix_B[(real_pattern_size+1)*(start_2nd_tid+0) + 0] = 0; //g_matrix_M[(real_pattern_size+1)*(start_2nd_tid+0) + 0] = 0; for (int i = 1; i <= real_pattern_size; i++) { //if (g_input_string[start_2nd_tid+i-1] == g_pattern[i-1]) {match = 0;} if (g_input_string[start_2nd_tid+i-1] == pattern_shared[i-1]) {match = 0;} else {match = 1;} g_matrix_B[(real_pattern_size+1)*(start_2nd_tid+i) + i] = ((g_matrix_B[(real_pattern_size+1)*(start_2nd_tid+i-1) + (i-1)] << 1) & mask) | match; sub_sum = 0; for (int k = 0; k < l_par; k++) { //g_matrix_M[(real_pattern_size+1)*(start_2nd_tid+i) + i] += (g_matrix_B[(real_pattern_size+1)*(start_2nd_tid+i) + i] >> k) & 1; sub_sum += (g_matrix_B[(real_pattern_size+1)*(start_2nd_tid+i) + i] >> k) & 1; g_matrix_M[(real_pattern_size+1)*(start_2nd_tid+i) + i] = sub_sum; } //cuPrintf("threadIdx.x = %d \t ,start = %d, matrix_M = %d, matrix_B = %d, string = %s, pattern = %s, pattern_size = %d\n", tid, start_2nd_tid, 
g_matrix_M[(real_pattern_size+1)*(start_2nd_tid+i) + i],g_matrix_B[(real_pattern_size+1)*(start_2nd_tid+i) + i], g_input_string[i-1], g_pattern[i-1], l_par); } } } //////////////////////////////// void ASM_process_top (char *g_input_string, size_t input_size, char *g_pattern, int real_pattern_size, int l_par, int mask, int *g_matrix_M, int *g_matrix_B) { // num_blocks = # of thread blocks to cover input stream int num_blocks = (input_size+real_pattern_size-2*l_par+1)/THREAD_BLOCK_SIZE + 1 ; //total thread = (m-l) + (n-l+1) dim3 dimBlock( THREAD_BLOCK_SIZE, 1 ) ; dim3 dimGrid ; int p = num_blocks >> 15 ; dimGrid.x = num_blocks ; if ( p ){ dimGrid.x = 1<<15 ; dimGrid.y = p+1 ; } cudaPrintfInit();////for cuPrintf ASM_kernel <<< dimGrid, dimBlock >>>(g_input_string, input_size, g_pattern, real_pattern_size, l_par, mask, g_matrix_M, g_matrix_B); cudaPrintfDisplay();////for cuPrintf cudaPrintfEnd(); ////for cuPrintf } ////////////////////////////////////////////// int main(int argc, char **argv) { char inputFile[FILENAME_MAXLEN]; char patternFile[FILENAME_MAXLEN]; strcpy( inputFile, argv[2]) ; strcpy( patternFile, argv[1]) ; int l_par; int k_par; l_par = strtol(argv[3], NULL, 10); k_par = strtol(argv[4], NULL, 10); //////////////////////////////////////////////////////////////////////////////////// //Process input patterns int input_size; int pattern_size; int real_pattern_size; char *h_input_string = NULL ; char *h_pattern = NULL ; //int *h_matched_result = NULL ; int *h_matrix_M = NULL ; int *h_matrix_B = NULL ; // step 1: read patterns and dump transition table // int deviceID = 0 ; // cudaDeviceProp deviceProp; // cudaGetDeviceProperties(&deviceProp, deviceID); //readPatternFromFile( patternFile) ; //step 2: prepare input stream FILE* fpin = fopen( inputFile, "rb"); assert ( NULL != fpin ) ; // obtain file size fseek (fpin , 0 , SEEK_END); input_size = ftell (fpin); rewind (fpin); //step2: prepare input pattern FILE* fpattern = fopen( patternFile, "rb"); assert ( NULL != fpattern ) ; // obtain file size fseek (fpattern , 0 , SEEK_END); pattern_size = ftell (fpattern); rewind (fpattern); // allocate memory to contain the whole file h_input_string = (char *) malloc (sizeof(char)*input_size); assert( NULL != h_input_string ); h_pattern = (char *) malloc (sizeof(char)*pattern_size); assert( NULL != h_pattern ); real_pattern_size = pattern_size-1; // del char "\n" h_matrix_M = (int *) malloc (sizeof(int)*(input_size+1)*(2*pattern_size+1)); assert( NULL != h_matrix_M ); memset( h_matrix_M, 0, sizeof(int)*(input_size+1)*(2*pattern_size+1)) ; h_matrix_B = (int *) malloc (sizeof(int)*(input_size+1)*(2*pattern_size+1)); assert( NULL != h_matrix_B ); memset( h_matrix_B, 0, sizeof(int)*(input_size+1)*(2*pattern_size+1)) ; //h_matched_result = (int *) malloc (sizeof(int)*(input_size-l_par+1)*l_par); //assert( NULL != h_matched_result ); //memset( h_matched_result, 0, sizeof(int)*(input_size-l_par+1)*l_par ) ; // copy the file into the buffer input_size = fread (h_input_string, 1, input_size, fpin); fclose(fpin); pattern_size = fread (h_pattern, 1, pattern_size, fpattern); fclose(fpattern); //printf("Cir string = %s, length = %d\n", h_pattern, real_pattern_size); //Parallel Bit-vector-mismaeches alg. 
#define BIT(x) (1<<(x)) unsigned int mask = 0; for (int i = 0; i < l_par ; i++) { mask = (mask << 1) | 1; } /* //printf("Mask = %d\n", mask); unsigned int match = 0; unsigned int bit_vector=0; //Bit-vector process struct timespec t_start, t_end; double elapsedTime; clock_gettime (CLOCK_REALTIME, &t_start); for (int i = 0; i <= input_size ; i++) { h_matrix_B[i*real_pattern_size] = 0; h_matrix_M[i*real_pattern_size] = 0; } for (int i = 1; i <=real_pattern_size; i++) { bit_vector = ((bit_vector << 1) & mask) | 1; h_matrix_B[i] = bit_vector; for (int j = 0; j < l_par; j++) { h_matrix_M[i] += (bit_vector >> j) & 1; } //printf("position %d -> h_matrix_B = %u, h_matrix_M = %u\n",i, h_matrix_B[i], h_matrix_M[i]); } for (int i = 1; i <= real_pattern_size; i++) { for (int j = 1; j <= input_size ; j++) { //circular patterns if (h_input_string[j-1] == h_pattern[i-1]) {match = 0;} else {match = 1;} h_matrix_B[(real_pattern_size+1)*j + i] = ((h_matrix_B[(real_pattern_size+1)*(j-1) + (i-1)] << 1) & mask) | match; for (int k = 0; k < l_par; k++) { h_matrix_M[(real_pattern_size+1)*j + i] += (h_matrix_B[(real_pattern_size+1)*j + i] >> k) & 1; } } } clock_gettime(CLOCK_REALTIME, &t_end); elapsedTime = (t_end.tv_sec*1000+t_end.tv_nsec/1000000)-(t_start.tv_sec*1000+t_start.tv_nsec/1000000); */ // Process in GPU char *g_input_string; char *g_pattern; int *g_matrix_M; int *g_matrix_B; cudaMalloc (&g_input_string, sizeof(char)*input_size); cudaMalloc (&g_pattern, sizeof(char)*pattern_size); cudaMalloc (&g_matrix_M, sizeof(int)*(input_size+1)*(2*pattern_size+1)); cudaMalloc (&g_matrix_B, sizeof(int)*(input_size+1)*(2*pattern_size+1)); cudaMemcpy (g_input_string, h_input_string, sizeof(char)*input_size, cudaMemcpyHostToDevice ); cudaMemcpy (g_pattern, h_pattern, sizeof(char)*pattern_size, cudaMemcpyHostToDevice ); // record time setting cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); /* unsigned int bit_vector=0; for (int i = 0; i <= input_size ; i++) { h_matrix_B[i*real_pattern_size] = 0; h_matrix_M[i*real_pattern_size] = 0; } for (int i = 1; i <=real_pattern_size; i++) { bit_vector = ((bit_vector << 1) & mask) | 1; h_matrix_B[i] = bit_vector; for (int j = 0; j < l_par; j++) { h_matrix_M[i] += (bit_vector >> j) & 1; } //printf("position %d -> h_matrix_B = %u, h_matrix_M = %u\n",i, h_matrix_B[i], h_matrix_M[i]); } cudaMemcpy (g_matrix_M, h_matrix_M, sizeof(int)*(input_size+1)*(2*pattern_size+1), cudaMemcpyHostToDevice ); cudaMemcpy (g_matrix_B, h_matrix_B, sizeof(int)*(input_size+1)*(2*pattern_size+1), cudaMemcpyHostToDevice ); */ // step 3: run ASM on GPU ASM_process_top ( g_input_string, input_size, g_pattern, real_pattern_size, l_par, mask, g_matrix_M, g_matrix_B) ; //With circular string matching l_par = l; // record time setting cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); cudaMemcpy (h_matrix_M, g_matrix_M, sizeof(int)*(input_size+1)*(2*pattern_size+1), cudaMemcpyDeviceToHost ); cudaMemcpy (h_matrix_B, g_matrix_B, sizeof(int)*(input_size+1)*(2*pattern_size+1), cudaMemcpyDeviceToHost ); // step 4: output matched result int total_result = 0; //for (int i = l_par; i<= input_size; i++) { for (int i = l_par; i <= input_size; i++) { for (int j = l_par; j <= real_pattern_size; j++) { //printf("At position %4d, circular pattern %4d : match_M %d, match_B = %d\n", i, j, h_matrix_M[i*(real_pattern_size+1) + j], h_matrix_B[i*(real_pattern_size+1) + j]); if(h_matrix_M[i*(real_pattern_size+1) + j] <= k_par) 
{total_result++;} } } /* //Print out Matrix M for (int j = 0; j <= real_pattern_size; j++) { for (int i = 0; i<= input_size; i++) { printf("%d\t", h_matrix_M[i*(real_pattern_size+1) + j]); } printf("\n"); } */ printf("\n\n"); printf("############################################################\n"); printf("#--Approximate Circular String Matching with k-Mismatches--#\n"); printf("#----------------------------------------------------------#\n"); printf("#---------------Parallel BVM Alg. in GPU-------------------#\n"); printf("############################################################\n"); printf("#--Pattern Length |\t\t %10d \t #\n",real_pattern_size); printf("#----------------------------------------------------------#\n"); printf("#--Input integer l |\t\t %10d \t #\n",l_par); printf("#----------------------------------------------------------#\n"); printf("#--Input Size (bytes) |\t\t %10d \t #\n", input_size ); printf("#----------------------------------------------------------#\n"); printf("#--Total matched with k = %d |\t\t %10d \t #\n", k_par, total_result); printf("#----------------------------------------------------------#\n"); //printf("#--Total elapsed time (ms) |\t\t %10f \t #\n", elapsedTime); printf("#--Total elapsed time (ms) |\t\t %10f \t #\n", time); printf("#----------------------------------------------------------#\n"); //printf("#--Throughput Result (Gbps) |\t\t %10f \t #\n", (float)(input_size*8)/(elapsedTime*1000000) ); //printf("#--Throughput Result (Mbps) |\t\t %10f \t #\n", (float)(input_size*8)/(elapsedTime*1000) ); printf("#--Throughput Result (Gbps) |\t\t %10f \t #\n", (float)(input_size*8)/(time*1000000) ); printf("#--Throughput Result (Mbps) |\t\t %10f \t #\n", (float)(input_size*8)/(time*1000) ); printf("############################################################\n"); free(h_input_string); free(h_pattern); //free(h_matched_result); free(h_matrix_M); free(h_matrix_B); cudaFree(g_input_string); cudaFree(g_pattern); cudaFree(g_matrix_M); cudaFree(g_matrix_B); return 0; }
70a877cfca05d7dd736d82b599fe198b6f9b61d5.hip
// !!! This is a file automatically generated by hipify!!! /* * File description: cumat_mul.cu * Author information: Mike Ranzinger [email protected] * Copyright information: Copyright Orchestr8 LLC */ #include "cumat.cuh" #include <rocblas.h> #include <stdexcept> #include <assert.h> using namespace std; CuMat operator*(const CuMat &a, const CuMat &b) { return ScaledMultiply(1.0f, a, b, 0); } CuMat operator*(const CuScopedWeakTranspose &tA, const CuMat &b) { return ScaledMultiply(1.0f, tA, b, 0); } CuMat operator*(const CuMat &a, const CuScopedWeakTranspose &tB) { return ScaledMultiply(1.0f, a, tB, 0); } CuMat operator*(const CuScopedWeakTranspose &tA, const CuScopedWeakTranspose &tB) { return ScaledMultiply(1.0f, tA, tB, 0); } CuMat ScaledMultiply(Real scale, const CuMat &a, const CuMat &b, hipblasHandle_t cublasHandle) { CuMat dest(a._handle); ScaledMultiply(scale, a, b, 0.0f, dest, cublasHandle); return dest; } CuMat ScaledMultiply(Real scale, const CuScopedWeakTranspose &tA, const CuMat &b, hipblasHandle_t cublasHandle) { CuMat dest(b._handle); ScaledMultiply(scale, tA, b, 0.0f, dest, cublasHandle); return dest; } CuMat ScaledMultiply(Real scale, const CuMat &a, const CuScopedWeakTranspose &tB, hipblasHandle_t cublasHandle) { CuMat dest(a._handle); ScaledMultiply(scale, a, tB, 0.0f, dest, cublasHandle); return dest; } CuMat ScaledMultiply(Real scale, const CuScopedWeakTranspose &tA, const CuScopedWeakTranspose &tB, hipblasHandle_t cublasHandle) { CuMat dest(tA.Mat._handle); ScaledMultiply(scale, tA, tB, 0.0f, dest, cublasHandle); return dest; } CuMat MultiplyTrans3D(const CuMat &a, uint32_t rows, uint32_t cols, const CuMat &b) { CuMat dest(a._handle); MultiplyTrans3D(a, rows, cols, b, dest); return dest; } void ScaledMultiply(Real mulScale, const CuMat &a, const CuMat &b, Real scaleDest, CuMat &dest, hipblasHandle_t cublasHandle) { assert(!a.Empty() && !b.Empty()); // Make sure the matrices are valid assert(a._cols == b._rows); // TODO: Support these other cases assert(a._storageOrder == CuColMajor && b._storageOrder == CuColMajor); if (!cublasHandle) cublasHandle = a._handle.CublasHandle; if (scaleDest == 0.0f) { dest.Resize(a._rows, b._cols); } else { assert(dest._rows == a._rows && dest._cols == b._cols); } hipblasStatus_t status = hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, a._rows, b._cols, a._cols, &mulScale, a._dMat, a._rows, b._dMat, b._rows, &scaleDest, dest._dMat, dest._rows); if (status != HIPBLAS_STATUS_SUCCESS) throw runtime_error("The matrix multiplication failed."); } void MultiplyTrans3D(const CuMat &a, uint32_t rows, uint32_t cols, const CuMat &b, CuMat &dest) { assert(!a.Empty() && !b.Empty()); assert(a._storageOrder == CuColMajor && b._storageOrder == CuColMajor); assert(cols == b._rows); assert(a._rows == (rows * cols)); dest.Resize(rows, b._cols); const float alpha = 1.0f, beta = 0.0f; for (uint32_t i = 0; i < b._cols; ++i) { hipblasStatus_t status = hipblasSgemm(a._handle.CublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, rows, 1, cols, &alpha, a._dMat + i * a._rows, rows, b._dMat + i * b._rows, b._rows, &beta, dest._dMat + i * rows, rows); if (status != HIPBLAS_STATUS_SUCCESS) throw runtime_error("The matrix multiplication failed."); } } void ScaledMultiply(Real mulScale, const CuScopedWeakTranspose &tA, const CuMat &b, Real scaleDest, CuMat &dest, hipblasHandle_t cublasHandle) { const CuMat &a = tA.Mat; assert(a._rows == b._rows); assert(!a.Empty() && !b.Empty()); assert(a._storageOrder == CuColMajor && b._storageOrder == CuColMajor); if (!cublasHandle) 
cublasHandle = a._handle.CublasHandle; if (scaleDest == 0.0f) { dest.Resize(a._cols, b._cols); } else { assert(dest._rows == a._cols && dest._cols == b._cols); } hipblasStatus_t status = hipblasSgemm(cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, a._cols, b._cols, a._rows, &mulScale, a._dMat, a._rows, b._dMat, b._rows, &scaleDest, dest._dMat, dest._rows); if (status != HIPBLAS_STATUS_SUCCESS) throw runtime_error("The matrix multiplication failed."); } void ScaledMultiply(Real mulScale, const CuMat &a, const CuScopedWeakTranspose &tB, Real scaleDest, CuMat &dest, hipblasHandle_t cublasHandle) { const CuMat &b = tB.Mat; assert(a._cols == b._cols); assert(!a.Empty() && !b.Empty()); assert(a._storageOrder == CuColMajor && b._storageOrder == CuColMajor); if (!cublasHandle) cublasHandle = a._handle.CublasHandle; if (scaleDest == 0.0f) { dest.Resize(a._rows, b._rows); } else { assert(dest._rows == a._rows && dest._cols == b._rows); } hipblasStatus_t status = hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, a._rows, b._rows, a._cols, &mulScale, a._dMat, a._rows, b._dMat, b._rows, &scaleDest, dest._dMat, dest._rows); if (status != HIPBLAS_STATUS_SUCCESS) throw runtime_error("The matrix multiplication failed."); } void ScaledMultiply(Real mulScale, const CuScopedWeakTranspose &tA, const CuScopedWeakTranspose &tB, Real scaleDest, CuMat &dest, hipblasHandle_t cublasHandle) { const CuMat &a = tA.Mat; const CuMat &b = tB.Mat; assert(a._rows == b._cols); assert(!a.Empty() && !b.Empty()); assert(a._storageOrder == CuColMajor && b._storageOrder == CuColMajor); if (!cublasHandle) cublasHandle = a._handle.CublasHandle; if (scaleDest == 0.0f) { dest.Resize(a._cols, b._rows); } else { assert(dest._rows == a._cols && dest._cols == b._rows); } hipblasStatus_t status = hipblasSgemm(cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_T, a._cols, b._rows, a._rows, &mulScale, a._dMat, a._rows, b._dMat, b._rows, &scaleDest, dest._dMat, dest._rows); if (status != HIPBLAS_STATUS_SUCCESS) throw runtime_error("The matrix multiplication failed."); }
70a877cfca05d7dd736d82b599fe198b6f9b61d5.cu
/* * File description: cumat_mul.cu * Author information: Mike Ranzinger [email protected] * Copyright information: Copyright Orchestr8 LLC */ #include "cumat.cuh" #include <cublas_v2.h> #include <stdexcept> #include <assert.h> using namespace std; CuMat operator*(const CuMat &a, const CuMat &b) { return ScaledMultiply(1.0f, a, b, 0); } CuMat operator*(const CuScopedWeakTranspose &tA, const CuMat &b) { return ScaledMultiply(1.0f, tA, b, 0); } CuMat operator*(const CuMat &a, const CuScopedWeakTranspose &tB) { return ScaledMultiply(1.0f, a, tB, 0); } CuMat operator*(const CuScopedWeakTranspose &tA, const CuScopedWeakTranspose &tB) { return ScaledMultiply(1.0f, tA, tB, 0); } CuMat ScaledMultiply(Real scale, const CuMat &a, const CuMat &b, cublasHandle_t cublasHandle) { CuMat dest(a._handle); ScaledMultiply(scale, a, b, 0.0f, dest, cublasHandle); return dest; } CuMat ScaledMultiply(Real scale, const CuScopedWeakTranspose &tA, const CuMat &b, cublasHandle_t cublasHandle) { CuMat dest(b._handle); ScaledMultiply(scale, tA, b, 0.0f, dest, cublasHandle); return dest; } CuMat ScaledMultiply(Real scale, const CuMat &a, const CuScopedWeakTranspose &tB, cublasHandle_t cublasHandle) { CuMat dest(a._handle); ScaledMultiply(scale, a, tB, 0.0f, dest, cublasHandle); return dest; } CuMat ScaledMultiply(Real scale, const CuScopedWeakTranspose &tA, const CuScopedWeakTranspose &tB, cublasHandle_t cublasHandle) { CuMat dest(tA.Mat._handle); ScaledMultiply(scale, tA, tB, 0.0f, dest, cublasHandle); return dest; } CuMat MultiplyTrans3D(const CuMat &a, uint32_t rows, uint32_t cols, const CuMat &b) { CuMat dest(a._handle); MultiplyTrans3D(a, rows, cols, b, dest); return dest; } void ScaledMultiply(Real mulScale, const CuMat &a, const CuMat &b, Real scaleDest, CuMat &dest, cublasHandle_t cublasHandle) { assert(!a.Empty() && !b.Empty()); // Make sure the matrices are valid assert(a._cols == b._rows); // TODO: Support these other cases assert(a._storageOrder == CuColMajor && b._storageOrder == CuColMajor); if (!cublasHandle) cublasHandle = a._handle.CublasHandle; if (scaleDest == 0.0f) { dest.Resize(a._rows, b._cols); } else { assert(dest._rows == a._rows && dest._cols == b._cols); } cublasStatus_t status = cublasSgemm_v2(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, a._rows, b._cols, a._cols, &mulScale, a._dMat, a._rows, b._dMat, b._rows, &scaleDest, dest._dMat, dest._rows); if (status != CUBLAS_STATUS_SUCCESS) throw runtime_error("The matrix multiplication failed."); } void MultiplyTrans3D(const CuMat &a, uint32_t rows, uint32_t cols, const CuMat &b, CuMat &dest) { assert(!a.Empty() && !b.Empty()); assert(a._storageOrder == CuColMajor && b._storageOrder == CuColMajor); assert(cols == b._rows); assert(a._rows == (rows * cols)); dest.Resize(rows, b._cols); const float alpha = 1.0f, beta = 0.0f; for (uint32_t i = 0; i < b._cols; ++i) { cublasStatus_t status = cublasSgemm_v2(a._handle.CublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, rows, 1, cols, &alpha, a._dMat + i * a._rows, rows, b._dMat + i * b._rows, b._rows, &beta, dest._dMat + i * rows, rows); if (status != CUBLAS_STATUS_SUCCESS) throw runtime_error("The matrix multiplication failed."); } } void ScaledMultiply(Real mulScale, const CuScopedWeakTranspose &tA, const CuMat &b, Real scaleDest, CuMat &dest, cublasHandle_t cublasHandle) { const CuMat &a = tA.Mat; assert(a._rows == b._rows); assert(!a.Empty() && !b.Empty()); assert(a._storageOrder == CuColMajor && b._storageOrder == CuColMajor); if (!cublasHandle) cublasHandle = a._handle.CublasHandle; if (scaleDest == 0.0f) { 
dest.Resize(a._cols, b._cols); } else { assert(dest._rows == a._cols && dest._cols == b._cols); } cublasStatus_t status = cublasSgemm_v2(cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, a._cols, b._cols, a._rows, &mulScale, a._dMat, a._rows, b._dMat, b._rows, &scaleDest, dest._dMat, dest._rows); if (status != CUBLAS_STATUS_SUCCESS) throw runtime_error("The matrix multiplication failed."); } void ScaledMultiply(Real mulScale, const CuMat &a, const CuScopedWeakTranspose &tB, Real scaleDest, CuMat &dest, cublasHandle_t cublasHandle) { const CuMat &b = tB.Mat; assert(a._cols == b._cols); assert(!a.Empty() && !b.Empty()); assert(a._storageOrder == CuColMajor && b._storageOrder == CuColMajor); if (!cublasHandle) cublasHandle = a._handle.CublasHandle; if (scaleDest == 0.0f) { dest.Resize(a._rows, b._rows); } else { assert(dest._rows == a._rows && dest._cols == b._rows); } cublasStatus_t status = cublasSgemm_v2(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, a._rows, b._rows, a._cols, &mulScale, a._dMat, a._rows, b._dMat, b._rows, &scaleDest, dest._dMat, dest._rows); if (status != CUBLAS_STATUS_SUCCESS) throw runtime_error("The matrix multiplication failed."); } void ScaledMultiply(Real mulScale, const CuScopedWeakTranspose &tA, const CuScopedWeakTranspose &tB, Real scaleDest, CuMat &dest, cublasHandle_t cublasHandle) { const CuMat &a = tA.Mat; const CuMat &b = tB.Mat; assert(a._rows == b._cols); assert(!a.Empty() && !b.Empty()); assert(a._storageOrder == CuColMajor && b._storageOrder == CuColMajor); if (!cublasHandle) cublasHandle = a._handle.CublasHandle; if (scaleDest == 0.0f) { dest.Resize(a._cols, b._rows); } else { assert(dest._rows == a._cols && dest._cols == b._rows); } cublasStatus_t status = cublasSgemm_v2(cublasHandle, CUBLAS_OP_T, CUBLAS_OP_T, a._cols, b._rows, a._rows, &mulScale, a._dMat, a._rows, b._dMat, b._rows, &scaleDest, dest._dMat, dest._rows); if (status != CUBLAS_STATUS_SUCCESS) throw runtime_error("The matrix multiplication failed."); }
c1da389d3e0aaa0442deca9e5e6d03309c1e5cf8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_CUDNN #include <vector> #include "caffe/layers/cudnn_conv_layer.hpp" namespace caffe { __global__ void sync_conv_groups() { } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, filter_desc_, weight + this->weight_offset_ * g, conv_descs_[i], fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i], cudnn::dataType<Dtype>::zero, top_descs_[i], top_data + top_offset_ * g)); // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); CUDNN_CHECK(cudnnAddTensor(handle_[g], cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, ); } } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter( handle_[1*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_filter_algo_[i], workspace[1*this->group_ + g], workspace_bwd_filter_sizes_[i], cudnn::dataType<Dtype>::one, filter_desc_, weight_diff + this->weight_offset_ * g)); } // Gradient w.r.t. bottom data. 
if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData( handle_[2*this->group_ + g], cudnn::dataType<Dtype>::one, filter_desc_, weight + this->weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_data_algo_[i], workspace[2*this->group_ + g], workspace_bwd_data_sizes_[i], cudnn::dataType<Dtype>::zero, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, ); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer); } // namespace caffe #endif
c1da389d3e0aaa0442deca9e5e6d03309c1e5cf8.cu
#ifdef USE_CUDNN #include <vector> #include "caffe/layers/cudnn_conv_layer.hpp" namespace caffe { __global__ void sync_conv_groups() { } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, filter_desc_, weight + this->weight_offset_ * g, conv_descs_[i], fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i], cudnn::dataType<Dtype>::zero, top_descs_[i], top_data + top_offset_ * g)); // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); CUDNN_CHECK(cudnnAddTensor(handle_[g], cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) sync_conv_groups<<<1, 1>>>(); } } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter( handle_[1*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_filter_algo_[i], workspace[1*this->group_ + g], workspace_bwd_filter_sizes_[i], cudnn::dataType<Dtype>::one, filter_desc_, weight_diff + this->weight_offset_ * g)); } // Gradient w.r.t. bottom data. 
if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData( handle_[2*this->group_ + g], cudnn::dataType<Dtype>::one, filter_desc_, weight + this->weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_data_algo_[i], workspace[2*this->group_ + g], workspace_bwd_data_sizes_[i], cudnn::dataType<Dtype>::zero, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) sync_conv_groups<<<1, 1>>>(); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer); } // namespace caffe #endif
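
The pair above is dominated by one mechanical rewrite: hipify turns every triple-chevron launch such as sync_conv_groups<<<1, 1>>>() into the hipLaunchKernelGGL macro. A minimal sketch of that rewrite in isolation, assuming it is built with hipcc; only the empty sync_conv_groups kernel and the macro call are taken from the pair, the main() scaffolding and error check are illustrative additions.

// Minimal sketch of the launch rewrite shown in the pair above (assumes hipcc).
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void sync_conv_groups() { }

int main() {
    // CUDA original:  sync_conv_groups<<<1, 1>>>();
    // hipify output (kernel, grid, block, shared mem, stream, kernel args...):
    hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
    if (hipDeviceSynchronize() != hipSuccess) {
        std::printf("sync_conv_groups launch failed\n");
        return 1;
    }
    return 0;
}
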
a0c0829742a658e77b216a976e34af5721ef8490.hip
// !!! This is a file automatically generated by hipify!!! #include "THHUNN.h" #include "THHHalf.h" #include "THHHalfAutoNumerics.cuh" #include <THH/THHApply.cuh> template <typename T> struct LeakyReLUUpdateOutput { const T negval_; LeakyReLUUpdateOutput(T negval) : negval_(negval) {} __device__ __forceinline__ void operator()(T *out, T *in) { T x = *in; *out = (x > 0) ? x : x * negval_; } }; // in-place variant template <typename T> struct LeakyReLUUpdateOutputIP { const T negval_; LeakyReLUUpdateOutputIP(T negval) : negval_(negval) {} __device__ __forceinline__ void operator()(T *x) { *x = (*x > 0) ? *x : negval_ * (*x); } }; template <typename T> struct LeakyReLUUpdateGradInput { const T negval_; LeakyReLUUpdateGradInput(T negval) : negval_(negval) {} __device__ __forceinline__ void operator()( T* gradInput, T* input, T* gradOutput) const { *gradInput = (*input > 0) ? *gradOutput : (*gradOutput) * negval_; } }; template <typename T> struct LeakyReLUUpdateGradInputIP { const T negval_; LeakyReLUUpdateGradInputIP(T negval) : negval_(negval) {} __device__ __forceinline__ void operator()( T* gradOutput, T* input) const { *gradOutput = (*input > 0) ? *gradOutput : (*gradOutput) * negval_; } }; #include "generic/LeakyReLU.cu" #include "THHGenerateFloatTypes.h"
a0c0829742a658e77b216a976e34af5721ef8490.cu
#include "THCUNN.h" #include "THCHalf.h" #include "THCHalfAutoNumerics.cuh" #include <THC/THCApply.cuh> template <typename T> struct LeakyReLUUpdateOutput { const T negval_; LeakyReLUUpdateOutput(T negval) : negval_(negval) {} __device__ __forceinline__ void operator()(T *out, T *in) { T x = *in; *out = (x > 0) ? x : x * negval_; } }; // in-place variant template <typename T> struct LeakyReLUUpdateOutputIP { const T negval_; LeakyReLUUpdateOutputIP(T negval) : negval_(negval) {} __device__ __forceinline__ void operator()(T *x) { *x = (*x > 0) ? *x : negval_ * (*x); } }; template <typename T> struct LeakyReLUUpdateGradInput { const T negval_; LeakyReLUUpdateGradInput(T negval) : negval_(negval) {} __device__ __forceinline__ void operator()( T* gradInput, T* input, T* gradOutput) const { *gradInput = (*input > 0) ? *gradOutput : (*gradOutput) * negval_; } }; template <typename T> struct LeakyReLUUpdateGradInputIP { const T negval_; LeakyReLUUpdateGradInputIP(T negval) : negval_(negval) {} __device__ __forceinline__ void operator()( T* gradOutput, T* input) const { *gradOutput = (*input > 0) ? *gradOutput : (*gradOutput) * negval_; } }; #include "generic/LeakyReLU.cu" #include "THCGenerateFloatTypes.h"
cfb4e4d2d9928104e14141bb9a7fdc71308c125e.hip
// !!! This is a file automatically generated by hipify!!!
#include "../THCTensorSort.cuh"
#include "THHTensor.hpp"

#include "../generic/THCTensorSort.cu"
#include <THH/THHGenerateLongType.h>
cfb4e4d2d9928104e14141bb9a7fdc71308c125e.cu
#include "../THCTensorSort.cuh" #include "THCTensor.hpp" #include "../generic/THCTensorSort.cu" #include <THC/THCGenerateLongType.h>
7ae63f743f7d0b6c319a44deb7cfbb3eb6d617f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <gd.h> #include <gdfontl.h> #include <math.h> void drawImage(double * buffer,int n,const char * fname); double XMIN=-2.0; double XMAX=2.0; double YMIN=-2.0; double YMAX=2.0; int COUNT_MAX=1000; __global__ void pixelValue(int n, double XMIN, double XMAX, double YMIN, double YMAX, int COUNT_MAX, double * img_buffer) { double cx, cy; double tx, ty; double zx, zy; double img_val; int count; double BOUNDARY=5.0; int ij,i,j; // ADD COMMAND HERE TO SET VALUE OF IJ, GLOBAL THREAD INDEX ij = threadIdx.x + blockIdx.x*blockDim.x; while(ij<n*n) { i = ij/n; j = ij%n; cx = XMIN + (double)i/(double)n*(XMAX-XMIN); cy = YMAX - (double)j/(double)n*(YMAX-YMIN); zx = 0.0; zy = 0.0; count=0; img_val=0.0; while(count<COUNT_MAX) { count++; tx = zx*zx-zy*zy+cx; ty = 2*zx*zy+cy; zx = tx; zy = ty; if((zx*zx+zy*zy)>BOUNDARY) { img_val= log((double)count); break; } } img_buffer[ij]=img_val; // ADD COMMAND HERE TO INCREMENT IJ, BY THE GLOBAL SIZE ij += blockDim.x*gridDim.x; } } int main(int argc, char ** argv) { int i,j,ij,n; double * img_buffer; // DEFINE dev_img_buffer double * dev_img_buffer; hipEvent_t start,stop; float time_elapsed=0; hipEventCreate(&start); hipEventCreate(&stop); n=100; i=1; if(argc>i) sscanf(argv[i++],"%d",&n); if(argc>i) sscanf(argv[i++],"%lf",&XMIN); if(argc>i) sscanf(argv[i++],"%lf",&XMAX); if(argc>i) sscanf(argv[i++],"%lf",&YMIN); if(argc>i) sscanf(argv[i++],"%lf",&YMAX); if(argc>i) sscanf(argv[i++],"%d",&COUNT_MAX); printf("Mandelbrot Example\n"); printf("N = %d\n",n); printf("X = %lf -> %lf, Y = %lf -> %lf\n",XMIN,XMAX,YMIN,YMAX); printf("DEPTH = %d\n",COUNT_MAX); img_buffer = (double *)malloc(sizeof(double)*n*n); // ALLOCATE dev_img_buffer hipMalloc((void **)&dev_img_buffer,sizeof(double)*n*n); hipEventRecord(start); // CALL pixelValue with 39 blocks and 64 threads per block hipLaunchKernelGGL(( pixelValue), dim3(39),dim3(64), 0, 0, n, XMIN, XMAX, YMIN, YMAX, COUNT_MAX,dev_img_buffer); // COPY dev_img_buffer to img_buffer hipMemcpy(img_buffer,dev_img_buffer,sizeof(double)*n*n,hipMemcpyDeviceToHost); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&time_elapsed,start,stop); printf("ELAPSED TIME IN CALCULATION LOOP = %lf ms\n",time_elapsed); printf("Drawing image\n"); drawImage(img_buffer,n,"file.png"); free(img_buffer); // FREE dev_img_buffer hipFree((void*)dev_img_buffer); return 0; } gdImagePtr im; FILE *pngout; int black; int white; #define NCOLORS 200 int cmap[NCOLORS]; void drawImage(double * buffer,int n,const char * fname) { int i,j,scale; double max,min,ncolors; im = gdImageCreate(n,n); black = gdImageColorResolve(im, 0, 0, 0); white = gdImageColorResolve(im, 255, 255, 255); ncolors = (double)NCOLORS; for (i=0;i<NCOLORS;i++) { cmap[i] = gdImageColorResolve(im, (int)((ncolors-i)/ncolors*255.0), (int)((ncolors/2.0-abs(ncolors/2.0-i))/(ncolors/2.0)*255.0), (int)((i)/ncolors*255.0) ); } cmap[0] = black; gdImageFilledRectangle(im,0,0,n,n,white); max=buffer[0]; min=buffer[0]; for(i=0;i<n*n;i++) { if(buffer[i]>max) max=buffer[i]; if(buffer[i]<min) min=buffer[i]; } if(max==min) { max += 0.5; min -= 0.5; } for(i=0;i<n;i++) { for(j=0;j<n;j++) { scale = (int)((buffer[i*n+j]-min)/(max-min)*ncolors); if(scale<0) scale=0; if(scale>99) scale=99; gdImageSetPixel(im,i,j,cmap[scale]); } } gdImageRectangle(im,0,0,n-1,n-1,black); pngout = fopen(fname, "wb"); gdImagePng(im, pngout); fclose(pngout); //cleanup gdImageDestroy(im); }
7ae63f743f7d0b6c319a44deb7cfbb3eb6d617f6.cu
#include <stdio.h> #include <stdlib.h> #include <gd.h> #include <gdfontl.h> #include <math.h> void drawImage(double * buffer,int n,const char * fname); double XMIN=-2.0; double XMAX=2.0; double YMIN=-2.0; double YMAX=2.0; int COUNT_MAX=1000; __global__ void pixelValue(int n, double XMIN, double XMAX, double YMIN, double YMAX, int COUNT_MAX, double * img_buffer) { double cx, cy; double tx, ty; double zx, zy; double img_val; int count; double BOUNDARY=5.0; int ij,i,j; // ADD COMMAND HERE TO SET VALUE OF IJ, GLOBAL THREAD INDEX ij = threadIdx.x + blockIdx.x*blockDim.x; while(ij<n*n) { i = ij/n; j = ij%n; cx = XMIN + (double)i/(double)n*(XMAX-XMIN); cy = YMAX - (double)j/(double)n*(YMAX-YMIN); zx = 0.0; zy = 0.0; count=0; img_val=0.0; while(count<COUNT_MAX) { count++; tx = zx*zx-zy*zy+cx; ty = 2*zx*zy+cy; zx = tx; zy = ty; if((zx*zx+zy*zy)>BOUNDARY) { img_val= log((double)count); break; } } img_buffer[ij]=img_val; // ADD COMMAND HERE TO INCREMENT IJ, BY THE GLOBAL SIZE ij += blockDim.x*gridDim.x; } } int main(int argc, char ** argv) { int i,j,ij,n; double * img_buffer; // DEFINE dev_img_buffer double * dev_img_buffer; cudaEvent_t start,stop; float time_elapsed=0; cudaEventCreate(&start); cudaEventCreate(&stop); n=100; i=1; if(argc>i) sscanf(argv[i++],"%d",&n); if(argc>i) sscanf(argv[i++],"%lf",&XMIN); if(argc>i) sscanf(argv[i++],"%lf",&XMAX); if(argc>i) sscanf(argv[i++],"%lf",&YMIN); if(argc>i) sscanf(argv[i++],"%lf",&YMAX); if(argc>i) sscanf(argv[i++],"%d",&COUNT_MAX); printf("Mandelbrot Example\n"); printf("N = %d\n",n); printf("X = %lf -> %lf, Y = %lf -> %lf\n",XMIN,XMAX,YMIN,YMAX); printf("DEPTH = %d\n",COUNT_MAX); img_buffer = (double *)malloc(sizeof(double)*n*n); // ALLOCATE dev_img_buffer cudaMalloc((void **)&dev_img_buffer,sizeof(double)*n*n); cudaEventRecord(start); // CALL pixelValue with 39 blocks and 64 threads per block pixelValue<<<39,64>>>(n, XMIN, XMAX, YMIN, YMAX, COUNT_MAX,dev_img_buffer); // COPY dev_img_buffer to img_buffer cudaMemcpy(img_buffer,dev_img_buffer,sizeof(double)*n*n,cudaMemcpyDeviceToHost); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&time_elapsed,start,stop); printf("ELAPSED TIME IN CALCULATION LOOP = %lf ms\n",time_elapsed); printf("Drawing image\n"); drawImage(img_buffer,n,"file.png"); free(img_buffer); // FREE dev_img_buffer cudaFree((void*)dev_img_buffer); return 0; } gdImagePtr im; FILE *pngout; int black; int white; #define NCOLORS 200 int cmap[NCOLORS]; void drawImage(double * buffer,int n,const char * fname) { int i,j,scale; double max,min,ncolors; im = gdImageCreate(n,n); black = gdImageColorResolve(im, 0, 0, 0); white = gdImageColorResolve(im, 255, 255, 255); ncolors = (double)NCOLORS; for (i=0;i<NCOLORS;i++) { cmap[i] = gdImageColorResolve(im, (int)((ncolors-i)/ncolors*255.0), (int)((ncolors/2.0-abs(ncolors/2.0-i))/(ncolors/2.0)*255.0), (int)((i)/ncolors*255.0) ); } cmap[0] = black; gdImageFilledRectangle(im,0,0,n,n,white); max=buffer[0]; min=buffer[0]; for(i=0;i<n*n;i++) { if(buffer[i]>max) max=buffer[i]; if(buffer[i]<min) min=buffer[i]; } if(max==min) { max += 0.5; min -= 0.5; } for(i=0;i<n;i++) { for(j=0;j<n;j++) { scale = (int)((buffer[i*n+j]-min)/(max-min)*ncolors); if(scale<0) scale=0; if(scale>99) scale=99; gdImageSetPixel(im,i,j,cmap[scale]); } } gdImageRectangle(im,0,0,n-1,n-1,black); pngout = fopen(fname, "wb"); gdImagePng(im, pngout); fclose(pngout); //cleanup gdImageDestroy(im); }
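
Besides the kernel launch, the Mandelbrot pair above exercises the event-timing API, which hipify renames one-for-one (cudaEvent_t -> hipEvent_t, cudaEventRecord -> hipEventRecord, and so on). A minimal sketch of that timing pattern, assuming nvcc; the no-op kernel is illustrative, not from the files, while the 39x64 launch shape mirrors the pair.

// Minimal sketch of the event-timing pattern used in the Mandelbrot pair above.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void noop() { }

int main() {
    cudaEvent_t start, stop;                  // hipEvent_t
    cudaEventCreate(&start);                  // hipEventCreate
    cudaEventCreate(&stop);

    cudaEventRecord(start);                   // hipEventRecord
    noop<<<39, 64>>>();                       // hipLaunchKernelGGL(( noop), dim3(39), dim3(64), 0, 0, );
    cudaEventRecord(stop);

    cudaEventSynchronize(stop);               // hipEventSynchronize
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);   // hipEventElapsedTime
    std::printf("elapsed = %f ms\n", ms);

    cudaEventDestroy(start);                  // hipEventDestroy
    cudaEventDestroy(stop);
    return 0;
}
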
17fd903490bd8fc4c77fb1eede950a769f375f62.hip
// !!! This is a file automatically generated by hipify!!! #include "CPUGPUMemory.cuh" #include "RandUtils.cuh" #include <cstdlib> #include <cstdio> #include <cstring> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" CPUGPUMemory::CPUGPUMemory(bool _is_float, int _size, float _initValues) { is_float = _is_float; size = _size; memCPU = is_float ? (void*)new float[size] : (void*)new int[size]; memset(memCPU, 0, size * (is_float ? sizeof(float) : sizeof(int))); if (_initValues != 0) { if (is_float) { float *t = (float*)memCPU; for (int i = 0; i < size; i++) { t[i] = (getRand() * 2.0f - 1.0f) * _initValues; } } } hipError_t cudaStatus = hipMalloc((void**)&memGPU, size * (is_float ? sizeof(float) : sizeof(int))); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); exit(-1); } CopyCPUtoGPU(); } void CPUGPUMemory::Resize(int newSize) { size = newSize; } CPUGPUMemory::~CPUGPUMemory() { if (is_float) { delete[] (float*)memCPU; } else { delete[] (int*)memCPU; } hipFree(memGPU); } void CPUGPUMemory::CopyCPUtoGPU() { hipError_t cudaStatus = hipMemcpy(memGPU, memCPU, size * (is_float ? sizeof(float) : sizeof(int)), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); exit(-1); } } void CPUGPUMemory::CopyGPUtoCPU() { hipError_t cudaStatus = hipMemcpy(memCPU, memGPU, size * (is_float ? sizeof(float) : sizeof(int)), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); exit(-1); } } void* CPUGPUMemory::GetCPUMemory() { return memCPU; } void* CPUGPUMemory::GetGPUMemory() { return memGPU; } int CPUGPUMemory::GetSize() { return size; } void CPUGPUMemory::Reset() { memset(memCPU, 0, size * (is_float ? sizeof(float) : sizeof(int))); CopyCPUtoGPU(); } void CPUGPUMemory::SaveToFile(ofstream &os) { CopyGPUtoCPU(); os.write((char*)memCPU, size * (is_float ? sizeof(float) : sizeof(int))); } void CPUGPUMemory::LoadFromFile(ifstream &is) { is.read((char*)memCPU, size * (is_float ? sizeof(float) : sizeof(int))); CopyCPUtoGPU(); }
17fd903490bd8fc4c77fb1eede950a769f375f62.cu
#include "CPUGPUMemory.cuh" #include "RandUtils.cuh" #include <cstdlib> #include <cstdio> #include <cstring> #include "cuda_runtime.h" #include "device_launch_parameters.h" CPUGPUMemory::CPUGPUMemory(bool _is_float, int _size, float _initValues) { is_float = _is_float; size = _size; memCPU = is_float ? (void*)new float[size] : (void*)new int[size]; memset(memCPU, 0, size * (is_float ? sizeof(float) : sizeof(int))); if (_initValues != 0) { if (is_float) { float *t = (float*)memCPU; for (int i = 0; i < size; i++) { t[i] = (getRand() * 2.0f - 1.0f) * _initValues; } } } cudaError_t cudaStatus = cudaMalloc((void**)&memGPU, size * (is_float ? sizeof(float) : sizeof(int))); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); exit(-1); } CopyCPUtoGPU(); } void CPUGPUMemory::Resize(int newSize) { size = newSize; } CPUGPUMemory::~CPUGPUMemory() { if (is_float) { delete[] (float*)memCPU; } else { delete[] (int*)memCPU; } cudaFree(memGPU); } void CPUGPUMemory::CopyCPUtoGPU() { cudaError_t cudaStatus = cudaMemcpy(memGPU, memCPU, size * (is_float ? sizeof(float) : sizeof(int)), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); exit(-1); } } void CPUGPUMemory::CopyGPUtoCPU() { cudaError_t cudaStatus = cudaMemcpy(memCPU, memGPU, size * (is_float ? sizeof(float) : sizeof(int)), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); exit(-1); } } void* CPUGPUMemory::GetCPUMemory() { return memCPU; } void* CPUGPUMemory::GetGPUMemory() { return memGPU; } int CPUGPUMemory::GetSize() { return size; } void CPUGPUMemory::Reset() { memset(memCPU, 0, size * (is_float ? sizeof(float) : sizeof(int))); CopyCPUtoGPU(); } void CPUGPUMemory::SaveToFile(ofstream &os) { CopyGPUtoCPU(); os.write((char*)memCPU, size * (is_float ? sizeof(float) : sizeof(int))); } void CPUGPUMemory::LoadFromFile(ifstream &is) { is.read((char*)memCPU, size * (is_float ? sizeof(float) : sizeof(int))); CopyCPUtoGPU(); }
5fb74f595eb8120981854582ccfd55b3c80394d4.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>

using namespace std;

int main() {
    int * input;
    int * output;
    int * input_d;
    int * output_d;

    input = (int*)malloc(2*sizeof(int));
    output = (int*)malloc(2*sizeof(int));
    input[0] = 10;
    input[1] = 20;
    output[0] = 0;
    output[1] = 0;

    cout << "Before the copy kernel." << endl;
    cout << "Input value:\t" << input[0] << "\t" << input[1] << endl;
    cout << "Output value:\t" << output[0] << "\t" << output[1] << endl;

    hipMalloc((void**)&input_d, 2*sizeof(int));
    hipMalloc((void**)&output_d, 2*sizeof(int));
    hipMemcpy(input_d, input, 2*sizeof(int), hipMemcpyHostToDevice);

    hipModule_t module;
    hipFunction_t kernel;
    hipModuleLoad(&module, "copy.cubin");
    hipModuleGetFunction(&kernel, module, "kern");

    void * args[2] = {&input_d, &output_d};
    hipModuleLaunchKernel(kernel, 1, 1, 1, 1, 1, 1, 0, 0, args, 0);
    hipDeviceSynchronize();

    hipMemcpy(output, output_d, 2*sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(input, input_d, 2*sizeof(int), hipMemcpyDeviceToHost);

    cout << "After the copy kernel." << endl;
    cout << "Input value:\t" << input[0] << "\t" << input[1] << endl;
    cout << "Output value:\t" << output[0] << "\t" << output[1] << endl;

    return 0;
}
5fb74f595eb8120981854582ccfd55b3c80394d4.cu
#include <cuda.h>
#include <iostream>

using namespace std;

int main() {
    int * input;
    int * output;
    int * input_d;
    int * output_d;

    input = (int*)malloc(2*sizeof(int));
    output = (int*)malloc(2*sizeof(int));
    input[0] = 10;
    input[1] = 20;
    output[0] = 0;
    output[1] = 0;

    cout << "Before the copy kernel." << endl;
    cout << "Input value:\t" << input[0] << "\t" << input[1] << endl;
    cout << "Output value:\t" << output[0] << "\t" << output[1] << endl;

    cudaMalloc((void**)&input_d, 2*sizeof(int));
    cudaMalloc((void**)&output_d, 2*sizeof(int));
    cudaMemcpy(input_d, input, 2*sizeof(int), cudaMemcpyHostToDevice);

    CUmodule module;
    CUfunction kernel;
    cuModuleLoad(&module, "copy.cubin");
    cuModuleGetFunction(&kernel, module, "kern");

    void * args[2] = {&input_d, &output_d};
    cuLaunchKernel(kernel, 1, 1, 1, 1, 1, 1, 0, 0, args, 0);
    cudaDeviceSynchronize();

    cudaMemcpy(output, output_d, 2*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(input, input_d, 2*sizeof(int), cudaMemcpyDeviceToHost);

    cout << "After the copy kernel." << endl;
    cout << "Input value:\t" << input[0] << "\t" << input[1] << endl;
    cout << "Output value:\t" << output[0] << "\t" << output[1] << endl;

    return 0;
}
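
The copy-kernel pair above is the only driver-API example in this batch: CUmodule/CUfunction become hipModule_t/hipFunction_t, cuModuleLoad/cuModuleGetFunction become hipModuleLoad/hipModuleGetFunction, and cuLaunchKernel becomes hipModuleLaunchKernel with the same argument layout. A sketch of that loading path with the error checks the original omits, assuming nvcc and, as in the pair, a module file "copy.cubin" exporting "kern"; everything else is illustrative scaffolding.

// Sketch of the driver-API module loading path from the pair above, with error checks added.
#include <cuda.h>
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    int* input_d = nullptr;
    int* output_d = nullptr;
    cudaMalloc((void**)&input_d, 2 * sizeof(int));     // hipMalloc
    cudaMalloc((void**)&output_d, 2 * sizeof(int));    // hipMalloc

    CUmodule module;                                   // hipModule_t
    CUfunction kernel;                                 // hipFunction_t
    if (cuModuleLoad(&module, "copy.cubin") != CUDA_SUCCESS) {            // hipModuleLoad
        std::fprintf(stderr, "cuModuleLoad failed\n");
        return 1;
    }
    if (cuModuleGetFunction(&kernel, module, "kern") != CUDA_SUCCESS) {   // hipModuleGetFunction
        std::fprintf(stderr, "cuModuleGetFunction failed\n");
        return 1;
    }

    void* args[2] = {&input_d, &output_d};
    // cuLaunchKernel -> hipModuleLaunchKernel (same grid/block/shared/stream/args layout)
    cuLaunchKernel(kernel, 1, 1, 1, 1, 1, 1, 0, 0, args, 0);
    cudaDeviceSynchronize();                           // hipDeviceSynchronize

    cudaFree(input_d);                                 // hipFree
    cudaFree(output_d);
    return 0;
}
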
3975fde90e3ec868458f5557aa03c6aeeeb939cd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void IndexInteranlNode(bool *forest, int base, int step)
{
    int left = 2*(base+threadIdx.x);
    int right = left + 1;
    int offset = blockIdx.x*step;
    forest[offset+base+threadIdx.x] = (forest[offset+left]&&forest[offset+right]);
}
3975fde90e3ec868458f5557aa03c6aeeeb939cd.cu
#include "includes.h" __global__ void IndexInteranlNode(bool *forest, int base, int step) { int left = 2*(base+threadIdx.x); int right = left + 1; int offset = blockIdx.x*step; forest[offset+base+threadIdx.x] = (forest[offset+left]&&forest[offset+right]); }
d06b2c4241217766d0ba0d43f7f3e0fe918c349d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "myers-common.h" #define NUM_BITS 4 #define NUM_BASES 5 #define BASES_PER_THREAD 128 #define BASES_PER_ENTRY 8 #define SIZE_GPU_HW_WORD 32 #define SIZE_WARP 32 #define HIGH_MASK_32 0x80000000 #define LOW_MASK_32 0x00000001 #define MAX_VALUE 0xFFFFFFFF #ifndef DEVICE #define DEVICE 0 #endif // output temporal carry in internal register #define UADD__CARRY_OUT(c, a, b) \ asm volatile("add.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b)); // add & output with temporal carry of internal register #define UADD__IN_CARRY_OUT(c, a, b) \ asm volatile("addc.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b)); // add with temporal carry of internal register #define UADD__IN_CARRY(c, a, b) \ asm volatile("addc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b)); inline __device__ void shuffle_collaborative_shift_CC35(uint32_t value_A, uint32_t value_B, uint32_t value_C, uint32_t value_D, const uint32_t localThreadIdx, uint32_t* res_A, uint32_t* res_B, uint32_t* res_C, uint32_t* res_D) { uint32_t carry; carry = __shfl_up((int) value_D, 1); carry = (localThreadIdx) ? carry : 0; value_D = __funnelshift_lc(value_C, value_D, 1); value_C = __funnelshift_lc(value_B, value_C, 1); value_B = __funnelshift_lc(value_A, value_B, 1); value_A = __funnelshift_lc(carry, value_A, 1); (* res_A) = value_A; (* res_B) = value_B; (* res_C) = value_C; (* res_D) = value_D; } inline __device__ void shuffle_collaborative_sum_CC35(const uint32_t a_A, const uint32_t a_B, const uint32_t a_C, const uint32_t a_D, const uint32_t b_A, const uint32_t b_B, const uint32_t b_C, const uint32_t b_D, const uint32_t localThreadIdx, uint32_t* sum_A, uint32_t* sum_B, uint32_t* sum_C, uint32_t* sum_D) { uint32_t carry, c_A, c_B, c_C, c_D; UADD__CARRY_OUT (c_A, a_A, b_A) UADD__IN_CARRY_OUT(c_B, a_B, b_B) UADD__IN_CARRY_OUT(c_C, a_C, b_C) UADD__IN_CARRY_OUT(c_D, a_D, b_D) UADD__IN_CARRY (carry, 0, 0) while(__any(carry)){ carry = __shfl_up((int) (carry), 1); carry = (localThreadIdx) ? carry : 0; UADD__CARRY_OUT (c_A, c_A, carry) UADD__IN_CARRY_OUT(c_B, c_B, 0) UADD__IN_CARRY_OUT(c_C, c_C, 0) UADD__IN_CARRY_OUT(c_D, c_D, 0) UADD__IN_CARRY (carry, 0, 0) } (* sum_A) = c_A; (* sum_B) = c_B; (* sum_C) = c_C; (* sum_D) = c_D; } inline __device__ uint32_t selectEq_CC35(const uint32_t indexBase, const uint32_t Eq0, const uint32_t Eq1, const uint32_t Eq2, const uint32_t Eq3, const uint32_t Eq4) { uint32_t Eq = Eq0; Eq = (indexBase == 1) ? Eq1 : Eq; Eq = (indexBase == 2) ? Eq2 : Eq; Eq = (indexBase == 3) ? Eq3 : Eq; Eq = (indexBase == 4) ? Eq4 : Eq; return Eq; } inline __device__ uint32_t select_CC35(const uint32_t indexWord, const uint32_t A, const uint32_t B, const uint32_t C, const uint32_t D) { uint32_t value = A; value = (indexWord == 1) ? B : value; value = (indexWord == 2) ? C : value; value = (indexWord == 3) ? 
D : value; return value; } __device__ void myerslocalKeplerKernel_CC35( const d_qryEntry_t *d_queries, const uint32_t * __restrict d_reference, const candInfo_t *d_candidates, const uint32_t *d_reorderBuffer, resEntry_t *d_reorderResults, const qryInfo_t *d_qinfo, const uint32_t idCandidate, const uint32_t sizeRef, const uint32_t numReorderedResults, const float distance, const uint32_t intraQueryThreadIdx, const uint32_t threadsPerQuery) { if (idCandidate < numReorderedResults){ const uint32_t * __restrict localCandidate; uint32_t Ph_A, Mh_A, Pv_A, Mv_A, Xv_A, Xh_A, Eq_A, tEq_A; uint32_t Ph_B, Mh_B, Pv_B, Mv_B, Xv_B, Xh_B, Eq_B, tEq_B; uint32_t Ph_C, Mh_C, Pv_C, Mv_C, Xv_C, Xh_C, Eq_C, tEq_C; uint32_t Ph_D, Mh_D, Pv_D, Mv_D, Xv_D, Xh_D, Eq_D, tEq_D; uint4 Eq0, Eq1, Eq2, Eq3, Eq4; uint32_t PH, MH, indexWord; uint32_t sum_A, sum_B, sum_C, sum_D; const uint32_t originalCandidate = d_reorderBuffer[idCandidate]; const uint64_t positionRef = d_candidates[originalCandidate].position; const uint32_t sizeQuery = d_qinfo[d_candidates[originalCandidate].query].size; const uint32_t entry = d_qinfo[d_candidates[originalCandidate].query].posEntry + intraQueryThreadIdx; const uint32_t sizeCandidate = sizeQuery * (1 + 2 * distance); const uint32_t numEntriesPerCandidate = (sizeCandidate / BASES_PER_ENTRY) + ((sizeCandidate % BASES_PER_ENTRY) ? 2 : 1); uint32_t candidate; const uint32_t mask = ((sizeQuery % SIZE_GPU_HW_WORD) == 0) ? HIGH_MASK_32 : 1 << ((sizeQuery % SIZE_GPU_HW_WORD) - 1); int32_t score = sizeQuery, minScore = sizeQuery; uint32_t idColumn = 0, minColumn = 0, indexBase; uint32_t intraBase, idEntry; indexWord = ((sizeQuery - 1) & (BASES_PER_THREAD - 1)) / SIZE_GPU_HW_WORD; if((positionRef < sizeRef) && ((sizeRef - positionRef) > sizeCandidate)){ localCandidate = d_reference + (positionRef / BASES_PER_ENTRY); Pv_A = MAX_VALUE; Mv_A = 0; Pv_B = MAX_VALUE; Mv_B = 0; Pv_C = MAX_VALUE; Mv_C = 0; Pv_D = MAX_VALUE; Mv_D = 0; Eq0 = d_queries[entry].bitmap[0]; Eq1 = d_queries[entry].bitmap[1]; Eq2 = d_queries[entry].bitmap[2]; Eq3 = d_queries[entry].bitmap[3]; Eq4 = d_queries[entry].bitmap[4]; for(idEntry = 0; idEntry < numEntriesPerCandidate; idEntry++){ candidate = localCandidate[idEntry]; for(intraBase = 0; intraBase < BASES_PER_ENTRY; intraBase++){ indexBase = candidate & 0x07; Eq_A = selectEq_CC35(indexBase, Eq0.x, Eq1.x, Eq2.x, Eq3.x, Eq4.x); Eq_B = selectEq_CC35(indexBase, Eq0.y, Eq1.y, Eq2.y, Eq3.y, Eq4.y); Eq_C = selectEq_CC35(indexBase, Eq0.z, Eq1.z, Eq2.z, Eq3.z, Eq4.z); Eq_D = selectEq_CC35(indexBase, Eq0.w, Eq1.w, Eq2.w, Eq3.w, Eq4.w); Xv_A = Eq_A | Mv_A; Xv_B = Eq_B | Mv_B; Xv_C = Eq_C | Mv_C; Xv_D = Eq_D | Mv_D; tEq_A = Eq_A & Pv_A; tEq_B = Eq_B & Pv_B; tEq_C = Eq_C & Pv_C; tEq_D = Eq_D & Pv_D; shuffle_collaborative_sum_CC35(tEq_A, tEq_B, tEq_C, tEq_D, Pv_A, Pv_B, Pv_C, Pv_D, intraQueryThreadIdx, &sum_A, &sum_B, &sum_C, &sum_D); Xh_A = (sum_A ^ Pv_A) | Eq_A; Xh_B = (sum_B ^ Pv_B) | Eq_B; Xh_C = (sum_C ^ Pv_C) | Eq_C; Xh_D = (sum_D ^ Pv_D) | Eq_D; Ph_A = Mv_A | ~(Xh_A | Pv_A); Ph_B = Mv_B | ~(Xh_B | Pv_B); Ph_C = Mv_C | ~(Xh_C | Pv_C); Ph_D = Mv_D | ~(Xh_D | Pv_D); Mh_A = Pv_A & Xh_A; Mh_B = Pv_B & Xh_B; Mh_C = Pv_C & Xh_C; Mh_D = Pv_D & Xh_D; PH = select_CC35(indexWord, Ph_A, Ph_B, Ph_C, Ph_D); MH = select_CC35(indexWord, Mh_A, Mh_B, Mh_C, Mh_D); score += (((PH & mask) != 0) - ((MH & mask) != 0)); shuffle_collaborative_shift_CC35(Ph_A, Ph_B, Ph_C, Ph_D, intraQueryThreadIdx, &Ph_A, &Ph_B, &Ph_C, &Ph_D); shuffle_collaborative_shift_CC35(Mh_A, Mh_B, Mh_C, Mh_D, intraQueryThreadIdx, 
&Mh_A, &Mh_B, &Mh_C, &Mh_D); Pv_A = Mh_A | ~(Xv_A | Ph_A); Pv_B = Mh_B | ~(Xv_B | Ph_B); Pv_C = Mh_C | ~(Xv_C | Ph_C); Pv_D = Mh_D | ~(Xv_D | Ph_D); Mv_A = Ph_A & Xv_A; Mv_B = Ph_B & Xv_B; Mv_C = Ph_C & Xv_C; Mv_D = Ph_D & Xv_D; candidate >>= NUM_BITS; minColumn = (score < minScore) ? idColumn : minColumn; minScore = (score < minScore) ? score : minScore; if(intraQueryThreadIdx == (threadsPerQuery - 1)) idColumn++; } } if(intraQueryThreadIdx == (threadsPerQuery - 1)){ d_reorderResults[idCandidate].column = minColumn/* - (positionRef % BASES_PER_ENTRY)*/; d_reorderResults[idCandidate].score = minScore; } } } } __global__ void myersKeplerKernel_CC35(const d_qryEntry_t *d_queries, const uint32_t * d_reference, const candInfo_t *d_candidates, const uint32_t *d_reorderBuffer, resEntry_t *d_reorderResults, const qryInfo_t *d_qinfo, const uint32_t sizeRef, const uint32_t numReorderedResults, const float distance, uint32_t *d_initPosPerBucket, uint32_t *d_initWarpPerBucket, uint32_t numWarps) { uint32_t bucketIdx = 0; uint32_t globalThreadIdx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t globalWarpIdx = globalThreadIdx / SIZE_WARP; uint32_t localThreadInTheBucket, idCandidate, intraQueryThreadIdx, threadsPerQuery, queriesPerWarp, localIdCandidateInTheBucket; while((bucketIdx != (SIZE_WARP + 1)) && (d_initWarpPerBucket[bucketIdx] <= globalWarpIdx)){ bucketIdx++; } bucketIdx--; localThreadInTheBucket = globalThreadIdx - (d_initWarpPerBucket[bucketIdx] * SIZE_WARP); threadsPerQuery = bucketIdx + 1; queriesPerWarp = SIZE_WARP / threadsPerQuery; localIdCandidateInTheBucket = ((localThreadInTheBucket / SIZE_WARP) * queriesPerWarp) + ((threadIdx.x % SIZE_WARP) / threadsPerQuery); idCandidate = d_initPosPerBucket[bucketIdx] + localIdCandidateInTheBucket; intraQueryThreadIdx = (threadIdx.x % SIZE_WARP) % threadsPerQuery; myerslocalKeplerKernel_CC35(d_queries, d_reference, d_candidates, d_reorderBuffer, d_reorderResults, d_qinfo, idCandidate, sizeRef, numReorderedResults, distance, intraQueryThreadIdx, threadsPerQuery); } extern "C" myersError_t processMyersBufferOnKepler2ndGen(buffer_t *mBuff) { reference_buffer_t *ref = mBuff->reference; queries_buffer_t *qry = mBuff->queries; candidates_buffer_t *cand = mBuff->candidates; reorder_buffer_t *rebuff = mBuff->reorderBuffer; results_buffer_t *res = mBuff->results; hipStream_t idStream = mBuff->idStream; uint32_t threadsPerBlock = 128; uint32_t numThreads = rebuff->numWarps * SIZE_WARP; uint32_t blocksPerGrid = (numThreads / threadsPerBlock) + ((numThreads % threadsPerBlock) ? 1 : 0); if(DEVICE == 0){ //printf("KEPLER: LAUNCH KERNEL 0 -- Bloques: %d - Th_block %d\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( myersKeplerKernel_CC35), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, idStream, (d_qryEntry_t *)qry->d_queries, ref->d_reference, cand->d_candidates, rebuff->d_reorderBuffer, res->d_reorderResults, qry->d_qinfo, ref->size, res->numReorderedResults, qry->distance, rebuff->d_initPosPerBucket, rebuff->d_initWarpPerBucket, rebuff->numWarps); } return(SUCCESS); }
d06b2c4241217766d0ba0d43f7f3e0fe918c349d.cu
#include <stdio.h> #include "myers-common.h" #define NUM_BITS 4 #define NUM_BASES 5 #define BASES_PER_THREAD 128 #define BASES_PER_ENTRY 8 #define SIZE_GPU_HW_WORD 32 #define SIZE_WARP 32 #define HIGH_MASK_32 0x80000000 #define LOW_MASK_32 0x00000001 #define MAX_VALUE 0xFFFFFFFF #ifndef DEVICE #define DEVICE 0 #endif // output temporal carry in internal register #define UADD__CARRY_OUT(c, a, b) \ asm volatile("add.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b)); // add & output with temporal carry of internal register #define UADD__IN_CARRY_OUT(c, a, b) \ asm volatile("addc.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b)); // add with temporal carry of internal register #define UADD__IN_CARRY(c, a, b) \ asm volatile("addc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b)); inline __device__ void shuffle_collaborative_shift_CC35(uint32_t value_A, uint32_t value_B, uint32_t value_C, uint32_t value_D, const uint32_t localThreadIdx, uint32_t* res_A, uint32_t* res_B, uint32_t* res_C, uint32_t* res_D) { uint32_t carry; carry = __shfl_up((int) value_D, 1); carry = (localThreadIdx) ? carry : 0; value_D = __funnelshift_lc(value_C, value_D, 1); value_C = __funnelshift_lc(value_B, value_C, 1); value_B = __funnelshift_lc(value_A, value_B, 1); value_A = __funnelshift_lc(carry, value_A, 1); (* res_A) = value_A; (* res_B) = value_B; (* res_C) = value_C; (* res_D) = value_D; } inline __device__ void shuffle_collaborative_sum_CC35(const uint32_t a_A, const uint32_t a_B, const uint32_t a_C, const uint32_t a_D, const uint32_t b_A, const uint32_t b_B, const uint32_t b_C, const uint32_t b_D, const uint32_t localThreadIdx, uint32_t* sum_A, uint32_t* sum_B, uint32_t* sum_C, uint32_t* sum_D) { uint32_t carry, c_A, c_B, c_C, c_D; UADD__CARRY_OUT (c_A, a_A, b_A) UADD__IN_CARRY_OUT(c_B, a_B, b_B) UADD__IN_CARRY_OUT(c_C, a_C, b_C) UADD__IN_CARRY_OUT(c_D, a_D, b_D) UADD__IN_CARRY (carry, 0, 0) while(__any(carry)){ carry = __shfl_up((int) (carry), 1); carry = (localThreadIdx) ? carry : 0; UADD__CARRY_OUT (c_A, c_A, carry) UADD__IN_CARRY_OUT(c_B, c_B, 0) UADD__IN_CARRY_OUT(c_C, c_C, 0) UADD__IN_CARRY_OUT(c_D, c_D, 0) UADD__IN_CARRY (carry, 0, 0) } (* sum_A) = c_A; (* sum_B) = c_B; (* sum_C) = c_C; (* sum_D) = c_D; } inline __device__ uint32_t selectEq_CC35(const uint32_t indexBase, const uint32_t Eq0, const uint32_t Eq1, const uint32_t Eq2, const uint32_t Eq3, const uint32_t Eq4) { uint32_t Eq = Eq0; Eq = (indexBase == 1) ? Eq1 : Eq; Eq = (indexBase == 2) ? Eq2 : Eq; Eq = (indexBase == 3) ? Eq3 : Eq; Eq = (indexBase == 4) ? Eq4 : Eq; return Eq; } inline __device__ uint32_t select_CC35(const uint32_t indexWord, const uint32_t A, const uint32_t B, const uint32_t C, const uint32_t D) { uint32_t value = A; value = (indexWord == 1) ? B : value; value = (indexWord == 2) ? C : value; value = (indexWord == 3) ? 
D : value; return value; } __device__ void myerslocalKeplerKernel_CC35( const d_qryEntry_t *d_queries, const uint32_t * __restrict d_reference, const candInfo_t *d_candidates, const uint32_t *d_reorderBuffer, resEntry_t *d_reorderResults, const qryInfo_t *d_qinfo, const uint32_t idCandidate, const uint32_t sizeRef, const uint32_t numReorderedResults, const float distance, const uint32_t intraQueryThreadIdx, const uint32_t threadsPerQuery) { if (idCandidate < numReorderedResults){ const uint32_t * __restrict localCandidate; uint32_t Ph_A, Mh_A, Pv_A, Mv_A, Xv_A, Xh_A, Eq_A, tEq_A; uint32_t Ph_B, Mh_B, Pv_B, Mv_B, Xv_B, Xh_B, Eq_B, tEq_B; uint32_t Ph_C, Mh_C, Pv_C, Mv_C, Xv_C, Xh_C, Eq_C, tEq_C; uint32_t Ph_D, Mh_D, Pv_D, Mv_D, Xv_D, Xh_D, Eq_D, tEq_D; uint4 Eq0, Eq1, Eq2, Eq3, Eq4; uint32_t PH, MH, indexWord; uint32_t sum_A, sum_B, sum_C, sum_D; const uint32_t originalCandidate = d_reorderBuffer[idCandidate]; const uint64_t positionRef = d_candidates[originalCandidate].position; const uint32_t sizeQuery = d_qinfo[d_candidates[originalCandidate].query].size; const uint32_t entry = d_qinfo[d_candidates[originalCandidate].query].posEntry + intraQueryThreadIdx; const uint32_t sizeCandidate = sizeQuery * (1 + 2 * distance); const uint32_t numEntriesPerCandidate = (sizeCandidate / BASES_PER_ENTRY) + ((sizeCandidate % BASES_PER_ENTRY) ? 2 : 1); uint32_t candidate; const uint32_t mask = ((sizeQuery % SIZE_GPU_HW_WORD) == 0) ? HIGH_MASK_32 : 1 << ((sizeQuery % SIZE_GPU_HW_WORD) - 1); int32_t score = sizeQuery, minScore = sizeQuery; uint32_t idColumn = 0, minColumn = 0, indexBase; uint32_t intraBase, idEntry; indexWord = ((sizeQuery - 1) & (BASES_PER_THREAD - 1)) / SIZE_GPU_HW_WORD; if((positionRef < sizeRef) && ((sizeRef - positionRef) > sizeCandidate)){ localCandidate = d_reference + (positionRef / BASES_PER_ENTRY); Pv_A = MAX_VALUE; Mv_A = 0; Pv_B = MAX_VALUE; Mv_B = 0; Pv_C = MAX_VALUE; Mv_C = 0; Pv_D = MAX_VALUE; Mv_D = 0; Eq0 = d_queries[entry].bitmap[0]; Eq1 = d_queries[entry].bitmap[1]; Eq2 = d_queries[entry].bitmap[2]; Eq3 = d_queries[entry].bitmap[3]; Eq4 = d_queries[entry].bitmap[4]; for(idEntry = 0; idEntry < numEntriesPerCandidate; idEntry++){ candidate = localCandidate[idEntry]; for(intraBase = 0; intraBase < BASES_PER_ENTRY; intraBase++){ indexBase = candidate & 0x07; Eq_A = selectEq_CC35(indexBase, Eq0.x, Eq1.x, Eq2.x, Eq3.x, Eq4.x); Eq_B = selectEq_CC35(indexBase, Eq0.y, Eq1.y, Eq2.y, Eq3.y, Eq4.y); Eq_C = selectEq_CC35(indexBase, Eq0.z, Eq1.z, Eq2.z, Eq3.z, Eq4.z); Eq_D = selectEq_CC35(indexBase, Eq0.w, Eq1.w, Eq2.w, Eq3.w, Eq4.w); Xv_A = Eq_A | Mv_A; Xv_B = Eq_B | Mv_B; Xv_C = Eq_C | Mv_C; Xv_D = Eq_D | Mv_D; tEq_A = Eq_A & Pv_A; tEq_B = Eq_B & Pv_B; tEq_C = Eq_C & Pv_C; tEq_D = Eq_D & Pv_D; shuffle_collaborative_sum_CC35(tEq_A, tEq_B, tEq_C, tEq_D, Pv_A, Pv_B, Pv_C, Pv_D, intraQueryThreadIdx, &sum_A, &sum_B, &sum_C, &sum_D); Xh_A = (sum_A ^ Pv_A) | Eq_A; Xh_B = (sum_B ^ Pv_B) | Eq_B; Xh_C = (sum_C ^ Pv_C) | Eq_C; Xh_D = (sum_D ^ Pv_D) | Eq_D; Ph_A = Mv_A | ~(Xh_A | Pv_A); Ph_B = Mv_B | ~(Xh_B | Pv_B); Ph_C = Mv_C | ~(Xh_C | Pv_C); Ph_D = Mv_D | ~(Xh_D | Pv_D); Mh_A = Pv_A & Xh_A; Mh_B = Pv_B & Xh_B; Mh_C = Pv_C & Xh_C; Mh_D = Pv_D & Xh_D; PH = select_CC35(indexWord, Ph_A, Ph_B, Ph_C, Ph_D); MH = select_CC35(indexWord, Mh_A, Mh_B, Mh_C, Mh_D); score += (((PH & mask) != 0) - ((MH & mask) != 0)); shuffle_collaborative_shift_CC35(Ph_A, Ph_B, Ph_C, Ph_D, intraQueryThreadIdx, &Ph_A, &Ph_B, &Ph_C, &Ph_D); shuffle_collaborative_shift_CC35(Mh_A, Mh_B, Mh_C, Mh_D, intraQueryThreadIdx, 
&Mh_A, &Mh_B, &Mh_C, &Mh_D); Pv_A = Mh_A | ~(Xv_A | Ph_A); Pv_B = Mh_B | ~(Xv_B | Ph_B); Pv_C = Mh_C | ~(Xv_C | Ph_C); Pv_D = Mh_D | ~(Xv_D | Ph_D); Mv_A = Ph_A & Xv_A; Mv_B = Ph_B & Xv_B; Mv_C = Ph_C & Xv_C; Mv_D = Ph_D & Xv_D; candidate >>= NUM_BITS; minColumn = (score < minScore) ? idColumn : minColumn; minScore = (score < minScore) ? score : minScore; if(intraQueryThreadIdx == (threadsPerQuery - 1)) idColumn++; } } if(intraQueryThreadIdx == (threadsPerQuery - 1)){ d_reorderResults[idCandidate].column = minColumn/* - (positionRef % BASES_PER_ENTRY)*/; d_reorderResults[idCandidate].score = minScore; } } } } __global__ void myersKeplerKernel_CC35(const d_qryEntry_t *d_queries, const uint32_t * d_reference, const candInfo_t *d_candidates, const uint32_t *d_reorderBuffer, resEntry_t *d_reorderResults, const qryInfo_t *d_qinfo, const uint32_t sizeRef, const uint32_t numReorderedResults, const float distance, uint32_t *d_initPosPerBucket, uint32_t *d_initWarpPerBucket, uint32_t numWarps) { uint32_t bucketIdx = 0; uint32_t globalThreadIdx = blockIdx.x * blockDim.x + threadIdx.x; uint32_t globalWarpIdx = globalThreadIdx / SIZE_WARP; uint32_t localThreadInTheBucket, idCandidate, intraQueryThreadIdx, threadsPerQuery, queriesPerWarp, localIdCandidateInTheBucket; while((bucketIdx != (SIZE_WARP + 1)) && (d_initWarpPerBucket[bucketIdx] <= globalWarpIdx)){ bucketIdx++; } bucketIdx--; localThreadInTheBucket = globalThreadIdx - (d_initWarpPerBucket[bucketIdx] * SIZE_WARP); threadsPerQuery = bucketIdx + 1; queriesPerWarp = SIZE_WARP / threadsPerQuery; localIdCandidateInTheBucket = ((localThreadInTheBucket / SIZE_WARP) * queriesPerWarp) + ((threadIdx.x % SIZE_WARP) / threadsPerQuery); idCandidate = d_initPosPerBucket[bucketIdx] + localIdCandidateInTheBucket; intraQueryThreadIdx = (threadIdx.x % SIZE_WARP) % threadsPerQuery; myerslocalKeplerKernel_CC35(d_queries, d_reference, d_candidates, d_reorderBuffer, d_reorderResults, d_qinfo, idCandidate, sizeRef, numReorderedResults, distance, intraQueryThreadIdx, threadsPerQuery); } extern "C" myersError_t processMyersBufferOnKepler2ndGen(buffer_t *mBuff) { reference_buffer_t *ref = mBuff->reference; queries_buffer_t *qry = mBuff->queries; candidates_buffer_t *cand = mBuff->candidates; reorder_buffer_t *rebuff = mBuff->reorderBuffer; results_buffer_t *res = mBuff->results; cudaStream_t idStream = mBuff->idStream; uint32_t threadsPerBlock = 128; uint32_t numThreads = rebuff->numWarps * SIZE_WARP; uint32_t blocksPerGrid = (numThreads / threadsPerBlock) + ((numThreads % threadsPerBlock) ? 1 : 0); if(DEVICE == 0){ //printf("KEPLER: LAUNCH KERNEL 0 -- Bloques: %d - Th_block %d\n", blocksPerGrid, threadsPerBlock); myersKeplerKernel_CC35<<<blocksPerGrid, threadsPerBlock, 0, idStream>>>((d_qryEntry_t *)qry->d_queries, ref->d_reference, cand->d_candidates, rebuff->d_reorderBuffer, res->d_reorderResults, qry->d_qinfo, ref->size, res->numReorderedResults, qry->distance, rebuff->d_initPosPerBucket, rebuff->d_initWarpPerBucket, rebuff->numWarps); } return(SUCCESS); }
e5991b23b47d6d69bc5f60507d94306d5460e90e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /************************************************************************************\ * * * Copyright 2014 Advanced Micro Devices, Inc. * * Copyright (c) 2015 Mark D. Hill and David A. Wood * * All rights reserved. * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted provided that the following are met: * * * * You must reproduce the above copyright notice. * * * * Neither the name of the copyright holder nor the names of its contributors * * may be used to endorse or promote products derived from this software * * without specific, prior, written permission from at least the copyright holder. * * * * You must include the following terms in your license and/or other materials * * provided with the software. * * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * * IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A * * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER * * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * * OF SUCH DAMAGE. * * * * Without limiting the foregoing, the software may implement third party * * technologies for which you must obtain licenses from parties other than AMD. * * You agree that AMD has not obtained or conveyed to you, and that you shall * * be responsible for obtaining the rights to use and/or distribute the applicable * * underlying intellectual property rights related to the third party technologies. * * These third party technologies are not licensed hereunder. * * * * If you use the software (in whole or in part), you shall adhere to all * * applicable U.S., European, and other export laws, including but not limited to * * the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), * * and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant * * to Section 740.6 of the EAR, you hereby certify that, except pursuant to a * * license granted by the United States Department of Commerce Bureau of Industry * * and Security or as otherwise permitted pursuant to a License Exception under * * the U.S. Export Administration Regulations ("EAR"), you will not (1) export, * * re-export or release to a national of a country in Country Groups D:1, E:1 or * * E:2 any restricted technology, software, or source code you receive hereunder, * * or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such * * technology or software, if such foreign produced direct product is subject to * * national security controls as identified on the Commerce Control List (currently * * found in Supplement 1 to Part 774 of EAR). For the most current Country Group * * listings, or for additional information about the EAR or your obligations under * * those regulations, please refer to the U.S. Bureau of Industry and Security's * * website at http://www.bis.doc.gov/. 
* * * \************************************************************************************/ /** * @brief Breadth-first traversal * @param row CSR pointer array * @param col CSR column array * @param d Distance array * @param rho Rho array * @param p Dependency array * @param cont Termination variable * @param num_nodes Termination variable * @param num_edges Termination variable * @param dist Current traversal layer */ __global__ void bfs_kernel(int *row, int *col, int *d, float *rho, int *cont, const int num_nodes, const int num_edges, const int dist) { int tid = blockIdx.x * blockDim.x + threadIdx.x; //navigate the current layer if (tid < num_nodes && d[tid] == dist) { //get the starting and ending pointers //of the neighbor list int start = row[tid]; int end; if (tid + 1 < num_nodes) end = row[tid + 1]; else end = num_edges; //navigate through the neighbor list for (int edge = start; edge < end; edge++) { int w = col[edge]; if (d[w] < 0) { *cont = 1; //traverse another layer d[w] = dist + 1; } //transfer the rho value to the neighbor if (d[w] == (dist + 1)) { atomicAdd(&rho[w], rho[tid]); } } } } /** * @brief Back traversal * @param row CSR pointer array * @param col CSR column array * @param d Distance array * @param rho Rho array * @param sigma Sigma array * @param p Dependency array * @param cont Termination variable * @param num_nodes Termination variable * @param num_edges Termination variable * @param dist Current traversal layer * @param s Source vertex * @param bc Betweeness Centrality array */ __global__ void backtrack_kernel(int *row, int *col, int *d, float *rho, float *sigma, const int num_nodes, const int num_edges, const int dist, const int s, float* bc) { int tid = blockIdx.x * blockDim.x + threadIdx.x; // Navigate the current layer if (tid < num_nodes && d[tid] == dist - 1) { int start = row[tid]; int end; if (tid + 1 < num_nodes) end = row[tid + 1]; else end = num_edges; // Get the starting and ending pointers // of the neighbor list in the reverse graph for (int edge = start; edge < end; edge++) { int w = col[edge]; // Update the sigma value traversing back if (d[w] == dist - 2) atomicAdd(&sigma[w], rho[w] / rho[tid] * (1 + sigma[tid])); } // Update the BC value if (tid != s) bc[tid] = bc[tid] + sigma[tid]; } } /** * @brief back_sum_kernel (not used) * @param s Source vertex * @param dist Current traversal layer * @param d Distance array * @param sigma Sigma array * @param bc Betweeness Centrality array * @param num_nodes Termination variable * @param num_edges Termination variable */ __global__ void back_sum_kernel(const int s, const int dist, int *d, float *sigma, float *bc, const int num_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < num_nodes) { // If it is not the source if (s != tid && d[tid] == dist - 1) { bc[tid] = bc[tid] + sigma[tid]; } } } /** * @brief array set 1D * @param s Source vertex * @param dist_array Distance array * @param sigma Sigma array * @param rho Rho array * @param num_nodes Termination variable */ __global__ void clean_1d_array(const int source, int *dist_array, float *sigma, float *rho, const int num_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < num_nodes) { sigma[tid] = 0; if (tid == source) { // If source vertex rho = 1, dist = 0 rho[tid] = 1; dist_array[tid] = 0; } else { // If other vertices rho = 0, dist = -1 rho[tid] = 0; dist_array[tid] = -1; } } } /** * @brief array set 2D * @param p Dependency array * @param num_nodes Number of vertices */ __global__ void clean_2d_array(int *p, const int 
num_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < num_nodes * num_nodes) p[tid] = 0; } /** * @brief clean BC * @param bc_d Betweeness Centrality array * @param num_nodes Number of vertices */ __global__ void clean_bc(float *bc_d, const int num_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < num_nodes) bc_d[tid] = 0; }
e5991b23b47d6d69bc5f60507d94306d5460e90e.cu
/************************************************************************************\ * * * Copyright � 2014 Advanced Micro Devices, Inc. * * Copyright (c) 2015 Mark D. Hill and David A. Wood * * All rights reserved. * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted provided that the following are met: * * * * You must reproduce the above copyright notice. * * * * Neither the name of the copyright holder nor the names of its contributors * * may be used to endorse or promote products derived from this software * * without specific, prior, written permission from at least the copyright holder. * * * * You must include the following terms in your license and/or other materials * * provided with the software. * * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * * IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A * * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER * * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * * OF SUCH DAMAGE. * * * * Without limiting the foregoing, the software may implement third party * * technologies for which you must obtain licenses from parties other than AMD. * * You agree that AMD has not obtained or conveyed to you, and that you shall * * be responsible for obtaining the rights to use and/or distribute the applicable * * underlying intellectual property rights related to the third party technologies. * * These third party technologies are not licensed hereunder. * * * * If you use the software (in whole or in part), you shall adhere to all * * applicable U.S., European, and other export laws, including but not limited to * * the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), * * and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant * * to Section 740.6 of the EAR, you hereby certify that, except pursuant to a * * license granted by the United States Department of Commerce Bureau of Industry * * and Security or as otherwise permitted pursuant to a License Exception under * * the U.S. Export Administration Regulations ("EAR"), you will not (1) export, * * re-export or release to a national of a country in Country Groups D:1, E:1 or * * E:2 any restricted technology, software, or source code you receive hereunder, * * or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such * * technology or software, if such foreign produced direct product is subject to * * national security controls as identified on the Commerce Control List (currently * * found in Supplement 1 to Part 774 of EAR). For the most current Country Group * * listings, or for additional information about the EAR or your obligations under * * those regulations, please refer to the U.S. Bureau of Industry and Security's * * website at http://www.bis.doc.gov/. 
* * * \************************************************************************************/ /** * @brief Breadth-first traversal * @param row CSR pointer array * @param col CSR column array * @param d Distance array * @param rho Rho array * @param p Dependency array * @param cont Termination variable * @param num_nodes Termination variable * @param num_edges Termination variable * @param dist Current traversal layer */ __global__ void bfs_kernel(int *row, int *col, int *d, float *rho, int *cont, const int num_nodes, const int num_edges, const int dist) { int tid = blockIdx.x * blockDim.x + threadIdx.x; //navigate the current layer if (tid < num_nodes && d[tid] == dist) { //get the starting and ending pointers //of the neighbor list int start = row[tid]; int end; if (tid + 1 < num_nodes) end = row[tid + 1]; else end = num_edges; //navigate through the neighbor list for (int edge = start; edge < end; edge++) { int w = col[edge]; if (d[w] < 0) { *cont = 1; //traverse another layer d[w] = dist + 1; } //transfer the rho value to the neighbor if (d[w] == (dist + 1)) { atomicAdd(&rho[w], rho[tid]); } } } } /** * @brief Back traversal * @param row CSR pointer array * @param col CSR column array * @param d Distance array * @param rho Rho array * @param sigma Sigma array * @param p Dependency array * @param cont Termination variable * @param num_nodes Termination variable * @param num_edges Termination variable * @param dist Current traversal layer * @param s Source vertex * @param bc Betweeness Centrality array */ __global__ void backtrack_kernel(int *row, int *col, int *d, float *rho, float *sigma, const int num_nodes, const int num_edges, const int dist, const int s, float* bc) { int tid = blockIdx.x * blockDim.x + threadIdx.x; // Navigate the current layer if (tid < num_nodes && d[tid] == dist - 1) { int start = row[tid]; int end; if (tid + 1 < num_nodes) end = row[tid + 1]; else end = num_edges; // Get the starting and ending pointers // of the neighbor list in the reverse graph for (int edge = start; edge < end; edge++) { int w = col[edge]; // Update the sigma value traversing back if (d[w] == dist - 2) atomicAdd(&sigma[w], rho[w] / rho[tid] * (1 + sigma[tid])); } // Update the BC value if (tid != s) bc[tid] = bc[tid] + sigma[tid]; } } /** * @brief back_sum_kernel (not used) * @param s Source vertex * @param dist Current traversal layer * @param d Distance array * @param sigma Sigma array * @param bc Betweeness Centrality array * @param num_nodes Termination variable * @param num_edges Termination variable */ __global__ void back_sum_kernel(const int s, const int dist, int *d, float *sigma, float *bc, const int num_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < num_nodes) { // If it is not the source if (s != tid && d[tid] == dist - 1) { bc[tid] = bc[tid] + sigma[tid]; } } } /** * @brief array set 1D * @param s Source vertex * @param dist_array Distance array * @param sigma Sigma array * @param rho Rho array * @param num_nodes Termination variable */ __global__ void clean_1d_array(const int source, int *dist_array, float *sigma, float *rho, const int num_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < num_nodes) { sigma[tid] = 0; if (tid == source) { // If source vertex rho = 1, dist = 0 rho[tid] = 1; dist_array[tid] = 0; } else { // If other vertices rho = 0, dist = -1 rho[tid] = 0; dist_array[tid] = -1; } } } /** * @brief array set 2D * @param p Dependency array * @param num_nodes Number of vertices */ __global__ void clean_2d_array(int *p, const int 
num_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < num_nodes * num_nodes) p[tid] = 0; } /** * @brief clean BC * @param bc_d Betweeness Centrality array * @param num_nodes Number of vertices */ __global__ void clean_bc(float *bc_d, const int num_nodes) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < num_nodes) bc_d[tid] = 0; }
67771cb44fd42d3a4d03ce467dbdd8eb691d5f0b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by binhpht on 27.3.2021.
//
#include <stdio.h>
#include "iostream"
#include "square.cuh"

__global__ void square (float * d_out, float * d_in) {
    // int idx = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    float f = d_in[idx];
    d_out[idx] = f * f;
}

void call_square (int thread_num, float * d_out, float * d_in) {
    hipLaunchKernelGGL(( ::square), dim3(8),dim3(8), 0, 0, d_out, d_in);
}
67771cb44fd42d3a4d03ce467dbdd8eb691d5f0b.cu
//
// Created by binhpht on 27.3.2021.
//
#include <stdio.h>
#include "iostream"
#include "square.cuh"

__global__ void square (float * d_out, float * d_in) {
    // int idx = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    float f = d_in[idx];
    d_out[idx] = f * f;
}

void call_square (int thread_num, float * d_out, float * d_in) {
    ::square<<<8,8>>>(d_out, d_in);
}
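
call_square above ignores its thread_num parameter and always launches an 8x8 grid. A sketch of a wrapper that sizes the grid from the element count instead; the square_n kernel, its bounds check, and the 64-thread block size are illustrative assumptions, not from the files.

// Sketch of a size-driven launch wrapper (assumes nvcc; not part of the pair above).
#include <cuda_runtime.h>

__global__ void square_n(float* d_out, float* d_in, int n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {                       // guard against the padded last block
        float f = d_in[idx];
        d_out[idx] = f * f;
    }
}

void call_square_sized(int n, float* d_out, float* d_in) {
    const int block = 64;                       // illustrative block size
    const int grid = (n + block - 1) / block;   // enough blocks to cover all n elements
    // HIP form: hipLaunchKernelGGL(( square_n), dim3(grid), dim3(block), 0, 0, d_out, d_in, n);
    square_n<<<grid, block>>>(d_out, d_in, n);
}
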
ecb2e64078cf324bc51b9904707b9510a865fef7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2020 by Contributors * \file array/cuda/coo2csr.cc * \brief COO2CSR */ #include <dgl/array.h> #include "../../runtime/cuda/cuda_common.h" #include "./utils.h" namespace dgl { using runtime::NDArray; namespace aten { namespace impl { template <DLDeviceType XPU, typename IdType> CSRMatrix COOToCSR(COOMatrix coo) { LOG(FATAL) << "Unreachable code."; return {}; } template <> CSRMatrix COOToCSR<kDLGPU, int32_t>(COOMatrix coo) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); // allocate cusparse handle if needed if (!thr_entry->cusparse_handle) { CUSPARSE_CALL(hipsparseCreate(&(thr_entry->cusparse_handle))); } CUSPARSE_CALL(hipsparseSetStream(thr_entry->cusparse_handle, thr_entry->stream)); bool row_sorted = coo.row_sorted; bool col_sorted = coo.col_sorted; if (!row_sorted) { // It is possible that the flag is simply not set (default value is false), // so we still perform a linear scan to check the flag. std::tie(row_sorted, col_sorted) = COOIsSorted(coo); } if (!row_sorted) { coo = COOSort(coo); col_sorted = coo.col_sorted; } const int64_t nnz = coo.row->shape[0]; // TODO(minjie): Many of our current implementation assumes that CSR must have // a data array. This is a temporary workaround. Remove this after: // - The old immutable graph implementation is deprecated. // - The old binary reduce kernel is deprecated. if (!COOHasData(coo)) coo.data = aten::Range(0, nnz, coo.row->dtype.bits, coo.row->ctx); NDArray indptr = aten::NewIdArray(coo.num_rows + 1, coo.row->ctx, coo.row->dtype.bits); int32_t* indptr_ptr = static_cast<int32_t*>(indptr->data); CUSPARSE_CALL(hipsparseXcoo2csr( thr_entry->cusparse_handle, coo.row.Ptr<int32_t>(), nnz, coo.num_rows, indptr_ptr, HIPSPARSE_INDEX_BASE_ZERO)); return CSRMatrix(coo.num_rows, coo.num_cols, indptr, coo.col, coo.data, col_sorted); } /*! * \brief Search for the insertion positions for needle in the hay. * * The hay is a list of sorted elements and the result is the insertion position * of each needle so that the insertion still gives sorted order. * * It essentially perform binary search to find upper bound for each needle * elements. * * For example: * hay = [0, 0, 1, 2, 2] * needle = [0, 1, 2, 3] * then, * out = [2, 3, 5, 5] */ template <typename IdType> __global__ void _SortedSearchKernelUpperBound( const IdType* hay, int64_t hay_size, const IdType* needles, int64_t num_needles, IdType* pos) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < num_needles) { const IdType ele = needles[tx]; // binary search IdType lo = 0, hi = hay_size; while (lo < hi) { IdType mid = (lo + hi) >> 1; if (hay[mid] <= ele) { lo = mid + 1; } else { hi = mid; } } pos[tx] = lo; tx += stride_x; } } template <> CSRMatrix COOToCSR<kDLGPU, int64_t>(COOMatrix coo) { const auto& ctx = coo.row->ctx; const auto nbits = coo.row->dtype.bits; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); bool row_sorted = coo.row_sorted; bool col_sorted = coo.col_sorted; if (!row_sorted) { // It is possible that the flag is simply not set (default value is false), // so we still perform a linear scan to check the flag. std::tie(row_sorted, col_sorted) = COOIsSorted(coo); } if (!row_sorted) { coo = COOSort(coo); col_sorted = coo.col_sorted; } const int64_t nnz = coo.row->shape[0]; // TODO(minjie): Many of our current implementation assumes that CSR must have // a data array. This is a temporary workaround. 
Remove this after: // - The old immutable graph implementation is deprecated. // - The old binary reduce kernel is deprecated. if (!COOHasData(coo)) coo.data = aten::Range(0, nnz, coo.row->dtype.bits, coo.row->ctx); IdArray rowids = Range(0, coo.num_rows, nbits, ctx); const int nt = cuda::FindNumThreads(coo.num_rows); const int nb = (coo.num_rows + nt - 1) / nt; IdArray indptr = Full(0, coo.num_rows + 1, nbits, ctx); CUDA_KERNEL_CALL(_SortedSearchKernelUpperBound, nb, nt, 0, thr_entry->stream, coo.row.Ptr<int64_t>(), nnz, rowids.Ptr<int64_t>(), coo.num_rows, indptr.Ptr<int64_t>() + 1); return CSRMatrix(coo.num_rows, coo.num_cols, indptr, coo.col, coo.data, col_sorted); } template CSRMatrix COOToCSR<kDLGPU, int32_t>(COOMatrix coo); template CSRMatrix COOToCSR<kDLGPU, int64_t>(COOMatrix coo); } // namespace impl } // namespace aten } // namespace dgl
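The int64_t specialization above builds the CSR row pointer by binary-searching each row id against the sorted COO row array. The host-side C++ sketch below is my own illustration of that upper-bound logic (coo_rows_to_indptr is a hypothetical helper, not part of DGL or cuSPARSE); it uses the same toy data as the kernel's doc comment.

// Host-side sketch of the int64_t COOToCSR row-pointer construction:
// indptr[r + 1] is the upper bound of row id r in the sorted COO row array,
// which is what _SortedSearchKernelUpperBound computes per thread on the GPU.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

std::vector<int64_t> coo_rows_to_indptr(const std::vector<int64_t> &sorted_rows,
                                        int64_t num_rows) {
  std::vector<int64_t> indptr(num_rows + 1, 0);
  for (int64_t r = 0; r < num_rows; ++r) {
    // std::upper_bound mirrors the kernel's search: hay[mid] <= ele -> lo = mid + 1.
    indptr[r + 1] = std::upper_bound(sorted_rows.begin(), sorted_rows.end(), r) -
                    sorted_rows.begin();
  }
  return indptr;
}

int main() {
  // Same data as the kernel's doc comment: hay = [0, 0, 1, 2, 2], needles = 0..3.
  std::vector<int64_t> rows{0, 0, 1, 2, 2};
  auto indptr = coo_rows_to_indptr(rows, 4);
  // Row 0 has 2 entries, row 1 has 1, row 2 has 2, row 3 has 0.
  assert((indptr == std::vector<int64_t>{0, 2, 3, 5, 5}));
  return 0;
}

On the device the kernel writes these values into indptr.Ptr<int64_t>() + 1 in a grid-stride loop, with indptr[0] left at the zero that Full() initialized.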
ecb2e64078cf324bc51b9904707b9510a865fef7.cu
/*! * Copyright (c) 2020 by Contributors * \file array/cuda/coo2csr.cc * \brief COO2CSR */ #include <dgl/array.h> #include "../../runtime/cuda/cuda_common.h" #include "./utils.h" namespace dgl { using runtime::NDArray; namespace aten { namespace impl { template <DLDeviceType XPU, typename IdType> CSRMatrix COOToCSR(COOMatrix coo) { LOG(FATAL) << "Unreachable code."; return {}; } template <> CSRMatrix COOToCSR<kDLGPU, int32_t>(COOMatrix coo) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); // allocate cusparse handle if needed if (!thr_entry->cusparse_handle) { CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle))); } CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, thr_entry->stream)); bool row_sorted = coo.row_sorted; bool col_sorted = coo.col_sorted; if (!row_sorted) { // It is possible that the flag is simply not set (default value is false), // so we still perform a linear scan to check the flag. std::tie(row_sorted, col_sorted) = COOIsSorted(coo); } if (!row_sorted) { coo = COOSort(coo); col_sorted = coo.col_sorted; } const int64_t nnz = coo.row->shape[0]; // TODO(minjie): Many of our current implementation assumes that CSR must have // a data array. This is a temporary workaround. Remove this after: // - The old immutable graph implementation is deprecated. // - The old binary reduce kernel is deprecated. if (!COOHasData(coo)) coo.data = aten::Range(0, nnz, coo.row->dtype.bits, coo.row->ctx); NDArray indptr = aten::NewIdArray(coo.num_rows + 1, coo.row->ctx, coo.row->dtype.bits); int32_t* indptr_ptr = static_cast<int32_t*>(indptr->data); CUSPARSE_CALL(cusparseXcoo2csr( thr_entry->cusparse_handle, coo.row.Ptr<int32_t>(), nnz, coo.num_rows, indptr_ptr, CUSPARSE_INDEX_BASE_ZERO)); return CSRMatrix(coo.num_rows, coo.num_cols, indptr, coo.col, coo.data, col_sorted); } /*! * \brief Search for the insertion positions for needle in the hay. * * The hay is a list of sorted elements and the result is the insertion position * of each needle so that the insertion still gives sorted order. * * It essentially perform binary search to find upper bound for each needle * elements. * * For example: * hay = [0, 0, 1, 2, 2] * needle = [0, 1, 2, 3] * then, * out = [2, 3, 5, 5] */ template <typename IdType> __global__ void _SortedSearchKernelUpperBound( const IdType* hay, int64_t hay_size, const IdType* needles, int64_t num_needles, IdType* pos) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < num_needles) { const IdType ele = needles[tx]; // binary search IdType lo = 0, hi = hay_size; while (lo < hi) { IdType mid = (lo + hi) >> 1; if (hay[mid] <= ele) { lo = mid + 1; } else { hi = mid; } } pos[tx] = lo; tx += stride_x; } } template <> CSRMatrix COOToCSR<kDLGPU, int64_t>(COOMatrix coo) { const auto& ctx = coo.row->ctx; const auto nbits = coo.row->dtype.bits; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); bool row_sorted = coo.row_sorted; bool col_sorted = coo.col_sorted; if (!row_sorted) { // It is possible that the flag is simply not set (default value is false), // so we still perform a linear scan to check the flag. std::tie(row_sorted, col_sorted) = COOIsSorted(coo); } if (!row_sorted) { coo = COOSort(coo); col_sorted = coo.col_sorted; } const int64_t nnz = coo.row->shape[0]; // TODO(minjie): Many of our current implementation assumes that CSR must have // a data array. This is a temporary workaround. Remove this after: // - The old immutable graph implementation is deprecated. 
// - The old binary reduce kernel is deprecated. if (!COOHasData(coo)) coo.data = aten::Range(0, nnz, coo.row->dtype.bits, coo.row->ctx); IdArray rowids = Range(0, coo.num_rows, nbits, ctx); const int nt = cuda::FindNumThreads(coo.num_rows); const int nb = (coo.num_rows + nt - 1) / nt; IdArray indptr = Full(0, coo.num_rows + 1, nbits, ctx); CUDA_KERNEL_CALL(_SortedSearchKernelUpperBound, nb, nt, 0, thr_entry->stream, coo.row.Ptr<int64_t>(), nnz, rowids.Ptr<int64_t>(), coo.num_rows, indptr.Ptr<int64_t>() + 1); return CSRMatrix(coo.num_rows, coo.num_cols, indptr, coo.col, coo.data, col_sorted); } template CSRMatrix COOToCSR<kDLGPU, int32_t>(COOMatrix coo); template CSRMatrix COOToCSR<kDLGPU, int64_t>(COOMatrix coo); } // namespace impl } // namespace aten } // namespace dgl
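For the int32_t specialization the same row pointer comes from cusparseXcoo2csr. A conceptually equivalent way to see what that conversion returns is to count the entries per row and take a prefix sum; the sketch below is a host-side illustration under my own naming (coo2csr_by_counting), not the library's actual algorithm, and it produces the same indptr as the binary-search route shown earlier.

// Count-and-scan construction of the CSR row pointer from COO row indices.
#include <cassert>
#include <cstdint>
#include <vector>

std::vector<int32_t> coo2csr_by_counting(const std::vector<int32_t> &coo_rows,
                                         int32_t num_rows) {
  std::vector<int32_t> indptr(num_rows + 1, 0);
  for (int32_t r : coo_rows) indptr[r + 1] += 1;  // histogram of rows
  for (int32_t i = 0; i < num_rows; ++i)          // running sum over the counts
    indptr[i + 1] += indptr[i];
  return indptr;
}

int main() {
  std::vector<int32_t> rows{0, 0, 1, 2, 2};  // same toy data as above
  assert((coo2csr_by_counting(rows, 4) == std::vector<int32_t>{0, 2, 3, 5, 5}));
  return 0;
}

Either way the conversion relies on row-sorted input, because the existing col and data arrays are reused in place as the CSR column indices and data; that is why both specializations check COOIsSorted and fall back to COOSort before converting.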
7e0bc3bc8a9cca1b5c9be72a66d56894ba0ba491.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file writer_impl.cu * @brief cuDF-IO ORC writer class implementation */ #include "writer_impl.hpp" #include <nvstrings/NVStrings.h> #include <cudf/null_mask.hpp> #include <cudf/strings/strings_column_view.hpp> #include <algorithm> #include <cstring> #include <utility> #include <rmm/thrust_rmm_allocator.h> #include <rmm/device_buffer.hpp> namespace cudf { namespace experimental { namespace io { namespace detail { namespace orc { using namespace cudf::io::orc; using namespace cudf::io; namespace { /** * @brief Helper for pinned host memory **/ template <typename T> using pinned_buffer = std::unique_ptr<T, decltype(&hipHostFree)>; /** * @brief Function that translates GDF compression to ORC compression **/ constexpr orc::CompressionKind to_orc_compression( compression_type compression) { switch (compression) { case compression_type::SNAPPY: return orc::CompressionKind::SNAPPY; case compression_type::NONE: default: return orc::CompressionKind::NONE; } } /** * @brief Function that translates GDF dtype to ORC datatype **/ constexpr orc::TypeKind to_orc_type(cudf::type_id id) { switch (id) { case cudf::type_id::INT8: return TypeKind::BYTE; case cudf::type_id::INT16: return TypeKind::SHORT; case cudf::type_id::INT32: return TypeKind::INT; case cudf::type_id::INT64: return TypeKind::LONG; case cudf::type_id::FLOAT32: return TypeKind::FLOAT; case cudf::type_id::FLOAT64: return TypeKind::DOUBLE; case cudf::type_id::BOOL8: return TypeKind::BOOLEAN; case cudf::type_id::TIMESTAMP_DAYS: return TypeKind::DATE; case cudf::type_id::TIMESTAMP_SECONDS: case cudf::type_id::TIMESTAMP_MICROSECONDS: case cudf::type_id::TIMESTAMP_MILLISECONDS: case cudf::type_id::TIMESTAMP_NANOSECONDS: return TypeKind::TIMESTAMP; case cudf::type_id::CATEGORY: return TypeKind::INT; case cudf::type_id::STRING: return TypeKind::STRING; default: return TypeKind::INVALID_TYPE_KIND; } } /** * @brief Function that translates time unit to nanoscale multiple **/ template <typename T> constexpr T to_clockscale(cudf::type_id timestamp_id) { switch (timestamp_id) { case cudf::type_id::TIMESTAMP_SECONDS: return 9; case cudf::type_id::TIMESTAMP_MICROSECONDS: return 6; case cudf::type_id::TIMESTAMP_MILLISECONDS: return 3; case cudf::type_id::TIMESTAMP_NANOSECONDS: default: return 0; } } } // namespace /** * @brief Helper class that adds ORC-specific column info **/ class orc_column_view { using str_pair = std::pair<const char *, size_t>; public: /** * @brief Constructor that extracts out the string position + length pairs * for building dictionaries for string columns **/ explicit orc_column_view(size_t id, size_t str_id, column_view const &col, hipStream_t stream) : _id(id), _str_id(str_id), _string_type(col.type().id() == type_id::STRING), _type_width(_string_type ? 
0 : cudf::size_of(col.type())), _data_count(col.size()), _null_count(col.null_count()), _data(col.data<uint8_t>()), _nulls(col.has_nulls() ? col.null_mask() : nullptr), _clockscale(to_clockscale<uint8_t>(col.type().id())), _type_kind(to_orc_type(col.type().id())) { if (_string_type) { strings_column_view view{col}; _nvstr = NVStrings::create_from_offsets(view.chars().data<char>(), view.size(), view.offsets().data<size_type>()); _indexes = rmm::device_buffer(_data_count * sizeof(str_pair), stream); CUDF_EXPECTS( _nvstr->create_index(static_cast<str_pair *>(_indexes.data())) == 0, "Cannot retrieve string pairs"); _data = _indexes.data(); } _name = "_col" + std::to_string(_id); } auto is_string() const noexcept { return _string_type; } void set_dict_stride(size_t stride) noexcept { dict_stride = stride; } auto get_dict_stride() const noexcept { return dict_stride; } /** * @brief Function that associates an existing dictionary chunk allocation **/ void attach_dict_chunk(gpu::DictionaryChunk *host_dict, gpu::DictionaryChunk *dev_dict) { dict = host_dict; d_dict = dev_dict; } auto host_dict_chunk(size_t rowgroup) { assert(_string_type); return &dict[rowgroup * dict_stride + _str_id]; } auto device_dict_chunk() const { return d_dict; } /** * @brief Function that associates an existing stripe dictionary allocation **/ void attach_stripe_dict(gpu::StripeDictionary *host_stripe_dict, gpu::StripeDictionary *dev_stripe_dict) { stripe_dict = host_stripe_dict; d_stripe_dict = dev_stripe_dict; } auto host_stripe_dict(size_t stripe) const { assert(_string_type); return &stripe_dict[stripe * dict_stride + _str_id]; } auto device_stripe_dict() const { return d_stripe_dict; } size_t type_width() const noexcept { return _type_width; } size_t data_count() const noexcept { return _data_count; } size_t null_count() const noexcept { return _null_count; } void const *data() const noexcept { return _data; } uint32_t const *nulls() const noexcept { return _nulls; } uint8_t clockscale() const noexcept { return _clockscale; } void set_orc_encoding(ColumnEncodingKind e) { _encoding_kind = e; } auto orc_kind() const noexcept { return _type_kind; } auto orc_encoding() const noexcept { return _encoding_kind; } auto orc_name() const noexcept { return _name; } private: // Identifier within set of columns and string columns, respectively size_t _id = 0; size_t _str_id = 0; bool _string_type = false; size_t _type_width = 0; size_t _data_count = 0; size_t _null_count = 0; void const *_data = nullptr; uint32_t const *_nulls = nullptr; uint8_t _clockscale = 0; // ORC-related members std::string _name{}; TypeKind _type_kind; ColumnEncodingKind _encoding_kind; // String dictionary-related members NVStrings *_nvstr = nullptr; rmm::device_buffer _indexes; size_t dict_stride = 0; gpu::DictionaryChunk const *dict = nullptr; gpu::StripeDictionary const *stripe_dict = nullptr; gpu::DictionaryChunk *d_dict = nullptr; gpu::StripeDictionary *d_stripe_dict = nullptr; }; void writer::impl::init_dictionaries( orc_column_view *columns, size_t num_rows, std::vector<int> const &str_col_ids, uint32_t *dict_data, uint32_t *dict_index, hostdevice_vector<gpu::DictionaryChunk> &dict, hipStream_t stream) { const size_t num_rowgroups = dict.size() / str_col_ids.size(); // Setup per-rowgroup dictionary indexes for each dictionary-aware column for (size_t i = 0; i < str_col_ids.size(); ++i) { auto &str_column = columns[str_col_ids[i]]; str_column.set_dict_stride(str_col_ids.size()); str_column.attach_dict_chunk(dict.host_ptr(), dict.device_ptr()); for 
(size_t g = 0; g < num_rowgroups; g++) { auto *ck = &dict[g * str_col_ids.size() + i]; ck->valid_map_base = str_column.nulls(); ck->column_data_base = str_column.data(); ck->dict_data = dict_data + i * num_rows + g * row_index_stride_; ck->dict_index = dict_index + i * num_rows; // Indexed by abs row ck->start_row = g * row_index_stride_; ck->num_rows = std::min<uint32_t>( row_index_stride_, std::max<int>(str_column.data_count() - ck->start_row, 0)); ck->num_strings = 0; ck->string_char_count = 0; ck->num_dict_strings = 0; ck->dict_char_count = 0; } } CUDA_TRY(hipMemcpyAsync(dict.device_ptr(), dict.host_ptr(), dict.memory_size(), hipMemcpyHostToDevice, stream)); CUDA_TRY(gpu::InitDictionaryIndices(dict.device_ptr(), str_col_ids.size(), num_rowgroups, stream)); CUDA_TRY(hipMemcpyAsync(dict.host_ptr(), dict.device_ptr(), dict.memory_size(), hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipStreamSynchronize(stream)); } void writer::impl::build_dictionaries( orc_column_view *columns, size_t num_rows, std::vector<int> const &str_col_ids, std::vector<uint32_t> const &stripe_list, hostdevice_vector<gpu::DictionaryChunk> const &dict, uint32_t *dict_index, hostdevice_vector<gpu::StripeDictionary> &stripe_dict, hipStream_t stream) { const auto num_rowgroups = dict.size() / str_col_ids.size(); for (size_t i = 0; i < str_col_ids.size(); i++) { size_t direct_cost = 0, dict_cost = 0; auto &str_column = columns[str_col_ids[i]]; str_column.attach_stripe_dict(stripe_dict.host_ptr(), stripe_dict.device_ptr()); for (size_t j = 0, g = 0; j < stripe_list.size(); j++) { const auto num_chunks = stripe_list[j]; auto *sd = &stripe_dict[j * str_col_ids.size() + i]; sd->column_data_base = str_column.host_dict_chunk(0)->column_data_base; sd->dict_data = str_column.host_dict_chunk(g)->dict_data; sd->dict_index = dict_index + i * num_rows; // Indexed by abs row sd->column_id = str_col_ids[i]; sd->start_chunk = (uint32_t)g; sd->num_chunks = num_chunks; sd->num_strings = 0; sd->dict_char_count = 0; for (size_t k = g; k < g + num_chunks; k++) { const auto &dt = dict[k * str_col_ids.size() + i]; sd->num_strings += dt.num_dict_strings; direct_cost += dt.string_char_count; dict_cost += dt.dict_char_count + dt.num_dict_strings; } g += num_chunks; } // Early disable of dictionary if it doesn't look good at the chunk level if (enable_dictionary_ && dict_cost >= direct_cost) { for (size_t j = 0; j < stripe_list.size(); j++) { stripe_dict[j * str_col_ids.size() + i].dict_data = nullptr; } } } CUDA_TRY(hipMemcpyAsync(stripe_dict.device_ptr(), stripe_dict.host_ptr(), stripe_dict.memory_size(), hipMemcpyHostToDevice, stream)); CUDA_TRY(gpu::BuildStripeDictionaries( stripe_dict.device_ptr(), stripe_dict.host_ptr(), dict.device_ptr(), stripe_list.size(), num_rowgroups, str_col_ids.size(), stream)); CUDA_TRY(hipMemcpyAsync(stripe_dict.host_ptr(), stripe_dict.device_ptr(), stripe_dict.memory_size(), hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipStreamSynchronize(stream)); } std::vector<Stream> writer::impl::gather_streams( orc_column_view *columns, size_t num_columns, size_t num_rows, std::vector<uint32_t> const &stripe_list, std::vector<int32_t> &strm_ids) { // First n + 1 streams are row index streams, including 'column 0' std::vector<Stream> streams; streams.resize(num_columns + 1); streams[0].column = 0; streams[0].kind = ROW_INDEX; streams[0].length = 0; for (size_t i = 0; i < num_columns; ++i) { TypeKind kind = columns[i].orc_kind(); StreamKind data_kind = DATA; StreamKind data2_kind = LENGTH; ColumnEncodingKind encoding_kind = DIRECT; 
int64_t present_stream_size = 0; int64_t data_stream_size = 0; int64_t data2_stream_size = 0; int64_t dict_stream_size = 0; if (columns[i].null_count() != 0 || columns[i].data_count() != num_rows) { present_stream_size = ((row_index_stride_ + 7) >> 3); present_stream_size += (present_stream_size + 0x7f) >> 7; } switch (kind) { case TypeKind::BOOLEAN: data_stream_size = div_rowgroups_by<int64_t>(1024) * (128 + 1); encoding_kind = DIRECT; break; case TypeKind::BYTE: data_stream_size = div_rowgroups_by<int64_t>(128) * (128 + 1); encoding_kind = DIRECT; break; case TypeKind::SHORT: data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 2 + 2); encoding_kind = DIRECT_V2; break; case TypeKind::FLOAT: // Pass through if no nulls (no RLE encoding for floating point) data_stream_size = (columns[i].null_count() != 0) ? div_rowgroups_by<int64_t>(512) * (512 * 4 + 2) : INT64_C(-1); encoding_kind = DIRECT; break; case TypeKind::INT: case TypeKind::DATE: data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2); encoding_kind = DIRECT_V2; break; case TypeKind::DOUBLE: // Pass through if no nulls (no RLE encoding for floating point) data_stream_size = (columns[i].null_count() != 0) ? div_rowgroups_by<int64_t>(512) * (512 * 8 + 2) : INT64_C(-1); encoding_kind = DIRECT; break; case TypeKind::LONG: data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 8 + 2); encoding_kind = DIRECT_V2; break; case TypeKind::STRING: { bool enable_dict = enable_dictionary_; size_t direct_data_size = 0; size_t dict_data_size = 0; size_t dict_strings = 0; size_t dict_lengths_div512 = 0; for (size_t stripe = 0, g = 0; stripe < stripe_list.size(); stripe++) { const auto sd = columns[i].host_stripe_dict(stripe); enable_dict = (enable_dict && sd->dict_data != nullptr); if (enable_dict) { dict_strings += sd->num_strings; dict_lengths_div512 += (sd->num_strings + 0x1ff) >> 9; dict_data_size += sd->dict_char_count; } for (uint32_t k = 0; k < stripe_list[stripe]; k++, g++) { direct_data_size += columns[i].host_dict_chunk(g)->string_char_count; } } if (enable_dict) { uint32_t dict_bits = 0; for (dict_bits = 1; dict_bits < 32; dict_bits <<= 1) { if (dict_strings <= (1ull << dict_bits)) break; } const auto valid_count = columns[i].data_count() - columns[i].null_count(); dict_data_size += (dict_bits * valid_count + 7) >> 3; } // Decide between direct or dictionary encoding if (enable_dict && dict_data_size < direct_data_size) { data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2); data2_stream_size = dict_lengths_div512 * (512 * 4 + 2); dict_stream_size = std::max<size_t>(dict_data_size, 1); encoding_kind = DICTIONARY_V2; } else { data_stream_size = std::max<size_t>(direct_data_size, 1); data2_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2); encoding_kind = DIRECT_V2; } break; } case TypeKind::TIMESTAMP: data_stream_size = ((row_index_stride_ + 0x1ff) >> 9) * (512 * 4 + 2); data2_stream_size = data_stream_size; data2_kind = SECONDARY; encoding_kind = DIRECT_V2; break; default: CUDF_FAIL("Unsupported ORC type kind"); } // Initialize the column's metadata columns[i].set_orc_encoding(encoding_kind); // Initialize the column's index stream const auto id = static_cast<uint32_t>(1 + i); streams[id].column = id; streams[id].kind = ROW_INDEX; streams[id].length = 0; // Initialize the column's data stream(s) const auto base = i * gpu::CI_NUM_STREAMS; if (present_stream_size != 0) { auto len = static_cast<uint64_t>(present_stream_size); strm_ids[base + gpu::CI_PRESENT] = streams.size(); 
streams.push_back(orc::Stream{PRESENT, id, len}); } if (data_stream_size != 0) { auto len = static_cast<uint64_t>(std::max<int64_t>(data_stream_size, 0)); strm_ids[base + gpu::CI_DATA] = streams.size(); streams.push_back(orc::Stream{data_kind, id, len}); } if (data2_stream_size != 0) { auto len = static_cast<uint64_t>(std::max<int64_t>(data2_stream_size, 0)); strm_ids[base + gpu::CI_DATA2] = streams.size(); streams.push_back(orc::Stream{data2_kind, id, len}); } if (dict_stream_size != 0) { auto len = static_cast<uint64_t>(dict_stream_size); strm_ids[base + gpu::CI_DICTIONARY] = streams.size(); streams.push_back(orc::Stream{DICTIONARY_DATA, id, len}); } } return streams; } rmm::device_buffer writer::impl::encode_columns( orc_column_view *columns, size_t num_columns, size_t num_rows, size_t num_rowgroups, std::vector<int> const &str_col_ids, std::vector<uint32_t> const &stripe_list, std::vector<Stream> const &streams, std::vector<int32_t> const &strm_ids, hostdevice_vector<gpu::EncChunk> &chunks, hipStream_t stream) { // Allocate combined buffer for RLE data and string data output std::vector<size_t> strm_offsets(streams.size()); size_t str_data_size = 0; auto output = [&]() { size_t rle_data_size = 0; for (size_t i = 0; i < streams.size(); ++i) { const auto &stream = streams[i]; const auto &column = columns[stream.column - 1]; if (((stream.kind == DICTIONARY_DATA || stream.kind == LENGTH) && (column.orc_encoding() == DICTIONARY_V2)) || ((stream.kind == DATA) && (column.orc_kind() == TypeKind::STRING && column.orc_encoding() == DIRECT_V2))) { strm_offsets[i] = str_data_size; str_data_size += stream.length; } else { strm_offsets[i] = rle_data_size; rle_data_size += (stream.length * num_rowgroups + 7) & ~7; } } str_data_size = (str_data_size + 7) & ~7; return rmm::device_buffer(rle_data_size + str_data_size, stream); }(); auto dst_base = static_cast<uint8_t *>(output.data()); // Initialize column chunks' descriptions size_t stripe_start = 0; size_t stripe_id = 0; for (size_t j = 0; j < num_rowgroups; j++) { for (size_t i = 0; i < num_columns; i++) { auto *ck = &chunks[j * num_columns + i]; ck->start_row = (j * row_index_stride_); ck->num_rows = std::min<uint32_t>(row_index_stride_, num_rows - ck->start_row); ck->valid_rows = columns[i].data_count(); ck->encoding_kind = columns[i].orc_encoding(); ck->type_kind = columns[i].orc_kind(); if (ck->type_kind == TypeKind::STRING) { ck->valid_map_base = columns[i].nulls(); ck->column_data_base = (ck->encoding_kind == DICTIONARY_V2) ? columns[i].host_stripe_dict(stripe_id)->dict_index : columns[i].data(); ck->dtype_len = 1; } else { ck->valid_map_base = columns[i].nulls(); ck->column_data_base = columns[i].data(); ck->dtype_len = columns[i].type_width(); } ck->scale = columns[i].clockscale(); for (int k = 0; k < gpu::CI_NUM_STREAMS; k++) { const auto strm_id = strm_ids[i * gpu::CI_NUM_STREAMS + k]; ck->strm_id[k] = strm_id; if (strm_id >= 0) { if ((k == gpu::CI_DICTIONARY) || (k == gpu::CI_DATA2 && ck->encoding_kind == DICTIONARY_V2)) { if (j == stripe_start) { const int32_t dict_stride = columns[i].get_dict_stride(); const auto stripe = columns[i].host_stripe_dict(stripe_id); ck->strm_len[k] = (k == gpu::CI_DICTIONARY) ? 
stripe->dict_char_count : (((stripe->num_strings + 0x1ff) >> 9) * (512 * 4 + 2)); if (stripe_id == 0) { ck->streams[k] = dst_base + strm_offsets[strm_id]; } else { const auto *ck_up = &chunks[stripe[-dict_stride].start_chunk * num_columns + i]; ck->streams[k] = ck_up->streams[k] + ck_up->strm_len[k]; } } else { ck->strm_len[k] = 0; ck->streams[k] = ck[-num_columns].streams[k]; } } else if (k == gpu::CI_DATA && ck->type_kind == TypeKind::STRING && ck->encoding_kind == DIRECT_V2) { ck->strm_len[k] = columns[i].host_dict_chunk(j)->string_char_count; ck->streams[k] = (j == 0) ? dst_base + strm_offsets[strm_id] : (ck[-num_columns].streams[k] + ck[-num_columns].strm_len[k]); } else if (k == gpu::CI_DATA && streams[strm_id].length == 0 && (ck->type_kind == DOUBLE || ck->type_kind == FLOAT)) { // Pass-through ck->strm_len[k] = ck->num_rows * ck->dtype_len; ck->streams[k] = nullptr; } else { ck->strm_len[k] = streams[strm_id].length; ck->streams[k] = dst_base + str_data_size + strm_offsets[strm_id] + streams[strm_id].length * j; } } else { ck->strm_len[k] = 0; ck->streams[k] = nullptr; } } } // Track the current stripe this rowgroup chunk belongs if (j + 1 == stripe_start + stripe_list[stripe_id]) { stripe_start = j + 1; stripe_id++; } } CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), hipMemcpyHostToDevice, stream)); if (!str_col_ids.empty()) { auto d_stripe_dict = columns[str_col_ids[0]].device_stripe_dict(); CUDA_TRY(gpu::EncodeStripeDictionaries(d_stripe_dict, chunks.device_ptr(), str_col_ids.size(), num_columns, stripe_list.size(), stream)); } CUDA_TRY(gpu::EncodeOrcColumnData(chunks.device_ptr(), num_columns, num_rowgroups, stream)); CUDA_TRY(hipStreamSynchronize(stream)); return output; } std::vector<StripeInformation> writer::impl::gather_stripes( size_t num_columns, size_t num_rows, size_t num_index_streams, size_t num_data_streams, std::vector<uint32_t> const &stripe_list, hostdevice_vector<gpu::EncChunk> &chunks, hostdevice_vector<gpu::StripeStream> &strm_desc, hipStream_t stream) { std::vector<StripeInformation> stripes(stripe_list.size()); size_t group = 0; size_t stripe_start = 0; for (size_t s = 0; s < stripe_list.size(); s++) { size_t stripe_group_end = group + stripe_list[s]; for (size_t i = 0; i < num_columns; i++) { const auto *ck = &chunks[group * num_columns + i]; // Assign stream data of column data stream(s) for (int k = 0; k < gpu::CI_INDEX; k++) { const auto stream_id = ck->strm_id[k]; if (stream_id != -1) { auto *ss = &strm_desc[s * num_data_streams + stream_id - num_index_streams]; ss->stream_size = 0; ss->first_chunk_id = (group * num_columns + i); ss->num_chunks = (stripe_group_end - group); ss->column_id = i; ss->strm_type = k; } } } group = stripe_group_end; size_t stripe_end = ::min(group * row_index_stride_, num_rows); stripes[s].numberOfRows = stripe_end - stripe_start; stripe_start = stripe_end; } CUDA_TRY(hipMemcpyAsync(strm_desc.device_ptr(), strm_desc.host_ptr(), strm_desc.memory_size(), hipMemcpyHostToDevice, stream)); CUDA_TRY(gpu::CompactOrcDataStreams(strm_desc.device_ptr(), chunks.device_ptr(), strm_desc.size(), num_columns, stream)); CUDA_TRY(hipMemcpyAsync(strm_desc.host_ptr(), strm_desc.device_ptr(), strm_desc.memory_size(), hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipMemcpyAsync(chunks.host_ptr(), chunks.device_ptr(), chunks.memory_size(), hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipStreamSynchronize(stream)); return stripes; } void writer::impl::write_index_stream( int32_t stripe_id, int32_t stream_id, 
orc_column_view *columns, size_t num_columns, size_t num_data_streams, size_t group, size_t groups_in_stripe, hostdevice_vector<gpu::EncChunk> const &chunks, hostdevice_vector<gpu::StripeStream> const &strm_desc, hostdevice_vector<gpu_inflate_status_s> const &comp_out, StripeInformation &stripe, std::vector<Stream> &streams, ProtobufWriter *pbw) { // 0: position, 1: block position, 2: compressed position, 3: compressed size std::array<int32_t, 4> present; std::array<int32_t, 4> data; std::array<int32_t, 4> data2; auto kind = TypeKind::STRUCT; auto find_record = [=, &strm_desc](gpu::EncChunk const &chunk, gpu::StreamIndexType type) { std::array<int32_t, 4> record{-1, -1, -1, -1}; if (chunk.strm_id[type] > 0) { record[0] = 0; if (compression_kind_ != NONE) { const auto *ss = &strm_desc[stripe_id * num_data_streams + chunk.strm_id[type] - (num_columns + 1)]; record[1] = ss->first_block; record[2] = 0; record[3] = ss->stream_size; } } return record; }; auto scan_record = [=, &comp_out](gpu::EncChunk const &chunk, gpu::StreamIndexType type, std::array<int32_t, 4> &record) { if (record[0] >= 0) { record[0] += chunk.strm_len[type]; while ((record[1] >= 0) && (static_cast<size_t>(record[0]) >= compression_blocksize_) && (record[3] + 3 + comp_out[record[1]].bytes_written < static_cast<size_t>(record[4]))) { record[0] -= compression_blocksize_; record[3] += 3 + comp_out[record[1]].bytes_written; record[1] += 1; } } }; // TBD: Not sure we need an empty index stream for column 0 if (stream_id != 0) { const auto &ck = chunks[stream_id - 1]; present = find_record(ck, gpu::CI_PRESENT); data = find_record(ck, gpu::CI_DATA); data2 = find_record(ck, gpu::CI_DATA2); // Change string dictionary to int from index point of view kind = columns[stream_id - 1].orc_kind(); if (kind == TypeKind::STRING && columns[stream_id - 1].orc_encoding() == DICTIONARY_V2) { kind = TypeKind::INT; } } buffer_.resize((compression_kind_ != NONE) ? 3 : 0); // Add row index entries for (size_t g = group; g < group + groups_in_stripe; g++) { pbw->put_row_index_entry(present[2], present[0], data[2], data[0], data2[2], data2[0], kind); if (stream_id != 0) { const auto &ck = chunks[g * num_columns + stream_id - 1]; scan_record(ck, gpu::CI_PRESENT, present); scan_record(ck, gpu::CI_DATA, data); scan_record(ck, gpu::CI_DATA2, data2); } } streams[stream_id].length = buffer_.size(); if (compression_kind_ != NONE) { uint32_t uncomp_ix_len = (uint32_t)(streams[stream_id].length - 3) * 2 + 1; buffer_[0] = static_cast<uint8_t>(uncomp_ix_len >> 0); buffer_[1] = static_cast<uint8_t>(uncomp_ix_len >> 8); buffer_[2] = static_cast<uint8_t>(uncomp_ix_len >> 16); } outfile_.write(reinterpret_cast<char *>(buffer_.data()), buffer_.size()); stripe.indexLength += buffer_.size(); } void writer::impl::write_data_stream(gpu::StripeStream const &strm_desc, gpu::EncChunk const &chunk, uint8_t const *compressed_data, uint8_t *stream_out, StripeInformation &stripe, std::vector<Stream> &streams, hipStream_t stream) { const auto length = strm_desc.stream_size; streams[chunk.strm_id[strm_desc.strm_type]].length = length; if (length != 0) { const auto *stream_in = (compression_kind_ == NONE) ? 
chunk.streams[strm_desc.strm_type] : (compressed_data + strm_desc.bfr_offset); CUDA_TRY(hipMemcpyAsync(stream_out, stream_in, length, hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipStreamSynchronize(stream)); outfile_.write(reinterpret_cast<char *>(stream_out), length); } stripe.dataLength += length; } writer::impl::impl(std::string filepath, writer_options const &options, rmm::mr::device_memory_resource *mr) : _mr(mr) { compression_kind_ = to_orc_compression(options.compression); outfile_.open(filepath, std::ios::out | std::ios::binary | std::ios::trunc); CUDF_EXPECTS(outfile_.is_open(), "Cannot open output file"); } void writer::impl::write(table_view const &table, hipStream_t stream) { size_type num_columns = table.num_columns(); size_type num_rows = 0; // Mapping of string columns for quick look-up std::vector<int> str_col_ids; // Wrapper around cudf columns to attach ORC-specific type info std::vector<orc_column_view> orc_columns; for (auto it = table.begin(); it < table.end(); ++it) { const auto col = *it; const auto current_id = orc_columns.size(); const auto current_str_id = str_col_ids.size(); num_rows = std::max<uint32_t>(num_rows, col.size()); orc_columns.emplace_back(current_id, current_str_id, col, stream); if (orc_columns.back().is_string()) { str_col_ids.push_back(current_id); } } rmm::device_vector<uint32_t> dict_index(str_col_ids.size() * num_rows); rmm::device_vector<uint32_t> dict_data(str_col_ids.size() * num_rows); // Build per-column dictionary indices const auto num_rowgroups = div_by_rowgroups<size_t>(num_rows); const auto num_dict_chunks = num_rowgroups * str_col_ids.size(); hostdevice_vector<gpu::DictionaryChunk> dict(num_dict_chunks); if (str_col_ids.size() != 0) { init_dictionaries(orc_columns.data(), num_rows, str_col_ids, dict_data.data().get(), dict_index.data().get(), dict, stream); } // Decide stripe boundaries early on, based on uncompressed size std::vector<uint32_t> stripe_list; for (size_t g = 0, stripe_start = 0, stripe_size = 0; g < num_rowgroups; g++) { size_t rowgroup_size = 0; for (int i = 0; i < num_columns; i++) { if (orc_columns[i].is_string()) { const auto dt = orc_columns[i].host_dict_chunk(g); rowgroup_size += 1 * row_index_stride_; rowgroup_size += dt->string_char_count; } else { rowgroup_size += orc_columns[i].type_width() * row_index_stride_; } } // Apply rows per stripe limit to limit string dictionaries const size_t max_stripe_rows = !str_col_ids.empty() ? 
1000000 : 5000000; if ((g > stripe_start) && (stripe_size + rowgroup_size > max_stripe_size_ || (g + 1 - stripe_start) * row_index_stride_ > max_stripe_rows)) { stripe_list.push_back(g - stripe_start); stripe_start = g; stripe_size = 0; } stripe_size += rowgroup_size; if (g + 1 == num_rowgroups) { stripe_list.push_back(num_rowgroups - stripe_start); } } // Build stripe-level dictionaries const auto num_stripe_dict = stripe_list.size() * str_col_ids.size(); hostdevice_vector<gpu::StripeDictionary> stripe_dict(num_stripe_dict); if (str_col_ids.size() != 0) { build_dictionaries(orc_columns.data(), num_rows, str_col_ids, stripe_list, dict, dict_index.data().get(), stripe_dict, stream); } // Initialize streams std::vector<int32_t> strm_ids(num_columns * gpu::CI_NUM_STREAMS, -1); auto streams = gather_streams(orc_columns.data(), orc_columns.size(), num_rows, stripe_list, strm_ids); // Encode column data chunks const auto num_chunks = num_rowgroups * num_columns; hostdevice_vector<gpu::EncChunk> chunks(num_chunks); auto output = encode_columns(orc_columns.data(), num_columns, num_rows, num_rowgroups, str_col_ids, stripe_list, streams, strm_ids, chunks, stream); // Assemble individual desparate column chunks into contiguous data streams const auto num_index_streams = (num_columns + 1); const auto num_data_streams = streams.size() - num_index_streams; const auto num_stripe_streams = stripe_list.size() * num_data_streams; hostdevice_vector<gpu::StripeStream> strm_desc(num_stripe_streams); auto stripes = gather_stripes(num_columns, num_rows, num_index_streams, num_data_streams, stripe_list, chunks, strm_desc, stream); // Allocate intermediate output stream buffer size_t compressed_bfr_size = 0; size_t num_compressed_blocks = 0; auto stream_output = [&]() { size_t max_stream_size = 0; for (size_t stripe_id = 0; stripe_id < stripe_list.size(); stripe_id++) { for (size_t i = 0; i < num_data_streams; i++) { gpu::StripeStream *ss = &strm_desc[stripe_id * num_data_streams + i]; size_t stream_size = ss->stream_size; if (compression_kind_ != NONE) { ss->first_block = num_compressed_blocks; ss->bfr_offset = compressed_bfr_size; auto num_blocks = std::max<uint32_t>((stream_size + compression_blocksize_ - 1) / compression_blocksize_, 1); stream_size += num_blocks * 3; num_compressed_blocks += num_blocks; compressed_bfr_size += stream_size; } max_stream_size = ::max(max_stream_size, stream_size); } } return pinned_buffer<uint8_t>{[](size_t size) { uint8_t *ptr = nullptr; CUDA_TRY(hipHostMalloc(&ptr, size)); return ptr; }(max_stream_size), hipHostFree}; }(); // Compress the data streams rmm::device_buffer compressed_data(compressed_bfr_size, stream); hostdevice_vector<gpu_inflate_status_s> comp_out(num_compressed_blocks); hostdevice_vector<gpu_inflate_input_s> comp_in(num_compressed_blocks); if (compression_kind_ != NONE) { CUDA_TRY(hipMemcpyAsync(strm_desc.device_ptr(), strm_desc.host_ptr(), strm_desc.memory_size(), hipMemcpyHostToDevice, stream)); CUDA_TRY(gpu::CompressOrcDataStreams( static_cast<uint8_t *>(compressed_data.data()), strm_desc.device_ptr(), chunks.device_ptr(), comp_in.device_ptr(), comp_out.device_ptr(), num_stripe_streams, num_compressed_blocks, compression_kind_, compression_blocksize_, stream)); CUDA_TRY(hipMemcpyAsync(strm_desc.host_ptr(), strm_desc.device_ptr(), strm_desc.memory_size(), hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipMemcpyAsync(comp_out.host_ptr(), comp_out.device_ptr(), comp_out.memory_size(), hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipStreamSynchronize(stream)); } 
ProtobufWriter pbw_(&buffer_); // Write file header outfile_.write(MAGIC, std::strlen(MAGIC)); // Write stripes size_t group = 0; for (size_t stripe_id = 0; stripe_id < stripes.size(); stripe_id++) { auto groups_in_stripe = div_by_rowgroups(stripes[stripe_id].numberOfRows); stripes[stripe_id].offset = outfile_.tellp(); // Column (skippable) index streams appear at the start of the stripe stripes[stripe_id].indexLength = 0; for (size_t col_id = 0; col_id <= (size_t)num_columns; col_id++) { write_index_stream(stripe_id, col_id, orc_columns.data(), num_columns, num_data_streams, group, groups_in_stripe, chunks, strm_desc, comp_out, stripes[stripe_id], streams, &pbw_); } // Column data consisting one or more separate streams stripes[stripe_id].dataLength = 0; for (size_t i = 0; i < num_data_streams; i++) { const auto &ss = strm_desc[stripe_id * num_data_streams + i]; const auto &ck = chunks[group * num_columns + ss.column_id]; write_data_stream(ss, ck, static_cast<uint8_t *>(compressed_data.data()), stream_output.get(), stripes[stripe_id], streams, stream); } // Write stripefooter consisting of stream information StripeFooter sf; sf.streams = streams; sf.columns.resize(num_columns + 1); sf.columns[0].kind = DIRECT; sf.columns[0].dictionarySize = 0; for (size_t i = 1; i < sf.columns.size(); ++i) { sf.columns[i].kind = orc_columns[i - 1].orc_encoding(); sf.columns[i].dictionarySize = (sf.columns[i].kind == DICTIONARY_V2) ? orc_columns[i - 1].host_stripe_dict(stripe_id)->num_strings : 0; if (orc_columns[i - 1].orc_kind() == TIMESTAMP) { sf.writerTimezone = "UTC"; } } buffer_.resize((compression_kind_ != NONE) ? 3 : 0); pbw_.write(&sf); stripes[stripe_id].footerLength = buffer_.size(); if (compression_kind_ != NONE) { uint32_t uncomp_sf_len = (stripes[stripe_id].footerLength - 3) * 2 + 1; buffer_[0] = static_cast<uint8_t>(uncomp_sf_len >> 0); buffer_[1] = static_cast<uint8_t>(uncomp_sf_len >> 8); buffer_[2] = static_cast<uint8_t>(uncomp_sf_len >> 16); } outfile_.write(reinterpret_cast<char *>(buffer_.data()), buffer_.size()); group += groups_in_stripe; } // Write filefooter metadata // TBD: We may want to add pandas or spark column metadata strings here FileFooter ff; ff.headerLength = std::strlen(MAGIC); ff.contentLength = outfile_.tellp(); ff.stripes = std::move(stripes); ff.numberOfRows = num_rows; ff.rowIndexStride = row_index_stride_; ff.types.resize(1 + num_columns); ff.types[0].kind = STRUCT; ff.types[0].subtypes.resize(num_columns); ff.types[0].fieldNames.resize(num_columns); for (int i = 0; i < num_columns; ++i) { ff.types[1 + i].kind = orc_columns[i].orc_kind(); ff.types[0].subtypes[i] = 1 + i; ff.types[0].fieldNames[i] = orc_columns[i].orc_name(); } buffer_.resize((compression_kind_ != NONE) ? 
3 : 0); pbw_.write(&ff); // Write postscript metadata PostScript ps; ps.footerLength = buffer_.size(); ps.compression = compression_kind_; ps.compressionBlockSize = compression_blocksize_; ps.version = {0, 12}; ps.metadataLength = 0; // TODO: Write stripe statistics ps.magic = MAGIC; if (compression_kind_ != NONE) { // TODO: If the file footer ends up larger than the compression block // size, we'll need to insert additional 3-byte block headers uint32_t uncomp_ff_len = (uint32_t)(ps.footerLength - 3) * 2 + 1; buffer_[0] = static_cast<uint8_t>(uncomp_ff_len >> 0); buffer_[1] = static_cast<uint8_t>(uncomp_ff_len >> 8); buffer_[2] = static_cast<uint8_t>(uncomp_ff_len >> 16); } const auto ps_length = static_cast<uint8_t>(pbw_.write(&ps)); buffer_.push_back(ps_length); outfile_.write(reinterpret_cast<char *>(buffer_.data()), buffer_.size()); outfile_.flush(); } // Forward to implementation writer::writer(std::string filepath, writer_options const &options, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(filepath, options, mr)) {} // Destructor within this translation unit writer::~writer() = default; // Forward to implementation void writer::write_all(table_view const &table, hipStream_t stream) { _impl->write(table, stream); } } // namespace orc } // namespace detail } // namespace io } // namespace experimental } // namespace cudf
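Several places in the writer above (write_index_stream, the stripe footer, and the file footer) reserve three bytes at the front of buffer_ and later patch in (length - 3) * 2 + 1. That is the 3-byte little-endian ORC compression block header: the chunk length shifted left by one, with the low bit set when the chunk is stored "original" (uncompressed). The sketch below is my own illustration of that encoding; orc_block_header is a hypothetical helper, not cuDF code.

// Encode a 3-byte ORC compression block header for a payload of payload_len
// bytes; is_original = true means the payload bytes are stored uncompressed.
#include <array>
#include <cassert>
#include <cstdint>

std::array<uint8_t, 3> orc_block_header(uint32_t payload_len, bool is_original) {
  const uint32_t v = (payload_len << 1) | (is_original ? 1u : 0u);
  return {static_cast<uint8_t>(v >> 0),
          static_cast<uint8_t>(v >> 8),
          static_cast<uint8_t>(v >> 16)};
}

int main() {
  // A 100-byte uncompressed protobuf body: header value = 100 * 2 + 1 = 201.
  auto hdr = orc_block_header(100, /*is_original=*/true);
  assert(hdr[0] == 201 && hdr[1] == 0 && hdr[2] == 0);
  return 0;
}

When a chunk does compress, the low bit is left clear and the compressed length is written instead; the TODO in the file footer path notes that a metadata body larger than compression_blocksize_ would need additional 3-byte headers, one per block.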
7e0bc3bc8a9cca1b5c9be72a66d56894ba0ba491.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file writer_impl.cu * @brief cuDF-IO ORC writer class implementation */ #include "writer_impl.hpp" #include <nvstrings/NVStrings.h> #include <cudf/null_mask.hpp> #include <cudf/strings/strings_column_view.hpp> #include <algorithm> #include <cstring> #include <utility> #include <rmm/thrust_rmm_allocator.h> #include <rmm/device_buffer.hpp> namespace cudf { namespace experimental { namespace io { namespace detail { namespace orc { using namespace cudf::io::orc; using namespace cudf::io; namespace { /** * @brief Helper for pinned host memory **/ template <typename T> using pinned_buffer = std::unique_ptr<T, decltype(&cudaFreeHost)>; /** * @brief Function that translates GDF compression to ORC compression **/ constexpr orc::CompressionKind to_orc_compression( compression_type compression) { switch (compression) { case compression_type::SNAPPY: return orc::CompressionKind::SNAPPY; case compression_type::NONE: default: return orc::CompressionKind::NONE; } } /** * @brief Function that translates GDF dtype to ORC datatype **/ constexpr orc::TypeKind to_orc_type(cudf::type_id id) { switch (id) { case cudf::type_id::INT8: return TypeKind::BYTE; case cudf::type_id::INT16: return TypeKind::SHORT; case cudf::type_id::INT32: return TypeKind::INT; case cudf::type_id::INT64: return TypeKind::LONG; case cudf::type_id::FLOAT32: return TypeKind::FLOAT; case cudf::type_id::FLOAT64: return TypeKind::DOUBLE; case cudf::type_id::BOOL8: return TypeKind::BOOLEAN; case cudf::type_id::TIMESTAMP_DAYS: return TypeKind::DATE; case cudf::type_id::TIMESTAMP_SECONDS: case cudf::type_id::TIMESTAMP_MICROSECONDS: case cudf::type_id::TIMESTAMP_MILLISECONDS: case cudf::type_id::TIMESTAMP_NANOSECONDS: return TypeKind::TIMESTAMP; case cudf::type_id::CATEGORY: return TypeKind::INT; case cudf::type_id::STRING: return TypeKind::STRING; default: return TypeKind::INVALID_TYPE_KIND; } } /** * @brief Function that translates time unit to nanoscale multiple **/ template <typename T> constexpr T to_clockscale(cudf::type_id timestamp_id) { switch (timestamp_id) { case cudf::type_id::TIMESTAMP_SECONDS: return 9; case cudf::type_id::TIMESTAMP_MICROSECONDS: return 6; case cudf::type_id::TIMESTAMP_MILLISECONDS: return 3; case cudf::type_id::TIMESTAMP_NANOSECONDS: default: return 0; } } } // namespace /** * @brief Helper class that adds ORC-specific column info **/ class orc_column_view { using str_pair = std::pair<const char *, size_t>; public: /** * @brief Constructor that extracts out the string position + length pairs * for building dictionaries for string columns **/ explicit orc_column_view(size_t id, size_t str_id, column_view const &col, cudaStream_t stream) : _id(id), _str_id(str_id), _string_type(col.type().id() == type_id::STRING), _type_width(_string_type ? 0 : cudf::size_of(col.type())), _data_count(col.size()), _null_count(col.null_count()), _data(col.data<uint8_t>()), _nulls(col.has_nulls() ? 
col.null_mask() : nullptr), _clockscale(to_clockscale<uint8_t>(col.type().id())), _type_kind(to_orc_type(col.type().id())) { if (_string_type) { strings_column_view view{col}; _nvstr = NVStrings::create_from_offsets(view.chars().data<char>(), view.size(), view.offsets().data<size_type>()); _indexes = rmm::device_buffer(_data_count * sizeof(str_pair), stream); CUDF_EXPECTS( _nvstr->create_index(static_cast<str_pair *>(_indexes.data())) == 0, "Cannot retrieve string pairs"); _data = _indexes.data(); } _name = "_col" + std::to_string(_id); } auto is_string() const noexcept { return _string_type; } void set_dict_stride(size_t stride) noexcept { dict_stride = stride; } auto get_dict_stride() const noexcept { return dict_stride; } /** * @brief Function that associates an existing dictionary chunk allocation **/ void attach_dict_chunk(gpu::DictionaryChunk *host_dict, gpu::DictionaryChunk *dev_dict) { dict = host_dict; d_dict = dev_dict; } auto host_dict_chunk(size_t rowgroup) { assert(_string_type); return &dict[rowgroup * dict_stride + _str_id]; } auto device_dict_chunk() const { return d_dict; } /** * @brief Function that associates an existing stripe dictionary allocation **/ void attach_stripe_dict(gpu::StripeDictionary *host_stripe_dict, gpu::StripeDictionary *dev_stripe_dict) { stripe_dict = host_stripe_dict; d_stripe_dict = dev_stripe_dict; } auto host_stripe_dict(size_t stripe) const { assert(_string_type); return &stripe_dict[stripe * dict_stride + _str_id]; } auto device_stripe_dict() const { return d_stripe_dict; } size_t type_width() const noexcept { return _type_width; } size_t data_count() const noexcept { return _data_count; } size_t null_count() const noexcept { return _null_count; } void const *data() const noexcept { return _data; } uint32_t const *nulls() const noexcept { return _nulls; } uint8_t clockscale() const noexcept { return _clockscale; } void set_orc_encoding(ColumnEncodingKind e) { _encoding_kind = e; } auto orc_kind() const noexcept { return _type_kind; } auto orc_encoding() const noexcept { return _encoding_kind; } auto orc_name() const noexcept { return _name; } private: // Identifier within set of columns and string columns, respectively size_t _id = 0; size_t _str_id = 0; bool _string_type = false; size_t _type_width = 0; size_t _data_count = 0; size_t _null_count = 0; void const *_data = nullptr; uint32_t const *_nulls = nullptr; uint8_t _clockscale = 0; // ORC-related members std::string _name{}; TypeKind _type_kind; ColumnEncodingKind _encoding_kind; // String dictionary-related members NVStrings *_nvstr = nullptr; rmm::device_buffer _indexes; size_t dict_stride = 0; gpu::DictionaryChunk const *dict = nullptr; gpu::StripeDictionary const *stripe_dict = nullptr; gpu::DictionaryChunk *d_dict = nullptr; gpu::StripeDictionary *d_stripe_dict = nullptr; }; void writer::impl::init_dictionaries( orc_column_view *columns, size_t num_rows, std::vector<int> const &str_col_ids, uint32_t *dict_data, uint32_t *dict_index, hostdevice_vector<gpu::DictionaryChunk> &dict, cudaStream_t stream) { const size_t num_rowgroups = dict.size() / str_col_ids.size(); // Setup per-rowgroup dictionary indexes for each dictionary-aware column for (size_t i = 0; i < str_col_ids.size(); ++i) { auto &str_column = columns[str_col_ids[i]]; str_column.set_dict_stride(str_col_ids.size()); str_column.attach_dict_chunk(dict.host_ptr(), dict.device_ptr()); for (size_t g = 0; g < num_rowgroups; g++) { auto *ck = &dict[g * str_col_ids.size() + i]; ck->valid_map_base = str_column.nulls(); 
ck->column_data_base = str_column.data(); ck->dict_data = dict_data + i * num_rows + g * row_index_stride_; ck->dict_index = dict_index + i * num_rows; // Indexed by abs row ck->start_row = g * row_index_stride_; ck->num_rows = std::min<uint32_t>( row_index_stride_, std::max<int>(str_column.data_count() - ck->start_row, 0)); ck->num_strings = 0; ck->string_char_count = 0; ck->num_dict_strings = 0; ck->dict_char_count = 0; } } CUDA_TRY(cudaMemcpyAsync(dict.device_ptr(), dict.host_ptr(), dict.memory_size(), cudaMemcpyHostToDevice, stream)); CUDA_TRY(gpu::InitDictionaryIndices(dict.device_ptr(), str_col_ids.size(), num_rowgroups, stream)); CUDA_TRY(cudaMemcpyAsync(dict.host_ptr(), dict.device_ptr(), dict.memory_size(), cudaMemcpyDeviceToHost, stream)); CUDA_TRY(cudaStreamSynchronize(stream)); } void writer::impl::build_dictionaries( orc_column_view *columns, size_t num_rows, std::vector<int> const &str_col_ids, std::vector<uint32_t> const &stripe_list, hostdevice_vector<gpu::DictionaryChunk> const &dict, uint32_t *dict_index, hostdevice_vector<gpu::StripeDictionary> &stripe_dict, cudaStream_t stream) { const auto num_rowgroups = dict.size() / str_col_ids.size(); for (size_t i = 0; i < str_col_ids.size(); i++) { size_t direct_cost = 0, dict_cost = 0; auto &str_column = columns[str_col_ids[i]]; str_column.attach_stripe_dict(stripe_dict.host_ptr(), stripe_dict.device_ptr()); for (size_t j = 0, g = 0; j < stripe_list.size(); j++) { const auto num_chunks = stripe_list[j]; auto *sd = &stripe_dict[j * str_col_ids.size() + i]; sd->column_data_base = str_column.host_dict_chunk(0)->column_data_base; sd->dict_data = str_column.host_dict_chunk(g)->dict_data; sd->dict_index = dict_index + i * num_rows; // Indexed by abs row sd->column_id = str_col_ids[i]; sd->start_chunk = (uint32_t)g; sd->num_chunks = num_chunks; sd->num_strings = 0; sd->dict_char_count = 0; for (size_t k = g; k < g + num_chunks; k++) { const auto &dt = dict[k * str_col_ids.size() + i]; sd->num_strings += dt.num_dict_strings; direct_cost += dt.string_char_count; dict_cost += dt.dict_char_count + dt.num_dict_strings; } g += num_chunks; } // Early disable of dictionary if it doesn't look good at the chunk level if (enable_dictionary_ && dict_cost >= direct_cost) { for (size_t j = 0; j < stripe_list.size(); j++) { stripe_dict[j * str_col_ids.size() + i].dict_data = nullptr; } } } CUDA_TRY(cudaMemcpyAsync(stripe_dict.device_ptr(), stripe_dict.host_ptr(), stripe_dict.memory_size(), cudaMemcpyHostToDevice, stream)); CUDA_TRY(gpu::BuildStripeDictionaries( stripe_dict.device_ptr(), stripe_dict.host_ptr(), dict.device_ptr(), stripe_list.size(), num_rowgroups, str_col_ids.size(), stream)); CUDA_TRY(cudaMemcpyAsync(stripe_dict.host_ptr(), stripe_dict.device_ptr(), stripe_dict.memory_size(), cudaMemcpyDeviceToHost, stream)); CUDA_TRY(cudaStreamSynchronize(stream)); } std::vector<Stream> writer::impl::gather_streams( orc_column_view *columns, size_t num_columns, size_t num_rows, std::vector<uint32_t> const &stripe_list, std::vector<int32_t> &strm_ids) { // First n + 1 streams are row index streams, including 'column 0' std::vector<Stream> streams; streams.resize(num_columns + 1); streams[0].column = 0; streams[0].kind = ROW_INDEX; streams[0].length = 0; for (size_t i = 0; i < num_columns; ++i) { TypeKind kind = columns[i].orc_kind(); StreamKind data_kind = DATA; StreamKind data2_kind = LENGTH; ColumnEncodingKind encoding_kind = DIRECT; int64_t present_stream_size = 0; int64_t data_stream_size = 0; int64_t data2_stream_size = 0; int64_t 
dict_stream_size = 0; if (columns[i].null_count() != 0 || columns[i].data_count() != num_rows) { present_stream_size = ((row_index_stride_ + 7) >> 3); present_stream_size += (present_stream_size + 0x7f) >> 7; } switch (kind) { case TypeKind::BOOLEAN: data_stream_size = div_rowgroups_by<int64_t>(1024) * (128 + 1); encoding_kind = DIRECT; break; case TypeKind::BYTE: data_stream_size = div_rowgroups_by<int64_t>(128) * (128 + 1); encoding_kind = DIRECT; break; case TypeKind::SHORT: data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 2 + 2); encoding_kind = DIRECT_V2; break; case TypeKind::FLOAT: // Pass through if no nulls (no RLE encoding for floating point) data_stream_size = (columns[i].null_count() != 0) ? div_rowgroups_by<int64_t>(512) * (512 * 4 + 2) : INT64_C(-1); encoding_kind = DIRECT; break; case TypeKind::INT: case TypeKind::DATE: data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2); encoding_kind = DIRECT_V2; break; case TypeKind::DOUBLE: // Pass through if no nulls (no RLE encoding for floating point) data_stream_size = (columns[i].null_count() != 0) ? div_rowgroups_by<int64_t>(512) * (512 * 8 + 2) : INT64_C(-1); encoding_kind = DIRECT; break; case TypeKind::LONG: data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 8 + 2); encoding_kind = DIRECT_V2; break; case TypeKind::STRING: { bool enable_dict = enable_dictionary_; size_t direct_data_size = 0; size_t dict_data_size = 0; size_t dict_strings = 0; size_t dict_lengths_div512 = 0; for (size_t stripe = 0, g = 0; stripe < stripe_list.size(); stripe++) { const auto sd = columns[i].host_stripe_dict(stripe); enable_dict = (enable_dict && sd->dict_data != nullptr); if (enable_dict) { dict_strings += sd->num_strings; dict_lengths_div512 += (sd->num_strings + 0x1ff) >> 9; dict_data_size += sd->dict_char_count; } for (uint32_t k = 0; k < stripe_list[stripe]; k++, g++) { direct_data_size += columns[i].host_dict_chunk(g)->string_char_count; } } if (enable_dict) { uint32_t dict_bits = 0; for (dict_bits = 1; dict_bits < 32; dict_bits <<= 1) { if (dict_strings <= (1ull << dict_bits)) break; } const auto valid_count = columns[i].data_count() - columns[i].null_count(); dict_data_size += (dict_bits * valid_count + 7) >> 3; } // Decide between direct or dictionary encoding if (enable_dict && dict_data_size < direct_data_size) { data_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2); data2_stream_size = dict_lengths_div512 * (512 * 4 + 2); dict_stream_size = std::max<size_t>(dict_data_size, 1); encoding_kind = DICTIONARY_V2; } else { data_stream_size = std::max<size_t>(direct_data_size, 1); data2_stream_size = div_rowgroups_by<int64_t>(512) * (512 * 4 + 2); encoding_kind = DIRECT_V2; } break; } case TypeKind::TIMESTAMP: data_stream_size = ((row_index_stride_ + 0x1ff) >> 9) * (512 * 4 + 2); data2_stream_size = data_stream_size; data2_kind = SECONDARY; encoding_kind = DIRECT_V2; break; default: CUDF_FAIL("Unsupported ORC type kind"); } // Initialize the column's metadata columns[i].set_orc_encoding(encoding_kind); // Initialize the column's index stream const auto id = static_cast<uint32_t>(1 + i); streams[id].column = id; streams[id].kind = ROW_INDEX; streams[id].length = 0; // Initialize the column's data stream(s) const auto base = i * gpu::CI_NUM_STREAMS; if (present_stream_size != 0) { auto len = static_cast<uint64_t>(present_stream_size); strm_ids[base + gpu::CI_PRESENT] = streams.size(); streams.push_back(orc::Stream{PRESENT, id, len}); } if (data_stream_size != 0) { auto len = 
static_cast<uint64_t>(std::max<int64_t>(data_stream_size, 0)); strm_ids[base + gpu::CI_DATA] = streams.size(); streams.push_back(orc::Stream{data_kind, id, len}); } if (data2_stream_size != 0) { auto len = static_cast<uint64_t>(std::max<int64_t>(data2_stream_size, 0)); strm_ids[base + gpu::CI_DATA2] = streams.size(); streams.push_back(orc::Stream{data2_kind, id, len}); } if (dict_stream_size != 0) { auto len = static_cast<uint64_t>(dict_stream_size); strm_ids[base + gpu::CI_DICTIONARY] = streams.size(); streams.push_back(orc::Stream{DICTIONARY_DATA, id, len}); } } return streams; } rmm::device_buffer writer::impl::encode_columns( orc_column_view *columns, size_t num_columns, size_t num_rows, size_t num_rowgroups, std::vector<int> const &str_col_ids, std::vector<uint32_t> const &stripe_list, std::vector<Stream> const &streams, std::vector<int32_t> const &strm_ids, hostdevice_vector<gpu::EncChunk> &chunks, cudaStream_t stream) { // Allocate combined buffer for RLE data and string data output std::vector<size_t> strm_offsets(streams.size()); size_t str_data_size = 0; auto output = [&]() { size_t rle_data_size = 0; for (size_t i = 0; i < streams.size(); ++i) { const auto &stream = streams[i]; const auto &column = columns[stream.column - 1]; if (((stream.kind == DICTIONARY_DATA || stream.kind == LENGTH) && (column.orc_encoding() == DICTIONARY_V2)) || ((stream.kind == DATA) && (column.orc_kind() == TypeKind::STRING && column.orc_encoding() == DIRECT_V2))) { strm_offsets[i] = str_data_size; str_data_size += stream.length; } else { strm_offsets[i] = rle_data_size; rle_data_size += (stream.length * num_rowgroups + 7) & ~7; } } str_data_size = (str_data_size + 7) & ~7; return rmm::device_buffer(rle_data_size + str_data_size, stream); }(); auto dst_base = static_cast<uint8_t *>(output.data()); // Initialize column chunks' descriptions size_t stripe_start = 0; size_t stripe_id = 0; for (size_t j = 0; j < num_rowgroups; j++) { for (size_t i = 0; i < num_columns; i++) { auto *ck = &chunks[j * num_columns + i]; ck->start_row = (j * row_index_stride_); ck->num_rows = std::min<uint32_t>(row_index_stride_, num_rows - ck->start_row); ck->valid_rows = columns[i].data_count(); ck->encoding_kind = columns[i].orc_encoding(); ck->type_kind = columns[i].orc_kind(); if (ck->type_kind == TypeKind::STRING) { ck->valid_map_base = columns[i].nulls(); ck->column_data_base = (ck->encoding_kind == DICTIONARY_V2) ? columns[i].host_stripe_dict(stripe_id)->dict_index : columns[i].data(); ck->dtype_len = 1; } else { ck->valid_map_base = columns[i].nulls(); ck->column_data_base = columns[i].data(); ck->dtype_len = columns[i].type_width(); } ck->scale = columns[i].clockscale(); for (int k = 0; k < gpu::CI_NUM_STREAMS; k++) { const auto strm_id = strm_ids[i * gpu::CI_NUM_STREAMS + k]; ck->strm_id[k] = strm_id; if (strm_id >= 0) { if ((k == gpu::CI_DICTIONARY) || (k == gpu::CI_DATA2 && ck->encoding_kind == DICTIONARY_V2)) { if (j == stripe_start) { const int32_t dict_stride = columns[i].get_dict_stride(); const auto stripe = columns[i].host_stripe_dict(stripe_id); ck->strm_len[k] = (k == gpu::CI_DICTIONARY) ? 
stripe->dict_char_count : (((stripe->num_strings + 0x1ff) >> 9) * (512 * 4 + 2)); if (stripe_id == 0) { ck->streams[k] = dst_base + strm_offsets[strm_id]; } else { const auto *ck_up = &chunks[stripe[-dict_stride].start_chunk * num_columns + i]; ck->streams[k] = ck_up->streams[k] + ck_up->strm_len[k]; } } else { ck->strm_len[k] = 0; ck->streams[k] = ck[-num_columns].streams[k]; } } else if (k == gpu::CI_DATA && ck->type_kind == TypeKind::STRING && ck->encoding_kind == DIRECT_V2) { ck->strm_len[k] = columns[i].host_dict_chunk(j)->string_char_count; ck->streams[k] = (j == 0) ? dst_base + strm_offsets[strm_id] : (ck[-num_columns].streams[k] + ck[-num_columns].strm_len[k]); } else if (k == gpu::CI_DATA && streams[strm_id].length == 0 && (ck->type_kind == DOUBLE || ck->type_kind == FLOAT)) { // Pass-through ck->strm_len[k] = ck->num_rows * ck->dtype_len; ck->streams[k] = nullptr; } else { ck->strm_len[k] = streams[strm_id].length; ck->streams[k] = dst_base + str_data_size + strm_offsets[strm_id] + streams[strm_id].length * j; } } else { ck->strm_len[k] = 0; ck->streams[k] = nullptr; } } } // Track the current stripe this rowgroup chunk belongs if (j + 1 == stripe_start + stripe_list[stripe_id]) { stripe_start = j + 1; stripe_id++; } } CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), cudaMemcpyHostToDevice, stream)); if (!str_col_ids.empty()) { auto d_stripe_dict = columns[str_col_ids[0]].device_stripe_dict(); CUDA_TRY(gpu::EncodeStripeDictionaries(d_stripe_dict, chunks.device_ptr(), str_col_ids.size(), num_columns, stripe_list.size(), stream)); } CUDA_TRY(gpu::EncodeOrcColumnData(chunks.device_ptr(), num_columns, num_rowgroups, stream)); CUDA_TRY(cudaStreamSynchronize(stream)); return output; } std::vector<StripeInformation> writer::impl::gather_stripes( size_t num_columns, size_t num_rows, size_t num_index_streams, size_t num_data_streams, std::vector<uint32_t> const &stripe_list, hostdevice_vector<gpu::EncChunk> &chunks, hostdevice_vector<gpu::StripeStream> &strm_desc, cudaStream_t stream) { std::vector<StripeInformation> stripes(stripe_list.size()); size_t group = 0; size_t stripe_start = 0; for (size_t s = 0; s < stripe_list.size(); s++) { size_t stripe_group_end = group + stripe_list[s]; for (size_t i = 0; i < num_columns; i++) { const auto *ck = &chunks[group * num_columns + i]; // Assign stream data of column data stream(s) for (int k = 0; k < gpu::CI_INDEX; k++) { const auto stream_id = ck->strm_id[k]; if (stream_id != -1) { auto *ss = &strm_desc[s * num_data_streams + stream_id - num_index_streams]; ss->stream_size = 0; ss->first_chunk_id = (group * num_columns + i); ss->num_chunks = (stripe_group_end - group); ss->column_id = i; ss->strm_type = k; } } } group = stripe_group_end; size_t stripe_end = std::min(group * row_index_stride_, num_rows); stripes[s].numberOfRows = stripe_end - stripe_start; stripe_start = stripe_end; } CUDA_TRY(cudaMemcpyAsync(strm_desc.device_ptr(), strm_desc.host_ptr(), strm_desc.memory_size(), cudaMemcpyHostToDevice, stream)); CUDA_TRY(gpu::CompactOrcDataStreams(strm_desc.device_ptr(), chunks.device_ptr(), strm_desc.size(), num_columns, stream)); CUDA_TRY(cudaMemcpyAsync(strm_desc.host_ptr(), strm_desc.device_ptr(), strm_desc.memory_size(), cudaMemcpyDeviceToHost, stream)); CUDA_TRY(cudaMemcpyAsync(chunks.host_ptr(), chunks.device_ptr(), chunks.memory_size(), cudaMemcpyDeviceToHost, stream)); CUDA_TRY(cudaStreamSynchronize(stream)); return stripes; } void writer::impl::write_index_stream( int32_t stripe_id, int32_t 
stream_id, orc_column_view *columns, size_t num_columns, size_t num_data_streams, size_t group, size_t groups_in_stripe, hostdevice_vector<gpu::EncChunk> const &chunks, hostdevice_vector<gpu::StripeStream> const &strm_desc, hostdevice_vector<gpu_inflate_status_s> const &comp_out, StripeInformation &stripe, std::vector<Stream> &streams, ProtobufWriter *pbw) { // 0: position, 1: block position, 2: compressed position, 3: compressed size std::array<int32_t, 4> present; std::array<int32_t, 4> data; std::array<int32_t, 4> data2; auto kind = TypeKind::STRUCT; auto find_record = [=, &strm_desc](gpu::EncChunk const &chunk, gpu::StreamIndexType type) { std::array<int32_t, 4> record{-1, -1, -1, -1}; if (chunk.strm_id[type] > 0) { record[0] = 0; if (compression_kind_ != NONE) { const auto *ss = &strm_desc[stripe_id * num_data_streams + chunk.strm_id[type] - (num_columns + 1)]; record[1] = ss->first_block; record[2] = 0; record[3] = ss->stream_size; } } return record; }; auto scan_record = [=, &comp_out](gpu::EncChunk const &chunk, gpu::StreamIndexType type, std::array<int32_t, 4> &record) { if (record[0] >= 0) { record[0] += chunk.strm_len[type]; while ((record[1] >= 0) && (static_cast<size_t>(record[0]) >= compression_blocksize_) && (record[3] + 3 + comp_out[record[1]].bytes_written < static_cast<size_t>(record[4]))) { record[0] -= compression_blocksize_; record[3] += 3 + comp_out[record[1]].bytes_written; record[1] += 1; } } }; // TBD: Not sure we need an empty index stream for column 0 if (stream_id != 0) { const auto &ck = chunks[stream_id - 1]; present = find_record(ck, gpu::CI_PRESENT); data = find_record(ck, gpu::CI_DATA); data2 = find_record(ck, gpu::CI_DATA2); // Change string dictionary to int from index point of view kind = columns[stream_id - 1].orc_kind(); if (kind == TypeKind::STRING && columns[stream_id - 1].orc_encoding() == DICTIONARY_V2) { kind = TypeKind::INT; } } buffer_.resize((compression_kind_ != NONE) ? 3 : 0); // Add row index entries for (size_t g = group; g < group + groups_in_stripe; g++) { pbw->put_row_index_entry(present[2], present[0], data[2], data[0], data2[2], data2[0], kind); if (stream_id != 0) { const auto &ck = chunks[g * num_columns + stream_id - 1]; scan_record(ck, gpu::CI_PRESENT, present); scan_record(ck, gpu::CI_DATA, data); scan_record(ck, gpu::CI_DATA2, data2); } } streams[stream_id].length = buffer_.size(); if (compression_kind_ != NONE) { uint32_t uncomp_ix_len = (uint32_t)(streams[stream_id].length - 3) * 2 + 1; buffer_[0] = static_cast<uint8_t>(uncomp_ix_len >> 0); buffer_[1] = static_cast<uint8_t>(uncomp_ix_len >> 8); buffer_[2] = static_cast<uint8_t>(uncomp_ix_len >> 16); } outfile_.write(reinterpret_cast<char *>(buffer_.data()), buffer_.size()); stripe.indexLength += buffer_.size(); } void writer::impl::write_data_stream(gpu::StripeStream const &strm_desc, gpu::EncChunk const &chunk, uint8_t const *compressed_data, uint8_t *stream_out, StripeInformation &stripe, std::vector<Stream> &streams, cudaStream_t stream) { const auto length = strm_desc.stream_size; streams[chunk.strm_id[strm_desc.strm_type]].length = length; if (length != 0) { const auto *stream_in = (compression_kind_ == NONE) ? 
chunk.streams[strm_desc.strm_type] : (compressed_data + strm_desc.bfr_offset); CUDA_TRY(cudaMemcpyAsync(stream_out, stream_in, length, cudaMemcpyDeviceToHost, stream)); CUDA_TRY(cudaStreamSynchronize(stream)); outfile_.write(reinterpret_cast<char *>(stream_out), length); } stripe.dataLength += length; } writer::impl::impl(std::string filepath, writer_options const &options, rmm::mr::device_memory_resource *mr) : _mr(mr) { compression_kind_ = to_orc_compression(options.compression); outfile_.open(filepath, std::ios::out | std::ios::binary | std::ios::trunc); CUDF_EXPECTS(outfile_.is_open(), "Cannot open output file"); } void writer::impl::write(table_view const &table, cudaStream_t stream) { size_type num_columns = table.num_columns(); size_type num_rows = 0; // Mapping of string columns for quick look-up std::vector<int> str_col_ids; // Wrapper around cudf columns to attach ORC-specific type info std::vector<orc_column_view> orc_columns; for (auto it = table.begin(); it < table.end(); ++it) { const auto col = *it; const auto current_id = orc_columns.size(); const auto current_str_id = str_col_ids.size(); num_rows = std::max<uint32_t>(num_rows, col.size()); orc_columns.emplace_back(current_id, current_str_id, col, stream); if (orc_columns.back().is_string()) { str_col_ids.push_back(current_id); } } rmm::device_vector<uint32_t> dict_index(str_col_ids.size() * num_rows); rmm::device_vector<uint32_t> dict_data(str_col_ids.size() * num_rows); // Build per-column dictionary indices const auto num_rowgroups = div_by_rowgroups<size_t>(num_rows); const auto num_dict_chunks = num_rowgroups * str_col_ids.size(); hostdevice_vector<gpu::DictionaryChunk> dict(num_dict_chunks); if (str_col_ids.size() != 0) { init_dictionaries(orc_columns.data(), num_rows, str_col_ids, dict_data.data().get(), dict_index.data().get(), dict, stream); } // Decide stripe boundaries early on, based on uncompressed size std::vector<uint32_t> stripe_list; for (size_t g = 0, stripe_start = 0, stripe_size = 0; g < num_rowgroups; g++) { size_t rowgroup_size = 0; for (int i = 0; i < num_columns; i++) { if (orc_columns[i].is_string()) { const auto dt = orc_columns[i].host_dict_chunk(g); rowgroup_size += 1 * row_index_stride_; rowgroup_size += dt->string_char_count; } else { rowgroup_size += orc_columns[i].type_width() * row_index_stride_; } } // Apply rows per stripe limit to limit string dictionaries const size_t max_stripe_rows = !str_col_ids.empty() ? 
1000000 : 5000000; if ((g > stripe_start) && (stripe_size + rowgroup_size > max_stripe_size_ || (g + 1 - stripe_start) * row_index_stride_ > max_stripe_rows)) { stripe_list.push_back(g - stripe_start); stripe_start = g; stripe_size = 0; } stripe_size += rowgroup_size; if (g + 1 == num_rowgroups) { stripe_list.push_back(num_rowgroups - stripe_start); } } // Build stripe-level dictionaries const auto num_stripe_dict = stripe_list.size() * str_col_ids.size(); hostdevice_vector<gpu::StripeDictionary> stripe_dict(num_stripe_dict); if (str_col_ids.size() != 0) { build_dictionaries(orc_columns.data(), num_rows, str_col_ids, stripe_list, dict, dict_index.data().get(), stripe_dict, stream); } // Initialize streams std::vector<int32_t> strm_ids(num_columns * gpu::CI_NUM_STREAMS, -1); auto streams = gather_streams(orc_columns.data(), orc_columns.size(), num_rows, stripe_list, strm_ids); // Encode column data chunks const auto num_chunks = num_rowgroups * num_columns; hostdevice_vector<gpu::EncChunk> chunks(num_chunks); auto output = encode_columns(orc_columns.data(), num_columns, num_rows, num_rowgroups, str_col_ids, stripe_list, streams, strm_ids, chunks, stream); // Assemble individual desparate column chunks into contiguous data streams const auto num_index_streams = (num_columns + 1); const auto num_data_streams = streams.size() - num_index_streams; const auto num_stripe_streams = stripe_list.size() * num_data_streams; hostdevice_vector<gpu::StripeStream> strm_desc(num_stripe_streams); auto stripes = gather_stripes(num_columns, num_rows, num_index_streams, num_data_streams, stripe_list, chunks, strm_desc, stream); // Allocate intermediate output stream buffer size_t compressed_bfr_size = 0; size_t num_compressed_blocks = 0; auto stream_output = [&]() { size_t max_stream_size = 0; for (size_t stripe_id = 0; stripe_id < stripe_list.size(); stripe_id++) { for (size_t i = 0; i < num_data_streams; i++) { gpu::StripeStream *ss = &strm_desc[stripe_id * num_data_streams + i]; size_t stream_size = ss->stream_size; if (compression_kind_ != NONE) { ss->first_block = num_compressed_blocks; ss->bfr_offset = compressed_bfr_size; auto num_blocks = std::max<uint32_t>((stream_size + compression_blocksize_ - 1) / compression_blocksize_, 1); stream_size += num_blocks * 3; num_compressed_blocks += num_blocks; compressed_bfr_size += stream_size; } max_stream_size = std::max(max_stream_size, stream_size); } } return pinned_buffer<uint8_t>{[](size_t size) { uint8_t *ptr = nullptr; CUDA_TRY(cudaMallocHost(&ptr, size)); return ptr; }(max_stream_size), cudaFreeHost}; }(); // Compress the data streams rmm::device_buffer compressed_data(compressed_bfr_size, stream); hostdevice_vector<gpu_inflate_status_s> comp_out(num_compressed_blocks); hostdevice_vector<gpu_inflate_input_s> comp_in(num_compressed_blocks); if (compression_kind_ != NONE) { CUDA_TRY(cudaMemcpyAsync(strm_desc.device_ptr(), strm_desc.host_ptr(), strm_desc.memory_size(), cudaMemcpyHostToDevice, stream)); CUDA_TRY(gpu::CompressOrcDataStreams( static_cast<uint8_t *>(compressed_data.data()), strm_desc.device_ptr(), chunks.device_ptr(), comp_in.device_ptr(), comp_out.device_ptr(), num_stripe_streams, num_compressed_blocks, compression_kind_, compression_blocksize_, stream)); CUDA_TRY(cudaMemcpyAsync(strm_desc.host_ptr(), strm_desc.device_ptr(), strm_desc.memory_size(), cudaMemcpyDeviceToHost, stream)); CUDA_TRY(cudaMemcpyAsync(comp_out.host_ptr(), comp_out.device_ptr(), comp_out.memory_size(), cudaMemcpyDeviceToHost, stream)); 
CUDA_TRY(cudaStreamSynchronize(stream)); } ProtobufWriter pbw_(&buffer_); // Write file header outfile_.write(MAGIC, std::strlen(MAGIC)); // Write stripes size_t group = 0; for (size_t stripe_id = 0; stripe_id < stripes.size(); stripe_id++) { auto groups_in_stripe = div_by_rowgroups(stripes[stripe_id].numberOfRows); stripes[stripe_id].offset = outfile_.tellp(); // Column (skippable) index streams appear at the start of the stripe stripes[stripe_id].indexLength = 0; for (size_t col_id = 0; col_id <= (size_t)num_columns; col_id++) { write_index_stream(stripe_id, col_id, orc_columns.data(), num_columns, num_data_streams, group, groups_in_stripe, chunks, strm_desc, comp_out, stripes[stripe_id], streams, &pbw_); } // Column data consisting one or more separate streams stripes[stripe_id].dataLength = 0; for (size_t i = 0; i < num_data_streams; i++) { const auto &ss = strm_desc[stripe_id * num_data_streams + i]; const auto &ck = chunks[group * num_columns + ss.column_id]; write_data_stream(ss, ck, static_cast<uint8_t *>(compressed_data.data()), stream_output.get(), stripes[stripe_id], streams, stream); } // Write stripefooter consisting of stream information StripeFooter sf; sf.streams = streams; sf.columns.resize(num_columns + 1); sf.columns[0].kind = DIRECT; sf.columns[0].dictionarySize = 0; for (size_t i = 1; i < sf.columns.size(); ++i) { sf.columns[i].kind = orc_columns[i - 1].orc_encoding(); sf.columns[i].dictionarySize = (sf.columns[i].kind == DICTIONARY_V2) ? orc_columns[i - 1].host_stripe_dict(stripe_id)->num_strings : 0; if (orc_columns[i - 1].orc_kind() == TIMESTAMP) { sf.writerTimezone = "UTC"; } } buffer_.resize((compression_kind_ != NONE) ? 3 : 0); pbw_.write(&sf); stripes[stripe_id].footerLength = buffer_.size(); if (compression_kind_ != NONE) { uint32_t uncomp_sf_len = (stripes[stripe_id].footerLength - 3) * 2 + 1; buffer_[0] = static_cast<uint8_t>(uncomp_sf_len >> 0); buffer_[1] = static_cast<uint8_t>(uncomp_sf_len >> 8); buffer_[2] = static_cast<uint8_t>(uncomp_sf_len >> 16); } outfile_.write(reinterpret_cast<char *>(buffer_.data()), buffer_.size()); group += groups_in_stripe; } // Write filefooter metadata // TBD: We may want to add pandas or spark column metadata strings here FileFooter ff; ff.headerLength = std::strlen(MAGIC); ff.contentLength = outfile_.tellp(); ff.stripes = std::move(stripes); ff.numberOfRows = num_rows; ff.rowIndexStride = row_index_stride_; ff.types.resize(1 + num_columns); ff.types[0].kind = STRUCT; ff.types[0].subtypes.resize(num_columns); ff.types[0].fieldNames.resize(num_columns); for (int i = 0; i < num_columns; ++i) { ff.types[1 + i].kind = orc_columns[i].orc_kind(); ff.types[0].subtypes[i] = 1 + i; ff.types[0].fieldNames[i] = orc_columns[i].orc_name(); } buffer_.resize((compression_kind_ != NONE) ? 
3 : 0); pbw_.write(&ff); // Write postscript metadata PostScript ps; ps.footerLength = buffer_.size(); ps.compression = compression_kind_; ps.compressionBlockSize = compression_blocksize_; ps.version = {0, 12}; ps.metadataLength = 0; // TODO: Write stripe statistics ps.magic = MAGIC; if (compression_kind_ != NONE) { // TODO: If the file footer ends up larger than the compression block // size, we'll need to insert additional 3-byte block headers uint32_t uncomp_ff_len = (uint32_t)(ps.footerLength - 3) * 2 + 1; buffer_[0] = static_cast<uint8_t>(uncomp_ff_len >> 0); buffer_[1] = static_cast<uint8_t>(uncomp_ff_len >> 8); buffer_[2] = static_cast<uint8_t>(uncomp_ff_len >> 16); } const auto ps_length = static_cast<uint8_t>(pbw_.write(&ps)); buffer_.push_back(ps_length); outfile_.write(reinterpret_cast<char *>(buffer_.data()), buffer_.size()); outfile_.flush(); } // Forward to implementation writer::writer(std::string filepath, writer_options const &options, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(filepath, options, mr)) {} // Destructor within this translation unit writer::~writer() = default; // Forward to implementation void writer::write_all(table_view const &table, cudaStream_t stream) { _impl->write(table, stream); } } // namespace orc } // namespace detail } // namespace io } // namespace experimental } // namespace cudf
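// Illustrative sketch, not part of the original writer above; the helper name is
// hypothetical. write_index_stream, the stripe footer and the file footer all emit
// the same ORC block header when a protobuf payload is stored uncompressed: a
// 3-byte little-endian value of (block_length << 1) | 1, where the low bit marks
// an "original" (uncompressed) block. Factored out, the pattern looks roughly like:
#include <cstdint>
static inline void put_uncompressed_block_header(uint8_t *dst, uint32_t block_len) {
  const uint32_t header = (block_len << 1) | 1;  // low bit: data stored as-is
  dst[0] = static_cast<uint8_t>(header >> 0);
  dst[1] = static_cast<uint8_t>(header >> 8);
  dst[2] = static_cast<uint8_t>(header >> 16);
}
// e.g. the uncomp_ix_len / uncomp_sf_len / uncomp_ff_len blocks above are
// equivalent to put_uncompressed_block_header(buffer_.data(), length - 3),
// since buffer_ is pre-sized with the 3 header bytes before the payload is written.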
e8950ca82f32cbaa37bcdface1d787e190dadb98.hip
// !!! This is a file automatically generated by hipify!!! #include <cassert> #include <cfloat> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #define K 1 using namespace std; #define spmv_NBLOCKS 256 #define spmv_BLOCK_SIZE 128 #define WARP_SIZE 32 __constant__ float vec[spmv_NBLOCKS*spmv_BLOCK_SIZE/WARP_SIZE]; static const double MAX_RELATIVE_ERROR = .02; static const int PAD_FACTOR = 16; void fill(float *A, const int n, const float maxi) { for (int j = 0; j < n; j++) { A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f))); } } void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim) { int nnzAssigned = 0; // Figure out the probability that a nonzero should be assigned to a given // spot in the matrix double prob = (double)n / ((double)dim * (double)dim); // Seed random number generator srand48(2013); // Randomly decide whether entry i,j gets a value, but ensure n values // are assigned bool fillRemaining = false; for (int i = 0; i < dim; i++) { rowDelimiters[i] = nnzAssigned; for (int j = 0; j < dim; j++) { int numEntriesLeft = (dim * dim) - ((i * dim) + j); int needToAssign = n - nnzAssigned; if (numEntriesLeft <= needToAssign) { fillRemaining = true; } if ((nnzAssigned < n && drand48() <= prob) || fillRemaining) { // Assign (i,j) a value cols[nnzAssigned] = j; nnzAssigned++; } } } // Observe the convention to put the number of non zeroes at the end of the // row delimiters array rowDelimiters[dim] = n; assert(nnzAssigned == n); } void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters, float **newA_ptr, int **newcols_ptr, int *newIndices, int *newSize) { // determine total padded size and new row indices int paddedSize = 0; int rowSize; for (int i=0; i<dim; i++) { newIndices[i] = paddedSize; rowSize = rowDelimiters[i+1] - rowDelimiters[i]; if (rowSize % PAD_FACTOR != 0) { rowSize += PAD_FACTOR - rowSize % PAD_FACTOR; } paddedSize += rowSize; } *newSize = paddedSize; newIndices[dim] = paddedSize; hipHostMalloc(newA_ptr, paddedSize * sizeof(float)); hipHostMalloc(newcols_ptr, paddedSize * sizeof(int)); float *newA = *newA_ptr; int *newcols = *newcols_ptr; memset(newA, 0, paddedSize * sizeof(float)); // fill newA and newcols for (int i=0; i<dim; i++) { for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1]; j++, k++) { newA[k] = A[j]; newcols[k] = cols[j]; } } } void spmvCpu(const float *val, const int *cols, const int *rowDelimiters, const float *vec, int dim, float *out) { for (int i=0; i<dim; i++) { float t = 0; for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++) { int col = cols[j]; t += val[j] * vec[col]; } out[i] = t; } } void spmv_verifyResults(const float *cpuResults, const float *gpuResults, const int size) { bool passed = true; for (int i = 0; i < size; i++) { if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > MAX_RELATIVE_ERROR) { cout << "Failed! 
Mismatch at i: "<< i << " ref: " << cpuResults[i] << " dev: " << gpuResults[i] << endl; return; } } cout << "spmv passed" << endl; } __global__ void spmv_kernel(const float* val, const int * cols, const int * rowDelimiters, const float * vec1, const int dim, float * out) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (WARP_SIZE-1); int warpsPerBlock = blockDim.x / WARP_SIZE; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE); __shared__ volatile float partialSums[spmv_BLOCK_SIZE]; if (myRow < dim) { int warpStart = rowDelimiters[myRow]; int warpEnd = rowDelimiters[myRow+1]; float mySum = 0; for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE) { int col = cols[j]; mySum += val[j] * vec[col];//tex1Dfetch(tex_vec,col);//vec[col]; } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } int main(int argc, char **argv) { hipSetDevice(2); srand(2013); float *h_spmv_val, *h_spmv_valPad; int *h_spmv_cols, *h_spmv_colsPad; int *h_rowDelimiters, *h_rowDelimitersPad; float *h_spmv_vec, *h_spmv_out, *spmv_refOut; int spmv_nItems, nItemsPadded, spmv_numRows; spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE); spmv_nItems = spmv_numRows * spmv_numRows / 50; // 1% of entries will be non-zero float maxval = 200.0; hipHostMalloc(&h_spmv_val, spmv_nItems * sizeof(float)); hipHostMalloc(&h_spmv_cols, spmv_nItems * sizeof(int)); hipHostMalloc(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_val, spmv_nItems, maxval); initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows); // Set up remaining host data int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR); hipHostMalloc(&h_spmv_vec, spmv_numRows * sizeof(float)) ; spmv_refOut = new float[spmv_numRows]; hipHostMalloc(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_vec, spmv_numRows, maxval); hipHostMalloc(&h_spmv_out, paddedSize * sizeof(float)); convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad, &h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded); // Compute reference solution spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut); float *d_spmv_val, *d_spmv_vec, *d_spmv_out; int *d_spmv_cols, *d_rowDelimiters; // Allocate device memory hipMalloc(&d_spmv_val, spmv_nItems * sizeof(float)); hipMalloc(&d_spmv_cols, spmv_nItems * sizeof(int)); hipMalloc(&d_spmv_vec, spmv_numRows * sizeof(float)); hipMalloc(&d_spmv_out, spmv_numRows * sizeof(float)); hipMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int)); // Transfer data to device hipMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), hipMemcpyHostToDevice); hipMemcpyToSymbol(vec, h_spmv_vec, spmv_numRows * sizeof(float)); hipEvent_t kernel_start, kernel_stop; hipEventCreate(&kernel_start); hipEventCreate(&kernel_stop); float kernel_time = 0.0f; hipEventRecord(kernel_start, 0); // Setup thread configuration int 
spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE)); hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(spmv_BLOCK_SIZE), 0, 0, d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out); hipDeviceSynchronize(); hipEventRecord(kernel_stop, 0); hipEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; hipMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), hipMemcpyDeviceToHost); spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows); return 0; }
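// Illustrative sketch, not part of the original benchmark above; the macro name is
// hypothetical. The hipMalloc/hipMemcpy/hipEvent* calls in main() ignore their
// hipError_t return codes; a small checking macro (using <stdio.h> and <stdlib.h>,
// which the file already includes) is the usual guard:
#define HIP_CHECK(call)                                                        \
  do {                                                                         \
    hipError_t hip_check_err_ = (call);                                        \
    if (hip_check_err_ != hipSuccess) {                                        \
      fprintf(stderr, "HIP error '%s' at %s:%d\n",                             \
              hipGetErrorString(hip_check_err_), __FILE__, __LINE__);          \
      exit(EXIT_FAILURE);                                                      \
    }                                                                          \
  } while (0)
// Usage: HIP_CHECK(hipMalloc(&d_spmv_val, spmv_nItems * sizeof(float)));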
e8950ca82f32cbaa37bcdface1d787e190dadb98.cu
#include <cassert> #include <cfloat> #include <cuda_runtime_api.h> #include <cuda.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #define K 1 using namespace std; #define spmv_NBLOCKS 256 #define spmv_BLOCK_SIZE 128 #define WARP_SIZE 32 __constant__ float vec[spmv_NBLOCKS*spmv_BLOCK_SIZE/WARP_SIZE]; static const double MAX_RELATIVE_ERROR = .02; static const int PAD_FACTOR = 16; void fill(float *A, const int n, const float maxi) { for (int j = 0; j < n; j++) { A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f))); } } void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim) { int nnzAssigned = 0; // Figure out the probability that a nonzero should be assigned to a given // spot in the matrix double prob = (double)n / ((double)dim * (double)dim); // Seed random number generator srand48(2013); // Randomly decide whether entry i,j gets a value, but ensure n values // are assigned bool fillRemaining = false; for (int i = 0; i < dim; i++) { rowDelimiters[i] = nnzAssigned; for (int j = 0; j < dim; j++) { int numEntriesLeft = (dim * dim) - ((i * dim) + j); int needToAssign = n - nnzAssigned; if (numEntriesLeft <= needToAssign) { fillRemaining = true; } if ((nnzAssigned < n && drand48() <= prob) || fillRemaining) { // Assign (i,j) a value cols[nnzAssigned] = j; nnzAssigned++; } } } // Observe the convention to put the number of non zeroes at the end of the // row delimiters array rowDelimiters[dim] = n; assert(nnzAssigned == n); } void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters, float **newA_ptr, int **newcols_ptr, int *newIndices, int *newSize) { // determine total padded size and new row indices int paddedSize = 0; int rowSize; for (int i=0; i<dim; i++) { newIndices[i] = paddedSize; rowSize = rowDelimiters[i+1] - rowDelimiters[i]; if (rowSize % PAD_FACTOR != 0) { rowSize += PAD_FACTOR - rowSize % PAD_FACTOR; } paddedSize += rowSize; } *newSize = paddedSize; newIndices[dim] = paddedSize; cudaMallocHost(newA_ptr, paddedSize * sizeof(float)); cudaMallocHost(newcols_ptr, paddedSize * sizeof(int)); float *newA = *newA_ptr; int *newcols = *newcols_ptr; memset(newA, 0, paddedSize * sizeof(float)); // fill newA and newcols for (int i=0; i<dim; i++) { for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1]; j++, k++) { newA[k] = A[j]; newcols[k] = cols[j]; } } } void spmvCpu(const float *val, const int *cols, const int *rowDelimiters, const float *vec, int dim, float *out) { for (int i=0; i<dim; i++) { float t = 0; for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++) { int col = cols[j]; t += val[j] * vec[col]; } out[i] = t; } } void spmv_verifyResults(const float *cpuResults, const float *gpuResults, const int size) { bool passed = true; for (int i = 0; i < size; i++) { if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > MAX_RELATIVE_ERROR) { cout << "Failed! 
Mismatch at i: "<< i << " ref: " << cpuResults[i] << " dev: " << gpuResults[i] << endl; return; } } cout << "spmv passed" << endl; } __global__ void spmv_kernel(const float* val, const int * cols, const int * rowDelimiters, const float * vec1, const int dim, float * out) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (WARP_SIZE-1); int warpsPerBlock = blockDim.x / WARP_SIZE; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE); __shared__ volatile float partialSums[spmv_BLOCK_SIZE]; if (myRow < dim) { int warpStart = rowDelimiters[myRow]; int warpEnd = rowDelimiters[myRow+1]; float mySum = 0; for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE) { int col = cols[j]; mySum += val[j] * vec[col];//tex1Dfetch(tex_vec,col);//vec[col]; } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } int main(int argc, char **argv) { cudaSetDevice(2); srand(2013); float *h_spmv_val, *h_spmv_valPad; int *h_spmv_cols, *h_spmv_colsPad; int *h_rowDelimiters, *h_rowDelimitersPad; float *h_spmv_vec, *h_spmv_out, *spmv_refOut; int spmv_nItems, nItemsPadded, spmv_numRows; spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE); spmv_nItems = spmv_numRows * spmv_numRows / 50; // 1% of entries will be non-zero float maxval = 200.0; cudaMallocHost(&h_spmv_val, spmv_nItems * sizeof(float)); cudaMallocHost(&h_spmv_cols, spmv_nItems * sizeof(int)); cudaMallocHost(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_val, spmv_nItems, maxval); initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows); // Set up remaining host data int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR); cudaMallocHost(&h_spmv_vec, spmv_numRows * sizeof(float)) ; spmv_refOut = new float[spmv_numRows]; cudaMallocHost(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_vec, spmv_numRows, maxval); cudaMallocHost(&h_spmv_out, paddedSize * sizeof(float)); convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad, &h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded); // Compute reference solution spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut); float *d_spmv_val, *d_spmv_vec, *d_spmv_out; int *d_spmv_cols, *d_rowDelimiters; // Allocate device memory cudaMalloc(&d_spmv_val, spmv_nItems * sizeof(float)); cudaMalloc(&d_spmv_cols, spmv_nItems * sizeof(int)); cudaMalloc(&d_spmv_vec, spmv_numRows * sizeof(float)); cudaMalloc(&d_spmv_out, spmv_numRows * sizeof(float)); cudaMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int)); // Transfer data to device cudaMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(vec, h_spmv_vec, spmv_numRows * sizeof(float)); cudaEvent_t kernel_start, kernel_stop; cudaEventCreate(&kernel_start); cudaEventCreate(&kernel_stop); float kernel_time = 0.0f; cudaEventRecord(kernel_start, 0); // Setup 
thread configuration int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE)); spmv_kernel <<<spmv_grid, spmv_BLOCK_SIZE>>> (d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out); cudaDeviceSynchronize(); cudaEventRecord(kernel_stop, 0); cudaEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; cudaMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), cudaMemcpyDeviceToHost); spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows); return 0; }
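// Illustrative sketch, not part of the original file above; the helper name is
// hypothetical. The partialSums reduction in spmv_kernel relies on implicit
// warp-synchronous execution through a volatile shared array; on Volta and later
// (independent thread scheduling) that pattern is no longer guaranteed, and an
// explicit warp-shuffle reduction (CUDA 9+) is the usual replacement:
__device__ __forceinline__ float warp_reduce_sum(float val) {
  #pragma unroll
  for (int offset = WARP_SIZE / 2; offset > 0; offset >>= 1) {
    val += __shfl_down_sync(0xffffffffu, val, offset);  // WARP_SIZE defined above
  }
  return val;  // lane 0 of each warp ends up with the full per-row sum
}
// In spmv_kernel this would replace the shared-memory steps with
// mySum = warp_reduce_sum(mySum); followed by the existing id == 0 store.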
2b71136031c2d014a1023b31b20e9c343d7e9ebd.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <stdio.h> #include <MatKernel.hpp> #define CDIM 5 #if __CUDA_ARCH__ >= 300 /* * Convolutional kernel for word2vec. This handles the positively-label word pairs with * one context word and the current word. */ template<int SKIP, int YDIM, int NREPS> __global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate) { const int nwindow = 2*SKIP+1; int words[nwindow]; float aa[NREPS][nwindow]; float daa[NREPS][nwindow]; float bb[NREPS]; float dbb[NREPS]; __shared__ float CC[YDIM * nwindow]; int i, j, k, tid, indx, icol, dxy, lb, ub; float prod, v; tid = threadIdx.x + blockDim.x * threadIdx.y; dxy = blockDim.x * blockDim.y; bool good; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); #pragma unroll for (i = 0; i < nwindow; i++) { // Prefill the word and aa window buffers if (istart + i - SKIP - 1 > 0) { words[i] = nrows * W[istart + i - SKIP - 1]; // Get a new word } else { words[i] = -1; } good = (words[i] >= 0); #pragma unroll for (j = 0; j < NREPS; j++) { // Get the A vector for this word indx = tid + j * dxy; if (good && indx < nrows) { aa[j][i] = A[indx + words[i]]; } else { aa[j][i] = 0; } daa[j][i] = 0; } } for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < nwindow-1; i++) { // slide words down words[i] = words[i+1]; #pragma unroll for (j = 0; j < NREPS; j++) { aa[j][i] = aa[j][i+1]; // slide data down daa[j][i] = daa[j][i+1]; // slide deriv down } } good = (icol + SKIP < ncols); if (good) { words[nwindow - 1] = nrows * W[icol + SKIP]; // Get a new word } else { words[nwindow - 1] = -1; } good = good && words[nwindow-1] >= 0; #pragma unroll for (j = 0; j < NREPS; j++) { // Get a new A column indx = tid + j * dxy; if (good && indx < nrows) { aa[j][nwindow - 1] = A[indx + words[nwindow - 1]]; } else { aa[j][nwindow - 1] = 0; } if (words[SKIP] >= 0 && indx < nrows) { // Get a new B column bb[j] = B[indx + words[SKIP]]; } else { bb[j] = 0; } } __syncthreads(); lb = LB[icol]; ub = UB[icol]; #pragma unroll for (i = 0; i < nwindow; i++) { // Iterate across the window for A cols prod = 0; if (i >= SKIP + lb && i <= SKIP + ub) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements prod += aa[j][i] * bb[j]; // Compute the product between current A, B cols } #pragma unroll for (k = 1; k < 32; k = k + k) { prod += __shfl_down(prod, k); // Reduce within warp } if (threadIdx.x == 0) { CC[i - SKIP -lb + threadIdx.y * nwindow] = prod; // Save to SHMEM } } } __syncthreads(); for (i = 0; i < blockDim.y; i++) { // Reduce across warps for (k = tid; k <= ub - lb; k += dxy) { CC[k] = CC[k + i * nwindow]; } __syncthreads(); } __syncthreads(); // Apply the sigmoid map for (i = tid; i <= ub - lb; i += dxy) { v = CC[i]; if (v > 16.0f) { v = 1.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = 1.0f - v; // All pairs have label 1 } __syncthreads(); #pragma unroll for (j = 0; j < NREPS; j++) { dbb[j] = 0; } #pragma unroll for (i = 0; i < nwindow; i++) { // Iterate across the window for A cols if (i >= SKIP + lb && i <= SKIP + ub) { v = lrate * CC[i - SKIP - lb]; #pragma unroll for (j = 0; j < NREPS; j++) { dbb[j] += v * aa[j][i]; daa[j][i] += v * bb[j]; // Compute the product with the current A, B cols } } } #pragma unroll for (j = 0; j < NREPS; j++) { if (words[SKIP] >= 0 && tid + j * dxy < 
nrows) { // Save the B column atomicAdd(&B[tid + j * dxy + words[SKIP]], dbb[j]); } } __syncthreads(); if (icol - SKIP >= 0 && words[0] >= 0) { for (j = 0; j < NREPS; j++) { // Save the A column if (tid + j * dxy < nrows) { atomicAdd(&A[tid + j * dxy + words[0]], daa[j][0]); } } } } } /* * Convolutional kernel for word2vec. This handles the positively-label word pairs with * one context word and the current word. */ template<int SKIP, int YDIM, int NREPS> __global__ void __word2vecPos_exp(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate) { const int nwindow = 2*SKIP+1; float aa[NREPS]; float da[NREPS]; __shared__ float CC[YDIM * nwindow]; int i, j, k, tid, icol, dxy, lb, ub, iword, cword; float bb, db, prod, v; tid = threadIdx.x + blockDim.x * threadIdx.y; dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); for (icol = istart; icol < iend; icol++) { // Iterate over columns iword = nrows * W[icol]; // Get the current word __syncthreads(); lb = LB[icol]; ub = UB[icol]; if (iword >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements if (tid + j * dxy < nrows) { // Get A aa[j] = A[tid + j * dxy + iword]; } else { aa[j] = 0; } } for (i = lb; i <= ub; i++) { // Iterate across the window for A cols __syncthreads(); cword = nrows * W[icol + i]; // Get the current word prod = 0; if (cword >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements if (tid + j * dxy < nrows) { // Get B col bb = B[tid + j * dxy + cword]; prod += aa[j] * bb; // Compute the product between current A, B cols } } #pragma unroll for (k = 1; k < 32; k = k + k) { prod += __shfl_down(prod, k); // Reduce within warp } } if (threadIdx.x == 0) { CC[i - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM } } __syncthreads(); for (j = 1; j < blockDim.y; j++) { // Reduce across warps for (i = tid; i < ub - lb; i += dxy) { CC[i] += CC[i + j * nwindow]; } __syncthreads(); } __syncthreads(); // Apply the sigmoid map for (i = tid; i < ub - lb; i += dxy) { v = CC[i]; if (v > 16.0f) { v = 1.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = lrate * (1.0f - v); // All pairs have label 1 } __syncthreads(); #pragma unroll for (j = 0; j < NREPS; j++) { da[j] = 0; } for (i = lb; i <= ub; i++) { // Iterate across the window for A cols cword = nrows * W[icol + i]; // Get the context word v = CC[i - lb]; if (cword >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements if (tid + j * dxy < nrows) { // Get B col bb = B[tid + j * dxy + cword]; da[j] += v * bb; db = v * aa[j]; atomicAdd(&B[tid + j * dxy + cword], db); } } } } #pragma unroll for (j = 0; j < NREPS; j++) { if (tid + j * dxy < nrows) { atomicAdd(&A[tid + j * dxy + iword], da[j]); } } } } } #else template<int SKIP, int YDIM, int NREPS> __global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate) {} #endif int word2vecPos(int nrows, int ncols, int skip, int *W, int *LB, int *UB, float *A, float *B, float lrate) { dim3 threads(32, CDIM, 1); int nblocks = 1 + (nrows - 1)/threads.y; switch(skip) { case 5 :hipLaunchKernelGGL(( __word2vecPos<5, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, lrate); break; case 3 :hipLaunchKernelGGL(( __word2vecPos<3, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, lrate); break; case 2 :hipLaunchKernelGGL(( 
__word2vecPos<2, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, lrate); break; default : printf("word2vecPos unsupported size %d\n", skip); return 1; } hipStreamSynchronize(SYNC_STREAM); int err = hipGetLastError(); return err; }
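// Illustrative sketch, not part of the original file above; the helper name is
// hypothetical. The hipified kernels keep the 32-lane warp assumption from the
// CUDA original (32-wide thread blocks and the k < 32 __shfl_down loops). On AMD
// GPUs with 64-wide wavefronts a width-agnostic reduction built on the warpSize
// builtin is one common adaptation:
__device__ inline float wave_reduce_sum(float v) {
  for (int offset = warpSize / 2; offset > 0; offset >>= 1) {
    v += __shfl_down(v, offset);  // same intrinsic the kernels above use
  }
  return v;  // lane 0 of the wavefront holds the full sum
}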
2b71136031c2d014a1023b31b20e9c343d7e9ebd.cu
#include <cuda_runtime.h> #include <curand_kernel.h> #include <stdio.h> #include <MatKernel.hpp> #define CDIM 5 #if __CUDA_ARCH__ >= 300 /* * Convolutional kernel for word2vec. This handles the positively-label word pairs with * one context word and the current word. */ template<int SKIP, int YDIM, int NREPS> __global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate) { const int nwindow = 2*SKIP+1; int words[nwindow]; float aa[NREPS][nwindow]; float daa[NREPS][nwindow]; float bb[NREPS]; float dbb[NREPS]; __shared__ float CC[YDIM * nwindow]; int i, j, k, tid, indx, icol, dxy, lb, ub; float prod, v; tid = threadIdx.x + blockDim.x * threadIdx.y; dxy = blockDim.x * blockDim.y; bool good; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); #pragma unroll for (i = 0; i < nwindow; i++) { // Prefill the word and aa window buffers if (istart + i - SKIP - 1 > 0) { words[i] = nrows * W[istart + i - SKIP - 1]; // Get a new word } else { words[i] = -1; } good = (words[i] >= 0); #pragma unroll for (j = 0; j < NREPS; j++) { // Get the A vector for this word indx = tid + j * dxy; if (good && indx < nrows) { aa[j][i] = A[indx + words[i]]; } else { aa[j][i] = 0; } daa[j][i] = 0; } } for (icol = istart; icol < iend; icol++) { // Iterate over columns #pragma unroll for (i = 0; i < nwindow-1; i++) { // slide words down words[i] = words[i+1]; #pragma unroll for (j = 0; j < NREPS; j++) { aa[j][i] = aa[j][i+1]; // slide data down daa[j][i] = daa[j][i+1]; // slide deriv down } } good = (icol + SKIP < ncols); if (good) { words[nwindow - 1] = nrows * W[icol + SKIP]; // Get a new word } else { words[nwindow - 1] = -1; } good = good && words[nwindow-1] >= 0; #pragma unroll for (j = 0; j < NREPS; j++) { // Get a new A column indx = tid + j * dxy; if (good && indx < nrows) { aa[j][nwindow - 1] = A[indx + words[nwindow - 1]]; } else { aa[j][nwindow - 1] = 0; } if (words[SKIP] >= 0 && indx < nrows) { // Get a new B column bb[j] = B[indx + words[SKIP]]; } else { bb[j] = 0; } } __syncthreads(); lb = LB[icol]; ub = UB[icol]; #pragma unroll for (i = 0; i < nwindow; i++) { // Iterate across the window for A cols prod = 0; if (i >= SKIP + lb && i <= SKIP + ub) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements prod += aa[j][i] * bb[j]; // Compute the product between current A, B cols } #pragma unroll for (k = 1; k < 32; k = k + k) { prod += __shfl_down(prod, k); // Reduce within warp } if (threadIdx.x == 0) { CC[i - SKIP -lb + threadIdx.y * nwindow] = prod; // Save to SHMEM } } } __syncthreads(); for (i = 0; i < blockDim.y; i++) { // Reduce across warps for (k = tid; k <= ub - lb; k += dxy) { CC[k] = CC[k + i * nwindow]; } __syncthreads(); } __syncthreads(); // Apply the sigmoid map for (i = tid; i <= ub - lb; i += dxy) { v = CC[i]; if (v > 16.0f) { v = 1.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = 1.0f - v; // All pairs have label 1 } __syncthreads(); #pragma unroll for (j = 0; j < NREPS; j++) { dbb[j] = 0; } #pragma unroll for (i = 0; i < nwindow; i++) { // Iterate across the window for A cols if (i >= SKIP + lb && i <= SKIP + ub) { v = lrate * CC[i - SKIP - lb]; #pragma unroll for (j = 0; j < NREPS; j++) { dbb[j] += v * aa[j][i]; daa[j][i] += v * bb[j]; // Compute the product with the current A, B cols } } } #pragma unroll for (j = 0; j < NREPS; j++) { if (words[SKIP] >= 0 && tid + j * dxy < nrows) { // Save the B column atomicAdd(&B[tid + j * dxy + words[SKIP]], 
dbb[j]); } } __syncthreads(); if (icol - SKIP >= 0 && words[0] >= 0) { for (j = 0; j < NREPS; j++) { // Save the A column if (tid + j * dxy < nrows) { atomicAdd(&A[tid + j * dxy + words[0]], daa[j][0]); } } } } } /* * Convolutional kernel for word2vec. This handles the positively-label word pairs with * one context word and the current word. */ template<int SKIP, int YDIM, int NREPS> __global__ void __word2vecPos_exp(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate) { const int nwindow = 2*SKIP+1; float aa[NREPS]; float da[NREPS]; __shared__ float CC[YDIM * nwindow]; int i, j, k, tid, icol, dxy, lb, ub, iword, cword; float bb, db, prod, v; tid = threadIdx.x + blockDim.x * threadIdx.y; dxy = blockDim.x * blockDim.y; int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x); int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x); for (icol = istart; icol < iend; icol++) { // Iterate over columns iword = nrows * W[icol]; // Get the current word __syncthreads(); lb = LB[icol]; ub = UB[icol]; if (iword >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements if (tid + j * dxy < nrows) { // Get A aa[j] = A[tid + j * dxy + iword]; } else { aa[j] = 0; } } for (i = lb; i <= ub; i++) { // Iterate across the window for A cols __syncthreads(); cword = nrows * W[icol + i]; // Get the current word prod = 0; if (cword >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements if (tid + j * dxy < nrows) { // Get B col bb = B[tid + j * dxy + cword]; prod += aa[j] * bb; // Compute the product between current A, B cols } } #pragma unroll for (k = 1; k < 32; k = k + k) { prod += __shfl_down(prod, k); // Reduce within warp } } if (threadIdx.x == 0) { CC[i - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM } } __syncthreads(); for (j = 1; j < blockDim.y; j++) { // Reduce across warps for (i = tid; i < ub - lb; i += dxy) { CC[i] += CC[i + j * nwindow]; } __syncthreads(); } __syncthreads(); // Apply the sigmoid map for (i = tid; i < ub - lb; i += dxy) { v = CC[i]; if (v > 16.0f) { v = 1.0f; } else { v = exp(v); v = v / (1.0f + v); } CC[i] = lrate * (1.0f - v); // All pairs have label 1 } __syncthreads(); #pragma unroll for (j = 0; j < NREPS; j++) { da[j] = 0; } for (i = lb; i <= ub; i++) { // Iterate across the window for A cols cword = nrows * W[icol + i]; // Get the context word v = CC[i - lb]; if (cword >= 0) { #pragma unroll for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements if (tid + j * dxy < nrows) { // Get B col bb = B[tid + j * dxy + cword]; da[j] += v * bb; db = v * aa[j]; atomicAdd(&B[tid + j * dxy + cword], db); } } } } #pragma unroll for (j = 0; j < NREPS; j++) { if (tid + j * dxy < nrows) { atomicAdd(&A[tid + j * dxy + iword], da[j]); } } } } } #else template<int SKIP, int YDIM, int NREPS> __global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate) {} #endif int word2vecPos(int nrows, int ncols, int skip, int *W, int *LB, int *UB, float *A, float *B, float lrate) { dim3 threads(32, CDIM, 1); int nblocks = 1 + (nrows - 1)/threads.y; switch(skip) { case 5 : __word2vecPos<5, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate); break; case 3 : __word2vecPos<3, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate); break; case 2 : __word2vecPos<2, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate); break; default : printf("word2vecPos unsupport size %d\n", skip); return 1; } 
cudaStreamSynchronize(SYNC_STREAM); int err = cudaGetLastError(); return err; }
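// Illustrative sketch, not part of the original file above; the helper name is
// hypothetical. Both word2vec kernels inline the positive-pair weight
// 1 - sigmoid(x), short-circuiting at x > 16 where the sigmoid has already
// saturated to 1 in single precision; factored out it is simply:
__device__ __forceinline__ float one_minus_sigmoid(float v) {
  if (v > 16.0f) return 0.0f;       // matches the v = 1.0f branch in the kernels
  float e = expf(v);
  return 1.0f - e / (1.0f + e);
}
// The kernels then scale this term by lrate before accumulating into the A and B
// gradients, as the CC[i] = 1.0f - v and CC[i] = lrate * (1.0f - v) lines above do.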