Dataset schema (column name, type, length range):

  hip_filename    string   length 5 to 84
  hip_content     string   length 79 to 9.69M
  cuda_filename   string   length 4 to 83
  cuda_content    string   length 19 to 9.69M
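Each record pairs a hipify-generated HIP translation (hip_filename, hip_content) with the original CUDA source it was produced from (cuda_filename, cuda_content). Below is a minimal sketch of how the pairs could be iterated, assuming the records are available through the Hugging Face datasets library; the dataset path "user/hip-cuda-pairs" and the split name are hypothetical placeholders, not identifiers taken from this dump:

# Minimal sketch (assumptions above): walk the (HIP, CUDA) source pairs.
from datasets import load_dataset

ds = load_dataset("user/hip-cuda-pairs", split="train")  # hypothetical path and split

for row in ds:
    # Column names follow the schema listed above.
    hip_name, cuda_name = row["hip_filename"], row["cuda_filename"]
    hip_src, cuda_src = row["hip_content"], row["cuda_content"]
    print(hip_name, "<-", cuda_name, len(hip_src), len(cuda_src))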
hip_filename: b76498e64baa5917c137eaa19a2266673eebec63.hip
hip_content:
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "matrix_diag_part_v3_impl.cuh" #include <hip/hip_runtime.h> #include <algorithm> #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" __device__ inline int64_t ComputeOffset(const int64_t diag_idx, const int64_t num_rows, const int64_t num_cols, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag) { if ((diag_idx >= 0 && left_align_super_diag) || (diag_idx <= 0 && left_align_sub_diag)) { return 0; } int64_t diag_len1 = num_cols - max(diag_idx, int64_t(0)); int64_t diag_len2 = num_rows + min(diag_idx, int64_t(0)); return max_diag_len - min(diag_len1, diag_len2); } template <typename T> __global__ void MatrixDiagPartV3Kernel(const T *matrix_ptr, const T *padding_value_ptr, T *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t upper_diag_index, const int64_t diag_size, const int64_t num_diag, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag) { for (int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < diag_size; idx += blockDim.x * gridDim.x) { const int64_t batch_diag_index = idx / max_diag_len; const int64_t ith_diag = idx % max_diag_len; const int64_t batch = batch_diag_index / num_diag; const int64_t diag_index = upper_diag_index - (batch_diag_index % num_diag); const int64_t offset = ComputeOffset(diag_index, num_rows, num_cols, max_diag_len, left_align_super_diag, left_align_sub_diag); const int64_t y_idx = ith_diag + max(static_cast<int64_t>(0), -diag_index) - offset; const int64_t x_idx = ith_diag + max(static_cast<int64_t>(0), diag_index) - offset; if ((0 <= y_idx && y_idx < num_rows) && (0 <= x_idx && x_idx < num_cols)) { diagnal_ptr[idx] = matrix_ptr[batch * num_rows * num_cols + y_idx * num_cols + x_idx]; } else { diagnal_ptr[idx] = *padding_value_ptr; } } } template <typename T> void MatrixDiagPartV3(const T *matrix_ptr, const T *padding_value_ptr, T *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, hipStream_t cuda_stream) { const int64_t num_diag = upper_diag_index - lower_diag_idx + 1; hipLaunchKernelGGL(( MatrixDiagPartV3Kernel), dim3(CUDA_BLOCKS(device_id, diag_size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, matrix_ptr, padding_value_ptr, diagnal_ptr, num_rows, num_cols, upper_diag_index, diag_size, num_diag, max_diag_len, left_align_super_diag, left_align_sub_diag); } template CUDA_LIB_EXPORT void MatrixDiagPartV3<int8_t>(const int8_t *matrix_ptr, const int8_t *padding_value_ptr, int8_t *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool 
left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<uint8_t>(const uint8_t *matrix_ptr, const uint8_t *padding_value_ptr, uint8_t *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<int16_t>(const int16_t *matrix_ptr, const int16_t *padding_value_ptr, int16_t *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<uint16_t>(const uint16_t *matrix_ptr, const uint16_t *padding_value_ptr, uint16_t *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<int32_t>(const int32_t *matrix_ptr, const int32_t *padding_value_ptr, int32_t *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<uint32_t>(const uint32_t *matrix_ptr, const uint32_t *padding_value_ptr, uint32_t *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<int64_t>(const int64_t *matrix_ptr, const int64_t *padding_value_ptr, int64_t *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<uint64_t>(const uint64_t *matrix_ptr, const uint64_t *padding_value_ptr, uint64_t *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<half>(const half *matrix_ptr, const half *padding_value_ptr, half *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<float>(const float *matrix_ptr, const float *padding_value_ptr, float *diagnal_ptr, const 
int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<double>(const double *matrix_ptr, const double *padding_value_ptr, double *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, hipStream_t cuda_stream);
cuda_filename: b76498e64baa5917c137eaa19a2266673eebec63.cu
cuda_content:
/** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "matrix_diag_part_v3_impl.cuh" #include <cuda_runtime.h> #include <algorithm> #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h" __device__ inline int64_t ComputeOffset(const int64_t diag_idx, const int64_t num_rows, const int64_t num_cols, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag) { if ((diag_idx >= 0 && left_align_super_diag) || (diag_idx <= 0 && left_align_sub_diag)) { return 0; } int64_t diag_len1 = num_cols - max(diag_idx, int64_t(0)); int64_t diag_len2 = num_rows + min(diag_idx, int64_t(0)); return max_diag_len - min(diag_len1, diag_len2); } template <typename T> __global__ void MatrixDiagPartV3Kernel(const T *matrix_ptr, const T *padding_value_ptr, T *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t upper_diag_index, const int64_t diag_size, const int64_t num_diag, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag) { for (int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < diag_size; idx += blockDim.x * gridDim.x) { const int64_t batch_diag_index = idx / max_diag_len; const int64_t ith_diag = idx % max_diag_len; const int64_t batch = batch_diag_index / num_diag; const int64_t diag_index = upper_diag_index - (batch_diag_index % num_diag); const int64_t offset = ComputeOffset(diag_index, num_rows, num_cols, max_diag_len, left_align_super_diag, left_align_sub_diag); const int64_t y_idx = ith_diag + max(static_cast<int64_t>(0), -diag_index) - offset; const int64_t x_idx = ith_diag + max(static_cast<int64_t>(0), diag_index) - offset; if ((0 <= y_idx && y_idx < num_rows) && (0 <= x_idx && x_idx < num_cols)) { diagnal_ptr[idx] = matrix_ptr[batch * num_rows * num_cols + y_idx * num_cols + x_idx]; } else { diagnal_ptr[idx] = *padding_value_ptr; } } } template <typename T> void MatrixDiagPartV3(const T *matrix_ptr, const T *padding_value_ptr, T *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, cudaStream_t cuda_stream) { const int64_t num_diag = upper_diag_index - lower_diag_idx + 1; MatrixDiagPartV3Kernel<<<CUDA_BLOCKS(device_id, diag_size), CUDA_THREADS(device_id), 0, cuda_stream>>>( matrix_ptr, padding_value_ptr, diagnal_ptr, num_rows, num_cols, upper_diag_index, diag_size, num_diag, max_diag_len, left_align_super_diag, left_align_sub_diag); } template CUDA_LIB_EXPORT void MatrixDiagPartV3<int8_t>(const int8_t *matrix_ptr, const int8_t *padding_value_ptr, int8_t *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, cudaStream_t cuda_stream); 
template CUDA_LIB_EXPORT void MatrixDiagPartV3<uint8_t>(const uint8_t *matrix_ptr, const uint8_t *padding_value_ptr, uint8_t *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<int16_t>(const int16_t *matrix_ptr, const int16_t *padding_value_ptr, int16_t *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<uint16_t>(const uint16_t *matrix_ptr, const uint16_t *padding_value_ptr, uint16_t *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<int32_t>(const int32_t *matrix_ptr, const int32_t *padding_value_ptr, int32_t *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<uint32_t>(const uint32_t *matrix_ptr, const uint32_t *padding_value_ptr, uint32_t *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<int64_t>(const int64_t *matrix_ptr, const int64_t *padding_value_ptr, int64_t *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<uint64_t>(const uint64_t *matrix_ptr, const uint64_t *padding_value_ptr, uint64_t *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<half>(const half *matrix_ptr, const half *padding_value_ptr, half *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<float>(const float *matrix_ptr, const float *padding_value_ptr, float *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t 
upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void MatrixDiagPartV3<double>(const double *matrix_ptr, const double *padding_value_ptr, double *diagnal_ptr, const int64_t num_rows, const int64_t num_cols, const int64_t lower_diag_idx, const int64_t upper_diag_index, const int64_t diag_size, const int64_t max_diag_len, const bool left_align_super_diag, const bool left_align_sub_diag, uint32_t device_id, cudaStream_t cuda_stream);
hip_filename: 136a8548a5b8a148e4086f6960f54851ab28f949.hip
hip_content:
// !!! This is a file automatically generated by hipify!!! // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_NO_COMPLEX #define EIGEN_TEST_FUNC cxx11_tensor_random_cuda #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500 #include <hip/hip_fp16.h> #endif #include "main.h" #include <Eigen/CXX11/Tensor> void test_cuda_random_uniform() { Tensor<float, 2> out(72,97); out.setZero(); std::size_t out_bytes = out.size() * sizeof(float); float* d_out; hipMalloc((void**)(&d_out), out_bytes); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97); gpu_out.device(gpu_device) = gpu_out.random(); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); // For now we just check thes code doesn't crash. // TODO: come up with a valid test of randomness } void test_cuda_random_normal() { Tensor<float, 2> out(72,97); out.setZero(); std::size_t out_bytes = out.size() * sizeof(float); float* d_out; hipMalloc((void**)(&d_out), out_bytes); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97); Eigen::internal::NormalRandomGenerator<float> gen(true); gpu_out.device(gpu_device) = gpu_out.random(gen); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); } static void test_complex() { Tensor<std::complex<float>, 1> vec(6); vec.setRandom(); // Fixme: we should check that the generated numbers follow a uniform // distribution instead. for (int i = 1; i < 6; ++i) { VERIFY_IS_NOT_EQUAL(vec(i), vec(i-1)); } } void test_cxx11_tensor_random_cuda() { CALL_SUBTEST(test_cuda_random_uniform()); CALL_SUBTEST(test_cuda_random_normal()); CALL_SUBTEST(test_complex()); }
cuda_filename: 136a8548a5b8a148e4086f6960f54851ab28f949.cu
cuda_content:
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_NO_COMPLEX #define EIGEN_TEST_FUNC cxx11_tensor_random_cuda #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500 #include <cuda_fp16.h> #endif #include "main.h" #include <Eigen/CXX11/Tensor> void test_cuda_random_uniform() { Tensor<float, 2> out(72,97); out.setZero(); std::size_t out_bytes = out.size() * sizeof(float); float* d_out; cudaMalloc((void**)(&d_out), out_bytes); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97); gpu_out.device(gpu_device) = gpu_out.random(); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); // For now we just check thes code doesn't crash. // TODO: come up with a valid test of randomness } void test_cuda_random_normal() { Tensor<float, 2> out(72,97); out.setZero(); std::size_t out_bytes = out.size() * sizeof(float); float* d_out; cudaMalloc((void**)(&d_out), out_bytes); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97); Eigen::internal::NormalRandomGenerator<float> gen(true); gpu_out.device(gpu_device) = gpu_out.random(gen); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); } static void test_complex() { Tensor<std::complex<float>, 1> vec(6); vec.setRandom(); // Fixme: we should check that the generated numbers follow a uniform // distribution instead. for (int i = 1; i < 6; ++i) { VERIFY_IS_NOT_EQUAL(vec(i), vec(i-1)); } } void test_cxx11_tensor_random_cuda() { CALL_SUBTEST(test_cuda_random_uniform()); CALL_SUBTEST(test_cuda_random_normal()); CALL_SUBTEST(test_complex()); }
hip_filename: b84e53ed5ee2cb81398d413cd6d09c05c73e071a.hip
hip_content:
// !!! This is a file automatically generated by hipify!!! /*******************\ The Task List: 1. Author: Vu Duc Thai \*******************/ #include "ReadingImage.hpp" #include "MedianFilter.hpp" #include <cstdio> #include <fstream> // #include <hip/hip_runtime.h> // #include <hip/hip_runtime_api.h> // #include <device_launch_parameters.h> int main() { std::ofstream file_out; file_out.open(FILE_NAME, std::ios_base::out | std::ios_base::app ); file_out << "file_name, rows, cols, kernel_size, bandwidth(GB/s), time_GPU(msec)\n" ; std::string input_folder_path(INPUT_FOLDER_PATH); std::string output_folder_path(OUTPUT_FOLDER_PATH); std::vector<std::string> files; read_files_in_dir(input_folder_path.c_str(), files); std::string temp_file_path; hipEvent_t start, stop; // Allocate CUDA events that we'll use for timing gpuErrchk(hipEventCreate(&start)); gpuErrchk(hipEventCreate(&stop)); for(std::string file : files) { temp_file_path = input_folder_path + file; std::cout << temp_file_path << std::endl; std::cout << file << std::endl; Matrix *input_mat = new Matrix(temp_file_path, KERNEL_SIZE); // std::cout << *input_mat << std::endl; // GPUassert: unspecified launch failure testMain1.cu (line: gpuErrchk(hipDeviceSynchronize()) ) ???? Matrix *output_mat = new Matrix(input_mat->rows, input_mat->cols); // std::cout << *output_mat << std::endl; // GPUassert: unspecified launch failure testMain1.cu (line: gpuErrchk(hipDeviceSynchronize()) ) ???? //the number of elements for padding matrix int new_rows = input_mat->rows + (int)(KERNEL_SIZE/2) * 2; int new_cols = input_mat->cols + (int)(KERNEL_SIZE/2) * 2; // Set our CTA and Grid dimensions dim3 dimBlock(TILE_SIZE, TILE_SIZE); dim3 dimGrid((int)ceil((float)new_cols / (float)TILE_SIZE), (int)ceil((float)new_rows / (float)TILE_SIZE)); // Record the start event gpuErrchk(hipEventRecord(start, NULL)); for(int j = 0; j < ITER_NUM; j++) { // Launch our kernel hipLaunchKernelGGL(( actPaddingMedianFilter) , dim3(dimGrid), dim3(dimBlock), 0, 0, input_mat->d_elements, output_mat->d_elements, input_mat->rows, input_mat->cols); // gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); } // Record the stop event gpuErrchk(hipEventRecord(stop, NULL)); // Wait for the stop event to complete gpuErrchk(hipEventSynchronize(stop)); float msecTotal = 0.0f; gpuErrchk(hipEventElapsedTime(&msecTotal, start, stop)); // Compute and print the performance float msecPerFilter = msecTotal / ITER_NUM; double gigaBytePerFilter = ((double)(input_mat->rows * input_mat->cols) + (double)(new_rows + new_cols)) * 1 * 1.0e-9f; double bandWidth = gigaBytePerFilter / (msecPerFilter / 1000.0f); printf( "BandWidth= %.3f, Time= %.3f msec\n", bandWidth, msecPerFilter); file_out << std::to_string(KERNEL_SIZE) + "_" + file << ", " << input_mat->rows << ", " << input_mat->cols << ", " << KERNEL_SIZE << ", " << bandWidth << ", " << msecPerFilter << "\n"; // copy data back to host memory output_mat->copyCudaMemoryD2H(); // save the output image output_mat->saveImage(output_folder_path + std::to_string(KERNEL_SIZE) + "_" + file); delete input_mat, output_mat; } file_out.close(); std::cout << "===============DONE!================" << std::endl; return 0; }
cuda_filename: b84e53ed5ee2cb81398d413cd6d09c05c73e071a.cu
cuda_content:
/*******************\ The Task List: 1. Author: Vu Duc Thai \*******************/ #include "ReadingImage.hpp" #include "MedianFilter.hpp" #include <cstdio> #include <fstream> // #include <cuda.h> // #include <cuda_runtime_api.h> // #include <device_launch_parameters.h> int main() { std::ofstream file_out; file_out.open(FILE_NAME, std::ios_base::out | std::ios_base::app ); file_out << "file_name, rows, cols, kernel_size, bandwidth(GB/s), time_GPU(msec)\n" ; std::string input_folder_path(INPUT_FOLDER_PATH); std::string output_folder_path(OUTPUT_FOLDER_PATH); std::vector<std::string> files; read_files_in_dir(input_folder_path.c_str(), files); std::string temp_file_path; cudaEvent_t start, stop; // Allocate CUDA events that we'll use for timing gpuErrchk(cudaEventCreate(&start)); gpuErrchk(cudaEventCreate(&stop)); for(std::string file : files) { temp_file_path = input_folder_path + file; std::cout << temp_file_path << std::endl; std::cout << file << std::endl; Matrix *input_mat = new Matrix(temp_file_path, KERNEL_SIZE); // std::cout << *input_mat << std::endl; // GPUassert: unspecified launch failure testMain1.cu (line: gpuErrchk(cudaDeviceSynchronize()) ) ???? Matrix *output_mat = new Matrix(input_mat->rows, input_mat->cols); // std::cout << *output_mat << std::endl; // GPUassert: unspecified launch failure testMain1.cu (line: gpuErrchk(cudaDeviceSynchronize()) ) ???? //the number of elements for padding matrix int new_rows = input_mat->rows + (int)(KERNEL_SIZE/2) * 2; int new_cols = input_mat->cols + (int)(KERNEL_SIZE/2) * 2; // Set our CTA and Grid dimensions dim3 dimBlock(TILE_SIZE, TILE_SIZE); dim3 dimGrid((int)ceil((float)new_cols / (float)TILE_SIZE), (int)ceil((float)new_rows / (float)TILE_SIZE)); // Record the start event gpuErrchk(cudaEventRecord(start, NULL)); for(int j = 0; j < ITER_NUM; j++) { // Launch our kernel actPaddingMedianFilter <<<dimGrid, dimBlock>>> (input_mat->d_elements, output_mat->d_elements, input_mat->rows, input_mat->cols); // gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); } // Record the stop event gpuErrchk(cudaEventRecord(stop, NULL)); // Wait for the stop event to complete gpuErrchk(cudaEventSynchronize(stop)); float msecTotal = 0.0f; gpuErrchk(cudaEventElapsedTime(&msecTotal, start, stop)); // Compute and print the performance float msecPerFilter = msecTotal / ITER_NUM; double gigaBytePerFilter = ((double)(input_mat->rows * input_mat->cols) + (double)(new_rows + new_cols)) * 1 * 1.0e-9f; double bandWidth = gigaBytePerFilter / (msecPerFilter / 1000.0f); printf( "BandWidth= %.3f, Time= %.3f msec\n", bandWidth, msecPerFilter); file_out << std::to_string(KERNEL_SIZE) + "_" + file << ", " << input_mat->rows << ", " << input_mat->cols << ", " << KERNEL_SIZE << ", " << bandWidth << ", " << msecPerFilter << "\n"; // copy data back to host memory output_mat->copyCudaMemoryD2H(); // save the output image output_mat->saveImage(output_folder_path + std::to_string(KERNEL_SIZE) + "_" + file); delete input_mat, output_mat; } file_out.close(); std::cout << "===============DONE!================" << std::endl; return 0; }
hip_filename: 7797941ca51408c399f6b587b3132d4626af139d.hip
hip_content:
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. 
*/ #include <cstdio> #include <cstdlib> #include <vector> #include <hip/hip_runtime.h> #include <cusolverDn.h> #include "cusolver_utils.h" int main(int argc, char *argv[]) { hipsolverDnHandle_t cusolverH = NULL; hipStream_t stream = NULL; const int m = 3; const int lda = m; const int ldb = m; /* * | 1 2 3 | * A = | 4 5 6 | * | 7 8 10 | * * without pivoting: A = L*U * | 1 0 0 | | 1 2 3 | * L = | 4 1 0 |, U = | 0 -3 -6 | * | 7 2 1 | | 0 0 1 | * * with pivoting: P*A = L*U * | 0 0 1 | * P = | 1 0 0 | * | 0 1 0 | * * | 1 0 0 | | 7 8 10 | * L = | 0.1429 1 0 |, U = | 0 0.8571 1.5714 | * | 0.5714 0.5 1 | | 0 0 -0.5 | */ const std::vector<double> A = {1.0, 4.0, 7.0, 2.0, 5.0, 8.0, 3.0, 6.0, 10.0}; const std::vector<double> B = {1.0, 2.0, 3.0}; std::vector<double> X(m, 0); std::vector<double> LU(lda * m, 0); std::vector<int> Ipiv(m, 0); int info = 0; double *d_A = nullptr; /* device copy of A */ double *d_B = nullptr; /* device copy of B */ int *d_Ipiv = nullptr; /* pivoting sequence */ int *d_info = nullptr; /* error info */ int lwork = 0; /* size of workspace */ double *d_work = nullptr; /* device workspace for getrf */ const int pivot_on = 0; if (pivot_on) { printf("pivot is on : compute P*A = L*U \n"); } else { printf("pivot is off: compute A = L*U (not numerically stable)\n"); } printf("A = (matlab base-1)\n"); print_matrix(m, m, A.data(), lda); printf("=====\n"); printf("B = (matlab base-1)\n"); print_matrix(m, 1, B.data(), ldb); printf("=====\n"); /* step 1: create cusolver handle, bind a stream */ CUSOLVER_CHECK(hipsolverDnCreate(&cusolverH)); CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); CUSOLVER_CHECK(hipsolverDnSetStream(cusolverH, stream)); /* step 2: copy A to device */ CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_A), sizeof(double) * A.size())); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_B), sizeof(double) * B.size())); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_Ipiv), sizeof(int) * Ipiv.size())); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_info), sizeof(int))); CUDA_CHECK( hipMemcpyAsync(d_A, A.data(), sizeof(double) * A.size(), hipMemcpyHostToDevice, stream)); CUDA_CHECK( hipMemcpyAsync(d_B, B.data(), sizeof(double) * B.size(), hipMemcpyHostToDevice, stream)); /* step 3: query working space of getrf */ CUSOLVER_CHECK(hipsolverDnDgetrf_bufferSize(cusolverH, m, m, d_A, lda, &lwork)); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_work), sizeof(double) * lwork)); /* step 4: LU factorization */ if (pivot_on) { CUSOLVER_CHECK(hipsolverDnDgetrf(cusolverH, m, m, d_A, lda, d_work, d_Ipiv, d_info)); } else { CUSOLVER_CHECK(hipsolverDnDgetrf(cusolverH, m, m, d_A, lda, d_work, NULL, d_info)); } if (pivot_on) { CUDA_CHECK(hipMemcpyAsync(Ipiv.data(), d_Ipiv, sizeof(int) * Ipiv.size(), hipMemcpyDeviceToHost, stream)); } CUDA_CHECK( hipMemcpyAsync(LU.data(), d_A, sizeof(double) * A.size(), hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipMemcpyAsync(&info, d_info, sizeof(int), hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); if (0 > info) { printf("%d-th parameter is wrong \n", -info); exit(1); } if (pivot_on) { printf("pivoting sequence, matlab base-1\n"); for (int j = 0; j < m; j++) { printf("Ipiv(%d) = %d\n", j + 1, Ipiv[j]); } } printf("L and U = (matlab base-1)\n"); print_matrix(m, m, LU.data(), lda); printf("=====\n"); /* * step 5: solve A*X = B * | 1 | | -0.3333 | * B = | 2 |, X = | 0.6667 | * | 3 | | 0 | * */ if (pivot_on) { CUSOLVER_CHECK(hipsolverDnDgetrs(cusolverH, HIPBLAS_OP_N, m, 1, /* nrhs */ d_A, lda, d_Ipiv, 
d_B, ldb, d_info)); } else { CUSOLVER_CHECK(hipsolverDnDgetrs(cusolverH, HIPBLAS_OP_N, m, 1, /* nrhs */ d_A, lda, NULL, d_B, ldb, d_info)); } CUDA_CHECK( hipMemcpyAsync(X.data(), d_B, sizeof(double) * X.size(), hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); printf("X = (matlab base-1)\n"); print_matrix(m, 1, X.data(), ldb); printf("=====\n"); /* free resources */ CUDA_CHECK(hipFree(d_A)); CUDA_CHECK(hipFree(d_B)); CUDA_CHECK(hipFree(d_Ipiv)); CUDA_CHECK(hipFree(d_info)); CUDA_CHECK(hipFree(d_work)); CUSOLVER_CHECK(hipsolverDnDestroy(cusolverH)); CUDA_CHECK(hipStreamDestroy(stream)); CUDA_CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
cuda_filename: 7797941ca51408c399f6b587b3132d4626af139d.cu
cuda_content:
/* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. 
*/ #include <cstdio> #include <cstdlib> #include <vector> #include <cuda_runtime.h> #include <cusolverDn.h> #include "cusolver_utils.h" int main(int argc, char *argv[]) { cusolverDnHandle_t cusolverH = NULL; cudaStream_t stream = NULL; const int m = 3; const int lda = m; const int ldb = m; /* * | 1 2 3 | * A = | 4 5 6 | * | 7 8 10 | * * without pivoting: A = L*U * | 1 0 0 | | 1 2 3 | * L = | 4 1 0 |, U = | 0 -3 -6 | * | 7 2 1 | | 0 0 1 | * * with pivoting: P*A = L*U * | 0 0 1 | * P = | 1 0 0 | * | 0 1 0 | * * | 1 0 0 | | 7 8 10 | * L = | 0.1429 1 0 |, U = | 0 0.8571 1.5714 | * | 0.5714 0.5 1 | | 0 0 -0.5 | */ const std::vector<double> A = {1.0, 4.0, 7.0, 2.0, 5.0, 8.0, 3.0, 6.0, 10.0}; const std::vector<double> B = {1.0, 2.0, 3.0}; std::vector<double> X(m, 0); std::vector<double> LU(lda * m, 0); std::vector<int> Ipiv(m, 0); int info = 0; double *d_A = nullptr; /* device copy of A */ double *d_B = nullptr; /* device copy of B */ int *d_Ipiv = nullptr; /* pivoting sequence */ int *d_info = nullptr; /* error info */ int lwork = 0; /* size of workspace */ double *d_work = nullptr; /* device workspace for getrf */ const int pivot_on = 0; if (pivot_on) { printf("pivot is on : compute P*A = L*U \n"); } else { printf("pivot is off: compute A = L*U (not numerically stable)\n"); } printf("A = (matlab base-1)\n"); print_matrix(m, m, A.data(), lda); printf("=====\n"); printf("B = (matlab base-1)\n"); print_matrix(m, 1, B.data(), ldb); printf("=====\n"); /* step 1: create cusolver handle, bind a stream */ CUSOLVER_CHECK(cusolverDnCreate(&cusolverH)); CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); CUSOLVER_CHECK(cusolverDnSetStream(cusolverH, stream)); /* step 2: copy A to device */ CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_A), sizeof(double) * A.size())); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_B), sizeof(double) * B.size())); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_Ipiv), sizeof(int) * Ipiv.size())); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_info), sizeof(int))); CUDA_CHECK( cudaMemcpyAsync(d_A, A.data(), sizeof(double) * A.size(), cudaMemcpyHostToDevice, stream)); CUDA_CHECK( cudaMemcpyAsync(d_B, B.data(), sizeof(double) * B.size(), cudaMemcpyHostToDevice, stream)); /* step 3: query working space of getrf */ CUSOLVER_CHECK(cusolverDnDgetrf_bufferSize(cusolverH, m, m, d_A, lda, &lwork)); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_work), sizeof(double) * lwork)); /* step 4: LU factorization */ if (pivot_on) { CUSOLVER_CHECK(cusolverDnDgetrf(cusolverH, m, m, d_A, lda, d_work, d_Ipiv, d_info)); } else { CUSOLVER_CHECK(cusolverDnDgetrf(cusolverH, m, m, d_A, lda, d_work, NULL, d_info)); } if (pivot_on) { CUDA_CHECK(cudaMemcpyAsync(Ipiv.data(), d_Ipiv, sizeof(int) * Ipiv.size(), cudaMemcpyDeviceToHost, stream)); } CUDA_CHECK( cudaMemcpyAsync(LU.data(), d_A, sizeof(double) * A.size(), cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaMemcpyAsync(&info, d_info, sizeof(int), cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); if (0 > info) { printf("%d-th parameter is wrong \n", -info); exit(1); } if (pivot_on) { printf("pivoting sequence, matlab base-1\n"); for (int j = 0; j < m; j++) { printf("Ipiv(%d) = %d\n", j + 1, Ipiv[j]); } } printf("L and U = (matlab base-1)\n"); print_matrix(m, m, LU.data(), lda); printf("=====\n"); /* * step 5: solve A*X = B * | 1 | | -0.3333 | * B = | 2 |, X = | 0.6667 | * | 3 | | 0 | * */ if (pivot_on) { CUSOLVER_CHECK(cusolverDnDgetrs(cusolverH, CUBLAS_OP_N, m, 1, /* nrhs */ d_A, lda, 
d_Ipiv, d_B, ldb, d_info)); } else { CUSOLVER_CHECK(cusolverDnDgetrs(cusolverH, CUBLAS_OP_N, m, 1, /* nrhs */ d_A, lda, NULL, d_B, ldb, d_info)); } CUDA_CHECK( cudaMemcpyAsync(X.data(), d_B, sizeof(double) * X.size(), cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); printf("X = (matlab base-1)\n"); print_matrix(m, 1, X.data(), ldb); printf("=====\n"); /* free resources */ CUDA_CHECK(cudaFree(d_A)); CUDA_CHECK(cudaFree(d_B)); CUDA_CHECK(cudaFree(d_Ipiv)); CUDA_CHECK(cudaFree(d_info)); CUDA_CHECK(cudaFree(d_work)); CUSOLVER_CHECK(cusolverDnDestroy(cusolverH)); CUDA_CHECK(cudaStreamDestroy(stream)); CUDA_CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
hip_filename: 296e9fa35739e0bd3cfe57e8a8ef2045ac66aa65.hip
hip_content:
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved // modified from // https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu #include "box_iou_rotated_cuda.cuh" #include "pytorch_cuda_helper.hpp" Tensor box_iou_rotated_cuda(const Tensor boxes1, const Tensor boxes2) { using scalar_t = float; AT_ASSERTM(boxes1.type().is_cuda(), "boxes1 must be a CUDA tensor"); AT_ASSERTM(boxes2.type().is_cuda(), "boxes2 must be a CUDA tensor"); at::hip::HIPGuardMasqueradingAsCUDA device_guard(boxes1.device()); int num_boxes1 = boxes1.size(0); int num_boxes2 = boxes2.size(0); Tensor ious = at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); if (num_boxes1 > 0 && num_boxes2 > 0) { const int blocks_x = at::cuda::ATenCeilDiv(num_boxes1, BLOCK_DIM_X); const int blocks_y = at::cuda::ATenCeilDiv(num_boxes2, BLOCK_DIM_Y); dim3 blocks(blocks_x, blocks_y); dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( box_iou_rotated_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream, num_boxes1, num_boxes2, boxes1.data_ptr<scalar_t>(), boxes2.data_ptr<scalar_t>(), (scalar_t*)ious.data_ptr<scalar_t>()); AT_CUDA_CHECK(hipGetLastError()); } // reshape from 1d array to 2d array auto shape = std::vector<int64_t>{num_boxes1, num_boxes2}; return ious.reshape(shape); }
cuda_filename: 296e9fa35739e0bd3cfe57e8a8ef2045ac66aa65.cu
cuda_content:
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved // modified from // https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu #include "box_iou_rotated_cuda.cuh" #include "pytorch_cuda_helper.hpp" Tensor box_iou_rotated_cuda(const Tensor boxes1, const Tensor boxes2) { using scalar_t = float; AT_ASSERTM(boxes1.type().is_cuda(), "boxes1 must be a CUDA tensor"); AT_ASSERTM(boxes2.type().is_cuda(), "boxes2 must be a CUDA tensor"); at::cuda::CUDAGuard device_guard(boxes1.device()); int num_boxes1 = boxes1.size(0); int num_boxes2 = boxes2.size(0); Tensor ious = at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); if (num_boxes1 > 0 && num_boxes2 > 0) { const int blocks_x = at::cuda::ATenCeilDiv(num_boxes1, BLOCK_DIM_X); const int blocks_y = at::cuda::ATenCeilDiv(num_boxes2, BLOCK_DIM_Y); dim3 blocks(blocks_x, blocks_y); dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); box_iou_rotated_cuda_kernel<scalar_t><<<blocks, threads, 0, stream>>>( num_boxes1, num_boxes2, boxes1.data_ptr<scalar_t>(), boxes2.data_ptr<scalar_t>(), (scalar_t*)ious.data_ptr<scalar_t>()); AT_CUDA_CHECK(cudaGetLastError()); } // reshape from 1d array to 2d array auto shape = std::vector<int64_t>{num_boxes1, num_boxes2}; return ious.reshape(shape); }
hip_filename: a3bbb640a53fde38fb182192beaa14d2d2299399.hip
hip_content:
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Luigy Machaca Arcana // Computer science - Arequipa, Per 2017 #include <stdlib.h> #include <stdio.h> #include <fstream> #include <iostream> #include <string> using namespace std; #define WIDTH_TILE 32 __global__ void convolution(int** dd_mat_a, int n_rows_a, int n_cols_a ,double** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){ int n_kernel_row = n_rows_b; //n_cols_b int n_kernel_col = n_cols_b; //n_cols_b int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; if( ((int)(n_kernel_row/2)-1)< row && row<(n_rows_a-(int)(n_kernel_row/2)) && ((int)(n_kernel_col/2)-1)< col && col<(n_cols_a-(int)(n_kernel_col/2)) ){ double offset = 0; for(int k=0 ; k<n_kernel_row ; k++){ for(int l=0 ; l<n_kernel_col ; l++){ double cc = dd_mat_b[k][l]; double dd = 0; dd = (double)dd_mat_a[row-(int)(n_kernel_row/2)+k][col-(int)(n_kernel_col/2)+l]; offset += cc*dd; } } offset = offset>0?offset:0; dd_mat_c[row][col] = offset; //dd_mat_c[row][col] = dd_mat_a[row][col]; } } __global__ void convolution_complete(int** dd_mat_a, int n_rows_a, int n_cols_a ,double** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){ int n_kernel_row = n_rows_b; //n_cols_b int n_kernel_col = n_cols_b; //n_cols_b int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; if( row<n_rows_a && col<n_cols_a ){ double offset = 0; for(int k=0 ; k<n_kernel_row ; k++){ for(int l=0 ; l<n_kernel_col ; l++){ double cc = dd_mat_b[k][l]; double dd = 0; //dd = dd_mat_a[row-(int)(n_kernel_row/2)+k][col-(int)(n_kernel_col/2)+l]; if( (row-(int)(n_kernel_row/2)+k)>=0 && (row-(int)(n_kernel_row/2)+k)<n_rows_a && (col-(int)(n_kernel_col/2)+l)>=0 && (col-(int)(n_kernel_col/2)+l)<n_cols_a ){ dd = dd_mat_a[row-(int)(n_kernel_row/2)+k][col-(int)(n_kernel_col/2)+l]; } offset += cc*dd; } } offset = -1/256*offset; offset = offset>0?offset:0; offset = (int)offset%255 + 1; dd_mat_c[row][col] = offset; //dd_mat_c[row][col] = -1; } } __global__ void matrix_mult_shared(int** dd_mat_a, int n_rows_a, int n_cols_a ,int** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){ __shared__ int Mds[WIDTH_TILE][WIDTH_TILE]; __shared__ int Nds[WIDTH_TILE][WIDTH_TILE]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int value = 0; int row = by*WIDTH_TILE + ty; int col = bx*WIDTH_TILE + tx; int width = n_cols_a; //n_cols_a == n_rows_b int k; for( k=0 ; k<(int)(width-1+WIDTH_TILE)/(int)WIDTH_TILE ; ++k ){ if (k*WIDTH_TILE+tx < n_cols_a && row < n_rows_a){ Mds[ty][tx] = dd_mat_a[row][k*WIDTH_TILE+tx]; } else{ Mds[ty][tx] = 0; } if (k*WIDTH_TILE+ty < n_rows_b && col < n_cols_b){ Nds[ty][tx] = dd_mat_b[k*WIDTH_TILE+ty][col]; } else{ Nds[ty][tx] = 0; } __syncthreads(); int m; for(m=0 ; m<WIDTH_TILE ; ++m){ value += Mds[ty][m]*Nds[m][tx]; } __syncthreads(); } if(row<n_rows_c && col<n_cols_c){ dd_mat_c[row][col]=value; } } __global__ void matrix_mult(int** dd_mat_a, int n_rows_a, int n_cols_a ,int** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){ int value=0; int tx=threadIdx.x; int ty=threadIdx.y; int x = tx + blockIdx.x*blockDim.x; int y = ty + blockIdx.y*blockDim.y; if( y<n_rows_c && x<n_cols_c ){ int i; for(i=0 ; i<n_cols_a ; i++){ value += dd_mat_a[y][i] * dd_mat_b[i][x]; } dd_mat_c[y][x]=value; } } void fill(int** mat, int n, int m){ srand(time(0)); int i,j; 
for(i=0; i<n ;i++){ for(j=0; j<m ;j++) //mat[i][j] = rand()%3+1; mat[i][j] = 1; } } void fill_value(int** mat,int n, int m, int value=0){ int i,j; for(i=0;i<n;i++) for(j=0;j<m;j++) mat[i][j] = value; } void print(int** mat,int n, int m){ int i,j; for(i=0; i<n ;i++){ for(j=0; j<m ;j++) printf("%d ",mat[i][j]); printf("\n"); } } void print2(double** mat,int n, int m){ int i,j; for(i=0; i<n ;i++){ for(j=0; j<m ;j++) printf("%f ",mat[i][j]); printf("\n"); } } double max_value_matrix(int** mat,int n, int m){ int i,j; int max = -100000; for(i=0; i<n ;i++){ for(j=0; j<m ;j++){ max = (mat[i][j] > max)?mat[i][j]:max; } } return max; } void normalize(int** mat,int n, int m, double value_normalice){ int i,j; for(i=0; i<n ;i++){ for(j=0; j<m ;j++){ mat[i][j] = mat[i][j] / (double)value_normalice ; } } } void create_copy(int**& mat, int**& d_mat, int**& dd_mat, int n_rows, int n_cols){ int i; int size_row = sizeof(int*) * n_rows; d_mat = (int**) malloc(size_row); hipMalloc((void**)& d_mat[0], sizeof(int) * n_rows * n_cols ); hipMemcpy( d_mat[0], mat[0], sizeof(int) * n_rows * n_cols ,hipMemcpyHostToDevice); for( i=1 ; i<n_rows ; i++ ){ d_mat[i] = (d_mat[0]+i*n_cols); } hipMalloc((void***)& dd_mat, size_row ); hipMemcpy( dd_mat, d_mat, size_row, hipMemcpyHostToDevice ); } void create(int**& mat, int**& d_mat, int**& dd_mat, int n_rows, int n_cols, int fillValue=-1){ int i; mat = (int** )malloc(sizeof(int*) * n_rows ); mat[0] = (int* )malloc(sizeof(int ) * n_rows * n_cols ); for( i=1 ; i<n_rows ; i++ ){ mat[i] = mat[i-1]+n_cols; } if(fillValue==-1){ fill(mat,n_rows,n_cols); } else{ fill_value(mat,n_rows,n_cols,fillValue); } int size_row = sizeof(int*) * n_rows; d_mat = (int**) malloc(size_row); hipMalloc((void**)& d_mat[0], sizeof(int) * n_rows * n_cols ); hipMemcpy( d_mat[0], mat[0], sizeof(int) * n_rows * n_cols ,hipMemcpyHostToDevice); for( i=1 ; i<n_rows ; i++ ){ d_mat[i] = (d_mat[0]+i*n_cols); } hipMalloc((void***)& dd_mat, size_row ); hipMemcpy( dd_mat, d_mat, size_row, hipMemcpyHostToDevice ); } void create_kernell_random(double**& mat, double**& d_mat, double**& dd_mat, int n_rows, int n_cols){ int i,j; mat = (double** )malloc(sizeof(double*) * n_rows ); mat[0] = (double* )malloc(sizeof(double ) * n_rows * n_cols ); for( i=1 ; i<n_rows ; i++ ){ mat[i] = mat[i-1]+n_cols; } srand(time(0)); for(i=0; i<n_rows ;i++){ for(j=0; j<n_cols ;j++){ mat[i][j] = (double)(rand()%100-50); //mat[i][j] = 1; } } int size_row = sizeof(double*) * n_rows; d_mat = (double**) malloc(size_row); hipMalloc((void**)& d_mat[0], sizeof(double) * n_rows * n_cols ); hipMemcpy( d_mat[0], mat[0], sizeof(double) * n_rows * n_cols ,hipMemcpyHostToDevice); for( i=1 ; i<n_rows ; i++ ){ d_mat[i] = (d_mat[0]+i*n_cols); } hipMalloc((void***)& dd_mat, size_row ); hipMemcpy( dd_mat, d_mat, size_row, hipMemcpyHostToDevice ); } ///////////////////////////////////////////////////////////////////////// ///////////////// Filter Edge detection ///////////////////////////////////////////////////////////////////////// void fill_kernel_3x3_Edge_detection(double** mat, int n, int m, double scalar_kernel=1){ mat[0][0]=0; mat[0][1]= 1; mat[0][2]=0; mat[1][0]=1; mat[1][1]=-4; mat[1][2]=1; mat[2][0]=0; mat[2][1]= 1; mat[2][2]=0; for(int i=0 ; i<n ; i++){ for(int j=0 ; j<m ; j++){ mat[i][j]=scalar_kernel*mat[i][j]; } } } ///////////////////////////////////////////////////////////////////////// ///////////////// Filter Sharpen ///////////////////////////////////////////////////////////////////////// void fill_kernel_3x3_Sharpen(double** mat, int n, int 
m, double scalar_kernel=1){ // 0 -1 0 //-1 5 -1 // 0 -1 0 mat[0][0]=0; mat[0][1]=-1; mat[0][2]=0; mat[1][0]=-1; mat[1][1]=5; mat[1][2]=-1; mat[2][0]=0; mat[2][1]=-1; mat[2][2]=0; for(int i=0 ; i<n ; i++){ for(int j=0 ; j<m ; j++){ mat[i][j]=scalar_kernel*mat[i][j]; } } } ///////////////////////////////////////////////////////////////////////// ///////////////// Gaussian blur ///////////////////////////////////////////////////////////////////////// void fill_kernel_5x5_Gaussian_blur(double** mat, int n, int m, double scalar_kernel=1){ // 1 4 6 4 1 // 4 16 24 16 4 //(-1/256) // 6 24 -476 24 6 // 4 16 24 16 4 // 1 4 6 4 1 mat[0][0]=1; mat[0][1]=4 ; mat[0][2]=6 ; mat[0][3]=4 ; mat[0][4]=1; mat[1][0]=4; mat[1][1]=16; mat[1][2]=24 ; mat[1][3]=16; mat[1][4]=4; mat[2][0]=6; mat[2][1]=24; mat[2][2]=-476; mat[2][3]=24; mat[2][4]=6; mat[3][0]=4; mat[3][1]=16; mat[3][2]=24 ; mat[3][3]=16; mat[3][4]=4; mat[4][0]=1; mat[4][1]=4 ; mat[4][2]=6 ; mat[4][3]=4 ; mat[4][4]=1; printf("2222xxxxxxx %.25f\n",scalar_kernel); for(int i=0 ; i<n ; i++){ for(int j=0 ; j<m ; j++){ mat[i][j] = scalar_kernel*mat[i][j]; } } } void create_kernell_static(double**& mat, double**& d_mat, double**& dd_mat, int n_rows, int n_cols, double scalar_kernel=1){ int i; mat = (double** )malloc(sizeof(double*) * n_rows ); mat[0] = (double* )malloc(sizeof(double ) * n_rows * n_cols ); for( i=1 ; i<n_rows ; i++ ){ mat[i] = mat[i-1]+n_cols; } //fill_kernel_3x3_Edge_detection(mat,n_rows,n_cols, scalar_kernel); fill_kernel_3x3_Sharpen(mat,n_rows,n_cols, scalar_kernel); //fill_kernel_5x5_Gaussian_blur(mat,n_rows,n_cols, scalar_kernel); int size_row = sizeof(double*) * n_rows; d_mat = (double**) malloc(size_row); hipMalloc((void**)& d_mat[0], sizeof(double) * n_rows * n_cols ); hipMemcpy( d_mat[0], mat[0], sizeof(double) * n_rows * n_cols ,hipMemcpyHostToDevice); for( i=1 ; i<n_rows ; i++ ){ d_mat[i] = (d_mat[0]+i*n_cols); } hipMalloc((void***)& dd_mat, size_row ); hipMemcpy( dd_mat, d_mat, size_row, hipMemcpyHostToDevice ); } int main(int argc, char *argv[]){ printf("//////////////////////////////////\n"); char temp1[350]; strcpy (temp1 , argv[1]); const char* img_input_name = temp1; char temp2[150]; strcpy (temp2 , argv[1]); strcat (temp2 , ".out.random.kernel.sharpen.pgm"); const char* img_output_name = temp2; printf ("name in: %s\n",img_input_name); printf ("name out: %s\n",img_output_name); string title1,title2; char rows[15]; char cols[15]; char max_val[15]; int n_rows = -1; int n_cols = -1; //int max_value = -1; ///////////////////////////////////////////////////////////// ifstream myReadFile; myReadFile.open(img_input_name); char out_temp[100]; int** mat_a; if (myReadFile.is_open()){ std::getline(myReadFile,title1); std::getline(myReadFile,title2); myReadFile >> cols; n_cols = atoi(cols); //n_cols = 15; //cout << n_cols << endl; myReadFile >> rows; n_rows = atoi(rows); //n_rows = 15; //cout << n_rows << endl; myReadFile >> max_val; //max_value = atoi(max_val); //cout << max_value << endl; ///////////////////////////////////////////////////////////// mat_a = (int** )malloc(sizeof(int*) * n_rows ); mat_a[0] = (int* )malloc(sizeof(int ) * n_rows * n_cols ); for( int i=1 ; i<n_rows ; i++ ){ mat_a[i] = mat_a[i-1]+n_cols; } ///////////////////////////////////////////////////////////// int n_temp; for(int i=0 ; i<n_rows ; i++){ for(int j=0 ; j<n_cols ; j++){ if(!myReadFile.eof()){ myReadFile >> out_temp; n_temp = atoi(out_temp); mat_a[i][j] = n_temp; //cout << n_temp << endl; } } } } myReadFile.close(); 
///////////////////////////////////////////////////// int n_rows_a = n_rows; int n_cols_a = n_cols; int n_rows_b = 3; //n_kernel int n_cols_b = 3; //n_kernel //double scalar_kernel = (-1)/(double)256; //escalar_kernel double scalar_kernel = 1; //solo con static_kernel //printf("escalar_kernel: %f\n",scalar_kernel); int n_rows_c = n_rows; int n_cols_c = n_cols; //int** mat_a; int** d_mat_a; int** dd_mat_a; //int** mat_a; int** d_mat_a; int** dd_mat_a; double** mat_b; double** d_mat_b; double** dd_mat_b; int** mat_c; int** d_mat_c; int** dd_mat_c; create_copy( mat_a, d_mat_a, dd_mat_a, n_rows_a, n_cols_a); //create( mat_a, d_mat_a, dd_mat_a, n_rows_a, n_cols_a ); create_kernell_static( mat_b, d_mat_b, dd_mat_b, n_rows_b, n_cols_b, scalar_kernel ); //create_kernell_random( mat_b, d_mat_b, dd_mat_b, n_rows_b, n_cols_b ); create( mat_c, d_mat_c, dd_mat_c, n_rows_c, n_cols_c, 0 ); ///////////////////////////////////////// dim3 blockNum(WIDTH_TILE,WIDTH_TILE,1); dim3 grid((int)(n_cols_c-1+blockNum.x)/blockNum.x,(int)(n_rows_c-1+blockNum.y)/blockNum.y,1); printf("ty: %d, tx: %d\n",(int)(n_rows_c-1+blockNum.y)/blockNum.y, (int)(n_cols_c-1+blockNum.x)/blockNum.x); printf("grid_row: %d, grid_col: %d\n",grid.x , grid.y ); //////////////////////////////////////////////////// hipLaunchKernelGGL(( convolution), dim3(grid),dim3(blockNum), 0, 0, dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c); //convolution_complete<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c); //matrix_mult_shared<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c); //matrix_mult<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c); ///////////////////////////////////////////////////// hipMemcpy(mat_c[0],d_mat_c[0],sizeof(int)*n_rows_c*n_cols_c,hipMemcpyDeviceToHost); //printf("//////////////////////////////////\n"); //printf("//////////////////////////////////\n"); //print(mat_a,n_rows_a,n_cols_a); printf("//////// KERNELL SHARPEN //////////\n"); print2(mat_b,n_rows_b,n_cols_b); printf("//////////////////////////////////\n"); //print(mat_c,n_rows_c,n_cols_c); ////////////////////////////////////////////// double max_matrix = max_value_matrix(mat_c, n_rows_c, n_cols_c); //printf("<<<<<<<<<<<<<<<<<<<<<%f\n",max_matrix); ofstream myfile; myfile.open (img_output_name); myfile << title1 <<endl; myfile << title2 <<endl; myfile << n_cols_c <<" "<< n_rows_c <<endl; //myfile << max_value <<endl; myfile << max_matrix <<endl; for(int i=0 ; i<n_rows_c ; i++){ for(int j=0 ; j<n_cols_c ; j++){ myfile << mat_c[i][j] <<endl; } } myfile.close(); ////////////////////////////////////////////// hipFree(dd_mat_a); hipFree(dd_mat_b); hipFree(dd_mat_c); hipFree(d_mat_a); hipFree(d_mat_b); hipFree(d_mat_c); free(mat_a); free(mat_b); free(mat_c); return 0; }
a3bbb640a53fde38fb182192beaa14d2d2299399.cu
// Luigy Machaca Arcana // Computer science - Arequipa, Perú 2017 #include <stdlib.h> #include <stdio.h> #include <fstream> #include <iostream> #include <string> using namespace std; #define WIDTH_TILE 32 __global__ void convolution(int** dd_mat_a, int n_rows_a, int n_cols_a ,double** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){ int n_kernel_row = n_rows_b; //n_cols_b int n_kernel_col = n_cols_b; //n_cols_b int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; if( ((int)(n_kernel_row/2)-1)< row && row<(n_rows_a-(int)(n_kernel_row/2)) && ((int)(n_kernel_col/2)-1)< col && col<(n_cols_a-(int)(n_kernel_col/2)) ){ double offset = 0; for(int k=0 ; k<n_kernel_row ; k++){ for(int l=0 ; l<n_kernel_col ; l++){ double cc = dd_mat_b[k][l]; double dd = 0; dd = (double)dd_mat_a[row-(int)(n_kernel_row/2)+k][col-(int)(n_kernel_col/2)+l]; offset += cc*dd; } } offset = offset>0?offset:0; dd_mat_c[row][col] = offset; //dd_mat_c[row][col] = dd_mat_a[row][col]; } } __global__ void convolution_complete(int** dd_mat_a, int n_rows_a, int n_cols_a ,double** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){ int n_kernel_row = n_rows_b; //n_cols_b int n_kernel_col = n_cols_b; //n_cols_b int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; if( row<n_rows_a && col<n_cols_a ){ double offset = 0; for(int k=0 ; k<n_kernel_row ; k++){ for(int l=0 ; l<n_kernel_col ; l++){ double cc = dd_mat_b[k][l]; double dd = 0; //dd = dd_mat_a[row-(int)(n_kernel_row/2)+k][col-(int)(n_kernel_col/2)+l]; if( (row-(int)(n_kernel_row/2)+k)>=0 && (row-(int)(n_kernel_row/2)+k)<n_rows_a && (col-(int)(n_kernel_col/2)+l)>=0 && (col-(int)(n_kernel_col/2)+l)<n_cols_a ){ dd = dd_mat_a[row-(int)(n_kernel_row/2)+k][col-(int)(n_kernel_col/2)+l]; } offset += cc*dd; } } offset = -1/256*offset; offset = offset>0?offset:0; offset = (int)offset%255 + 1; dd_mat_c[row][col] = offset; //dd_mat_c[row][col] = -1; } } __global__ void matrix_mult_shared(int** dd_mat_a, int n_rows_a, int n_cols_a ,int** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){ __shared__ int Mds[WIDTH_TILE][WIDTH_TILE]; __shared__ int Nds[WIDTH_TILE][WIDTH_TILE]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int value = 0; int row = by*WIDTH_TILE + ty; int col = bx*WIDTH_TILE + tx; int width = n_cols_a; //n_cols_a == n_rows_b int k; for( k=0 ; k<(int)(width-1+WIDTH_TILE)/(int)WIDTH_TILE ; ++k ){ if (k*WIDTH_TILE+tx < n_cols_a && row < n_rows_a){ Mds[ty][tx] = dd_mat_a[row][k*WIDTH_TILE+tx]; } else{ Mds[ty][tx] = 0; } if (k*WIDTH_TILE+ty < n_rows_b && col < n_cols_b){ Nds[ty][tx] = dd_mat_b[k*WIDTH_TILE+ty][col]; } else{ Nds[ty][tx] = 0; } __syncthreads(); int m; for(m=0 ; m<WIDTH_TILE ; ++m){ value += Mds[ty][m]*Nds[m][tx]; } __syncthreads(); } if(row<n_rows_c && col<n_cols_c){ dd_mat_c[row][col]=value; } } __global__ void matrix_mult(int** dd_mat_a, int n_rows_a, int n_cols_a ,int** dd_mat_b, int n_rows_b, int n_cols_b, int** dd_mat_c, int n_rows_c, int n_cols_c){ int value=0; int tx=threadIdx.x; int ty=threadIdx.y; int x = tx + blockIdx.x*blockDim.x; int y = ty + blockIdx.y*blockDim.y; if( y<n_rows_c && x<n_cols_c ){ int i; for(i=0 ; i<n_cols_a ; i++){ value += dd_mat_a[y][i] * dd_mat_b[i][x]; } dd_mat_c[y][x]=value; } } void fill(int** mat, int n, int m){ srand(time(0)); int i,j; for(i=0; i<n ;i++){ for(j=0; j<m ;j++) //mat[i][j] = rand()%3+1; mat[i][j] = 1; } } 
void fill_value(int** mat,int n, int m, int value=0){ int i,j; for(i=0;i<n;i++) for(j=0;j<m;j++) mat[i][j] = value; } void print(int** mat,int n, int m){ int i,j; for(i=0; i<n ;i++){ for(j=0; j<m ;j++) printf("%d ",mat[i][j]); printf("\n"); } } void print2(double** mat,int n, int m){ int i,j; for(i=0; i<n ;i++){ for(j=0; j<m ;j++) printf("%f ",mat[i][j]); printf("\n"); } } double max_value_matrix(int** mat,int n, int m){ int i,j; int max = -100000; for(i=0; i<n ;i++){ for(j=0; j<m ;j++){ max = (mat[i][j] > max)?mat[i][j]:max; } } return max; } void normalize(int** mat,int n, int m, double value_normalice){ int i,j; for(i=0; i<n ;i++){ for(j=0; j<m ;j++){ mat[i][j] = mat[i][j] / (double)value_normalice ; } } } void create_copy(int**& mat, int**& d_mat, int**& dd_mat, int n_rows, int n_cols){ int i; int size_row = sizeof(int*) * n_rows; d_mat = (int**) malloc(size_row); cudaMalloc((void**)& d_mat[0], sizeof(int) * n_rows * n_cols ); cudaMemcpy( d_mat[0], mat[0], sizeof(int) * n_rows * n_cols ,cudaMemcpyHostToDevice); for( i=1 ; i<n_rows ; i++ ){ d_mat[i] = (d_mat[0]+i*n_cols); } cudaMalloc((void***)& dd_mat, size_row ); cudaMemcpy( dd_mat, d_mat, size_row, cudaMemcpyHostToDevice ); } void create(int**& mat, int**& d_mat, int**& dd_mat, int n_rows, int n_cols, int fillValue=-1){ int i; mat = (int** )malloc(sizeof(int*) * n_rows ); mat[0] = (int* )malloc(sizeof(int ) * n_rows * n_cols ); for( i=1 ; i<n_rows ; i++ ){ mat[i] = mat[i-1]+n_cols; } if(fillValue==-1){ fill(mat,n_rows,n_cols); } else{ fill_value(mat,n_rows,n_cols,fillValue); } int size_row = sizeof(int*) * n_rows; d_mat = (int**) malloc(size_row); cudaMalloc((void**)& d_mat[0], sizeof(int) * n_rows * n_cols ); cudaMemcpy( d_mat[0], mat[0], sizeof(int) * n_rows * n_cols ,cudaMemcpyHostToDevice); for( i=1 ; i<n_rows ; i++ ){ d_mat[i] = (d_mat[0]+i*n_cols); } cudaMalloc((void***)& dd_mat, size_row ); cudaMemcpy( dd_mat, d_mat, size_row, cudaMemcpyHostToDevice ); } void create_kernell_random(double**& mat, double**& d_mat, double**& dd_mat, int n_rows, int n_cols){ int i,j; mat = (double** )malloc(sizeof(double*) * n_rows ); mat[0] = (double* )malloc(sizeof(double ) * n_rows * n_cols ); for( i=1 ; i<n_rows ; i++ ){ mat[i] = mat[i-1]+n_cols; } srand(time(0)); for(i=0; i<n_rows ;i++){ for(j=0; j<n_cols ;j++){ mat[i][j] = (double)(rand()%100-50); //mat[i][j] = 1; } } int size_row = sizeof(double*) * n_rows; d_mat = (double**) malloc(size_row); cudaMalloc((void**)& d_mat[0], sizeof(double) * n_rows * n_cols ); cudaMemcpy( d_mat[0], mat[0], sizeof(double) * n_rows * n_cols ,cudaMemcpyHostToDevice); for( i=1 ; i<n_rows ; i++ ){ d_mat[i] = (d_mat[0]+i*n_cols); } cudaMalloc((void***)& dd_mat, size_row ); cudaMemcpy( dd_mat, d_mat, size_row, cudaMemcpyHostToDevice ); } ///////////////////////////////////////////////////////////////////////// ///////////////// Filter Edge detection ///////////////////////////////////////////////////////////////////////// void fill_kernel_3x3_Edge_detection(double** mat, int n, int m, double scalar_kernel=1){ mat[0][0]=0; mat[0][1]= 1; mat[0][2]=0; mat[1][0]=1; mat[1][1]=-4; mat[1][2]=1; mat[2][0]=0; mat[2][1]= 1; mat[2][2]=0; for(int i=0 ; i<n ; i++){ for(int j=0 ; j<m ; j++){ mat[i][j]=scalar_kernel*mat[i][j]; } } } ///////////////////////////////////////////////////////////////////////// ///////////////// Filter Sharpen ///////////////////////////////////////////////////////////////////////// void fill_kernel_3x3_Sharpen(double** mat, int n, int m, double scalar_kernel=1){ // 0 -1 0 //-1 5 -1 // 0 -1 0 
mat[0][0]=0; mat[0][1]=-1; mat[0][2]=0; mat[1][0]=-1; mat[1][1]=5; mat[1][2]=-1; mat[2][0]=0; mat[2][1]=-1; mat[2][2]=0; for(int i=0 ; i<n ; i++){ for(int j=0 ; j<m ; j++){ mat[i][j]=scalar_kernel*mat[i][j]; } } } ///////////////////////////////////////////////////////////////////////// ///////////////// Gaussian blur ///////////////////////////////////////////////////////////////////////// void fill_kernel_5x5_Gaussian_blur(double** mat, int n, int m, double scalar_kernel=1){ // 1 4 6 4 1 // 4 16 24 16 4 //(-1/256) // 6 24 -476 24 6 // 4 16 24 16 4 // 1 4 6 4 1 mat[0][0]=1; mat[0][1]=4 ; mat[0][2]=6 ; mat[0][3]=4 ; mat[0][4]=1; mat[1][0]=4; mat[1][1]=16; mat[1][2]=24 ; mat[1][3]=16; mat[1][4]=4; mat[2][0]=6; mat[2][1]=24; mat[2][2]=-476; mat[2][3]=24; mat[2][4]=6; mat[3][0]=4; mat[3][1]=16; mat[3][2]=24 ; mat[3][3]=16; mat[3][4]=4; mat[4][0]=1; mat[4][1]=4 ; mat[4][2]=6 ; mat[4][3]=4 ; mat[4][4]=1; printf("2222xxxxxxx %.25f\n",scalar_kernel); for(int i=0 ; i<n ; i++){ for(int j=0 ; j<m ; j++){ mat[i][j] = scalar_kernel*mat[i][j]; } } } void create_kernell_static(double**& mat, double**& d_mat, double**& dd_mat, int n_rows, int n_cols, double scalar_kernel=1){ int i; mat = (double** )malloc(sizeof(double*) * n_rows ); mat[0] = (double* )malloc(sizeof(double ) * n_rows * n_cols ); for( i=1 ; i<n_rows ; i++ ){ mat[i] = mat[i-1]+n_cols; } //fill_kernel_3x3_Edge_detection(mat,n_rows,n_cols, scalar_kernel); fill_kernel_3x3_Sharpen(mat,n_rows,n_cols, scalar_kernel); //fill_kernel_5x5_Gaussian_blur(mat,n_rows,n_cols, scalar_kernel); int size_row = sizeof(double*) * n_rows; d_mat = (double**) malloc(size_row); cudaMalloc((void**)& d_mat[0], sizeof(double) * n_rows * n_cols ); cudaMemcpy( d_mat[0], mat[0], sizeof(double) * n_rows * n_cols ,cudaMemcpyHostToDevice); for( i=1 ; i<n_rows ; i++ ){ d_mat[i] = (d_mat[0]+i*n_cols); } cudaMalloc((void***)& dd_mat, size_row ); cudaMemcpy( dd_mat, d_mat, size_row, cudaMemcpyHostToDevice ); } int main(int argc, char *argv[]){ printf("//////////////////////////////////\n"); char temp1[350]; strcpy (temp1 , argv[1]); const char* img_input_name = temp1; char temp2[150]; strcpy (temp2 , argv[1]); strcat (temp2 , ".out.random.kernel.sharpen.pgm"); const char* img_output_name = temp2; printf ("name in: %s\n",img_input_name); printf ("name out: %s\n",img_output_name); string title1,title2; char rows[15]; char cols[15]; char max_val[15]; int n_rows = -1; int n_cols = -1; //int max_value = -1; ///////////////////////////////////////////////////////////// ifstream myReadFile; myReadFile.open(img_input_name); char out_temp[100]; int** mat_a; if (myReadFile.is_open()){ std::getline(myReadFile,title1); std::getline(myReadFile,title2); myReadFile >> cols; n_cols = atoi(cols); //n_cols = 15; //cout << n_cols << endl; myReadFile >> rows; n_rows = atoi(rows); //n_rows = 15; //cout << n_rows << endl; myReadFile >> max_val; //max_value = atoi(max_val); //cout << max_value << endl; ///////////////////////////////////////////////////////////// mat_a = (int** )malloc(sizeof(int*) * n_rows ); mat_a[0] = (int* )malloc(sizeof(int ) * n_rows * n_cols ); for( int i=1 ; i<n_rows ; i++ ){ mat_a[i] = mat_a[i-1]+n_cols; } ///////////////////////////////////////////////////////////// int n_temp; for(int i=0 ; i<n_rows ; i++){ for(int j=0 ; j<n_cols ; j++){ if(!myReadFile.eof()){ myReadFile >> out_temp; n_temp = atoi(out_temp); mat_a[i][j] = n_temp; //cout << n_temp << endl; } } } } myReadFile.close(); ///////////////////////////////////////////////////// int n_rows_a = n_rows; int n_cols_a = 
n_cols; int n_rows_b = 3; //n_kernel int n_cols_b = 3; //n_kernel //double scalar_kernel = (-1)/(double)256; //escalar_kernel double scalar_kernel = 1; //solo con static_kernel //printf("escalar_kernel: %f\n",scalar_kernel); int n_rows_c = n_rows; int n_cols_c = n_cols; //int** mat_a; int** d_mat_a; int** dd_mat_a; //int** mat_a; int** d_mat_a; int** dd_mat_a; double** mat_b; double** d_mat_b; double** dd_mat_b; int** mat_c; int** d_mat_c; int** dd_mat_c; create_copy( mat_a, d_mat_a, dd_mat_a, n_rows_a, n_cols_a); //create( mat_a, d_mat_a, dd_mat_a, n_rows_a, n_cols_a ); create_kernell_static( mat_b, d_mat_b, dd_mat_b, n_rows_b, n_cols_b, scalar_kernel ); //create_kernell_random( mat_b, d_mat_b, dd_mat_b, n_rows_b, n_cols_b ); create( mat_c, d_mat_c, dd_mat_c, n_rows_c, n_cols_c, 0 ); ///////////////////////////////////////// dim3 blockNum(WIDTH_TILE,WIDTH_TILE,1); dim3 grid((int)(n_cols_c-1+blockNum.x)/blockNum.x,(int)(n_rows_c-1+blockNum.y)/blockNum.y,1); printf("ty: %d, tx: %d\n",(int)(n_rows_c-1+blockNum.y)/blockNum.y, (int)(n_cols_c-1+blockNum.x)/blockNum.x); printf("grid_row: %d, grid_col: %d\n",grid.x , grid.y ); //////////////////////////////////////////////////// convolution<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c); //convolution_complete<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c); //matrix_mult_shared<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c); //matrix_mult<<<grid,blockNum>>>(dd_mat_a, n_rows_a, n_cols_a, dd_mat_b, n_rows_b, n_cols_b, dd_mat_c, n_rows_c, n_cols_c); ///////////////////////////////////////////////////// cudaMemcpy(mat_c[0],d_mat_c[0],sizeof(int)*n_rows_c*n_cols_c,cudaMemcpyDeviceToHost); //printf("//////////////////////////////////\n"); //printf("//////////////////////////////////\n"); //print(mat_a,n_rows_a,n_cols_a); printf("//////// KERNELL SHARPEN //////////\n"); print2(mat_b,n_rows_b,n_cols_b); printf("//////////////////////////////////\n"); //print(mat_c,n_rows_c,n_cols_c); ////////////////////////////////////////////// double max_matrix = max_value_matrix(mat_c, n_rows_c, n_cols_c); //printf("<<<<<<<<<<<<<<<<<<<<<%f\n",max_matrix); ofstream myfile; myfile.open (img_output_name); myfile << title1 <<endl; myfile << title2 <<endl; myfile << n_cols_c <<" "<< n_rows_c <<endl; //myfile << max_value <<endl; myfile << max_matrix <<endl; for(int i=0 ; i<n_rows_c ; i++){ for(int j=0 ; j<n_cols_c ; j++){ myfile << mat_c[i][j] <<endl; } } myfile.close(); ////////////////////////////////////////////// cudaFree(dd_mat_a); cudaFree(dd_mat_b); cudaFree(dd_mat_c); cudaFree(d_mat_a); cudaFree(d_mat_b); cudaFree(d_mat_c); free(mat_a); free(mat_b); free(mat_c); return 0; }
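The pair above (a3bbb640….cu and its hipified counterpart) differs only in the kernel-launch syntax and the runtime-API prefixes (cudaMalloc/hipMalloc, cudaMemcpy/hipMemcpy, cudaFree/hipFree, cudaMemcpyHostToDevice/hipMemcpyHostToDevice). Below is a minimal standalone sketch of that mapping; the scale kernel and its arguments are made up for illustration and are not taken from the convolution sources.

// Sketch only: CUDA form on the left of each comment, the hipify rewrite noted on the right.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale(int* data, int n, int factor) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

int main() {
    const int n = 256;
    int h[n];
    for (int i = 0; i < n; ++i) h[i] = i;

    int* d = nullptr;
    cudaMalloc((void**)&d, n * sizeof(int));                        // hipMalloc in the .hip row
    cudaMemcpy(d, h, n * sizeof(int), cudaMemcpyHostToDevice);      // hipMemcpy / hipMemcpyHostToDevice

    dim3 block(64), grid((n + block.x - 1) / block.x);
    scale<<<grid, block>>>(d, n, 2);                                // hipLaunchKernelGGL((scale), dim3(grid), dim3(block), 0, 0, d, n, 2);

    cudaMemcpy(h, d, n * sizeof(int), cudaMemcpyDeviceToHost);      // hipMemcpyDeviceToHost
    cudaFree(d);                                                    // hipFree
    printf("h[10] = %d\n", h[10]);                                  // expected: 20
    return 0;
}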
7baba92555961c93ec8ddcf980370f51f3bb416e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <string.h> #include <time.h> #include <math.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <sys/time.h> double cpuSecond() { struct timeval tp; gettimeofday(&tp, NULL); return ((double) tp.tv_sec + (double) tp.tv_usec * 1.e-6); } #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ hipGetErrorString(error)); \ exit(1); \ } \ } void identityData(int* I, int nElem) { for (int i = 0; i < nElem; i++) { I[i] = i; } } void initialData(float *ip, int size){ time_t t; srand((unsigned int) time (&t)); for (int i = 0; i < size; i++){ ip[i] = (float)(rand() & 0xFF) / 10.0f; } } void initialDataInt(int *ip, int size){ time_t t; srand((unsigned int) time (&t)); for (int i = 0; i < size; i++){ ip[i] = floor((rand() & 0xFF) / 10.0f); } } void sumArraysOnHost(float *A, float *B, float *C, const int N) { for (int idx = 0; idx < N; idx++) { C[idx] = A[idx] + B[idx]; } } __global__ void sumArraysOnGpu(float *A, float *B, float *C, int* I, int* R, int strike, const int N) { __shared__ float smem[512]; // nmero de conflitos int conflicts = 6; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { smem[threadIdx.x] += i; C[i] = A[i] + B[i] + smem[(threadIdx.x * conflicts) % blockDim.x]; } } int main(int argc, char**argv) { // Configura tamanho dos vetores int nElem = 100 * 1.e6; int strike = 1; printf("Tamanho dos vetores: %d, Strike: %d\n", nElem, strike); // Alocando memoria na CPU size_t nBytes = nElem * sizeof(float); float *h_A, *h_B, *hostRef, *gpuRef; int *R, *I; h_A = (float *) malloc(nBytes); h_B = (float *) malloc(nBytes); R = (int *) malloc(nBytes); I = (int *) malloc(nBytes); hostRef = (float *) malloc(nBytes); gpuRef = (float *) malloc(nBytes); initialData(h_A, nElem); initialData(h_B, nElem); initialDataInt(R, nElem); identityData(I, nElem); // Alocando memoria global (GPU) float *d_A, *d_B, *d_C; CHECK(hipMalloc((float **)&d_A, nBytes)); CHECK(hipMalloc((float **)&d_B, nBytes)); CHECK(hipMalloc((float **)&d_C, nBytes)); // Transferindo dados da CPU pra GPU CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice)); // CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice)); // Invocando o Kernel na CPU int iLen = 512; dim3 block(iLen); dim3 grid((nElem + block.x - 1) / block.x); hipLaunchKernelGGL(( sumArraysOnGpu), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, I, R, strike, nElem); // Copia os resultados do Kernel de volta pra CPU CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); // Libera memoria da GPU CHECK(hipFree(d_A)); CHECK(hipFree(d_B)); CHECK(hipFree(d_C)); // Libera memria da CPU free(h_A); free(h_B); free(R); free(I); free(hostRef); free(gpuRef); hipDeviceReset(); return 0; }
7baba92555961c93ec8ddcf980370f51f3bb416e.cu
#include <stdlib.h> #include <string.h> #include <time.h> #include <math.h> #include <stdio.h> #include <cuda_runtime.h> #include <sys/time.h> double cpuSecond() { struct timeval tp; gettimeofday(&tp, NULL); return ((double) tp.tv_sec + (double) tp.tv_usec * 1.e-6); } #define CHECK(call) \ { \ const cudaError_t error = call; \ if (error != cudaSuccess) { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ cudaGetErrorString(error)); \ exit(1); \ } \ } void identityData(int* I, int nElem) { for (int i = 0; i < nElem; i++) { I[i] = i; } } void initialData(float *ip, int size){ time_t t; srand((unsigned int) time (&t)); for (int i = 0; i < size; i++){ ip[i] = (float)(rand() & 0xFF) / 10.0f; } } void initialDataInt(int *ip, int size){ time_t t; srand((unsigned int) time (&t)); for (int i = 0; i < size; i++){ ip[i] = floor((rand() & 0xFF) / 10.0f); } } void sumArraysOnHost(float *A, float *B, float *C, const int N) { for (int idx = 0; idx < N; idx++) { C[idx] = A[idx] + B[idx]; } } __global__ void sumArraysOnGpu(float *A, float *B, float *C, int* I, int* R, int strike, const int N) { __shared__ float smem[512]; // número de conflitos int conflicts = 6; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { smem[threadIdx.x] += i; C[i] = A[i] + B[i] + smem[(threadIdx.x * conflicts) % blockDim.x]; } } int main(int argc, char**argv) { // Configura tamanho dos vetores int nElem = 100 * 1.e6; int strike = 1; printf("Tamanho dos vetores: %d, Strike: %d\n", nElem, strike); // Alocando memoria na CPU size_t nBytes = nElem * sizeof(float); float *h_A, *h_B, *hostRef, *gpuRef; int *R, *I; h_A = (float *) malloc(nBytes); h_B = (float *) malloc(nBytes); R = (int *) malloc(nBytes); I = (int *) malloc(nBytes); hostRef = (float *) malloc(nBytes); gpuRef = (float *) malloc(nBytes); initialData(h_A, nElem); initialData(h_B, nElem); initialDataInt(R, nElem); identityData(I, nElem); // Alocando memoria global (GPU) float *d_A, *d_B, *d_C; CHECK(cudaMalloc((float **)&d_A, nBytes)); CHECK(cudaMalloc((float **)&d_B, nBytes)); CHECK(cudaMalloc((float **)&d_C, nBytes)); // Transferindo dados da CPU pra GPU CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice)); // CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice)); // Invocando o Kernel na CPU int iLen = 512; dim3 block(iLen); dim3 grid((nElem + block.x - 1) / block.x); sumArraysOnGpu<<<grid, block>>>(d_A, d_B, d_C, I, R, strike, nElem); // Copia os resultados do Kernel de volta pra CPU CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); // Libera memoria da GPU CHECK(cudaFree(d_A)); CHECK(cudaFree(d_B)); CHECK(cudaFree(d_C)); // Libera memória da CPU free(h_A); free(h_B); free(R); free(I); free(hostRef); free(gpuRef); cudaDeviceReset(); return 0; }
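The second pair adds a CHECK error-handling macro, which hipify maps one-to-one (cudaError_t to hipError_t, cudaSuccess to hipSuccess, cudaGetErrorString to hipGetErrorString); the kernel itself appears to be a shared-memory bank-conflict experiment, indexing smem with a stride via smem[(threadIdx.x * conflicts) % blockDim.x]. A minimal, self-contained usage sketch of that macro follows; the macro body is repeated verbatim from the .cu row so the snippet compiles on its own, and the main() around it is ours.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

#define CHECK(call)                                                     \
{                                                                       \
    const cudaError_t error = call;                                     \
    if (error != cudaSuccess) {                                         \
        fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);          \
        fprintf(stderr, "code: %d, reason: %s\n", error,                \
                cudaGetErrorString(error));                             \
        exit(1);                                                        \
    }                                                                   \
}

int main() {
    float* d = nullptr;
    CHECK(cudaMalloc((void**)&d, 1024 * sizeof(float)));  // aborts with file/line on failure
    CHECK(cudaFree(d));
    return 0;
}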
0f0feece27c129e0697247f0666b4e0c5313a5b2.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include <iostream> #include "common.h" #include "thrust.h" namespace StreamCompaction { namespace Thrust { /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { // TODO use `thrust::exclusive_scan` // example: for device_vectors dv_in and dv_out: // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); //cuda event init hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float milliseconds = 0; hipEventRecord(start); thrust::exclusive_scan(idata, idata + n, odata); // in-place scan hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); std::cout << "thrust method: " << milliseconds << "ms" << std::endl; } } }
0f0feece27c129e0697247f0666b4e0c5313a5b2.cu
#include <cuda.h> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include <iostream> #include "common.h" #include "thrust.h" namespace StreamCompaction { namespace Thrust { /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { // TODO use `thrust::exclusive_scan` // example: for device_vectors dv_in and dv_out: // thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin()); //cuda event init cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float milliseconds = 0; cudaEventRecord(start); thrust::exclusive_scan(idata, idata + n, odata); // in-place scan cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); std::cout << "thrust method: " << milliseconds << "ms" << std::endl; } } }
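In the Thrust pair, the thrust::exclusive_scan call itself is untouched by hipify; only the includes and the cudaEvent_*/hipEvent_* timing calls are renamed. Note that the scan as written operates on the raw int* host pointers; the device_vector form mentioned in the file's own TODO comment would look roughly like the sketch below (the names dv_in/dv_out come from that comment, the rest of the snippet is ours).

#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cstdio>

int main() {
    int h_in[5] = {3, 1, 7, 0, 4};
    thrust::device_vector<int> dv_in(h_in, h_in + 5);
    thrust::device_vector<int> dv_out(5);

    // Exclusive prefix sum: out[0] = 0, out[i] = sum of in[0..i-1]
    thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());

    for (int i = 0; i < 5; ++i)
        printf("%d ", (int)dv_out[i]);   // expected: 0 3 4 11 11
    printf("\n");
    return 0;
}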
4eefab2fe39e275eaa9ea9a04c4ecb376bf1d3a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "forcing.h" #definehipLaunchKernelGGL(( GSINGLE) , dim3(1), dim3(1) , 0, 0, void generate_random_numbersfloat *random_real, float *random_imag, float forcing_amp_, float dt); /* The following knobs in forcing_knobs need to be set to turn on forcing: forcing_init = T forcing_amp = 1 forcing_type = "Kz" or "KzImpulse" */ // Set forcing_type to "Kz" KzForcing::KzForcing(Parameters *pars) : pars_(pars) { forcing_amp_ = pars_->forcing_amp; printf("forcing_amp = %f \n", forcing_amp_); } KzForcing::~KzForcing() { } // Langevin forcing done at the end of each timestep in timestepper's advance method void KzForcing::stir(MomentsG *G) { float random_real, random_imag; generate_random_numbers (&random_real, &random_imag, forcing_amp_, pars_->dt); rf.x = random_real; // rf.y = random_imag; rf.y = 0.; switch (pars_->stirf) { case stirs::density : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); break; case stirs::upar : stirring_kernel GSINGLE (rf, G->upar_ptr[0], pars_->forcing_index); break; case stirs::tpar : stirring_kernel GSINGLE (rf*sqrt(2.0), G->tpar_ptr[0], pars_->forcing_index); break; case stirs::tperp : stirring_kernel GSINGLE (rf, G->tprp_ptr[0], pars_->forcing_index); break; case stirs::qpar : stirring_kernel GSINGLE (rf*sqrt(6.0), G->qpar_ptr[0], pars_->forcing_index); break; case stirs::qperp : stirring_kernel GSINGLE (rf*sqrt(2.0), G->qprp_ptr[0], pars_->forcing_index); break; case stirs::ppar : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); stirring_kernel GSINGLE (rf*sqrt(2.0), G->tpar_ptr[0], pars_->forcing_index); break; case stirs::pperp : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); stirring_kernel GSINGLE (rf, G->tprp_ptr[0], pars_->forcing_index); break; } } genForcing::genForcing(Parameters *pars) : pars_(pars) { forcing_amp_ = pars_->forcing_amp; } genForcing::~genForcing() { } void genForcing::stir(MomentsG *G) { float random_real, random_imag; generate_random_numbers (&random_real, &random_imag, forcing_amp_, pars_->dt); rf.x = random_real; rf.y = random_imag; switch (pars_->stirf) { case stirs::density : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); break; case stirs::upar : stirring_kernel GSINGLE (rf, G->upar_ptr[0], pars_->forcing_index); break; case stirs::tpar : stirring_kernel GSINGLE (rf*sqrt(2.0), G->tpar_ptr[0], pars_->forcing_index); break; case stirs::tperp : stirring_kernel GSINGLE (rf, G->tprp_ptr[0], pars_->forcing_index); break; case stirs::qpar : stirring_kernel GSINGLE (rf*sqrt(6.0), G->qpar_ptr[0], pars_->forcing_index); break; case stirs::qperp : stirring_kernel GSINGLE (rf*sqrt(2.0), G->qprp_ptr[0], pars_->forcing_index); break; case stirs::ppar : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); stirring_kernel GSINGLE (rf*sqrt(2.0), G->tpar_ptr[0], pars_->forcing_index); break; case stirs::pperp : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); stirring_kernel GSINGLE (rf, G->tprp_ptr[0], pars_->forcing_index); break; } } // Set forcing_type to "KzImpulse" KzForcingImpulse::KzForcingImpulse(Parameters *pars) : KzForcing(pars) { stirring_done = false; } // Langevin forcing done once at the first timestep void KzForcingImpulse::stir(MomentsG *G) { if(stirring_done) { return; } float random_real, random_imag; generate_random_numbers(&random_real, &random_imag, forcing_amp_, pars_->dt); rf.x = random_real; rf.y = random_imag; switch 
(pars_->stirf) { case stirs::density : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); break; case stirs::upar : stirring_kernel GSINGLE (rf, G->upar_ptr[0], pars_->forcing_index); break; case stirs::tpar : stirring_kernel GSINGLE (rf*sqrt(2.0), G->tpar_ptr[0], pars_->forcing_index); break; case stirs::tperp : stirring_kernel GSINGLE (rf, G->tprp_ptr[0], pars_->forcing_index); break; case stirs::qpar : stirring_kernel GSINGLE (rf*sqrt(6.0), G->qpar_ptr[0], pars_->forcing_index); break; case stirs::qperp : stirring_kernel GSINGLE (rf*sqrt(2.0), G->qprp_ptr[0], pars_->forcing_index); break; case stirs::ppar : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); stirring_kernel GSINGLE (rf*sqrt(2.0), G->tpar_ptr[0], pars_->forcing_index); break; case stirs::pperp : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); stirring_kernel GSINGLE (rf, G->tprp_ptr[0], pars_->forcing_index); break; } stirring_done = true; } void generate_random_numbers(float *random_real, float *random_imag, float forcing_amp_, float dt) { // Box-Muller transform to generate random normal variables float ran_amp = ( (float) rand()) / ((float) RAND_MAX + 1.0 ); // dt term in timestepper scheme accounted for in amp float amp = sqrt(abs(forcing_amp_*dt*log(ran_amp))); float phase = M_PI*(2.0*( (float) rand()) / ((float) RAND_MAX + 1.0 ) -1.0); *random_real = amp*cos(phase); *random_imag = amp*sin(phase); }
4eefab2fe39e275eaa9ea9a04c4ecb376bf1d3a3.cu
#include "forcing.h" #define GSINGLE <<< 1, 1 >>> void generate_random_numbers(float *random_real, float *random_imag, float forcing_amp_, float dt); /* The following knobs in forcing_knobs need to be set to turn on forcing: forcing_init = T forcing_amp = 1 forcing_type = "Kz" or "KzImpulse" */ // Set forcing_type to "Kz" KzForcing::KzForcing(Parameters *pars) : pars_(pars) { forcing_amp_ = pars_->forcing_amp; printf("forcing_amp = %f \n", forcing_amp_); } KzForcing::~KzForcing() { } // Langevin forcing done at the end of each timestep in timestepper's advance method void KzForcing::stir(MomentsG *G) { float random_real, random_imag; generate_random_numbers (&random_real, &random_imag, forcing_amp_, pars_->dt); rf.x = random_real; // rf.y = random_imag; rf.y = 0.; switch (pars_->stirf) { case stirs::density : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); break; case stirs::upar : stirring_kernel GSINGLE (rf, G->upar_ptr[0], pars_->forcing_index); break; case stirs::tpar : stirring_kernel GSINGLE (rf*sqrt(2.0), G->tpar_ptr[0], pars_->forcing_index); break; case stirs::tperp : stirring_kernel GSINGLE (rf, G->tprp_ptr[0], pars_->forcing_index); break; case stirs::qpar : stirring_kernel GSINGLE (rf*sqrt(6.0), G->qpar_ptr[0], pars_->forcing_index); break; case stirs::qperp : stirring_kernel GSINGLE (rf*sqrt(2.0), G->qprp_ptr[0], pars_->forcing_index); break; case stirs::ppar : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); stirring_kernel GSINGLE (rf*sqrt(2.0), G->tpar_ptr[0], pars_->forcing_index); break; case stirs::pperp : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); stirring_kernel GSINGLE (rf, G->tprp_ptr[0], pars_->forcing_index); break; } } genForcing::genForcing(Parameters *pars) : pars_(pars) { forcing_amp_ = pars_->forcing_amp; } genForcing::~genForcing() { } void genForcing::stir(MomentsG *G) { float random_real, random_imag; generate_random_numbers (&random_real, &random_imag, forcing_amp_, pars_->dt); rf.x = random_real; rf.y = random_imag; switch (pars_->stirf) { case stirs::density : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); break; case stirs::upar : stirring_kernel GSINGLE (rf, G->upar_ptr[0], pars_->forcing_index); break; case stirs::tpar : stirring_kernel GSINGLE (rf*sqrt(2.0), G->tpar_ptr[0], pars_->forcing_index); break; case stirs::tperp : stirring_kernel GSINGLE (rf, G->tprp_ptr[0], pars_->forcing_index); break; case stirs::qpar : stirring_kernel GSINGLE (rf*sqrt(6.0), G->qpar_ptr[0], pars_->forcing_index); break; case stirs::qperp : stirring_kernel GSINGLE (rf*sqrt(2.0), G->qprp_ptr[0], pars_->forcing_index); break; case stirs::ppar : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); stirring_kernel GSINGLE (rf*sqrt(2.0), G->tpar_ptr[0], pars_->forcing_index); break; case stirs::pperp : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); stirring_kernel GSINGLE (rf, G->tprp_ptr[0], pars_->forcing_index); break; } } // Set forcing_type to "KzImpulse" KzForcingImpulse::KzForcingImpulse(Parameters *pars) : KzForcing(pars) { stirring_done = false; } // Langevin forcing done once at the first timestep void KzForcingImpulse::stir(MomentsG *G) { if(stirring_done) { return; } float random_real, random_imag; generate_random_numbers(&random_real, &random_imag, forcing_amp_, pars_->dt); rf.x = random_real; rf.y = random_imag; switch (pars_->stirf) { case stirs::density : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); break; case 
stirs::upar : stirring_kernel GSINGLE (rf, G->upar_ptr[0], pars_->forcing_index); break; case stirs::tpar : stirring_kernel GSINGLE (rf*sqrt(2.0), G->tpar_ptr[0], pars_->forcing_index); break; case stirs::tperp : stirring_kernel GSINGLE (rf, G->tprp_ptr[0], pars_->forcing_index); break; case stirs::qpar : stirring_kernel GSINGLE (rf*sqrt(6.0), G->qpar_ptr[0], pars_->forcing_index); break; case stirs::qperp : stirring_kernel GSINGLE (rf*sqrt(2.0), G->qprp_ptr[0], pars_->forcing_index); break; case stirs::ppar : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); stirring_kernel GSINGLE (rf*sqrt(2.0), G->tpar_ptr[0], pars_->forcing_index); break; case stirs::pperp : stirring_kernel GSINGLE (rf, G->dens_ptr[0], pars_->forcing_index); stirring_kernel GSINGLE (rf, G->tprp_ptr[0], pars_->forcing_index); break; } stirring_done = true; } void generate_random_numbers(float *random_real, float *random_imag, float forcing_amp_, float dt) { // Box-Muller transform to generate random normal variables float ran_amp = ( (float) rand()) / ((float) RAND_MAX + 1.0 ); // dt term in timestepper scheme accounted for in amp float amp = sqrt(abs(forcing_amp_*dt*log(ran_amp))); float phase = M_PI*(2.0*( (float) rand()) / ((float) RAND_MAX + 1.0 ) -1.0); *random_real = amp*cos(phase); *random_imag = amp*sin(phase); }
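This pair shows a pattern hipify handles badly: a launch configuration hidden behind `#define GSINGLE <<< 1, 1 >>>` and call sites of the form `stirring_kernel GSINGLE (args)`. In the .hip row above, the tool fused its launch rewrite into the #define and into the following declaration (`#definehipLaunchKernelGGL(( GSINGLE) , dim3(1), dim3(1) , 0, 0,` and `void generate_random_numbersfloat *random_real, ...`), which does not compile. Since hipcc also accepts the chevron syntax, keeping the original macro is likely the simplest repair; an explicit per-platform variant would look roughly like the sketch below. LAUNCH_SINGLE is our name, not part of either row, and call sites would become e.g. `LAUNCH_SINGLE(stirring_kernel, rf, G->dens_ptr[0], pars_->forcing_index)`.

#ifdef __HIPCC__
  #include <hip/hip_runtime.h>
  // hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedMemBytes, stream, args...)
  #define LAUNCH_SINGLE(kernel, ...) \
      hipLaunchKernelGGL(kernel, dim3(1), dim3(1), 0, 0, __VA_ARGS__)
#else
  #define LAUNCH_SINGLE(kernel, ...) kernel<<<1, 1>>>(__VA_ARGS__)
#endif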
0187d740c0d2f252ac1bc3d32816b97182b58543.hip
// !!! This is a file automatically generated by hipify!!! #include "light_transport_common.cuh" namespace VLR { // Context-scope Variables rtDeclareVariable(optix::uint2, pv_imageSize, , ); rtDeclareVariable(uint32_t, pv_numAccumFrames, , ); rtDeclareVariable(ProgSigSampleLensPosition, pv_progSampleLensPosition, , ); rtDeclareVariable(ProgSigSampleIDF, pv_progSampleIDF, , ); rtBuffer<KernelRNG, 2> pv_rngBuffer; rtBuffer<SpectrumStorage, 2> pv_outputBuffer; // Common Closest Hit Program for All Primitive Types and Materials RT_PROGRAM void pathTracingIteration() { KernelRNG &rng = sm_payload.rng; WavelengthSamples &wls = sm_payload.wls; SurfacePoint surfPt; float hypAreaPDF; calcSurfacePoint(&surfPt, &hypAreaPDF); const SurfaceMaterialDescriptor matDesc = pv_materialDescriptorBuffer[pv_materialIndex]; BSDF bsdf(matDesc, surfPt, wls); EDF edf(matDesc, surfPt, wls); Vector3D dirOutLocal = surfPt.shadingFrame.toLocal(-asVector3D(sm_ray.direction)); // implicit light sampling SampledSpectrum spEmittance = edf.evaluateEmittance(); if (spEmittance.hasNonZero()) { SampledSpectrum Le = spEmittance * edf.evaluate(EDFQuery(), dirOutLocal); float MISWeight = 1.0f; if (!sm_payload.prevSampledType.isDelta() && sm_ray.ray_type != RayType::Primary) { float bsdfPDF = sm_payload.prevDirPDF; float dist2 = surfPt.calcSquaredDistance(asPoint3D(sm_ray.origin)); float lightPDF = pv_importance / getSumLightImportances() * hypAreaPDF * dist2 / ::fabs(dirOutLocal.z); MISWeight = (bsdfPDF * bsdfPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF); } sm_payload.contribution += sm_payload.alpha * Le * MISWeight; } if (surfPt.atInfinity || sm_payload.maxLengthTerminate) return; // Russian roulette float continueProb = std::fmin(sm_payload.alpha.importance(wls.selectedLambdaIndex()) / sm_payload.initImportance, 1.0f); if (rng.getFloat0cTo1o() >= continueProb) return; sm_payload.alpha /= continueProb; Normal3D geomNormalLocal = surfPt.shadingFrame.toLocal(surfPt.geometricNormal); BSDFQuery fsQuery(dirOutLocal, geomNormalLocal, DirectionType::All(), wls); // Next Event Estimation (explicit light sampling) if (bsdf.hasNonDelta()) { SurfaceLight light; float lightProb; float uPrim; selectSurfaceLight(rng.getFloat0cTo1o(), &light, &lightProb, &uPrim); SurfaceLightPosSample lpSample(uPrim, rng.getFloat0cTo1o(), rng.getFloat0cTo1o()); SurfaceLightPosQueryResult lpResult; light.sample(lpSample, &lpResult); const SurfaceMaterialDescriptor lightMatDesc = pv_materialDescriptorBuffer[lpResult.materialIndex]; EDF ledf(lightMatDesc, lpResult.surfPt, wls); SampledSpectrum M = ledf.evaluateEmittance(); Vector3D shadowRayDir; float squaredDistance; float fractionalVisibility; if (M.hasNonZero() && testVisibility(surfPt, lpResult.surfPt, &shadowRayDir, &squaredDistance, &fractionalVisibility)) { Vector3D shadowRayDir_l = lpResult.surfPt.toLocal(-shadowRayDir); Vector3D shadowRayDir_sn = surfPt.toLocal(shadowRayDir); SampledSpectrum Le = M * ledf.evaluate(EDFQuery(), shadowRayDir_l); float lightPDF = lightProb * lpResult.areaPDF; SampledSpectrum fs = bsdf.evaluate(fsQuery, shadowRayDir_sn); float cosLight = lpResult.surfPt.calcCosTerm(-shadowRayDir); float bsdfPDF = bsdf.evaluatePDF(fsQuery, shadowRayDir_sn) * cosLight / squaredDistance; float MISWeight = 1.0f; if (!lpResult.posType.isDelta() && !std::isinf(lightPDF)) MISWeight = (lightPDF * lightPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF); float G = fractionalVisibility * absDot(shadowRayDir_sn, geomNormalLocal) * cosLight / squaredDistance; float scalarCoeff = G * MISWeight / 
lightPDF; // contributionCUDA sm_payload.contribution += sm_payload.alpha * Le * fs * scalarCoeff; } } BSDFSample sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), rng.getFloat0cTo1o()); BSDFQueryResult fsResult; SampledSpectrum fs = bsdf.sample(fsQuery, sample, &fsResult); if (fs == SampledSpectrum::Zero() || fsResult.dirPDF == 0.0f) return; if (fsResult.sampledType.isDispersive() && !wls.singleIsSelected()) { fsResult.dirPDF /= SampledSpectrum::NumComponents(); wls.setSingleIsSelected(); } float cosFactor = dot(fsResult.dirLocal, geomNormalLocal); sm_payload.alpha *= fs * (::fabs(cosFactor) / fsResult.dirPDF); Vector3D dirIn = surfPt.fromLocal(fsResult.dirLocal); sm_payload.origin = offsetRayOrigin(surfPt.position, cosFactor > 0.0f ? surfPt.geometricNormal : -surfPt.geometricNormal); sm_payload.direction = dirIn; sm_payload.prevDirPDF = fsResult.dirPDF; sm_payload.prevSampledType = fsResult.sampledType; sm_payload.terminate = false; } // JP: Intersection/Bounding Box ProgramClosest Hit Program // OptiXBVHLBVHAABB // Miss Program RT_PROGRAM void pathTracingMiss() { if (pv_envLightDescriptor.importance == 0) return; Vector3D direction = asVector3D(sm_ray.direction); float phi, theta; direction.toPolarYUp(&theta, &phi); float sinPhi, cosPhi; VLR::sincos(phi, &sinPhi, &cosPhi); Vector3D texCoord0Dir = normalize(Vector3D(-cosPhi, 0.0f, -sinPhi)); ReferenceFrame shadingFrame; shadingFrame.x = texCoord0Dir; shadingFrame.z = -direction; shadingFrame.y = cross(shadingFrame.z, shadingFrame.x); SurfacePoint surfPt; surfPt.position = Point3D(direction.x, direction.y, direction.z); surfPt.shadingFrame = shadingFrame; surfPt.isPoint = false; surfPt.atInfinity = true; surfPt.geometricNormal = -direction; surfPt.u = phi; surfPt.v = theta; phi += pv_envLightDescriptor.body.asEnvironmentLight.rotationPhi; phi = phi - ::floor(phi / (2 * M_PIf)) * 2 * M_PIf; surfPt.texCoord = TexCoord2D(phi / (2 * M_PIf), theta / M_PIf); float hypAreaPDF = evaluateEnvironmentAreaPDF(phi, theta); const SurfaceMaterialDescriptor matDesc = pv_materialDescriptorBuffer[pv_envLightDescriptor.body.asEnvironmentLight.materialIndex]; EDF edf(matDesc, surfPt, sm_payload.wls); Vector3D dirOutLocal = surfPt.shadingFrame.toLocal(-asVector3D(sm_ray.direction)); // implicit light sampling SampledSpectrum spEmittance = edf.evaluateEmittance(); if (spEmittance.hasNonZero()) { SampledSpectrum Le = spEmittance * edf.evaluate(EDFQuery(), dirOutLocal); float MISWeight = 1.0f; if (!sm_payload.prevSampledType.isDelta() && sm_ray.ray_type != RayType::Primary) { float bsdfPDF = sm_payload.prevDirPDF; float dist2 = surfPt.calcSquaredDistance(asPoint3D(sm_ray.origin)); float lightPDF = pv_envLightDescriptor.importance / getSumLightImportances() * hypAreaPDF * dist2 / ::fabs(dirOutLocal.z); MISWeight = (bsdfPDF * bsdfPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF); } sm_payload.contribution += sm_payload.alpha * Le * MISWeight; } } // Common Ray Generation Program for All Camera Types RT_PROGRAM void pathTracing() { KernelRNG rng = pv_rngBuffer[sm_launchIndex]; optix::float2 p = make_float2(sm_launchIndex.x + rng.getFloat0cTo1o(), sm_launchIndex.y + rng.getFloat0cTo1o()); float selectWLPDF; WavelengthSamples wls = WavelengthSamples::createWithEqualOffsets(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), &selectWLPDF); LensPosSample We0Sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o()); LensPosQueryResult We0Result; SampledSpectrum We0 = pv_progSampleLensPosition(wls, We0Sample, &We0Result); IDFSample We1Sample(p.x / pv_imageSize.x, p.y / pv_imageSize.y); 
IDFQueryResult We1Result; SampledSpectrum We1 = pv_progSampleIDF(We0Result.surfPt, wls, We1Sample, &We1Result); Vector3D rayDir = We0Result.surfPt.fromLocal(We1Result.dirLocal); SampledSpectrum alpha = (We0 * We1) * (We0Result.surfPt.calcCosTerm(rayDir) / (We0Result.areaPDF * We1Result.dirPDF * selectWLPDF)); optix::Ray ray = optix::make_Ray(asOptiXType(We0Result.surfPt.position), asOptiXType(rayDir), RayType::Primary, 0.0f, FLT_MAX); Payload payload; payload.maxLengthTerminate = false; payload.rng = rng; payload.initImportance = alpha.importance(wls.selectedLambdaIndex()); payload.wls = wls; payload.alpha = alpha; payload.contribution = SampledSpectrum::Zero(); const uint32_t MaxPathLength = 25; uint32_t pathLength = 0; while (true) { payload.terminate = true; ++pathLength; if (pathLength >= MaxPathLength) payload.maxLengthTerminate = true; rtTrace(pv_topGroup, ray, payload); if (payload.terminate) break; VLRAssert(pathLength < MaxPathLength, "Path should be terminated... Something went wrong..."); ray = optix::make_Ray(asOptiXType(payload.origin), asOptiXType(payload.direction), RayType::Scattered, 0.0f, FLT_MAX); } pv_rngBuffer[sm_launchIndex] = payload.rng; if (!payload.contribution.allFinite()) { vlrprintf("Pass %u, (%u, %u): Not a finite value.\n", pv_numAccumFrames, sm_launchIndex.x, sm_launchIndex.y); return; } if (pv_numAccumFrames == 1) pv_outputBuffer[sm_launchIndex].reset(); pv_outputBuffer[sm_launchIndex].add(wls, payload.contribution); } // Exception Program RT_PROGRAM void exception() { //uint32_t code = rtGetExceptionCode(); rtPrintExceptionDetails(); } }
0187d740c0d2f252ac1bc3d32816b97182b58543.cu
#include "light_transport_common.cuh" namespace VLR { // Context-scope Variables rtDeclareVariable(optix::uint2, pv_imageSize, , ); rtDeclareVariable(uint32_t, pv_numAccumFrames, , ); rtDeclareVariable(ProgSigSampleLensPosition, pv_progSampleLensPosition, , ); rtDeclareVariable(ProgSigSampleIDF, pv_progSampleIDF, , ); rtBuffer<KernelRNG, 2> pv_rngBuffer; rtBuffer<SpectrumStorage, 2> pv_outputBuffer; // Common Closest Hit Program for All Primitive Types and Materials RT_PROGRAM void pathTracingIteration() { KernelRNG &rng = sm_payload.rng; WavelengthSamples &wls = sm_payload.wls; SurfacePoint surfPt; float hypAreaPDF; calcSurfacePoint(&surfPt, &hypAreaPDF); const SurfaceMaterialDescriptor matDesc = pv_materialDescriptorBuffer[pv_materialIndex]; BSDF bsdf(matDesc, surfPt, wls); EDF edf(matDesc, surfPt, wls); Vector3D dirOutLocal = surfPt.shadingFrame.toLocal(-asVector3D(sm_ray.direction)); // implicit light sampling SampledSpectrum spEmittance = edf.evaluateEmittance(); if (spEmittance.hasNonZero()) { SampledSpectrum Le = spEmittance * edf.evaluate(EDFQuery(), dirOutLocal); float MISWeight = 1.0f; if (!sm_payload.prevSampledType.isDelta() && sm_ray.ray_type != RayType::Primary) { float bsdfPDF = sm_payload.prevDirPDF; float dist2 = surfPt.calcSquaredDistance(asPoint3D(sm_ray.origin)); float lightPDF = pv_importance / getSumLightImportances() * hypAreaPDF * dist2 / std::fabs(dirOutLocal.z); MISWeight = (bsdfPDF * bsdfPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF); } sm_payload.contribution += sm_payload.alpha * Le * MISWeight; } if (surfPt.atInfinity || sm_payload.maxLengthTerminate) return; // Russian roulette float continueProb = std::fmin(sm_payload.alpha.importance(wls.selectedLambdaIndex()) / sm_payload.initImportance, 1.0f); if (rng.getFloat0cTo1o() >= continueProb) return; sm_payload.alpha /= continueProb; Normal3D geomNormalLocal = surfPt.shadingFrame.toLocal(surfPt.geometricNormal); BSDFQuery fsQuery(dirOutLocal, geomNormalLocal, DirectionType::All(), wls); // Next Event Estimation (explicit light sampling) if (bsdf.hasNonDelta()) { SurfaceLight light; float lightProb; float uPrim; selectSurfaceLight(rng.getFloat0cTo1o(), &light, &lightProb, &uPrim); SurfaceLightPosSample lpSample(uPrim, rng.getFloat0cTo1o(), rng.getFloat0cTo1o()); SurfaceLightPosQueryResult lpResult; light.sample(lpSample, &lpResult); const SurfaceMaterialDescriptor lightMatDesc = pv_materialDescriptorBuffer[lpResult.materialIndex]; EDF ledf(lightMatDesc, lpResult.surfPt, wls); SampledSpectrum M = ledf.evaluateEmittance(); Vector3D shadowRayDir; float squaredDistance; float fractionalVisibility; if (M.hasNonZero() && testVisibility(surfPt, lpResult.surfPt, &shadowRayDir, &squaredDistance, &fractionalVisibility)) { Vector3D shadowRayDir_l = lpResult.surfPt.toLocal(-shadowRayDir); Vector3D shadowRayDir_sn = surfPt.toLocal(shadowRayDir); SampledSpectrum Le = M * ledf.evaluate(EDFQuery(), shadowRayDir_l); float lightPDF = lightProb * lpResult.areaPDF; SampledSpectrum fs = bsdf.evaluate(fsQuery, shadowRayDir_sn); float cosLight = lpResult.surfPt.calcCosTerm(-shadowRayDir); float bsdfPDF = bsdf.evaluatePDF(fsQuery, shadowRayDir_sn) * cosLight / squaredDistance; float MISWeight = 1.0f; if (!lpResult.posType.isDelta() && !std::isinf(lightPDF)) MISWeight = (lightPDF * lightPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF); float G = fractionalVisibility * absDot(shadowRayDir_sn, geomNormalLocal) * cosLight / squaredDistance; float scalarCoeff = G * MISWeight / lightPDF; // 直接contributionの計算式に入れるとCUDAのバグなのかおかしな結果になる。 
sm_payload.contribution += sm_payload.alpha * Le * fs * scalarCoeff; } } BSDFSample sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), rng.getFloat0cTo1o()); BSDFQueryResult fsResult; SampledSpectrum fs = bsdf.sample(fsQuery, sample, &fsResult); if (fs == SampledSpectrum::Zero() || fsResult.dirPDF == 0.0f) return; if (fsResult.sampledType.isDispersive() && !wls.singleIsSelected()) { fsResult.dirPDF /= SampledSpectrum::NumComponents(); wls.setSingleIsSelected(); } float cosFactor = dot(fsResult.dirLocal, geomNormalLocal); sm_payload.alpha *= fs * (std::fabs(cosFactor) / fsResult.dirPDF); Vector3D dirIn = surfPt.fromLocal(fsResult.dirLocal); sm_payload.origin = offsetRayOrigin(surfPt.position, cosFactor > 0.0f ? surfPt.geometricNormal : -surfPt.geometricNormal); sm_payload.direction = dirIn; sm_payload.prevDirPDF = fsResult.dirPDF; sm_payload.prevSampledType = fsResult.sampledType; sm_payload.terminate = false; } // JP: 本当は無限大の球のIntersection/Bounding Box Programを使用して環境光に関する処理もClosest Hit Programで統一的に行いたい。 // が、OptiXのBVHビルダーがLBVHベースなので無限大のAABBを生成するのは危険。 // 仕方なくMiss Programで環境光を処理する。 RT_PROGRAM void pathTracingMiss() { if (pv_envLightDescriptor.importance == 0) return; Vector3D direction = asVector3D(sm_ray.direction); float phi, theta; direction.toPolarYUp(&theta, &phi); float sinPhi, cosPhi; VLR::sincos(phi, &sinPhi, &cosPhi); Vector3D texCoord0Dir = normalize(Vector3D(-cosPhi, 0.0f, -sinPhi)); ReferenceFrame shadingFrame; shadingFrame.x = texCoord0Dir; shadingFrame.z = -direction; shadingFrame.y = cross(shadingFrame.z, shadingFrame.x); SurfacePoint surfPt; surfPt.position = Point3D(direction.x, direction.y, direction.z); surfPt.shadingFrame = shadingFrame; surfPt.isPoint = false; surfPt.atInfinity = true; surfPt.geometricNormal = -direction; surfPt.u = phi; surfPt.v = theta; phi += pv_envLightDescriptor.body.asEnvironmentLight.rotationPhi; phi = phi - std::floor(phi / (2 * M_PIf)) * 2 * M_PIf; surfPt.texCoord = TexCoord2D(phi / (2 * M_PIf), theta / M_PIf); float hypAreaPDF = evaluateEnvironmentAreaPDF(phi, theta); const SurfaceMaterialDescriptor matDesc = pv_materialDescriptorBuffer[pv_envLightDescriptor.body.asEnvironmentLight.materialIndex]; EDF edf(matDesc, surfPt, sm_payload.wls); Vector3D dirOutLocal = surfPt.shadingFrame.toLocal(-asVector3D(sm_ray.direction)); // implicit light sampling SampledSpectrum spEmittance = edf.evaluateEmittance(); if (spEmittance.hasNonZero()) { SampledSpectrum Le = spEmittance * edf.evaluate(EDFQuery(), dirOutLocal); float MISWeight = 1.0f; if (!sm_payload.prevSampledType.isDelta() && sm_ray.ray_type != RayType::Primary) { float bsdfPDF = sm_payload.prevDirPDF; float dist2 = surfPt.calcSquaredDistance(asPoint3D(sm_ray.origin)); float lightPDF = pv_envLightDescriptor.importance / getSumLightImportances() * hypAreaPDF * dist2 / std::fabs(dirOutLocal.z); MISWeight = (bsdfPDF * bsdfPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF); } sm_payload.contribution += sm_payload.alpha * Le * MISWeight; } } // Common Ray Generation Program for All Camera Types RT_PROGRAM void pathTracing() { KernelRNG rng = pv_rngBuffer[sm_launchIndex]; optix::float2 p = make_float2(sm_launchIndex.x + rng.getFloat0cTo1o(), sm_launchIndex.y + rng.getFloat0cTo1o()); float selectWLPDF; WavelengthSamples wls = WavelengthSamples::createWithEqualOffsets(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), &selectWLPDF); LensPosSample We0Sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o()); LensPosQueryResult We0Result; SampledSpectrum We0 = pv_progSampleLensPosition(wls, We0Sample, &We0Result); IDFSample 
We1Sample(p.x / pv_imageSize.x, p.y / pv_imageSize.y); IDFQueryResult We1Result; SampledSpectrum We1 = pv_progSampleIDF(We0Result.surfPt, wls, We1Sample, &We1Result); Vector3D rayDir = We0Result.surfPt.fromLocal(We1Result.dirLocal); SampledSpectrum alpha = (We0 * We1) * (We0Result.surfPt.calcCosTerm(rayDir) / (We0Result.areaPDF * We1Result.dirPDF * selectWLPDF)); optix::Ray ray = optix::make_Ray(asOptiXType(We0Result.surfPt.position), asOptiXType(rayDir), RayType::Primary, 0.0f, FLT_MAX); Payload payload; payload.maxLengthTerminate = false; payload.rng = rng; payload.initImportance = alpha.importance(wls.selectedLambdaIndex()); payload.wls = wls; payload.alpha = alpha; payload.contribution = SampledSpectrum::Zero(); const uint32_t MaxPathLength = 25; uint32_t pathLength = 0; while (true) { payload.terminate = true; ++pathLength; if (pathLength >= MaxPathLength) payload.maxLengthTerminate = true; rtTrace(pv_topGroup, ray, payload); if (payload.terminate) break; VLRAssert(pathLength < MaxPathLength, "Path should be terminated... Something went wrong..."); ray = optix::make_Ray(asOptiXType(payload.origin), asOptiXType(payload.direction), RayType::Scattered, 0.0f, FLT_MAX); } pv_rngBuffer[sm_launchIndex] = payload.rng; if (!payload.contribution.allFinite()) { vlrprintf("Pass %u, (%u, %u): Not a finite value.\n", pv_numAccumFrames, sm_launchIndex.x, sm_launchIndex.y); return; } if (pv_numAccumFrames == 1) pv_outputBuffer[sm_launchIndex].reset(); pv_outputBuffer[sm_launchIndex].add(wls, payload.contribution); } // Exception Program RT_PROGRAM void exception() { //uint32_t code = rtGetExceptionCode(); rtPrintExceptionDetails(); } }
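Two notes on the path-tracing pair. First, the hipified row has the multibyte characters of the Japanese comments stripped, leaving fragments such as `// JP: Intersection/Bounding Box ProgramClosest Hit Program`; the .cu row's comments say, roughly, that the environment light would ideally be handled uniformly in the Closest Hit Program via an intersection/bounding-box program for an infinite sphere, but OptiX's LBVH-based BVH builder makes an infinite AABB risky, so it is handled in the Miss Program instead, and that folding the scalarCoeff factor directly into the contribution expression produces odd results (a suspected CUDA bug). Second, the MIS weight computed inline in both rows is the power heuristic with beta = 2; pulled out as a helper (the name is ours, not VLR's) it is:

// MISWeight = pdfSampled^2 / (pdfOther^2 + pdfSampled^2), as written inline above
__device__ inline float powerHeuristic2(float pdfSampled, float pdfOther) {
    float s2 = pdfSampled * pdfSampled;
    float o2 = pdfOther * pdfOther;
    return s2 / (s2 + o2);
}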
65d031ef164c3a0d9467127642c696bbe34e2efd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if GOOGLE_CUDA #include "ew_op_gpu.h" #include <stdio.h> template <typename T, typename V> __global__ void __launch_bounds__(32) LSTM_Forward( T* C_next, T* H_next, const T* __restrict__ C_prev, const T* __restrict__ H_prev, float forget_bias, int K, int K4) { int tid = threadIdx.x; int k = blockIdx.x; int n = blockIdx.y; int k4 = k*32 + tid; int x0 = n*K + k4; int x1 = x0 + K4; int x2 = x1 + K4; int x3 = x2 + K4; int z = n*K4 + k4; bool b = k4 < K4; // Match tf.nn.rnn_cell.BasicLSTMCell layout V c = load(C_prev, z, b); V i = load(H_prev, x0, b); V u = load(H_prev, x1, b); V f = load(H_prev, x2, b); V o = load(H_prev, x3, b); V sig_i = ew_sig(i); V tan_u = ew_tanh(u); V sig_f = ew_sig(ew_add(f, forget_bias)); V sig_o = ew_sig(o); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V h_nxt = ew_mul(sig_o, c_act); store(C_next, c_nxt, z, b); store(H_next, h_nxt, z, b); } template <typename T, typename V> __global__ void __launch_bounds__(32) LSTM_Bias_Forward( T* C_next, T* H_next, const T* __restrict__ C_prev, const T* __restrict__ H_prev, const V* __restrict__ Bias, float forget_bias, int K, int K4) { int tid = threadIdx.x; int k = blockIdx.x; int n = blockIdx.y; int k4 = k*32 + tid; int x0 = n*K + k4; int x1 = x0 + K4; int x2 = x1 + K4; int x3 = x2 + K4; int z = n*K4 + k4; bool b = k4 < K4; // Match tf.nn.rnn_cell.BasicLSTMCell layout V ib = load(Bias, k4, b); V ub = load(Bias, k4 + K4, b); V fb = load(Bias, k4 + K4*2, b); V ob = load(Bias, k4 + K4*2 + K4, b); V c = load(C_prev, z, b); V i = load(H_prev, x0, b); V u = load(H_prev, x1, b); V f = load(H_prev, x2, b); V o = load(H_prev, x3, b); V sig_i = ew_sig(ew_add(i, ib)); V sig_f = ew_sig(ew_add(ew_add(f, fb), forget_bias)); V sig_o = ew_sig(ew_add(o, ob)); V tan_u = ew_tanh(ew_add(u, ub)); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V h_nxt = ew_mul(sig_o, c_act); store(C_next, c_nxt, z, b); store(H_next, h_nxt, z, b); } template <typename T, typename V> __global__ void __launch_bounds__(32) LSTM4_Forward( T* C_next, T* H_next, const T* __restrict__ C, const T* __restrict__ I, const T* __restrict__ F, const T* __restrict__ O, const T* __restrict__ U, float forget_bias, int size) { int tid = threadIdx.x; int x = blockIdx.x*32 + tid; bool b = x < size; V c = load(C, x, b); V i = load(I, x, b); V f = load(F, x, b); V o = load(O, x, b); V u = load(U, x, b); V sig_i = ew_sig(i); V sig_f = ew_sig(ew_add(f, forget_bias)); V sig_o = ew_sig(o); V tan_u = ew_tanh(u); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V h_nxt = ew_mul(sig_o, c_act); store(C_next, c_nxt, x, b); store(H_next, h_nxt, x, b); } template <typename B, typename F, typename V> __global__ void __launch_bounds__(32) LSTM_Backward( B* DC, B* DH, const B* __restrict__ EC, const B* __restrict__ EH, const F* __restrict__ C_prev, const F* __restrict__ H_prev, int K, int K4, int ec_valid, float forget_bias) { int tid = threadIdx.x; int k = blockIdx.x; int n = blockIdx.y; int k4 = k*32 + tid; int x0 = n*K + k4; int x1 = x0 + K4; int x2 = x1 + K4; int x3 = x2 + K4; int z = n*K4 + k4; bool b = k4 < K4; V i = load(H_prev, x0, b); V u = load(H_prev, x1, b); V f = load(H_prev, x2, b); V o = load(H_prev, x3, b); V c = load(C_prev, z, b); V eh = load(EH, z, b); V ec = load(EC, z, b && ec_valid); V sig_i = ew_sig(i); V sig_f = ew_sig(ew_add(f, forget_bias)); V sig_o = ew_sig(o); V tan_u = 
ew_tanh(u); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V dC = ew_add(ew_tanh_grad(ew_mul(eh, sig_o), c_act), ec); V dI = ew_sig_grad(ew_mul(dC, tan_u), sig_i); V dF = ew_sig_grad(ew_mul(dC, c), sig_f); V dO = ew_sig_grad(ew_mul( eh, c_act), sig_o); V dU = ew_tanh_grad(ew_mul(dC, sig_i), tan_u); dC = ew_mul(dC, sig_f); store(DC, dC, z, b); store(DH, dI, x0, b); store(DH, dU, x1, b); store(DH, dF, x2, b); store(DH, dO, x3, b); } template <typename B, typename F, typename V> __global__ void __launch_bounds__(32) LSTM_Bias_Backward( B* DC, B* DH, const B* __restrict__ EC, const B* __restrict__ EH, const F* __restrict__ C_prev, const F* __restrict__ H_prev, const V* __restrict__ Bias, int K, int K4, int ec_valid, float forget_bias) { int tid = threadIdx.x; int k = blockIdx.x; int n = blockIdx.y; int k4 = k*32 + tid; int x0 = n*K + k4; int x1 = x0 + K4; int x2 = x1 + K4; int x3 = x2 + K4; int z = n*K4 + k4; bool b = k4 < K4; // Match tf.nn.rnn_cell.BasicLSTMCell layout V ib = load(Bias, k4, b); V ub = load(Bias, k4 + K4, b); V fb = load(Bias, k4 + K4*2, b); V ob = load(Bias, k4 + K4*2 + K4, b); V i = load(H_prev, x0, b); V u = load(H_prev, x1, b); V f = load(H_prev, x2, b); V o = load(H_prev, x3, b); V c = load(C_prev, z, b); V eh = load(EH, z, b); V ec = load(EC, z, b && ec_valid); V sig_i = ew_sig(ew_add(i, ib)); V sig_f = ew_sig(ew_add(ew_add(f, fb), forget_bias)); V sig_o = ew_sig(ew_add(o, ob)); V tan_u = ew_tanh(ew_add(u, ub)); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V dC = ew_add(ew_tanh_grad(ew_mul(eh, sig_o), c_act), ec); V dI = ew_sig_grad(ew_mul(dC, tan_u), sig_i); V dF = ew_sig_grad(ew_mul(dC, c), sig_f); V dO = ew_sig_grad(ew_mul( eh, c_act), sig_o); V dU = ew_tanh_grad(ew_mul(dC, sig_i), tan_u); dC = ew_mul(dC, sig_f); store(DC, dC, z, b); store(DH, dI, x0, b); store(DH, dU, x1, b); store(DH, dF, x2, b); store(DH, dO, x3, b); } template <typename B, typename A, typename V> __global__ void __launch_bounds__(32) LSTM4_Backward( B* DC, B* DI, B* DF, B* DO, B* DU, const B* __restrict__ EC, const B* __restrict__ EH, const A* __restrict__ C, const A* __restrict__ I, const A* __restrict__ F, const A* __restrict__ O, const A* __restrict__ U, int size, int ec_valid, float forget_bias) { int tid = threadIdx.x; int x = blockIdx.x*32 + tid; bool b = x < size; V c = load(C, x, b); V i = load(I, x, b); V f = load(F, x, b); V o = load(O, x, b); V u = load(U, x, b); V eh = load(EH, x, b); V ec = load(EC, x, b && ec_valid); V sig_i = ew_sig(i); V sig_f = ew_sig(ew_add(f, forget_bias)); V sig_o = ew_sig(o); V tan_u = ew_tanh(u); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V dC = ew_add(ew_tanh_grad(ew_mul(eh, sig_o), c_act), ec); V dI = ew_sig_grad(ew_mul(dC, tan_u), sig_i); V dF = ew_sig_grad(ew_mul(dC, c), sig_f); V dO = ew_sig_grad(ew_mul( eh, c_act), sig_o); V dU = ew_tanh_grad(ew_mul(dC, sig_i), tan_u); dC = ew_mul(dC, sig_f); store(DC, dC, x, b); store(DI, dI, x, b); store(DF, dF, x, b); store(DO, dO, x, b); store(DU, dU, x, b); } template <typename T, typename V> bool LSTM_Gates_Forward(hipStream_t stream, T* c_next, T* h_next, const T* c_prev, const T* h_prev, const float* bias, float forget_bias, int N, int K) { int K4 = K >> 2; if ((K4 & 3) == 0) { K >>= 2; // use vector loads K4 >>= 2; dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); V* C_next = (V*)c_next; V* H_next = (V*)h_next; const V* C_prev = (const V*)c_prev; const V* H_prev = (const V*)h_prev; const float4* Bias 
= (const float4*)bias; if (bias == NULL) hipLaunchKernelGGL(( LSTM_Forward<V,float4>), dim3(grid),dim3(32),0,stream, C_next, H_next, C_prev, H_prev, forget_bias, K, K4); else hipLaunchKernelGGL(( LSTM_Bias_Forward<V,float4>), dim3(grid),dim3(32),0,stream, C_next, H_next, C_prev, H_prev, Bias, forget_bias, K, K4); } else { dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); if (bias == NULL) hipLaunchKernelGGL(( LSTM_Forward<T,float>), dim3(grid),dim3(32),0,stream, c_next, h_next, c_prev, h_prev, forget_bias, K, K4); else hipLaunchKernelGGL(( LSTM_Bias_Forward<T,float>), dim3(grid),dim3(32),0,stream, c_next, h_next, c_prev, h_prev, bias, forget_bias, K, K4); } return true; } template <typename T, typename V> bool LSTM4_Gates_Forward(hipStream_t stream, T* c_next, T* h_next, const T* c, const T* i, const T* f, const T* o, const T* u, float forget_bias, int N, int K) { int size = N * K; if ((size & 3) == 0) { size >>= 2; // use vector loads int grid = (size >> 5) + ((size & 31) != 0); V* C_next = (V*)c_next; V* H_next = (V*)h_next; const V* C = (const V*)c; const V* I = (const V*)i; const V* F = (const V*)f; const V* O = (const V*)o; const V* U = (const V*)u; hipLaunchKernelGGL(( LSTM4_Forward<V,float4>), dim3(grid),dim3(32),0,stream, C_next, H_next, C, I, F, O, U, forget_bias, size); } else { int grid = (size >> 5) + ((size & 31) != 0); hipLaunchKernelGGL(( LSTM4_Forward<T,float >), dim3(grid),dim3(32),0,stream, c_next, h_next, c, i, f, o, u, forget_bias, size); } return true; } template <typename B, typename F, typename VB, typename VF> bool LSTM_Gates_Backward(hipStream_t stream, B* dc, B* dh, const B* ec, const B* eh, const F* c_prev, const F* h_prev, const float* bias, int N, int K, float forget_bias) { int K4 = K >> 2; if ((K4 & 3) == 0) { K >>= 2; // use vector loads K4 >>= 2; dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); VB* DC = ( VB*)dc; VB* DH = ( VB*)dh; const VB* EC = (const VB*)ec; const VB* EH = (const VB*)eh; const VF* C_prev = (const VF*)c_prev; const VF* H_prev = (const VF*)h_prev; const float4* Bias = (const float4*)bias; if (bias == NULL) hipLaunchKernelGGL(( LSTM_Backward<VB,VF,float4>), dim3(grid),dim3(32),0,stream, DC, DH, EC, EH, C_prev, H_prev, K, K4, ec != 0, forget_bias); else hipLaunchKernelGGL(( LSTM_Bias_Backward<VB,VF,float4>), dim3(grid),dim3(32),0,stream, DC, DH, EC, EH, C_prev, H_prev, Bias, K, K4, ec != 0, forget_bias); } else { dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); if (bias == NULL) hipLaunchKernelGGL(( LSTM_Backward< B, F,float >), dim3(grid),dim3(32),0,stream, dc, dh, ec, eh, c_prev, h_prev, K, K4, ec != 0, forget_bias); else hipLaunchKernelGGL(( LSTM_Bias_Backward< B, F,float >), dim3(grid),dim3(32),0,stream, dc, dh, ec, eh, c_prev, h_prev, bias, K, K4, ec != 0, forget_bias); } return true; } template <typename B, typename A, typename VB, typename VA> bool LSTM4_Gates_Backward(hipStream_t stream, B* dc, B* di, B* df, B* doo, B* du, const B* ec, const B* eh, const A* c, const A* i, const A* f, const A* o, const A* u, int N, int K, float forget_bias) { int size = N * K; if ((size & 3) == 0) { size >>= 2; // use vector loads int grid = (size >> 5) + ((size & 31) != 0); VB* DC = ( VB*)dc; VB* DI = ( VB*)di; VB* DF = ( VB*)df; VB* DO = ( VB*)doo; VB* DU = ( VB*)du; const VB* EC = (const VB*)ec; const VB* EH = (const VB*)eh; const VA* C = (const VA*)c; const VA* I = (const VA*)i; const VA* F = (const VA*)f; const VA* O = (const VA*)o; const VA* U = (const VA*)u; hipLaunchKernelGGL(( LSTM4_Backward<VB,VA,float4>), dim3(grid),dim3(32),0,stream, DC, DI, DF, DO, DU, 
EC, EH, C, I, F, O, U, size, ec != 0, forget_bias); } else { int grid = (size >> 5) + ((size & 31) != 0); hipLaunchKernelGGL(( LSTM4_Backward< B, A,float >), dim3(grid),dim3(32),0,stream, dc, di, df, doo, du, ec, eh, c, i, f, o, u, size, ec != 0, forget_bias); } return true; } template bool LSTM_Gates_Forward <float,float4>(hipStream_t stream, float* c_next, float* h_next, const float* c_prev, const float* h_prev, const float* bias, float forget_bias, int N, int K); template bool LSTM_Gates_Forward <ehalf,ehalf4>(hipStream_t stream, ehalf* c_next, ehalf* h_next, const ehalf* c_prev, const ehalf* h_prev, const float* bias, float forget_bias, int N, int K); template bool LSTM_Gates_Forward <bhalf,bhalf4>(hipStream_t stream, bhalf* c_next, bhalf* h_next, const bhalf* c_prev, const bhalf* h_prev, const float* bias, float forget_bias, int N, int K); template bool LSTM_Gates_Backward<float,float,float4,float4>(hipStream_t stream, float* dc, float* dh, const float* ec, const float* eh, const float* c_prev, const float* h_prev, const float* bias, int N, int K, float forget_bias); template bool LSTM_Gates_Backward<ehalf,ehalf,ehalf4,ehalf4>(hipStream_t stream, ehalf* dc, ehalf* dh, const ehalf* ec, const ehalf* eh, const ehalf* c_prev, const ehalf* h_prev, const float* bias, int N, int K, float forget_bias); //template bool LSTM_Gates_Backward<float,ehalf,float4,ehalf4>(hipStream_t stream, float* dc, float* dh, const float* ec, const float* eh, const ehalf* c_prev, const ehalf* h_prev, const float* bias, int N, int K, float forget_bias); template bool LSTM_Gates_Backward<bhalf,bhalf,bhalf4,bhalf4>(hipStream_t stream, bhalf* dc, bhalf* dh, const bhalf* ec, const bhalf* eh, const bhalf* c_prev, const bhalf* h_prev, const float* bias, int N, int K, float forget_bias); //template bool LSTM_Gates_Backward<float,bhalf,float4,bhalf4>(hipStream_t stream, float* dc, float* dh, const float* ec, const float* eh, const bhalf* c_prev, const bhalf* h_prev, const float* bias, int N, int K, float forget_bias); template bool LSTM4_Gates_Forward <float,float4>(hipStream_t stream, float* c_next, float* h_next, const float* c, const float* i, const float* f, const float* o, const float* u, float forget_bias, int N, int K); template bool LSTM4_Gates_Forward <ehalf,ehalf4>(hipStream_t stream, ehalf* c_next, ehalf* h_next, const ehalf* c, const ehalf* i, const ehalf* f, const ehalf* o, const ehalf* u, float forget_bias, int N, int K); template bool LSTM4_Gates_Forward <bhalf,bhalf4>(hipStream_t stream, bhalf* c_next, bhalf* h_next, const bhalf* c, const bhalf* i, const bhalf* f, const bhalf* o, const bhalf* u, float forget_bias, int N, int K); template bool LSTM4_Gates_Backward<float,float,float4,float4>(hipStream_t stream, float* dc, float* di, float* df, float* doo, float* du, const float* ec, const float* eh, const float* c, const float* i, const float* f, const float* o, const float* u, int N, int K, float forget_bias); template bool LSTM4_Gates_Backward<ehalf,ehalf,ehalf4,ehalf4>(hipStream_t stream, ehalf* dc, ehalf* di, ehalf* df, ehalf* doo, ehalf* du, const ehalf* ec, const ehalf* eh, const ehalf* c, const ehalf* i, const ehalf* f, const ehalf* o, const ehalf* u, int N, int K, float forget_bias); //template bool LSTM4_Gates_Backward<float,ehalf,float4,ehalf4>(hipStream_t stream, float* dc, float* di, float* df, float* doo, float* du, const float* ec, const float* eh, const ehalf* c, const ehalf* i, const ehalf* f, const ehalf* o, const ehalf* u, int N, int K, float forget_bias); template bool 
LSTM4_Gates_Backward<bhalf,bhalf,bhalf4,bhalf4>(hipStream_t stream, bhalf* dc, bhalf* di, bhalf* df, bhalf* doo, bhalf* du, const bhalf* ec, const bhalf* eh, const bhalf* c, const bhalf* i, const bhalf* f, const bhalf* o, const bhalf* u, int N, int K, float forget_bias); //template bool LSTM4_Gates_Backward<float,bhalf,float4,bhalf4>(hipStream_t stream, float* dc, float* di, float* df, float* doo, float* du, const float* ec, const float* eh, const bhalf* c, const bhalf* i, const bhalf* f, const bhalf* o, const bhalf* u, int N, int K, float forget_bias); template <typename T, typename V> __global__ void __launch_bounds__(32) Split4( T* Z0, T* Z1, T* Z2, T* Z3, const T* __restrict__ X, int K, int K4) { int tid = threadIdx.x; int k = blockIdx.x; int n = blockIdx.y; int k4 = k*32 + tid; int i0 = n*K + k4; int i1 = i0 + K4; int i2 = i1 + K4; int i3 = i2 + K4; int z = n*K4 + k4; bool b = k4 < K4; V x0 = load(X, i0, b); V x1 = load(X, i1, b); V x2 = load(X, i2, b); V x3 = load(X, i3, b); store(Z0, x0, z, b); store(Z1, x1, z, b); store(Z2, x2, z, b); store(Z3, x3, z, b); } template <typename T, typename V> __global__ void __launch_bounds__(32) Concat4( T* DX, const T* __restrict__ DZ0, const T* __restrict__ DZ1, const T* __restrict__ DZ2, const T* __restrict__ DZ3, int K, int K4) { int tid = threadIdx.x; int k = blockIdx.x; int n = blockIdx.y; int k4 = k*32 + tid; int i0 = n*K + k4; int i1 = i0 + K4; int i2 = i1 + K4; int i3 = i2 + K4; int z = n*K4 + k4; bool b = k4 < K4; V dx0 = load(DZ0, z, b); V dx1 = load(DZ1, z, b); V dx2 = load(DZ2, z, b); V dx3 = load(DZ3, z, b); store(DX, dx0, i0, b); store(DX, dx1, i1, b); store(DX, dx2, i2, b); store(DX, dx3, i3, b); } template <typename T, typename V> bool Split4_Forward(hipStream_t stream, T* z0, T* z1, T* z2, T* z3, const T* x, int N, int K) { int K4 = K >> 2; if ((K4 & 3) == 0) { K >>= 2; // use vector loads K4 >>= 2; dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); V* Z0 = (V*)z0; V* Z1 = (V*)z1; V* Z2 = (V*)z2; V* Z3 = (V*)z3; const V* X = (const V*)x; hipLaunchKernelGGL(( Split4<V,float4>), dim3(grid),dim3(32),0,stream, Z0, Z1, Z2, Z3, X, K, K4); } else { dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); hipLaunchKernelGGL(( Split4<T,float >), dim3(grid),dim3(32),0,stream, z0, z1, z2, z3, x, K, K4); } return true; } template <typename T, typename V> bool Concat4_Forward(hipStream_t stream, T* dx, const T* z0, const T* z1, const T* z2, const T* z3, int N, int K) { int K4 = K >> 2; if ((K4 & 3) == 0) { K >>= 2; // use vector loads K4 >>= 2; dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); V* DX = (V*)dx; const V* Z0 = (const V*)z0; const V* Z1 = (const V*)z1; const V* Z2 = (const V*)z2; const V* Z3 = (const V*)z3; hipLaunchKernelGGL(( Concat4<V,float4>), dim3(grid),dim3(32),0,stream, DX, Z0, Z1, Z2, Z3, K, K4); } else { dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); hipLaunchKernelGGL(( Concat4<T,float >), dim3(grid),dim3(32),0,stream, dx, z0, z1, z2, z3, K, K4); } return true; } template bool Split4_Forward <float,float4>(hipStream_t stream, float* z0, float* z1, float* z2, float* z3, const float* x, int N, int K); template bool Split4_Forward <ehalf,ehalf4>(hipStream_t stream, ehalf* z0, ehalf* z1, ehalf* z2, ehalf* z3, const ehalf* x, int N, int K); template bool Split4_Forward <bhalf,bhalf4>(hipStream_t stream, bhalf* z0, bhalf* z1, bhalf* z2, bhalf* z3, const bhalf* x, int N, int K); template bool Concat4_Forward<float,float4>(hipStream_t stream, float* dx, const float* z0, const float* z1, const float* z2, const float* z3, int N, int K); template bool 
Concat4_Forward<ehalf,ehalf4>(hipStream_t stream, ehalf* dx, const ehalf* z0, const ehalf* z1, const ehalf* z2, const ehalf* z3, int N, int K); template bool Concat4_Forward<bhalf,bhalf4>(hipStream_t stream, bhalf* dx, const bhalf* z0, const bhalf* z1, const bhalf* z2, const bhalf* z3, int N, int K); // mean = mean(x, axis=1) // std = std(x, axis=1) // cutoff = mean + alpha*std // y = fmaxf(x, cutoff) - cutoff; template <typename T, typename V, int THREADS> __global__ void __launch_bounds__(THREADS) sparse_relu_forward( T* Y, const T* __restrict__ X, float alpha, uint K, float rcpK) { int tid = threadIdx.x; int n = blockIdx.x; int offset = n*K + tid; // Mean const T* X1 = X + offset; V v_mean1, v_mean2; ew_zero(v_mean1); ew_zero(v_mean2); #pragma unroll 4 for (int k = tid; k < K; k += THREADS) { V x = load(X1); v_mean1 = ew_add(v_mean1, x); v_mean2 = ew_add(v_mean2, ew_sqr(x)); X1 += THREADS; } float2 mean; mean.x = ew_sum(v_mean1) * rcpK; mean.y = ew_sum(v_mean2) * rcpK; // reduce within warp for (int i = 16; i > 0; i >>= 1) { mean.x += shfl_xor(mean.x, i); mean.y += shfl_xor(mean.y, i); } // if using more than 1 warp, further reduced with shared memory if (THREADS > 32) { __shared__ float2 Share[THREADS/32]; // first thread of each warp store to shared if ((tid & 31) == 0) Share[tid/32] = mean; __syncthreads(); if (tid < THREADS/32) { // first warp loads all prior reductions mean = Share[tid]; // reduce within this first warp for (int i = THREADS/64; i > 0; i >>= 1) { mean.x += shfl_xor(mean.x, i); mean.y += shfl_xor(mean.y, i); } // outputs final reduction to shared if (tid == 0) Share[0] = mean; } __syncthreads(); // broadcast result to all threads mean = Share[0]; } // var = avg(x**2) - avg(x)**2 // std = sqrt(var) float std = sqrtf(mean.y - mean.x*mean.x); // Norm/Gain/Bias X += offset; Y += offset; for (int k = tid; k < K; k += THREADS) { float cutoff = mean.x + alpha*std; V x = load(X); V y = ew_sub(ew_maximum(x, cutoff), cutoff); store(Y, ew_relu(y), 0, true); X += THREADS; Y += THREADS; } } template <typename T, typename V> bool SparseReluForward(hipStream_t stream, T* y, const T* x, float alpha, uint K, uint N) { dim3 grid(N, 1, 1); float rcpK = 1.0f / (float)K; if ((K & 3) == 0) { K >>= 2; // use vector loads V* Y = (V*)y; const V* X = (const V*)x; // if (K >= 1024) // sparse_relu_forward<V,float4,1024><<<grid,1024,0,stream>>>(Y, X, alpha, K, rcpK); if (K >= 256) hipLaunchKernelGGL(( sparse_relu_forward<V,float4, 256>), dim3(grid), dim3(256),0,stream, Y, X, alpha, K, rcpK); else hipLaunchKernelGGL(( sparse_relu_forward<V,float4, 64>), dim3(grid), dim3(64),0,stream, Y, X, alpha, K, rcpK); } else { // if (K >= 1024) // sparse_relu_forward<T,float ,1024><<<grid,1024,0,stream>>>(y, x, alpha, K, rcpK); if (K >= 256) hipLaunchKernelGGL(( sparse_relu_forward<T,float , 256>), dim3(grid), dim3(256),0,stream, y, x, alpha, K, rcpK); else hipLaunchKernelGGL(( sparse_relu_forward<T,float , 64>), dim3(grid), dim3(64),0,stream, y, x, alpha, K, rcpK); } return true; // TODO } template bool SparseReluForward<float,float4>(hipStream_t stream, float* y, const float* x,float alpha, uint K, uint N); template bool SparseReluForward<ehalf,ehalf4>(hipStream_t stream, ehalf* y, const ehalf* x,float alpha, uint K, uint N); template bool SparseReluForward<bhalf,bhalf4>(hipStream_t stream, bhalf* y, const bhalf* x,float alpha, uint K, uint N); #endif
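// Illustrative CPU reference (sketch): the fused gate math implemented by
// LSTM_Forward in the file above, useful for spot-checking kernel output on
// small sizes. It assumes ew_sig/ew_tanh are the standard sigmoid/tanh and
// that h_prev stores the pre-activation gates per row in [i, u, f, o] order,
// each block K4 = K/4 wide, exactly as the kernel indexes them.
// LSTM_Bias_Forward additionally adds per-gate biases before the activations;
// that path is omitted here.
#include <cmath>

static void lstm_cell_forward_reference(float* c_next, float* h_next,
                                        const float* c_prev, const float* h_prev,
                                        float forget_bias, int N, int K4) {
  const int K = K4 * 4;  // row stride of the packed gate tensor
  for (int n = 0; n < N; ++n) {
    for (int k = 0; k < K4; ++k) {
      const float i = h_prev[n*K + k];           // input gate (pre-activation)
      const float u = h_prev[n*K + k + K4];      // candidate
      const float f = h_prev[n*K + k + K4*2];    // forget gate
      const float o = h_prev[n*K + k + K4*3];    // output gate
      const float c = c_prev[n*K4 + k];
      const float sig_i = 1.f / (1.f + std::exp(-i));
      const float sig_f = 1.f / (1.f + std::exp(-(f + forget_bias)));
      const float sig_o = 1.f / (1.f + std::exp(-o));
      const float tan_u = std::tanh(u);
      const float c_nxt = sig_f * c + sig_i * tan_u;
      c_next[n*K4 + k] = c_nxt;
      h_next[n*K4 + k] = sig_o * std::tanh(c_nxt);
    }
  }
}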
65d031ef164c3a0d9467127642c696bbe34e2efd.cu
#if GOOGLE_CUDA #include "ew_op_gpu.h" #include <stdio.h> template <typename T, typename V> __global__ void __launch_bounds__(32) LSTM_Forward( T* C_next, T* H_next, const T* __restrict__ C_prev, const T* __restrict__ H_prev, float forget_bias, int K, int K4) { int tid = threadIdx.x; int k = blockIdx.x; int n = blockIdx.y; int k4 = k*32 + tid; int x0 = n*K + k4; int x1 = x0 + K4; int x2 = x1 + K4; int x3 = x2 + K4; int z = n*K4 + k4; bool b = k4 < K4; // Match tf.nn.rnn_cell.BasicLSTMCell layout V c = load(C_prev, z, b); V i = load(H_prev, x0, b); V u = load(H_prev, x1, b); V f = load(H_prev, x2, b); V o = load(H_prev, x3, b); V sig_i = ew_sig(i); V tan_u = ew_tanh(u); V sig_f = ew_sig(ew_add(f, forget_bias)); V sig_o = ew_sig(o); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V h_nxt = ew_mul(sig_o, c_act); store(C_next, c_nxt, z, b); store(H_next, h_nxt, z, b); } template <typename T, typename V> __global__ void __launch_bounds__(32) LSTM_Bias_Forward( T* C_next, T* H_next, const T* __restrict__ C_prev, const T* __restrict__ H_prev, const V* __restrict__ Bias, float forget_bias, int K, int K4) { int tid = threadIdx.x; int k = blockIdx.x; int n = blockIdx.y; int k4 = k*32 + tid; int x0 = n*K + k4; int x1 = x0 + K4; int x2 = x1 + K4; int x3 = x2 + K4; int z = n*K4 + k4; bool b = k4 < K4; // Match tf.nn.rnn_cell.BasicLSTMCell layout V ib = load(Bias, k4, b); V ub = load(Bias, k4 + K4, b); V fb = load(Bias, k4 + K4*2, b); V ob = load(Bias, k4 + K4*2 + K4, b); V c = load(C_prev, z, b); V i = load(H_prev, x0, b); V u = load(H_prev, x1, b); V f = load(H_prev, x2, b); V o = load(H_prev, x3, b); V sig_i = ew_sig(ew_add(i, ib)); V sig_f = ew_sig(ew_add(ew_add(f, fb), forget_bias)); V sig_o = ew_sig(ew_add(o, ob)); V tan_u = ew_tanh(ew_add(u, ub)); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V h_nxt = ew_mul(sig_o, c_act); store(C_next, c_nxt, z, b); store(H_next, h_nxt, z, b); } template <typename T, typename V> __global__ void __launch_bounds__(32) LSTM4_Forward( T* C_next, T* H_next, const T* __restrict__ C, const T* __restrict__ I, const T* __restrict__ F, const T* __restrict__ O, const T* __restrict__ U, float forget_bias, int size) { int tid = threadIdx.x; int x = blockIdx.x*32 + tid; bool b = x < size; V c = load(C, x, b); V i = load(I, x, b); V f = load(F, x, b); V o = load(O, x, b); V u = load(U, x, b); V sig_i = ew_sig(i); V sig_f = ew_sig(ew_add(f, forget_bias)); V sig_o = ew_sig(o); V tan_u = ew_tanh(u); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V h_nxt = ew_mul(sig_o, c_act); store(C_next, c_nxt, x, b); store(H_next, h_nxt, x, b); } template <typename B, typename F, typename V> __global__ void __launch_bounds__(32) LSTM_Backward( B* DC, B* DH, const B* __restrict__ EC, const B* __restrict__ EH, const F* __restrict__ C_prev, const F* __restrict__ H_prev, int K, int K4, int ec_valid, float forget_bias) { int tid = threadIdx.x; int k = blockIdx.x; int n = blockIdx.y; int k4 = k*32 + tid; int x0 = n*K + k4; int x1 = x0 + K4; int x2 = x1 + K4; int x3 = x2 + K4; int z = n*K4 + k4; bool b = k4 < K4; V i = load(H_prev, x0, b); V u = load(H_prev, x1, b); V f = load(H_prev, x2, b); V o = load(H_prev, x3, b); V c = load(C_prev, z, b); V eh = load(EH, z, b); V ec = load(EC, z, b && ec_valid); V sig_i = ew_sig(i); V sig_f = ew_sig(ew_add(f, forget_bias)); V sig_o = ew_sig(o); V tan_u = ew_tanh(u); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V 
dC = ew_add(ew_tanh_grad(ew_mul(eh, sig_o), c_act), ec); V dI = ew_sig_grad(ew_mul(dC, tan_u), sig_i); V dF = ew_sig_grad(ew_mul(dC, c), sig_f); V dO = ew_sig_grad(ew_mul( eh, c_act), sig_o); V dU = ew_tanh_grad(ew_mul(dC, sig_i), tan_u); dC = ew_mul(dC, sig_f); store(DC, dC, z, b); store(DH, dI, x0, b); store(DH, dU, x1, b); store(DH, dF, x2, b); store(DH, dO, x3, b); } template <typename B, typename F, typename V> __global__ void __launch_bounds__(32) LSTM_Bias_Backward( B* DC, B* DH, const B* __restrict__ EC, const B* __restrict__ EH, const F* __restrict__ C_prev, const F* __restrict__ H_prev, const V* __restrict__ Bias, int K, int K4, int ec_valid, float forget_bias) { int tid = threadIdx.x; int k = blockIdx.x; int n = blockIdx.y; int k4 = k*32 + tid; int x0 = n*K + k4; int x1 = x0 + K4; int x2 = x1 + K4; int x3 = x2 + K4; int z = n*K4 + k4; bool b = k4 < K4; // Match tf.nn.rnn_cell.BasicLSTMCell layout V ib = load(Bias, k4, b); V ub = load(Bias, k4 + K4, b); V fb = load(Bias, k4 + K4*2, b); V ob = load(Bias, k4 + K4*2 + K4, b); V i = load(H_prev, x0, b); V u = load(H_prev, x1, b); V f = load(H_prev, x2, b); V o = load(H_prev, x3, b); V c = load(C_prev, z, b); V eh = load(EH, z, b); V ec = load(EC, z, b && ec_valid); V sig_i = ew_sig(ew_add(i, ib)); V sig_f = ew_sig(ew_add(ew_add(f, fb), forget_bias)); V sig_o = ew_sig(ew_add(o, ob)); V tan_u = ew_tanh(ew_add(u, ub)); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V dC = ew_add(ew_tanh_grad(ew_mul(eh, sig_o), c_act), ec); V dI = ew_sig_grad(ew_mul(dC, tan_u), sig_i); V dF = ew_sig_grad(ew_mul(dC, c), sig_f); V dO = ew_sig_grad(ew_mul( eh, c_act), sig_o); V dU = ew_tanh_grad(ew_mul(dC, sig_i), tan_u); dC = ew_mul(dC, sig_f); store(DC, dC, z, b); store(DH, dI, x0, b); store(DH, dU, x1, b); store(DH, dF, x2, b); store(DH, dO, x3, b); } template <typename B, typename A, typename V> __global__ void __launch_bounds__(32) LSTM4_Backward( B* DC, B* DI, B* DF, B* DO, B* DU, const B* __restrict__ EC, const B* __restrict__ EH, const A* __restrict__ C, const A* __restrict__ I, const A* __restrict__ F, const A* __restrict__ O, const A* __restrict__ U, int size, int ec_valid, float forget_bias) { int tid = threadIdx.x; int x = blockIdx.x*32 + tid; bool b = x < size; V c = load(C, x, b); V i = load(I, x, b); V f = load(F, x, b); V o = load(O, x, b); V u = load(U, x, b); V eh = load(EH, x, b); V ec = load(EC, x, b && ec_valid); V sig_i = ew_sig(i); V sig_f = ew_sig(ew_add(f, forget_bias)); V sig_o = ew_sig(o); V tan_u = ew_tanh(u); V c_nxt = ew_add(ew_mul(sig_f, c), ew_mul(sig_i, tan_u)); V c_act = ew_tanh(c_nxt); V dC = ew_add(ew_tanh_grad(ew_mul(eh, sig_o), c_act), ec); V dI = ew_sig_grad(ew_mul(dC, tan_u), sig_i); V dF = ew_sig_grad(ew_mul(dC, c), sig_f); V dO = ew_sig_grad(ew_mul( eh, c_act), sig_o); V dU = ew_tanh_grad(ew_mul(dC, sig_i), tan_u); dC = ew_mul(dC, sig_f); store(DC, dC, x, b); store(DI, dI, x, b); store(DF, dF, x, b); store(DO, dO, x, b); store(DU, dU, x, b); } template <typename T, typename V> bool LSTM_Gates_Forward(CUstream stream, T* c_next, T* h_next, const T* c_prev, const T* h_prev, const float* bias, float forget_bias, int N, int K) { int K4 = K >> 2; if ((K4 & 3) == 0) { K >>= 2; // use vector loads K4 >>= 2; dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); V* C_next = (V*)c_next; V* H_next = (V*)h_next; const V* C_prev = (const V*)c_prev; const V* H_prev = (const V*)h_prev; const float4* Bias = (const float4*)bias; if (bias == NULL) LSTM_Forward<V,float4><<<grid,32,0,stream>>>(C_next, H_next, 
C_prev, H_prev, forget_bias, K, K4); else LSTM_Bias_Forward<V,float4><<<grid,32,0,stream>>>(C_next, H_next, C_prev, H_prev, Bias, forget_bias, K, K4); } else { dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); if (bias == NULL) LSTM_Forward<T,float><<<grid,32,0,stream>>>(c_next, h_next, c_prev, h_prev, forget_bias, K, K4); else LSTM_Bias_Forward<T,float><<<grid,32,0,stream>>>(c_next, h_next, c_prev, h_prev, bias, forget_bias, K, K4); } return true; } template <typename T, typename V> bool LSTM4_Gates_Forward(CUstream stream, T* c_next, T* h_next, const T* c, const T* i, const T* f, const T* o, const T* u, float forget_bias, int N, int K) { int size = N * K; if ((size & 3) == 0) { size >>= 2; // use vector loads int grid = (size >> 5) + ((size & 31) != 0); V* C_next = (V*)c_next; V* H_next = (V*)h_next; const V* C = (const V*)c; const V* I = (const V*)i; const V* F = (const V*)f; const V* O = (const V*)o; const V* U = (const V*)u; LSTM4_Forward<V,float4><<<grid,32,0,stream>>>(C_next, H_next, C, I, F, O, U, forget_bias, size); } else { int grid = (size >> 5) + ((size & 31) != 0); LSTM4_Forward<T,float ><<<grid,32,0,stream>>>(c_next, h_next, c, i, f, o, u, forget_bias, size); } return true; } template <typename B, typename F, typename VB, typename VF> bool LSTM_Gates_Backward(CUstream stream, B* dc, B* dh, const B* ec, const B* eh, const F* c_prev, const F* h_prev, const float* bias, int N, int K, float forget_bias) { int K4 = K >> 2; if ((K4 & 3) == 0) { K >>= 2; // use vector loads K4 >>= 2; dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); VB* DC = ( VB*)dc; VB* DH = ( VB*)dh; const VB* EC = (const VB*)ec; const VB* EH = (const VB*)eh; const VF* C_prev = (const VF*)c_prev; const VF* H_prev = (const VF*)h_prev; const float4* Bias = (const float4*)bias; if (bias == NULL) LSTM_Backward<VB,VF,float4><<<grid,32,0,stream>>>(DC, DH, EC, EH, C_prev, H_prev, K, K4, ec != 0, forget_bias); else LSTM_Bias_Backward<VB,VF,float4><<<grid,32,0,stream>>>(DC, DH, EC, EH, C_prev, H_prev, Bias, K, K4, ec != 0, forget_bias); } else { dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); if (bias == NULL) LSTM_Backward< B, F,float ><<<grid,32,0,stream>>>(dc, dh, ec, eh, c_prev, h_prev, K, K4, ec != 0, forget_bias); else LSTM_Bias_Backward< B, F,float ><<<grid,32,0,stream>>>(dc, dh, ec, eh, c_prev, h_prev, bias, K, K4, ec != 0, forget_bias); } return true; } template <typename B, typename A, typename VB, typename VA> bool LSTM4_Gates_Backward(CUstream stream, B* dc, B* di, B* df, B* doo, B* du, const B* ec, const B* eh, const A* c, const A* i, const A* f, const A* o, const A* u, int N, int K, float forget_bias) { int size = N * K; if ((size & 3) == 0) { size >>= 2; // use vector loads int grid = (size >> 5) + ((size & 31) != 0); VB* DC = ( VB*)dc; VB* DI = ( VB*)di; VB* DF = ( VB*)df; VB* DO = ( VB*)doo; VB* DU = ( VB*)du; const VB* EC = (const VB*)ec; const VB* EH = (const VB*)eh; const VA* C = (const VA*)c; const VA* I = (const VA*)i; const VA* F = (const VA*)f; const VA* O = (const VA*)o; const VA* U = (const VA*)u; LSTM4_Backward<VB,VA,float4><<<grid,32,0,stream>>>(DC, DI, DF, DO, DU, EC, EH, C, I, F, O, U, size, ec != 0, forget_bias); } else { int grid = (size >> 5) + ((size & 31) != 0); LSTM4_Backward< B, A,float ><<<grid,32,0,stream>>>(dc, di, df, doo, du, ec, eh, c, i, f, o, u, size, ec != 0, forget_bias); } return true; } template bool LSTM_Gates_Forward <float,float4>(CUstream stream, float* c_next, float* h_next, const float* c_prev, const float* h_prev, const float* bias, float forget_bias, int N, int K); template bool 
LSTM_Gates_Forward <ehalf,ehalf4>(CUstream stream, ehalf* c_next, ehalf* h_next, const ehalf* c_prev, const ehalf* h_prev, const float* bias, float forget_bias, int N, int K); template bool LSTM_Gates_Forward <bhalf,bhalf4>(CUstream stream, bhalf* c_next, bhalf* h_next, const bhalf* c_prev, const bhalf* h_prev, const float* bias, float forget_bias, int N, int K); template bool LSTM_Gates_Backward<float,float,float4,float4>(CUstream stream, float* dc, float* dh, const float* ec, const float* eh, const float* c_prev, const float* h_prev, const float* bias, int N, int K, float forget_bias); template bool LSTM_Gates_Backward<ehalf,ehalf,ehalf4,ehalf4>(CUstream stream, ehalf* dc, ehalf* dh, const ehalf* ec, const ehalf* eh, const ehalf* c_prev, const ehalf* h_prev, const float* bias, int N, int K, float forget_bias); //template bool LSTM_Gates_Backward<float,ehalf,float4,ehalf4>(CUstream stream, float* dc, float* dh, const float* ec, const float* eh, const ehalf* c_prev, const ehalf* h_prev, const float* bias, int N, int K, float forget_bias); template bool LSTM_Gates_Backward<bhalf,bhalf,bhalf4,bhalf4>(CUstream stream, bhalf* dc, bhalf* dh, const bhalf* ec, const bhalf* eh, const bhalf* c_prev, const bhalf* h_prev, const float* bias, int N, int K, float forget_bias); //template bool LSTM_Gates_Backward<float,bhalf,float4,bhalf4>(CUstream stream, float* dc, float* dh, const float* ec, const float* eh, const bhalf* c_prev, const bhalf* h_prev, const float* bias, int N, int K, float forget_bias); template bool LSTM4_Gates_Forward <float,float4>(CUstream stream, float* c_next, float* h_next, const float* c, const float* i, const float* f, const float* o, const float* u, float forget_bias, int N, int K); template bool LSTM4_Gates_Forward <ehalf,ehalf4>(CUstream stream, ehalf* c_next, ehalf* h_next, const ehalf* c, const ehalf* i, const ehalf* f, const ehalf* o, const ehalf* u, float forget_bias, int N, int K); template bool LSTM4_Gates_Forward <bhalf,bhalf4>(CUstream stream, bhalf* c_next, bhalf* h_next, const bhalf* c, const bhalf* i, const bhalf* f, const bhalf* o, const bhalf* u, float forget_bias, int N, int K); template bool LSTM4_Gates_Backward<float,float,float4,float4>(CUstream stream, float* dc, float* di, float* df, float* doo, float* du, const float* ec, const float* eh, const float* c, const float* i, const float* f, const float* o, const float* u, int N, int K, float forget_bias); template bool LSTM4_Gates_Backward<ehalf,ehalf,ehalf4,ehalf4>(CUstream stream, ehalf* dc, ehalf* di, ehalf* df, ehalf* doo, ehalf* du, const ehalf* ec, const ehalf* eh, const ehalf* c, const ehalf* i, const ehalf* f, const ehalf* o, const ehalf* u, int N, int K, float forget_bias); //template bool LSTM4_Gates_Backward<float,ehalf,float4,ehalf4>(CUstream stream, float* dc, float* di, float* df, float* doo, float* du, const float* ec, const float* eh, const ehalf* c, const ehalf* i, const ehalf* f, const ehalf* o, const ehalf* u, int N, int K, float forget_bias); template bool LSTM4_Gates_Backward<bhalf,bhalf,bhalf4,bhalf4>(CUstream stream, bhalf* dc, bhalf* di, bhalf* df, bhalf* doo, bhalf* du, const bhalf* ec, const bhalf* eh, const bhalf* c, const bhalf* i, const bhalf* f, const bhalf* o, const bhalf* u, int N, int K, float forget_bias); //template bool LSTM4_Gates_Backward<float,bhalf,float4,bhalf4>(CUstream stream, float* dc, float* di, float* df, float* doo, float* du, const float* ec, const float* eh, const bhalf* c, const bhalf* i, const bhalf* f, const bhalf* o, const bhalf* u, int N, int K, float 
forget_bias); template <typename T, typename V> __global__ void __launch_bounds__(32) Split4( T* Z0, T* Z1, T* Z2, T* Z3, const T* __restrict__ X, int K, int K4) { int tid = threadIdx.x; int k = blockIdx.x; int n = blockIdx.y; int k4 = k*32 + tid; int i0 = n*K + k4; int i1 = i0 + K4; int i2 = i1 + K4; int i3 = i2 + K4; int z = n*K4 + k4; bool b = k4 < K4; V x0 = load(X, i0, b); V x1 = load(X, i1, b); V x2 = load(X, i2, b); V x3 = load(X, i3, b); store(Z0, x0, z, b); store(Z1, x1, z, b); store(Z2, x2, z, b); store(Z3, x3, z, b); } template <typename T, typename V> __global__ void __launch_bounds__(32) Concat4( T* DX, const T* __restrict__ DZ0, const T* __restrict__ DZ1, const T* __restrict__ DZ2, const T* __restrict__ DZ3, int K, int K4) { int tid = threadIdx.x; int k = blockIdx.x; int n = blockIdx.y; int k4 = k*32 + tid; int i0 = n*K + k4; int i1 = i0 + K4; int i2 = i1 + K4; int i3 = i2 + K4; int z = n*K4 + k4; bool b = k4 < K4; V dx0 = load(DZ0, z, b); V dx1 = load(DZ1, z, b); V dx2 = load(DZ2, z, b); V dx3 = load(DZ3, z, b); store(DX, dx0, i0, b); store(DX, dx1, i1, b); store(DX, dx2, i2, b); store(DX, dx3, i3, b); } template <typename T, typename V> bool Split4_Forward(CUstream stream, T* z0, T* z1, T* z2, T* z3, const T* x, int N, int K) { int K4 = K >> 2; if ((K4 & 3) == 0) { K >>= 2; // use vector loads K4 >>= 2; dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); V* Z0 = (V*)z0; V* Z1 = (V*)z1; V* Z2 = (V*)z2; V* Z3 = (V*)z3; const V* X = (const V*)x; Split4<V,float4><<<grid,32,0,stream>>>(Z0, Z1, Z2, Z3, X, K, K4); } else { dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); Split4<T,float ><<<grid,32,0,stream>>>(z0, z1, z2, z3, x, K, K4); } return true; } template <typename T, typename V> bool Concat4_Forward(CUstream stream, T* dx, const T* z0, const T* z1, const T* z2, const T* z3, int N, int K) { int K4 = K >> 2; if ((K4 & 3) == 0) { K >>= 2; // use vector loads K4 >>= 2; dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); V* DX = (V*)dx; const V* Z0 = (const V*)z0; const V* Z1 = (const V*)z1; const V* Z2 = (const V*)z2; const V* Z3 = (const V*)z3; Concat4<V,float4><<<grid,32,0,stream>>>(DX, Z0, Z1, Z2, Z3, K, K4); } else { dim3 grid((K4 >> 5) + ((K4 & 31) != 0), N); Concat4<T,float ><<<grid,32,0,stream>>>(dx, z0, z1, z2, z3, K, K4); } return true; } template bool Split4_Forward <float,float4>(CUstream stream, float* z0, float* z1, float* z2, float* z3, const float* x, int N, int K); template bool Split4_Forward <ehalf,ehalf4>(CUstream stream, ehalf* z0, ehalf* z1, ehalf* z2, ehalf* z3, const ehalf* x, int N, int K); template bool Split4_Forward <bhalf,bhalf4>(CUstream stream, bhalf* z0, bhalf* z1, bhalf* z2, bhalf* z3, const bhalf* x, int N, int K); template bool Concat4_Forward<float,float4>(CUstream stream, float* dx, const float* z0, const float* z1, const float* z2, const float* z3, int N, int K); template bool Concat4_Forward<ehalf,ehalf4>(CUstream stream, ehalf* dx, const ehalf* z0, const ehalf* z1, const ehalf* z2, const ehalf* z3, int N, int K); template bool Concat4_Forward<bhalf,bhalf4>(CUstream stream, bhalf* dx, const bhalf* z0, const bhalf* z1, const bhalf* z2, const bhalf* z3, int N, int K); // mean = mean(x, axis=1) // std = std(x, axis=1) // cutoff = mean + alpha*std // y = fmaxf(x, cutoff) - cutoff; template <typename T, typename V, int THREADS> __global__ void __launch_bounds__(THREADS) sparse_relu_forward( T* Y, const T* __restrict__ X, float alpha, uint K, float rcpK) { int tid = threadIdx.x; int n = blockIdx.x; int offset = n*K + tid; // Mean const T* X1 = X + offset; V v_mean1, 
v_mean2; ew_zero(v_mean1); ew_zero(v_mean2); #pragma unroll 4 for (int k = tid; k < K; k += THREADS) { V x = load(X1); v_mean1 = ew_add(v_mean1, x); v_mean2 = ew_add(v_mean2, ew_sqr(x)); X1 += THREADS; } float2 mean; mean.x = ew_sum(v_mean1) * rcpK; mean.y = ew_sum(v_mean2) * rcpK; // reduce within warp for (int i = 16; i > 0; i >>= 1) { mean.x += shfl_xor(mean.x, i); mean.y += shfl_xor(mean.y, i); } // if using more than 1 warp, further reduced with shared memory if (THREADS > 32) { __shared__ float2 Share[THREADS/32]; // first thread of each warp store to shared if ((tid & 31) == 0) Share[tid/32] = mean; __syncthreads(); if (tid < THREADS/32) { // first warp loads all prior reductions mean = Share[tid]; // reduce within this first warp for (int i = THREADS/64; i > 0; i >>= 1) { mean.x += shfl_xor(mean.x, i); mean.y += shfl_xor(mean.y, i); } // outputs final reduction to shared if (tid == 0) Share[0] = mean; } __syncthreads(); // broadcast result to all threads mean = Share[0]; } // var = avg(x**2) - avg(x)**2 // std = sqrt(var) float std = sqrtf(mean.y - mean.x*mean.x); // Norm/Gain/Bias X += offset; Y += offset; for (int k = tid; k < K; k += THREADS) { float cutoff = mean.x + alpha*std; V x = load(X); V y = ew_sub(ew_maximum(x, cutoff), cutoff); store(Y, ew_relu(y), 0, true); X += THREADS; Y += THREADS; } } template <typename T, typename V> bool SparseReluForward(CUstream stream, T* y, const T* x, float alpha, uint K, uint N) { dim3 grid(N, 1, 1); float rcpK = 1.0f / (float)K; if ((K & 3) == 0) { K >>= 2; // use vector loads V* Y = (V*)y; const V* X = (const V*)x; // if (K >= 1024) // sparse_relu_forward<V,float4,1024><<<grid,1024,0,stream>>>(Y, X, alpha, K, rcpK); if (K >= 256) sparse_relu_forward<V,float4, 256><<<grid, 256,0,stream>>>(Y, X, alpha, K, rcpK); else sparse_relu_forward<V,float4, 64><<<grid, 64,0,stream>>>(Y, X, alpha, K, rcpK); } else { // if (K >= 1024) // sparse_relu_forward<T,float ,1024><<<grid,1024,0,stream>>>(y, x, alpha, K, rcpK); if (K >= 256) sparse_relu_forward<T,float , 256><<<grid, 256,0,stream>>>(y, x, alpha, K, rcpK); else sparse_relu_forward<T,float , 64><<<grid, 64,0,stream>>>(y, x, alpha, K, rcpK); } return true; // TODO } template bool SparseReluForward<float,float4>(CUstream stream, float* y, const float* x,float alpha, uint K, uint N); template bool SparseReluForward<ehalf,ehalf4>(CUstream stream, ehalf* y, const ehalf* x,float alpha, uint K, uint N); template bool SparseReluForward<bhalf,bhalf4>(CUstream stream, bhalf* y, const bhalf* x,float alpha, uint K, uint N); #endif
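// Illustrative sketch: the per-element gate gradients computed by
// LSTM_Backward above, written out for one element. It assumes the
// ew_op_gpu.h helpers follow the usual derivative forms, i.e.
// ew_sig_grad(g, s) = g*s*(1-s) and ew_tanh_grad(g, t) = g*(1 - t*t).
// Inputs are the saved pre-activation gates (i, u, f, o), the previous cell
// state c, and the incoming errors eh (w.r.t. h_next) and ec (w.r.t. c_next);
// outputs correspond to what the kernel stores into DC and into DH in
// [dI, dU, dF, dO] block order.
#include <cmath>

struct LstmGrads { float dC, dI, dU, dF, dO; };

static LstmGrads lstm_cell_backward_reference(float i, float u, float f, float o,
                                              float c, float eh, float ec,
                                              float forget_bias) {
  const float sig_i = 1.f / (1.f + std::exp(-i));
  const float sig_f = 1.f / (1.f + std::exp(-(f + forget_bias)));
  const float sig_o = 1.f / (1.f + std::exp(-o));
  const float tan_u = std::tanh(u);
  const float c_act = std::tanh(sig_f * c + sig_i * tan_u);  // tanh(c_next)

  const float dC = (eh * sig_o) * (1.f - c_act * c_act) + ec;  // back through tanh(c_next)
  LstmGrads g;
  g.dI = (dC * tan_u) * sig_i * (1.f - sig_i);
  g.dF = (dC * c)     * sig_f * (1.f - sig_f);
  g.dO = (eh * c_act) * sig_o * (1.f - sig_o);
  g.dU = (dC * sig_i) * (1.f - tan_u * tan_u);
  g.dC = dC * sig_f;  // error propagated to c_prev
  return g;
}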
018725e7dd76931d58d933be1c8ed39f12331062.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * upscale.cu * * Author: Alan_Huang */ #include "caffe/util/upscale.hpp" #include "hip/device_functions.h" namespace caffe { /* * nthreads should be n*c*4 */ template <typename IterpolateAction, typename Dtype> __global__ void kernel_upscale_2x_corner(const int nthreads, Dtype* src, const int src_h, const int src_w, Dtype* dst) { const int src_spatial_dim = src_h * src_w; const int dst_spatial_dim = src_spatial_dim * 4; const int dst_h = src_h * 2; const int dst_w = src_w * 2; int dst_offset[] = {0, dst_w - 1, dst_w * (dst_h -1), dst_w * dst_h - 1}; int src_offset[] = {0, src_w - 1, src_w * (src_h -1), src_w * src_h - 1}; CUDA_KERNEL_LOOP(index, nthreads) { int c_id = index / 4; IterpolateAction::DoEltwise(src, c_id * src_spatial_dim + src_offset[index % 4], dst, c_id * dst_spatial_dim + dst_offset[index % 4]); } } /* * upscale_all_border_horizontal_lines. * nthreads should be n*c*(dst_w -2) * 2 */ template <typename IterpolateAction, typename Dtype> __global__ void kernel_upscale_2x_border_line_horizontal(const int nthreads, Dtype* src, const int src_h, const int src_w, Dtype* dst) { const int src_spatial_dim = src_h * src_w; const int dst_spatial_dim = src_spatial_dim * 4; const int dst_h = src_h * 2; const int dst_w = src_w * 2; int dst_offset[] = {0, dst_w * (dst_h -1)}; int src_offset[] = {0, src_w * (src_h -1)}; __shared__ Dtype zero; CUDA_KERNEL_LOOP(index, nthreads) { int c_id = index / ((dst_w -2) * 2); int line_id = (index / (dst_w -2)) % 2; int dst_w_id = 1 + (index % (dst_w -2)); Dtype* src_p11 = src + c_id * src_spatial_dim + src_offset[line_id] + (dst_w_id-1)/2; Dtype* dst_p = dst + c_id * dst_spatial_dim + dst_offset[line_id] + dst_w_id; IterpolateAction::template Do<Dtype, 1>(src_p11, src_p11 + 1, &zero, &zero, dst_p, 256/4 + 128 * ((dst_w_id-1)%2), 0); } } /* * upscale_all_border_horizontal_lines. * nthreads should be n*c*(dst_h -2) * 2 */ template <typename IterpolateAction, typename Dtype> __global__ void kernel_upscale_2x_border_line_vertical(const int nthreads, Dtype* src, const int src_h, const int src_w, Dtype* dst) { const int src_spatial_dim = src_h * src_w; const int dst_spatial_dim = src_spatial_dim * 4; const int dst_h = src_h * 2; const int dst_w = src_w * 2; int dst_offset[] = {0, dst_w - 1}; int src_offset[] = {0, src_w - 1}; __shared__ Dtype zero ; CUDA_KERNEL_LOOP(index, nthreads) { int c_id = index / ((dst_h -2) * 2); int id_inside_c = index % ((dst_h -2) * 2); int dst_h_id = id_inside_c / 2 + 1; int col_id = id_inside_c % 2 ; Dtype* src_p11 = src + c_id * src_spatial_dim + src_offset[col_id] + (dst_h_id-1)/2 * src_w; Dtype* dst_p = dst + c_id * dst_spatial_dim + dst_offset[col_id] + dst_h_id * dst_w; IterpolateAction::template Do<Dtype, 1>(src_p11, &zero, src_p11 + src_w, &zero, dst_p, 0, 256/4 + 128 * ((dst_h_id-1)%2)); } } /* * upscale_all_border_horizontal_lines. 
* nthreads should be n*c*(dst_h -2) * (dst_w -2) */ template <typename IterpolateAction, typename Dtype> __global__ void kernel_upscale_2x_lines(const int nthreads, Dtype* src, const int src_h, const int src_w, Dtype* dst) { const int src_spatial_dim = src_h * src_w; const int dst_spatial_dim = src_spatial_dim * 4; const int dst_h = src_h * 2; const int dst_w = src_w * 2; CUDA_KERNEL_LOOP(index, nthreads) { int c_id = index / ((dst_h -2) * (dst_w -2)); int id_inside_c = index % ((dst_h -2) * (dst_w -2)); int dst_h_id = 1 + id_inside_c / (dst_w -2); int dst_w_id = 1 + id_inside_c % (dst_w -2); Dtype* src_p11 = src + c_id * src_spatial_dim + (dst_h_id-1)/2 * src_w + (dst_w_id-1)/2; Dtype* dst_p = dst + c_id * dst_spatial_dim + dst_h_id * dst_w + dst_w_id; IterpolateAction::template Do<Dtype, 1>(src_p11, src_p11 + 1, src_p11 + src_w, src_p11 + src_w + 1, dst_p, 256/4 + 128 * ((dst_w_id-1)%2), 256/4 + 128 * ((dst_h_id-1)%2)); } } template <typename IterpolateAction, typename Dtype> void upscale_2x_gpu(Dtype* src_data, const int src_n, const int src_c, const int src_h, const int src_w, Dtype* dst_data) { int total_channel_num = src_n * src_c; int dst_h = src_h * 2; int dst_w = src_w * 2; hipLaunchKernelGGL(( kernel_upscale_2x_corner<IterpolateAction, Dtype>) , dim3(CAFFE_GET_BLOCKS(total_channel_num * 4)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, total_channel_num * 4, src_data, src_h, src_w, dst_data); if (dst_w -2 > 0) { hipLaunchKernelGGL(( kernel_upscale_2x_border_line_horizontal<IterpolateAction, Dtype>) , dim3(CAFFE_GET_BLOCKS(total_channel_num * (dst_w -2) * 2)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, total_channel_num * (dst_w -2) * 2, src_data, src_h, src_w, dst_data); } if (dst_h -2 > 0) { hipLaunchKernelGGL(( kernel_upscale_2x_border_line_vertical<IterpolateAction, Dtype>) , dim3(CAFFE_GET_BLOCKS(total_channel_num * (dst_h -2) * 2)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, total_channel_num * (dst_h -2) * 2, src_data, src_h, src_w, dst_data); } if (dst_w -2 > 0 && dst_h -2 > 0) { hipLaunchKernelGGL(( kernel_upscale_2x_lines<IterpolateAction, Dtype>) , dim3(CAFFE_GET_BLOCKS(total_channel_num * (dst_h -2) * (dst_w -2))), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, total_channel_num * (dst_h -2) * (dst_w -2), src_data, src_h, src_w, dst_data); } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void Blob2xUpscaler<Dtype>::Forward_gpu(const Blob<Dtype>& src_blob, Blob<Dtype>& dst_blob) { Blob2xUpscaler<Dtype>::Check(src_blob, dst_blob); int last_dim = src_blob.shape().size() - 1; int total_channel_num = src_blob.count(0, src_blob.shape().size() - 2); int src_spatial_dim = src_blob.count(last_dim - 1); int dst_spatial_dim = dst_blob.count(last_dim - 1); int src_h = src_blob.shape(last_dim - 1); int src_w = src_blob.shape(last_dim); Dtype* src_data = const_cast<Dtype*>(src_blob.gpu_data()); Dtype* dst_data = dst_blob.mutable_gpu_data(); upscale_2x_gpu<PointInterpolateForward, Dtype>(src_data, total_channel_num, 1, src_h, src_w, dst_data); } template <typename Dtype> void Blob2xUpscaler<Dtype>::Backward_gpu(const Blob<Dtype>& dst_blob, Blob<Dtype>& src_blob) { Blob2xUpscaler<Dtype>::Check(src_blob, dst_blob); int last_dim = src_blob.shape().size() - 1; int total_channel_num = src_blob.count(0, src_blob.shape().size() - 2); int src_spatial_dim = src_blob.count(last_dim - 1); int dst_spatial_dim = dst_blob.count(last_dim - 1); int src_h = src_blob.shape(last_dim - 1); int src_w = src_blob.shape(last_dim); Dtype* dst_data = const_cast<Dtype*>(dst_blob.gpu_diff()); Dtype* src_data = 
src_blob.mutable_gpu_diff(); upscale_2x_gpu<PointInterpolateBackward, Dtype>(src_data, total_channel_num, 1, src_h, src_w, dst_data); } template void Blob2xUpscaler<float>::Forward_gpu(const Blob<float>& src_blob, Blob<float>& dst_blob); template void Blob2xUpscaler<double>::Forward_gpu(const Blob<double>& src_blob, Blob<double>& dst_blob); template void Blob2xUpscaler<float>::Backward_gpu(const Blob<float>& dst_blob, Blob<float>& src_blob); template void Blob2xUpscaler<double>::Backward_gpu(const Blob<double>& dst_blob, Blob<double>& src_blob); } // namespace caffe
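// Illustrative sketch of how upscale_2x_gpu above splits a 2x upscale into
// four launches (corners, horizontal borders, vertical borders, interior) and
// how the interior kernel decodes a flat thread index. The fixed-point
// weights (64 or 192 out of 256) are only computed and forwarded here; their
// exact use is defined by IterpolateAction::Do in caffe/util/upscale.hpp.
#include <cstdio>

static void print_upscale_2x_work(int n, int c, int src_h, int src_w) {
  const int channels = n * c;
  const int dst_h = src_h * 2, dst_w = src_w * 2;
  printf("corners:            %d threads\n", channels * 4);
  printf("horizontal borders: %d threads\n", channels * (dst_w - 2) * 2);
  printf("vertical borders:   %d threads\n", channels * (dst_h - 2) * 2);
  printf("interior:           %d threads\n", channels * (dst_h - 2) * (dst_w - 2));

  // Index decomposition used by kernel_upscale_2x_lines for one sample thread.
  const int index = 0;  // first interior thread
  const int id_inside_c = index % ((dst_h - 2) * (dst_w - 2));
  const int dst_h_id = 1 + id_inside_c / (dst_w - 2);
  const int dst_w_id = 1 + id_inside_c % (dst_w - 2);
  const int src_y = (dst_h_id - 1) / 2, src_x = (dst_w_id - 1) / 2;
  const int wx = 256 / 4 + 128 * ((dst_w_id - 1) % 2);  // 64 or 192
  const int wy = 256 / 4 + 128 * ((dst_h_id - 1) % 2);
  printf("dst(%d,%d) reads src(%d,%d) with weights x=%d/256, y=%d/256\n",
         dst_h_id, dst_w_id, src_y, src_x, wx, wy);
}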
018725e7dd76931d58d933be1c8ed39f12331062.cu
/* * upscale.cu * * Author: Alan_Huang */ #include "caffe/util/upscale.hpp" #include "device_functions.h" namespace caffe { /* * nthreads should be n*c*4 */ template <typename IterpolateAction, typename Dtype> __global__ void kernel_upscale_2x_corner(const int nthreads, Dtype* src, const int src_h, const int src_w, Dtype* dst) { const int src_spatial_dim = src_h * src_w; const int dst_spatial_dim = src_spatial_dim * 4; const int dst_h = src_h * 2; const int dst_w = src_w * 2; int dst_offset[] = {0, dst_w - 1, dst_w * (dst_h -1), dst_w * dst_h - 1}; int src_offset[] = {0, src_w - 1, src_w * (src_h -1), src_w * src_h - 1}; CUDA_KERNEL_LOOP(index, nthreads) { int c_id = index / 4; IterpolateAction::DoEltwise(src, c_id * src_spatial_dim + src_offset[index % 4], dst, c_id * dst_spatial_dim + dst_offset[index % 4]); } } /* * upscale_all_border_horizontal_lines. * nthreads should be n*c*(dst_w -2) * 2 */ template <typename IterpolateAction, typename Dtype> __global__ void kernel_upscale_2x_border_line_horizontal(const int nthreads, Dtype* src, const int src_h, const int src_w, Dtype* dst) { const int src_spatial_dim = src_h * src_w; const int dst_spatial_dim = src_spatial_dim * 4; const int dst_h = src_h * 2; const int dst_w = src_w * 2; int dst_offset[] = {0, dst_w * (dst_h -1)}; int src_offset[] = {0, src_w * (src_h -1)}; __shared__ Dtype zero; CUDA_KERNEL_LOOP(index, nthreads) { int c_id = index / ((dst_w -2) * 2); int line_id = (index / (dst_w -2)) % 2; int dst_w_id = 1 + (index % (dst_w -2)); Dtype* src_p11 = src + c_id * src_spatial_dim + src_offset[line_id] + (dst_w_id-1)/2; Dtype* dst_p = dst + c_id * dst_spatial_dim + dst_offset[line_id] + dst_w_id; IterpolateAction::template Do<Dtype, 1>(src_p11, src_p11 + 1, &zero, &zero, dst_p, 256/4 + 128 * ((dst_w_id-1)%2), 0); } } /* * upscale_all_border_horizontal_lines. * nthreads should be n*c*(dst_h -2) * 2 */ template <typename IterpolateAction, typename Dtype> __global__ void kernel_upscale_2x_border_line_vertical(const int nthreads, Dtype* src, const int src_h, const int src_w, Dtype* dst) { const int src_spatial_dim = src_h * src_w; const int dst_spatial_dim = src_spatial_dim * 4; const int dst_h = src_h * 2; const int dst_w = src_w * 2; int dst_offset[] = {0, dst_w - 1}; int src_offset[] = {0, src_w - 1}; __shared__ Dtype zero ; CUDA_KERNEL_LOOP(index, nthreads) { int c_id = index / ((dst_h -2) * 2); int id_inside_c = index % ((dst_h -2) * 2); int dst_h_id = id_inside_c / 2 + 1; int col_id = id_inside_c % 2 ; Dtype* src_p11 = src + c_id * src_spatial_dim + src_offset[col_id] + (dst_h_id-1)/2 * src_w; Dtype* dst_p = dst + c_id * dst_spatial_dim + dst_offset[col_id] + dst_h_id * dst_w; IterpolateAction::template Do<Dtype, 1>(src_p11, &zero, src_p11 + src_w, &zero, dst_p, 0, 256/4 + 128 * ((dst_h_id-1)%2)); } } /* * upscale_all_border_horizontal_lines. 
* nthreads should be n*c*(dst_h -2) * (dst_w -2) */ template <typename IterpolateAction, typename Dtype> __global__ void kernel_upscale_2x_lines(const int nthreads, Dtype* src, const int src_h, const int src_w, Dtype* dst) { const int src_spatial_dim = src_h * src_w; const int dst_spatial_dim = src_spatial_dim * 4; const int dst_h = src_h * 2; const int dst_w = src_w * 2; CUDA_KERNEL_LOOP(index, nthreads) { int c_id = index / ((dst_h -2) * (dst_w -2)); int id_inside_c = index % ((dst_h -2) * (dst_w -2)); int dst_h_id = 1 + id_inside_c / (dst_w -2); int dst_w_id = 1 + id_inside_c % (dst_w -2); Dtype* src_p11 = src + c_id * src_spatial_dim + (dst_h_id-1)/2 * src_w + (dst_w_id-1)/2; Dtype* dst_p = dst + c_id * dst_spatial_dim + dst_h_id * dst_w + dst_w_id; IterpolateAction::template Do<Dtype, 1>(src_p11, src_p11 + 1, src_p11 + src_w, src_p11 + src_w + 1, dst_p, 256/4 + 128 * ((dst_w_id-1)%2), 256/4 + 128 * ((dst_h_id-1)%2)); } } template <typename IterpolateAction, typename Dtype> void upscale_2x_gpu(Dtype* src_data, const int src_n, const int src_c, const int src_h, const int src_w, Dtype* dst_data) { int total_channel_num = src_n * src_c; int dst_h = src_h * 2; int dst_w = src_w * 2; kernel_upscale_2x_corner<IterpolateAction, Dtype> <<< CAFFE_GET_BLOCKS(total_channel_num * 4), CAFFE_CUDA_NUM_THREADS >>> (total_channel_num * 4, src_data, src_h, src_w, dst_data); if (dst_w -2 > 0) { kernel_upscale_2x_border_line_horizontal<IterpolateAction, Dtype> <<< CAFFE_GET_BLOCKS(total_channel_num * (dst_w -2) * 2), CAFFE_CUDA_NUM_THREADS >>> (total_channel_num * (dst_w -2) * 2, src_data, src_h, src_w, dst_data); } if (dst_h -2 > 0) { kernel_upscale_2x_border_line_vertical<IterpolateAction, Dtype> <<< CAFFE_GET_BLOCKS(total_channel_num * (dst_h -2) * 2), CAFFE_CUDA_NUM_THREADS >>> (total_channel_num * (dst_h -2) * 2, src_data, src_h, src_w, dst_data); } if (dst_w -2 > 0 && dst_h -2 > 0) { kernel_upscale_2x_lines<IterpolateAction, Dtype> <<< CAFFE_GET_BLOCKS(total_channel_num * (dst_h -2) * (dst_w -2)), CAFFE_CUDA_NUM_THREADS >>> (total_channel_num * (dst_h -2) * (dst_w -2), src_data, src_h, src_w, dst_data); } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void Blob2xUpscaler<Dtype>::Forward_gpu(const Blob<Dtype>& src_blob, Blob<Dtype>& dst_blob) { Blob2xUpscaler<Dtype>::Check(src_blob, dst_blob); int last_dim = src_blob.shape().size() - 1; int total_channel_num = src_blob.count(0, src_blob.shape().size() - 2); int src_spatial_dim = src_blob.count(last_dim - 1); int dst_spatial_dim = dst_blob.count(last_dim - 1); int src_h = src_blob.shape(last_dim - 1); int src_w = src_blob.shape(last_dim); Dtype* src_data = const_cast<Dtype*>(src_blob.gpu_data()); Dtype* dst_data = dst_blob.mutable_gpu_data(); upscale_2x_gpu<PointInterpolateForward, Dtype>(src_data, total_channel_num, 1, src_h, src_w, dst_data); } template <typename Dtype> void Blob2xUpscaler<Dtype>::Backward_gpu(const Blob<Dtype>& dst_blob, Blob<Dtype>& src_blob) { Blob2xUpscaler<Dtype>::Check(src_blob, dst_blob); int last_dim = src_blob.shape().size() - 1; int total_channel_num = src_blob.count(0, src_blob.shape().size() - 2); int src_spatial_dim = src_blob.count(last_dim - 1); int dst_spatial_dim = dst_blob.count(last_dim - 1); int src_h = src_blob.shape(last_dim - 1); int src_w = src_blob.shape(last_dim); Dtype* dst_data = const_cast<Dtype*>(dst_blob.gpu_diff()); Dtype* src_data = src_blob.mutable_gpu_diff(); upscale_2x_gpu<PointInterpolateBackward, Dtype>(src_data, total_channel_num, 1, src_h, src_w, dst_data); } template void 
Blob2xUpscaler<float>::Forward_gpu(const Blob<float>& src_blob, Blob<float>& dst_blob); template void Blob2xUpscaler<double>::Forward_gpu(const Blob<double>& src_blob, Blob<double>& dst_blob); template void Blob2xUpscaler<float>::Backward_gpu(const Blob<float>& dst_blob, Blob<float>& src_blob); template void Blob2xUpscaler<double>::Backward_gpu(const Blob<double>& dst_blob, Blob<double>& src_blob); } // namespace caffe
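// Illustrative sketch of the only systematic difference between the .cu and
// .hip files in this pair: the kernel-launch syntax (plus the runtime
// header). Grid/block sizes, shared-memory bytes, stream and kernel arguments
// map one-to-one. The trivial kernel below is a stand-in so the example is
// self-contained; it is not one of the kernels above.
#include <cuda_runtime.h>

__global__ void copy_dummy(float* dst, const float* src, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) dst[i] = src[i];  // placeholder body
}

void launch_both_dialects(float* dst, const float* src, int n, cudaStream_t stream) {
  dim3 grid((n + 255) / 256), block(256);

  // CUDA form, as it appears in the .cu file:
  copy_dummy<<<grid, block, 0, stream>>>(dst, src, n);

  // HIP form emitted by hipify for the .hip file (shown for comparison):
  //   hipLaunchKernelGGL(copy_dummy, grid, block, 0 /*sharedMemBytes*/,
  //                      stream, dst, src, n);
}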
b759793a381067eca2be3e96c3f0e5ae2f8ac331.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal d -> s */ #include "common_magma.h" #include "commonblas_d.h" /* * daxpy computes c += alpha*b, where b and c are 16-element vectors. */ static __device__ void daxpy( double alpha, const double* __restrict__ b, double* __restrict__ c ) { c[0] += alpha * b[0]; c[1] += alpha * b[1]; c[2] += alpha * b[2]; c[3] += alpha * b[3]; c[4] += alpha * b[4]; c[5] += alpha * b[5]; c[6] += alpha * b[6]; c[7] += alpha * b[7]; c[8] += alpha * b[8]; c[9] += alpha * b[9]; c[10] += alpha * b[10]; c[11] += alpha * b[11]; c[12] += alpha * b[12]; c[13] += alpha * b[13]; c[14] += alpha * b[14]; c[15] += alpha * b[15]; } /** Purpose: -------- This routine computes C = alpha * A*B + beta * C B is put into shared memory Parameters Used: blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4 This kernel is for matrices divisible by the corresponding blocking sizes. @ingroup magma_dblas3 ********************************************************************/ __global__ void dgemm_kernel_N_N_64_16_16_16_4_special( double* __restrict__ C, const double* __restrict__ A, const double* __restrict__ B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta ) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * 64; const int iby = blockIdx.y * 16; const int idt = ty * 16 + tx; B += tx + __mul24(iby+ty, ldb); A += ibx + idt; C += ibx + idt + __mul24(iby, ldc); const double *Bend = B + k; double Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; m = 2*lda; n = 3*lda; do { //double Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]}; double Ab[4] = {A[0], A[lda], A[m], A[n]}; __shared__ double Bb[16][17]; Bb[tx][ty+0 ] = B[0]; Bb[tx][ty+4 ] = B[4*ldb]; Bb[tx][ty+8 ] = B[8*ldb]; Bb[tx][ty+12] = B[12*ldb]; __syncthreads(); A += 4 * lda; daxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0]; daxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[lda]; daxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[m]; daxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[n]; A += 4 * lda; daxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0]; daxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[lda]; daxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[m]; daxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[n]; A += 4 * lda; daxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0]; daxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[lda]; daxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[m]; daxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[n]; A += 4 * lda; daxpy( Ab[0], &Bb[12][0], Cb ); daxpy( Ab[1], &Bb[13][0], Cb ); daxpy( Ab[2], &Bb[14][0], Cb ); daxpy( Ab[3], &Bb[15][0], Cb ); B += 16; __syncthreads(); } while (B < Bend); #pragma unroll 16 for(int i=0; i < 16; i++) { C[0] = alpha * Cb[i] + beta * C[0]; C += ldc; } } extern "C" void magmablas_dgemm_N_N_64_16_16_16_4_special( double *C, const double *A, const double *B, magma_int_t m, magma_int_t n, magma_int_t k, magma_int_t lda, magma_int_t ldb, magma_int_t ldc, double alpha, double beta ) { dim3 threads( 16, 4 ); dim3 grid( m/64, n/16 ); hipLaunchKernelGGL(( dgemm_kernel_N_N_64_16_16_16_4_special), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb, ldc, alpha, beta ); }
b759793a381067eca2be3e96c3f0e5ae2f8ac331.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal d -> s */ #include "common_magma.h" #include "commonblas_d.h" /* * daxpy computes c += alpha*b, where b and c are 16-element vectors. */ static __device__ void daxpy( double alpha, const double* __restrict__ b, double* __restrict__ c ) { c[0] += alpha * b[0]; c[1] += alpha * b[1]; c[2] += alpha * b[2]; c[3] += alpha * b[3]; c[4] += alpha * b[4]; c[5] += alpha * b[5]; c[6] += alpha * b[6]; c[7] += alpha * b[7]; c[8] += alpha * b[8]; c[9] += alpha * b[9]; c[10] += alpha * b[10]; c[11] += alpha * b[11]; c[12] += alpha * b[12]; c[13] += alpha * b[13]; c[14] += alpha * b[14]; c[15] += alpha * b[15]; } /** Purpose: -------- This routine computes C = alpha * A*B + beta * C B is put into shared memory Parameters Used: blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4 This kernel is for matrices divisible by the corresponding blocking sizes. @ingroup magma_dblas3 ********************************************************************/ __global__ void dgemm_kernel_N_N_64_16_16_16_4_special( double* __restrict__ C, const double* __restrict__ A, const double* __restrict__ B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta ) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * 64; const int iby = blockIdx.y * 16; const int idt = ty * 16 + tx; B += tx + __mul24(iby+ty, ldb); A += ibx + idt; C += ibx + idt + __mul24(iby, ldc); const double *Bend = B + k; double Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; m = 2*lda; n = 3*lda; do { //double Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]}; double Ab[4] = {A[0], A[lda], A[m], A[n]}; __shared__ double Bb[16][17]; Bb[tx][ty+0 ] = B[0]; Bb[tx][ty+4 ] = B[4*ldb]; Bb[tx][ty+8 ] = B[8*ldb]; Bb[tx][ty+12] = B[12*ldb]; __syncthreads(); A += 4 * lda; daxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0]; daxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[lda]; daxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[m]; daxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[n]; A += 4 * lda; daxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0]; daxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[lda]; daxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[m]; daxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[n]; A += 4 * lda; daxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0]; daxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[lda]; daxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[m]; daxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[n]; A += 4 * lda; daxpy( Ab[0], &Bb[12][0], Cb ); daxpy( Ab[1], &Bb[13][0], Cb ); daxpy( Ab[2], &Bb[14][0], Cb ); daxpy( Ab[3], &Bb[15][0], Cb ); B += 16; __syncthreads(); } while (B < Bend); #pragma unroll 16 for(int i=0; i < 16; i++) { C[0] = alpha * Cb[i] + beta * C[0]; C += ldc; } } extern "C" void magmablas_dgemm_N_N_64_16_16_16_4_special( double *C, const double *A, const double *B, magma_int_t m, magma_int_t n, magma_int_t k, magma_int_t lda, magma_int_t ldb, magma_int_t ldc, double alpha, double beta ) { dim3 threads( 16, 4 ); dim3 grid( m/64, n/16 ); dgemm_kernel_N_N_64_16_16_16_4_special<<< grid, threads, 0, magma_stream >>> ( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta ); }
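// Illustrative sketch: a naive column-major reference for the blocked kernel
// above. dgemm_kernel_N_N_64_16_16_16_4_special computes C = alpha*A*B + beta*C
// for non-transposed A (m x k, leading dimension lda) and B (k x n, leading
// dimension ldb) in BLAS/MAGMA column-major layout. Handy for spot-checking;
// note the "special" kernel is launched with grid(m/64, n/16), so it assumes
// m % 64 == 0 and n % 16 == 0.
static void dgemm_nn_reference(int m, int n, int k,
                               double alpha, const double* A, int lda,
                               const double* B, int ldb,
                               double beta, double* C, int ldc) {
  for (int j = 0; j < n; ++j) {
    for (int i = 0; i < m; ++i) {
      double acc = 0.0;
      for (int p = 0; p < k; ++p)
        acc += A[i + p * lda] * B[p + j * ldb];  // column-major indexing
      C[i + j * ldc] = alpha * acc + beta * C[i + j * ldc];
    }
  }
}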
9a5900e4948391f0726c51912fdfdc677348d0c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include <cfloat> #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/margin_inner_product_layer.hpp" namespace caffe { template <typename Dtype> __global__ void Weight_norm_gpu(int nthreads, const int K_, Dtype* weight) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype sum_sqaure = 0.; for (int i = 0; i < K_; i++) { sum_sqaure += weight[index * K_ + i] * weight[index * K_ + i]; } sum_sqaure = sqrt(sum_sqaure); for (int i = 0; i < K_; i++) { weight[index * K_ + i] = weight[index * K_ + i] / sum_sqaure; } } } template <typename Dtype> __global__ void Compute_bottom_norm_gpu(int nthreads, const int K_, const Dtype* bottom, Dtype* x_norm) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype sum_sqaure = 0.; for (int i = 0; i < K_; i++) { sum_sqaure += bottom[index * K_ + i] * bottom[index * K_ + i]; } x_norm[index] = sqrt(sum_sqaure); } } template <typename Dtype> __global__ void Compute_cos_theta_gpu(int nthreads, const int N_, const Dtype* x_norm, Dtype* cos_theta) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; cos_theta[index] = cos_theta[index] / x_norm[i]; } } template <typename Dtype> __global__ void Compute_sign_1_gpu(int nthreads, const Dtype* cos_theta, Dtype* sign_1) { CUDA_KERNEL_LOOP(index, nthreads) { sign_1[index] = abs(cos_theta[index]) - (Dtype)0.5; } } template <typename Dtype> __global__ void Compute_sign_2_gpu(int nthreads, const Dtype* sign_0, const Dtype* sign_1, Dtype* sign_2) { CUDA_KERNEL_LOOP(index, nthreads) { sign_2[index] = sign_0[index] * ((Dtype)1. + sign_1[index]) - (Dtype)2.; } } template <typename Dtype> __global__ void Compute_sign_3_gpu(int nthreads, const Dtype* sign_0, const Dtype* cos_theta_quadratic, Dtype* sign_3) { CUDA_KERNEL_LOOP(index, nthreads) { sign_3[index] = sign_0[index] * ((Dtype)2. * cos_theta_quadratic[index] - (Dtype)1.); } } template <typename Dtype> __global__ void Compute_sign_4_gpu(int nthreads, const Dtype* sign_0, const Dtype* sign_3, Dtype* sign_4) { CUDA_KERNEL_LOOP(index, nthreads) { sign_4[index] = (Dtype)2. * sign_0[index] + sign_3[index] - (Dtype)3.; } } template <typename Dtype> __global__ void Margin_double_forward_gpu(int nthreads, const int N_, Dtype lambda, const Dtype* label, const Dtype* x_norm, const Dtype* sign_0, const Dtype* cos_theta_quadratic, Dtype* top) { CUDA_KERNEL_LOOP(index, nthreads) { // the label[i]_th top_data const int i = index / N_; const int j = index % N_; const int label_value = static_cast<int>(label[i]); if (label_value == j) { top[index] *= lambda; top[index] += x_norm[i] * ((Dtype)2. * sign_0[index] * cos_theta_quadratic[index] - (Dtype)1.); top[index] /= ((Dtype)1. + lambda); } } } template <typename Dtype> __global__ void Margin_triple_forward_gpu(int nthreads, const int N_, Dtype lambda, const Dtype* label, const Dtype* x_norm, const Dtype* sign_1, const Dtype* sign_2, const Dtype* cos_theta, const Dtype* cos_theta_cubic, Dtype* top) { CUDA_KERNEL_LOOP(index, nthreads) { // the label[i]_th top_data const int i = index / N_; const int j = index % N_; const int label_value = static_cast<int>(label[i]); if (label_value == j) { top[index] *= lambda; top[index] += x_norm[i] * (sign_1[index] * ((Dtype)4. * cos_theta_cubic[index] - (Dtype)3. * cos_theta[index]) + sign_2[index]); top[index] /= ((Dtype)1. 
+ lambda); } } } template <typename Dtype> __global__ void Margin_quadruple_forward_gpu(int nthreads, const int N_, Dtype lambda, const Dtype* label, const Dtype* x_norm, const Dtype* sign_3, const Dtype* sign_4, const Dtype* cos_theta_quadratic, const Dtype* cos_theta_quartic, Dtype* top) { CUDA_KERNEL_LOOP(index, nthreads) { // the label[i]_th top_data const int i = index / N_; const int j = index % N_; const int label_value = static_cast<int>(label[i]); if (label_value == j) { top[index] *= lambda; top[index] += x_norm[i] * (sign_3[index] * ((Dtype)8. * cos_theta_quartic[index] - (Dtype)8. * cos_theta_quadratic[index] + (Dtype)1.) + sign_4[index]); top[index] /= ((Dtype)1. + lambda); } } } template <typename Dtype> __global__ void Margin_bottom_double_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda, const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label, const Dtype* x_norm, const Dtype* sign_0, const Dtype* cos_theta, const Dtype* cos_theta_quadratic, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int j = index % K_; bottom_diff[index] = (Dtype)0.; const int label_value = static_cast<int>(label[i]); for (int n = 0; n < N_; n++) { if (label_value != n) { bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j]; } else { Dtype coeff_w = (Dtype)4. * sign_0[i * N_ + n] * cos_theta[i * N_ + n]; Dtype coeff_x = - (Dtype)1./ x_norm[i] * ((Dtype)2. * sign_0[i * N_ + n] * cos_theta_quadratic[i * N_ + n] + (Dtype)1.); Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x); coeff_w = coeff_w / coeff_norm; coeff_x = coeff_x / coeff_norm; bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] * (coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]); bottom_diff[index] += lambda / ((Dtype)1. + lambda) * top_diff[i * N_ + n] * weight[n * K_ + j]; } } } } template <typename Dtype> __global__ void Margin_bottom_triple_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda, const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label, const Dtype* x_norm, const Dtype* sign_1, const Dtype* sign_2, const Dtype* cos_theta_quadratic, const Dtype* cos_theta_cubic, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int j = index % K_; bottom_diff[index] = (Dtype)0.; const int label_value = static_cast<int>(label[i]); for (int n = 0; n < N_; n++) { if (label_value != n) { bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j]; } else { Dtype coeff_w = sign_1[i * N_ + n] * ((Dtype)12. * cos_theta_quadratic[i * N_ + n] - (Dtype)3.); Dtype coeff_x = - (Dtype)1./ x_norm[i] * ((Dtype)8. * sign_1[i * N_ + n] * cos_theta_cubic[i * N_ + n] - sign_2[i * N_ + n]); Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x); coeff_w = coeff_w / coeff_norm; coeff_x = coeff_x / coeff_norm; bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] * (coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]); bottom_diff[index] += lambda / ((Dtype)1. 
+ lambda) * top_diff[i * N_ + n] * weight[n * K_ + j]; } } } } template <typename Dtype> __global__ void Margin_bottom_quadruple_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda, const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label, const Dtype* x_norm, const Dtype* sign_3, const Dtype* sign_4, const Dtype* cos_theta, const Dtype* cos_theta_quadratic, const Dtype* cos_theta_cubic, const Dtype* cos_theta_quartic, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int j = index % K_; bottom_diff[index] = (Dtype)0.; const int label_value = static_cast<int>(label[i]); for (int n = 0; n < N_; n++) { if (label_value != n) { bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j]; } else { Dtype coeff_w = sign_3[i * N_ + n] * ((Dtype)32. * cos_theta_cubic[i * N_ + n] - (Dtype)16. * cos_theta[i * N_ + n]); Dtype coeff_x = - (Dtype)1./ x_norm[i] * (sign_3[i * N_ + n] * ((Dtype)24. * cos_theta_quartic[i * N_ + n] - (Dtype)8. * cos_theta_quadratic[i * N_ + n] - 1) - sign_4[i * N_ + n]); Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x); coeff_w = coeff_w / coeff_norm; coeff_x = coeff_x / coeff_norm; bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] * (coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]); bottom_diff[index] += lambda / ((Dtype)1. + lambda) * top_diff[i * N_ + n] * weight[n * K_ + j]; } } } } template <typename Dtype> void MarginInnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { iter_ += (Dtype)1.; Dtype base_ = this->layer_param_.margin_inner_product_param().base(); Dtype gamma_ = this->layer_param_.margin_inner_product_param().gamma(); Dtype power_ = this->layer_param_.margin_inner_product_param().power(); Dtype lambda_min_ = this->layer_param_.margin_inner_product_param().lambda_min(); lambda_ = base_ * powf(((Dtype)1. 
+ gamma_ * iter_), -power_); lambda_ = max(lambda_, lambda_min_); top[1]->mutable_cpu_data()[0] = lambda_; const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* label = bottom[1]->gpu_data(); /************************* normalize weight *************************/ int nthreads = N_; hipLaunchKernelGGL(( Weight_norm_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, K_, this->blobs_[0]->mutable_gpu_data()); /************************* common variables *************************/ // x_norm_ = |x| nthreads = M_; hipLaunchKernelGGL(( Compute_bottom_norm_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, K_, bottom_data, x_norm_.mutable_gpu_data()); nthreads = M_ * N_; // cos_theta = x'w / |x| caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., bottom_data, weight, (Dtype)0., cos_theta_.mutable_gpu_data()); hipLaunchKernelGGL(( Compute_cos_theta_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, x_norm_.gpu_data(), cos_theta_.mutable_gpu_data()); // sign_0 caffe_gpu_sign(M_ * N_, cos_theta_.gpu_data(), sign_0_.mutable_gpu_data()); /************************* optional variables *************************/ switch (type_) { case MarginInnerProductParameter_MarginType_SINGLE: break; case MarginInnerProductParameter_MarginType_DOUBLE: // cos_theta_quadratic caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data()); break; case MarginInnerProductParameter_MarginType_TRIPLE: // cos_theta_quadratic && cos_theta_cubic caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data()); caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)3., cos_theta_cubic_.mutable_gpu_data()); // sign_1 = sign(abs(cos_theta) - 0.5) hipLaunchKernelGGL(( Compute_sign_1_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, cos_theta_.gpu_data(), sign_1_.mutable_gpu_data()); caffe_gpu_sign(M_ * N_, sign_1_.gpu_data(), sign_1_.mutable_gpu_data()); // sign_2 = sign_0 * (1 + sign_1) - 2 hipLaunchKernelGGL(( Compute_sign_2_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, sign_0_.gpu_data(), sign_1_.gpu_data(), sign_2_.mutable_gpu_data()); break; case MarginInnerProductParameter_MarginType_QUADRUPLE: // cos_theta_quadratic && cos_theta_cubic && cos_theta_quartic caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data()); caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)3., cos_theta_cubic_.mutable_gpu_data()); caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)4., cos_theta_quartic_.mutable_gpu_data()); // sign_3 = sign_0 * sign(2 * cos_theta_quadratic_ - 1) hipLaunchKernelGGL(( Compute_sign_3_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, sign_0_.gpu_data(), cos_theta_quadratic_.gpu_data(), sign_3_.mutable_gpu_data()); caffe_gpu_sign(M_ * N_, sign_3_.gpu_data(), sign_3_.mutable_gpu_data()); // sign_4 = 2 * sign_0 + sign_3 - 3 hipLaunchKernelGGL(( Compute_sign_4_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, sign_0_.gpu_data(), sign_3_.gpu_data(), sign_4_.mutable_gpu_data()); break; default: LOG(FATAL) << "Unknown margin type."; } /************************* Forward *************************/ 
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., bottom_data, weight, (Dtype)0., top_data); switch (type_) { case MarginInnerProductParameter_MarginType_SINGLE: break; case MarginInnerProductParameter_MarginType_DOUBLE: // caffe_gpu_memcpy(M_ * N_, cos_theta_.gpu_data(), top_data); hipLaunchKernelGGL(( Margin_double_forward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, lambda_, label, x_norm_.gpu_data(), sign_0_.gpu_data(), cos_theta_quadratic_.gpu_data(), top_data); break; case MarginInnerProductParameter_MarginType_TRIPLE: hipLaunchKernelGGL(( Margin_triple_forward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, lambda_, label, x_norm_.gpu_data(), sign_1_.gpu_data(), sign_2_.gpu_data(), cos_theta_.gpu_data(), cos_theta_cubic_.gpu_data(), top_data); break; case MarginInnerProductParameter_MarginType_QUADRUPLE: hipLaunchKernelGGL(( Margin_quadruple_forward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, lambda_, label, x_norm_.gpu_data(), sign_3_.gpu_data(), sign_4_.gpu_data(), cos_theta_quadratic_.gpu_data(), cos_theta_quartic_.gpu_data(), top_data); break; default: LOG(FATAL) << "Unknown margin type."; } } template <typename Dtype> void MarginInnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); if (this->param_propagate_down_[0]) { // Gradient with respect to weight caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); // Gradient with respect to bottom data int nthreads = M_ * K_; switch (type_) { case MarginInnerProductParameter_MarginType_SINGLE: caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., top_diff, this->blobs_[0]->gpu_data(), (Dtype)0., bottom[0]->mutable_gpu_diff()); break; case MarginInnerProductParameter_MarginType_DOUBLE: hipLaunchKernelGGL(( Margin_bottom_double_backward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label, x_norm_.gpu_data(), sign_0_.gpu_data(), cos_theta_.gpu_data(), cos_theta_quadratic_.gpu_data(), bottom_diff); break; case MarginInnerProductParameter_MarginType_TRIPLE: hipLaunchKernelGGL(( Margin_bottom_triple_backward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label, x_norm_.gpu_data(), sign_1_.gpu_data(), sign_2_.gpu_data(), cos_theta_quadratic_.gpu_data(), cos_theta_cubic_.gpu_data(), bottom_diff); break; case MarginInnerProductParameter_MarginType_QUADRUPLE: hipLaunchKernelGGL(( Margin_bottom_quadruple_backward_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label, x_norm_.gpu_data(), sign_3_.gpu_data(), sign_4_.gpu_data(), cos_theta_.gpu_data(), cos_theta_quadratic_.gpu_data(), cos_theta_cubic_.gpu_data(), cos_theta_quartic_.gpu_data(), bottom_diff); break; default: LOG(FATAL) << "Unknown margin type."; } } } 
INSTANTIATE_LAYER_GPU_FUNCS(MarginInnerProductLayer); } // namespace caffe
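The three margin forward kernels above rewrite the target-class logit with polynomial expansions of cos(m*theta): after Weight_norm_gpu scales each class weight to unit length, x.w equals |x|*cos(theta), and the DOUBLE/TRIPLE/QUADRUPLE kernels substitute |x| times 2cos^2(theta)-1, 4cos^3(theta)-3cos(theta), or 8cos^4(theta)-8cos^2(theta)+1, with the sign_* buffers patching the ranges where the raw polynomial stops decreasing in theta. A minimal host-side C++ sketch, independent of the layer, that checks the identities those kernels rely on:

#include <cmath>
#include <cstdio>

int main() {
  for (double theta = 0.0; theta <= 3.14159; theta += 0.1) {
    double c = std::cos(theta);
    double c2 = 2.0 * c * c - 1.0;                        // polynomial used by the DOUBLE kernel
    double c3 = 4.0 * c * c * c - 3.0 * c;                // polynomial used by the TRIPLE kernel
    double c4 = 8.0 * c * c * c * c - 8.0 * c * c + 1.0;  // polynomial used by the QUADRUPLE kernel
    std::printf("theta=%.2f  |cos2t-p2|=%.1e  |cos3t-p3|=%.1e  |cos4t-p4|=%.1e\n",
                theta,
                std::fabs(std::cos(2 * theta) - c2),
                std::fabs(std::cos(3 * theta) - c3),
                std::fabs(std::cos(4 * theta) - c4));
  }
  return 0;
}

All three differences print at round-off level, which is why the kernels can work entirely in the precomputed powers cos_theta_quadratic_/cubic_/quartic_ without ever evaluating theta itself.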
9a5900e4948391f0726c51912fdfdc677348d0c8.cu
#include <vector> #include <cfloat> #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/margin_inner_product_layer.hpp" namespace caffe { template <typename Dtype> __global__ void Weight_norm_gpu(int nthreads, const int K_, Dtype* weight) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype sum_sqaure = 0.; for (int i = 0; i < K_; i++) { sum_sqaure += weight[index * K_ + i] * weight[index * K_ + i]; } sum_sqaure = sqrt(sum_sqaure); for (int i = 0; i < K_; i++) { weight[index * K_ + i] = weight[index * K_ + i] / sum_sqaure; } } } template <typename Dtype> __global__ void Compute_bottom_norm_gpu(int nthreads, const int K_, const Dtype* bottom, Dtype* x_norm) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype sum_sqaure = 0.; for (int i = 0; i < K_; i++) { sum_sqaure += bottom[index * K_ + i] * bottom[index * K_ + i]; } x_norm[index] = sqrt(sum_sqaure); } } template <typename Dtype> __global__ void Compute_cos_theta_gpu(int nthreads, const int N_, const Dtype* x_norm, Dtype* cos_theta) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; cos_theta[index] = cos_theta[index] / x_norm[i]; } } template <typename Dtype> __global__ void Compute_sign_1_gpu(int nthreads, const Dtype* cos_theta, Dtype* sign_1) { CUDA_KERNEL_LOOP(index, nthreads) { sign_1[index] = abs(cos_theta[index]) - (Dtype)0.5; } } template <typename Dtype> __global__ void Compute_sign_2_gpu(int nthreads, const Dtype* sign_0, const Dtype* sign_1, Dtype* sign_2) { CUDA_KERNEL_LOOP(index, nthreads) { sign_2[index] = sign_0[index] * ((Dtype)1. + sign_1[index]) - (Dtype)2.; } } template <typename Dtype> __global__ void Compute_sign_3_gpu(int nthreads, const Dtype* sign_0, const Dtype* cos_theta_quadratic, Dtype* sign_3) { CUDA_KERNEL_LOOP(index, nthreads) { sign_3[index] = sign_0[index] * ((Dtype)2. * cos_theta_quadratic[index] - (Dtype)1.); } } template <typename Dtype> __global__ void Compute_sign_4_gpu(int nthreads, const Dtype* sign_0, const Dtype* sign_3, Dtype* sign_4) { CUDA_KERNEL_LOOP(index, nthreads) { sign_4[index] = (Dtype)2. * sign_0[index] + sign_3[index] - (Dtype)3.; } } template <typename Dtype> __global__ void Margin_double_forward_gpu(int nthreads, const int N_, Dtype lambda, const Dtype* label, const Dtype* x_norm, const Dtype* sign_0, const Dtype* cos_theta_quadratic, Dtype* top) { CUDA_KERNEL_LOOP(index, nthreads) { // the label[i]_th top_data const int i = index / N_; const int j = index % N_; const int label_value = static_cast<int>(label[i]); if (label_value == j) { top[index] *= lambda; top[index] += x_norm[i] * ((Dtype)2. * sign_0[index] * cos_theta_quadratic[index] - (Dtype)1.); top[index] /= ((Dtype)1. + lambda); } } } template <typename Dtype> __global__ void Margin_triple_forward_gpu(int nthreads, const int N_, Dtype lambda, const Dtype* label, const Dtype* x_norm, const Dtype* sign_1, const Dtype* sign_2, const Dtype* cos_theta, const Dtype* cos_theta_cubic, Dtype* top) { CUDA_KERNEL_LOOP(index, nthreads) { // the label[i]_th top_data const int i = index / N_; const int j = index % N_; const int label_value = static_cast<int>(label[i]); if (label_value == j) { top[index] *= lambda; top[index] += x_norm[i] * (sign_1[index] * ((Dtype)4. * cos_theta_cubic[index] - (Dtype)3. * cos_theta[index]) + sign_2[index]); top[index] /= ((Dtype)1. 
+ lambda); } } } template <typename Dtype> __global__ void Margin_quadruple_forward_gpu(int nthreads, const int N_, Dtype lambda, const Dtype* label, const Dtype* x_norm, const Dtype* sign_3, const Dtype* sign_4, const Dtype* cos_theta_quadratic, const Dtype* cos_theta_quartic, Dtype* top) { CUDA_KERNEL_LOOP(index, nthreads) { // the label[i]_th top_data const int i = index / N_; const int j = index % N_; const int label_value = static_cast<int>(label[i]); if (label_value == j) { top[index] *= lambda; top[index] += x_norm[i] * (sign_3[index] * ((Dtype)8. * cos_theta_quartic[index] - (Dtype)8. * cos_theta_quadratic[index] + (Dtype)1.) + sign_4[index]); top[index] /= ((Dtype)1. + lambda); } } } template <typename Dtype> __global__ void Margin_bottom_double_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda, const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label, const Dtype* x_norm, const Dtype* sign_0, const Dtype* cos_theta, const Dtype* cos_theta_quadratic, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int j = index % K_; bottom_diff[index] = (Dtype)0.; const int label_value = static_cast<int>(label[i]); for (int n = 0; n < N_; n++) { if (label_value != n) { bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j]; } else { Dtype coeff_w = (Dtype)4. * sign_0[i * N_ + n] * cos_theta[i * N_ + n]; Dtype coeff_x = - (Dtype)1./ x_norm[i] * ((Dtype)2. * sign_0[i * N_ + n] * cos_theta_quadratic[i * N_ + n] + (Dtype)1.); Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x); coeff_w = coeff_w / coeff_norm; coeff_x = coeff_x / coeff_norm; bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] * (coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]); bottom_diff[index] += lambda / ((Dtype)1. + lambda) * top_diff[i * N_ + n] * weight[n * K_ + j]; } } } } template <typename Dtype> __global__ void Margin_bottom_triple_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda, const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label, const Dtype* x_norm, const Dtype* sign_1, const Dtype* sign_2, const Dtype* cos_theta_quadratic, const Dtype* cos_theta_cubic, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int j = index % K_; bottom_diff[index] = (Dtype)0.; const int label_value = static_cast<int>(label[i]); for (int n = 0; n < N_; n++) { if (label_value != n) { bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j]; } else { Dtype coeff_w = sign_1[i * N_ + n] * ((Dtype)12. * cos_theta_quadratic[i * N_ + n] - (Dtype)3.); Dtype coeff_x = - (Dtype)1./ x_norm[i] * ((Dtype)8. * sign_1[i * N_ + n] * cos_theta_cubic[i * N_ + n] - sign_2[i * N_ + n]); Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x); coeff_w = coeff_w / coeff_norm; coeff_x = coeff_x / coeff_norm; bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] * (coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]); bottom_diff[index] += lambda / ((Dtype)1. 
+ lambda) * top_diff[i * N_ + n] * weight[n * K_ + j]; } } } } template <typename Dtype> __global__ void Margin_bottom_quadruple_backward_gpu(int nthreads, const int N_, const int K_, Dtype lambda, const Dtype* bottom, const Dtype* weight, const Dtype* top_diff, const Dtype* label, const Dtype* x_norm, const Dtype* sign_3, const Dtype* sign_4, const Dtype* cos_theta, const Dtype* cos_theta_quadratic, const Dtype* cos_theta_cubic, const Dtype* cos_theta_quartic, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int j = index % K_; bottom_diff[index] = (Dtype)0.; const int label_value = static_cast<int>(label[i]); for (int n = 0; n < N_; n++) { if (label_value != n) { bottom_diff[index] += top_diff[i * N_ + n] * weight[n * K_ + j]; } else { Dtype coeff_w = sign_3[i * N_ + n] * ((Dtype)32. * cos_theta_cubic[i * N_ + n] - (Dtype)16. * cos_theta[i * N_ + n]); Dtype coeff_x = - (Dtype)1./ x_norm[i] * (sign_3[i * N_ + n] * ((Dtype)24. * cos_theta_quartic[i * N_ + n] - (Dtype)8. * cos_theta_quadratic[i * N_ + n] - 1) - sign_4[i * N_ + n]); Dtype coeff_norm = sqrt(coeff_w * coeff_w + coeff_x * coeff_x); coeff_w = coeff_w / coeff_norm; coeff_x = coeff_x / coeff_norm; bottom_diff[index] += (Dtype)1./ ((Dtype)1. + lambda) * top_diff[i * N_ + n] * (coeff_w * weight[n * K_ + j] + coeff_x * bottom[index]); bottom_diff[index] += lambda / ((Dtype)1. + lambda) * top_diff[i * N_ + n] * weight[n * K_ + j]; } } } } template <typename Dtype> void MarginInnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { iter_ += (Dtype)1.; Dtype base_ = this->layer_param_.margin_inner_product_param().base(); Dtype gamma_ = this->layer_param_.margin_inner_product_param().gamma(); Dtype power_ = this->layer_param_.margin_inner_product_param().power(); Dtype lambda_min_ = this->layer_param_.margin_inner_product_param().lambda_min(); lambda_ = base_ * powf(((Dtype)1. 
+ gamma_ * iter_), -power_); lambda_ = max(lambda_, lambda_min_); top[1]->mutable_cpu_data()[0] = lambda_; const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* label = bottom[1]->gpu_data(); /************************* normalize weight *************************/ int nthreads = N_; Weight_norm_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, this->blobs_[0]->mutable_gpu_data()); /************************* common variables *************************/ // x_norm_ = |x| nthreads = M_; Compute_bottom_norm_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, bottom_data, x_norm_.mutable_gpu_data()); nthreads = M_ * N_; // cos_theta = x'w / |x| caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., bottom_data, weight, (Dtype)0., cos_theta_.mutable_gpu_data()); Compute_cos_theta_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, x_norm_.gpu_data(), cos_theta_.mutable_gpu_data()); // sign_0 caffe_gpu_sign(M_ * N_, cos_theta_.gpu_data(), sign_0_.mutable_gpu_data()); /************************* optional variables *************************/ switch (type_) { case MarginInnerProductParameter_MarginType_SINGLE: break; case MarginInnerProductParameter_MarginType_DOUBLE: // cos_theta_quadratic caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data()); break; case MarginInnerProductParameter_MarginType_TRIPLE: // cos_theta_quadratic && cos_theta_cubic caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data()); caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)3., cos_theta_cubic_.mutable_gpu_data()); // sign_1 = sign(abs(cos_theta) - 0.5) Compute_sign_1_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, cos_theta_.gpu_data(), sign_1_.mutable_gpu_data()); caffe_gpu_sign(M_ * N_, sign_1_.gpu_data(), sign_1_.mutable_gpu_data()); // sign_2 = sign_0 * (1 + sign_1) - 2 Compute_sign_2_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, sign_0_.gpu_data(), sign_1_.gpu_data(), sign_2_.mutable_gpu_data()); break; case MarginInnerProductParameter_MarginType_QUADRUPLE: // cos_theta_quadratic && cos_theta_cubic && cos_theta_quartic caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)2., cos_theta_quadratic_.mutable_gpu_data()); caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)3., cos_theta_cubic_.mutable_gpu_data()); caffe_gpu_powx(M_ * N_, cos_theta_.gpu_data(), (Dtype)4., cos_theta_quartic_.mutable_gpu_data()); // sign_3 = sign_0 * sign(2 * cos_theta_quadratic_ - 1) Compute_sign_3_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, sign_0_.gpu_data(), cos_theta_quadratic_.gpu_data(), sign_3_.mutable_gpu_data()); caffe_gpu_sign(M_ * N_, sign_3_.gpu_data(), sign_3_.mutable_gpu_data()); // sign_4 = 2 * sign_0 + sign_3 - 3 Compute_sign_4_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, sign_0_.gpu_data(), sign_3_.gpu_data(), sign_4_.mutable_gpu_data()); break; default: LOG(FATAL) << "Unknown margin type."; } /************************* Forward *************************/ caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., bottom_data, weight, (Dtype)0., top_data); switch (type_) { case MarginInnerProductParameter_MarginType_SINGLE: break; case MarginInnerProductParameter_MarginType_DOUBLE: // 
caffe_gpu_memcpy(M_ * N_, cos_theta_.gpu_data(), top_data); Margin_double_forward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, lambda_, label, x_norm_.gpu_data(), sign_0_.gpu_data(), cos_theta_quadratic_.gpu_data(), top_data); break; case MarginInnerProductParameter_MarginType_TRIPLE: Margin_triple_forward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, lambda_, label, x_norm_.gpu_data(), sign_1_.gpu_data(), sign_2_.gpu_data(), cos_theta_.gpu_data(), cos_theta_cubic_.gpu_data(), top_data); break; case MarginInnerProductParameter_MarginType_QUADRUPLE: Margin_quadruple_forward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, lambda_, label, x_norm_.gpu_data(), sign_3_.gpu_data(), sign_4_.gpu_data(), cos_theta_quadratic_.gpu_data(), cos_theta_quartic_.gpu_data(), top_data); break; default: LOG(FATAL) << "Unknown margin type."; } } template <typename Dtype> void MarginInnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); if (this->param_propagate_down_[0]) { // Gradient with respect to weight caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); // Gradient with respect to bottom data int nthreads = M_ * K_; switch (type_) { case MarginInnerProductParameter_MarginType_SINGLE: caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., top_diff, this->blobs_[0]->gpu_data(), (Dtype)0., bottom[0]->mutable_gpu_diff()); break; case MarginInnerProductParameter_MarginType_DOUBLE: Margin_bottom_double_backward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label, x_norm_.gpu_data(), sign_0_.gpu_data(), cos_theta_.gpu_data(), cos_theta_quadratic_.gpu_data(), bottom_diff); break; case MarginInnerProductParameter_MarginType_TRIPLE: Margin_bottom_triple_backward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label, x_norm_.gpu_data(), sign_1_.gpu_data(), sign_2_.gpu_data(), cos_theta_quadratic_.gpu_data(), cos_theta_cubic_.gpu_data(), bottom_diff); break; case MarginInnerProductParameter_MarginType_QUADRUPLE: Margin_bottom_quadruple_backward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, N_, K_, lambda_, bottom_data, weight, top_diff, label, x_norm_.gpu_data(), sign_3_.gpu_data(), sign_4_.gpu_data(), cos_theta_.gpu_data(), cos_theta_quadratic_.gpu_data(), cos_theta_cubic_.gpu_data(), cos_theta_quartic_.gpu_data(), bottom_diff); break; default: LOG(FATAL) << "Unknown margin type."; } } } INSTANTIATE_LAYER_GPU_FUNCS(MarginInnerProductLayer); } // namespace caffe
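Forward_gpu anneals the blending coefficient as lambda = base * (1 + gamma * iter)^(-power), floored at lambda_min, and each margin kernel then emits (lambda * x.w + |x| * m(theta)) / (1 + lambda) for the target class, where m(theta) is the sign-adjusted cosine polynomial; training therefore starts close to a plain inner-product layer and the margin term dominates as lambda decays. A small standalone sketch of that schedule follows; the hyperparameter values are placeholders chosen only for illustration, not values taken from this layer:

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  // Placeholder hyperparameters for illustration only.
  const float base = 1000.0f, gamma = 0.12f, power = 1.0f, lambda_min = 5.0f;
  for (int iter = 0; iter <= 20000; iter += 2000) {
    float lambda = base * std::pow(1.0f + gamma * iter, -power);
    lambda = std::max(lambda, lambda_min);
    // lambda/(1+lambda) is the weight kept on the original inner product x.w.
    std::printf("iter=%5d  lambda=%8.2f  blend lambda/(1+lambda)=%.3f\n",
                iter, lambda, lambda / (1.0f + lambda));
  }
  return 0;
}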
d489533d95b604a9b4c0e98ab0402fdbc92b71f5.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2015-2019 XGBoost contributors */ #include <dmlc/omp.h> #include <dmlc/timer.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <vector> #include <algorithm> #include <utility> #include "xgboost/json.h" #include "xgboost/parameter.h" #include "../common/math.h" #include "../common/random.h" #if defined(__HIPCC__) #include <thrust/sort.h> #include <thrust/gather.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/random/uniform_int_distribution.h> #include <thrust/random/linear_congruential_engine.h> #include <hipcub/hipcub.hpp> #include "../common/device_helpers.cuh" #endif namespace xgboost { namespace obj { #if defined(XGBOOST_USE_CUDA) && !defined(GTEST_TEST) DMLC_REGISTRY_FILE_TAG(rank_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct LambdaRankParam : public XGBoostParameter<LambdaRankParam> { size_t num_pairsample; float fix_list_weight; // declare parameters DMLC_DECLARE_PARAMETER(LambdaRankParam) { DMLC_DECLARE_FIELD(num_pairsample).set_lower_bound(1).set_default(1) .describe("Number of pair generated for each instance."); DMLC_DECLARE_FIELD(fix_list_weight).set_lower_bound(0.0f).set_default(0.0f) .describe("Normalize the weight of each list by this value," " if equals 0, no effect will happen"); } }; #if defined(__HIPCC__) // Helper functions template <typename T> XGBOOST_DEVICE __forceinline__ uint32_t CountNumItemsToTheLeftOf(const T *__restrict__ items, uint32_t n, T v) { return thrust::lower_bound(thrust::seq, items, items + n, v, thrust::greater<T>()) - items; } template <typename T> XGBOOST_DEVICE __forceinline__ uint32_t CountNumItemsToTheRightOf(const T *__restrict__ items, uint32_t n, T v) { return n - (thrust::upper_bound(thrust::seq, items, items + n, v, thrust::greater<T>()) - items); } #endif /*! \brief helper information in a list */ struct ListEntry { /*! \brief the predict score we in the data */ bst_float pred; /*! \brief the actual label of the entry */ bst_float label; /*! \brief row index in the data matrix */ unsigned rindex; // constructor ListEntry(bst_float pred, bst_float label, unsigned rindex) : pred(pred), label(label), rindex(rindex) {} // comparator by prediction inline static bool CmpPred(const ListEntry &a, const ListEntry &b) { return a.pred > b.pred; } // comparator by label inline static bool CmpLabel(const ListEntry &a, const ListEntry &b) { return a.label > b.label; } }; /*! \brief a pair in the lambda rank */ struct LambdaPair { /*! \brief positive index: this is a position in the list */ unsigned pos_index; /*! \brief negative index: this is a position in the list */ unsigned neg_index; /*! \brief weight to be filled in */ bst_float weight; // constructor LambdaPair(unsigned pos_index, unsigned neg_index) : pos_index(pos_index), neg_index(neg_index), weight(1.0f) {} // constructor LambdaPair(unsigned pos_index, unsigned neg_index, bst_float weight) : pos_index(pos_index), neg_index(neg_index), weight(weight) {} }; class PairwiseLambdaWeightComputer { public: /*! 
* \brief get lambda weight for existing pairs - for pairwise objective * \param list a list that is sorted by pred score * \param io_pairs record of pairs, containing the pairs to fill in weights */ static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list, std::vector<LambdaPair> *io_pairs) {} static char const* Name() { return "rank:pairwise"; } #if defined(__HIPCC__) PairwiseLambdaWeightComputer(const bst_float *dpreds, const bst_float *dlabels, const dh::SegmentSorter<float> &segment_label_sorter) {} class PairwiseLambdaWeightMultiplier { public: // Adjust the items weight by this value __device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const { return 1.0f; } }; inline const PairwiseLambdaWeightMultiplier GetWeightMultiplier() const { return {}; } #endif }; #if defined(__HIPCC__) class BaseLambdaWeightMultiplier { public: BaseLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter, const dh::SegmentSorter<float> &segment_pred_sorter) : dsorted_labels_(segment_label_sorter.GetItemsSpan()), dorig_pos_(segment_label_sorter.GetOriginalPositionsSpan()), dgroups_(segment_label_sorter.GetGroupsSpan()), dindexable_sorted_preds_pos_(segment_pred_sorter.GetIndexableSortedPositionsSpan()) {} protected: const common::Span<const float> dsorted_labels_; // Labels sorted within a group const common::Span<const uint32_t> dorig_pos_; // Original indices of the labels // before they are sorted const common::Span<const uint32_t> dgroups_; // The group indices // Where can a prediction for a label be found in the original array, when they are sorted const common::Span<const uint32_t> dindexable_sorted_preds_pos_; }; // While computing the weight that needs to be adjusted by this ranking objective, we need // to figure out where positive and negative labels chosen earlier exists, if the group // were to be sorted by its predictions. To accommodate this, we employ the following algorithm. // For a given group, let's assume the following: // labels: 1 5 9 2 4 8 0 7 6 3 // predictions: 1 9 0 8 2 7 3 6 5 4 // position: 0 1 2 3 4 5 6 7 8 9 // // After label sort: // labels: 9 8 7 6 5 4 3 2 1 0 // position: 2 5 7 8 1 4 9 3 0 6 // // After prediction sort: // predictions: 9 8 7 6 5 4 3 2 1 0 // position: 1 3 5 7 8 9 6 4 0 2 // // If a sorted label at position 'x' is chosen, then we need to find out where the prediction // for this label 'x' exists, if the group were to be sorted by predictions. // We first take the sorted prediction positions: // position: 1 3 5 7 8 9 6 4 0 2 // at indices: 0 1 2 3 4 5 6 7 8 9 // // We create a sorted prediction positional array, such that value at position 'x' gives // us the position in the sorted prediction array where its related prediction lies. // dindexable_sorted_preds_pos_: 8 0 9 1 7 2 6 3 4 5 // at indices: 0 1 2 3 4 5 6 7 8 9 // Basically, swap the previous 2 arrays, sort the indices and reorder positions // for an O(1) lookup using the position where the sorted label exists. 
// // This type does that using the SegmentSorter class IndexablePredictionSorter { public: IndexablePredictionSorter(const bst_float *dpreds, const dh::SegmentSorter<float> &segment_label_sorter) { // Sort the predictions first segment_pred_sorter_.SortItems(dpreds, segment_label_sorter.GetNumItems(), segment_label_sorter.GetGroupSegmentsSpan()); // Create an index for the sorted prediction positions segment_pred_sorter_.CreateIndexableSortedPositions(); } inline const dh::SegmentSorter<float> &GetPredictionSorter() const { return segment_pred_sorter_; } private: dh::SegmentSorter<float> segment_pred_sorter_; // For sorting the predictions }; #endif // beta version: NDCG lambda rank class NDCGLambdaWeightComputer #if defined(__HIPCC__) : public IndexablePredictionSorter #endif { public: #if defined(__HIPCC__) // This function object computes the item's DCG value class ComputeItemDCG : public thrust::unary_function<uint32_t, float> { public: XGBOOST_DEVICE ComputeItemDCG(const common::Span<const float> &dsorted_labels, const common::Span<const uint32_t> &dgroups, const common::Span<const uint32_t> &gidxs) : dsorted_labels_(dsorted_labels), dgroups_(dgroups), dgidxs_(gidxs) {} // Compute DCG for the item at 'idx' __device__ __forceinline__ float operator()(uint32_t idx) const { return ComputeItemDCGWeight(dsorted_labels_[idx], idx - dgroups_[dgidxs_[idx]]); } private: const common::Span<const float> dsorted_labels_; // Labels sorted within a group const common::Span<const uint32_t> dgroups_; // The group indices - where each group // begins and ends const common::Span<const uint32_t> dgidxs_; // The group each items belongs to }; // Type containing device pointers that can be cheaply copied on the kernel class NDCGLambdaWeightMultiplier : public BaseLambdaWeightMultiplier { public: NDCGLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter, const NDCGLambdaWeightComputer &lwc) : BaseLambdaWeightMultiplier(segment_label_sorter, lwc.GetPredictionSorter()), dgroup_dcgs_(lwc.GetGroupDcgsSpan()) {} // Adjust the items weight by this value __device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const { if (dgroup_dcgs_[gidx] == 0.0) return 0.0f; uint32_t group_begin = dgroups_[gidx]; auto pos_lab_orig_posn = dorig_pos_[pidx]; auto neg_lab_orig_posn = dorig_pos_[nidx]; KERNEL_CHECK(pos_lab_orig_posn != neg_lab_orig_posn); // Note: the label positive and negative indices are relative to the entire dataset. 
// Hence, scale them back to an index within the group auto pos_pred_pos = dindexable_sorted_preds_pos_[pos_lab_orig_posn] - group_begin; auto neg_pred_pos = dindexable_sorted_preds_pos_[neg_lab_orig_posn] - group_begin; return NDCGLambdaWeightComputer::ComputeDeltaWeight( pos_pred_pos, neg_pred_pos, static_cast<int>(dsorted_labels_[pidx]), static_cast<int>(dsorted_labels_[nidx]), dgroup_dcgs_[gidx]); } private: const common::Span<const float> dgroup_dcgs_; // Group DCG values }; NDCGLambdaWeightComputer(const bst_float *dpreds, const bst_float *dlabels, const dh::SegmentSorter<float> &segment_label_sorter) : IndexablePredictionSorter(dpreds, segment_label_sorter), dgroup_dcg_(segment_label_sorter.GetNumGroups(), 0.0f), weight_multiplier_(segment_label_sorter, *this) { const auto &group_segments = segment_label_sorter.GetGroupSegmentsSpan(); // Allocator to be used for managing space overhead while performing transformed reductions dh::XGBCachingDeviceAllocator<char> alloc; // Compute each elements DCG values and reduce them across groups concurrently. auto end_range = thrust::reduce_by_key(thrust::hip::par(alloc), dh::tcbegin(group_segments), dh::tcend(group_segments), thrust::make_transform_iterator( // The indices need not be sequential within a group, as we care only // about the sum of items DCG values within a group dh::tcbegin(segment_label_sorter.GetOriginalPositionsSpan()), ComputeItemDCG(segment_label_sorter.GetItemsSpan(), segment_label_sorter.GetGroupsSpan(), group_segments)), thrust::make_discard_iterator(), // We don't care for the group indices dgroup_dcg_.begin()); // Sum of the item's DCG values in the group CHECK(end_range.second - dgroup_dcg_.begin() == dgroup_dcg_.size()); } inline const common::Span<const float> GetGroupDcgsSpan() const { return { dgroup_dcg_.data().get(), dgroup_dcg_.size() }; } inline const NDCGLambdaWeightMultiplier GetWeightMultiplier() const { return weight_multiplier_; } #endif static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list, std::vector<LambdaPair> *io_pairs) { std::vector<LambdaPair> &pairs = *io_pairs; float IDCG; // NOLINT { std::vector<bst_float> labels(sorted_list.size()); for (size_t i = 0; i < sorted_list.size(); ++i) { labels[i] = sorted_list[i].label; } std::stable_sort(labels.begin(), labels.end(), std::greater<>()); IDCG = ComputeGroupDCGWeight(&labels[0], labels.size()); } if (IDCG == 0.0) { for (auto & pair : pairs) { pair.weight = 0.0f; } } else { for (auto & pair : pairs) { unsigned pos_idx = pair.pos_index; unsigned neg_idx = pair.neg_index; pair.weight *= ComputeDeltaWeight(pos_idx, neg_idx, sorted_list[pos_idx].label, sorted_list[neg_idx].label, IDCG); } } } static char const* Name() { return "rank:ndcg"; } inline static bst_float ComputeGroupDCGWeight(const float *sorted_labels, uint32_t size) { double sumdcg = 0.0; for (uint32_t i = 0; i < size; ++i) { sumdcg += ComputeItemDCGWeight(sorted_labels[i], i); } return static_cast<bst_float>(sumdcg); } private: XGBOOST_DEVICE inline static bst_float ComputeItemDCGWeight(unsigned label, uint32_t idx) { return (label != 0) ? 
(((1 << label) - 1) / std::log2(static_cast<bst_float>(idx + 2))) : 0; } // Compute the weight adjustment for an item within a group: // pos_pred_pos => Where does the positive label live, had the list been sorted by prediction // neg_pred_pos => Where does the negative label live, had the list been sorted by prediction // pos_label => positive label value from sorted label list // neg_label => negative label value from sorted label list XGBOOST_DEVICE inline static bst_float ComputeDeltaWeight(uint32_t pos_pred_pos, uint32_t neg_pred_pos, int pos_label, int neg_label, float idcg) { float pos_loginv = 1.0f / std::log2(pos_pred_pos + 2.0f); float neg_loginv = 1.0f / std::log2(neg_pred_pos + 2.0f); bst_float original = ((1 << pos_label) - 1) * pos_loginv + ((1 << neg_label) - 1) * neg_loginv; float changed = ((1 << neg_label) - 1) * pos_loginv + ((1 << pos_label) - 1) * neg_loginv; bst_float delta = (original - changed) * (1.0f / idcg); if (delta < 0.0f) delta = - delta; return delta; } #if defined(__HIPCC__) dh::caching_device_vector<float> dgroup_dcg_; // This computes the adjustment to the weight const NDCGLambdaWeightMultiplier weight_multiplier_; #endif }; class MAPLambdaWeightComputer #if defined(__HIPCC__) : public IndexablePredictionSorter #endif { public: struct MAPStats { /*! \brief the accumulated precision */ float ap_acc{0.0f}; /*! * \brief the accumulated precision, * assuming a positive instance is missing */ float ap_acc_miss{0.0f}; /*! * \brief the accumulated precision, * assuming that one more positive instance is inserted ahead */ float ap_acc_add{0.0f}; /* \brief the accumulated positive instance count */ float hits{0.0f}; XGBOOST_DEVICE MAPStats() {} // NOLINT XGBOOST_DEVICE MAPStats(float ap_acc, float ap_acc_miss, float ap_acc_add, float hits) : ap_acc(ap_acc), ap_acc_miss(ap_acc_miss), ap_acc_add(ap_acc_add), hits(hits) {} // For prefix scan XGBOOST_DEVICE MAPStats operator +(const MAPStats &v1) const { return {ap_acc + v1.ap_acc, ap_acc_miss + v1.ap_acc_miss, ap_acc_add + v1.ap_acc_add, hits + v1.hits}; } // For test purposes - compare for equality XGBOOST_DEVICE bool operator ==(const MAPStats &rhs) const { return ap_acc == rhs.ap_acc && ap_acc_miss == rhs.ap_acc_miss && ap_acc_add == rhs.ap_acc_add && hits == rhs.hits; } }; private: template <typename T> XGBOOST_DEVICE inline static void Swap(T &v0, T &v1) { #if defined(__HIPCC__) thrust::swap(v0, v1); #else std::swap(v0, v1); #endif } /*! 
* \brief Obtain the delta MAP by trying to switch the positions of labels in pos_pred_pos or * neg_pred_pos when sorted by predictions * \param pos_pred_pos positive label's prediction value position when the groups prediction * values are sorted * \param neg_pred_pos negative label's prediction value position when the groups prediction * values are sorted * \param pos_label, neg_label the chosen positive and negative labels * \param p_map_stats a vector containing the accumulated precisions for each position in a list * \param map_stats_size size of the accumulated precisions vector */ XGBOOST_DEVICE inline static bst_float GetLambdaMAP( int pos_pred_pos, int neg_pred_pos, bst_float pos_label, bst_float neg_label, const MAPStats *p_map_stats, uint32_t map_stats_size) { if (pos_pred_pos == neg_pred_pos || p_map_stats[map_stats_size - 1].hits == 0) { return 0.0f; } if (pos_pred_pos > neg_pred_pos) { Swap(pos_pred_pos, neg_pred_pos); Swap(pos_label, neg_label); } bst_float original = p_map_stats[neg_pred_pos].ap_acc; if (pos_pred_pos != 0) original -= p_map_stats[pos_pred_pos - 1].ap_acc; bst_float changed = 0; bst_float label1 = pos_label > 0.0f ? 1.0f : 0.0f; bst_float label2 = neg_label > 0.0f ? 1.0f : 0.0f; if (label1 == label2) { return 0.0; } else if (label1 < label2) { changed += p_map_stats[neg_pred_pos - 1].ap_acc_add - p_map_stats[pos_pred_pos].ap_acc_add; changed += (p_map_stats[pos_pred_pos].hits + 1.0f) / (pos_pred_pos + 1); } else { changed += p_map_stats[neg_pred_pos - 1].ap_acc_miss - p_map_stats[pos_pred_pos].ap_acc_miss; changed += p_map_stats[neg_pred_pos].hits / (neg_pred_pos + 1); } bst_float ans = (changed - original) / (p_map_stats[map_stats_size - 1].hits); if (ans < 0) ans = -ans; return ans; } public: /* * \brief obtain preprocessing results for calculating delta MAP * \param sorted_list the list containing entry information * \param map_stats a vector containing the accumulated precisions for each position in a list */ inline static void GetMAPStats(const std::vector<ListEntry> &sorted_list, std::vector<MAPStats> *p_map_acc) { std::vector<MAPStats> &map_acc = *p_map_acc; map_acc.resize(sorted_list.size()); bst_float hit = 0, acc1 = 0, acc2 = 0, acc3 = 0; for (size_t i = 1; i <= sorted_list.size(); ++i) { if (sorted_list[i - 1].label > 0.0f) { hit++; acc1 += hit / i; acc2 += (hit - 1) / i; acc3 += (hit + 1) / i; } map_acc[i - 1] = MAPStats(acc1, acc2, acc3, hit); } } static char const* Name() { return "rank:map"; } static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list, std::vector<LambdaPair> *io_pairs) { std::vector<LambdaPair> &pairs = *io_pairs; std::vector<MAPStats> map_stats; GetMAPStats(sorted_list, &map_stats); for (auto & pair : pairs) { pair.weight *= GetLambdaMAP(pair.pos_index, pair.neg_index, sorted_list[pair.pos_index].label, sorted_list[pair.neg_index].label, &map_stats[0], map_stats.size()); } } #if defined(__HIPCC__) MAPLambdaWeightComputer(const bst_float *dpreds, const bst_float *dlabels, const dh::SegmentSorter<float> &segment_label_sorter) : IndexablePredictionSorter(dpreds, segment_label_sorter), dmap_stats_(segment_label_sorter.GetNumItems(), MAPStats()), weight_multiplier_(segment_label_sorter, *this) { this->CreateMAPStats(dlabels, segment_label_sorter); } void CreateMAPStats(const bst_float *dlabels, const dh::SegmentSorter<float> &segment_label_sorter) { // For each group, go through the sorted prediction positions, and look up its corresponding // label from the unsorted labels (from the original label list) // For each 
item in the group, compute its MAP stats. // Interleave the computation of map stats amongst different groups. // First, determine postive labels in the dataset individually auto nitems = segment_label_sorter.GetNumItems(); dh::caching_device_vector<uint32_t> dhits(nitems, 0); // Original positions of the predictions after they have been sorted const auto &pred_original_pos = this->GetPredictionSorter().GetOriginalPositionsSpan(); // Unsorted labels const float *unsorted_labels = dlabels; auto DeterminePositiveLabelLambda = [=] __device__(uint32_t idx) { return (unsorted_labels[pred_original_pos[idx]] > 0.0f) ? 1 : 0; }; // NOLINT thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(nitems), dhits.begin(), DeterminePositiveLabelLambda); // Allocator to be used by sort for managing space overhead while performing prefix scans dh::XGBCachingDeviceAllocator<char> alloc; // Next, prefix scan the positive labels that are segmented to accumulate them. // This is required for computing the accumulated precisions const auto &group_segments = segment_label_sorter.GetGroupSegmentsSpan(); // Data segmented into different groups... thrust::inclusive_scan_by_key(thrust::hip::par(alloc), dh::tcbegin(group_segments), dh::tcend(group_segments), dhits.begin(), // Input value dhits.begin()); // In-place scan // Compute accumulated precisions for each item, assuming positive and // negative instances are missing. // But first, compute individual item precisions const auto *dhits_arr = dhits.data().get(); // Group info on device const auto &dgroups = segment_label_sorter.GetGroupsSpan(); auto ComputeItemPrecisionLambda = [=] __device__(uint32_t idx) { if (unsorted_labels[pred_original_pos[idx]] > 0.0f) { auto idx_within_group = (idx - dgroups[group_segments[idx]]) + 1; return MAPStats{static_cast<float>(dhits_arr[idx]) / idx_within_group, static_cast<float>(dhits_arr[idx] - 1) / idx_within_group, static_cast<float>(dhits_arr[idx] + 1) / idx_within_group, 1.0f}; } return MAPStats{}; }; // NOLINT thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(nitems), this->dmap_stats_.begin(), ComputeItemPrecisionLambda); // Lastly, compute the accumulated precisions for all the items segmented by groups. // The precisions are accumulated within each group thrust::inclusive_scan_by_key(thrust::hip::par(alloc), dh::tcbegin(group_segments), dh::tcend(group_segments), this->dmap_stats_.begin(), // Input map stats this->dmap_stats_.begin()); // In-place scan and output here } inline const common::Span<const MAPStats> GetMapStatsSpan() const { return { dmap_stats_.data().get(), dmap_stats_.size() }; } // Type containing device pointers that can be cheaply copied on the kernel class MAPLambdaWeightMultiplier : public BaseLambdaWeightMultiplier { public: MAPLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter, const MAPLambdaWeightComputer &lwc) : BaseLambdaWeightMultiplier(segment_label_sorter, lwc.GetPredictionSorter()), dmap_stats_(lwc.GetMapStatsSpan()) {} // Adjust the items weight by this value __device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const { uint32_t group_begin = dgroups_[gidx]; uint32_t group_end = dgroups_[gidx + 1]; auto pos_lab_orig_posn = dorig_pos_[pidx]; auto neg_lab_orig_posn = dorig_pos_[nidx]; KERNEL_CHECK(pos_lab_orig_posn != neg_lab_orig_posn); // Note: the label positive and negative indices are relative to the entire dataset. 
// Hence, scale them back to an index within the group auto pos_pred_pos = dindexable_sorted_preds_pos_[pos_lab_orig_posn] - group_begin; auto neg_pred_pos = dindexable_sorted_preds_pos_[neg_lab_orig_posn] - group_begin; return MAPLambdaWeightComputer::GetLambdaMAP( pos_pred_pos, neg_pred_pos, dsorted_labels_[pidx], dsorted_labels_[nidx], &dmap_stats_[group_begin], group_end - group_begin); } private: common::Span<const MAPStats> dmap_stats_; // Start address of the map stats for every sorted // prediction value }; inline const MAPLambdaWeightMultiplier GetWeightMultiplier() const { return weight_multiplier_; } private: dh::caching_device_vector<MAPStats> dmap_stats_; // This computes the adjustment to the weight const MAPLambdaWeightMultiplier weight_multiplier_; #endif }; #if defined(__HIPCC__) class SortedLabelList : dh::SegmentSorter<float> { private: const LambdaRankParam &param_; // Objective configuration public: explicit SortedLabelList(const LambdaRankParam &param) : param_(param) {} // Sort the labels that are grouped by 'groups' void Sort(const HostDeviceVector<bst_float> &dlabels, const std::vector<uint32_t> &groups) { this->SortItems(dlabels.ConstDevicePointer(), dlabels.Size(), groups); } // This kernel can only run *after* the kernel in sort is completed, as they // use the default stream template <typename LambdaWeightComputerT> void ComputeGradients(const bst_float *dpreds, // Unsorted predictions const bst_float *dlabels, // Unsorted labels const HostDeviceVector<bst_float> &weights, int iter, GradientPair *out_gpair, float weight_normalization_factor) { // Group info on device const auto &dgroups = this->GetGroupsSpan(); uint32_t ngroups = this->GetNumGroups() + 1; uint32_t total_items = this->GetNumItems(); uint32_t niter = param_.num_pairsample * total_items; float fix_list_weight = param_.fix_list_weight; const auto &original_pos = this->GetOriginalPositionsSpan(); uint32_t num_weights = weights.Size(); auto dweights = num_weights ? weights.ConstDevicePointer() : nullptr; const auto &sorted_labels = this->GetItemsSpan(); // This is used to adjust the weight of different elements based on the different ranking // objective function policies LambdaWeightComputerT weight_computer(dpreds, dlabels, *this); auto wmultiplier = weight_computer.GetWeightMultiplier(); int device_id = -1; dh::safe_cuda(hipGetDevice(&device_id)); // For each instance in the group, compute the gradient pair concurrently dh::LaunchN(device_id, niter, nullptr, [=] __device__(uint32_t idx) { // First, determine the group 'idx' belongs to uint32_t item_idx = idx % total_items; uint32_t group_idx = thrust::upper_bound(thrust::seq, dgroups.begin(), dgroups.begin() + ngroups, item_idx) - dgroups.begin(); // Span of this group within the larger labels/predictions sorted tuple uint32_t group_begin = dgroups[group_idx - 1]; uint32_t group_end = dgroups[group_idx]; uint32_t total_group_items = group_end - group_begin; // Are the labels diverse enough? 
If they are all the same, then there is nothing to pick // from another group - bail sooner if (sorted_labels[group_begin] == sorted_labels[group_end - 1]) return; // Find the number of labels less than and greater than the current label // at the sorted index position item_idx uint32_t nleft = CountNumItemsToTheLeftOf( sorted_labels.data() + group_begin, item_idx - group_begin + 1, sorted_labels[item_idx]); uint32_t nright = CountNumItemsToTheRightOf( sorted_labels.data() + item_idx, group_end - item_idx, sorted_labels[item_idx]); // Create a minstd_rand object to act as our source of randomness thrust::minstd_rand rng((iter + 1) * 1111); rng.discard(((idx / total_items) * total_group_items) + item_idx - group_begin); // Create a uniform_int_distribution to produce a sample from outside of the // present label group thrust::uniform_int_distribution<int> dist(0, nleft + nright - 1); int sample = dist(rng); int pos_idx = -1; // Bigger label int neg_idx = -1; // Smaller label // Are we picking a sample to the left/right of the current group? if (sample < nleft) { // Go left pos_idx = sample + group_begin; neg_idx = item_idx; } else { pos_idx = item_idx; uint32_t items_in_group = total_group_items - nleft - nright; neg_idx = sample + items_in_group + group_begin; } // Compute and assign the gradients now const float eps = 1e-16f; bst_float p = common::Sigmoid(dpreds[original_pos[pos_idx]] - dpreds[original_pos[neg_idx]]); bst_float g = p - 1.0f; bst_float h = thrust::max(p * (1.0f - p), eps); // Rescale each gradient and hessian so that the group has a weighted constant float scale = __frcp_ru(niter / total_items); if (fix_list_weight != 0.0f) { scale *= fix_list_weight / total_group_items; } float weight = num_weights ? dweights[group_idx - 1] : 1.0f; weight *= weight_normalization_factor; weight *= wmultiplier.GetWeight(group_idx - 1, pos_idx, neg_idx); weight *= scale; // Accumulate gradient and hessian in both positive and negative indices const GradientPair in_pos_gpair(g * weight, 2.0f * weight * h); dh::AtomicAddGpair(&out_gpair[original_pos[pos_idx]], in_pos_gpair); const GradientPair in_neg_gpair(-g * weight, 2.0f * weight * h); dh::AtomicAddGpair(&out_gpair[original_pos[neg_idx]], in_neg_gpair); }); // Wait until the computations done by the kernel is complete dh::safe_cuda(hipStreamSynchronize(nullptr)); } }; #endif // objective for lambda rank template <typename LambdaWeightComputerT> class LambdaRankObj : public ObjFunction { public: void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int iter, HostDeviceVector<GradientPair>* out_gpair) override { CHECK_EQ(preds.Size(), info.labels_.Size()) << "label size predict size not match"; // quick consistency when group is not available std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(info.labels_.Size()); const std::vector<unsigned> &gptr = info.group_ptr_.size() == 0 ? tgptr : info.group_ptr_; CHECK(gptr.size() != 0 && gptr.back() == info.labels_.Size()) << "group structure not consistent with #rows" << ", " << "group ponter size: " << gptr.size() << ", " << "labels size: " << info.labels_.Size() << ", " << "group pointer back: " << (gptr.size() == 0 ? 
0 : gptr.back()); #if defined(__HIPCC__) // Check if we have a GPU assignment; else, revert back to CPU auto device = tparam_->gpu_id; if (device >= 0) { ComputeGradientsOnGPU(preds, info, iter, out_gpair, gptr); } else { // Revert back to CPU #endif ComputeGradientsOnCPU(preds, info, iter, out_gpair, gptr); #if defined(__HIPCC__) } #endif } const char* DefaultEvalMetric() const override { return "map"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(LambdaWeightComputerT::Name()); out["lambda_rank_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["lambda_rank_param"], &param_); } private: bst_float ComputeWeightNormalizationFactor(const MetaInfo& info, const std::vector<unsigned> &gptr) { const auto ngroup = static_cast<bst_omp_uint>(gptr.size() - 1); bst_float sum_weights = 0; for (bst_omp_uint k = 0; k < ngroup; ++k) { sum_weights += info.GetWeight(k); } return ngroup / sum_weights; } void ComputeGradientsOnCPU(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int iter, HostDeviceVector<GradientPair>* out_gpair, const std::vector<unsigned> &gptr) { LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on CPU."; bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr); const auto& preds_h = preds.HostVector(); const auto& labels = info.labels_.HostVector(); std::vector<GradientPair>& gpair = out_gpair->HostVector(); const auto ngroup = static_cast<bst_omp_uint>(gptr.size() - 1); out_gpair->Resize(preds.Size()); #pragma omp parallel { // parallel construct, declare random number generator here, so that each // thread use its own random number generator, seed by thread id and current iteration std::minstd_rand rnd((iter + 1) * 1111); std::vector<LambdaPair> pairs; std::vector<ListEntry> lst; std::vector< std::pair<bst_float, unsigned> > rec; #pragma omp for schedule(static) for (bst_omp_uint k = 0; k < ngroup; ++k) { lst.clear(); pairs.clear(); for (unsigned j = gptr[k]; j < gptr[k+1]; ++j) { lst.emplace_back(preds_h[j], labels[j], j); gpair[j] = GradientPair(0.0f, 0.0f); } std::stable_sort(lst.begin(), lst.end(), ListEntry::CmpPred); rec.resize(lst.size()); for (unsigned i = 0; i < lst.size(); ++i) { rec[i] = std::make_pair(lst[i].label, i); } std::stable_sort(rec.begin(), rec.end(), common::CmpFirst); // enumerate buckets with same label, for each item in the lst, grab another sample randomly for (unsigned i = 0; i < rec.size(); ) { unsigned j = i + 1; while (j < rec.size() && rec[j].first == rec[i].first) ++j; // bucket in [i,j), get a sample outside bucket unsigned nleft = i, nright = static_cast<unsigned>(rec.size() - j); if (nleft + nright != 0) { int nsample = param_.num_pairsample; while (nsample --) { for (unsigned pid = i; pid < j; ++pid) { unsigned ridx = std::uniform_int_distribution<unsigned>(0, nleft + nright - 1)(rnd); if (ridx < nleft) { pairs.emplace_back(rec[ridx].second, rec[pid].second, info.GetWeight(k) * weight_normalization_factor); } else { pairs.emplace_back(rec[pid].second, rec[ridx+j-i].second, info.GetWeight(k) * weight_normalization_factor); } } } } i = j; } // get lambda weight for the pairs LambdaWeightComputerT::GetLambdaWeight(lst, &pairs); // rescale each gradient and hessian so that the lst have constant weighted float scale = 1.0f / param_.num_pairsample; if (param_.fix_list_weight != 0.0f) { scale *= param_.fix_list_weight / (gptr[k + 1] - gptr[k]); } for (auto & pair : pairs) { const ListEntry &pos = 
lst[pair.pos_index]; const ListEntry &neg = lst[pair.neg_index]; const bst_float w = pair.weight * scale; const float eps = 1e-16f; bst_float p = common::Sigmoid(pos.pred - neg.pred); bst_float g = p - 1.0f; bst_float h = ::max(p * (1.0f - p), eps); // accumulate gradient and hessian in both pid, and nid gpair[pos.rindex] += GradientPair(g * w, 2.0f*w*h); gpair[neg.rindex] += GradientPair(-g * w, 2.0f*w*h); } } } } #if defined(__HIPCC__) void ComputeGradientsOnGPU(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int iter, HostDeviceVector<GradientPair>* out_gpair, const std::vector<unsigned> &gptr) { LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on GPU."; auto device = tparam_->gpu_id; dh::safe_cuda(hipSetDevice(device)); bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr); // Set the device ID and copy them to the device out_gpair->SetDevice(device); info.labels_.SetDevice(device); preds.SetDevice(device); info.weights_.SetDevice(device); out_gpair->Resize(preds.Size()); auto d_preds = preds.ConstDevicePointer(); auto d_gpair = out_gpair->DevicePointer(); auto d_labels = info.labels_.ConstDevicePointer(); SortedLabelList slist(param_); // Sort the labels within the groups on the device slist.Sort(info.labels_, gptr); // Initialize the gradients next out_gpair->Fill(GradientPair(0.0f, 0.0f)); // Finally, compute the gradients slist.ComputeGradients<LambdaWeightComputerT> (d_preds, d_labels, info.weights_, iter, d_gpair, weight_normalization_factor); } #endif LambdaRankParam param_; }; #if !defined(GTEST_TEST) // register the objective functions DMLC_REGISTER_PARAMETER(LambdaRankParam); XGBOOST_REGISTER_OBJECTIVE(PairwiseRankObj, PairwiseLambdaWeightComputer::Name()) .describe("Pairwise rank objective.") .set_body([]() { return new LambdaRankObj<PairwiseLambdaWeightComputer>(); }); XGBOOST_REGISTER_OBJECTIVE(LambdaRankNDCG, NDCGLambdaWeightComputer::Name()) .describe("LambdaRank with NDCG as objective.") .set_body([]() { return new LambdaRankObj<NDCGLambdaWeightComputer>(); }); XGBOOST_REGISTER_OBJECTIVE(LambdaRankObjMAP, MAPLambdaWeightComputer::Name()) .describe("LambdaRank with MAP as objective.") .set_body([]() { return new LambdaRankObj<MAPLambdaWeightComputer>(); }); #endif } // namespace obj } // namespace xgboost
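The long comment above BaseLambdaWeightMultiplier describes how dindexable_sorted_preds_pos_ is built: sort a group's predictions in descending order, record the original positions, then invert that permutation so the sorted-prediction rank of any original position can be looked up in O(1). A CPU-only sketch of the same idea, using the example arrays from that comment (the device code does this per group via dh::SegmentSorter::SortItems and CreateIndexableSortedPositions):

#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
  // Example group taken from the comment above.
  std::vector<float> preds = {1, 9, 0, 8, 2, 7, 3, 6, 5, 4};

  // Original positions of the predictions after sorting them in descending order.
  std::vector<unsigned> sorted_pos(preds.size());
  std::iota(sorted_pos.begin(), sorted_pos.end(), 0u);
  std::stable_sort(sorted_pos.begin(), sorted_pos.end(),
                   [&](unsigned a, unsigned b) { return preds[a] > preds[b]; });
  // sorted_pos is now: 1 3 5 7 8 9 6 4 0 2

  // Invert the permutation: indexable[original position] = rank under the prediction sort.
  std::vector<unsigned> indexable(preds.size());
  for (unsigned rank = 0; rank < sorted_pos.size(); ++rank) {
    indexable[sorted_pos[rank]] = rank;
  }

  for (unsigned v : indexable) std::printf("%u ", v);  // prints: 8 0 9 1 7 2 6 3 4 5
  std::printf("\n");
  return 0;
}

The printed array matches the dindexable_sorted_preds_pos_ row in the comment; GetWeight then subtracts the group's start offset from these values before computing the delta weight for a pair.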
d489533d95b604a9b4c0e98ab0402fdbc92b71f5.cu
/*! * Copyright 2015-2019 XGBoost contributors */ #include <dmlc/omp.h> #include <dmlc/timer.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <vector> #include <algorithm> #include <utility> #include "xgboost/json.h" #include "xgboost/parameter.h" #include "../common/math.h" #include "../common/random.h" #if defined(__CUDACC__) #include <thrust/sort.h> #include <thrust/gather.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/random/uniform_int_distribution.h> #include <thrust/random/linear_congruential_engine.h> #include <cub/util_allocator.cuh> #include "../common/device_helpers.cuh" #endif namespace xgboost { namespace obj { #if defined(XGBOOST_USE_CUDA) && !defined(GTEST_TEST) DMLC_REGISTRY_FILE_TAG(rank_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct LambdaRankParam : public XGBoostParameter<LambdaRankParam> { size_t num_pairsample; float fix_list_weight; // declare parameters DMLC_DECLARE_PARAMETER(LambdaRankParam) { DMLC_DECLARE_FIELD(num_pairsample).set_lower_bound(1).set_default(1) .describe("Number of pair generated for each instance."); DMLC_DECLARE_FIELD(fix_list_weight).set_lower_bound(0.0f).set_default(0.0f) .describe("Normalize the weight of each list by this value," " if equals 0, no effect will happen"); } }; #if defined(__CUDACC__) // Helper functions template <typename T> XGBOOST_DEVICE __forceinline__ uint32_t CountNumItemsToTheLeftOf(const T *__restrict__ items, uint32_t n, T v) { return thrust::lower_bound(thrust::seq, items, items + n, v, thrust::greater<T>()) - items; } template <typename T> XGBOOST_DEVICE __forceinline__ uint32_t CountNumItemsToTheRightOf(const T *__restrict__ items, uint32_t n, T v) { return n - (thrust::upper_bound(thrust::seq, items, items + n, v, thrust::greater<T>()) - items); } #endif /*! \brief helper information in a list */ struct ListEntry { /*! \brief the predict score we in the data */ bst_float pred; /*! \brief the actual label of the entry */ bst_float label; /*! \brief row index in the data matrix */ unsigned rindex; // constructor ListEntry(bst_float pred, bst_float label, unsigned rindex) : pred(pred), label(label), rindex(rindex) {} // comparator by prediction inline static bool CmpPred(const ListEntry &a, const ListEntry &b) { return a.pred > b.pred; } // comparator by label inline static bool CmpLabel(const ListEntry &a, const ListEntry &b) { return a.label > b.label; } }; /*! \brief a pair in the lambda rank */ struct LambdaPair { /*! \brief positive index: this is a position in the list */ unsigned pos_index; /*! \brief negative index: this is a position in the list */ unsigned neg_index; /*! \brief weight to be filled in */ bst_float weight; // constructor LambdaPair(unsigned pos_index, unsigned neg_index) : pos_index(pos_index), neg_index(neg_index), weight(1.0f) {} // constructor LambdaPair(unsigned pos_index, unsigned neg_index, bst_float weight) : pos_index(pos_index), neg_index(neg_index), weight(weight) {} }; class PairwiseLambdaWeightComputer { public: /*! 
* \brief get lambda weight for existing pairs - for pairwise objective * \param list a list that is sorted by pred score * \param io_pairs record of pairs, containing the pairs to fill in weights */ static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list, std::vector<LambdaPair> *io_pairs) {} static char const* Name() { return "rank:pairwise"; } #if defined(__CUDACC__) PairwiseLambdaWeightComputer(const bst_float *dpreds, const bst_float *dlabels, const dh::SegmentSorter<float> &segment_label_sorter) {} class PairwiseLambdaWeightMultiplier { public: // Adjust the items weight by this value __device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const { return 1.0f; } }; inline const PairwiseLambdaWeightMultiplier GetWeightMultiplier() const { return {}; } #endif }; #if defined(__CUDACC__) class BaseLambdaWeightMultiplier { public: BaseLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter, const dh::SegmentSorter<float> &segment_pred_sorter) : dsorted_labels_(segment_label_sorter.GetItemsSpan()), dorig_pos_(segment_label_sorter.GetOriginalPositionsSpan()), dgroups_(segment_label_sorter.GetGroupsSpan()), dindexable_sorted_preds_pos_(segment_pred_sorter.GetIndexableSortedPositionsSpan()) {} protected: const common::Span<const float> dsorted_labels_; // Labels sorted within a group const common::Span<const uint32_t> dorig_pos_; // Original indices of the labels // before they are sorted const common::Span<const uint32_t> dgroups_; // The group indices // Where can a prediction for a label be found in the original array, when they are sorted const common::Span<const uint32_t> dindexable_sorted_preds_pos_; }; // While computing the weight that needs to be adjusted by this ranking objective, we need // to figure out where positive and negative labels chosen earlier exists, if the group // were to be sorted by its predictions. To accommodate this, we employ the following algorithm. // For a given group, let's assume the following: // labels: 1 5 9 2 4 8 0 7 6 3 // predictions: 1 9 0 8 2 7 3 6 5 4 // position: 0 1 2 3 4 5 6 7 8 9 // // After label sort: // labels: 9 8 7 6 5 4 3 2 1 0 // position: 2 5 7 8 1 4 9 3 0 6 // // After prediction sort: // predictions: 9 8 7 6 5 4 3 2 1 0 // position: 1 3 5 7 8 9 6 4 0 2 // // If a sorted label at position 'x' is chosen, then we need to find out where the prediction // for this label 'x' exists, if the group were to be sorted by predictions. // We first take the sorted prediction positions: // position: 1 3 5 7 8 9 6 4 0 2 // at indices: 0 1 2 3 4 5 6 7 8 9 // // We create a sorted prediction positional array, such that value at position 'x' gives // us the position in the sorted prediction array where its related prediction lies. // dindexable_sorted_preds_pos_: 8 0 9 1 7 2 6 3 4 5 // at indices: 0 1 2 3 4 5 6 7 8 9 // Basically, swap the previous 2 arrays, sort the indices and reorder positions // for an O(1) lookup using the position where the sorted label exists. 
// // This type does that using the SegmentSorter class IndexablePredictionSorter { public: IndexablePredictionSorter(const bst_float *dpreds, const dh::SegmentSorter<float> &segment_label_sorter) { // Sort the predictions first segment_pred_sorter_.SortItems(dpreds, segment_label_sorter.GetNumItems(), segment_label_sorter.GetGroupSegmentsSpan()); // Create an index for the sorted prediction positions segment_pred_sorter_.CreateIndexableSortedPositions(); } inline const dh::SegmentSorter<float> &GetPredictionSorter() const { return segment_pred_sorter_; } private: dh::SegmentSorter<float> segment_pred_sorter_; // For sorting the predictions }; #endif // beta version: NDCG lambda rank class NDCGLambdaWeightComputer #if defined(__CUDACC__) : public IndexablePredictionSorter #endif { public: #if defined(__CUDACC__) // This function object computes the item's DCG value class ComputeItemDCG : public thrust::unary_function<uint32_t, float> { public: XGBOOST_DEVICE ComputeItemDCG(const common::Span<const float> &dsorted_labels, const common::Span<const uint32_t> &dgroups, const common::Span<const uint32_t> &gidxs) : dsorted_labels_(dsorted_labels), dgroups_(dgroups), dgidxs_(gidxs) {} // Compute DCG for the item at 'idx' __device__ __forceinline__ float operator()(uint32_t idx) const { return ComputeItemDCGWeight(dsorted_labels_[idx], idx - dgroups_[dgidxs_[idx]]); } private: const common::Span<const float> dsorted_labels_; // Labels sorted within a group const common::Span<const uint32_t> dgroups_; // The group indices - where each group // begins and ends const common::Span<const uint32_t> dgidxs_; // The group each items belongs to }; // Type containing device pointers that can be cheaply copied on the kernel class NDCGLambdaWeightMultiplier : public BaseLambdaWeightMultiplier { public: NDCGLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter, const NDCGLambdaWeightComputer &lwc) : BaseLambdaWeightMultiplier(segment_label_sorter, lwc.GetPredictionSorter()), dgroup_dcgs_(lwc.GetGroupDcgsSpan()) {} // Adjust the items weight by this value __device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const { if (dgroup_dcgs_[gidx] == 0.0) return 0.0f; uint32_t group_begin = dgroups_[gidx]; auto pos_lab_orig_posn = dorig_pos_[pidx]; auto neg_lab_orig_posn = dorig_pos_[nidx]; KERNEL_CHECK(pos_lab_orig_posn != neg_lab_orig_posn); // Note: the label positive and negative indices are relative to the entire dataset. 
// Hence, scale them back to an index within the group auto pos_pred_pos = dindexable_sorted_preds_pos_[pos_lab_orig_posn] - group_begin; auto neg_pred_pos = dindexable_sorted_preds_pos_[neg_lab_orig_posn] - group_begin; return NDCGLambdaWeightComputer::ComputeDeltaWeight( pos_pred_pos, neg_pred_pos, static_cast<int>(dsorted_labels_[pidx]), static_cast<int>(dsorted_labels_[nidx]), dgroup_dcgs_[gidx]); } private: const common::Span<const float> dgroup_dcgs_; // Group DCG values }; NDCGLambdaWeightComputer(const bst_float *dpreds, const bst_float *dlabels, const dh::SegmentSorter<float> &segment_label_sorter) : IndexablePredictionSorter(dpreds, segment_label_sorter), dgroup_dcg_(segment_label_sorter.GetNumGroups(), 0.0f), weight_multiplier_(segment_label_sorter, *this) { const auto &group_segments = segment_label_sorter.GetGroupSegmentsSpan(); // Allocator to be used for managing space overhead while performing transformed reductions dh::XGBCachingDeviceAllocator<char> alloc; // Compute each elements DCG values and reduce them across groups concurrently. auto end_range = thrust::reduce_by_key(thrust::cuda::par(alloc), dh::tcbegin(group_segments), dh::tcend(group_segments), thrust::make_transform_iterator( // The indices need not be sequential within a group, as we care only // about the sum of items DCG values within a group dh::tcbegin(segment_label_sorter.GetOriginalPositionsSpan()), ComputeItemDCG(segment_label_sorter.GetItemsSpan(), segment_label_sorter.GetGroupsSpan(), group_segments)), thrust::make_discard_iterator(), // We don't care for the group indices dgroup_dcg_.begin()); // Sum of the item's DCG values in the group CHECK(end_range.second - dgroup_dcg_.begin() == dgroup_dcg_.size()); } inline const common::Span<const float> GetGroupDcgsSpan() const { return { dgroup_dcg_.data().get(), dgroup_dcg_.size() }; } inline const NDCGLambdaWeightMultiplier GetWeightMultiplier() const { return weight_multiplier_; } #endif static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list, std::vector<LambdaPair> *io_pairs) { std::vector<LambdaPair> &pairs = *io_pairs; float IDCG; // NOLINT { std::vector<bst_float> labels(sorted_list.size()); for (size_t i = 0; i < sorted_list.size(); ++i) { labels[i] = sorted_list[i].label; } std::stable_sort(labels.begin(), labels.end(), std::greater<>()); IDCG = ComputeGroupDCGWeight(&labels[0], labels.size()); } if (IDCG == 0.0) { for (auto & pair : pairs) { pair.weight = 0.0f; } } else { for (auto & pair : pairs) { unsigned pos_idx = pair.pos_index; unsigned neg_idx = pair.neg_index; pair.weight *= ComputeDeltaWeight(pos_idx, neg_idx, sorted_list[pos_idx].label, sorted_list[neg_idx].label, IDCG); } } } static char const* Name() { return "rank:ndcg"; } inline static bst_float ComputeGroupDCGWeight(const float *sorted_labels, uint32_t size) { double sumdcg = 0.0; for (uint32_t i = 0; i < size; ++i) { sumdcg += ComputeItemDCGWeight(sorted_labels[i], i); } return static_cast<bst_float>(sumdcg); } private: XGBOOST_DEVICE inline static bst_float ComputeItemDCGWeight(unsigned label, uint32_t idx) { return (label != 0) ? 
(((1 << label) - 1) / std::log2(static_cast<bst_float>(idx + 2))) : 0; } // Compute the weight adjustment for an item within a group: // pos_pred_pos => Where does the positive label live, had the list been sorted by prediction // neg_pred_pos => Where does the negative label live, had the list been sorted by prediction // pos_label => positive label value from sorted label list // neg_label => negative label value from sorted label list XGBOOST_DEVICE inline static bst_float ComputeDeltaWeight(uint32_t pos_pred_pos, uint32_t neg_pred_pos, int pos_label, int neg_label, float idcg) { float pos_loginv = 1.0f / std::log2(pos_pred_pos + 2.0f); float neg_loginv = 1.0f / std::log2(neg_pred_pos + 2.0f); bst_float original = ((1 << pos_label) - 1) * pos_loginv + ((1 << neg_label) - 1) * neg_loginv; float changed = ((1 << neg_label) - 1) * pos_loginv + ((1 << pos_label) - 1) * neg_loginv; bst_float delta = (original - changed) * (1.0f / idcg); if (delta < 0.0f) delta = - delta; return delta; } #if defined(__CUDACC__) dh::caching_device_vector<float> dgroup_dcg_; // This computes the adjustment to the weight const NDCGLambdaWeightMultiplier weight_multiplier_; #endif }; class MAPLambdaWeightComputer #if defined(__CUDACC__) : public IndexablePredictionSorter #endif { public: struct MAPStats { /*! \brief the accumulated precision */ float ap_acc{0.0f}; /*! * \brief the accumulated precision, * assuming a positive instance is missing */ float ap_acc_miss{0.0f}; /*! * \brief the accumulated precision, * assuming that one more positive instance is inserted ahead */ float ap_acc_add{0.0f}; /* \brief the accumulated positive instance count */ float hits{0.0f}; XGBOOST_DEVICE MAPStats() {} // NOLINT XGBOOST_DEVICE MAPStats(float ap_acc, float ap_acc_miss, float ap_acc_add, float hits) : ap_acc(ap_acc), ap_acc_miss(ap_acc_miss), ap_acc_add(ap_acc_add), hits(hits) {} // For prefix scan XGBOOST_DEVICE MAPStats operator +(const MAPStats &v1) const { return {ap_acc + v1.ap_acc, ap_acc_miss + v1.ap_acc_miss, ap_acc_add + v1.ap_acc_add, hits + v1.hits}; } // For test purposes - compare for equality XGBOOST_DEVICE bool operator ==(const MAPStats &rhs) const { return ap_acc == rhs.ap_acc && ap_acc_miss == rhs.ap_acc_miss && ap_acc_add == rhs.ap_acc_add && hits == rhs.hits; } }; private: template <typename T> XGBOOST_DEVICE inline static void Swap(T &v0, T &v1) { #if defined(__CUDACC__) thrust::swap(v0, v1); #else std::swap(v0, v1); #endif } /*! 
* \brief Obtain the delta MAP by trying to switch the positions of labels in pos_pred_pos or * neg_pred_pos when sorted by predictions * \param pos_pred_pos positive label's prediction value position when the groups prediction * values are sorted * \param neg_pred_pos negative label's prediction value position when the groups prediction * values are sorted * \param pos_label, neg_label the chosen positive and negative labels * \param p_map_stats a vector containing the accumulated precisions for each position in a list * \param map_stats_size size of the accumulated precisions vector */ XGBOOST_DEVICE inline static bst_float GetLambdaMAP( int pos_pred_pos, int neg_pred_pos, bst_float pos_label, bst_float neg_label, const MAPStats *p_map_stats, uint32_t map_stats_size) { if (pos_pred_pos == neg_pred_pos || p_map_stats[map_stats_size - 1].hits == 0) { return 0.0f; } if (pos_pred_pos > neg_pred_pos) { Swap(pos_pred_pos, neg_pred_pos); Swap(pos_label, neg_label); } bst_float original = p_map_stats[neg_pred_pos].ap_acc; if (pos_pred_pos != 0) original -= p_map_stats[pos_pred_pos - 1].ap_acc; bst_float changed = 0; bst_float label1 = pos_label > 0.0f ? 1.0f : 0.0f; bst_float label2 = neg_label > 0.0f ? 1.0f : 0.0f; if (label1 == label2) { return 0.0; } else if (label1 < label2) { changed += p_map_stats[neg_pred_pos - 1].ap_acc_add - p_map_stats[pos_pred_pos].ap_acc_add; changed += (p_map_stats[pos_pred_pos].hits + 1.0f) / (pos_pred_pos + 1); } else { changed += p_map_stats[neg_pred_pos - 1].ap_acc_miss - p_map_stats[pos_pred_pos].ap_acc_miss; changed += p_map_stats[neg_pred_pos].hits / (neg_pred_pos + 1); } bst_float ans = (changed - original) / (p_map_stats[map_stats_size - 1].hits); if (ans < 0) ans = -ans; return ans; } public: /* * \brief obtain preprocessing results for calculating delta MAP * \param sorted_list the list containing entry information * \param map_stats a vector containing the accumulated precisions for each position in a list */ inline static void GetMAPStats(const std::vector<ListEntry> &sorted_list, std::vector<MAPStats> *p_map_acc) { std::vector<MAPStats> &map_acc = *p_map_acc; map_acc.resize(sorted_list.size()); bst_float hit = 0, acc1 = 0, acc2 = 0, acc3 = 0; for (size_t i = 1; i <= sorted_list.size(); ++i) { if (sorted_list[i - 1].label > 0.0f) { hit++; acc1 += hit / i; acc2 += (hit - 1) / i; acc3 += (hit + 1) / i; } map_acc[i - 1] = MAPStats(acc1, acc2, acc3, hit); } } static char const* Name() { return "rank:map"; } static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list, std::vector<LambdaPair> *io_pairs) { std::vector<LambdaPair> &pairs = *io_pairs; std::vector<MAPStats> map_stats; GetMAPStats(sorted_list, &map_stats); for (auto & pair : pairs) { pair.weight *= GetLambdaMAP(pair.pos_index, pair.neg_index, sorted_list[pair.pos_index].label, sorted_list[pair.neg_index].label, &map_stats[0], map_stats.size()); } } #if defined(__CUDACC__) MAPLambdaWeightComputer(const bst_float *dpreds, const bst_float *dlabels, const dh::SegmentSorter<float> &segment_label_sorter) : IndexablePredictionSorter(dpreds, segment_label_sorter), dmap_stats_(segment_label_sorter.GetNumItems(), MAPStats()), weight_multiplier_(segment_label_sorter, *this) { this->CreateMAPStats(dlabels, segment_label_sorter); } void CreateMAPStats(const bst_float *dlabels, const dh::SegmentSorter<float> &segment_label_sorter) { // For each group, go through the sorted prediction positions, and look up its corresponding // label from the unsorted labels (from the original label list) // For each 
item in the group, compute its MAP stats. // Interleave the computation of map stats amongst different groups. // First, determine postive labels in the dataset individually auto nitems = segment_label_sorter.GetNumItems(); dh::caching_device_vector<uint32_t> dhits(nitems, 0); // Original positions of the predictions after they have been sorted const auto &pred_original_pos = this->GetPredictionSorter().GetOriginalPositionsSpan(); // Unsorted labels const float *unsorted_labels = dlabels; auto DeterminePositiveLabelLambda = [=] __device__(uint32_t idx) { return (unsorted_labels[pred_original_pos[idx]] > 0.0f) ? 1 : 0; }; // NOLINT thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(nitems), dhits.begin(), DeterminePositiveLabelLambda); // Allocator to be used by sort for managing space overhead while performing prefix scans dh::XGBCachingDeviceAllocator<char> alloc; // Next, prefix scan the positive labels that are segmented to accumulate them. // This is required for computing the accumulated precisions const auto &group_segments = segment_label_sorter.GetGroupSegmentsSpan(); // Data segmented into different groups... thrust::inclusive_scan_by_key(thrust::cuda::par(alloc), dh::tcbegin(group_segments), dh::tcend(group_segments), dhits.begin(), // Input value dhits.begin()); // In-place scan // Compute accumulated precisions for each item, assuming positive and // negative instances are missing. // But first, compute individual item precisions const auto *dhits_arr = dhits.data().get(); // Group info on device const auto &dgroups = segment_label_sorter.GetGroupsSpan(); auto ComputeItemPrecisionLambda = [=] __device__(uint32_t idx) { if (unsorted_labels[pred_original_pos[idx]] > 0.0f) { auto idx_within_group = (idx - dgroups[group_segments[idx]]) + 1; return MAPStats{static_cast<float>(dhits_arr[idx]) / idx_within_group, static_cast<float>(dhits_arr[idx] - 1) / idx_within_group, static_cast<float>(dhits_arr[idx] + 1) / idx_within_group, 1.0f}; } return MAPStats{}; }; // NOLINT thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(nitems), this->dmap_stats_.begin(), ComputeItemPrecisionLambda); // Lastly, compute the accumulated precisions for all the items segmented by groups. // The precisions are accumulated within each group thrust::inclusive_scan_by_key(thrust::cuda::par(alloc), dh::tcbegin(group_segments), dh::tcend(group_segments), this->dmap_stats_.begin(), // Input map stats this->dmap_stats_.begin()); // In-place scan and output here } inline const common::Span<const MAPStats> GetMapStatsSpan() const { return { dmap_stats_.data().get(), dmap_stats_.size() }; } // Type containing device pointers that can be cheaply copied on the kernel class MAPLambdaWeightMultiplier : public BaseLambdaWeightMultiplier { public: MAPLambdaWeightMultiplier(const dh::SegmentSorter<float> &segment_label_sorter, const MAPLambdaWeightComputer &lwc) : BaseLambdaWeightMultiplier(segment_label_sorter, lwc.GetPredictionSorter()), dmap_stats_(lwc.GetMapStatsSpan()) {} // Adjust the items weight by this value __device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const { uint32_t group_begin = dgroups_[gidx]; uint32_t group_end = dgroups_[gidx + 1]; auto pos_lab_orig_posn = dorig_pos_[pidx]; auto neg_lab_orig_posn = dorig_pos_[nidx]; KERNEL_CHECK(pos_lab_orig_posn != neg_lab_orig_posn); // Note: the label positive and negative indices are relative to the entire dataset. 
// Hence, scale them back to an index within the group auto pos_pred_pos = dindexable_sorted_preds_pos_[pos_lab_orig_posn] - group_begin; auto neg_pred_pos = dindexable_sorted_preds_pos_[neg_lab_orig_posn] - group_begin; return MAPLambdaWeightComputer::GetLambdaMAP( pos_pred_pos, neg_pred_pos, dsorted_labels_[pidx], dsorted_labels_[nidx], &dmap_stats_[group_begin], group_end - group_begin); } private: common::Span<const MAPStats> dmap_stats_; // Start address of the map stats for every sorted // prediction value }; inline const MAPLambdaWeightMultiplier GetWeightMultiplier() const { return weight_multiplier_; } private: dh::caching_device_vector<MAPStats> dmap_stats_; // This computes the adjustment to the weight const MAPLambdaWeightMultiplier weight_multiplier_; #endif }; #if defined(__CUDACC__) class SortedLabelList : dh::SegmentSorter<float> { private: const LambdaRankParam &param_; // Objective configuration public: explicit SortedLabelList(const LambdaRankParam &param) : param_(param) {} // Sort the labels that are grouped by 'groups' void Sort(const HostDeviceVector<bst_float> &dlabels, const std::vector<uint32_t> &groups) { this->SortItems(dlabels.ConstDevicePointer(), dlabels.Size(), groups); } // This kernel can only run *after* the kernel in sort is completed, as they // use the default stream template <typename LambdaWeightComputerT> void ComputeGradients(const bst_float *dpreds, // Unsorted predictions const bst_float *dlabels, // Unsorted labels const HostDeviceVector<bst_float> &weights, int iter, GradientPair *out_gpair, float weight_normalization_factor) { // Group info on device const auto &dgroups = this->GetGroupsSpan(); uint32_t ngroups = this->GetNumGroups() + 1; uint32_t total_items = this->GetNumItems(); uint32_t niter = param_.num_pairsample * total_items; float fix_list_weight = param_.fix_list_weight; const auto &original_pos = this->GetOriginalPositionsSpan(); uint32_t num_weights = weights.Size(); auto dweights = num_weights ? weights.ConstDevicePointer() : nullptr; const auto &sorted_labels = this->GetItemsSpan(); // This is used to adjust the weight of different elements based on the different ranking // objective function policies LambdaWeightComputerT weight_computer(dpreds, dlabels, *this); auto wmultiplier = weight_computer.GetWeightMultiplier(); int device_id = -1; dh::safe_cuda(cudaGetDevice(&device_id)); // For each instance in the group, compute the gradient pair concurrently dh::LaunchN(device_id, niter, nullptr, [=] __device__(uint32_t idx) { // First, determine the group 'idx' belongs to uint32_t item_idx = idx % total_items; uint32_t group_idx = thrust::upper_bound(thrust::seq, dgroups.begin(), dgroups.begin() + ngroups, item_idx) - dgroups.begin(); // Span of this group within the larger labels/predictions sorted tuple uint32_t group_begin = dgroups[group_idx - 1]; uint32_t group_end = dgroups[group_idx]; uint32_t total_group_items = group_end - group_begin; // Are the labels diverse enough? 
If they are all the same, then there is nothing to pick // from another group - bail sooner if (sorted_labels[group_begin] == sorted_labels[group_end - 1]) return; // Find the number of labels less than and greater than the current label // at the sorted index position item_idx uint32_t nleft = CountNumItemsToTheLeftOf( sorted_labels.data() + group_begin, item_idx - group_begin + 1, sorted_labels[item_idx]); uint32_t nright = CountNumItemsToTheRightOf( sorted_labels.data() + item_idx, group_end - item_idx, sorted_labels[item_idx]); // Create a minstd_rand object to act as our source of randomness thrust::minstd_rand rng((iter + 1) * 1111); rng.discard(((idx / total_items) * total_group_items) + item_idx - group_begin); // Create a uniform_int_distribution to produce a sample from outside of the // present label group thrust::uniform_int_distribution<int> dist(0, nleft + nright - 1); int sample = dist(rng); int pos_idx = -1; // Bigger label int neg_idx = -1; // Smaller label // Are we picking a sample to the left/right of the current group? if (sample < nleft) { // Go left pos_idx = sample + group_begin; neg_idx = item_idx; } else { pos_idx = item_idx; uint32_t items_in_group = total_group_items - nleft - nright; neg_idx = sample + items_in_group + group_begin; } // Compute and assign the gradients now const float eps = 1e-16f; bst_float p = common::Sigmoid(dpreds[original_pos[pos_idx]] - dpreds[original_pos[neg_idx]]); bst_float g = p - 1.0f; bst_float h = thrust::max(p * (1.0f - p), eps); // Rescale each gradient and hessian so that the group has a weighted constant float scale = __frcp_ru(niter / total_items); if (fix_list_weight != 0.0f) { scale *= fix_list_weight / total_group_items; } float weight = num_weights ? dweights[group_idx - 1] : 1.0f; weight *= weight_normalization_factor; weight *= wmultiplier.GetWeight(group_idx - 1, pos_idx, neg_idx); weight *= scale; // Accumulate gradient and hessian in both positive and negative indices const GradientPair in_pos_gpair(g * weight, 2.0f * weight * h); dh::AtomicAddGpair(&out_gpair[original_pos[pos_idx]], in_pos_gpair); const GradientPair in_neg_gpair(-g * weight, 2.0f * weight * h); dh::AtomicAddGpair(&out_gpair[original_pos[neg_idx]], in_neg_gpair); }); // Wait until the computations done by the kernel is complete dh::safe_cuda(cudaStreamSynchronize(nullptr)); } }; #endif // objective for lambda rank template <typename LambdaWeightComputerT> class LambdaRankObj : public ObjFunction { public: void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int iter, HostDeviceVector<GradientPair>* out_gpair) override { CHECK_EQ(preds.Size(), info.labels_.Size()) << "label size predict size not match"; // quick consistency when group is not available std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(info.labels_.Size()); const std::vector<unsigned> &gptr = info.group_ptr_.size() == 0 ? tgptr : info.group_ptr_; CHECK(gptr.size() != 0 && gptr.back() == info.labels_.Size()) << "group structure not consistent with #rows" << ", " << "group ponter size: " << gptr.size() << ", " << "labels size: " << info.labels_.Size() << ", " << "group pointer back: " << (gptr.size() == 0 ? 
0 : gptr.back()); #if defined(__CUDACC__) // Check if we have a GPU assignment; else, revert back to CPU auto device = tparam_->gpu_id; if (device >= 0) { ComputeGradientsOnGPU(preds, info, iter, out_gpair, gptr); } else { // Revert back to CPU #endif ComputeGradientsOnCPU(preds, info, iter, out_gpair, gptr); #if defined(__CUDACC__) } #endif } const char* DefaultEvalMetric() const override { return "map"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(LambdaWeightComputerT::Name()); out["lambda_rank_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["lambda_rank_param"], &param_); } private: bst_float ComputeWeightNormalizationFactor(const MetaInfo& info, const std::vector<unsigned> &gptr) { const auto ngroup = static_cast<bst_omp_uint>(gptr.size() - 1); bst_float sum_weights = 0; for (bst_omp_uint k = 0; k < ngroup; ++k) { sum_weights += info.GetWeight(k); } return ngroup / sum_weights; } void ComputeGradientsOnCPU(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int iter, HostDeviceVector<GradientPair>* out_gpair, const std::vector<unsigned> &gptr) { LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on CPU."; bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr); const auto& preds_h = preds.HostVector(); const auto& labels = info.labels_.HostVector(); std::vector<GradientPair>& gpair = out_gpair->HostVector(); const auto ngroup = static_cast<bst_omp_uint>(gptr.size() - 1); out_gpair->Resize(preds.Size()); #pragma omp parallel { // parallel construct, declare random number generator here, so that each // thread use its own random number generator, seed by thread id and current iteration std::minstd_rand rnd((iter + 1) * 1111); std::vector<LambdaPair> pairs; std::vector<ListEntry> lst; std::vector< std::pair<bst_float, unsigned> > rec; #pragma omp for schedule(static) for (bst_omp_uint k = 0; k < ngroup; ++k) { lst.clear(); pairs.clear(); for (unsigned j = gptr[k]; j < gptr[k+1]; ++j) { lst.emplace_back(preds_h[j], labels[j], j); gpair[j] = GradientPair(0.0f, 0.0f); } std::stable_sort(lst.begin(), lst.end(), ListEntry::CmpPred); rec.resize(lst.size()); for (unsigned i = 0; i < lst.size(); ++i) { rec[i] = std::make_pair(lst[i].label, i); } std::stable_sort(rec.begin(), rec.end(), common::CmpFirst); // enumerate buckets with same label, for each item in the lst, grab another sample randomly for (unsigned i = 0; i < rec.size(); ) { unsigned j = i + 1; while (j < rec.size() && rec[j].first == rec[i].first) ++j; // bucket in [i,j), get a sample outside bucket unsigned nleft = i, nright = static_cast<unsigned>(rec.size() - j); if (nleft + nright != 0) { int nsample = param_.num_pairsample; while (nsample --) { for (unsigned pid = i; pid < j; ++pid) { unsigned ridx = std::uniform_int_distribution<unsigned>(0, nleft + nright - 1)(rnd); if (ridx < nleft) { pairs.emplace_back(rec[ridx].second, rec[pid].second, info.GetWeight(k) * weight_normalization_factor); } else { pairs.emplace_back(rec[pid].second, rec[ridx+j-i].second, info.GetWeight(k) * weight_normalization_factor); } } } } i = j; } // get lambda weight for the pairs LambdaWeightComputerT::GetLambdaWeight(lst, &pairs); // rescale each gradient and hessian so that the lst have constant weighted float scale = 1.0f / param_.num_pairsample; if (param_.fix_list_weight != 0.0f) { scale *= param_.fix_list_weight / (gptr[k + 1] - gptr[k]); } for (auto & pair : pairs) { const ListEntry &pos = 
lst[pair.pos_index]; const ListEntry &neg = lst[pair.neg_index]; const bst_float w = pair.weight * scale; const float eps = 1e-16f; bst_float p = common::Sigmoid(pos.pred - neg.pred); bst_float g = p - 1.0f; bst_float h = std::max(p * (1.0f - p), eps); // accumulate gradient and hessian in both pid, and nid gpair[pos.rindex] += GradientPair(g * w, 2.0f*w*h); gpair[neg.rindex] += GradientPair(-g * w, 2.0f*w*h); } } } } #if defined(__CUDACC__) void ComputeGradientsOnGPU(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int iter, HostDeviceVector<GradientPair>* out_gpair, const std::vector<unsigned> &gptr) { LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on GPU."; auto device = tparam_->gpu_id; dh::safe_cuda(cudaSetDevice(device)); bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr); // Set the device ID and copy them to the device out_gpair->SetDevice(device); info.labels_.SetDevice(device); preds.SetDevice(device); info.weights_.SetDevice(device); out_gpair->Resize(preds.Size()); auto d_preds = preds.ConstDevicePointer(); auto d_gpair = out_gpair->DevicePointer(); auto d_labels = info.labels_.ConstDevicePointer(); SortedLabelList slist(param_); // Sort the labels within the groups on the device slist.Sort(info.labels_, gptr); // Initialize the gradients next out_gpair->Fill(GradientPair(0.0f, 0.0f)); // Finally, compute the gradients slist.ComputeGradients<LambdaWeightComputerT> (d_preds, d_labels, info.weights_, iter, d_gpair, weight_normalization_factor); } #endif LambdaRankParam param_; }; #if !defined(GTEST_TEST) // register the objective functions DMLC_REGISTER_PARAMETER(LambdaRankParam); XGBOOST_REGISTER_OBJECTIVE(PairwiseRankObj, PairwiseLambdaWeightComputer::Name()) .describe("Pairwise rank objective.") .set_body([]() { return new LambdaRankObj<PairwiseLambdaWeightComputer>(); }); XGBOOST_REGISTER_OBJECTIVE(LambdaRankNDCG, NDCGLambdaWeightComputer::Name()) .describe("LambdaRank with NDCG as objective.") .set_body([]() { return new LambdaRankObj<NDCGLambdaWeightComputer>(); }); XGBOOST_REGISTER_OBJECTIVE(LambdaRankObjMAP, MAPLambdaWeightComputer::Name()) .describe("LambdaRank with MAP as objective.") .set_body([]() { return new LambdaRankObj<MAPLambdaWeightComputer>(); }); #endif } // namespace obj } // namespace xgboost
f3f0cd61dff429e126b8404b017f2543e4cba02a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Assignment 06 Program - moving_average.cu * Sarah Helble * 10/06/17 * * Calculates the average of each index and its neighbors * * Usage ./aout * */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> // Execution Notes // 512, 512 gives about equivalent times // 512, 256 register is 2x faster // 256, 256 register is slightly better // 256, 128 register is slightly better // 512, 128 register is slightly better // 1024, 256 shared is slightly better // 1024, 512 register is slightly better // // First run always seems to be bad (2x slower) #define NUM_ELEMENTS 512 #define THREADS_PER_BLOCK 256 #define MAX_INT 30 /** * Returns the current time */ __host__ hipEvent_t get_time(void) { hipEvent_t time; hipEventCreate(&time); hipEventRecord(time); return time; } /** * Kernel function that takes a moving average of the values in * @list and puts the results in @averages * Uses registers to store the calculations. */ __global__ void average_using_registers(unsigned int *list, float *averages) { /* Calculate the current index */ const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if(idx < NUM_ELEMENTS) { unsigned int sum = list[idx]; unsigned int num = 1; // If there is a previous element, add it to sum if(idx > 0) { sum = sum + list[idx - 1]; num = num + 1; } // If there is a next element, add it to sum if((idx + 1) < NUM_ELEMENTS) { sum = sum + list[idx + 1]; num = num + 1; } averages[idx] = (float) sum / num; } } /** * Kernel function that takes a moving average of the values in * @list and puts the results in @averages * Uses shared memory to store the calculations. */ __global__ void average_using_shared(unsigned int *list, float *averages) { __shared__ unsigned int sums[NUM_ELEMENTS]; __shared__ unsigned int nums[NUM_ELEMENTS]; const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if(idx < NUM_ELEMENTS) { sums[idx] = list[idx]; nums[idx] = 1; // If there is a previous element, add it to sum if(idx > 0) { sums[idx] = sums[idx] + list[idx - 1]; nums[idx] = nums[idx] + 1; } // If there is a next element, add it to sum if((idx + 1) < NUM_ELEMENTS) { sums[idx] = sums[idx] + list[idx + 1]; nums[idx] = nums[idx] + 1; } // Calculate the average averages[idx] = (float) sums[idx] / nums[idx]; } } /** * Fuction to handle the printing of results. * @list is the original array * @averages is the result */ void print_results(unsigned int *list, float *averages) { int i = 0; printf("\n"); for(i = 0; i < NUM_ELEMENTS; i++) { printf("Original value at index [%d]: %d, average: %f\n", i, list[i], averages[i]); } printf("\n"); } /** * Function that sets up everything for the kernel function * * @array_size size of array (total number of threads) * @threads_per_block number of threads to put in each block * @use_registers is 1 if registers should be used. 
Otherwise, will call * kernel that uses shared memory */ void exec_kernel(bool use_registers) { /* Calculate the size of the array */ int array_size_in_bytes = (sizeof(unsigned int) * (NUM_ELEMENTS)); int float_array_size_in_bytes = (sizeof(float) * (NUM_ELEMENTS)); int i = 0; unsigned int *list; float *averages; //pin it hipHostMalloc((void **)&list, array_size_in_bytes); hipHostMalloc((void **)&averages, float_array_size_in_bytes); // Fill array with random numbers between 0 and MAX_INT for(i = 0; i < NUM_ELEMENTS; i++) { list[i] = (unsigned int) rand() % MAX_INT; } /* Declare and allocate pointers for GPU based parameters */ unsigned int *d_list; float *d_averages; hipMalloc((void **)&d_list, array_size_in_bytes); hipMalloc((void **)&d_averages, float_array_size_in_bytes); /* Copy the CPU memory to the GPU memory */ hipMemcpy(d_list, list, array_size_in_bytes, hipMemcpyHostToDevice); /* Designate the number of blocks and threads */ const unsigned int num_blocks = NUM_ELEMENTS/THREADS_PER_BLOCK; const unsigned int num_threads = NUM_ELEMENTS/num_blocks; /* Execute the kernel and keep track of start and end time for duration */ float duration = 0; hipEvent_t start_time = get_time(); if(use_registers) { hipLaunchKernelGGL(( average_using_registers), dim3(num_blocks), dim3(num_threads), 0, 0, d_list, d_averages); } else { hipLaunchKernelGGL(( average_using_shared), dim3(num_blocks), dim3(num_threads), 0, 0, d_list, d_averages); } hipEvent_t end_time = get_time(); hipEventSynchronize(end_time); hipEventElapsedTime(&duration, start_time, end_time); /* Copy the changed GPU memory back to the CPU */ hipMemcpy( averages, d_averages, float_array_size_in_bytes, hipMemcpyDeviceToHost); printf("\tDuration: %fmsn\n", duration); print_results(list, averages); /* Free the GPU memory */ hipFree(d_list); hipFree(d_averages); /* Free the pinned CPU memory */ hipHostFree(list); hipHostFree(averages); } /** * Entry point for execution. Checks command line arguments * then passes execution to subordinate function */ int main(int argc, char *argv[]) { printf("\n"); /* Do the average with shared memory */ printf("First Run of Averages Calculated using Shared Memory"); exec_kernel(false); printf("-----------------------------------------------------------------\n"); printf("Second Run of Averages Calculated using Shared Memory"); exec_kernel(false); printf("-----------------------------------------------------------------\n"); /* Do the average with registers*/ printf("First Run of Averages Calculated using Register Memory"); exec_kernel(true); printf("-----------------------------------------------------------------\n"); printf("Second Run of Averages Calculated using Register Memory"); exec_kernel(true); printf("-----------------------------------------------------------------\n"); return EXIT_SUCCESS; }
f3f0cd61dff429e126b8404b017f2543e4cba02a.cu
/** * Assignment 06 Program - moving_average.cu * Sarah Helble * 10/06/17 * * Calculates the average of each index and its neighbors * * Usage ./aout * */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> // Execution Notes // 512, 512 gives about equivalent times // 512, 256 register is 2x faster // 256, 256 register is slightly better // 256, 128 register is slightly better // 512, 128 register is slightly better // 1024, 256 shared is slightly better // 1024, 512 register is slightly better // // First run always seems to be bad (2x slower) #define NUM_ELEMENTS 512 #define THREADS_PER_BLOCK 256 #define MAX_INT 30 /** * Returns the current time */ __host__ cudaEvent_t get_time(void) { cudaEvent_t time; cudaEventCreate(&time); cudaEventRecord(time); return time; } /** * Kernel function that takes a moving average of the values in * @list and puts the results in @averages * Uses registers to store the calculations. */ __global__ void average_using_registers(unsigned int *list, float *averages) { /* Calculate the current index */ const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if(idx < NUM_ELEMENTS) { unsigned int sum = list[idx]; unsigned int num = 1; // If there is a previous element, add it to sum if(idx > 0) { sum = sum + list[idx - 1]; num = num + 1; } // If there is a next element, add it to sum if((idx + 1) < NUM_ELEMENTS) { sum = sum + list[idx + 1]; num = num + 1; } averages[idx] = (float) sum / num; } } /** * Kernel function that takes a moving average of the values in * @list and puts the results in @averages * Uses shared memory to store the calculations. */ __global__ void average_using_shared(unsigned int *list, float *averages) { __shared__ unsigned int sums[NUM_ELEMENTS]; __shared__ unsigned int nums[NUM_ELEMENTS]; const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if(idx < NUM_ELEMENTS) { sums[idx] = list[idx]; nums[idx] = 1; // If there is a previous element, add it to sum if(idx > 0) { sums[idx] = sums[idx] + list[idx - 1]; nums[idx] = nums[idx] + 1; } // If there is a next element, add it to sum if((idx + 1) < NUM_ELEMENTS) { sums[idx] = sums[idx] + list[idx + 1]; nums[idx] = nums[idx] + 1; } // Calculate the average averages[idx] = (float) sums[idx] / nums[idx]; } } /** * Fuction to handle the printing of results. * @list is the original array * @averages is the result */ void print_results(unsigned int *list, float *averages) { int i = 0; printf("\n"); for(i = 0; i < NUM_ELEMENTS; i++) { printf("Original value at index [%d]: %d, average: %f\n", i, list[i], averages[i]); } printf("\n"); } /** * Function that sets up everything for the kernel function * * @array_size size of array (total number of threads) * @threads_per_block number of threads to put in each block * @use_registers is 1 if registers should be used. 
Otherwise, will call * kernel that uses shared memory */ void exec_kernel(bool use_registers) { /* Calculate the size of the array */ int array_size_in_bytes = (sizeof(unsigned int) * (NUM_ELEMENTS)); int float_array_size_in_bytes = (sizeof(float) * (NUM_ELEMENTS)); int i = 0; unsigned int *list; float *averages; //pin it cudaMallocHost((void **)&list, array_size_in_bytes); cudaMallocHost((void **)&averages, float_array_size_in_bytes); // Fill array with random numbers between 0 and MAX_INT for(i = 0; i < NUM_ELEMENTS; i++) { list[i] = (unsigned int) rand() % MAX_INT; } /* Declare and allocate pointers for GPU based parameters */ unsigned int *d_list; float *d_averages; cudaMalloc((void **)&d_list, array_size_in_bytes); cudaMalloc((void **)&d_averages, float_array_size_in_bytes); /* Copy the CPU memory to the GPU memory */ cudaMemcpy(d_list, list, array_size_in_bytes, cudaMemcpyHostToDevice); /* Designate the number of blocks and threads */ const unsigned int num_blocks = NUM_ELEMENTS/THREADS_PER_BLOCK; const unsigned int num_threads = NUM_ELEMENTS/num_blocks; /* Execute the kernel and keep track of start and end time for duration */ float duration = 0; cudaEvent_t start_time = get_time(); if(use_registers) { average_using_registers<<<num_blocks, num_threads>>>(d_list, d_averages); } else { average_using_shared<<<num_blocks, num_threads>>>(d_list, d_averages); } cudaEvent_t end_time = get_time(); cudaEventSynchronize(end_time); cudaEventElapsedTime(&duration, start_time, end_time); /* Copy the changed GPU memory back to the CPU */ cudaMemcpy( averages, d_averages, float_array_size_in_bytes, cudaMemcpyDeviceToHost); printf("\tDuration: %fmsn\n", duration); print_results(list, averages); /* Free the GPU memory */ cudaFree(d_list); cudaFree(d_averages); /* Free the pinned CPU memory */ cudaFreeHost(list); cudaFreeHost(averages); } /** * Entry point for execution. Checks command line arguments * then passes execution to subordinate function */ int main(int argc, char *argv[]) { printf("\n"); /* Do the average with shared memory */ printf("First Run of Averages Calculated using Shared Memory"); exec_kernel(false); printf("-----------------------------------------------------------------\n"); printf("Second Run of Averages Calculated using Shared Memory"); exec_kernel(false); printf("-----------------------------------------------------------------\n"); /* Do the average with registers*/ printf("First Run of Averages Calculated using Register Memory"); exec_kernel(true); printf("-----------------------------------------------------------------\n"); printf("Second Run of Averages Calculated using Register Memory"); exec_kernel(true); printf("-----------------------------------------------------------------\n"); return EXIT_SUCCESS; }
37d3240236e0ee4d5199d528eca0c4b3a707b0b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <assert.h> void init(int *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = i; } } inline hipError_t checkCuda(hipError_t result) { if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } return result; } __global__ void doubleElements(int *a, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = idx; i < N + stride; i += stride) { a[i] *= 2; } } bool checkElementsAreDoubled(int *a, int N) { int i; for (i = 0; i < N; ++i) { if (a[i] != i*2) return false; } return true; } int main() { /* * Add error handling to this source code to learn what errors * exist, and then correct them. Googling error messages may be * of service if actions for resolving them are not clear to you. */ int N = 10000; int *a; size_t size = N * sizeof(int); checkCuda(hipMallocManaged(&a, size)); init(a, N); size_t threads_per_block = 1024; size_t number_of_blocks = 32; hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, N); hipError_t err; err = hipGetLastError(); // `hipGetLastError` will return the error from above. if (err != hipSuccess) { printf("Error: %s\n", hipGetErrorString(err)); } checkCuda(hipDeviceSynchronize()); bool areDoubled = checkElementsAreDoubled(a, N); printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE"); checkCuda(hipFree(a)); }
37d3240236e0ee4d5199d528eca0c4b3a707b0b7.cu
#include <stdio.h>
#include <assert.h>

void init(int *a, int N)
{
  int i;
  for (i = 0; i < N; ++i)
  {
    a[i] = i;
  }
}

inline cudaError_t checkCuda(cudaError_t result)
{
  if (result != cudaSuccess) {
    fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
    assert(result == cudaSuccess);
  }
  return result;
}

__global__
void doubleElements(int *a, int N)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;

  // Grid-stride loop over the N valid elements only; iterating to N + stride
  // would write past the end of the allocation.
  for (int i = idx; i < N; i += stride)
  {
    a[i] *= 2;
  }
}

bool checkElementsAreDoubled(int *a, int N)
{
  int i;
  for (i = 0; i < N; ++i)
  {
    if (a[i] != i*2) return false;
  }
  return true;
}

int main()
{
  /*
   * Add error handling to this source code to learn what errors
   * exist, and then correct them. Googling error messages may be
   * of service if actions for resolving them are not clear to you.
   */

  int N = 10000;
  int *a;

  size_t size = N * sizeof(int);
  checkCuda(cudaMallocManaged(&a, size));

  init(a, N);

  size_t threads_per_block = 1024;
  size_t number_of_blocks = 32;

  doubleElements<<<number_of_blocks, threads_per_block>>>(a, N);

  cudaError_t err;
  err = cudaGetLastError(); // `cudaGetLastError` will return the error from above.
  if (err != cudaSuccess)
  {
    printf("Error: %s\n", cudaGetErrorString(err));
  }

  checkCuda(cudaDeviceSynchronize());

  bool areDoubled = checkElementsAreDoubled(a, N);
  printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");

  checkCuda(cudaFree(a));
}
9f697244a334472a371f669596b47ec9e2a0b504.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void histogram_cuda(int *histogram, float *values, size_t nb, float bin_size, float min, int bins, int nb_thread) { // nb = total size of elems int id = (blockIdx.x * blockDim.x + threadIdx.x) * ITEMS_PER_THREAD; int thread_id = threadIdx.x; int *local_hist = (int *)malloc(sizeof(int) * bins); if (id == 0) printf("Bin size : %f\n", bin_size); // Init local histogram for (int i = 0; i < bins; i++) local_hist[i] = 0; // One shared array per bin extern __shared__ int s_bins[]; // Compute serially local bin for (int i = 0; i < ITEMS_PER_THREAD; i++) { for (int j = 0; j < bins; j += 1) { // if (id + i < NB) // printf("values[%d] = %f <= %f\n", id + i, values[id + i], (float)min + (float)(j + 1) * bin_size); if (id + i < nb && values[id + i] <= ((float)min + (float)(j + 1) * bin_size)) { local_hist[j] += 1; //printf("BlockIdx : %d - Thread %d : values[%d] = %f -> local_hist[%d] = %d\n", blockIdx.x, thread_id, id + i, values[id + i], j, local_hist[j]); break ; } } } __syncthreads(); // Store local bins into shared bins for (int i = 0; i < bins; i++) { s_bins[THREADS * i + thread_id] = local_hist[i]; // printf("Block %d - Thread %d : s_bins[%d] = local_hist[%d] = %d\n", blockIdx.x, thread_id, THREADS * i + thread_id, i, local_hist[i]); } __syncthreads(); // if (thread_id == 0) // { // for (int i = 0; i < nb_thread * 3; i++) // { // printf("s_bins[%d] = %d\n", i, s_bins[i]); // } // } // Reduce each shared bin // int size = (blockIdx.x == gridDim.x - 1) ? (NB % blockDim.x) : blockDim.x; int size = THREADS; for (size_t s = THREADS / 2; s > 0; s >>= 1) { if (thread_id + s < THREADS && thread_id < s) { for (size_t j = 0; j < bins; j++) { s_bins[j * THREADS + thread_id] = s_bins[j * THREADS + thread_id] + s_bins[j * THREADS + thread_id + s]; if (size % 2 == 1 && thread_id + s + s == size - 1) s_bins[j * THREADS + thread_id] = s_bins[j * THREADS + thread_id] + s_bins[j * THREADS + thread_id + s + s]; } } __syncthreads(); size = s; } // Store the result into histogram if (thread_id == 0) { for (int i = 0; i < bins; i++) { histogram[i + blockIdx.x * bins] = s_bins[THREADS * i]; // histogram[0 + blockIdx.x * bins] = s_bins[0]; // histogram[1 + blockIdx.x * bins] = s_bins[THREADS]; // histogram[2 + blockIdx.x * bins] = s_bins[THREADS * 2]; // printf("histogram[%d] = %d\n", 0 + blockIdx.x * bins, s_bins[0]); // printf("histogram[%d] = %d\n", 1 + blockIdx.x * bins, s_bins[THREADS]); // printf("histogram[%d] = %d\n", i + blockIdx.x * bins, s_bins[THREADS * i]); } } }
9f697244a334472a371f669596b47ec9e2a0b504.cu
#include "includes.h"

__global__ void histogram_cuda(int *histogram, float *values, size_t nb, float bin_size, float min, int bins, int nb_thread)
{
  // nb = total size of elems
  int id = (blockIdx.x * blockDim.x + threadIdx.x) * ITEMS_PER_THREAD;
  int thread_id = threadIdx.x;
  int *local_hist = (int *)malloc(sizeof(int) * bins);

  if (id == 0)
    printf("Bin size : %f\n", bin_size);

  // Init local histogram
  for (int i = 0; i < bins; i++)
    local_hist[i] = 0;

  // One shared array per bin
  extern __shared__ int s_bins[];

  // Compute serially local bin
  for (int i = 0; i < ITEMS_PER_THREAD; i++)
  {
    for (int j = 0; j < bins; j += 1)
    {
      // if (id + i < NB)
      //   printf("values[%d] = %f <= %f\n", id + i, values[id + i], (float)min + (float)(j + 1) * bin_size);
      if (id + i < nb && values[id + i] <= ((float)min + (float)(j + 1) * bin_size))
      {
        local_hist[j] += 1;
        //printf("BlockIdx : %d - Thread %d : values[%d] = %f -> local_hist[%d] = %d\n", blockIdx.x, thread_id, id + i, values[id + i], j, local_hist[j]);
        break ;
      }
    }
  }
  __syncthreads();

  // Store local bins into shared bins
  for (int i = 0; i < bins; i++)
  {
    s_bins[THREADS * i + thread_id] = local_hist[i];
    // printf("Block %d - Thread %d : s_bins[%d] = local_hist[%d] = %d\n", blockIdx.x, thread_id, THREADS * i + thread_id, i, local_hist[i]);
  }
  // The per-thread scratch histogram is no longer needed; release the device-heap allocation.
  free(local_hist);
  __syncthreads();

  // if (thread_id == 0)
  // {
  //   for (int i = 0; i < nb_thread * 3; i++)
  //   {
  //     printf("s_bins[%d] = %d\n", i, s_bins[i]);
  //   }
  // }

  // Reduce each shared bin
  // int size = (blockIdx.x == gridDim.x - 1) ? (NB % blockDim.x) : blockDim.x;
  int size = THREADS;
  for (size_t s = THREADS / 2; s > 0; s >>= 1)
  {
    if (thread_id + s < THREADS && thread_id < s)
    {
      for (size_t j = 0; j < bins; j++)
      {
        s_bins[j * THREADS + thread_id] = s_bins[j * THREADS + thread_id] + s_bins[j * THREADS + thread_id + s];
        if (size % 2 == 1 && thread_id + s + s == size - 1)
          s_bins[j * THREADS + thread_id] = s_bins[j * THREADS + thread_id] + s_bins[j * THREADS + thread_id + s + s];
      }
    }
    __syncthreads();
    size = s;
  }

  // Store the result into histogram
  if (thread_id == 0)
  {
    for (int i = 0; i < bins; i++)
    {
      histogram[i + blockIdx.x * bins] = s_bins[THREADS * i];
      // histogram[0 + blockIdx.x * bins] = s_bins[0];
      // histogram[1 + blockIdx.x * bins] = s_bins[THREADS];
      // histogram[2 + blockIdx.x * bins] = s_bins[THREADS * 2];
      // printf("histogram[%d] = %d\n", 0 + blockIdx.x * bins, s_bins[0]);
      // printf("histogram[%d] = %d\n", 1 + blockIdx.x * bins, s_bins[THREADS]);
      // printf("histogram[%d] = %d\n", i + blockIdx.x * bins, s_bins[THREADS * i]);
    }
  }
}
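// The kernel above expects a THREADS x bins scratch area in dynamic shared memory
// and relies on the THREADS / ITEMS_PER_THREAD macros from "includes.h", which is
// not part of this pair. The launcher below is only a sketch of how such a launch
// could look: the macro values, the launch_histogram() name, and the device-heap
// sizing are assumptions, not part of the original sources.
#include <cuda_runtime.h>

#define THREADS 256          // assumed block size; must match the kernel's macro
#define ITEMS_PER_THREAD 4   // assumed number of values consumed per thread

__global__ void histogram_cuda(int *histogram, float *values, size_t nb,
                               float bin_size, float min_value, int bins, int nb_thread);

void launch_histogram(int *d_partial_hist, float *d_values, size_t nb,
                      float bin_size, float min_value, int bins) {
  // Each thread consumes ITEMS_PER_THREAD values, so cover nb with enough blocks.
  size_t items_per_block = (size_t)THREADS * ITEMS_PER_THREAD;
  unsigned int blocks = (unsigned int)((nb + items_per_block - 1) / items_per_block);

  // The kernel calls malloc() on the device, so the device heap must be able to
  // hold one bins-sized scratch buffer per thread.
  cudaDeviceSetLimit(cudaLimitMallocHeapSize,
                     (size_t)blocks * THREADS * bins * sizeof(int));

  // One shared-memory slot per (bin, thread) pair, as indexed by s_bins[THREADS * bin + tid].
  size_t shmem = (size_t)THREADS * bins * sizeof(int);
  histogram_cuda<<<blocks, THREADS, shmem>>>(d_partial_hist, d_values, nb,
                                             bin_size, min_value, bins, THREADS);

  // d_partial_hist now holds one partial histogram per block (blocks * bins ints);
  // a final reduction over blocks is still needed on the host or in a second kernel.
}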
453889edae1e783a2cf8ef50e37bd3650df6e647.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <vector>

#include "cuda_utils.cuh"
#include "shared_memory.cuh"
#include "shared_memory.h"

void executeCUDA(std::vector<int> &a, std::vector<int> &r, std::vector<int> &d, int n) {
  int *d_d = nullptr;
  hipMalloc(&d_d, n * sizeof(int));

  hipMemcpy(d_d, a.data(), n * sizeof(int), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(( staticReverse), dim3(1), dim3(n), 0, 0, d_d, n);
  hipMemcpy(d.data(), d_d, n * sizeof(int), hipMemcpyDeviceToHost);
  for (int i = 0; i < n; i++)
    if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);

  hipMemcpy(d_d, a.data(), n * sizeof(int), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(( dynamicReverse), dim3(1), dim3(n), n * sizeof(int), 0, d_d, n);
  hipMemcpy(d.data(), d_d, n * sizeof(int), hipMemcpyDeviceToHost);
  for (int i = 0; i < n; i++)
    if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
}

void executeISPC(std::vector<int> &a, std::vector<int> &r, std::vector<int> &d, int n) {
  int *d_d = (int *)malloc(n * sizeof(int));

  memcpy(d_d, a.data(), n * sizeof(int));
  ispc::staticReverse({1, 1, 1}, {n, 1, 1}, 0, d_d, n);
  memcpy(d.data(), d_d, n * sizeof(int));
  for (int i = 0; i < n; i++)
    if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);

  memcpy(d_d, a.data(), n * sizeof(int));
  ispc::dynamicReverse({1, 1, 1}, {n, 1, 1}, n * sizeof(int), d_d, n);
  memcpy(d.data(), d_d, n * sizeof(int));
  for (int i = 0; i < n; i++)
    if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
}

int main(void) {
  const int n = 64;
  std::vector<int> a(n), r(n), cuda(n), ispc(n);

  for (int i = 0; i < n; i++) {
    a[i] = i;
    r[i] = n - i - 1;
    ispc[i] = cuda[i] = 0;
  }

  executeCUDA(a, r, cuda, n);
  executeISPC(a, r, ispc, n);

  if (checkResults(n, r, cuda, ispc))
    return 1;

  return 0;
}
453889edae1e783a2cf8ef50e37bd3650df6e647.cu
#include <cuda_runtime.h>
#include <iostream>
#include <vector>

#include "cuda_utils.cuh"
#include "shared_memory.cuh"
#include "shared_memory.h"

void executeCUDA(std::vector<int> &a, std::vector<int> &r, std::vector<int> &d, int n) {
  int *d_d = nullptr;
  cudaMalloc(&d_d, n * sizeof(int));

  cudaMemcpy(d_d, a.data(), n * sizeof(int), cudaMemcpyHostToDevice);
  staticReverse<<<1, n>>>(d_d, n);
  cudaMemcpy(d.data(), d_d, n * sizeof(int), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; i++)
    if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);

  cudaMemcpy(d_d, a.data(), n * sizeof(int), cudaMemcpyHostToDevice);
  dynamicReverse<<<1, n, n * sizeof(int)>>>(d_d, n);
  cudaMemcpy(d.data(), d_d, n * sizeof(int), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; i++)
    if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
}

void executeISPC(std::vector<int> &a, std::vector<int> &r, std::vector<int> &d, int n) {
  int *d_d = (int *)malloc(n * sizeof(int));

  memcpy(d_d, a.data(), n * sizeof(int));
  ispc::staticReverse({1, 1, 1}, {n, 1, 1}, 0, d_d, n);
  memcpy(d.data(), d_d, n * sizeof(int));
  for (int i = 0; i < n; i++)
    if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);

  memcpy(d_d, a.data(), n * sizeof(int));
  ispc::dynamicReverse({1, 1, 1}, {n, 1, 1}, n * sizeof(int), d_d, n);
  memcpy(d.data(), d_d, n * sizeof(int));
  for (int i = 0; i < n; i++)
    if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
}

int main(void) {
  const int n = 64;
  std::vector<int> a(n), r(n), cuda(n), ispc(n);

  for (int i = 0; i < n; i++) {
    a[i] = i;
    r[i] = n - i - 1;
    ispc[i] = cuda[i] = 0;
  }

  executeCUDA(a, r, cuda, n);
  executeISPC(a, r, ispc, n);

  if (checkResults(n, r, cuda, ispc))
    return 1;

  return 0;
}
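// Neither shared_memory.cuh nor shared_memory.h is included in this pair, so the
// staticReverse/dynamicReverse kernels launched above are not visible here. The
// sketch below shows what such kernels typically look like (one with a fixed-size
// __shared__ array, one sized by the dynamic shared-memory launch argument); the
// actual header may differ.
__global__ void staticReverse(int *d, int n) {
  __shared__ int s[64];          // fixed size; matches n = 64 used in main()
  int t = threadIdx.x;
  int tr = n - t - 1;
  s[t] = d[t];
  __syncthreads();               // all elements staged before any thread reads
  d[t] = s[tr];
}

__global__ void dynamicReverse(int *d, int n) {
  extern __shared__ int s[];     // sized by the n * sizeof(int) launch parameter
  int t = threadIdx.x;
  int tr = n - t - 1;
  s[t] = d[t];
  __syncthreads();
  d[t] = s[tr];
}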
0a028bdd5dae307216faca9dfb50b4cee01eed7b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/boolean_unmask_ops.h" namespace caffe2 { namespace { __global__ void ComputeIndicesKernel( const int numMasks, const int maskSize, int* indices, bool* const masks[]) { CUDA_1D_KERNEL_LOOP(i, maskSize) { for (int j = 0; j < numMasks; ++j) { if (masks[j][i]) { indices[i] = j; return; } } CUDA_KERNEL_ASSERT(false); } } __global__ void FillValuesKernel( const int numMasks, const int maskSize, const size_t itemSize, const int* indices, char* const values[], int* valueSizes, char* dest) { CUDA_1D_KERNEL_LOOP(j, numMasks) { int k = 0; for (int i = 0; i < maskSize; ++i) { if (indices[i] == j) { for (int h = 0; h < itemSize; ++h) { dest[i * itemSize + h] = values[j][k * itemSize + h]; } ++k; } } CUDA_KERNEL_ASSERT(valueSizes[j] == k); } } } // namespace template <> class BooleanUnmaskOp<CUDAContext> final : public Operator<CUDAContext> { public: BooleanUnmaskOp(const OperatorDef& def, Workspace* ws) : Operator<CUDAContext>(def, ws) {} bool RunOnDevice() override { int maskSize = Input(0).size(); int numMasks = InputSize() / 2; const auto& meta = Input(1).meta(); auto* out = Output(0); out->Resize(maskSize); auto* dest = (char*)out->raw_mutable_data(meta); hostMasks_.Resize(numMasks); auto* hostMasksData = hostMasks_.mutable_data<bool*>(); hostValues_.Resize(numMasks); auto* hostValuesData = hostValues_.mutable_data<char*>(); hostValueSizes_.Resize(numMasks); auto* hostValueSizesData = hostValueSizes_.mutable_data<int>(); for (int i = 0; i < numMasks; ++i) { auto& mask = Input(i * 2); CAFFE_ENFORCE_EQ(mask.ndim(), 1); CAFFE_ENFORCE_EQ(mask.size(), maskSize); hostMasksData[i] = const_cast<bool*>(mask.data<bool>()); const auto& value = Input(i * 2 + 1); CAFFE_ENFORCE_EQ(value.ndim(), 1); hostValuesData[i] = (char*)value.raw_data(); hostValueSizesData[i] = value.size(); } masks_.CopyFrom(hostMasks_); values_.CopyFrom(hostValues_); valueSizes_.CopyFrom(hostValueSizes_); indices_.Resize(maskSize); auto* indicesData = indices_.mutable_data<int>(); hipLaunchKernelGGL(( ComputeIndicesKernel), dim3(min(maskSize, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), numMasks, maskSize, indicesData, masks_.data<bool*>()); auto* valueSizesData = valueSizes_.mutable_data<int>(); hipLaunchKernelGGL(( FillValuesKernel), dim3(min(numMasks, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), numMasks, maskSize, meta.itemsize(), indicesData, values_.data<char*>(), valueSizesData, dest); return true; } private: Tensor indices_{CUDA}; Tensor masks_{CUDA}; Tensor values_{CUDA}; Tensor valueSizes_{CUDA}; Tensor hostMasks_{CPU}; Tensor hostValues_{CPU}; Tensor hostValueSizes_{CPU}; }; REGISTER_CUDA_OPERATOR(BooleanUnmask, BooleanUnmaskOp<CUDAContext>); } // caffe2
0a028bdd5dae307216faca9dfb50b4cee01eed7b.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/boolean_unmask_ops.h" namespace caffe2 { namespace { __global__ void ComputeIndicesKernel( const int numMasks, const int maskSize, int* indices, bool* const masks[]) { CUDA_1D_KERNEL_LOOP(i, maskSize) { for (int j = 0; j < numMasks; ++j) { if (masks[j][i]) { indices[i] = j; return; } } CUDA_KERNEL_ASSERT(false); } } __global__ void FillValuesKernel( const int numMasks, const int maskSize, const size_t itemSize, const int* indices, char* const values[], int* valueSizes, char* dest) { CUDA_1D_KERNEL_LOOP(j, numMasks) { int k = 0; for (int i = 0; i < maskSize; ++i) { if (indices[i] == j) { for (int h = 0; h < itemSize; ++h) { dest[i * itemSize + h] = values[j][k * itemSize + h]; } ++k; } } CUDA_KERNEL_ASSERT(valueSizes[j] == k); } } } // namespace template <> class BooleanUnmaskOp<CUDAContext> final : public Operator<CUDAContext> { public: BooleanUnmaskOp(const OperatorDef& def, Workspace* ws) : Operator<CUDAContext>(def, ws) {} bool RunOnDevice() override { int maskSize = Input(0).size(); int numMasks = InputSize() / 2; const auto& meta = Input(1).meta(); auto* out = Output(0); out->Resize(maskSize); auto* dest = (char*)out->raw_mutable_data(meta); hostMasks_.Resize(numMasks); auto* hostMasksData = hostMasks_.mutable_data<bool*>(); hostValues_.Resize(numMasks); auto* hostValuesData = hostValues_.mutable_data<char*>(); hostValueSizes_.Resize(numMasks); auto* hostValueSizesData = hostValueSizes_.mutable_data<int>(); for (int i = 0; i < numMasks; ++i) { auto& mask = Input(i * 2); CAFFE_ENFORCE_EQ(mask.ndim(), 1); CAFFE_ENFORCE_EQ(mask.size(), maskSize); hostMasksData[i] = const_cast<bool*>(mask.data<bool>()); const auto& value = Input(i * 2 + 1); CAFFE_ENFORCE_EQ(value.ndim(), 1); hostValuesData[i] = (char*)value.raw_data(); hostValueSizesData[i] = value.size(); } masks_.CopyFrom(hostMasks_); values_.CopyFrom(hostValues_); valueSizes_.CopyFrom(hostValueSizes_); indices_.Resize(maskSize); auto* indicesData = indices_.mutable_data<int>(); ComputeIndicesKernel<<< min(maskSize, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( numMasks, maskSize, indicesData, masks_.data<bool*>()); auto* valueSizesData = valueSizes_.mutable_data<int>(); FillValuesKernel<<< min(numMasks, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( numMasks, maskSize, meta.itemsize(), indicesData, values_.data<char*>(), valueSizesData, dest); return true; } private: Tensor indices_{CUDA}; Tensor masks_{CUDA}; Tensor values_{CUDA}; Tensor valueSizes_{CUDA}; Tensor hostMasks_{CPU}; Tensor hostValues_{CPU}; Tensor hostValueSizes_{CPU}; }; REGISTER_CUDA_OPERATOR(BooleanUnmask, BooleanUnmaskOp<CUDAContext>); } // caffe2
67f159edcb9e1c4cceb9a07a3c7b37c1c72ff5e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/scalar_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ScalarForward(const int n, const Dtype* in, const Dtype* scalars, const int scalar_dim, const int inner_dim, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { const int scalar_index = (index / inner_dim) % scalar_dim; out[index] = in[index] * scalars[scalar_index]; } } template <typename Dtype> void ScalarLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = top[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* scalar_data = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL((ScalarForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, scalar_data, scalar_dim_, inner_dim_, top_data); } template <typename Dtype> void ScalarLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); // Hack: store big eltwise product in bottom[0] diff, except in the special // case where this layer itself does the eltwise product, in which case we // can store it directly in the scalar diff, and we're done. const bool is_eltwise = (inner_dim_ == 1 && outer_dim_ == 1); Dtype* product = is_eltwise ? bottom[1]->mutable_gpu_diff() : bottom[0]->mutable_gpu_diff(); caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product); if (!is_eltwise) { Dtype* sum_result = NULL; if (inner_dim_ == 1) { sum_result = product; } else if (sum_result_.count() == 1) { const Dtype* sum_mult = sum_multiplier_.gpu_data(); Dtype* scalar_diff = bottom[1]->mutable_cpu_diff(); caffe_gpu_dot(inner_dim_, product, sum_mult, scalar_diff); } else { const Dtype* sum_mult = sum_multiplier_.gpu_data(); sum_result = (outer_dim_ == 1) ? bottom[1]->mutable_gpu_diff() : sum_result_.mutable_gpu_data(); caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_, Dtype(1), product, sum_mult, Dtype(0), sum_result); } if (outer_dim_ != 1) { const Dtype* sum_mult = sum_multiplier_.gpu_data(); if (scalar_dim_ == 1) { Dtype* scalar_diff = bottom[1]->mutable_cpu_diff(); caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scalar_diff); } else { Dtype* scalar_diff = bottom[1]->mutable_gpu_diff(); caffe_gpu_gemv(CblasTrans, outer_dim_, scalar_dim_, Dtype(1), sum_result, sum_mult, Dtype(0), scalar_diff); } } } } if (propagate_down[0]) { const int count = top[0]->count(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* scalar_data = bottom[1]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL((ScalarForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, scalar_data, scalar_dim_, inner_dim_, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(ScalarLayer); } // namespace caffe
67f159edcb9e1c4cceb9a07a3c7b37c1c72ff5e9.cu
#include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/scalar_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ScalarForward(const int n, const Dtype* in, const Dtype* scalars, const int scalar_dim, const int inner_dim, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { const int scalar_index = (index / inner_dim) % scalar_dim; out[index] = in[index] * scalars[scalar_index]; } } template <typename Dtype> void ScalarLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = top[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* scalar_data = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); ScalarForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, scalar_data, scalar_dim_, inner_dim_, top_data); } template <typename Dtype> void ScalarLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); // Hack: store big eltwise product in bottom[0] diff, except in the special // case where this layer itself does the eltwise product, in which case we // can store it directly in the scalar diff, and we're done. const bool is_eltwise = (inner_dim_ == 1 && outer_dim_ == 1); Dtype* product = is_eltwise ? bottom[1]->mutable_gpu_diff() : bottom[0]->mutable_gpu_diff(); caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product); if (!is_eltwise) { Dtype* sum_result = NULL; if (inner_dim_ == 1) { sum_result = product; } else if (sum_result_.count() == 1) { const Dtype* sum_mult = sum_multiplier_.gpu_data(); Dtype* scalar_diff = bottom[1]->mutable_cpu_diff(); caffe_gpu_dot(inner_dim_, product, sum_mult, scalar_diff); } else { const Dtype* sum_mult = sum_multiplier_.gpu_data(); sum_result = (outer_dim_ == 1) ? bottom[1]->mutable_gpu_diff() : sum_result_.mutable_gpu_data(); caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_, Dtype(1), product, sum_mult, Dtype(0), sum_result); } if (outer_dim_ != 1) { const Dtype* sum_mult = sum_multiplier_.gpu_data(); if (scalar_dim_ == 1) { Dtype* scalar_diff = bottom[1]->mutable_cpu_diff(); caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scalar_diff); } else { Dtype* scalar_diff = bottom[1]->mutable_gpu_diff(); caffe_gpu_gemv(CblasTrans, outer_dim_, scalar_dim_, Dtype(1), sum_result, sum_mult, Dtype(0), scalar_diff); } } } } if (propagate_down[0]) { const int count = top[0]->count(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* scalar_data = bottom[1]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); ScalarForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, scalar_data, scalar_dim_, inner_dim_, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(ScalarLayer); } // namespace caffe
41019f0855aa152322f1b067f851aa4f86fe6a18.hip
// !!! This is a file automatically generated by hipify!!! #include <backend/base/base_l2_norm.h> #include <kernels/gpu/operator_on_gpu.h> #include <core/tensor_builder.h> #include "backend/name.h" #include "global/operator_factory.h" #include "global/fp16_operator_factory.h" //#include <algorithm> #include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <device_launch_parameters.h> #include "kernels/gpu/gpu_kernel.h" #include "kernels/gpu/cudax_fp16_math.h" namespace ts { namespace gpu { class L2Norm : public OperatorOnGPU<base::L2Norm> { public: using self = L2Norm; using supper = OperatorOnGPU<base::L2Norm>; void normalize(const Tensor &x, int dim, float epsilon, Tensor &out) override; }; } } namespace ts { namespace gpu { template<typename T> __global__ static void square_kernel(const T *input_data, T *output_data, int count) { int index = blockDim.x * blockIdx.x + threadIdx.x; for (; index < count; index += blockDim.x * gridDim.x) { output_data[index] = input_data[index] * input_data[index]; } } template<typename T> __global__ static void sum_kernel(const T* input_data, T* output_data, int dim_num, int outer_num, int inner_num) { int index = blockDim.x * blockIdx.x + threadIdx.x; int size = outer_num * inner_num; for (; index < size; index += blockDim.x * gridDim.x) { int n = index / inner_num; int s = index % inner_num; T sum = T(0.f); for (int k = 0; k < dim_num; k++) { sum += input_data[(n * dim_num + k) * inner_num + s]; } output_data[index] = sum; } } template<typename T> __global__ static void div_kernel(const T *input_data, const T* scale_data, T* output_data, int count, int dim_num, int outer_num, int inner_num, T epsilon) { int index = blockDim.x * blockIdx.x + threadIdx.x; for (; index < count; index += blockDim.x * gridDim.x) { int n = index / dim_num / inner_num; int s = index % inner_num; output_data[index] = input_data[index] / sqrt(scale_data[n * inner_num + s] + epsilon); } } template <typename T> class float_to { public: static T F(float a) { return T(a); } }; #ifdef TS_USE_CUDA_FP16 template <> class float_to<half> { public: static half F(float a) { return __float2half(a); } }; #endif template<typename T> void cpu_l2_norm_compute_run(const Tensor &x, int m_dim, float epsilon, Tensor &out, MemoryDevice& mem_device) { auto output_shape = out.sizes(); int pre_num = 1; for (int i = 0; i < m_dim; i++) { pre_num *= output_shape[i]; } int inner_num = 1; for (int i = m_dim + 1; i < output_shape.size(); i++) { inner_num *= output_shape[i]; } int axis = output_shape[m_dim]; const T *input_data = x.data<T>(); T *output_data = out.data<T>(); int count = out.count(); // memcpy(output_data, out.device(), count * sizeof(T), input_data, x.device(), count * sizeof(T)); int scale_data_size = out.count() / axis; Shape scale_shape; scale_shape.resize(1); scale_shape[0] = scale_data_size; Tensor scale_tensor(Tensor::InFlow::DEVICE, out.dtype(), scale_shape); T *scale_data = scale_tensor.data<T>(); dim3 block_size(CUDA_THREAD_NUM); dim3 square_kernel_grid_size((count + block_size.x - 1) / block_size.x); RUN_KERNEL(square_kernel<T>, square_kernel_grid_size, block_size, input_data, output_data, count); dim3 sum_kernel_grid_size((pre_num * inner_num + block_size.x - 1) / block_size.x); RUN_KERNEL(sum_kernel<T>, sum_kernel_grid_size, block_size, output_data, scale_data, axis, pre_num, inner_num); dim3 div_kernel_grid_size((count + block_size.x - 1) / block_size.x); RUN_KERNEL(div_kernel<T>, div_kernel_grid_size,block_size, input_data, scale_data, output_data, 
count,axis,pre_num,inner_num, float_to<T>::F(epsilon)); } void L2Norm::normalize(const Tensor &x, int dim, float epsilon, Tensor &out) { // Notice: the all tensor' memory device are CPU, as given in running_memory_device DTYPE dtype = out.dtype(); auto running_mem_device = running_memory_device(); switch (dtype) { #define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \ case DTYPE: { cpu_l2_norm_compute_run<TYPE>(x, dim, epsilon, out, running_mem_device); break; } #ifdef TS_USE_CUDA_FP16 DECLARE_COMPUTE_RUN(FLOAT16, half); #endif DECLARE_COMPUTE_RUN(FLOAT32, float); DECLARE_COMPUTE_RUN(FLOAT64, double); #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } } } using namespace ts; using namespace gpu; TS_REGISTER_OPERATOR(L2Norm, ts::GPU, name::layer::l2_norm()) #ifdef TS_USE_CUDA_FP16 TS_REGISTER_FP16_OPERATOR(L2Norm, ts::GPU, name::layer::l2_norm()) #endif
41019f0855aa152322f1b067f851aa4f86fe6a18.cu
#include <backend/base/base_l2_norm.h> #include <kernels/gpu/operator_on_gpu.h> #include <core/tensor_builder.h> #include "backend/name.h" #include "global/operator_factory.h" #include "global/fp16_operator_factory.h" //#include <algorithm> #include <math.h> #include <cuda_runtime.h> #include <cuda_fp16.h> #include <device_launch_parameters.h> #include "kernels/gpu/gpu_kernel.h" #include "kernels/gpu/cudax_fp16_math.h" namespace ts { namespace gpu { class L2Norm : public OperatorOnGPU<base::L2Norm> { public: using self = L2Norm; using supper = OperatorOnGPU<base::L2Norm>; void normalize(const Tensor &x, int dim, float epsilon, Tensor &out) override; }; } } namespace ts { namespace gpu { template<typename T> __global__ static void square_kernel(const T *input_data, T *output_data, int count) { int index = blockDim.x * blockIdx.x + threadIdx.x; for (; index < count; index += blockDim.x * gridDim.x) { output_data[index] = input_data[index] * input_data[index]; } } template<typename T> __global__ static void sum_kernel(const T* input_data, T* output_data, int dim_num, int outer_num, int inner_num) { int index = blockDim.x * blockIdx.x + threadIdx.x; int size = outer_num * inner_num; for (; index < size; index += blockDim.x * gridDim.x) { int n = index / inner_num; int s = index % inner_num; T sum = T(0.f); for (int k = 0; k < dim_num; k++) { sum += input_data[(n * dim_num + k) * inner_num + s]; } output_data[index] = sum; } } template<typename T> __global__ static void div_kernel(const T *input_data, const T* scale_data, T* output_data, int count, int dim_num, int outer_num, int inner_num, T epsilon) { int index = blockDim.x * blockIdx.x + threadIdx.x; for (; index < count; index += blockDim.x * gridDim.x) { int n = index / dim_num / inner_num; int s = index % inner_num; output_data[index] = input_data[index] / sqrt(scale_data[n * inner_num + s] + epsilon); } } template <typename T> class float_to { public: static T F(float a) { return T(a); } }; #ifdef TS_USE_CUDA_FP16 template <> class float_to<half> { public: static half F(float a) { return __float2half(a); } }; #endif template<typename T> void cpu_l2_norm_compute_run(const Tensor &x, int m_dim, float epsilon, Tensor &out, MemoryDevice& mem_device) { auto output_shape = out.sizes(); int pre_num = 1; for (int i = 0; i < m_dim; i++) { pre_num *= output_shape[i]; } int inner_num = 1; for (int i = m_dim + 1; i < output_shape.size(); i++) { inner_num *= output_shape[i]; } int axis = output_shape[m_dim]; const T *input_data = x.data<T>(); T *output_data = out.data<T>(); int count = out.count(); // memcpy(output_data, out.device(), count * sizeof(T), input_data, x.device(), count * sizeof(T)); int scale_data_size = out.count() / axis; Shape scale_shape; scale_shape.resize(1); scale_shape[0] = scale_data_size; Tensor scale_tensor(Tensor::InFlow::DEVICE, out.dtype(), scale_shape); T *scale_data = scale_tensor.data<T>(); dim3 block_size(CUDA_THREAD_NUM); dim3 square_kernel_grid_size((count + block_size.x - 1) / block_size.x); RUN_KERNEL(square_kernel<T>, square_kernel_grid_size, block_size, input_data, output_data, count); dim3 sum_kernel_grid_size((pre_num * inner_num + block_size.x - 1) / block_size.x); RUN_KERNEL(sum_kernel<T>, sum_kernel_grid_size, block_size, output_data, scale_data, axis, pre_num, inner_num); dim3 div_kernel_grid_size((count + block_size.x - 1) / block_size.x); RUN_KERNEL(div_kernel<T>, div_kernel_grid_size,block_size, input_data, scale_data, output_data, count,axis,pre_num,inner_num, float_to<T>::F(epsilon)); } void 
L2Norm::normalize(const Tensor &x, int dim, float epsilon, Tensor &out) { // Notice: the all tensor' memory device are CPU, as given in running_memory_device DTYPE dtype = out.dtype(); auto running_mem_device = running_memory_device(); switch (dtype) { #define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \ case DTYPE: { cpu_l2_norm_compute_run<TYPE>(x, dim, epsilon, out, running_mem_device); break; } #ifdef TS_USE_CUDA_FP16 DECLARE_COMPUTE_RUN(FLOAT16, half); #endif DECLARE_COMPUTE_RUN(FLOAT32, float); DECLARE_COMPUTE_RUN(FLOAT64, double); #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } } } using namespace ts; using namespace gpu; TS_REGISTER_OPERATOR(L2Norm, ts::GPU, name::layer::l2_norm()) #ifdef TS_USE_CUDA_FP16 TS_REGISTER_FP16_OPERATOR(L2Norm, ts::GPU, name::layer::l2_norm()) #endif
366f225ce62a61ec6e2b5844579657b081ee0993.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @author Mark Gates @author Azzam Haidar @author Ichitaro Yamazaki @precisions normal z -> s d c */ #include "magma_internal.h" #define BLK_X 64 #define BLK_Y 32 /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd. */ static __device__ void zlacpy_sym_out_full_device( int m, int n, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = dA[j*ldda]; } } } } /******************************************************************************/ /* Similar to zlacpy_full, but updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to zlaset, zlacpy, zlat2c, clat2z. */ static __device__ void zlacpy_sym_out_lower_device( int m, int n, magma_int_t *rows, magma_int_t *perm, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; // row int iby = blockIdx.y*BLK_Y; // col /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n); for (int jj=0; jj < n; jj++) { perm[rows[2*jj+1]] = rows[2*jj+1]; } /* do only rows inside matrix, and blocks not above diag */ if ( ind < m ) { if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int jj=0; jj < BLK_Y; ++jj ) { int j = rows[2*(iby+jj)+1]; if (ind <= j) dB[j + ind*ldda] = MAGMA_Z_CONJ( dA[ind + (iby+jj)*lddb] ); else dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb]; } } else { // either partial block-column or diagonal block for( int jj=0; jj < BLK_Y && iby+jj < n; ++jj ) { int j = rows[2*(iby+jj)+1]; if (ind <= j) dB[j + ind*ldda] = MAGMA_Z_CONJ( dA[ind + (iby+jj)*lddb] ); else dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb]; } } } } /******************************************************************************/ /* Similar to zlacpy_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to zlaset, zlacpy, zlat2c, clat2z. 
*/ static __device__ void zlacpy_sym_out_upper_device( int m, int n, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < m && ind < iby + BLK_Y ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { dB[j*lddb] = dA[j*ldda]; } } } } } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void zlacpy_sym_out_full_kernel( int m, int n, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { zlacpy_sym_out_full_device(m, n, dA, ldda, dB, lddb); } __global__ void zlacpy_sym_out_lower_kernel( int m, int n, magma_int_t *rows, magma_int_t *perm, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { zlacpy_sym_out_lower_device(m, n, rows, perm, dA, ldda, dB, lddb); } __global__ void zlacpy_sym_out_upper_kernel( int m, int n, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { zlacpy_sym_out_upper_device(m, n, dA, ldda, dB, lddb); } /***************************************************************************//** Purpose ------- ZLACPY_SYM_OUT copies all or part of a two-dimensional matrix dA to another matrix dB. This is the same as ZLACPY, but adds queue argument. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be copied to dB. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part - = MagmaFull: All of the matrix dA @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] rows INTEGER array, on GPU, dimension (2*n) On entry, it stores the new pivots such that rows[i]-th and rows[n+i]-th rows are swapped. @param[in,out] perm INTEGER array, on GPU, dimension (m) On entry, it stores the permutation array such that i-th row will be the original perm[i]-th row after the pivots are applied. On exit, it is restored to be identity permutation. @param[in,out] dA COMPLEX_16 array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. On exit, the matrix after the symmetric pivoting is applied. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] dB COMPLEX_16 array, dimension (LDDB,N) The M-by-N matrix dB. On entry, dB stores the columns after row pivoting is applied. @param[in] lddb INTEGER The leading dimension of the array dB. LDDB >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_lacpy *******************************************************************************/ extern "C" void magmablas_zlacpy_sym_out( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t *rows, magma_int_t *perm, magmaDoubleComplex_const_ptr dA, magma_int_t ldda, magmaDoubleComplex_ptr dB, magma_int_t lddb, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv(m, BLK_X), magma_ceildiv(n, BLK_Y) ); if ( uplo == MagmaLower ) { hipLaunchKernelGGL(( zlacpy_sym_out_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, rows, perm, dA, ldda, dB, lddb ); } else if ( uplo == MagmaUpper ) { hipLaunchKernelGGL(( zlacpy_sym_out_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dB, lddb ); } else { hipLaunchKernelGGL(( zlacpy_sym_out_full_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dB, lddb ); } }
366f225ce62a61ec6e2b5844579657b081ee0993.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @author Mark Gates @author Azzam Haidar @author Ichitaro Yamazaki @precisions normal z -> s d c */ #include "magma_internal.h" #define BLK_X 64 #define BLK_Y 32 /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd. */ static __device__ void zlacpy_sym_out_full_device( int m, int n, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = dA[j*ldda]; } } } } /******************************************************************************/ /* Similar to zlacpy_full, but updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to zlaset, zlacpy, zlat2c, clat2z. */ static __device__ void zlacpy_sym_out_lower_device( int m, int n, magma_int_t *rows, magma_int_t *perm, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; // row int iby = blockIdx.y*BLK_Y; // col /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n); for (int jj=0; jj < n; jj++) { perm[rows[2*jj+1]] = rows[2*jj+1]; } /* do only rows inside matrix, and blocks not above diag */ if ( ind < m ) { if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int jj=0; jj < BLK_Y; ++jj ) { int j = rows[2*(iby+jj)+1]; if (ind <= j) dB[j + ind*ldda] = MAGMA_Z_CONJ( dA[ind + (iby+jj)*lddb] ); else dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb]; } } else { // either partial block-column or diagonal block for( int jj=0; jj < BLK_Y && iby+jj < n; ++jj ) { int j = rows[2*(iby+jj)+1]; if (ind <= j) dB[j + ind*ldda] = MAGMA_Z_CONJ( dA[ind + (iby+jj)*lddb] ); else dB[ind + j*ldda] = dA[ind + (iby+jj)*lddb]; } } } } /******************************************************************************/ /* Similar to zlacpy_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to zlaset, zlacpy, zlat2c, clat2z. 
*/ static __device__ void zlacpy_sym_out_upper_device( int m, int n, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < m && ind < iby + BLK_Y ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { dB[j*lddb] = dA[j*ldda]; } } } } } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void zlacpy_sym_out_full_kernel( int m, int n, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { zlacpy_sym_out_full_device(m, n, dA, ldda, dB, lddb); } __global__ void zlacpy_sym_out_lower_kernel( int m, int n, magma_int_t *rows, magma_int_t *perm, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { zlacpy_sym_out_lower_device(m, n, rows, perm, dA, ldda, dB, lddb); } __global__ void zlacpy_sym_out_upper_kernel( int m, int n, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { zlacpy_sym_out_upper_device(m, n, dA, ldda, dB, lddb); } /***************************************************************************//** Purpose ------- ZLACPY_SYM_OUT copies all or part of a two-dimensional matrix dA to another matrix dB. This is the same as ZLACPY, but adds queue argument. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be copied to dB. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part - = MagmaFull: All of the matrix dA @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] rows INTEGER array, on GPU, dimension (2*n) On entry, it stores the new pivots such that rows[i]-th and rows[n+i]-th rows are swapped. @param[in,out] perm INTEGER array, on GPU, dimension (m) On entry, it stores the permutation array such that i-th row will be the original perm[i]-th row after the pivots are applied. On exit, it is restored to be identity permutation. @param[in,out] dA COMPLEX_16 array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. On exit, the matrix after the symmetric pivoting is applied. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] dB COMPLEX_16 array, dimension (LDDB,N) The M-by-N matrix dB. On entry, dB stores the columns after row pivoting is applied. @param[in] lddb INTEGER The leading dimension of the array dB. LDDB >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_lacpy *******************************************************************************/ extern "C" void magmablas_zlacpy_sym_out( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t *rows, magma_int_t *perm, magmaDoubleComplex_const_ptr dA, magma_int_t ldda, magmaDoubleComplex_ptr dB, magma_int_t lddb, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv(m, BLK_X), magma_ceildiv(n, BLK_Y) ); if ( uplo == MagmaLower ) { zlacpy_sym_out_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, rows, perm, dA, ldda, dB, lddb ); } else if ( uplo == MagmaUpper ) { zlacpy_sym_out_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb ); } else { zlacpy_sym_out_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb ); } }
189307398ab5e5078aa2e139fb1a3436c84f1678.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "VectorAddition.h" __global__ void VectorAddition(float* g_A, float* g_B, float* g_C, int Size) { const int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx < Size) { g_C[idx] = g_A[idx] + g_B[idx]; } } __host__ void GPUAdditionHelper(float* h_A, float* h_B, float* h_C_GPU, const int nSize) { float* dev_A{}, * dev_B{}, * dev_C{}; // pointers to device (GPU) memory std::chrono::time_point<std::chrono::system_clock> start, end; std::chrono::duration<double> elapsed_seconds{}; //Prepare the thread and block configuration (declared before the first goto so no initialization is bypassed) int thread_per_block = 1024; int blocks_per_grid = (SIZE + thread_per_block - 1) / thread_per_block; // integer ceiling division hipError_t cudaStatus = hipMalloc((void**)&dev_A, VECTOR_SIZE_IN_BYTES); if (cudaStatus != hipSuccess) { cout << "dev_A: hipMalloc failed" << endl; goto Error; } cudaStatus = hipMalloc((void**)&dev_B, VECTOR_SIZE_IN_BYTES); if (cudaStatus != hipSuccess) { cout << "dev_B: hipMalloc failed" << endl; goto Error; } cudaStatus = hipMalloc((void**)&dev_C, VECTOR_SIZE_IN_BYTES); if (cudaStatus != hipSuccess) { cout << "dev_C: hipMalloc failed" << endl; goto Error; } //Copy data from the host to the device global memory cudaStatus = hipMemcpy(dev_A, h_A, VECTOR_SIZE_IN_BYTES, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { cout << "h_A to dev_A: hipMemcpy failed" << endl; goto Error; } cudaStatus = hipMemcpy(dev_B, h_B, VECTOR_SIZE_IN_BYTES, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { cout << "h_B to dev_B: hipMemcpy failed" << endl; goto Error; } cout << "Vector size: " << SIZE << endl; cout << "Vector size in memory (MB): " << (SIZE * sizeof(float) / 1e6) << endl; cout << "Threads/Block: " << thread_per_block << endl; cout << "Blocks/Grid: " << blocks_per_grid << endl; //Launch the kernel start = std::chrono::system_clock::now(); hipLaunchKernelGGL(VectorAddition, dim3(blocks_per_grid), dim3(thread_per_block), 0, 0, dev_A, dev_B, dev_C, nSize); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { cout << "Vector addition failed with Error code: " << hipGetErrorString(cudaStatus) << endl; goto Error; } //wait for the kernel to finish cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { cout << "hipDeviceSynchronize Error with Error code: " << cudaStatus << endl; } end = std::chrono::system_clock::now(); elapsed_seconds = end - start; cout << "GPU execution time: " << (elapsed_seconds.count() * 1000.0f) << " msecs " << endl; cudaStatus = hipMemcpy(h_C_GPU, dev_C, VECTOR_SIZE_IN_BYTES, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { cout << "dev_C to h_C_GPU: hipMemcpy failed" << endl; goto Error; } Error: hipFree(dev_A); hipFree(dev_B); hipFree(dev_C); }
189307398ab5e5078aa2e139fb1a3436c84f1678.cu
#include "VectorAddition.h" __global__ void VectorAddition(float* g_A, float* g_B, float* g_C, int Size); { const int idx = threadIdx.x + (blockIdx.x * blockDim.x); if (idx < Size) { g_C[idx] = g_A[idx] + g_B[idx]; } } __host__ void GPUAdditionHelper(float* h_A, float* h_B, float* h_C_GPU, const int nSize); { float* dev_A{}, * dev_B{}, * dev_C{}; // pointer pointing to address on GPU memory chrono::time_point<std::chrono::system_clock>start, end; cudaError_t cudaStatus = cudaMalloc((void**)&dev_A, VECTOR_SIZE_IN_BYTES); if (cudaStatus != cudaSuccess) { cout << "dev_a: cudsMalloc Falied" << endl; goto Error; } cudaStatus = cudaMalloc((void**)&dev_B, VECTOR_SIZE_IN_BYTES); if (cudaStatus != cudaSuccess) { cout << "dev_b: cudsMalloc Falied" << endl; goto Error; } cudaStatus = cudaMalloc((void**)&dev_C, VECTOR_SIZE_IN_BYTES); if (cudaStatus != cudaSuccess) { cout << "dev_c: cudsMalloc Falied" << endl; goto Error; } //Copy data from the host to the device global memory cudaStatus = cudaMemcpy(dev_A, h_A, VECTOR_SIZE_IN_BYTES, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { cout << "h_A to dev_A: cudsMemcpy Falied" << endl; goto Error; } cudaStatus = cudaMemcpy(dev_B, h_B, VECTOR_SIZE_IN_BYTES, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { cout << "h_B to dev_B: cudsMemcpy Falied" << endl; goto Error; } //Prepare the thread and block configuration int thread_per_block = 1024; int blocks_per_grid = (int)ceil(SIZE / thread_per_block); cout << "Vector Size" << SIZE << endl; cout << "Vector size in memory (Bytes): " << (SIZE * sizeof(float) / 1e6) << endl; cout << "Threads/Block: " << thread_per_block << endl; cout << "Block/Grid" << blocks_per_grid << endl; //Launch the kernel start = std::chrono::system_clock::now(); VectorAddition << <blocks_per_grid, thread_per_block >> > (dev_A, dev_B, dev_C, nSize); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { cout << "Vector addition falied with Error code: " << cudaGetErrorString(cudaStatus) << endl; goto Error; } //wait for the kernal to finish cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { cout << "cudaDeviceSynchronize Error with Error code: " << cudaStatus << endl; } end = std::chrono::system_clock::now(); std::chrono::duration<double>elapsed_seconds = end - start; cout << "GPU execution time: " << (elapsed_seconds.count() * 1000.0f) << " msecs " << endl; cudaStatus = cudaMemcpy(h_C_GPU, dev_C, VECTOR_SIZE_IN_BYTES, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { cout << "h_C to h_C_GPU: cudaMemcpy Falied" << endl; goto Error; } Error: cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_C); }
6921a32b4e48160deabc2679a95996494924be19.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zgeadd_batched.cu normal z -> d, Sat Nov 15 19:53:59 2014 @author Mark Gates */ #include "common_magma.h" #define NB 64 /* ===================================================================== Batches dlacpy of multiple arrays; y-dimension of grid is different arrays, x-dimension of grid is blocks for each array. Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread adds one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. TODO. Block in both directions, for large matrices. E.g., each block does 64x64 tile, instead of 64xN tile. */ __global__ void dgeadd_batched_kernel( int m, int n, double alpha, const double * const *dAarray, int ldda, double **dBarray, int lddb ) { // dA and dB iterate across row i const double *dA = dAarray[ blockIdx.y ]; double *dB = dBarray[ blockIdx.y ]; int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const double *dAend = dA + n*ldda; while( dA < dAend ) { *dB = alpha*(*dA) + (*dB); dA += ldda; dB += lddb; } } } /* ===================================================================== */ /** Purpose ------- ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i], for i = 0, ..., batchCount-1. Arguments --------- @param[in] m INTEGER The number of rows of each matrix dAarray[i]. M >= 0. @param[in] n INTEGER The number of columns of each matrix dAarray[i]. N >= 0. @param[in] alpha DOUBLE_PRECISION The scalar alpha. @param[in] dAarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a DOUBLE_PRECISION array, dimension (LDDA,N) The m by n matrices dAarray[i]. @param[in] ldda INTEGER The leading dimension of each array dAarray[i]. LDDA >= max(1,M). @param[in,out] dBarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a DOUBLE_PRECISION array, dimension (LDDB,N) The m by n matrices dBarray[i]. @param[in] lddb INTEGER The leading dimension of each array dBarray[i]. LDDB >= max(1,M). @param[in] batchCount INTEGER The number of matrices to add; length of dAarray and dBarray. batchCount >= 0. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dgeadd_batched_q( magma_int_t m, magma_int_t n, double alpha, magmaDouble_const_ptr const dAarray[], magma_int_t ldda, magmaDouble_ptr dBarray[], magma_int_t lddb, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB, batchCount ); hipLaunchKernelGGL(( dgeadd_batched_kernel), dim3(grid), dim3(threads), 0, queue , m, n, alpha, dAarray, ldda, dBarray, lddb ); } /** @see magmablas_dgeadd_batched_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dgeadd_batched( magma_int_t m, magma_int_t n, double alpha, magmaDouble_const_ptr const dAarray[], magma_int_t ldda, magmaDouble_ptr dBarray[], magma_int_t lddb, magma_int_t batchCount ) { magmablas_dgeadd_batched_q( m, n, alpha, dAarray, ldda, dBarray, lddb, batchCount, magma_stream ); }
6921a32b4e48160deabc2679a95996494924be19.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zgeadd_batched.cu normal z -> d, Sat Nov 15 19:53:59 2014 @author Mark Gates */ #include "common_magma.h" #define NB 64 /* ===================================================================== Batches dlacpy of multiple arrays; y-dimension of grid is different arrays, x-dimension of grid is blocks for each array. Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread adds one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. TODO. Block in both directions, for large matrices. E.g., each block does 64x64 tile, instead of 64xN tile. */ __global__ void dgeadd_batched_kernel( int m, int n, double alpha, const double * const *dAarray, int ldda, double **dBarray, int lddb ) { // dA and dB iterate across row i const double *dA = dAarray[ blockIdx.y ]; double *dB = dBarray[ blockIdx.y ]; int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const double *dAend = dA + n*ldda; while( dA < dAend ) { *dB = alpha*(*dA) + (*dB); dA += ldda; dB += lddb; } } } /* ===================================================================== */ /** Purpose ------- ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i], for i = 0, ..., batchCount-1. Arguments --------- @param[in] m INTEGER The number of rows of each matrix dAarray[i]. M >= 0. @param[in] n INTEGER The number of columns of each matrix dAarray[i]. N >= 0. @param[in] alpha DOUBLE_PRECISION The scalar alpha. @param[in] dAarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a DOUBLE_PRECISION array, dimension (LDDA,N) The m by n matrices dAarray[i]. @param[in] ldda INTEGER The leading dimension of each array dAarray[i]. LDDA >= max(1,M). @param[in,out] dBarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a DOUBLE_PRECISION array, dimension (LDDB,N) The m by n matrices dBarray[i]. @param[in] lddb INTEGER The leading dimension of each array dBarray[i]. LDDB >= max(1,M). @param[in] batchCount INTEGER The number of matrices to add; length of dAarray and dBarray. batchCount >= 0. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dgeadd_batched_q( magma_int_t m, magma_int_t n, double alpha, magmaDouble_const_ptr const dAarray[], magma_int_t ldda, magmaDouble_ptr dBarray[], magma_int_t lddb, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB, batchCount ); dgeadd_batched_kernel<<< grid, threads, 0, queue >>>( m, n, alpha, dAarray, ldda, dBarray, lddb ); } /** @see magmablas_dgeadd_batched_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dgeadd_batched( magma_int_t m, magma_int_t n, double alpha, magmaDouble_const_ptr const dAarray[], magma_int_t ldda, magmaDouble_ptr dBarray[], magma_int_t lddb, magma_int_t batchCount ) { magmablas_dgeadd_batched_q( m, n, alpha, dAarray, ldda, dBarray, lddb, batchCount, magma_stream ); }
9695575ecb6b787f7468a65fc4236bd3d8eda152.hip
// !!! This is a file automatically generated by hipify!!! #include <cudatbx/cuda_utilities.cuh> namespace cudatbx { int number_of_gpus() { int device_count = 0; cudaSafeCall( hipGetDeviceCount(&device_count) ); return device_count; } void reset_gpu(const int& gpu_id) { cudaSafeCall( hipSetDevice(gpu_id) ); cudaSafeCall( hipDeviceReset() ); } int calculate_padded_size(const int& size, const int& padding) { int padded_size = int(::floor(size/padding + 1.0)) * padding; return padded_size; } int calculate_blocks_per_grid(const int& size, const int& threads_per_block) { int blocks_per_grid = (size + threads_per_block - 1)/threads_per_block; return blocks_per_grid; } /* ========================================================================== Basic timer for CUDA using events, one stream only, no checks Usage: cuda_timer t; t.start(); < run CUDA stuff > t.stop(); float elapsed_time = t.get_elapsed_time(); std::cout << elapsed_time << "\n"; */ cudatbx::cuda_timer::cuda_timer() { hipEventCreate(&start_event); hipEventCreate(&stop_event); } cudatbx::cuda_timer::~cuda_timer() { hipEventDestroy(start_event); hipEventDestroy(stop_event); } void cudatbx::cuda_timer::start() { hipEventRecord(start_event); } void cudatbx::cuda_timer::stop() { hipEventRecord(stop_event); hipEventSynchronize(stop_event); hipEventElapsedTime(&elapsed_time, start_event, stop_event); } float cudatbx::cuda_timer::get_elapsed_time() { return elapsed_time; } // ========================================================================== }
9695575ecb6b787f7468a65fc4236bd3d8eda152.cu
#include <cudatbx/cuda_utilities.cuh> namespace cudatbx { int number_of_gpus() { int device_count = 0; cudaSafeCall( cudaGetDeviceCount(&device_count) ); return device_count; } void reset_gpu(const int& gpu_id) { cudaSafeCall( cudaSetDevice(gpu_id) ); cudaSafeCall( cudaDeviceReset() ); } int calculate_padded_size(const int& size, const int& padding) { int padded_size = int(std::floor(size/padding + 1.0)) * padding; return padded_size; } int calculate_blocks_per_grid(const int& size, const int& threads_per_block) { int blocks_per_grid = (size + threads_per_block - 1)/threads_per_block; return blocks_per_grid; } /* ========================================================================== Basic timer for CUDA using events, one stream only, no checks Usage: cuda_timer t; t.start(); < run CUDA stuff > t.stop(); float elapsed_time = t.get_elapsed_time(); std::cout << elapsed_time << "\n"; */ cudatbx::cuda_timer::cuda_timer() { cudaEventCreate(&start_event); cudaEventCreate(&stop_event); } cudatbx::cuda_timer::~cuda_timer() { cudaEventDestroy(start_event); cudaEventDestroy(stop_event); } void cudatbx::cuda_timer::start() { cudaEventRecord(start_event); } void cudatbx::cuda_timer::stop() { cudaEventRecord(stop_event); cudaEventSynchronize(stop_event); cudaEventElapsedTime(&elapsed_time, start_event, stop_event); } float cudatbx::cuda_timer::get_elapsed_time() { return elapsed_time; } // ========================================================================== }
9fad92505ddf1e88231bbbd08dbf8a189d6ddf74.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ extern "C" { #define MPEG_LUMA_MIN (16) #define MPEG_CHROMA_MIN (16) #define MPEG_LUMA_MAX (235) #define MPEG_CHROMA_MAX (240) #define JPEG_LUMA_MIN (0) #define JPEG_CHROMA_MIN (1) #define JPEG_LUMA_MAX (255) #define JPEG_CHROMA_MAX (255) __device__ int mpeg_min[] = {MPEG_LUMA_MIN, MPEG_CHROMA_MIN}; __device__ int mpeg_max[] = {MPEG_LUMA_MAX, MPEG_CHROMA_MAX}; __device__ int jpeg_min[] = {JPEG_LUMA_MIN, JPEG_CHROMA_MIN}; __device__ int jpeg_max[] = {JPEG_LUMA_MAX, JPEG_CHROMA_MAX}; __device__ int clamp(int val, int min, int max) { if (val < min) return min; else if (val > max) return max; else return val; } __global__ void to_jpeg_cuda(const unsigned char* src, unsigned char* dst, int pitch, int comp_id) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int src_, dst_; // 8 bit -> 15 bit for better precision src_ = static_cast<int>(src[x + y * pitch]) << 7; // Conversion dst_ = comp_id ? (min(src_, 30775) * 4663 - 9289992) >> 12 // chroma : (min(src_, 30189) * 19077 - 39057361) >> 14; // luma // Dither replacement dst_ = dst_ + 64; // Back to 8 bit dst_ = clamp(dst_ >> 7, jpeg_min[comp_id], jpeg_max[comp_id]); dst[x + y * pitch] = static_cast<unsigned char>(dst_); } __global__ void to_mpeg_cuda(const unsigned char* src, unsigned char* dst, int pitch, int comp_id) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int src_, dst_; // 8 bit -> 15 bit for better precision src_ = static_cast<int>(src[x + y * pitch]) << 7; // Conversion dst_ = comp_id ? (src_ * 1799 + 4081085) >> 11 // chroma : (src_ * 14071 + 33561947) >> 14; // luma // Dither replacement dst_ = dst_ + 64; // Back to 8 bit dst_ = clamp(dst_ >> 7, mpeg_min[comp_id], mpeg_max[comp_id]); dst[x + y * pitch] = static_cast<unsigned char>(dst_); } }
9fad92505ddf1e88231bbbd08dbf8a189d6ddf74.cu
/* * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ extern "C" { #define MPEG_LUMA_MIN (16) #define MPEG_CHROMA_MIN (16) #define MPEG_LUMA_MAX (235) #define MPEG_CHROMA_MAX (240) #define JPEG_LUMA_MIN (0) #define JPEG_CHROMA_MIN (1) #define JPEG_LUMA_MAX (255) #define JPEG_CHROMA_MAX (255) __device__ int mpeg_min[] = {MPEG_LUMA_MIN, MPEG_CHROMA_MIN}; __device__ int mpeg_max[] = {MPEG_LUMA_MAX, MPEG_CHROMA_MAX}; __device__ int jpeg_min[] = {JPEG_LUMA_MIN, JPEG_CHROMA_MIN}; __device__ int jpeg_max[] = {JPEG_LUMA_MAX, JPEG_CHROMA_MAX}; __device__ int clamp(int val, int min, int max) { if (val < min) return min; else if (val > max) return max; else return val; } __global__ void to_jpeg_cuda(const unsigned char* src, unsigned char* dst, int pitch, int comp_id) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int src_, dst_; // 8 bit -> 15 bit for better precision src_ = static_cast<int>(src[x + y * pitch]) << 7; // Conversion dst_ = comp_id ? (min(src_, 30775) * 4663 - 9289992) >> 12 // chroma : (min(src_, 30189) * 19077 - 39057361) >> 14; // luma // Dither replacement dst_ = dst_ + 64; // Back to 8 bit dst_ = clamp(dst_ >> 7, jpeg_min[comp_id], jpeg_max[comp_id]); dst[x + y * pitch] = static_cast<unsigned char>(dst_); } __global__ void to_mpeg_cuda(const unsigned char* src, unsigned char* dst, int pitch, int comp_id) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int src_, dst_; // 8 bit -> 15 bit for better precision src_ = static_cast<int>(src[x + y * pitch]) << 7; // Conversion dst_ = comp_id ? (src_ * 1799 + 4081085) >> 11 // chroma : (src_ * 14071 + 33561947) >> 14; // luma // Dither replacement dst_ = dst_ + 64; // Back to 8 bit dst_ = clamp(dst_ >> 7, mpeg_min[comp_id], mpeg_max[comp_id]); dst[x + y * pitch] = static_cast<unsigned char>(dst_); } }
7a6e37d65cb1b170cf30d9fbf0311d328e246aca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" <% unless type_name == 'robject' %> __global__ void <%="cumo_#{c_iter}_stride_kernel"%>(char *p1, char *p2, CUMO_BIT_DIGIT *a3, size_t p3, ssize_t s1, ssize_t s2, ssize_t s3, uint64_t n) { for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { dtype x = *(dtype*)(p1+(i*s1)); dtype y = *(dtype*)(p2+(i*s2)); CUMO_BIT_DIGIT b = (m_<%=name%>(x,y)) ? 1:0; CUMO_STORE_BIT(a3,p3+(i*s3),b); } } void <%="cumo_#{c_iter}_stride_kernel_launch"%>(char *p1, char *p2, CUMO_BIT_DIGIT *a3, size_t p3, ssize_t s1, ssize_t s2, ssize_t s3, uint64_t n) { size_t grid_dim = cumo_get_grid_dim(n); size_t block_dim = cumo_get_block_dim(n); hipLaunchKernelGGL((<%="cumo_#{c_iter}_stride_kernel"%>), dim3(grid_dim), dim3(block_dim), 0, 0, p1,p2,a3,p3,s1,s2,s3,n); } <% end %>
7a6e37d65cb1b170cf30d9fbf0311d328e246aca.cu
<% unless type_name == 'robject' %> __global__ void <%="cumo_#{c_iter}_stride_kernel"%>(char *p1, char *p2, CUMO_BIT_DIGIT *a3, size_t p3, ssize_t s1, ssize_t s2, ssize_t s3, uint64_t n) { for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { dtype x = *(dtype*)(p1+(i*s1)); dtype y = *(dtype*)(p2+(i*s2)); CUMO_BIT_DIGIT b = (m_<%=name%>(x,y)) ? 1:0; CUMO_STORE_BIT(a3,p3+(i*s3),b); } } void <%="cumo_#{c_iter}_stride_kernel_launch"%>(char *p1, char *p2, CUMO_BIT_DIGIT *a3, size_t p3, ssize_t s1, ssize_t s2, ssize_t s3, uint64_t n) { size_t grid_dim = cumo_get_grid_dim(n); size_t block_dim = cumo_get_block_dim(n); <%="cumo_#{c_iter}_stride_kernel"%><<<grid_dim, block_dim>>>(p1,p2,a3,p3,s1,s2,s3,n); } <% end %>
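For readers unfamiliar with the ERB placeholders, here is a simplified, self-contained stand-in for the generated kernel (an illustration only, not cumo's actual output): the real template substitutes dtype and m_<name> per element type and packs results into bits with CUMO_STORE_BIT, whereas this sketch compares two byte-strided float arrays and writes one byte per result.

#include <cstdint>
#include <cstddef>

// Grid-stride loop over n elements; s1/s2 are byte strides, as in the template.
__global__ void gt_stride_kernel(const char* p1, const char* p2, unsigned char* out,
                                 ptrdiff_t s1, ptrdiff_t s2, uint64_t n)
{
    for (uint64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        float x = *reinterpret_cast<const float*>(p1 + i * s1);
        float y = *reinterpret_cast<const float*>(p2 + i * s2);
        out[i] = (x > y) ? 1 : 0;
    }
}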
6ae3cc9251196ef1ce0fcbc49d82562fcb6c1948.hip
// !!! This is a file automatically generated by hipify!!! /* * ARQUITECTURA DE COMPUTADORES * 2 Grado en Ingenieria Informatica * * PRACTICA 0: "Hola Mundo" * >> Comprobacion de la instalacion de CUDA * * AUTOR: APELLIDO APELLIDO Nombre */ /////////////////////////////////////////////////////////////////////////// // includes #include <stdio.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> /////////////////////////////////////////////////////////////////////////// // defines /////////////////////////////////////////////////////////////////////////// // declaracion de funciones // DEVICE: funcion llamada desde el device y ejecutada en el device // GLOBAL: funcion llamada desde el host y ejecutada en el device (kernel) // HOST: funcion llamada desde el host y ejecutada en el host void suma(int *a, int *b, int *sumatorio); /////////////////////////////////////////////////////////////////////////// // MAIN: rutina principal ejecutada en el host int main(int argc, char** argv) { // cuerpo del programa int *a; int *b; int *sumatorio; suma(a, b, sumatorio); // salida del programa time_t fecha; time(&fecha); printf("\n***************************************************\n"); printf("Programa ejecutado el dia: %s\n", ctime(&fecha)); printf("<pulsa INTRO para finalizar>"); // Esto es necesario para que el IDE no cierre la consola de salida getchar(); return 0; } /////////////////////////////////////////////////////////////////////////// void suma(int *a, int *b, int *sumatorio){ int n; printf("Dime la longitud del array: "); scanf("%d", &n); getchar(); a=(int *)malloc(n*sizeof(int)); b=(int *)malloc(n*sizeof(int)); sumatorio=(int *)malloc(n*sizeof(int)); for (int k=0 ; k < n; k++) { a[k] = k+1; b[k] = k+2; } for(int i = 0; i < n; i++){ sumatorio[i] = a[i] + b[i]; } for (int i=0 ; i < n; i++) { printf("\n\nLa suma de %i y %i es: %i",a[i],b[i],sumatorio[i]); } }
6ae3cc9251196ef1ce0fcbc49d82562fcb6c1948.cu
/* * ARQUITECTURA DE COMPUTADORES * 2º Grado en Ingenieria Informatica * * PRACTICA 0: "Hola Mundo" * >> Comprobacion de la instalacion de CUDA * * AUTOR: APELLIDO APELLIDO Nombre */ /////////////////////////////////////////////////////////////////////////// // includes #include <stdio.h> #include <stdlib.h> #include <time.h> #include <cuda_runtime.h> /////////////////////////////////////////////////////////////////////////// // defines /////////////////////////////////////////////////////////////////////////// // declaracion de funciones // DEVICE: funcion llamada desde el device y ejecutada en el device // GLOBAL: funcion llamada desde el host y ejecutada en el device (kernel) // HOST: funcion llamada desde el host y ejecutada en el host void suma(int *a, int *b, int *sumatorio); /////////////////////////////////////////////////////////////////////////// // MAIN: rutina principal ejecutada en el host int main(int argc, char** argv) { // cuerpo del programa int *a; int *b; int *sumatorio; suma(a, b, sumatorio); // salida del programa time_t fecha; time(&fecha); printf("\n***************************************************\n"); printf("Programa ejecutado el dia: %s\n", ctime(&fecha)); printf("<pulsa INTRO para finalizar>"); // Esto es necesario para que el IDE no cierre la consola de salida getchar(); return 0; } /////////////////////////////////////////////////////////////////////////// void suma(int *a, int *b, int *sumatorio){ int n; printf("Dime la longitud del array: "); scanf("%d", &n); getchar(); a=(int *)malloc(n*sizeof(int)); b=(int *)malloc(n*sizeof(int)); sumatorio=(int *)malloc(n*sizeof(int)); for (int k=0 ; k < n; k++) { a[k] = k+1; b[k] = k+2; } for(int i = 0; i < n; i++){ sumatorio[i] = a[i] + b[i]; } for (int i=0 ; i < n; i++) { printf("\n\nLa suma de %i y %i es: %i",a[i],b[i],sumatorio[i]); } }
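The practice file above performs the sum on the host only, even though it includes the CUDA runtime. Purely as a hedged illustration (not part of the original exercise), the same element-wise addition written as a device kernel:

#include <cuda_runtime.h>

// One thread per element, guarded so the grid size may be rounded up.
__global__ void suma_kernel(const int* a, const int* b, int* c, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}

// Example launch (d_a, d_b, d_c are device buffers of n ints):
//   suma_kernel<<<(n + 255) / 256, 256>>>(d_a, d_b, d_c, n);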
eff68139b4f5eccd52ab5b8b5013b4638aca5843.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <cuda_gl_interop.h> #include <assert.h> #include <sys/time.h> #define Tix 32 #define MAX(x, y) (((x) > (y)) ? (x) : (y)) __global__ void GaussJordan_gpu(float *Aaug, float *subpivot, int N, int iter) { int c = iter + blockIdx.x * Tix*Tix; int r = blockIdx.y; float scale; int ti = threadIdx.x; scale = Aaug[r*2*N+iter]; __shared__ float col[Tix*Tix]; __shared__ float colj[Tix*Tix]; if (r != iter){ if (c + ti < 2*N){ col[ti] = Aaug[iter*2*N+c+ti]; colj[ti] = Aaug[r*2*N+c+ti]; colj[ti] -= scale*col[ti]; Aaug[r*2*N+c+ti] = colj[ti]; } if (blockIdx.x == 0){ if (ti == 0){ subpivot[r] = Aaug[r*2*N+iter+1]; } } } } __global__ void update_row_gpu(float *Aaug, float *subpivot, int N, int iter, int use) { int c = iter + blockIdx.x * Tix*Tix; int ti = threadIdx.x; if (c + ti < 2*N){ Aaug[iter*2*N+c+ti] += Aaug[use*2*N+c+ti]; } if (blockIdx.x == 0){ if (ti == 0){ subpivot[iter] = Aaug[iter+iter*2*N]; } } } __global__ void scale_row_gpu(float *Aaug, float *subpivot, int N, int iter) { int c = iter + blockIdx.x * Tix*Tix; int ti = threadIdx.x; if (c + ti < 2*N){ Aaug[iter*2*N+c+ti] = Aaug[iter*2*N+c+ti]/subpivot[iter]; } } int main(int argc, char *argv[]){ float *Aaug, *Aaug_cu, *subpivot, *subpivot_cu; int iter, m, i, j, N, use; FILE * f; // Checks to see valid number of inputs given if (argc > 4 || argc < 4){ printf("Error: expected 3 input (A .mtx file, Ainv .mtx file, N), given %d\n", argc-1); exit(-1); } N = strtol(argv[3],NULL,10); // Checks to see if a valid .mtx file was given int memSize = 2*N*N*sizeof(float); Aaug = (float *)malloc(memSize); subpivot = (float *)malloc(N*sizeof(float)); f = fopen(argv[1], "rb"); for (i=0; i<N; i++){ for (j=0; j<N; j++){ fscanf(f, "%f", &Aaug[2*N*i+j]); if (i==j){ Aaug[2*N*i+N+j] = 1; } else{ Aaug[2*N*i+N+j] = 0; } } subpivot[i] = Aaug[i*2*N]; } fclose(f); hipMalloc((void**)&Aaug_cu, memSize); hipMalloc((void**)&subpivot_cu, N*sizeof(float)); hipMemcpy(Aaug_cu, Aaug, memSize, hipMemcpyHostToDevice); hipMemcpy(subpivot_cu, subpivot, N*sizeof(float), hipMemcpyHostToDevice); // Runs GPU Code int bn, rn; dim3 nblocks, nthreads, nblocks_1, nthreads_1; nthreads.x = Tix*Tix; nthreads.y = 1; nthreads_1.x = Tix*Tix; nthreads_1.y = 1; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); for (iter=0;iter<N; iter++){ bn = MAX((2*N-iter)/(Tix*Tix),1); // Defines number of subdivisions in the row rn = N; // Defines how many rows to update nblocks.x = bn; nblocks.y = rn; nblocks_1.x = bn; nblocks_1.y = 1; if (sqrt(subpivot[iter]*subpivot[iter])<.00000000000001){ // checks for invertability for (m=1; m+iter<N; m++){ // loops through lower rows for nonzero in pivot if (sqrt(subpivot[iter+m]*subpivot[iter+m])>.000000000000001){ // checks if nonzero pivot use = m+iter; goto update; // exits if pivot found } else if(m==N-1){ printf("Error matrix not invertible \n"); // if no pivot found, not inverable exit(-1); } } printf("Error matrix not invertible \n"); // if at the last pivot and zero, not invertable exit(-1); update:hipLaunchKernelGGL(( update_row_gpu), dim3(nblocks_1), dim3(nthreads_1), 0, 0, Aaug_cu, subpivot_cu, N, iter, use); hipDeviceSynchronize(); } hipLaunchKernelGGL(( scale_row_gpu), dim3(nblocks_1), dim3(nthreads_1), 0, 0, Aaug_cu, subpivot_cu, N, iter); hipDeviceSynchronize(); if(iter<N){ // Won't perform reduction if iter = N (at the bottom) hipLaunchKernelGGL(( GaussJordan_gpu), 
dim3(nblocks), dim3(nthreads), 0, 0, Aaug_cu, subpivot_cu, N, iter); hipDeviceSynchronize(); hipMemcpy(subpivot, subpivot_cu, N*sizeof(float), hipMemcpyDeviceToHost); } } hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf ("gpu time: %f ms\n", milliseconds); hipMemcpy(Aaug, Aaug_cu, memSize, hipMemcpyDeviceToHost); // Writes output to file FILE *of2 = fopen(argv[2], "wb"); for (i=0; i<N; i++){ for(j=0; j<N; j++){ fprintf(of2, "%f ", Aaug[N+2*N*i+j]); } fprintf(of2, "\n"); } fclose(of2); }
eff68139b4f5eccd52ab5b8b5013b4638aca5843.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <cuda_gl_interop.h> #include <assert.h> #include <sys/time.h> #define Tix 32 #define MAX(x, y) (((x) > (y)) ? (x) : (y)) __global__ void GaussJordan_gpu(float *Aaug, float *subpivot, int N, int iter) { int c = iter + blockIdx.x * Tix*Tix; int r = blockIdx.y; float scale; int ti = threadIdx.x; scale = Aaug[r*2*N+iter]; __shared__ float col[Tix*Tix]; __shared__ float colj[Tix*Tix]; if (r != iter){ if (c + ti < 2*N){ col[ti] = Aaug[iter*2*N+c+ti]; colj[ti] = Aaug[r*2*N+c+ti]; colj[ti] -= scale*col[ti]; Aaug[r*2*N+c+ti] = colj[ti]; } if (blockIdx.x == 0){ if (ti == 0){ subpivot[r] = Aaug[r*2*N+iter+1]; } } } } __global__ void update_row_gpu(float *Aaug, float *subpivot, int N, int iter, int use) { int c = iter + blockIdx.x * Tix*Tix; int ti = threadIdx.x; if (c + ti < 2*N){ Aaug[iter*2*N+c+ti] += Aaug[use*2*N+c+ti]; } if (blockIdx.x == 0){ if (ti == 0){ subpivot[iter] = Aaug[iter+iter*2*N]; } } } __global__ void scale_row_gpu(float *Aaug, float *subpivot, int N, int iter) { int c = iter + blockIdx.x * Tix*Tix; int ti = threadIdx.x; if (c + ti < 2*N){ Aaug[iter*2*N+c+ti] = Aaug[iter*2*N+c+ti]/subpivot[iter]; } } int main(int argc, char *argv[]){ float *Aaug, *Aaug_cu, *subpivot, *subpivot_cu; int iter, m, i, j, N, use; FILE * f; // Checks to see valid number of inputs given if (argc > 4 || argc < 4){ printf("Error: expected 3 input (A .mtx file, Ainv .mtx file, N), given %d\n", argc-1); exit(-1); } N = strtol(argv[3],NULL,10); // Checks to see if a valid .mtx file was given int memSize = 2*N*N*sizeof(float); Aaug = (float *)malloc(memSize); subpivot = (float *)malloc(N*sizeof(float)); f = fopen(argv[1], "rb"); for (i=0; i<N; i++){ for (j=0; j<N; j++){ fscanf(f, "%f", &Aaug[2*N*i+j]); if (i==j){ Aaug[2*N*i+N+j] = 1; } else{ Aaug[2*N*i+N+j] = 0; } } subpivot[i] = Aaug[i*2*N]; } fclose(f); cudaMalloc((void**)&Aaug_cu, memSize); cudaMalloc((void**)&subpivot_cu, N*sizeof(float)); cudaMemcpy(Aaug_cu, Aaug, memSize, cudaMemcpyHostToDevice); cudaMemcpy(subpivot_cu, subpivot, N*sizeof(float), cudaMemcpyHostToDevice); // Runs GPU Code int bn, rn; dim3 nblocks, nthreads, nblocks_1, nthreads_1; nthreads.x = Tix*Tix; nthreads.y = 1; nthreads_1.x = Tix*Tix; nthreads_1.y = 1; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); for (iter=0;iter<N; iter++){ bn = MAX((2*N-iter)/(Tix*Tix),1); // Defines number of subdivisions in the row rn = N; // Defines how many rows to update nblocks.x = bn; nblocks.y = rn; nblocks_1.x = bn; nblocks_1.y = 1; if (sqrt(subpivot[iter]*subpivot[iter])<.00000000000001){ // checks for invertability for (m=1; m+iter<N; m++){ // loops through lower rows for nonzero in pivot if (sqrt(subpivot[iter+m]*subpivot[iter+m])>.000000000000001){ // checks if nonzero pivot use = m+iter; goto update; // exits if pivot found } else if(m==N-1){ printf("Error matrix not invertible \n"); // if no pivot found, not inverable exit(-1); } } printf("Error matrix not invertible \n"); // if at the last pivot and zero, not invertable exit(-1); update: update_row_gpu<<<nblocks_1, nthreads_1>>>(Aaug_cu, subpivot_cu, N, iter, use); cudaDeviceSynchronize(); } scale_row_gpu<<<nblocks_1, nthreads_1>>>(Aaug_cu, subpivot_cu, N, iter); cudaDeviceSynchronize(); if(iter<N){ // Won't perform reduction if iter = N (at the bottom) GaussJordan_gpu<<<nblocks, nthreads>>>(Aaug_cu, subpivot_cu, N, iter); cudaDeviceSynchronize(); cudaMemcpy(subpivot, subpivot_cu, N*sizeof(float), cudaMemcpyDeviceToHost); } 
} cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf ("gpu time: %f ms\n", milliseconds); cudaMemcpy(Aaug, Aaug_cu, memSize, cudaMemcpyDeviceToHost); // Writes output to file FILE *of2 = fopen(argv[2], "wb"); for (i=0; i<N; i++){ for(j=0; j<N; j++){ fprintf(of2, "%f ", Aaug[N+2*N*i+j]); } fprintf(of2, "\n"); } fclose(of2); }
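None of the cudaMalloc, cudaMemcpy, or kernel-launch calls above check their return status. A small checking macro of the kind that could wrap them (an editor's sketch, not taken from the original source):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with a readable message when a CUDA runtime call fails.
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                     \
                    cudaGetErrorString(err_), __FILE__, __LINE__);          \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

// Usage example:
//   CUDA_CHECK(cudaMemcpy(Aaug_cu, Aaug, memSize, cudaMemcpyHostToDevice));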
3b92dd665f3f2db236540b302629614e3fc7b27a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ldcp_decoder.h * ldpc3 * * Created by legal on 02/04/11. * Copyright 2011 ENSEIRB. All rights reserved. * */ /*----------------------------------------------------------------------------*/ #include "ADMM_GPU_decoder_16b.h" #include "../gpu/ADMM_GPU_16b.h" #if 0 #include "../codes/Constantes_4000x2000.h" #else #include "./admm/admm_2640x1320.h" #endif ADMM_GPU_decoder_16b::ADMM_GPU_decoder_16b( int _frames ) { hipError_t Status; frames = _frames; VNs_per_frame = NOEUD; CNs_per_frame = PARITE; MSGs_per_frame = MESSAGES; VNs_per_load = frames * VNs_per_frame; CNs_per_load = frames * CNs_per_frame; MSGs_per_load = frames * MSGs_per_frame; // LLRs entrant dans le decodeur CUDA_MALLOC_HOST (&h_iLLR, VNs_per_load); CUDA_MALLOC_DEVICE(&d_iLLR, VNs_per_load); // LLRs interne au decodeur CUDA_MALLOC_DEVICE(&d_oLLR, VNs_per_load); // LLRs (decision dure) sortant du le decodeur CUDA_MALLOC_HOST (&h_hDecision, VNs_per_load); CUDA_MALLOC_DEVICE(&d_hDecision, VNs_per_load); // Le tableau fournissant le degree des noeuds VNs CUDA_MALLOC_DEVICE(&d_degVNs, VNs_per_frame); // Status = hipMemcpy(d_degVNs, t_degVN, nb_Node * sizeof(unsigned int), hipMemcpyHostToDevice); // ERROR_CHECK(Status, (char*)__FILE__, __LINE__); // Le tableau fournissant le degree des noeuds CNs CUDA_MALLOC_DEVICE(&d_degCNs, CNs_per_frame); // Status = hipMemcpy(d_degCNs, t_degCN, nb_Check * sizeof(unsigned int), hipMemcpyHostToDevice); // ERROR_CHECK(Status, (char*)__FILE__, __LINE__); #if 0 CUDA_MALLOC_DEVICE(&d_t_row, nb_Msg); Status = hipMemcpy(d_t_row, t_row, nb_Msg * sizeof(unsigned int), hipMemcpyHostToDevice); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); #else CUDA_MALLOC_DEVICE(&d_t_row, MSGs_per_frame); Status = hipMemcpy(d_t_row, t_row_pad_4, MSGs_per_frame * sizeof(unsigned int), hipMemcpyHostToDevice); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); #endif #if 1 CUDA_MALLOC_DEVICE(&d_t_col, MSGs_per_frame); unsigned short* t_col_m = new unsigned short[MSGs_per_frame]; for(int i=0; i<MSGs_per_frame; i++) t_col_m[i] = t_col[i]; Status = hipMemcpy(d_t_col, (int*)t_col_m, MSGs_per_frame * sizeof(unsigned int), hipMemcpyHostToDevice); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); delete t_col_m; #else CUDA_MALLOC_DEVICE(&d_t_col, MSGs_per_frame); Status = hipMemcpy(d_t_col, (int*)t_col, MSGs_per_frame * sizeof(unsigned int), hipMemcpyHostToDevice); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); #endif // Espace memoire pour l'change de messages dans le decodeur CUDA_MALLOC_DEVICE(&LZr, 2 * MSGs_per_load); // exit( 0 ); } ADMM_GPU_decoder_16b::~ADMM_GPU_decoder_16b() { hipError_t Status; Status = hipHostFree(h_iLLR); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = hipFree(d_iLLR); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = hipFree(d_oLLR); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = hipHostFree(h_hDecision); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = hipFree(d_hDecision); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = hipFree(d_degCNs); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = hipFree(d_degVNs); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = hipFree(d_t_row); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = hipFree(d_t_col); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = hipFree(LZr); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); } void ADMM_GPU_decoder_16b::decode(float* llrs, int* bits, int nb_iters) { // #define 
CHECK_ERRORS hipError_t Status; int threadsPerBlock = 128; int blocksPerGridNode = (VNs_per_load + threadsPerBlock - 1) / threadsPerBlock; int blocksPerGridCheck = (CNs_per_load + threadsPerBlock - 1) / threadsPerBlock; int blocksPerGridMsgs = (MSGs_per_load + threadsPerBlock - 1) / threadsPerBlock; /* On copie les donnees d'entree du decodeur */ hipMemcpyAsync(d_iLLR, llrs, VNs_per_load * sizeof(float), hipMemcpyHostToDevice); /* INITIALISATION DU DECODEUR LDPC SUR GPU */ hipLaunchKernelGGL(( ADMM_InitArrays_16b), dim3(blocksPerGridMsgs), dim3(threadsPerBlock), 0, 0, LZr, MSGs_per_load); #ifdef CHECK_ERRORS ERROR_CHECK(hipGetLastError( ), __FILE__, __LINE__); #endif hipLaunchKernelGGL(( ADMM_ScaleLLRs), dim3(blocksPerGridNode), dim3(threadsPerBlock), 0, 0, d_iLLR, VNs_per_load); #ifdef CHECK_ERRORS ERROR_CHECK(hipGetLastError( ), __FILE__, __LINE__); #endif // LANCEMENT DU PROCESSUS DE DECODAGE SUR n ITERATIONS for(int k = 0; k < 200; k++) { hipLaunchKernelGGL(( ADMM_VN_kernel_deg3_16b), dim3(blocksPerGridNode), dim3(threadsPerBlock), 0, 0, d_iLLR, d_oLLR, LZr, d_t_row, VNs_per_load); #ifdef CHECK_ERRORS ERROR_CHECK(hipGetLastError( ), __FILE__, __LINE__); #endif hipLaunchKernelGGL(( ADMM_CN_kernel_deg6_16b), dim3(blocksPerGridCheck), dim3(threadsPerBlock), 0, 0, d_oLLR, LZr, d_t_col, d_hDecision, CNs_per_load); #ifdef CHECK_ERRORS ERROR_CHECK(hipGetLastError( ), __FILE__, __LINE__); #endif // GESTION DU CRITERE D'ARRET DES CODEWORDS if( (k > 10) && (k%2 == 0) ) { hipLaunchKernelGGL(( reduce), dim3(blocksPerGridCheck), dim3(threadsPerBlock), 0, 0, d_hDecision, CNs_per_load); #ifdef CHECK_ERRORS ERROR_CHECK(hipGetLastError( ), __FILE__, __LINE__); #endif Status = hipMemcpy(h_hDecision, d_hDecision, blocksPerGridCheck * sizeof(int), hipMemcpyDeviceToHost); #ifdef CHECK_ERRORS ERROR_CHECK(Status, __FILE__, __LINE__); #endif int sum = 0; for(int p=0; p<blocksPerGridCheck; p++) sum += h_hDecision[p]; if( sum == 0 ) break; } } // LANCEMENT DU PROCESSUS DE DECODAGE SUR n ITERATIONS hipLaunchKernelGGL(( ADMM_HardDecision), dim3(blocksPerGridNode), dim3(threadsPerBlock), 0, 0, d_oLLR, d_hDecision, VNs_per_load); #ifdef CHECK_ERRORS ERROR_CHECK(hipGetLastError(), __FILE__, __LINE__); #endif // LANCEMENT DU PROCESSUS DE DECODAGE SUR n ITERATIONS Status = hipMemcpy(bits, d_hDecision, VNs_per_load * sizeof(int), hipMemcpyDeviceToHost); #ifdef CHECK_ERRORS ERROR_CHECK(Status, __FILE__, __LINE__); #endif }
3b92dd665f3f2db236540b302629614e3fc7b27a.cu
/* * ldcp_decoder.h * ldpc3 * * Created by legal on 02/04/11. * Copyright 2011 ENSEIRB. All rights reserved. * */ /*----------------------------------------------------------------------------*/ #include "ADMM_GPU_decoder_16b.h" #include "../gpu/ADMM_GPU_16b.h" #if 0 #include "../codes/Constantes_4000x2000.h" #else #include "./admm/admm_2640x1320.h" #endif ADMM_GPU_decoder_16b::ADMM_GPU_decoder_16b( int _frames ) { cudaError_t Status; frames = _frames; VNs_per_frame = NOEUD; CNs_per_frame = PARITE; MSGs_per_frame = MESSAGES; VNs_per_load = frames * VNs_per_frame; CNs_per_load = frames * CNs_per_frame; MSGs_per_load = frames * MSGs_per_frame; // LLRs entrant dans le decodeur CUDA_MALLOC_HOST (&h_iLLR, VNs_per_load); CUDA_MALLOC_DEVICE(&d_iLLR, VNs_per_load); // LLRs interne au decodeur CUDA_MALLOC_DEVICE(&d_oLLR, VNs_per_load); // LLRs (decision dure) sortant du le decodeur CUDA_MALLOC_HOST (&h_hDecision, VNs_per_load); CUDA_MALLOC_DEVICE(&d_hDecision, VNs_per_load); // Le tableau fournissant le degree des noeuds VNs CUDA_MALLOC_DEVICE(&d_degVNs, VNs_per_frame); // Status = cudaMemcpy(d_degVNs, t_degVN, nb_Node * sizeof(unsigned int), cudaMemcpyHostToDevice); // ERROR_CHECK(Status, (char*)__FILE__, __LINE__); // Le tableau fournissant le degree des noeuds CNs CUDA_MALLOC_DEVICE(&d_degCNs, CNs_per_frame); // Status = cudaMemcpy(d_degCNs, t_degCN, nb_Check * sizeof(unsigned int), cudaMemcpyHostToDevice); // ERROR_CHECK(Status, (char*)__FILE__, __LINE__); #if 0 CUDA_MALLOC_DEVICE(&d_t_row, nb_Msg); Status = cudaMemcpy(d_t_row, t_row, nb_Msg * sizeof(unsigned int), cudaMemcpyHostToDevice); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); #else CUDA_MALLOC_DEVICE(&d_t_row, MSGs_per_frame); Status = cudaMemcpy(d_t_row, t_row_pad_4, MSGs_per_frame * sizeof(unsigned int), cudaMemcpyHostToDevice); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); #endif #if 1 CUDA_MALLOC_DEVICE(&d_t_col, MSGs_per_frame); unsigned short* t_col_m = new unsigned short[MSGs_per_frame]; for(int i=0; i<MSGs_per_frame; i++) t_col_m[i] = t_col[i]; Status = cudaMemcpy(d_t_col, (int*)t_col_m, MSGs_per_frame * sizeof(unsigned int), cudaMemcpyHostToDevice); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); delete t_col_m; #else CUDA_MALLOC_DEVICE(&d_t_col, MSGs_per_frame); Status = cudaMemcpy(d_t_col, (int*)t_col, MSGs_per_frame * sizeof(unsigned int), cudaMemcpyHostToDevice); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); #endif // Espace memoire pour l'échange de messages dans le decodeur CUDA_MALLOC_DEVICE(&LZr, 2 * MSGs_per_load); // exit( 0 ); } ADMM_GPU_decoder_16b::~ADMM_GPU_decoder_16b() { cudaError_t Status; Status = cudaFreeHost(h_iLLR); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = cudaFree(d_iLLR); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = cudaFree(d_oLLR); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = cudaFreeHost(h_hDecision); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = cudaFree(d_hDecision); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = cudaFree(d_degCNs); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = cudaFree(d_degVNs); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = cudaFree(d_t_row); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = cudaFree(d_t_col); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); Status = cudaFree(LZr); ERROR_CHECK(Status, (char*)__FILE__, __LINE__); } void ADMM_GPU_decoder_16b::decode(float* llrs, int* bits, int nb_iters) { // #define CHECK_ERRORS cudaError_t Status; int threadsPerBlock = 128; int 
blocksPerGridNode = (VNs_per_load + threadsPerBlock - 1) / threadsPerBlock; int blocksPerGridCheck = (CNs_per_load + threadsPerBlock - 1) / threadsPerBlock; int blocksPerGridMsgs = (MSGs_per_load + threadsPerBlock - 1) / threadsPerBlock; /* On copie les donnees d'entree du decodeur */ cudaMemcpyAsync(d_iLLR, llrs, VNs_per_load * sizeof(float), cudaMemcpyHostToDevice); /* INITIALISATION DU DECODEUR LDPC SUR GPU */ ADMM_InitArrays_16b<<<blocksPerGridMsgs, threadsPerBlock>>>(LZr, MSGs_per_load); #ifdef CHECK_ERRORS ERROR_CHECK(cudaGetLastError( ), __FILE__, __LINE__); #endif ADMM_ScaleLLRs<<<blocksPerGridNode, threadsPerBlock>>>(d_iLLR, VNs_per_load); #ifdef CHECK_ERRORS ERROR_CHECK(cudaGetLastError( ), __FILE__, __LINE__); #endif // LANCEMENT DU PROCESSUS DE DECODAGE SUR n ITERATIONS for(int k = 0; k < 200; k++) { ADMM_VN_kernel_deg3_16b<<<blocksPerGridNode, threadsPerBlock>>> (d_iLLR, d_oLLR, LZr, d_t_row, VNs_per_load); #ifdef CHECK_ERRORS ERROR_CHECK(cudaGetLastError( ), __FILE__, __LINE__); #endif ADMM_CN_kernel_deg6_16b<<<blocksPerGridCheck, threadsPerBlock>>> (d_oLLR, LZr, d_t_col, d_hDecision, CNs_per_load); #ifdef CHECK_ERRORS ERROR_CHECK(cudaGetLastError( ), __FILE__, __LINE__); #endif // GESTION DU CRITERE D'ARRET DES CODEWORDS if( (k > 10) && (k%2 == 0) ) { reduce<<<blocksPerGridCheck, threadsPerBlock>>>(d_hDecision, CNs_per_load); #ifdef CHECK_ERRORS ERROR_CHECK(cudaGetLastError( ), __FILE__, __LINE__); #endif Status = cudaMemcpy(h_hDecision, d_hDecision, blocksPerGridCheck * sizeof(int), cudaMemcpyDeviceToHost); #ifdef CHECK_ERRORS ERROR_CHECK(Status, __FILE__, __LINE__); #endif int sum = 0; for(int p=0; p<blocksPerGridCheck; p++) sum += h_hDecision[p]; if( sum == 0 ) break; } } // LANCEMENT DU PROCESSUS DE DECODAGE SUR n ITERATIONS ADMM_HardDecision<<<blocksPerGridNode, threadsPerBlock>>>(d_oLLR, d_hDecision, VNs_per_load); #ifdef CHECK_ERRORS ERROR_CHECK(cudaGetLastError(), __FILE__, __LINE__); #endif // LANCEMENT DU PROCESSUS DE DECODAGE SUR n ITERATIONS Status = cudaMemcpy(bits, d_hDecision, VNs_per_load * sizeof(int), cudaMemcpyDeviceToHost); #ifdef CHECK_ERRORS ERROR_CHECK(Status, __FILE__, __LINE__); #endif }
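The stopping test inside decode() relies on a reduce kernel whose body is declared elsewhere and is not part of this file. A simplified stand-in (an assumption for illustration, not the project's actual kernel) showing the per-block partial-sum pattern that the host loop then finishes; it assumes blockDim.x is a power of two no larger than 128, matching threadsPerBlock above:

// Each block writes the sum of its slice of `in` to out[blockIdx.x]; the host
// adds the per-block sums and stops iterating once they reach zero.
__global__ void block_sum(const int* in, int* out, int n)
{
    __shared__ int s[128];
    int tid = threadIdx.x;
    int i   = blockIdx.x * blockDim.x + tid;
    s[tid]  = (i < n) ? in[i] : 0;
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) s[tid] += s[tid + stride];
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = s[0];
}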
385bc155a4818559a2702d531ab8052e0c9e5a39.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <stdlib.h> #include <iostream> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> // includes CUDA #include <hip/hip_runtime.h> // includes, project // #include <helper_cuda.h> // #include <helper_functions.h> // helper functions for SDK examples #include "MatUtil.h" #include "floyd.h" using namespace std; unsigned long long time_diff(const struct timeval& tv1, const struct timeval& tv2) { return (tv2.tv_sec-tv1.tv_sec)*1000000 + tv2.tv_usec-tv1.tv_usec; } int main(int argc, char **argv) { if (argc != 2) { return -1; } size_t size_mat = atoi(argv[1]); size_t num_node = size_mat * size_mat; cout << "N = " << size_mat << endl; int *mat = (int*)malloc(sizeof(int) * num_node); int *ans = (int*)malloc(sizeof(int) * num_node); GenMatrix(mat, size_mat); memcpy(ans, mat, sizeof(int) * num_node); struct timeval start_time, end_time; gettimeofday(&start_time, NULL); ST_APSP(ans, size_mat); gettimeofday(&end_time, NULL); int sequential_time = time_diff(start_time, end_time); gettimeofday(&start_time, NULL); PL_APSP(mat, size_mat); gettimeofday(&end_time, NULL); int parallel_time = time_diff(start_time, end_time); cout << "Sequential time: " << sequential_time << " us" << endl; cout << " Parallel time: " << parallel_time << " us" << endl; cout << " Speedup: " << (1.0 * sequential_time / parallel_time) << endl; if (CmpArray(mat, ans, num_node)) { cout << "Parallel answer is correct!" << endl; } else { cout << "Parallel answer is wrong!" << endl; } }
385bc155a4818559a2702d531ab8052e0c9e5a39.cu
// includes, system #include <stdlib.h> #include <iostream> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> // includes CUDA #include <cuda_runtime.h> // includes, project // #include <helper_cuda.h> // #include <helper_functions.h> // helper functions for SDK examples #include "MatUtil.h" #include "floyd.h" using namespace std; unsigned long long time_diff(const struct timeval& tv1, const struct timeval& tv2) { return (tv2.tv_sec-tv1.tv_sec)*1000000 + tv2.tv_usec-tv1.tv_usec; } int main(int argc, char **argv) { if (argc != 2) { return -1; } size_t size_mat = atoi(argv[1]); size_t num_node = size_mat * size_mat; cout << "N = " << size_mat << endl; int *mat = (int*)malloc(sizeof(int) * num_node); int *ans = (int*)malloc(sizeof(int) * num_node); GenMatrix(mat, size_mat); memcpy(ans, mat, sizeof(int) * num_node); struct timeval start_time, end_time; gettimeofday(&start_time, NULL); ST_APSP(ans, size_mat); gettimeofday(&end_time, NULL); int sequential_time = time_diff(start_time, end_time); gettimeofday(&start_time, NULL); PL_APSP(mat, size_mat); gettimeofday(&end_time, NULL); int parallel_time = time_diff(start_time, end_time); cout << "Sequential time: " << sequential_time << " us" << endl; cout << " Parallel time: " << parallel_time << " us" << endl; cout << " Speedup: " << (1.0 * sequential_time / parallel_time) << endl; if (CmpArray(mat, ans, num_node)) { cout << "Parallel answer is correct!" << endl; } else { cout << "Parallel answer is wrong!" << endl; } }
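PL_APSP is implemented elsewhere (floyd.h is included, but its source is not part of this file). Purely as an illustration of one possible implementation, not the project's actual code, a naive Floyd-Warshall step parallelized over the (i, j) plane and launched once per intermediate vertex k:

// Assumes edge weights are small enough that dist[i*n+k] + dist[k*n+j] cannot
// overflow int (i.e. "no edge" is not encoded as INT_MAX).
__global__ void floyd_step(int* dist, int n, int k)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < n && j < n) {
        int via_k = dist[i * n + k] + dist[k * n + j];
        if (via_k < dist[i * n + j]) dist[i * n + j] = via_k;
    }
}

// Host-side sketch:
//   dim3 block(16, 16), grid((n + 15) / 16, (n + 15) / 16);
//   for (int k = 0; k < n; ++k) floyd_step<<<grid, block>>>(d_dist, n, k);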
06e9794d39e029d8ab353ed2571f96abac189242.hip
// !!! This is a file automatically generated by hipify!!! // TODO : test this: //#define CUSP_USE_TEXTURE_MEMORY // THIS IS adapted from verbose_monitor.cu // PROVIDED BY THE CUSP v0.1 EXAMPLES #include "grids/grid_reader.h" #include "rbffd/rbffd.h" #include "timer_eb.h" #include <cusp/hyb_matrix.h> #include <cusp/ell_matrix.h> #include <cusp/csr_matrix.h> #include <cusp/coo_matrix.h> #include <cusp/monitor.h> #include <cusp/krylov/cg.h> #include <cusp/krylov/gmres.h> #include <cusp/gallery/poisson.h> #include <cusp/print.h> #include <cusp/array2d.h> #include <cusp/multiply.h> #include <cusp/blas.h> #include <cusp/io/matrix_market.h> #include <cusp/precond/diagonal.h> #include <cusp/precond/ainv.h> #include <cusp/precond/smoothed_aggregation.h> #include <cusp/precond/aggregate.h> #include <cusp/precond/smooth.h> #include <cusp/precond/strength.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include "utils/spherical_harmonics.h" #include <iomanip> #include <iostream> #include <sstream> #include <map> #include <fstream> #include <typeinfo> using namespace std; typedef std::vector< std::map< unsigned int, double> > STL_MAT_t; typedef std::vector<double> STL_VEC_t; typedef cusp::array1d<double, cusp::host_memory> HOST_VEC_t; typedef cusp::array1d<double, cusp::device_memory> DEVICE_VEC_t; typedef cusp::csr_matrix<unsigned int, double, cusp::host_memory> HOST_MAT_t; typedef cusp::csr_matrix<unsigned int, double, cusp::device_memory> DEVICE_MAT_t; EB::TimerList timers; //--------------------------------- // Perform GMRES on GPU void GMRES_Device(DEVICE_MAT_t& A, DEVICE_VEC_t& F, DEVICE_VEC_t& U_exact, DEVICE_VEC_t& U_approx_gpu) { #if 1 size_t restart = 300; int max_iters = 1000; double rel_tol = 1e-6; #else // Maximum number of iterations (total) size_t max_iters = 500; // restart the process every "restart" iterations size_t restart = 200; double rel_tol = 1e-8; #endif try { // cusp::convergence_monitor<double> monitor( F, max_iters, 0, 1e-3); cusp::default_monitor<double> monitor( F, max_iters, rel_tol ); //, max_iters, rel_tol);// , 1e-3); //cusp::default_monitor<double> monitor( F, -1, rel_tol ); //, max_iters, rel_tol);// , 1e-3); std::cout << "GMRES Starting Residual Norm: " << monitor.residual_norm() << std::endl; // 1e-8, 10000, 300); int precondType = -1; switch (precondType) { case 0: { // Jacobi Preconditioning (DIAGONAL) // Probably wont work well for RBF-FD since we're not diagonally dominant cusp::precond::diagonal<double, cusp::device_memory> M(A); cusp::krylov::gmres(A, U_approx_gpu, F, restart, monitor, M); } break; case 1: { // Smoothed Aggregation (Algebraic MultiGrid. Works for Nonsym?) 
cusp::precond::smoothed_aggregation<int, double, cusp::device_memory> M(A); cusp::krylov::gmres(A, U_approx_gpu, F, restart, monitor, M); } break; #if 0 // ONLY SPD MATRICES case 0: // AINV using static dropping cusp::precond::scaled_bridson_ainv<double, cusp::device_memory> M(A, 0, 10); cusp::krylov::gmres(A, U_approx_gpu, F, restart, monitor, M); break; #endif #if 0 // ONLY SPD MATRICES case 1: // AINV using standard drop tolerance cusp::precond::scaled_bridson_ainv<double, cusp::device_memory> M(A, .1); cusp::krylov::gmres(A, U_approx_gpu, F, restart, monitor, M); break; #endif #if 0 // ONLY FOR SPD MATRICES case 2: // AINV using novel cusp dropping strategy (TODO: lookup) cusp::precond::bridson_ainv<double, cusp::device_memory> M(A, 0, -1, true, 2); cusp::krylov::gmres(A, U_approx_gpu, F, restart, monitor, M); #endif case 2: { // AINV using novel cusp dropping strategy // assumes that sparsity pattern of precond is same as A, plus // 2 extra nonzeros per row // VERY SLOW TO BUILD; DOES NOT CONVERGE cusp::precond::nonsym_bridson_ainv<double, cusp::device_memory> M(A, 0, -1, true, 2); cusp::krylov::gmres(A, U_approx_gpu, F, restart, monitor, M); } case 3: { // AINV using novel cusp dropping strategy // Assume 40 nonzeros per row, drop everthing else. // VERY SLOW TO BUILD; DOES NOT CONVERGE cusp::precond::nonsym_bridson_ainv<double, cusp::device_memory> M(A, 0.1, 10, false, 0); cusp::krylov::gmres(A, U_approx_gpu, F, restart, monitor, M); } default: // Solve unpreconditioned Au = F cusp::krylov::gmres(A, U_approx_gpu, F, restart, monitor); } hipDeviceSynchronize(); // monitor.print(); if (monitor.converged()) { std::cout << "\n[+++] Solver converged to " << monitor.relative_tolerance() << " relative tolerance"; std::cout << " after " << monitor.iteration_count() << " iterations" << std::endl << std::endl; } else { std::cout << "\n[XXX] Solver reached iteration limit " << monitor.iteration_limit() << " before converging"; std::cout << " to " << monitor.relative_tolerance() << " relative tolerance " << std::endl << std::endl; } std::cout << "GMRES Iterations: " << monitor.iteration_count() << std::endl; std::cout << "GMRES Iteration Limit: " << monitor.iteration_limit() << std::endl; std::cout << "GMRES Residual Norm: " << monitor.residual_norm() << std::endl; std::cout << "GMRES Relative Tol: " << monitor.relative_tolerance() << std::endl; std::cout << "GMRES Absolute Tol: " << monitor.absolute_tolerance() << std::endl; std::cout << "GMRES Target Residual (Abs + Rel*norm(F)): " << monitor.tolerance() << std::endl; } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory trying to compute GMRES: " << e.what() << std::endl; exit(-1); } catch(thrust::system_error &e) { std::cerr << "Some other error happened during GMRES: " << e.what() << std::endl; exit(-1); } try { typedef cusp::array1d<double, DEVICE_VEC_t>::view DEVICE_VEC_VIEW_t; DEVICE_VEC_VIEW_t U_approx_view(U_exact.begin()+(U_exact.size() - F.size()), U_exact.end()); DEVICE_VEC_t diff(U_approx_gpu); //cusp::blas::axpy(U_exact.begin()+(U_exact.size() - F.size()), U_exact.end(), diff.begin(), -1); cusp::blas::axpy(U_approx_view, diff, -1); std::cout << "Rel l1 Norm: " << cusp::blas::nrm1(diff) / cusp::blas::nrm1(U_exact) << std::endl; std::cout << "Rel l2 Norm: " << cusp::blas::nrm2(diff) / cusp::blas::nrm2(U_exact) << std::endl; std::cout << "Rel linf Norm: " << cusp::blas::nrmmax(diff) / cusp::blas::nrmmax(U_exact) << std::endl; } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory trying to compute Error Norms: " << 
e.what() << std::endl; exit(-1); } catch(thrust::system_error &e) { std::cerr << "Some other error happened during Error Norms: " << e.what() << std::endl; exit(-1); } } //--------------------------------- void assemble_System_Stokes( RBFFD& der, Grid& grid, HOST_MAT_t& A, HOST_VEC_t& F, HOST_VEC_t& U_exact){ double eta = 1.; //double Ra = 1.e6; // We have different nb_stencils and nb_nodes when we parallelize. The subblocks might not be full unsigned int nb_stencils = grid.getStencilsSize(); unsigned int nb_nodes = grid.getNodeListSize(); unsigned int max_stencil_size = grid.getMaxStencilSize(); unsigned int N = nb_nodes; // --------------------------------------------------- //------------- Fill the RHS of the System ------------- // This is our manufactured solution: SphericalHarmonic::Sph32 UU; SphericalHarmonic::Sph32105 VV; SphericalHarmonic::Sph32 WW; SphericalHarmonic::Sph32 PP; std::vector<NodeType>& nodes = grid.getNodeList(); //------------- Fill F ------------- // U for (unsigned int j = 0; j < N; j++) { unsigned int row_ind = j + 0*N; NodeType& node = nodes[j]; double Xx = node.x(); double Yy = node.y(); double Zz = node.z(); U_exact[row_ind] = UU.eval(Xx,Yy,Zz); F[row_ind] = -UU.lapl(Xx,Yy,Zz) + PP.d_dx(Xx,Yy,Zz); } #if 1 // V for (unsigned int j = 0; j < N; j++) { unsigned int row_ind = j + 1*N; NodeType& node = nodes[j]; double Xx = node.x(); double Yy = node.y(); double Zz = node.z(); //double rr = sqrt(node.x()*node.x() + node.y()*node.y() + node.z()*node.z()); //double dir = node.y(); // F[row_ind] = (Ra * Temperature(j) * dir) / rr; U_exact[row_ind] = VV.eval(Xx,Yy,Zz); F[row_ind] = -VV.lapl(Xx,Yy,Zz) + PP.d_dy(Xx,Yy,Zz); } // W for (unsigned int j = 0; j < N; j++) { unsigned int row_ind = j + 2*N; NodeType& node = nodes[j]; double Xx = node.x(); double Yy = node.y(); double Zz = node.z(); U_exact[row_ind] = WW.eval(Xx,Yy,Zz); F[row_ind] = -WW.lapl(Xx,Yy,Zz) + PP.d_dz(Xx,Yy,Zz); } // P for (unsigned int j = 0; j < N; j++) { unsigned int row_ind = j + 3*N; NodeType& node = nodes[j]; double Xx = node.x(); double Yy = node.y(); double Zz = node.z(); U_exact[row_ind] = PP.eval(Xx,Yy,Zz); F[row_ind] = UU.d_dx(Xx,Yy,Zz) + VV.d_dy(Xx,Yy,Zz) + WW.d_dz(Xx,Yy,Zz); } #endif // Sum of U F[4*N+0] = 0.; // Sum of V F[4*N+1] = 0.; // Sum of W F[4*N+2] = 0.; // Sum of P F[4*N+3] = 0.; unsigned int ind = 0; // ----------------- Fill LHS -------------------- // // U (block) row for (unsigned int i = 0; i < nb_stencils; i++) { StencilType& st = grid.getStencil(i); // TODO: change these to *SFC weights (when computed) double* ddx = der.getStencilWeights(RBFFD::XSFC, i); double* lapl = der.getStencilWeights(RBFFD::LSFC, i); unsigned int diag_row_ind = i + 0*N; A.row_offsets[diag_row_ind] = ind; for (unsigned int j = 0; j < st.size(); j++) { unsigned int diag_col_ind = st[j] + 0*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = -eta * lapl[j]; ind++; } for (unsigned int j = 0; j < st.size(); j++) { unsigned int diag_col_ind = st[j] + 3*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = ddx[j]; ind++; } // Added constraint to square mat and close nullspace A.column_indices[ind] = 4*N+0; A.values[ind] = 1; ind++; } // V (block) row for (unsigned int i = 0; i < nb_stencils; i++) { StencilType& st = grid.getStencil(i); // TODO: change these to *SFC weights (when computed) double* ddy = der.getStencilWeights(RBFFD::YSFC, i); double* lapl = der.getStencilWeights(RBFFD::LSFC, i); unsigned int diag_row_ind = i + 1*N; A.row_offsets[diag_row_ind] = ind; for (unsigned int j = 0; j < 
st.size(); j++) { unsigned int diag_col_ind = st[j] + 1*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = -eta * lapl[j]; ind++; } for (unsigned int j = 0; j < st.size(); j++) { unsigned int diag_col_ind = st[j] + 3*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = ddy[j]; ind++; } // Added constraint to square mat and close nullspace A.column_indices[ind] = 4*N+1; A.values[ind] = 1; ind++; } // W (block) row for (unsigned int i = 0; i < nb_stencils; i++) { StencilType& st = grid.getStencil(i); // TODO: change these to *SFC weights (when computed) double* ddz = der.getStencilWeights(RBFFD::ZSFC, i); double* lapl = der.getStencilWeights(RBFFD::LSFC, i); unsigned int diag_row_ind = i + 2*N; A.row_offsets[diag_row_ind] = ind; for (unsigned int j = 0; j < st.size(); j++) { unsigned int diag_col_ind = st[j] + 2*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = -eta * lapl[j]; ind++; } for (unsigned int j = 0; j < st.size(); j++) { unsigned int diag_col_ind = st[j] + 3*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = ddz[j]; ind++; } // Added constraint to square mat and close nullspace A.column_indices[ind] = 4*N+2; A.values[ind] = 1; ind++; } // P (block) row for (unsigned int i = 0; i < nb_stencils; i++) { StencilType& st = grid.getStencil(i); // TODO: change these to *SFC weights (when computed) double* ddx = der.getStencilWeights(RBFFD::XSFC, i); double* ddy = der.getStencilWeights(RBFFD::YSFC, i); double* ddz = der.getStencilWeights(RBFFD::ZSFC, i); unsigned int diag_row_ind = i + 3*N; A.row_offsets[diag_row_ind] = ind; for (unsigned int j = 0; j < st.size(); j++) { unsigned int diag_col_ind = st[j] + 0*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = ddx[j]; ind++; } for (unsigned int j = 0; j < st.size(); j++) { unsigned int diag_col_ind = st[j] + 1*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = ddy[j]; ind++; } for (unsigned int j = 0; j < st.size(); j++) { unsigned int diag_col_ind = st[j] + 2*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = ddz[j]; ind++; } // Added constraint to square mat and close nullspace A.column_indices[ind] = 4*N+3; A.values[ind] = 1; ind++; } // ------ EXTRA CONSTRAINT ROWS ----- unsigned int diag_row_ind = 4*N; A.row_offsets[diag_row_ind] = ind; // U for (unsigned int j = 0; j < N; j++) { unsigned int diag_col_ind = j + 0*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = 1; ind++; } diag_row_ind++; A.row_offsets[diag_row_ind] = ind; // V for (unsigned int j = 0; j < N; j++) { unsigned int diag_col_ind = j + 1*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = 1; ind++; } diag_row_ind++; A.row_offsets[diag_row_ind] = ind; // W for (unsigned int j = 0; j < N; j++) { unsigned int diag_col_ind = j + 2*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = 1; ind++; } diag_row_ind++; A.row_offsets[diag_row_ind] = ind; // P for (unsigned int j = 0; j < N; j++) { unsigned int diag_col_ind = j + 3*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = 1; ind++; } // VERY IMPORTANT. UNSPECIFIED LAUNCH FAILURES ARE CAUSED BY FORGETTING THIS! 
A.row_offsets[4*N+4] = ind; } template <typename VecT> void write_to_file(VecT vec, std::string filename) { std::ofstream fout; fout.open(filename.c_str()); for (size_t i = 0; i < vec.size(); i++) { fout << std::setprecision(10) << vec[i] << std::endl; } fout.close(); std::cout << "Wrote " << filename << std::endl; } void write_System ( HOST_MAT_t& A, HOST_VEC_t& F, HOST_VEC_t& U_exact ) { write_to_file(F, "output/F.mtx"); write_to_file(U_exact, "output/U_exact.mtx"); cusp::io::write_matrix_market_file(A,"output/LHS.mtx"); } void write_Solution( Grid& grid, HOST_VEC_t& U_exact, DEVICE_VEC_t& U_approx_gpu ) { unsigned int nb_bnd = grid.getBoundaryIndicesSize(); // IF we want to write details we need to copy back to host. HOST_VEC_t U_approx(U_exact.size()); if (U_approx_gpu.size() == U_exact.size()) { thrust::copy(U_approx_gpu.begin(), U_approx_gpu.end(), U_approx.begin()); } else { thrust::copy(U_exact.begin(), U_exact.begin()+nb_bnd, U_approx.begin()); thrust::copy(U_approx_gpu.begin(), U_approx_gpu.end(), U_approx.begin()+nb_bnd); } write_to_file(U_approx, "output/U_gpu.mtx"); } //--------------------------------- void gpuTest(RBFFD& der, Grid& grid, int primeGPU=0) { unsigned int N = grid.getNodeListSize(); unsigned int n = grid.getMaxStencilSize(); unsigned int nrows = 4 * N + 4; unsigned int ncols = 4 * N + 4; unsigned int NNZ = 9*n*N+2*(4*N)+2*(3*N); char test_name[256]; char assemble_timer_name[256]; char copy_timer_name[512]; char test_timer_name[256]; if (primeGPU) { sprintf(test_name, "%u PRIMING THE GPU", N); sprintf(assemble_timer_name, "%u Primer Assemble", N); sprintf(copy_timer_name, "%u Primer Copy To CUSP_DEVICE_CSR", N); sprintf(test_timer_name, "%u Primer GMRES test", N); } else { sprintf(test_name, "%u GMRES GPU (CUSP_DEVICE_CSR)", N); sprintf(assemble_timer_name, "%u CUSP_HOST_CSR Assemble", N); sprintf(copy_timer_name, "%u CUSP_HOST_CSR Copy To CUSP_DEVICE_CSR", N); sprintf(test_timer_name, "%u GPU GMRES test", N); } if (!timers.contains(assemble_timer_name)) { timers[assemble_timer_name] = new EB::Timer(assemble_timer_name); } if (!timers.contains(copy_timer_name)) { timers[copy_timer_name] = new EB::Timer(copy_timer_name); } if (!timers.contains(test_timer_name)) { timers[test_timer_name] = new EB::Timer(test_timer_name); } std::cout << test_name << std::endl; // ----- ASSEMBLE ----- timers[assemble_timer_name]->start(); HOST_MAT_t* A = new HOST_MAT_t(nrows, ncols, NNZ); HOST_VEC_t* F = new HOST_VEC_t(nrows, 0); HOST_VEC_t* U_exact = new HOST_VEC_t(nrows, 0); assemble_System_Stokes(der, grid, *A, *F, *U_exact); timers[assemble_timer_name]->stop(); if (!primeGPU) { //write_System(*A, *F, *U_exact); } // ----- SOLVE ----- timers[copy_timer_name]->start(); DEVICE_MAT_t* A_gpu = new DEVICE_MAT_t(*A); DEVICE_VEC_t* F_gpu = new DEVICE_VEC_t(*F); DEVICE_VEC_t* U_exact_gpu = new DEVICE_VEC_t(*U_exact); DEVICE_VEC_t* U_approx_gpu = new DEVICE_VEC_t(F->size(), 0); timers[copy_timer_name]->stop(); timers[test_timer_name]->start(); // Use GMRES to solve A*u = F GMRES_Device(*A_gpu, *F_gpu, *U_exact_gpu, *U_approx_gpu); timers[test_timer_name]->stop(); if (!primeGPU) { write_Solution(grid, *U_exact, *U_approx_gpu); } // Cleanup delete(A); delete(A_gpu); delete(F); delete(U_exact); delete(F_gpu); delete(U_exact_gpu); delete(U_approx_gpu); } int main(void) { bool writeIntermediate = true; bool primed = false; std::vector<std::string> grids; //grids.push_back("~/GRIDS/md/md005.00036"); // grids.push_back("~/GRIDS/md/md165.27556"); //grids.push_back("~/GRIDS/md/md063.04096"); #if 1 
grids.push_back("~/GRIDS/md/md031.01024"); grids.push_back("~/GRIDS/md/md050.02601"); grids.push_back("~/GRIDS/md/md063.04096"); grids.push_back("~/GRIDS/md/md089.08100"); grids.push_back("~/GRIDS/md/md127.16384"); grids.push_back("~/GRIDS/md/md165.27556"); #endif #if 0 grids.push_back("~/GRIDS/geoff/scvtimersesh_100k_nodes.ascii"); grids.push_back("~/GRIDS/geoff/scvtimersesh_500k_nodes.ascii"); grids.push_back("~/GRIDS/geoff/scvtimersesh_100k_nodes.ascii"); grids.push_back("~/GRIDS/geoff/scvtimersesh_500k_nodes.ascii"); grids.push_back("~/GRIDS/geoff/scvtimersesh_1m_nodes.ascii"); #endif //grids.push_back("~/GRIDS/geoff/scvtimersesh_1m_nodes.ascii"); for (size_t i = 0; i < grids.size(); i++) { std::string& grid_name = grids[i]; std::string weight_timer_name = grid_name + " Calc Weights"; timers[weight_timer_name] = new EB::Timer(weight_timer_name.c_str()); // Get contours from rbfzone.blogspot.com to choose eps_c1 and eps_c2 based on stencil_size (n) unsigned int stencil_size = 40; double eps_c1 = 0.027; double eps_c2 = 0.274; GridReader* grid = new GridReader(grid_name, 4); grid->setMaxStencilSize(stencil_size); // We do not read until generate is called: Grid::GridLoadErrType err = grid->loadFromFile(); if (err == Grid::NO_GRID_FILES) { grid->generate(); // NOTE: We force at least one node in the domain to be a boundary. //----------------------------- // We will set the first node as a boundary/ground point. We know // the normal because we're on teh sphere centered at (0,0,0) unsigned int nodeIndex = 0; NodeType& node = grid->getNode(nodeIndex); Vec3 nodeNormal = node - Vec3(0,0,0); grid->appendBoundaryIndex(nodeIndex, nodeNormal); //----------------------------- if (writeIntermediate) { grid->writeToFile(); } } std::cout << "Generate Stencils\n"; Grid::GridLoadErrType st_err = grid->loadStencilsFromFile(); if (st_err == Grid::NO_STENCIL_FILES) { // grid->generateStencils(Grid::ST_BRUTE_FORCE); #if 1 grid->generateStencils(Grid::ST_KDTREE); #else grid->setNSHashDims(50, 50,50); grid->generateStencils(Grid::ST_HASH); #endif if (writeIntermediate) { grid->writeToFile(); } } std::cout << "Generate RBFFD Weights\n"; timers[weight_timer_name]->start(); RBFFD der(RBFFD::LSFC | RBFFD::XSFC | RBFFD::YSFC | RBFFD::ZSFC, grid, 3, 0); der.setEpsilonByParameters(eps_c1, eps_c2); int der_err = der.loadAllWeightsFromFile(); if (der_err) { der.computeAllWeightsForAllStencils(); timers[weight_timer_name]->stop(); #if 0 // Im finding that its more efficient to compute the weights than write and load from disk. if (writeIntermediate) { der.writeAllWeightsToFile(); } #endif } if (!primed) { std::cout << "\n\n"; cout << "Priming GPU with dummy operations (removes compile from benchmarks)\n"; gpuTest(der,*grid, 1); primed = true; std::cout << "\n\n"; } // No support for GMRES on the CPU yet. //cpuTest(der,*grid); gpuTest(der,*grid); delete(grid); } timers.printAll(); timers.writeToFile(); return EXIT_SUCCESS; }
06e9794d39e029d8ab353ed2571f96abac189242.cu
// TODO : test this: //#define CUSP_USE_TEXTURE_MEMORY // THIS IS adapted from verbose_monitor.cu // PROVIDED BY THE CUSP v0.1 EXAMPLES #include "grids/grid_reader.h" #include "rbffd/rbffd.h" #include "timer_eb.h" #include <cusp/hyb_matrix.h> #include <cusp/ell_matrix.h> #include <cusp/csr_matrix.h> #include <cusp/coo_matrix.h> #include <cusp/monitor.h> #include <cusp/krylov/cg.h> #include <cusp/krylov/gmres.h> #include <cusp/gallery/poisson.h> #include <cusp/print.h> #include <cusp/array2d.h> #include <cusp/multiply.h> #include <cusp/blas.h> #include <cusp/io/matrix_market.h> #include <cusp/precond/diagonal.h> #include <cusp/precond/ainv.h> #include <cusp/precond/smoothed_aggregation.h> #include <cusp/precond/aggregate.h> #include <cusp/precond/smooth.h> #include <cusp/precond/strength.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include "utils/spherical_harmonics.h" #include <iomanip> #include <iostream> #include <sstream> #include <map> #include <fstream> #include <typeinfo> using namespace std; typedef std::vector< std::map< unsigned int, double> > STL_MAT_t; typedef std::vector<double> STL_VEC_t; typedef cusp::array1d<double, cusp::host_memory> HOST_VEC_t; typedef cusp::array1d<double, cusp::device_memory> DEVICE_VEC_t; typedef cusp::csr_matrix<unsigned int, double, cusp::host_memory> HOST_MAT_t; typedef cusp::csr_matrix<unsigned int, double, cusp::device_memory> DEVICE_MAT_t; EB::TimerList timers; //--------------------------------- // Perform GMRES on GPU void GMRES_Device(DEVICE_MAT_t& A, DEVICE_VEC_t& F, DEVICE_VEC_t& U_exact, DEVICE_VEC_t& U_approx_gpu) { #if 1 size_t restart = 300; int max_iters = 1000; double rel_tol = 1e-6; #else // Maximum number of iterations (total) size_t max_iters = 500; // restart the process every "restart" iterations size_t restart = 200; double rel_tol = 1e-8; #endif try { // cusp::convergence_monitor<double> monitor( F, max_iters, 0, 1e-3); cusp::default_monitor<double> monitor( F, max_iters, rel_tol ); //, max_iters, rel_tol);// , 1e-3); //cusp::default_monitor<double> monitor( F, -1, rel_tol ); //, max_iters, rel_tol);// , 1e-3); std::cout << "GMRES Starting Residual Norm: " << monitor.residual_norm() << std::endl; // 1e-8, 10000, 300); int precondType = -1; switch (precondType) { case 0: { // Jacobi Preconditioning (DIAGONAL) // Probably wont work well for RBF-FD since we're not diagonally dominant cusp::precond::diagonal<double, cusp::device_memory> M(A); cusp::krylov::gmres(A, U_approx_gpu, F, restart, monitor, M); } break; case 1: { // Smoothed Aggregation (Algebraic MultiGrid. Works for Nonsym?) 
cusp::precond::smoothed_aggregation<int, double, cusp::device_memory> M(A); cusp::krylov::gmres(A, U_approx_gpu, F, restart, monitor, M); } break; #if 0 // ONLY SPD MATRICES case 0: // AINV using static dropping cusp::precond::scaled_bridson_ainv<double, cusp::device_memory> M(A, 0, 10); cusp::krylov::gmres(A, U_approx_gpu, F, restart, monitor, M); break; #endif #if 0 // ONLY SPD MATRICES case 1: // AINV using standard drop tolerance cusp::precond::scaled_bridson_ainv<double, cusp::device_memory> M(A, .1); cusp::krylov::gmres(A, U_approx_gpu, F, restart, monitor, M); break; #endif #if 0 // ONLY FOR SPD MATRICES case 2: // AINV using novel cusp dropping strategy (TODO: lookup) cusp::precond::bridson_ainv<double, cusp::device_memory> M(A, 0, -1, true, 2); cusp::krylov::gmres(A, U_approx_gpu, F, restart, monitor, M); #endif case 2: { // AINV using novel cusp dropping strategy // assumes that sparsity pattern of precond is same as A, plus // 2 extra nonzeros per row // VERY SLOW TO BUILD; DOES NOT CONVERGE cusp::precond::nonsym_bridson_ainv<double, cusp::device_memory> M(A, 0, -1, true, 2); cusp::krylov::gmres(A, U_approx_gpu, F, restart, monitor, M); } case 3: { // AINV using novel cusp dropping strategy // Assume 40 nonzeros per row, drop everthing else. // VERY SLOW TO BUILD; DOES NOT CONVERGE cusp::precond::nonsym_bridson_ainv<double, cusp::device_memory> M(A, 0.1, 10, false, 0); cusp::krylov::gmres(A, U_approx_gpu, F, restart, monitor, M); } default: // Solve unpreconditioned Au = F cusp::krylov::gmres(A, U_approx_gpu, F, restart, monitor); } cudaThreadSynchronize(); // monitor.print(); if (monitor.converged()) { std::cout << "\n[+++] Solver converged to " << monitor.relative_tolerance() << " relative tolerance"; std::cout << " after " << monitor.iteration_count() << " iterations" << std::endl << std::endl; } else { std::cout << "\n[XXX] Solver reached iteration limit " << monitor.iteration_limit() << " before converging"; std::cout << " to " << monitor.relative_tolerance() << " relative tolerance " << std::endl << std::endl; } std::cout << "GMRES Iterations: " << monitor.iteration_count() << std::endl; std::cout << "GMRES Iteration Limit: " << monitor.iteration_limit() << std::endl; std::cout << "GMRES Residual Norm: " << monitor.residual_norm() << std::endl; std::cout << "GMRES Relative Tol: " << monitor.relative_tolerance() << std::endl; std::cout << "GMRES Absolute Tol: " << monitor.absolute_tolerance() << std::endl; std::cout << "GMRES Target Residual (Abs + Rel*norm(F)): " << monitor.tolerance() << std::endl; } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory trying to compute GMRES: " << e.what() << std::endl; exit(-1); } catch(thrust::system_error &e) { std::cerr << "Some other error happened during GMRES: " << e.what() << std::endl; exit(-1); } try { typedef cusp::array1d<double, DEVICE_VEC_t>::view DEVICE_VEC_VIEW_t; DEVICE_VEC_VIEW_t U_approx_view(U_exact.begin()+(U_exact.size() - F.size()), U_exact.end()); DEVICE_VEC_t diff(U_approx_gpu); //cusp::blas::axpy(U_exact.begin()+(U_exact.size() - F.size()), U_exact.end(), diff.begin(), -1); cusp::blas::axpy(U_approx_view, diff, -1); std::cout << "Rel l1 Norm: " << cusp::blas::nrm1(diff) / cusp::blas::nrm1(U_exact) << std::endl; std::cout << "Rel l2 Norm: " << cusp::blas::nrm2(diff) / cusp::blas::nrm2(U_exact) << std::endl; std::cout << "Rel linf Norm: " << cusp::blas::nrmmax(diff) / cusp::blas::nrmmax(U_exact) << std::endl; } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory trying to compute Error Norms: " << 
e.what() << std::endl; exit(-1); } catch(thrust::system_error &e) { std::cerr << "Some other error happened during Error Norms: " << e.what() << std::endl; exit(-1); } } //--------------------------------- void assemble_System_Stokes( RBFFD& der, Grid& grid, HOST_MAT_t& A, HOST_VEC_t& F, HOST_VEC_t& U_exact){ double eta = 1.; //double Ra = 1.e6; // We have different nb_stencils and nb_nodes when we parallelize. The subblocks might not be full unsigned int nb_stencils = grid.getStencilsSize(); unsigned int nb_nodes = grid.getNodeListSize(); unsigned int max_stencil_size = grid.getMaxStencilSize(); unsigned int N = nb_nodes; // --------------------------------------------------- //------------- Fill the RHS of the System ------------- // This is our manufactured solution: SphericalHarmonic::Sph32 UU; SphericalHarmonic::Sph32105 VV; SphericalHarmonic::Sph32 WW; SphericalHarmonic::Sph32 PP; std::vector<NodeType>& nodes = grid.getNodeList(); //------------- Fill F ------------- // U for (unsigned int j = 0; j < N; j++) { unsigned int row_ind = j + 0*N; NodeType& node = nodes[j]; double Xx = node.x(); double Yy = node.y(); double Zz = node.z(); U_exact[row_ind] = UU.eval(Xx,Yy,Zz); F[row_ind] = -UU.lapl(Xx,Yy,Zz) + PP.d_dx(Xx,Yy,Zz); } #if 1 // V for (unsigned int j = 0; j < N; j++) { unsigned int row_ind = j + 1*N; NodeType& node = nodes[j]; double Xx = node.x(); double Yy = node.y(); double Zz = node.z(); //double rr = sqrt(node.x()*node.x() + node.y()*node.y() + node.z()*node.z()); //double dir = node.y(); // F[row_ind] = (Ra * Temperature(j) * dir) / rr; U_exact[row_ind] = VV.eval(Xx,Yy,Zz); F[row_ind] = -VV.lapl(Xx,Yy,Zz) + PP.d_dy(Xx,Yy,Zz); } // W for (unsigned int j = 0; j < N; j++) { unsigned int row_ind = j + 2*N; NodeType& node = nodes[j]; double Xx = node.x(); double Yy = node.y(); double Zz = node.z(); U_exact[row_ind] = WW.eval(Xx,Yy,Zz); F[row_ind] = -WW.lapl(Xx,Yy,Zz) + PP.d_dz(Xx,Yy,Zz); } // P for (unsigned int j = 0; j < N; j++) { unsigned int row_ind = j + 3*N; NodeType& node = nodes[j]; double Xx = node.x(); double Yy = node.y(); double Zz = node.z(); U_exact[row_ind] = PP.eval(Xx,Yy,Zz); F[row_ind] = UU.d_dx(Xx,Yy,Zz) + VV.d_dy(Xx,Yy,Zz) + WW.d_dz(Xx,Yy,Zz); } #endif // Sum of U F[4*N+0] = 0.; // Sum of V F[4*N+1] = 0.; // Sum of W F[4*N+2] = 0.; // Sum of P F[4*N+3] = 0.; unsigned int ind = 0; // ----------------- Fill LHS -------------------- // // U (block) row for (unsigned int i = 0; i < nb_stencils; i++) { StencilType& st = grid.getStencil(i); // TODO: change these to *SFC weights (when computed) double* ddx = der.getStencilWeights(RBFFD::XSFC, i); double* lapl = der.getStencilWeights(RBFFD::LSFC, i); unsigned int diag_row_ind = i + 0*N; A.row_offsets[diag_row_ind] = ind; for (unsigned int j = 0; j < st.size(); j++) { unsigned int diag_col_ind = st[j] + 0*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = -eta * lapl[j]; ind++; } for (unsigned int j = 0; j < st.size(); j++) { unsigned int diag_col_ind = st[j] + 3*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = ddx[j]; ind++; } // Added constraint to square mat and close nullspace A.column_indices[ind] = 4*N+0; A.values[ind] = 1; ind++; } // V (block) row for (unsigned int i = 0; i < nb_stencils; i++) { StencilType& st = grid.getStencil(i); // TODO: change these to *SFC weights (when computed) double* ddy = der.getStencilWeights(RBFFD::YSFC, i); double* lapl = der.getStencilWeights(RBFFD::LSFC, i); unsigned int diag_row_ind = i + 1*N; A.row_offsets[diag_row_ind] = ind; for (unsigned int j = 0; j < 
st.size(); j++) { unsigned int diag_col_ind = st[j] + 1*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = -eta * lapl[j]; ind++; } for (unsigned int j = 0; j < st.size(); j++) { unsigned int diag_col_ind = st[j] + 3*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = ddy[j]; ind++; } // Added constraint to square mat and close nullspace A.column_indices[ind] = 4*N+1; A.values[ind] = 1; ind++; } // W (block) row for (unsigned int i = 0; i < nb_stencils; i++) { StencilType& st = grid.getStencil(i); // TODO: change these to *SFC weights (when computed) double* ddz = der.getStencilWeights(RBFFD::ZSFC, i); double* lapl = der.getStencilWeights(RBFFD::LSFC, i); unsigned int diag_row_ind = i + 2*N; A.row_offsets[diag_row_ind] = ind; for (unsigned int j = 0; j < st.size(); j++) { unsigned int diag_col_ind = st[j] + 2*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = -eta * lapl[j]; ind++; } for (unsigned int j = 0; j < st.size(); j++) { unsigned int diag_col_ind = st[j] + 3*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = ddz[j]; ind++; } // Added constraint to square mat and close nullspace A.column_indices[ind] = 4*N+2; A.values[ind] = 1; ind++; } // P (block) row for (unsigned int i = 0; i < nb_stencils; i++) { StencilType& st = grid.getStencil(i); // TODO: change these to *SFC weights (when computed) double* ddx = der.getStencilWeights(RBFFD::XSFC, i); double* ddy = der.getStencilWeights(RBFFD::YSFC, i); double* ddz = der.getStencilWeights(RBFFD::ZSFC, i); unsigned int diag_row_ind = i + 3*N; A.row_offsets[diag_row_ind] = ind; for (unsigned int j = 0; j < st.size(); j++) { unsigned int diag_col_ind = st[j] + 0*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = ddx[j]; ind++; } for (unsigned int j = 0; j < st.size(); j++) { unsigned int diag_col_ind = st[j] + 1*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = ddy[j]; ind++; } for (unsigned int j = 0; j < st.size(); j++) { unsigned int diag_col_ind = st[j] + 2*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = ddz[j]; ind++; } // Added constraint to square mat and close nullspace A.column_indices[ind] = 4*N+3; A.values[ind] = 1; ind++; } // ------ EXTRA CONSTRAINT ROWS ----- unsigned int diag_row_ind = 4*N; A.row_offsets[diag_row_ind] = ind; // U for (unsigned int j = 0; j < N; j++) { unsigned int diag_col_ind = j + 0*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = 1; ind++; } diag_row_ind++; A.row_offsets[diag_row_ind] = ind; // V for (unsigned int j = 0; j < N; j++) { unsigned int diag_col_ind = j + 1*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = 1; ind++; } diag_row_ind++; A.row_offsets[diag_row_ind] = ind; // W for (unsigned int j = 0; j < N; j++) { unsigned int diag_col_ind = j + 2*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = 1; ind++; } diag_row_ind++; A.row_offsets[diag_row_ind] = ind; // P for (unsigned int j = 0; j < N; j++) { unsigned int diag_col_ind = j + 3*N; A.column_indices[ind] = diag_col_ind; A.values[ind] = 1; ind++; } // VERY IMPORTANT. UNSPECIFIED LAUNCH FAILURES ARE CAUSED BY FORGETTING THIS! 
A.row_offsets[4*N+4] = ind; } template <typename VecT> void write_to_file(VecT vec, std::string filename) { std::ofstream fout; fout.open(filename.c_str()); for (size_t i = 0; i < vec.size(); i++) { fout << std::setprecision(10) << vec[i] << std::endl; } fout.close(); std::cout << "Wrote " << filename << std::endl; } void write_System ( HOST_MAT_t& A, HOST_VEC_t& F, HOST_VEC_t& U_exact ) { write_to_file(F, "output/F.mtx"); write_to_file(U_exact, "output/U_exact.mtx"); cusp::io::write_matrix_market_file(A,"output/LHS.mtx"); } void write_Solution( Grid& grid, HOST_VEC_t& U_exact, DEVICE_VEC_t& U_approx_gpu ) { unsigned int nb_bnd = grid.getBoundaryIndicesSize(); // IF we want to write details we need to copy back to host. HOST_VEC_t U_approx(U_exact.size()); if (U_approx_gpu.size() == U_exact.size()) { thrust::copy(U_approx_gpu.begin(), U_approx_gpu.end(), U_approx.begin()); } else { thrust::copy(U_exact.begin(), U_exact.begin()+nb_bnd, U_approx.begin()); thrust::copy(U_approx_gpu.begin(), U_approx_gpu.end(), U_approx.begin()+nb_bnd); } write_to_file(U_approx, "output/U_gpu.mtx"); } //--------------------------------- void gpuTest(RBFFD& der, Grid& grid, int primeGPU=0) { unsigned int N = grid.getNodeListSize(); unsigned int n = grid.getMaxStencilSize(); unsigned int nrows = 4 * N + 4; unsigned int ncols = 4 * N + 4; unsigned int NNZ = 9*n*N+2*(4*N)+2*(3*N); char test_name[256]; char assemble_timer_name[256]; char copy_timer_name[512]; char test_timer_name[256]; if (primeGPU) { sprintf(test_name, "%u PRIMING THE GPU", N); sprintf(assemble_timer_name, "%u Primer Assemble", N); sprintf(copy_timer_name, "%u Primer Copy To CUSP_DEVICE_CSR", N); sprintf(test_timer_name, "%u Primer GMRES test", N); } else { sprintf(test_name, "%u GMRES GPU (CUSP_DEVICE_CSR)", N); sprintf(assemble_timer_name, "%u CUSP_HOST_CSR Assemble", N); sprintf(copy_timer_name, "%u CUSP_HOST_CSR Copy To CUSP_DEVICE_CSR", N); sprintf(test_timer_name, "%u GPU GMRES test", N); } if (!timers.contains(assemble_timer_name)) { timers[assemble_timer_name] = new EB::Timer(assemble_timer_name); } if (!timers.contains(copy_timer_name)) { timers[copy_timer_name] = new EB::Timer(copy_timer_name); } if (!timers.contains(test_timer_name)) { timers[test_timer_name] = new EB::Timer(test_timer_name); } std::cout << test_name << std::endl; // ----- ASSEMBLE ----- timers[assemble_timer_name]->start(); HOST_MAT_t* A = new HOST_MAT_t(nrows, ncols, NNZ); HOST_VEC_t* F = new HOST_VEC_t(nrows, 0); HOST_VEC_t* U_exact = new HOST_VEC_t(nrows, 0); assemble_System_Stokes(der, grid, *A, *F, *U_exact); timers[assemble_timer_name]->stop(); if (!primeGPU) { //write_System(*A, *F, *U_exact); } // ----- SOLVE ----- timers[copy_timer_name]->start(); DEVICE_MAT_t* A_gpu = new DEVICE_MAT_t(*A); DEVICE_VEC_t* F_gpu = new DEVICE_VEC_t(*F); DEVICE_VEC_t* U_exact_gpu = new DEVICE_VEC_t(*U_exact); DEVICE_VEC_t* U_approx_gpu = new DEVICE_VEC_t(F->size(), 0); timers[copy_timer_name]->stop(); timers[test_timer_name]->start(); // Use GMRES to solve A*u = F GMRES_Device(*A_gpu, *F_gpu, *U_exact_gpu, *U_approx_gpu); timers[test_timer_name]->stop(); if (!primeGPU) { write_Solution(grid, *U_exact, *U_approx_gpu); } // Cleanup delete(A); delete(A_gpu); delete(F); delete(U_exact); delete(F_gpu); delete(U_exact_gpu); delete(U_approx_gpu); } int main(void) { bool writeIntermediate = true; bool primed = false; std::vector<std::string> grids; //grids.push_back("~/GRIDS/md/md005.00036"); // grids.push_back("~/GRIDS/md/md165.27556"); //grids.push_back("~/GRIDS/md/md063.04096"); #if 1 
grids.push_back("~/GRIDS/md/md031.01024"); grids.push_back("~/GRIDS/md/md050.02601"); grids.push_back("~/GRIDS/md/md063.04096"); grids.push_back("~/GRIDS/md/md089.08100"); grids.push_back("~/GRIDS/md/md127.16384"); grids.push_back("~/GRIDS/md/md165.27556"); #endif #if 0 grids.push_back("~/GRIDS/geoff/scvtimersesh_100k_nodes.ascii"); grids.push_back("~/GRIDS/geoff/scvtimersesh_500k_nodes.ascii"); grids.push_back("~/GRIDS/geoff/scvtimersesh_100k_nodes.ascii"); grids.push_back("~/GRIDS/geoff/scvtimersesh_500k_nodes.ascii"); grids.push_back("~/GRIDS/geoff/scvtimersesh_1m_nodes.ascii"); #endif //grids.push_back("~/GRIDS/geoff/scvtimersesh_1m_nodes.ascii"); for (size_t i = 0; i < grids.size(); i++) { std::string& grid_name = grids[i]; std::string weight_timer_name = grid_name + " Calc Weights"; timers[weight_timer_name] = new EB::Timer(weight_timer_name.c_str()); // Get contours from rbfzone.blogspot.com to choose eps_c1 and eps_c2 based on stencil_size (n) unsigned int stencil_size = 40; double eps_c1 = 0.027; double eps_c2 = 0.274; GridReader* grid = new GridReader(grid_name, 4); grid->setMaxStencilSize(stencil_size); // We do not read until generate is called: Grid::GridLoadErrType err = grid->loadFromFile(); if (err == Grid::NO_GRID_FILES) { grid->generate(); // NOTE: We force at least one node in the domain to be a boundary. //----------------------------- // We will set the first node as a boundary/ground point. We know // the normal because we're on teh sphere centered at (0,0,0) unsigned int nodeIndex = 0; NodeType& node = grid->getNode(nodeIndex); Vec3 nodeNormal = node - Vec3(0,0,0); grid->appendBoundaryIndex(nodeIndex, nodeNormal); //----------------------------- if (writeIntermediate) { grid->writeToFile(); } } std::cout << "Generate Stencils\n"; Grid::GridLoadErrType st_err = grid->loadStencilsFromFile(); if (st_err == Grid::NO_STENCIL_FILES) { // grid->generateStencils(Grid::ST_BRUTE_FORCE); #if 1 grid->generateStencils(Grid::ST_KDTREE); #else grid->setNSHashDims(50, 50,50); grid->generateStencils(Grid::ST_HASH); #endif if (writeIntermediate) { grid->writeToFile(); } } std::cout << "Generate RBFFD Weights\n"; timers[weight_timer_name]->start(); RBFFD der(RBFFD::LSFC | RBFFD::XSFC | RBFFD::YSFC | RBFFD::ZSFC, grid, 3, 0); der.setEpsilonByParameters(eps_c1, eps_c2); int der_err = der.loadAllWeightsFromFile(); if (der_err) { der.computeAllWeightsForAllStencils(); timers[weight_timer_name]->stop(); #if 0 // Im finding that its more efficient to compute the weights than write and load from disk. if (writeIntermediate) { der.writeAllWeightsToFile(); } #endif } if (!primed) { std::cout << "\n\n"; cout << "Priming GPU with dummy operations (removes compile from benchmarks)\n"; gpuTest(der,*grid, 1); primed = true; std::cout << "\n\n"; } // No support for GMRES on the CPU yet. //cpuTest(der,*grid); gpuTest(der,*grid); delete(grid); } timers.printAll(); timers.writeToFile(); return EXIT_SUCCESS; }
84d4ce6a0074c5dbf479773add8be11b73319f44.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "im2col.h" #include "hip/hip_runtime.h" } // src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu // You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE __global__ void im2col_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float *data_col) { int index = blockIdx.x*blockDim.x+threadIdx.x; for(; index<n; index+=blockDim.x*gridDim.x) { int w_out = index % width_col; int h_index = index / width_col; int h_out = h_index % height_col; int channel_in = h_index / height_col; int channel_out = channel_in * ksize * ksize; int h_in = h_out * stride - pad; int w_in = w_out * stride - pad; float* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; const float* data_im_ptr = data_im; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i=0; i<ksize; ++i) { for (int j=0; j<ksize; ++j) { int h = h_in + i; int w = w_in + j; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0; //*data_col_ptr = data_im_ptr[ii * width + jj]; data_col_ptr += height_col * width_col; } } } } void im2col_gpu(float *im, int channels, int height, int width, int ksize, int stride, int pad, float *data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int num_kernels = channels * height_col * width_col; hipLaunchKernelGGL(( im2col_gpu_kernel), dim3((num_kernels+BLOCK-1)/BLOCK), dim3(BLOCK), 0, 0, num_kernels, im, height, width, ksize, pad, stride, height_col, width_col, data_col); }
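Since the file above is the hipify output of the CUDA source that follows, a minimal sketch of the launch-syntax mapping may help when reading the pair; dummy_kernel and launch_both_ways are placeholder names, not identifiers from either file.

#include <hip/hip_runtime.h>

__global__ void dummy_kernel(float *out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = 1.0f;
}

void launch_both_ways(float *d_out, int n)
{
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // HIP spelling, as emitted by hipify (shared-memory bytes = 0, default stream):
    hipLaunchKernelGGL(dummy_kernel, grid, block, 0, 0, d_out, n);
    // The equivalent CUDA spelling, as in the .cu file below, would be:
    //   dummy_kernel<<<grid, block>>>(d_out, n);
}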
84d4ce6a0074c5dbf479773add8be11b73319f44.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "im2col.h" #include "cuda.h" } // src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu // You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE __global__ void im2col_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float *data_col) { int index = blockIdx.x*blockDim.x+threadIdx.x; for(; index<n; index+=blockDim.x*gridDim.x) { int w_out = index % width_col; int h_index = index / width_col; int h_out = h_index % height_col; int channel_in = h_index / height_col; int channel_out = channel_in * ksize * ksize; int h_in = h_out * stride - pad; int w_in = w_out * stride - pad; float* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; const float* data_im_ptr = data_im; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i=0; i<ksize; ++i) { for (int j=0; j<ksize; ++j) { int h = h_in + i; int w = w_in + j; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0; //*data_col_ptr = data_im_ptr[ii * width + jj]; data_col_ptr += height_col * width_col; } } } } void im2col_gpu(float *im, int channels, int height, int width, int ksize, int stride, int pad, float *data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int num_kernels = channels * height_col * width_col; im2col_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK, BLOCK>>>( num_kernels, im, height, width, ksize, pad, stride, height_col, width_col, data_col); }
627bc3904e1a9e90d5511a72181970bbd6ded150.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include "CudaInfo.cuh" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #define SIZE 3 __host__ void matrix_viewold(float(*array)[SIZE], char *q) { // print the matrix to the screen int i, j; for (i = 0; i<SIZE; i++) { for (j = 0; j<SIZE; j++) printf(q, array[i][j]); puts(""); } puts(""); } __host__ void matrix_randold(float(*array)[SIZE]) { // fill the matrix with random values int i, j; for (i = 0; i<SIZE; i++) for (j = 0; j<SIZE; j++) array[i][j] = 1 + rand() % 16; } __device__ void subrowold(float(*array)[SIZE], int m, int n, float k) { // row subtraction on the device int g = blockIdx.y*blockDim.y + threadIdx.y; if (g<SIZE) array[m][g] -= k*array[n][g]; } __global__ void determinantold(float(*mtx)[SIZE]) { // kernel: reduce to triangular form int i = blockIdx.x*blockDim.x + threadIdx.x; int j; float coeff; for (j = 0; j<SIZE - 1; j++) { if (!mtx[j][j]) subrowold(mtx, j, SIZE - 1, 3); if (i >= j && i<SIZE - 1) { coeff = mtx[i + 1][j] / mtx[j][j]; subrowold(mtx, i + 1, j, coeff); } __syncthreads(); } } int CudaInfo::Opredelit(void) { int i; float mtx_h[SIZE][SIZE], (*mtx_d)[SIZE]; long double det; hipMalloc((void **)&mtx_d, sizeof(float)*SIZE*SIZE); // allocate memory on the device puts("Source matrix\n"); matrix_randold(mtx_h); hipMemcpy(mtx_d, mtx_h, sizeof(float)*SIZE*SIZE, // copy the array to device memory hipMemcpyHostToDevice); matrix_viewold(mtx_h, "| %.0f "); dim3 threadsPerBlock(16, 16); dim3 numBlock((SIZE + threadsPerBlock.x - 1) / threadsPerBlock.x, (SIZE + threadsPerBlock.y - 1) / threadsPerBlock.y); // round up so at least one block is launched determinantold << <numBlock, threadsPerBlock >> >(mtx_d); // kernel launch hipDeviceSynchronize(); hipMemcpy(mtx_h, mtx_d, sizeof(float)*SIZE*SIZE, // copy the array back from device memory hipMemcpyDeviceToHost); puts("Triangular form\n"); matrix_viewold(mtx_h, "| %.8f "); det = 1; for (i = 0; i<SIZE; i++) { printf("%.8f\n", mtx_h[i][i]); det *= mtx_h[i][i]; } printf("det=%.0Lf\n ", det); hipFree(mtx_d); getchar(); return 0; }
627bc3904e1a9e90d5511a72181970bbd6ded150.cu
#include <stdio.h> #include <stdlib.h> #include "CudaInfo.cuh" #include "cuda_runtime.h" #include "device_launch_parameters.h" #define SIZE 3 __host__ void matrix_viewold(float(*array)[SIZE], char *q) { // print the matrix to the screen int i, j; for (i = 0; i<SIZE; i++) { for (j = 0; j<SIZE; j++) printf(q, array[i][j]); puts(""); } puts(""); } __host__ void matrix_randold(float(*array)[SIZE]) { // fill the matrix with random values int i, j; for (i = 0; i<SIZE; i++) for (j = 0; j<SIZE; j++) array[i][j] = 1 + rand() % 16; } __device__ void subrowold(float(*array)[SIZE], int m, int n, float k) { // row subtraction on the device int g = blockIdx.y*blockDim.y + threadIdx.y; if (g<SIZE) array[m][g] -= k*array[n][g]; } __global__ void determinantold(float(*mtx)[SIZE]) { // kernel: reduce to triangular form int i = blockIdx.x*blockDim.x + threadIdx.x; int j; float coeff; for (j = 0; j<SIZE - 1; j++) { if (!mtx[j][j]) subrowold(mtx, j, SIZE - 1, 3); if (i >= j && i<SIZE - 1) { coeff = mtx[i + 1][j] / mtx[j][j]; subrowold(mtx, i + 1, j, coeff); } __syncthreads(); } } int CudaInfo::Opredelit(void) { int i; float mtx_h[SIZE][SIZE], (*mtx_d)[SIZE]; long double det; cudaMalloc((void **)&mtx_d, sizeof(float)*SIZE*SIZE); // allocate memory on the device puts("Source matrix\n"); matrix_randold(mtx_h); cudaMemcpy(mtx_d, mtx_h, sizeof(float)*SIZE*SIZE, // copy the array to device memory cudaMemcpyHostToDevice); matrix_viewold(mtx_h, "| %.0f "); dim3 threadsPerBlock(16, 16); dim3 numBlock((SIZE + threadsPerBlock.x - 1) / threadsPerBlock.x, (SIZE + threadsPerBlock.y - 1) / threadsPerBlock.y); // round up so at least one block is launched determinantold << <numBlock, threadsPerBlock >> >(mtx_d); // kernel launch cudaThreadSynchronize(); cudaMemcpy(mtx_h, mtx_d, sizeof(float)*SIZE*SIZE, // copy the array back from device memory cudaMemcpyDeviceToHost); puts("Triangular form\n"); matrix_viewold(mtx_h, "| %.8f "); det = 1; for (i = 0; i<SIZE; i++) { printf("%.8f\n", mtx_h[i][i]); det *= mtx_h[i][i]; } printf("det=%.0Lf\n ", det); cudaFree(mtx_d); getchar(); return 0; }
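A host-side reference (hypothetical helper, not part of the files above) makes the result easy to cross-check: the kernel's answer is the product of the diagonal after the triangular reduction, and plain Gaussian elimination with partial pivoting gives the same determinant up to rounding. SIZE is the macro defined in the files above.

#include <cmath>

static long double determinant_cpu_ref(const float in[SIZE][SIZE])
{
    double m[SIZE][SIZE];
    for (int i = 0; i < SIZE; ++i)
        for (int j = 0; j < SIZE; ++j)
            m[i][j] = in[i][j];

    long double det = 1;
    for (int k = 0; k < SIZE; ++k) {
        // Partial pivoting keeps the reference numerically stable.
        int piv = k;
        for (int r = k + 1; r < SIZE; ++r)
            if (std::fabs(m[r][k]) > std::fabs(m[piv][k])) piv = r;
        if (piv != k) {
            for (int j = 0; j < SIZE; ++j) { double t = m[k][j]; m[k][j] = m[piv][j]; m[piv][j] = t; }
            det = -det;                      // a row swap flips the sign
        }
        if (m[k][k] == 0.0) return 0.0L;     // singular matrix
        det *= m[k][k];
        for (int r = k + 1; r < SIZE; ++r) {
            double coeff = m[r][k] / m[k][k];
            for (int j = k; j < SIZE; ++j) m[r][j] -= coeff * m[k][j];
        }
    }
    return det;
}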
1c53874ed4a80b330aa1bd814717e20f4ed687b9.hip
// !!! This is a file automatically generated by hipify!!! #include <chrono> #include <iostream> #include <random> #include <vector> #include <functional> #include <hip/hip_runtime.h> #include "a3.hpp" int main(int argc, char* argv[]) { if (argc != 3) { std::cout << "usage: " << argv[0] << " n h" << std::endl; return -1; } int n = std::atoi(argv[1]); float h = std::atof(argv[2]); if (n < 32) { std::cout << "hey, n is too small even for debugging!" << std::endl; return -1; } if (h < 0.00001) { std::cout << "this bandwidth is too small" << std::endl; return -1; } // in and out (in is set to 1s for fun) std::vector<float> x(n); std::vector<float> y(n, 0.0); std::random_device rd; std::mt19937 gen(rd()); std::lognormal_distribution<float> N(0.0, 1.0); // std::generate(std::begin(x), std::end(x), std::bind(N, gen)); auto rng = std::bind(N,gen); for (auto& e : x) e = rng(); // now running your awesome code from a3.hpp auto t0 = std::chrono::system_clock::now(); gaussian_kde(n, h, x, y); auto t1 = std::chrono::system_clock::now(); auto elapsed_par = std::chrono::duration<double>(t1 - t0); std::cout << "Tp: " << elapsed_par.count() << "s" << std::endl; return 0; } // main
1c53874ed4a80b330aa1bd814717e20f4ed687b9.cu
#include <chrono> #include <iostream> #include <random> #include <vector> #include <functional> #include <cuda_runtime.h> #include "a3.hpp" int main(int argc, char* argv[]) { if (argc != 3) { std::cout << "usage: " << argv[0] << " n h" << std::endl; return -1; } int n = std::atoi(argv[1]); float h = std::atof(argv[2]); if (n < 32) { std::cout << "hey, n is too small even for debugging!" << std::endl; return -1; } if (h < 0.00001) { std::cout << "this bandwidth is too small" << std::endl; return -1; } // in and out (in is set to 1s for fun) std::vector<float> x(n); std::vector<float> y(n, 0.0); std::random_device rd; std::mt19937 gen(rd()); std::lognormal_distribution<float> N(0.0, 1.0); // std::generate(std::begin(x), std::end(x), std::bind(N, gen)); auto rng = std::bind(N,gen); for (auto& e : x) e = rng(); // now running your awesome code from a3.hpp auto t0 = std::chrono::system_clock::now(); gaussian_kde(n, h, x, y); auto t1 = std::chrono::system_clock::now(); auto elapsed_par = std::chrono::duration<double>(t1 - t0); std::cout << "Tp: " << elapsed_par.count() << "s" << std::endl; return 0; } // main
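The kernel density estimate being timed lives in a3.hpp, which is not shown here, so the following is only a sketch of the standard quantity a Gaussian KDE computes, y_i = (1/(n*h)) * sum_j exp(-((x_i - x_j)/h)^2 / 2) / sqrt(2*pi); the O(n^2) CPU loop below is a hypothetical reference, not the assignment's GPU implementation.

#include <cmath>
#include <vector>

static void gaussian_kde_cpu_ref(int n, float h, const std::vector<float>& x, std::vector<float>& y)
{
    const float inv_sqrt_2pi = 0.3989422804f;    // 1 / sqrt(2*pi)
    for (int i = 0; i < n; ++i) {
        float s = 0.0f;
        for (int j = 0; j < n; ++j) {
            float u = (x[i] - x[j]) / h;
            s += std::exp(-0.5f * u * u);        // Gaussian kernel, unnormalized
        }
        y[i] = s * inv_sqrt_2pi / (n * h);       // normalize by n * h * sqrt(2*pi)
    }
}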
124808162b47f762e131c4bae934df8b88de225f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <stddef.h> #include <limits.h> __global__ void fun(int *z){ int x = 5; int a[-x]; *z = a[-x]; } int main(void) { int z; int *dev_z; hipMalloc((void**)&dev_z, sizeof(int)); hipLaunchKernelGGL(( fun), dim3(1),dim3(1), 0, 0, dev_z); hipMemcpy(&z, dev_z, sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_z); return 0; } // Compile error test; gcc:0; clang: segmentation fault: 11; nvcc: error: expression must have a constant value
124808162b47f762e131c4bae934df8b88de225f.cu
#include <stdlib.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <stddef.h> #include <limits.h> __global__ void fun(int *z){ int x = 5; int a[-x]; *z = a[-x]; } int main(void) { int z; int *dev_z; cudaMalloc((void**)&dev_z, sizeof(int)); fun<<<1,1>>>(dev_z); cudaMemcpy(&z, dev_z, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_z); return 0; } //编译错误; gcc:0;clang:segmentation fault: 11;nvcc: error: expression must have a constant value;
2673034817709b209791ccdb8d1bb32fdda57a09.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--gridDim=6624 --blockDim=256 #include "common.h" __device__ static __attribute__((always_inline)) uint4 scan4Inclusive(uint4 idata4, volatile uint *s_Data, uint size) { //Level-0 inclusive scan idata4.y += idata4.x; idata4.z += idata4.y; idata4.w += idata4.z; //Level-1 exclusive scan uint oval = scan1Exclusive(idata4.w, s_Data, size / 4); idata4.x += oval; idata4.y += oval; idata4.z += oval; idata4.w += oval; return idata4; } __device__ static __attribute__((always_inline)) uint4 scan4Exclusive(uint4 idata4, volatile uint *s_Data, uint size) { uint4 odata4 = scan4Inclusive(idata4, s_Data, size); odata4.x -= idata4.x; odata4.y -= idata4.y; odata4.z -= idata4.z; odata4.w -= idata4.w; return odata4; } __global__ void scanExclusiveShared( uint4 *d_Dst, uint4 *d_Src, uint size ) { __requires((size & (size - 1)) == 0); __shared__ uint s_Data[2 * THREADBLOCK_SIZE]; uint pos = blockIdx.x * blockDim.x + threadIdx.x; //Load data uint4 idata4 = d_Src[pos]; //Calculate exclusive scan uint4 odata4 = scan4Exclusive(idata4, s_Data, size); //Write back d_Dst[pos] = odata4; }
2673034817709b209791ccdb8d1bb32fdda57a09.cu
//pass //--gridDim=6624 --blockDim=256 #include "common.h" __device__ static __attribute__((always_inline)) uint4 scan4Inclusive(uint4 idata4, volatile uint *s_Data, uint size) { //Level-0 inclusive scan idata4.y += idata4.x; idata4.z += idata4.y; idata4.w += idata4.z; //Level-1 exclusive scan uint oval = scan1Exclusive(idata4.w, s_Data, size / 4); idata4.x += oval; idata4.y += oval; idata4.z += oval; idata4.w += oval; return idata4; } __device__ static __attribute__((always_inline)) uint4 scan4Exclusive(uint4 idata4, volatile uint *s_Data, uint size) { uint4 odata4 = scan4Inclusive(idata4, s_Data, size); odata4.x -= idata4.x; odata4.y -= idata4.y; odata4.z -= idata4.z; odata4.w -= idata4.w; return odata4; } __global__ void scanExclusiveShared( uint4 *d_Dst, uint4 *d_Src, uint size ) { __requires((size & (size - 1)) == 0); __shared__ uint s_Data[2 * THREADBLOCK_SIZE]; uint pos = blockIdx.x * blockDim.x + threadIdx.x; //Load data uint4 idata4 = d_Src[pos]; //Calculate exclusive scan uint4 odata4 = scan4Exclusive(idata4, s_Data, size); //Write back d_Dst[pos] = odata4; }
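As a reference for what the kernel produces per scanned segment, an exclusive prefix sum simply shifts the running total by one element; for example, input 3 1 7 0 4 1 6 3 scans to 0 3 4 11 11 15 16 22. The CPU routine below is a hypothetical check over the flattened uint stream of one segment, not part of the file above.

#include <cstddef>
#include <cstdint>
#include <vector>

static std::vector<uint32_t> exclusive_scan_cpu_ref(const std::vector<uint32_t>& in)
{
    std::vector<uint32_t> out(in.size());
    uint32_t running = 0;
    for (std::size_t i = 0; i < in.size(); ++i) {
        out[i] = running;      // each output gets the sum of all earlier inputs
        running += in[i];
    }
    return out;
}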
98948666581a5f8e7a875bb4b6cfdbe675093121.hip
// !!! This is a file automatically generated by hipify!!! //nvcc SeayJohnnyHW5.cu -o SeayJohnnyHW5 -lglut -lGL -lm #include <GL/glut.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include "../../headers/arrays.h" #include "../../headers/drawing.h" #include "../../headers/density.cuh" #include "../../headers/rendering.h" #define DIM 1024 #define NODES 5000 float2 *nodes = (float2*)malloc((NODES)*sizeof(float2)); float *pixels; float *buffer; int trigger = 1; float rnd(float x) { return(x*rand() / RAND_MAX); } void display() { glClear(GL_COLOR_BUFFER_BIT); int b = 32; drawDensity(nodes, NODES, b, 1.0); drawPoints(nodes, NODES, 5.0, NULL); drawGrid(2.0/b, 2.0/b, 1.0); glFlush(); } int main(int argc, char** argv) { srand( time(NULL) ); for(int i = 0; i < NODES; i++) { nodes[i].x = rnd(2.0) - 1.0; nodes[i].y = rnd(2.0) - 1.0; } pixels = (float*)malloc(DIM*DIM*3*sizeof(float)); hipMalloc(&buffer, DIM*DIM*3*sizeof(float)); // Initialize OpenGL glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE | GLUT_MULTISAMPLE); glutInitWindowSize(DIM, DIM); glutCreateWindow("GPU | Time to render:\t---"); glutDisplayFunc(display); glClearColor(0.0, 0.0, 0.0, 0.1); glEnable(GL_MULTISAMPLE_ARB); glEnable(GL_POINT_SMOOTH); glHint(GL_POINT_SMOOTH_HINT, GL_NICEST); glEnable(GL_BLEND); glDisable(GL_DEPTH_TEST); glutMainLoop(); free(pixels); free(nodes); hipFree(buffer); return(0); }
98948666581a5f8e7a875bb4b6cfdbe675093121.cu
//nvcc SeayJohnnyHW5.cu -o SeayJohnnyHW5 -lglut -lGL -lm #include <GL/glut.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include "../../headers/arrays.h" #include "../../headers/drawing.h" #include "../../headers/density.cuh" #include "../../headers/rendering.h" #define DIM 1024 #define NODES 5000 float2 *nodes = (float2*)malloc((NODES)*sizeof(float2)); float *pixels; float *buffer; int trigger = 1; float rnd(float x) { return(x*rand() / RAND_MAX); } void display() { glClear(GL_COLOR_BUFFER_BIT); int b = 32; drawDensity(nodes, NODES, b, 1.0); drawPoints(nodes, NODES, 5.0, NULL); drawGrid(2.0/b, 2.0/b, 1.0); glFlush(); } int main(int argc, char** argv) { srand( time(NULL) ); for(int i = 0; i < NODES; i++) { nodes[i].x = rnd(2.0) - 1.0; nodes[i].y = rnd(2.0) - 1.0; } pixels = (float*)malloc(DIM*DIM*3*sizeof(float)); cudaMalloc(&buffer, DIM*DIM*3*sizeof(float)); // Initialize OpenGL glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE | GLUT_MULTISAMPLE); glutInitWindowSize(DIM, DIM); glutCreateWindow("GPU | Time to render:\t---"); glutDisplayFunc(display); glClearColor(0.0, 0.0, 0.0, 0.1); glEnable(GL_MULTISAMPLE_ARB); glEnable(GL_POINT_SMOOTH); glHint(GL_POINT_SMOOTH_HINT, GL_NICEST); glEnable(GL_BLEND); glDisable(GL_DEPTH_TEST); glutMainLoop(); free(pixels); free(nodes); cudaFree(buffer); return(0); }
518e40d2c43bdf88ed2b73f67ea59f26930dc440.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zbajac_csr.cu normal z -> d, Fri Jul 18 17:34:28 2014 */ #include "common_magma.h" #include "../include/magmasparse_d.h" #include "../../include/magma.h" #define PRECISION_d #define BLOCKSIZE 256 __global__ void magma_dbajac_csr_ls_kernel(int localiters, int n, double *valD, magma_index_t *rowD, magma_index_t *colD, double *valR, magma_index_t *rowR, magma_index_t *colR, const double * __restrict__ b, double *x ){ int ind_diag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; if(index<n){ start=rowR[index]; end =rowR[index+1]; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; start=rowD[index]; end =rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; /* add more local iterations */ __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - ind_diag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } x[index] = local_x[threadIdx.x]; } } __global__ void magma_dbajac_csr_kernel( int n, double *valD, magma_index_t *rowD, magma_index_t *colD, double *valR, magma_index_t *rowR, magma_index_t *colR, double *b, double *x ){ int index = blockIdx.x*blockDim.x+threadIdx.x; int i, start, end; if(index<n){ double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif start=rowR[index]; end =rowR[index+1]; #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; v = bl - v; start=rowD[index]; end =rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; x[index] = x[index] + ( v - tmp ) / (valD[start]); } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. 
Arguments --------- @param localiters magma_int_t number of local Jacobi-like updates @param D magma_d_sparse_matrix input matrix with diagonal blocks @param R magma_d_sparse_matrix input matrix with non-diagonal parts @param b magma_d_vector RHS @param x magma_d_vector* iterate/solution @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dbajac_csr( magma_int_t localiters, magma_d_sparse_matrix D, magma_d_sparse_matrix R, magma_d_vector b, magma_d_vector *x ){ int blocksize1 = BLOCKSIZE; int blocksize2 = 1; int dimgrid1 = ( D.num_rows + blocksize1 -1 ) / blocksize1; int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); if( R.nnz > 0 ){ if( localiters == 1 ) hipLaunchKernelGGL(( magma_dbajac_csr_kernel), dim3(grid), dim3(block), 0, magma_stream , D.num_rows, D.val, D.row, D.col, R.val, R.row, R.col, b.val, x->val ); else hipLaunchKernelGGL(( magma_dbajac_csr_ls_kernel), dim3(grid), dim3(block), 0, magma_stream , localiters, D.num_rows, D.val, D.row, D.col, R.val, R.row, R.col, b.val, x->val ); } else{ printf("error: all elements in diagonal block.\n"); } return MAGMA_SUCCESS; }
518e40d2c43bdf88ed2b73f67ea59f26930dc440.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zbajac_csr.cu normal z -> d, Fri Jul 18 17:34:28 2014 */ #include "common_magma.h" #include "../include/magmasparse_d.h" #include "../../include/magma.h" #define PRECISION_d #define BLOCKSIZE 256 __global__ void magma_dbajac_csr_ls_kernel(int localiters, int n, double *valD, magma_index_t *rowD, magma_index_t *colD, double *valR, magma_index_t *rowR, magma_index_t *colR, const double * __restrict__ b, double *x ){ int ind_diag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; if(index<n){ start=rowR[index]; end =rowR[index+1]; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; start=rowD[index]; end =rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; /* add more local iterations */ __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - ind_diag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } x[index] = local_x[threadIdx.x]; } } __global__ void magma_dbajac_csr_kernel( int n, double *valD, magma_index_t *rowD, magma_index_t *colD, double *valR, magma_index_t *rowR, magma_index_t *colR, double *b, double *x ){ int index = blockIdx.x*blockDim.x+threadIdx.x; int i, start, end; if(index<n){ double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif start=rowR[index]; end =rowR[index+1]; #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; v = bl - v; start=rowD[index]; end =rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; x[index] = x[index] + ( v - tmp ) / (valD[start]); } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. 
Arguments --------- @param localiters magma_int_t number of local Jacobi-like updates @param D magma_d_sparse_matrix input matrix with diagonal blocks @param R magma_d_sparse_matrix input matrix with non-diagonal parts @param b magma_d_vector RHS @param x magma_d_vector* iterate/solution @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dbajac_csr( magma_int_t localiters, magma_d_sparse_matrix D, magma_d_sparse_matrix R, magma_d_vector b, magma_d_vector *x ){ int blocksize1 = BLOCKSIZE; int blocksize2 = 1; int dimgrid1 = ( D.num_rows + blocksize1 -1 ) / blocksize1; int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); if( R.nnz > 0 ){ if( localiters == 1 ) magma_dbajac_csr_kernel<<< grid, block, 0, magma_stream >>> ( D.num_rows, D.val, D.row, D.col, R.val, R.row, R.col, b.val, x->val ); else magma_dbajac_csr_ls_kernel<<< grid, block, 0, magma_stream >>> ( localiters, D.num_rows, D.val, D.row, D.col, R.val, R.row, R.col, b.val, x->val ); } else{ printf("error: all elements in diagonal block.\n"); } return MAGMA_SUCCESS; }
1b436844fcaf8b3e4c99ee69d89a1bfe9d517763.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" using namespace std; #define TILE 16 /* LU Decomposition using Shared Memory \ \ CUDA \ \ \ \ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ //Perform one elimination step of the LU decomposition for pivot row 'index' __global__ void elim(double *A, int n, int index, int bsize){ extern __shared__ double pivot[]; int idThread=threadIdx.x; int idBlock=blockIdx.x; int blockSize=bsize; if(idThread==0){ for(int i=index;i<n;i++) pivot[i]=A[(index*n)+i]; } __syncthreads(); //Variables for pivot, row, start and end int pivotRow=(index*n); int currentRow=(((blockSize*idBlock) + idThread)*n); int start=currentRow+index; int end=currentRow+n; //If greater than pivot row, loop from start index + 1(next row) to end of column if(currentRow >pivotRow){ for(int i= start+1; i<end; ++i){ //Set the matrix value of next row and its column - pivot A[i]=A[i]-(A[start]*pivot[i-currentRow]); } } }
1b436844fcaf8b3e4c99ee69d89a1bfe9d517763.cu
#include "includes.h" using namespace std; #define TILE 16 /* LU Decomposition using Shared Memory \ \ CUDA \ \ \ \ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ //Initialize a 2D matrix __global__ void elim(double *A, int n, int index, int bsize){ extern __shared__ double pivot[]; int idThread=threadIdx.x; int idBlock=blockIdx.x; int blockSize=bsize; if(idThread==0){ for(int i=index;i<n;i++) pivot[i]=A[(index*n)+i]; } __syncthreads(); //Varitables for pivot, row, start and end int pivotRow=(index*n); int currentRow=(((blockSize*idBlock) + idThread)*n); int start=currentRow+index; int end=currentRow+n; //If greater than pivot row, loop from start index + 1(next row) to end of column if(currentRow >pivotRow){ for(int i= start+1; i<end; ++i){ //Set the matrix value of next row and its column - pivot A[i]=A[i]-(A[start]*pivot[i-currentRow]); } } }
80b491a37cbc87836468aec5094c04994a10dded.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2018 Stanford University, NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "realm_saxpy.h" __global__ void gpu_saxpy(const float alpha, //const int num_elements, Rect<1> bounds, AffineAccessor<float, 1> ra_x, AffineAccessor<float, 1> ra_y, AffineAccessor<float, 1> ra_z) // const float *x, const float *y, float *z) { int p = bounds.lo + (blockIdx.x * blockDim.x) + threadIdx.x; if (p <= bounds.hi) ra_z[p] = alpha * ra_x[p] + ra_y[p]; } __host__ void gpu_saxpy_task(const void *args, size_t arglen, const void *userdata, size_t userlen, Processor p) { assert(arglen == sizeof(SaxpyArgs)); const SaxpyArgs *saxpy_args = (const SaxpyArgs*)args; printf("Running GPU Saxpy Task\n\n"); // get affine accessors for each of our three instances AffineAccessor<float, 1> ra_x = AffineAccessor<float, 1>(saxpy_args->x_inst, FID_X); AffineAccessor<float, 1> ra_y = AffineAccessor<float, 1>(saxpy_args->y_inst, FID_Y); AffineAccessor<float, 1> ra_z = AffineAccessor<float, 1>(saxpy_args->z_inst, FID_Z); size_t num_elements = saxpy_args->bounds.volume(); size_t cta_threads = 256; size_t total_ctas = (num_elements + (cta_threads-1))/cta_threads; hipLaunchKernelGGL(( gpu_saxpy), dim3(total_ctas), dim3(cta_threads), 0, 0, saxpy_args->alpha, saxpy_args->bounds, ra_x, ra_y, ra_z); // LOOK: NO WAIT! :) }
80b491a37cbc87836468aec5094c04994a10dded.cu
/* Copyright 2018 Stanford University, NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "realm_saxpy.h" __global__ void gpu_saxpy(const float alpha, //const int num_elements, Rect<1> bounds, AffineAccessor<float, 1> ra_x, AffineAccessor<float, 1> ra_y, AffineAccessor<float, 1> ra_z) // const float *x, const float *y, float *z) { int p = bounds.lo + (blockIdx.x * blockDim.x) + threadIdx.x; if (p <= bounds.hi) ra_z[p] = alpha * ra_x[p] + ra_y[p]; } __host__ void gpu_saxpy_task(const void *args, size_t arglen, const void *userdata, size_t userlen, Processor p) { assert(arglen == sizeof(SaxpyArgs)); const SaxpyArgs *saxpy_args = (const SaxpyArgs*)args; printf("Running GPU Saxpy Task\n\n"); // get affine accessors for each of our three instances AffineAccessor<float, 1> ra_x = AffineAccessor<float, 1>(saxpy_args->x_inst, FID_X); AffineAccessor<float, 1> ra_y = AffineAccessor<float, 1>(saxpy_args->y_inst, FID_Y); AffineAccessor<float, 1> ra_z = AffineAccessor<float, 1>(saxpy_args->z_inst, FID_Z); size_t num_elements = saxpy_args->bounds.volume(); size_t cta_threads = 256; size_t total_ctas = (num_elements + (cta_threads-1))/cta_threads; gpu_saxpy<<<total_ctas, cta_threads>>>(saxpy_args->alpha, saxpy_args->bounds, ra_x, ra_y, ra_z); // LOOK: NO WAIT! :) }
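For completeness, the arithmetic the kernel performs is plain saxpy over the bounded index space; a trivial CPU reference (illustration only, not part of the Realm example above) is:

#include <cstddef>

static void saxpy_cpu_ref(float alpha, const float *x, const float *y, float *z, std::size_t n)
{
    for (std::size_t i = 0; i < n; ++i)
        z[i] = alpha * x[i] + y[i];    // same update as ra_z[p] = alpha * ra_x[p] + ra_y[p]
}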
6966b385574dce9d706be53f6a3116f5777b5339.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file SegmAccuracyLayer.cu * @brief SegmAccuracyLayer * * @author Abhijit Kundu */ #include "SegmAccuracyLayer.h" namespace caffe { template<typename ImageScalar, typename CMScalar> __global__ void confusion_matrix_ssa1d(const int num_of_pixels, const ImageScalar* const gt_image, const ImageScalar* const pred_image, const int num_of_labels, CMScalar* d_conf_mat) { const int num_of_bins = num_of_labels * num_of_labels; // Initialize shared mem extern __shared__ CMScalar smem[]; if (threadIdx.x < num_of_bins) smem[threadIdx.x] = 0; __syncthreads(); // stride length const int stride = blockDim.x * gridDim.x; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_of_pixels; i += stride) { int gt_label = static_cast<int>(gt_image[i]); int pred_label = static_cast<int>(pred_image[i]); atomicAdd(&smem[gt_label * num_of_labels + pred_label] , 1 ); } __syncthreads(); if (threadIdx.x < num_of_bins) atomicAdd(&(d_conf_mat[threadIdx.x]), smem[threadIdx.x]); } template<typename ImageScalar, typename CMScalar> __global__ void confusion_matrix_ssa1d(const int num_of_pixels, const ImageScalar* const gt_image, const ImageScalar* const pred_image, const int num_of_labels, const CMScalar* const label_map, CMScalar* d_conf_mat) { const int num_of_bins = num_of_labels * num_of_labels; // Initialize shared mem extern __shared__ CMScalar smem[]; if (threadIdx.x < num_of_bins) smem[threadIdx.x] = 0; __syncthreads(); // stride length const int stride = blockDim.x * gridDim.x; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_of_pixels; i += stride) { int gt_label = label_map[static_cast<int>(gt_image[i])]; int pred_label = label_map[static_cast<int>(pred_image[i])]; atomicAdd(&smem[gt_label * num_of_labels + pred_label] , 1 ); } __syncthreads(); if (threadIdx.x < num_of_bins) atomicAdd(&(d_conf_mat[threadIdx.x]), smem[threadIdx.x]); } template <typename Scalar> __inline__ __device__ Scalar warpReduceSum(Scalar val) { for (unsigned int offset = warpSize/2; offset > 0; offset /= 2) val += __shfl_down(val, offset); return val; } __global__ void histogram_from_confidence_matrix(const int num_of_labels, const int* const conf_mat, int* tp_hist, int* gt_hist, int* pred_hist) { int row_sum = 0; int col_sum = 0; if ((threadIdx.x < num_of_labels) && (threadIdx.y < num_of_labels)) { row_sum = conf_mat[threadIdx.y * num_of_labels + threadIdx.x]; col_sum = conf_mat[threadIdx.x * num_of_labels + threadIdx.y]; if (threadIdx.x == threadIdx.y) { tp_hist[threadIdx.x] = col_sum; } } row_sum = warpReduceSum(row_sum); col_sum = warpReduceSum(col_sum); if (threadIdx.x == 0 && (threadIdx.y < num_of_labels)) { gt_hist[threadIdx.y] = row_sum; pred_hist[threadIdx.y] = col_sum; } } template <typename HistScalar, typename Scalar> __global__ void mean_class_iou_from_histogram(const int num_of_labels, const HistScalar* const tp_hist, const HistScalar* const gt_hist, const HistScalar* const pred_hist, Scalar* mean_class_iou) { Scalar c_iou = 0; if (threadIdx.x < num_of_labels) { c_iou = Scalar(tp_hist[threadIdx.x]) / (gt_hist[threadIdx.x] + pred_hist[threadIdx.x]- tp_hist[threadIdx.x]); } c_iou = warpReduceSum(c_iou); if (threadIdx.x==0) mean_class_iou[0] = c_iou / num_of_labels; } template <typename HistScalar, typename Scalar> __global__ void mean_class_acc_from_histogram(const int num_of_labels, const HistScalar* const tp_hist, const HistScalar* const pred_hist, Scalar* mean_class_acc) { float c_acc = 0; if (threadIdx.x < 
num_of_labels) { c_acc = Scalar(tp_hist[threadIdx.x]) / pred_hist[threadIdx.x]; } c_acc = warpReduceSum(c_acc); if (threadIdx.x==0) mean_class_acc[0] = c_acc / num_of_labels; } template <typename HistScalar, typename Scalar> __global__ void global_pixel_acc_from_histogram(const int num_of_labels, const HistScalar* const tp_hist, const HistScalar* const gt_hist, Scalar* global_pixel_acc) { int tp_sum = 0; int gt_sum = 0; if (threadIdx.x < num_of_labels) { tp_sum = tp_hist[threadIdx.x]; gt_sum = gt_hist[threadIdx.x]; } tp_sum = warpReduceSum(tp_sum); gt_sum = warpReduceSum(gt_sum); if (threadIdx.x==0) global_pixel_acc[0] = Scalar(tp_sum) / gt_sum; } // simple routine to print contents of a vector template <typename Vector> void print_vector(const std::string& name, const Vector& v) { typedef typename Vector::value_type T; std::cout << name << " = ["; thrust::copy(v.begin(), v.end(), std::ostream_iterator<T>(std::cout, ", ")); std::cout << "\b\b]" << std::endl; } template <typename Dtype> void SegmAccuracyLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if (reset_) { caffe_gpu_set(confidence_matrix_.count(), 0, confidence_matrix_.mutable_gpu_data()); } CHECK_LE(num_of_labels_, 32) << "GPU implementation does not support more than 32 labels"; const Dtype* pred_labels_data = bottom[0]->gpu_data(); const Dtype* gt_labels_data = bottom[1]->gpu_data(); const int* label_map_data = label_map_.gpu_data(); int* confidence_matrix_data = confidence_matrix_.mutable_gpu_data(); // Since confidence_matrix_.mutable_gpu_diff() is not used we repurpose it for histogram int* tp_hist = confidence_matrix_.mutable_gpu_diff(); int* gt_hist = tp_hist + num_of_labels_; int* pred_hist = tp_hist + 2 * num_of_labels_; // Update confidence matrix { const int image_blob_size = bottom[0]->count(); const int blocksize = 1024; // Minimum: num_of_labels_ * num_of_labels_ const int gridsize = (image_blob_size + blocksize - 1) / blocksize; assert(blocksize >= num_of_labels_ * num_of_labels_); const int shared_mem_size = num_of_labels_ * num_of_labels_ * sizeof(int); if (label_map_.count()) hipLaunchKernelGGL(( confusion_matrix_ssa1d), dim3(gridsize), dim3(blocksize), shared_mem_size, 0, image_blob_size, gt_labels_data, pred_labels_data, num_of_labels_, label_map_data, confidence_matrix_data); else hipLaunchKernelGGL(( confusion_matrix_ssa1d), dim3(gridsize), dim3(blocksize), shared_mem_size, 0, image_blob_size, gt_labels_data, pred_labels_data, num_of_labels_, confidence_matrix_data); } // Compute histograms from conf matrix { dim3 blockdim(32, 32); hipLaunchKernelGGL(( histogram_from_confidence_matrix), dim3(1), dim3(blockdim), 0, 0, num_of_labels_, confidence_matrix_data, tp_hist, gt_hist, pred_hist); } for (std::size_t i =0; i < metrics_.size(); ++i) { switch (metrics_[i]) { case SegmAccuracyParameter_AccuracyMetric_PixelAccuracy: hipLaunchKernelGGL(( global_pixel_acc_from_histogram), dim3(1), dim3(32), 0, 0, num_of_labels_, tp_hist, gt_hist, top[i]->mutable_gpu_data()); break; case SegmAccuracyParameter_AccuracyMetric_ClassAccuracy: hipLaunchKernelGGL(( mean_class_acc_from_histogram), dim3(1), dim3(32), 0, 0, num_of_labels_, tp_hist, pred_hist, top[i]->mutable_gpu_data()); break; case SegmAccuracyParameter_AccuracyMetric_ClassIoU: hipLaunchKernelGGL(( mean_class_iou_from_histogram), dim3(1), dim3(32), 0, 0, num_of_labels_, tp_hist, gt_hist, pred_hist, top[i]->mutable_gpu_data()); break; default: LOG(FATAL) << "Unknown Accuracy metric."; } } } 
INSTANTIATE_LAYER_GPU_FUNCS(SegmAccuracyLayer); } // namespace caffe
6966b385574dce9d706be53f6a3116f5777b5339.cu
/** * @file SegmAccuracyLayer.cu * @brief SegmAccuracyLayer * * @author Abhijit Kundu */ #include "SegmAccuracyLayer.h" namespace caffe { template<typename ImageScalar, typename CMScalar> __global__ void confusion_matrix_ssa1d(const int num_of_pixels, const ImageScalar* const gt_image, const ImageScalar* const pred_image, const int num_of_labels, CMScalar* d_conf_mat) { const int num_of_bins = num_of_labels * num_of_labels; // Initialize shared mem extern __shared__ CMScalar smem[]; if (threadIdx.x < num_of_bins) smem[threadIdx.x] = 0; __syncthreads(); // stride length const int stride = blockDim.x * gridDim.x; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_of_pixels; i += stride) { int gt_label = static_cast<int>(gt_image[i]); int pred_label = static_cast<int>(pred_image[i]); atomicAdd(&smem[gt_label * num_of_labels + pred_label] , 1 ); } __syncthreads(); if (threadIdx.x < num_of_bins) atomicAdd(&(d_conf_mat[threadIdx.x]), smem[threadIdx.x]); } template<typename ImageScalar, typename CMScalar> __global__ void confusion_matrix_ssa1d(const int num_of_pixels, const ImageScalar* const gt_image, const ImageScalar* const pred_image, const int num_of_labels, const CMScalar* const label_map, CMScalar* d_conf_mat) { const int num_of_bins = num_of_labels * num_of_labels; // Initialize shared mem extern __shared__ CMScalar smem[]; if (threadIdx.x < num_of_bins) smem[threadIdx.x] = 0; __syncthreads(); // stride length const int stride = blockDim.x * gridDim.x; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_of_pixels; i += stride) { int gt_label = label_map[static_cast<int>(gt_image[i])]; int pred_label = label_map[static_cast<int>(pred_image[i])]; atomicAdd(&smem[gt_label * num_of_labels + pred_label] , 1 ); } __syncthreads(); if (threadIdx.x < num_of_bins) atomicAdd(&(d_conf_mat[threadIdx.x]), smem[threadIdx.x]); } template <typename Scalar> __inline__ __device__ Scalar warpReduceSum(Scalar val) { for (unsigned int offset = warpSize/2; offset > 0; offset /= 2) val += __shfl_down(val, offset); return val; } __global__ void histogram_from_confidence_matrix(const int num_of_labels, const int* const conf_mat, int* tp_hist, int* gt_hist, int* pred_hist) { int row_sum = 0; int col_sum = 0; if ((threadIdx.x < num_of_labels) && (threadIdx.y < num_of_labels)) { row_sum = conf_mat[threadIdx.y * num_of_labels + threadIdx.x]; col_sum = conf_mat[threadIdx.x * num_of_labels + threadIdx.y]; if (threadIdx.x == threadIdx.y) { tp_hist[threadIdx.x] = col_sum; } } row_sum = warpReduceSum(row_sum); col_sum = warpReduceSum(col_sum); if (threadIdx.x == 0 && (threadIdx.y < num_of_labels)) { gt_hist[threadIdx.y] = row_sum; pred_hist[threadIdx.y] = col_sum; } } template <typename HistScalar, typename Scalar> __global__ void mean_class_iou_from_histogram(const int num_of_labels, const HistScalar* const tp_hist, const HistScalar* const gt_hist, const HistScalar* const pred_hist, Scalar* mean_class_iou) { Scalar c_iou = 0; if (threadIdx.x < num_of_labels) { c_iou = Scalar(tp_hist[threadIdx.x]) / (gt_hist[threadIdx.x] + pred_hist[threadIdx.x]- tp_hist[threadIdx.x]); } c_iou = warpReduceSum(c_iou); if (threadIdx.x==0) mean_class_iou[0] = c_iou / num_of_labels; } template <typename HistScalar, typename Scalar> __global__ void mean_class_acc_from_histogram(const int num_of_labels, const HistScalar* const tp_hist, const HistScalar* const pred_hist, Scalar* mean_class_acc) { float c_acc = 0; if (threadIdx.x < num_of_labels) { c_acc = Scalar(tp_hist[threadIdx.x]) / pred_hist[threadIdx.x]; } c_acc = 
warpReduceSum(c_acc); if (threadIdx.x==0) mean_class_acc[0] = c_acc / num_of_labels; } template <typename HistScalar, typename Scalar> __global__ void global_pixel_acc_from_histogram(const int num_of_labels, const HistScalar* const tp_hist, const HistScalar* const gt_hist, Scalar* global_pixel_acc) { int tp_sum = 0; int gt_sum = 0; if (threadIdx.x < num_of_labels) { tp_sum = tp_hist[threadIdx.x]; gt_sum = gt_hist[threadIdx.x]; } tp_sum = warpReduceSum(tp_sum); gt_sum = warpReduceSum(gt_sum); if (threadIdx.x==0) global_pixel_acc[0] = Scalar(tp_sum) / gt_sum; } // simple routine to print contents of a vector template <typename Vector> void print_vector(const std::string& name, const Vector& v) { typedef typename Vector::value_type T; std::cout << name << " = ["; thrust::copy(v.begin(), v.end(), std::ostream_iterator<T>(std::cout, ", ")); std::cout << "\b\b]" << std::endl; } template <typename Dtype> void SegmAccuracyLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if (reset_) { caffe_gpu_set(confidence_matrix_.count(), 0, confidence_matrix_.mutable_gpu_data()); } CHECK_LE(num_of_labels_, 32) << "GPU implementation does not support more than 32 labels"; const Dtype* pred_labels_data = bottom[0]->gpu_data(); const Dtype* gt_labels_data = bottom[1]->gpu_data(); const int* label_map_data = label_map_.gpu_data(); int* confidence_matrix_data = confidence_matrix_.mutable_gpu_data(); // Since confidence_matrix_.mutable_gpu_diff() is not used we repurpose it for histogram int* tp_hist = confidence_matrix_.mutable_gpu_diff(); int* gt_hist = tp_hist + num_of_labels_; int* pred_hist = tp_hist + 2 * num_of_labels_; // Update confidence matrix { const int image_blob_size = bottom[0]->count(); const int blocksize = 1024; // Minimum: num_of_labels_ * num_of_labels_ const int gridsize = (image_blob_size + blocksize - 1) / blocksize; assert(blocksize >= num_of_labels_ * num_of_labels_); const int shared_mem_size = num_of_labels_ * num_of_labels_ * sizeof(int); if (label_map_.count()) confusion_matrix_ssa1d<<<gridsize, blocksize, shared_mem_size>>>(image_blob_size, gt_labels_data, pred_labels_data, num_of_labels_, label_map_data, confidence_matrix_data); else confusion_matrix_ssa1d<<<gridsize, blocksize, shared_mem_size>>>(image_blob_size, gt_labels_data, pred_labels_data, num_of_labels_, confidence_matrix_data); } // Compute histograms from conf matrix { dim3 blockdim(32, 32); histogram_from_confidence_matrix<<<1, blockdim>>>(num_of_labels_, confidence_matrix_data, tp_hist, gt_hist, pred_hist); } for (std::size_t i =0; i < metrics_.size(); ++i) { switch (metrics_[i]) { case SegmAccuracyParameter_AccuracyMetric_PixelAccuracy: global_pixel_acc_from_histogram<<<1, 32>>>(num_of_labels_, tp_hist, gt_hist, top[i]->mutable_gpu_data()); break; case SegmAccuracyParameter_AccuracyMetric_ClassAccuracy: mean_class_acc_from_histogram<<<1, 32>>>(num_of_labels_, tp_hist, pred_hist, top[i]->mutable_gpu_data()); break; case SegmAccuracyParameter_AccuracyMetric_ClassIoU: mean_class_iou_from_histogram<<<1, 32>>>(num_of_labels_, tp_hist, gt_hist, pred_hist, top[i]->mutable_gpu_data()); break; default: LOG(FATAL) << "Unknown Accuracy metric."; } } } INSTANTIATE_LAYER_GPU_FUNCS(SegmAccuracyLayer); } // namespace caffe
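Given the num_of_labels x num_of_labels confusion matrix the kernels accumulate (rows indexed by ground truth, columns by prediction), the three metrics are simple reductions: pixel accuracy = sum(TP)/sum(GT), mean class accuracy = mean over classes of TP_c/Pred_c, and mean class IoU = mean over classes of TP_c/(GT_c + Pred_c - TP_c). The routine below is a hypothetical CPU cross-check, not part of the layer; unlike the GPU reductions it guards against empty classes.

#include <vector>

struct SegmMetricsRef { double pixel_acc, mean_class_acc, mean_class_iou; };

static SegmMetricsRef metrics_from_confusion_cpu_ref(const std::vector<int>& cm, int L)
{
    std::vector<long long> tp(L), gt(L), pred(L);
    long long total = 0, correct = 0;
    for (int r = 0; r < L; ++r)
        for (int c = 0; c < L; ++c) {
            long long v = cm[r * L + c];
            gt[r] += v; pred[c] += v; total += v;       // row sums = GT, column sums = Pred
            if (r == c) { tp[r] += v; correct += v; }   // diagonal = true positives
        }
    double acc_sum = 0.0, iou_sum = 0.0;
    for (int c = 0; c < L; ++c) {
        acc_sum += pred[c] ? double(tp[c]) / pred[c] : 0.0;
        long long uni = gt[c] + pred[c] - tp[c];
        iou_sum += uni ? double(tp[c]) / uni : 0.0;
    }
    return { total ? double(correct) / total : 0.0, acc_sum / L, iou_sum / L };
}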
e7e674235ec1420a52b8be287b47d25f9638615c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "common.h" #include <stdio.h> #include <assert.h> #include <thrust/functional.h> __global__ void cunn_SpatialClassNLLCriterion_updateOutput_kernel( float *output, float *total_weight, float *input, float *target, float *weights, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { __shared__ float partial_sums[CUDA_NUM_THREADS]; int i, t; float cur_weight; float input_sum = 0; float acc_weight = 0; int sample = blockIdx.x / blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; int step = blockDim.x * blocks_per_sample; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = target[toffset + i] - 1; assert(t >= 0 && t < n_classes); cur_weight = weights ? weights[t] : 1.0f; input_sum -= input[ioffset + i + map_nelem * t] * cur_weight; acc_weight += cur_weight; } __syncthreads(); input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<float>(), 0.0f); acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<float>(), 0.0f); if (threadIdx.x == 0) { atomicAdd(total_weight, acc_weight); if (size_average && acc_weight > 0) atomicAdd(output, input_sum / acc_weight / gridDim.x); else atomicAdd(output, input_sum); } } __global__ void cunn_SpatialClassNLLCriterion_updateGradInput_kernel( float *gradInput, float *target, float *weights, float *total_weight, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { if (*total_weight <= 0) return; int i, t; float norm = size_average ? (1.0f / *total_weight) : 1.0f; int sample = blockIdx.x / blocks_per_sample; int step = blockDim.x * blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = (int)target[toffset + i] - 1; assert(t >= 0 && t < n_classes); gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : 1.0f) * norm; } } void THNN_CudaSpatialClassNLLCriterion_updateOutput( THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *output, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); if (weights) THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, output, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaTensor_newContiguous(state, target); float *input_data = THCudaTensor_data(state, input); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; float *target_data = THCudaTensor_data(state, target); float *output_data = THCudaTensor_data(state, output); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaTensor_size(state, target, 0); long map_nelem = THCudaTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 
1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; THCudaTensor_fill(state, output, 0); THCudaTensor_fill(state, total_weight, 0); hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_updateOutput_kernel) , dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), output_data, total_weight_data, input_data, target_data, weights_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), blocks_per_sample ); THCudaCheck(hipGetLastError()); if (weights) THCudaTensor_free(state, weights); THCudaTensor_free(state, target); THCudaTensor_free(state, input); } void THNN_CudaSpatialClassNLLCriterion_updateGradInput( THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *gradInput, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); THArgCheck(THCudaTensor_isContiguous(state, gradInput), 4, "gradInput must be contiguous"); if (weights) THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaTensor_newContiguous(state, target); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; float *gradInput_data = THCudaTensor_data(state, gradInput); float *target_data = THCudaTensor_data(state, target); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaTensor_size(state, target, 0); long map_nelem = THCudaTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_updateGradInput_kernel) , dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), gradInput_data, target_data, weights_data, total_weight_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) *THCudaTensor_size(state, input, 3), blocks_per_sample ); THCudaCheck(hipGetLastError()); if (weights) THCudaTensor_free(state, weights); THCudaTensor_free(state, target); THCudaTensor_free(state, input); }
e7e674235ec1420a52b8be287b47d25f9638615c.cu
#include "THCUNN.h" #include "common.h" #include <stdio.h> #include <assert.h> #include <thrust/functional.h> __global__ void cunn_SpatialClassNLLCriterion_updateOutput_kernel( float *output, float *total_weight, float *input, float *target, float *weights, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { __shared__ float partial_sums[CUDA_NUM_THREADS]; int i, t; float cur_weight; float input_sum = 0; float acc_weight = 0; int sample = blockIdx.x / blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; int step = blockDim.x * blocks_per_sample; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = target[toffset + i] - 1; assert(t >= 0 && t < n_classes); cur_weight = weights ? weights[t] : 1.0f; input_sum -= input[ioffset + i + map_nelem * t] * cur_weight; acc_weight += cur_weight; } __syncthreads(); input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<float>(), 0.0f); acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<float>(), 0.0f); if (threadIdx.x == 0) { atomicAdd(total_weight, acc_weight); if (size_average && acc_weight > 0) atomicAdd(output, input_sum / acc_weight / gridDim.x); else atomicAdd(output, input_sum); } } __global__ void cunn_SpatialClassNLLCriterion_updateGradInput_kernel( float *gradInput, float *target, float *weights, float *total_weight, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { if (*total_weight <= 0) return; int i, t; float norm = size_average ? (1.0f / *total_weight) : 1.0f; int sample = blockIdx.x / blocks_per_sample; int step = blockDim.x * blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = (int)target[toffset + i] - 1; assert(t >= 0 && t < n_classes); gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : 1.0f) * norm; } } void THNN_CudaSpatialClassNLLCriterion_updateOutput( THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *output, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); if (weights) THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, output, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaTensor_newContiguous(state, target); float *input_data = THCudaTensor_data(state, input); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; float *target_data = THCudaTensor_data(state, target); float *output_data = THCudaTensor_data(state, output); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaTensor_size(state, target, 0); long map_nelem = THCudaTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 
1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; THCudaTensor_fill(state, output, 0); THCudaTensor_fill(state, total_weight, 0); cunn_SpatialClassNLLCriterion_updateOutput_kernel <<<total_blocks, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( output_data, total_weight_data, input_data, target_data, weights_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), blocks_per_sample ); THCudaCheck(cudaGetLastError()); if (weights) THCudaTensor_free(state, weights); THCudaTensor_free(state, target); THCudaTensor_free(state, input); } void THNN_CudaSpatialClassNLLCriterion_updateGradInput( THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *gradInput, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); THArgCheck(THCudaTensor_isContiguous(state, gradInput), 4, "gradInput must be contiguous"); if (weights) THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaTensor_newContiguous(state, target); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; float *gradInput_data = THCudaTensor_data(state, gradInput); float *target_data = THCudaTensor_data(state, target); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaTensor_size(state, target, 0); long map_nelem = THCudaTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; cunn_SpatialClassNLLCriterion_updateGradInput_kernel <<<total_blocks, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( gradInput_data, target_data, weights_data, total_weight_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) *THCudaTensor_size(state, input, 3), blocks_per_sample ); THCudaCheck(cudaGetLastError()); if (weights) THCudaTensor_free(state, weights); THCudaTensor_free(state, target); THCudaTensor_free(state, input); }
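// A minimal illustrative sketch of the accumulation pattern used by the updateOutput kernel
// above: each block reduces its partial sum in shared memory and thread 0 atomically adds the
// block result into a global accumulator. SKETCH_BLOCK_SIZE and the kernel name are invented
// for the example; this is not THCUNN code.
#include <cuda_runtime.h>

#define SKETCH_BLOCK_SIZE 256

__global__ void block_sum_into(const float* data, int n, float* global_sum) {
  __shared__ float partial[SKETCH_BLOCK_SIZE];
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  partial[threadIdx.x] = (i < n) ? data[i] : 0.0f;
  __syncthreads();
  // tree reduction within the block (assumes blockDim.x == SKETCH_BLOCK_SIZE, a power of two)
  for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
    if (threadIdx.x < stride)
      partial[threadIdx.x] += partial[threadIdx.x + stride];
    __syncthreads();
  }
  if (threadIdx.x == 0)
    atomicAdd(global_sum, partial[0]);  // one atomic per block rather than per element
}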
3e5f2ef465bce4c6bf7b9612dbe8e712143d6a78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Two vectors added together, a+b=c get familiar with the thread and block indexing Here only use one block with N threads */ #include <stdio.h> void random_ints(int* a, int N){ for (int i=0;i<N; ++i){ a[i]=rand(); } } void ones_ints(int* a, int N){ for (int i=0;i<N; ++i){ a[i]=1; } } // Kernel: called on Host, runs on Device __global__ void vectoradd(int* a, int *b, int *c){ c[threadIdx.x]=a[threadIdx.x]+b[threadIdx.x]; } int main(){ int *h_a, *h_b, *h_c; int *d_a, *d_b, *d_c; // None of the three variables below has to be constant const int N=100; const int Nblock=1; const int Nthread=N; int size=N*sizeof(int); // Allocate space for host copy of variables, and initialize h_a=(int *)malloc(size); ones_ints(h_a,N); h_b=(int *)malloc(size); ones_ints(h_b,N); h_c=(int *)malloc(size); // Allocate space for device copy hipMalloc((void **) &d_a, size); hipMalloc((void **) &d_b, size); hipMalloc((void **) &d_c, size); for(int i=0;i<N;++i){ printf("%d ",h_a[i]); } printf("\n"); for(int i=0;i<N;++i){ printf("%d ",h_b[i]); } printf("\n"); // Copy from Host to Device hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, size, hipMemcpyHostToDevice); // run kernel hipLaunchKernelGGL(( vectoradd), dim3(Nblock),dim3(Nthread), 0, 0, d_a,d_b,d_c); // Copy from Device to Host hipMemcpy(h_c,d_c, size, hipMemcpyDeviceToHost); for(int i=0;i<N;++i){ printf("%d ",h_c[i]); } printf("\n"); // free host memory free(h_a); free(h_b); free(h_c); // free device memory hipFree(d_a); hipFree(d_b); hipFree(d_c); }
3e5f2ef465bce4c6bf7b9612dbe8e712143d6a78.cu
/* Two vectors added together, a+b=c get familiar with the thread and block indexing Here only use one block with N threads */ #include <stdio.h> void random_ints(int* a, int N){ for (int i=0;i<N; ++i){ a[i]=rand(); } } void ones_ints(int* a, int N){ for (int i=0;i<N; ++i){ a[i]=1; } } // Kernel: called on Host, runs on Device __global__ void vectoradd(int* a, int *b, int *c){ c[threadIdx.x]=a[threadIdx.x]+b[threadIdx.x]; } int main(){ int *h_a, *h_b, *h_c; int *d_a, *d_b, *d_c; // None of the three variables below has to be constant const int N=100; const int Nblock=1; const int Nthread=N; int size=N*sizeof(int); // Allocate space for host copy of variables, and initialize h_a=(int *)malloc(size); ones_ints(h_a,N); h_b=(int *)malloc(size); ones_ints(h_b,N); h_c=(int *)malloc(size); // Allocate space for device copy cudaMalloc((void **) &d_a, size); cudaMalloc((void **) &d_b, size); cudaMalloc((void **) &d_c, size); for(int i=0;i<N;++i){ printf("%d ",h_a[i]); } printf("\n"); for(int i=0;i<N;++i){ printf("%d ",h_b[i]); } printf("\n"); // Copy from Host to Device cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice); // run kernel vectoradd<<<Nblock,Nthread>>>(d_a,d_b,d_c); // Copy from Device to Host cudaMemcpy(h_c,d_c, size, cudaMemcpyDeviceToHost); for(int i=0;i<N;++i){ printf("%d ",h_c[i]); } printf("\n"); // free host memory free(h_a); free(h_b); free(h_c); // free device memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); }
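// A minimal illustrative sketch of the error-check wrapper one would typically add around the
// launches in the example above; the CUDA_CHECK name is an assumption for illustration, not an
// API the example defines.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                     \
  do {                                                                       \
    cudaError_t err_ = (call);                                               \
    if (err_ != cudaSuccess) {                                               \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                            \
              cudaGetErrorString(err_), __FILE__, __LINE__);                 \
      exit(EXIT_FAILURE);                                                    \
    }                                                                        \
  } while (0)

// Typical usage after a kernel launch:
//   vectoradd<<<Nblock, Nthread>>>(d_a, d_b, d_c);
//   CUDA_CHECK(cudaGetLastError());        // catches launch-configuration errors
//   CUDA_CHECK(cudaDeviceSynchronize());   // surfaces asynchronous execution errors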
3a9678b849ea28c102e59f9a61adfed6b67a4257.hip
// !!! This is a file automatically generated by hipify!!! /* * Software License Agreement (BSD License) * * Copyright (c) 2011, Willow Garage, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Author: Anatoly Baskeheev, Itseez Ltd, ([email protected]) */ #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/iterator/counting_iterator.h> #include "internal.hpp" #include "hip/hip_runtime.h" using namespace std; using namespace thrust; namespace pcl { namespace device { struct InSphere { float x_, y_, z_, radius2_; InSphere(float x, float y, float z, float radius) : x_(x), y_(y), z_(z), radius2_(radius * radius) {} __device__ __host__ __forceinline__ bool operator()(const float3& point) const { float dx = point.x - x_; float dy = point.y - y_; float dz = point.z - z_; return (dx * dx + dy * dy + dz * dz) < radius2_; } __device__ __host__ __forceinline__ bool operator()(const float4& point) const { return (*this)(make_float3(point.x, point.y, point.z)); } }; } } void pcl::device::bruteForceRadiusSearch(const OctreeImpl::PointCloud& cloud, const OctreeImpl::PointType& query, float radius, DeviceArray<int>& result, DeviceArray<int>& buffer) { using PointType = OctreeImpl::PointType; if (buffer.size() < cloud.size()) buffer.create(cloud.size()); InSphere cond(query.x, query.y, query.z, radius); device_ptr<const PointType> cloud_ptr((const PointType*)cloud.ptr()); device_ptr<int> res_ptr(buffer.ptr()); counting_iterator<int> first(0); counting_iterator<int> last = first + cloud.size(); //main bottle neck is a kernel call overhead/allocs //work time for 871k points ~0.8ms int count = (int)(thrust::copy_if(first, last, cloud_ptr, res_ptr, cond) - res_ptr); result = DeviceArray<int>(buffer.ptr(), count); }
3a9678b849ea28c102e59f9a61adfed6b67a4257.cu
/* * Software License Agreement (BSD License) * * Copyright (c) 2011, Willow Garage, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Author: Anatoly Baskeheev, Itseez Ltd, ([email protected]) */ #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/iterator/counting_iterator.h> #include "internal.hpp" #include "cuda.h" using namespace std; using namespace thrust; namespace pcl { namespace device { struct InSphere { float x_, y_, z_, radius2_; InSphere(float x, float y, float z, float radius) : x_(x), y_(y), z_(z), radius2_(radius * radius) {} __device__ __host__ __forceinline__ bool operator()(const float3& point) const { float dx = point.x - x_; float dy = point.y - y_; float dz = point.z - z_; return (dx * dx + dy * dy + dz * dz) < radius2_; } __device__ __host__ __forceinline__ bool operator()(const float4& point) const { return (*this)(make_float3(point.x, point.y, point.z)); } }; } } void pcl::device::bruteForceRadiusSearch(const OctreeImpl::PointCloud& cloud, const OctreeImpl::PointType& query, float radius, DeviceArray<int>& result, DeviceArray<int>& buffer) { using PointType = OctreeImpl::PointType; if (buffer.size() < cloud.size()) buffer.create(cloud.size()); InSphere cond(query.x, query.y, query.z, radius); device_ptr<const PointType> cloud_ptr((const PointType*)cloud.ptr()); device_ptr<int> res_ptr(buffer.ptr()); counting_iterator<int> first(0); counting_iterator<int> last = first + cloud.size(); //main bottle neck is a kernel call overhead/allocs //work time for 871k points ~0.8ms int count = (int)(thrust::copy_if(first, last, cloud_ptr, res_ptr, cond) - res_ptr); result = DeviceArray<int>(buffer.ptr(), count); }
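// A minimal illustrative sketch of the thrust::copy_if stream-compaction idiom used by
// bruteForceRadiusSearch above: a counting_iterator supplies candidate indices and a stencil
// sequence decides which indices survive. The predicate and function names are invented;
// this is not PCL code.
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>

struct LessThanSketch {
  float limit;
  __host__ __device__ bool operator()(float v) const { return v < limit; }
};

int count_below(const thrust::device_vector<float>& values, float limit,
                thrust::device_vector<int>& out_indices) {
  out_indices.resize(values.size());
  thrust::counting_iterator<int> first(0);
  thrust::counting_iterator<int> last = first + values.size();
  // copy index i to the output iff values[i] satisfies the predicate (stencil overload)
  auto out_end = thrust::copy_if(first, last, values.begin(), out_indices.begin(),
                                 LessThanSketch{limit});
  return static_cast<int>(out_end - out_indices.begin());
}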
1ecc88be217d782d14592b36b3587c3c9caacb92.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2009-2022 The Regents of the University of Michigan. // Part of HOOMD-blue, released under the BSD 3-Clause License. #include "hip/hip_runtime.h" // Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "NeighborListGPUStencil_hip.cuh" #include "hoomd/TextureTools.h" #include "hoomd/WarpTools.cuh" #include <hipcub/hipcub.hpp> /*! \file NeighborListGPUStencil.cu \brief Defines GPU kernel code for O(N) neighbor list generation on the GPU with multiple bin stencils */ namespace hoomd { namespace md { namespace kernel { //! Kernel call for generating neighbor list on the GPU using multiple stencils (Kepler optimized //! version) /*! \tparam flags Set bit 1 to enable body filtering. Set bit 2 to enable diameter filtering. \tparam threads_per_particle Number of threads cooperatively computing the neighbor list \param d_nlist Neighbor list data structure to write \param d_n_neigh Number of neighbors to write \param d_last_updated_pos Particle positions at this update are written to this array \param d_conditions Conditions array for writing overflow condition \param d_Nmax Maximum number of neighbors per type \param d_head_list List of indexes to access \a d_nlist \param d_pos Particle positions \param d_body Particle body indices \param d_diameter Particle diameters \param N Number of particles \param d_cell_size Number of particles in each cell \param d_cell_xyzf Cell contents (xyzf array from CellList with flag=type) \param d_cell_tdb Cell contents (tdb array from CellList with) \param ci Cell indexer for indexing cells \param cli Cell list indexer for indexing into d_cell_xyzf \param d_stencil 2D array of stencil offsets per type \param d_n_stencil Number of stencils per type \param stencil_idx Indexer into \a d_stencil \param box Simulation box dimensions \param d_r_cut Cutoff radius stored by pair type r_cut(i,j) \param r_buff The maximum radius for which to include particles as neighbors \param ntypes Number of particle types \param ghost_width Width of ghost cell layer \note optimized for Kepler */ template<unsigned char flags, int threads_per_particle> __global__ void gpu_compute_nlist_stencil_kernel(unsigned int* d_nlist, unsigned int* d_n_neigh, Scalar4* d_last_updated_pos, unsigned int* d_conditions, const unsigned int* d_Nmax, const size_t* d_head_list, const unsigned int* d_pid_map, const Scalar4* d_pos, const unsigned int* d_body, const Scalar* d_diameter, const unsigned int N, const unsigned int* d_cell_size, const Scalar4* d_cell_xyzf, const Scalar4* d_cell_tdb, const Index3D ci, const Index2D cli, const Scalar4* d_stencil, const unsigned int* d_n_stencil, const Index2D stencil_idx, const BoxDim box, const Scalar* d_r_cut, const Scalar r_buff, const unsigned int ntypes, const Scalar3 ghost_width) { bool filter_body = flags & 1; bool diameter_shift = flags & 2; // cache the r_listsq parameters into shared memory Index2D typpair_idx(ntypes); const unsigned int num_typ_parameters = typpair_idx.getNumElements(); // shared data for per type pair parameters HIP_DYNAMIC_SHARED(unsigned char, s_data) // pointer for the r_listsq data Scalar* s_r_list = (Scalar*)(&s_data[0]); unsigned int* s_Nmax = (unsigned int*)(&s_data[sizeof(Scalar) * num_typ_parameters]); // load in the per type pair r_list for (unsigned int cur_offset = 0; cur_offset < num_typ_parameters; cur_offset += blockDim.x) { if (cur_offset + 
threadIdx.x < num_typ_parameters) { Scalar r_cut = d_r_cut[cur_offset + threadIdx.x]; // force the r_list(i,j) to a skippable value if r_cut(i,j) is skippable s_r_list[cur_offset + threadIdx.x] = (r_cut > Scalar(0.0)) ? r_cut + r_buff : Scalar(-1.0); } if (cur_offset + threadIdx.x < ntypes) { s_Nmax[cur_offset + threadIdx.x] = d_Nmax[cur_offset + threadIdx.x]; } } __syncthreads(); // each set of threads_per_particle threads is going to compute the neighbor list for a single // particle const int idx = blockIdx.x * (blockDim.x / threads_per_particle) + threadIdx.x / threads_per_particle; // one thread per particle if (idx >= N) return; // get the write particle id int my_pidx = d_pid_map[idx]; Scalar4 my_postype = d_pos[my_pidx]; Scalar3 my_pos = make_scalar3(my_postype.x, my_postype.y, my_postype.z); unsigned int my_type = __scalar_as_int(my_postype.w); unsigned int my_body = d_body[my_pidx]; Scalar my_diam = d_diameter[my_pidx]; size_t my_head = d_head_list[my_pidx]; Scalar3 f = box.makeFraction(my_pos, ghost_width); // find the bin each particle belongs in int ib = (int)(f.x * ci.getW()); int jb = (int)(f.y * ci.getH()); int kb = (int)(f.z * ci.getD()); uchar3 periodic = box.getPeriodic(); // need to handle the case where the particle is exactly at the box hi if (ib == ci.getW() && periodic.x) ib = 0; if (jb == ci.getH() && periodic.y) jb = 0; if (kb == ci.getD() && periodic.z) kb = 0; int my_cell = ci(ib, jb, kb); // number of available stencils unsigned int n_stencil = d_n_stencil[my_type]; // index of current stencil (-1 to initialize) int cur_adj = -1; Scalar cell_dist2 = 0.0; // current cell (0 to initialize) unsigned int neigh_cell = 0; // size of current cell (0 to initialize) unsigned int neigh_size = 0; // current index in cell int cur_offset = threadIdx.x % threads_per_particle; bool done = false; // total number of neighbors unsigned int nneigh = 0; while (!done) { // initialize with default unsigned int neighbor; unsigned char has_neighbor = 0; // advance neighbor cell while (cur_offset >= neigh_size && !done) { cur_offset -= neigh_size; cur_adj++; if (cur_adj < n_stencil) { // compute the stenciled cell cartesian coordinates Scalar4 stencil = __ldg(d_stencil + stencil_idx(cur_adj, my_type)); int sib = ib + __scalar_as_int(stencil.x); int sjb = jb + __scalar_as_int(stencil.y); int skb = kb + __scalar_as_int(stencil.z); cell_dist2 = stencil.w; // wrap through the boundary if (sib >= (int)ci.getW() && periodic.x) sib -= ci.getW(); if (sib < 0 && periodic.x) sib += ci.getW(); if (sjb >= (int)ci.getH() && periodic.y) sjb -= ci.getH(); if (sjb < 0 && periodic.y) sjb += ci.getH(); if (skb >= (int)ci.getD() && periodic.z) skb -= ci.getD(); if (skb < 0 && periodic.z) skb += ci.getD(); neigh_cell = ci(sib, sjb, skb); neigh_size = d_cell_size[neigh_cell]; } else { // we are past the end of the cell neighbors done = true; } } // check for a neighbor if thread is still working if (!done) { // use a do {} while(0) loop to process this particle so we can break for exclusions // in microbenchmarks, this is was faster than using bool exclude because it saved flops // it's a little easier to read than having 4 levels of if{} statements nested do { // read in the particle type (diameter and body as well while we've got the Scalar4 // in) const Scalar4 neigh_tdb = __ldg(d_cell_tdb + cli(cur_offset, neigh_cell)); const unsigned int type_j = __scalar_as_int(neigh_tdb.x); const Scalar diam_j = neigh_tdb.y; const unsigned int body_j = __scalar_as_int(neigh_tdb.z); // skip any particles belonging to 
the same rigid body if requested if (filter_body && my_body != 0xffffffff && my_body == body_j) break; // compute the rlist based on the particle type we're interacting with Scalar r_list = s_r_list[typpair_idx(my_type, type_j)]; if (r_list <= Scalar(0.0)) break; Scalar sqshift = Scalar(0.0); if (diameter_shift) { const Scalar delta = (my_diam + diam_j) * Scalar(0.5) - Scalar(1.0); // r^2 < (r_list + delta)^2 // r^2 < r_listsq + delta^2 + 2*r_list*delta sqshift = (delta + Scalar(2.0) * r_list) * delta; } Scalar r_listsq = r_list * r_list + sqshift; // compare the check distance to the minimum cell distance, and pass without // distance check if unnecessary if (cell_dist2 > r_listsq) break; // only load in the particle position and id if distance check is required const Scalar4 neigh_xyzf = __ldg(d_cell_xyzf + cli(cur_offset, neigh_cell)); const Scalar3 neigh_pos = make_scalar3(neigh_xyzf.x, neigh_xyzf.y, neigh_xyzf.z); unsigned int cur_neigh = __scalar_as_int(neigh_xyzf.w); // a particle cannot neighbor itself if (my_pidx == (int)cur_neigh) break; Scalar3 dx = my_pos - neigh_pos; dx = box.minImage(dx); Scalar dr_sq = dot(dx, dx); if (dr_sq <= r_listsq) { neighbor = cur_neigh; has_neighbor = 1; } } while (0); // particle is processed exactly once // advance cur_offset cur_offset += threads_per_particle; } // now that possible neighbor checks are finished, done (for the cta) depends only on first // thread neighbor list only needs to get written into if thread 0 is not done done = hoomd::detail::WarpScan<bool, threads_per_particle>().Broadcast(done, 0); if (!done) { // scan over flags unsigned char k(0), n(0); hoomd::detail::WarpScan<unsigned char, threads_per_particle>().ExclusiveSum( has_neighbor, k, n); // write neighbor if it fits in list if (has_neighbor && (nneigh + k) < s_Nmax[my_type]) d_nlist[my_head + nneigh + k] = neighbor; // increment total neighbor count nneigh += n; } } // end while if (threadIdx.x % threads_per_particle == 0) { // flag if we need to grow the neighbor list if (nneigh >= s_Nmax[my_type]) atomicMax(&d_conditions[my_type], nneigh); d_n_neigh[my_pidx] = nneigh; d_last_updated_pos[my_pidx] = my_postype; } } //! determine maximum possible block size template<typename T> int get_max_block_size_stencil(T func) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)func); int max_threads = attr.maxThreadsPerBlock; // number of threads has to be multiple of warp size max_threads -= max_threads % max_threads_per_particle; return max_threads; } //! 
recursive template to launch neighborlist with given template parameters /* \tparam cur_tpp Number of threads per particle (assumed to be power of two) */ template<int cur_tpp> inline void stencil_launcher(unsigned int* d_nlist, unsigned int* d_n_neigh, Scalar4* d_last_updated_pos, unsigned int* d_conditions, const unsigned int* d_Nmax, const size_t* d_head_list, const unsigned int* d_pid_map, const Scalar4* d_pos, const unsigned int* d_body, const Scalar* d_diameter, const unsigned int N, const unsigned int* d_cell_size, const Scalar4* d_cell_xyzf, const Scalar4* d_cell_tdb, const Index3D& ci, const Index2D& cli, const Scalar4* d_stencil, const unsigned int* d_n_stencil, const Index2D& stencil_idx, const BoxDim& box, const Scalar* d_r_cut, const Scalar r_buff, const unsigned int ntypes, const Scalar3& ghost_width, bool filter_body, bool diameter_shift, const unsigned int threads_per_particle, const unsigned int block_size, const hipDeviceProp_t& devprop) { // shared memory = r_listsq + Nmax + stuff needed for neighborlist (computed below) Index2D typpair_idx(ntypes); unsigned int shared_size = (unsigned int)(sizeof(Scalar) * typpair_idx.getNumElements() + sizeof(unsigned int) * ntypes); if (shared_size > devprop.sharedMemPerBlock) { throw std::runtime_error("Neighborlist r_cut matrix exceeds the available shared memory " "per block."); } if (threads_per_particle == cur_tpp && cur_tpp != 0) { if (!diameter_shift && !filter_body) { unsigned int max_block_size; max_block_size = get_max_block_size_stencil(gpu_compute_nlist_stencil_kernel<0, cur_tpp>); unsigned int run_block_size = (block_size < max_block_size) ? block_size : max_block_size; dim3 grid(N / (block_size / threads_per_particle) + 1); hipLaunchKernelGGL((gpu_compute_nlist_stencil_kernel<0, cur_tpp>), dim3(grid), dim3(run_block_size), shared_size, 0, d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, d_pid_map, d_pos, d_body, d_diameter, N, d_cell_size, d_cell_xyzf, d_cell_tdb, ci, cli, d_stencil, d_n_stencil, stencil_idx, box, d_r_cut, r_buff, ntypes, ghost_width); } else if (!diameter_shift && filter_body) { unsigned int max_block_size; max_block_size = get_max_block_size_stencil(gpu_compute_nlist_stencil_kernel<1, cur_tpp>); unsigned int run_block_size = (block_size < max_block_size) ? block_size : max_block_size; dim3 grid(N / (block_size / threads_per_particle) + 1); hipLaunchKernelGGL((gpu_compute_nlist_stencil_kernel<1, cur_tpp>), dim3(grid), dim3(run_block_size), shared_size, 0, d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, d_pid_map, d_pos, d_body, d_diameter, N, d_cell_size, d_cell_xyzf, d_cell_tdb, ci, cli, d_stencil, d_n_stencil, stencil_idx, box, d_r_cut, r_buff, ntypes, ghost_width); } else if (diameter_shift && !filter_body) { unsigned int max_block_size; max_block_size = get_max_block_size_stencil(gpu_compute_nlist_stencil_kernel<2, cur_tpp>); unsigned int run_block_size = (block_size < max_block_size) ? 
block_size : max_block_size; dim3 grid(N / (block_size / threads_per_particle) + 1); hipLaunchKernelGGL((gpu_compute_nlist_stencil_kernel<2, cur_tpp>), dim3(grid), dim3(run_block_size), shared_size, 0, d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, d_pid_map, d_pos, d_body, d_diameter, N, d_cell_size, d_cell_xyzf, d_cell_tdb, ci, cli, d_stencil, d_n_stencil, stencil_idx, box, d_r_cut, r_buff, ntypes, ghost_width); } else if (diameter_shift && filter_body) { unsigned int max_block_size; max_block_size = get_max_block_size_stencil(gpu_compute_nlist_stencil_kernel<3, cur_tpp>); unsigned int run_block_size = (block_size < max_block_size) ? block_size : max_block_size; dim3 grid(N / (block_size / threads_per_particle) + 1); hipLaunchKernelGGL((gpu_compute_nlist_stencil_kernel<3, cur_tpp>), dim3(grid), dim3(run_block_size), shared_size, 0, d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, d_pid_map, d_pos, d_body, d_diameter, N, d_cell_size, d_cell_xyzf, d_cell_tdb, ci, cli, d_stencil, d_n_stencil, stencil_idx, box, d_r_cut, r_buff, ntypes, ghost_width); } } else { stencil_launcher<cur_tpp / 2>(d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, d_pid_map, d_pos, d_body, d_diameter, N, d_cell_size, d_cell_xyzf, d_cell_tdb, ci, cli, d_stencil, d_n_stencil, stencil_idx, box, d_r_cut, r_buff, ntypes, ghost_width, filter_body, diameter_shift, threads_per_particle, block_size, devprop); } } //! template specialization to terminate recursion template<> inline void stencil_launcher<min_threads_per_particle / 2>(unsigned int* d_nlist, unsigned int* d_n_neigh, Scalar4* d_last_updated_pos, unsigned int* d_conditions, const unsigned int* d_Nmax, const size_t* d_head_list, const unsigned int* d_pid_map, const Scalar4* d_pos, const unsigned int* d_body, const Scalar* d_diameter, const unsigned int N, const unsigned int* d_cell_size, const Scalar4* d_cell_xyzf, const Scalar4* d_cell_tdb, const Index3D& ci, const Index2D& cli, const Scalar4* d_stencil, const unsigned int* d_n_stencil, const Index2D& stencil_idx, const BoxDim& box, const Scalar* d_r_cut, const Scalar r_buff, const unsigned int ntypes, const Scalar3& ghost_width, bool filter_body, bool diameter_shift, const unsigned int threads_per_particle, const unsigned int block_size, const hipDeviceProp_t& devprop) { } hipError_t gpu_compute_nlist_stencil(unsigned int* d_nlist, unsigned int* d_n_neigh, Scalar4* d_last_updated_pos, unsigned int* d_conditions, const unsigned int* d_Nmax, const size_t* d_head_list, const unsigned int* d_pid_map, const Scalar4* d_pos, const unsigned int* d_body, const Scalar* d_diameter, const unsigned int N, const unsigned int* d_cell_size, const Scalar4* d_cell_xyzf, const Scalar4* d_cell_tdb, const Index3D& ci, const Index2D& cli, const Scalar4* d_stencil, const unsigned int* d_n_stencil, const Index2D& stencil_idx, const BoxDim& box, const Scalar* d_r_cut, const Scalar r_buff, const unsigned int ntypes, const Scalar3& ghost_width, bool filter_body, bool diameter_shift, const unsigned int threads_per_particle, const unsigned int block_size, const hipDeviceProp_t& devprop) { stencil_launcher<max_threads_per_particle>(d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, d_pid_map, d_pos, d_body, d_diameter, N, d_cell_size, d_cell_xyzf, d_cell_tdb, ci, cli, d_stencil, d_n_stencil, stencil_idx, box, d_r_cut, r_buff, ntypes, ghost_width, filter_body, diameter_shift, threads_per_particle, block_size, devprop); return hipSuccess; } /*! 
* \param d_pids Unsorted particle indexes * \param d_types Unsorted particle types * \param d_pos Particle position array * \param N Number of particles * * \a d_pids and \a d_types are trivially initialized to their current (unsorted) values. They are * later sorted in gpu_compute_nlist_stencil_sort_types(). */ __global__ void gpu_compute_nlist_stencil_fill_types_kernel(unsigned int* d_pids, unsigned int* d_types, const Scalar4* d_pos, const unsigned int N) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; Scalar4 pos_i = d_pos[idx]; unsigned int type = __scalar_as_int(pos_i.w); d_types[idx] = type; d_pids[idx] = idx; } /*! * \param d_pids Unsorted particle indexes * \param d_types Unsorted particle types * \param d_pos Particle position array * \param N Number of particles */ hipError_t gpu_compute_nlist_stencil_fill_types(unsigned int* d_pids, unsigned int* d_types, const Scalar4* d_pos, const unsigned int N) { const unsigned int block_size = 128; hipLaunchKernelGGL((gpu_compute_nlist_stencil_fill_types_kernel), dim3(N / block_size + 1), dim3(block_size), 0, 0, d_pids, d_types, d_pos, N); return hipSuccess; } /*! * \param d_pids Array of unsorted particle indexes * \param d_pids_alt Double buffer for particle indexes * \param d_types Array of unsorted particle types * \param d_types_alt Double buffer for particle types * \param d_tmp_storage Temporary allocation for sorting * \param tmp_storage_bytes Size of temporary allocation * \param swap Flag to swap the sorted particle indexes into the correct buffer * \param N number of particles * * This wrapper calls the CUB radix sorting methods, and so it needs to be called twice. Initially, * \a d_tmp_storage should be NULL, and the necessary temporary storage is saved into \a * tmp_storage_bytes. This space must then be allocated into \a d_tmp_storage, and on the second * call, the sorting is performed. */ void gpu_compute_nlist_stencil_sort_types(unsigned int* d_pids, unsigned int* d_pids_alt, unsigned int* d_types, unsigned int* d_types_alt, void* d_tmp_storage, size_t& tmp_storage_bytes, bool& swap, const unsigned int N) { hipcub::DoubleBuffer<unsigned int> d_keys(d_types, d_types_alt); hipcub::DoubleBuffer<unsigned int> d_vals(d_pids, d_pids_alt); hipcub::DeviceRadixSort::SortPairs(d_tmp_storage, tmp_storage_bytes, d_keys, d_vals, N); if (d_tmp_storage != NULL) { swap = (d_vals.selector == 1); } } } // end namespace kernel } // end namespace md } // end namespace hoomd
1ecc88be217d782d14592b36b3587c3c9caacb92.cu
// Copyright (c) 2009-2022 The Regents of the University of Michigan. // Part of HOOMD-blue, released under the BSD 3-Clause License. #include "hip/hip_runtime.h" // Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "NeighborListGPUStencil.cuh" #include "hoomd/TextureTools.h" #include "hoomd/WarpTools.cuh" #include <hipcub/hipcub.hpp> /*! \file NeighborListGPUStencil.cu \brief Defines GPU kernel code for O(N) neighbor list generation on the GPU with multiple bin stencils */ namespace hoomd { namespace md { namespace kernel { //! Kernel call for generating neighbor list on the GPU using multiple stencils (Kepler optimized //! version) /*! \tparam flags Set bit 1 to enable body filtering. Set bit 2 to enable diameter filtering. \tparam threads_per_particle Number of threads cooperatively computing the neighbor list \param d_nlist Neighbor list data structure to write \param d_n_neigh Number of neighbors to write \param d_last_updated_pos Particle positions at this update are written to this array \param d_conditions Conditions array for writing overflow condition \param d_Nmax Maximum number of neighbors per type \param d_head_list List of indexes to access \a d_nlist \param d_pos Particle positions \param d_body Particle body indices \param d_diameter Particle diameters \param N Number of particles \param d_cell_size Number of particles in each cell \param d_cell_xyzf Cell contents (xyzf array from CellList with flag=type) \param d_cell_tdb Cell contents (tdb array from CellList with) \param ci Cell indexer for indexing cells \param cli Cell list indexer for indexing into d_cell_xyzf \param d_stencil 2D array of stencil offsets per type \param d_n_stencil Number of stencils per type \param stencil_idx Indexer into \a d_stencil \param box Simulation box dimensions \param d_r_cut Cutoff radius stored by pair type r_cut(i,j) \param r_buff The maximum radius for which to include particles as neighbors \param ntypes Number of particle types \param ghost_width Width of ghost cell layer \note optimized for Kepler */ template<unsigned char flags, int threads_per_particle> __global__ void gpu_compute_nlist_stencil_kernel(unsigned int* d_nlist, unsigned int* d_n_neigh, Scalar4* d_last_updated_pos, unsigned int* d_conditions, const unsigned int* d_Nmax, const size_t* d_head_list, const unsigned int* d_pid_map, const Scalar4* d_pos, const unsigned int* d_body, const Scalar* d_diameter, const unsigned int N, const unsigned int* d_cell_size, const Scalar4* d_cell_xyzf, const Scalar4* d_cell_tdb, const Index3D ci, const Index2D cli, const Scalar4* d_stencil, const unsigned int* d_n_stencil, const Index2D stencil_idx, const BoxDim box, const Scalar* d_r_cut, const Scalar r_buff, const unsigned int ntypes, const Scalar3 ghost_width) { bool filter_body = flags & 1; bool diameter_shift = flags & 2; // cache the r_listsq parameters into shared memory Index2D typpair_idx(ntypes); const unsigned int num_typ_parameters = typpair_idx.getNumElements(); // shared data for per type pair parameters HIP_DYNAMIC_SHARED(unsigned char, s_data) // pointer for the r_listsq data Scalar* s_r_list = (Scalar*)(&s_data[0]); unsigned int* s_Nmax = (unsigned int*)(&s_data[sizeof(Scalar) * num_typ_parameters]); // load in the per type pair r_list for (unsigned int cur_offset = 0; cur_offset < num_typ_parameters; cur_offset += blockDim.x) { if (cur_offset + threadIdx.x < num_typ_parameters) { Scalar r_cut = 
d_r_cut[cur_offset + threadIdx.x]; // force the r_list(i,j) to a skippable value if r_cut(i,j) is skippable s_r_list[cur_offset + threadIdx.x] = (r_cut > Scalar(0.0)) ? r_cut + r_buff : Scalar(-1.0); } if (cur_offset + threadIdx.x < ntypes) { s_Nmax[cur_offset + threadIdx.x] = d_Nmax[cur_offset + threadIdx.x]; } } __syncthreads(); // each set of threads_per_particle threads is going to compute the neighbor list for a single // particle const int idx = blockIdx.x * (blockDim.x / threads_per_particle) + threadIdx.x / threads_per_particle; // one thread per particle if (idx >= N) return; // get the write particle id int my_pidx = d_pid_map[idx]; Scalar4 my_postype = d_pos[my_pidx]; Scalar3 my_pos = make_scalar3(my_postype.x, my_postype.y, my_postype.z); unsigned int my_type = __scalar_as_int(my_postype.w); unsigned int my_body = d_body[my_pidx]; Scalar my_diam = d_diameter[my_pidx]; size_t my_head = d_head_list[my_pidx]; Scalar3 f = box.makeFraction(my_pos, ghost_width); // find the bin each particle belongs in int ib = (int)(f.x * ci.getW()); int jb = (int)(f.y * ci.getH()); int kb = (int)(f.z * ci.getD()); uchar3 periodic = box.getPeriodic(); // need to handle the case where the particle is exactly at the box hi if (ib == ci.getW() && periodic.x) ib = 0; if (jb == ci.getH() && periodic.y) jb = 0; if (kb == ci.getD() && periodic.z) kb = 0; int my_cell = ci(ib, jb, kb); // number of available stencils unsigned int n_stencil = d_n_stencil[my_type]; // index of current stencil (-1 to initialize) int cur_adj = -1; Scalar cell_dist2 = 0.0; // current cell (0 to initialize) unsigned int neigh_cell = 0; // size of current cell (0 to initialize) unsigned int neigh_size = 0; // current index in cell int cur_offset = threadIdx.x % threads_per_particle; bool done = false; // total number of neighbors unsigned int nneigh = 0; while (!done) { // initialize with default unsigned int neighbor; unsigned char has_neighbor = 0; // advance neighbor cell while (cur_offset >= neigh_size && !done) { cur_offset -= neigh_size; cur_adj++; if (cur_adj < n_stencil) { // compute the stenciled cell cartesian coordinates Scalar4 stencil = __ldg(d_stencil + stencil_idx(cur_adj, my_type)); int sib = ib + __scalar_as_int(stencil.x); int sjb = jb + __scalar_as_int(stencil.y); int skb = kb + __scalar_as_int(stencil.z); cell_dist2 = stencil.w; // wrap through the boundary if (sib >= (int)ci.getW() && periodic.x) sib -= ci.getW(); if (sib < 0 && periodic.x) sib += ci.getW(); if (sjb >= (int)ci.getH() && periodic.y) sjb -= ci.getH(); if (sjb < 0 && periodic.y) sjb += ci.getH(); if (skb >= (int)ci.getD() && periodic.z) skb -= ci.getD(); if (skb < 0 && periodic.z) skb += ci.getD(); neigh_cell = ci(sib, sjb, skb); neigh_size = d_cell_size[neigh_cell]; } else { // we are past the end of the cell neighbors done = true; } } // check for a neighbor if thread is still working if (!done) { // use a do {} while(0) loop to process this particle so we can break for exclusions // in microbenchmarks, this is was faster than using bool exclude because it saved flops // it's a little easier to read than having 4 levels of if{} statements nested do { // read in the particle type (diameter and body as well while we've got the Scalar4 // in) const Scalar4 neigh_tdb = __ldg(d_cell_tdb + cli(cur_offset, neigh_cell)); const unsigned int type_j = __scalar_as_int(neigh_tdb.x); const Scalar diam_j = neigh_tdb.y; const unsigned int body_j = __scalar_as_int(neigh_tdb.z); // skip any particles belonging to the same rigid body if requested if (filter_body 
&& my_body != 0xffffffff && my_body == body_j) break; // compute the rlist based on the particle type we're interacting with Scalar r_list = s_r_list[typpair_idx(my_type, type_j)]; if (r_list <= Scalar(0.0)) break; Scalar sqshift = Scalar(0.0); if (diameter_shift) { const Scalar delta = (my_diam + diam_j) * Scalar(0.5) - Scalar(1.0); // r^2 < (r_list + delta)^2 // r^2 < r_listsq + delta^2 + 2*r_list*delta sqshift = (delta + Scalar(2.0) * r_list) * delta; } Scalar r_listsq = r_list * r_list + sqshift; // compare the check distance to the minimum cell distance, and pass without // distance check if unnecessary if (cell_dist2 > r_listsq) break; // only load in the particle position and id if distance check is required const Scalar4 neigh_xyzf = __ldg(d_cell_xyzf + cli(cur_offset, neigh_cell)); const Scalar3 neigh_pos = make_scalar3(neigh_xyzf.x, neigh_xyzf.y, neigh_xyzf.z); unsigned int cur_neigh = __scalar_as_int(neigh_xyzf.w); // a particle cannot neighbor itself if (my_pidx == (int)cur_neigh) break; Scalar3 dx = my_pos - neigh_pos; dx = box.minImage(dx); Scalar dr_sq = dot(dx, dx); if (dr_sq <= r_listsq) { neighbor = cur_neigh; has_neighbor = 1; } } while (0); // particle is processed exactly once // advance cur_offset cur_offset += threads_per_particle; } // now that possible neighbor checks are finished, done (for the cta) depends only on first // thread neighbor list only needs to get written into if thread 0 is not done done = hoomd::detail::WarpScan<bool, threads_per_particle>().Broadcast(done, 0); if (!done) { // scan over flags unsigned char k(0), n(0); hoomd::detail::WarpScan<unsigned char, threads_per_particle>().ExclusiveSum( has_neighbor, k, n); // write neighbor if it fits in list if (has_neighbor && (nneigh + k) < s_Nmax[my_type]) d_nlist[my_head + nneigh + k] = neighbor; // increment total neighbor count nneigh += n; } } // end while if (threadIdx.x % threads_per_particle == 0) { // flag if we need to grow the neighbor list if (nneigh >= s_Nmax[my_type]) atomicMax(&d_conditions[my_type], nneigh); d_n_neigh[my_pidx] = nneigh; d_last_updated_pos[my_pidx] = my_postype; } } //! determine maximum possible block size template<typename T> int get_max_block_size_stencil(T func) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)func); int max_threads = attr.maxThreadsPerBlock; // number of threads has to be multiple of warp size max_threads -= max_threads % max_threads_per_particle; return max_threads; } //! 
recursive template to launch neighborlist with given template parameters /* \tparam cur_tpp Number of threads per particle (assumed to be power of two) */ template<int cur_tpp> inline void stencil_launcher(unsigned int* d_nlist, unsigned int* d_n_neigh, Scalar4* d_last_updated_pos, unsigned int* d_conditions, const unsigned int* d_Nmax, const size_t* d_head_list, const unsigned int* d_pid_map, const Scalar4* d_pos, const unsigned int* d_body, const Scalar* d_diameter, const unsigned int N, const unsigned int* d_cell_size, const Scalar4* d_cell_xyzf, const Scalar4* d_cell_tdb, const Index3D& ci, const Index2D& cli, const Scalar4* d_stencil, const unsigned int* d_n_stencil, const Index2D& stencil_idx, const BoxDim& box, const Scalar* d_r_cut, const Scalar r_buff, const unsigned int ntypes, const Scalar3& ghost_width, bool filter_body, bool diameter_shift, const unsigned int threads_per_particle, const unsigned int block_size, const hipDeviceProp_t& devprop) { // shared memory = r_listsq + Nmax + stuff needed for neighborlist (computed below) Index2D typpair_idx(ntypes); unsigned int shared_size = (unsigned int)(sizeof(Scalar) * typpair_idx.getNumElements() + sizeof(unsigned int) * ntypes); if (shared_size > devprop.sharedMemPerBlock) { throw std::runtime_error("Neighborlist r_cut matrix exceeds the available shared memory " "per block."); } if (threads_per_particle == cur_tpp && cur_tpp != 0) { if (!diameter_shift && !filter_body) { unsigned int max_block_size; max_block_size = get_max_block_size_stencil(gpu_compute_nlist_stencil_kernel<0, cur_tpp>); unsigned int run_block_size = (block_size < max_block_size) ? block_size : max_block_size; dim3 grid(N / (block_size / threads_per_particle) + 1); hipLaunchKernelGGL((gpu_compute_nlist_stencil_kernel<0, cur_tpp>), dim3(grid), dim3(run_block_size), shared_size, 0, d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, d_pid_map, d_pos, d_body, d_diameter, N, d_cell_size, d_cell_xyzf, d_cell_tdb, ci, cli, d_stencil, d_n_stencil, stencil_idx, box, d_r_cut, r_buff, ntypes, ghost_width); } else if (!diameter_shift && filter_body) { unsigned int max_block_size; max_block_size = get_max_block_size_stencil(gpu_compute_nlist_stencil_kernel<1, cur_tpp>); unsigned int run_block_size = (block_size < max_block_size) ? block_size : max_block_size; dim3 grid(N / (block_size / threads_per_particle) + 1); hipLaunchKernelGGL((gpu_compute_nlist_stencil_kernel<1, cur_tpp>), dim3(grid), dim3(run_block_size), shared_size, 0, d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, d_pid_map, d_pos, d_body, d_diameter, N, d_cell_size, d_cell_xyzf, d_cell_tdb, ci, cli, d_stencil, d_n_stencil, stencil_idx, box, d_r_cut, r_buff, ntypes, ghost_width); } else if (diameter_shift && !filter_body) { unsigned int max_block_size; max_block_size = get_max_block_size_stencil(gpu_compute_nlist_stencil_kernel<2, cur_tpp>); unsigned int run_block_size = (block_size < max_block_size) ? 
block_size : max_block_size; dim3 grid(N / (block_size / threads_per_particle) + 1); hipLaunchKernelGGL((gpu_compute_nlist_stencil_kernel<2, cur_tpp>), dim3(grid), dim3(run_block_size), shared_size, 0, d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, d_pid_map, d_pos, d_body, d_diameter, N, d_cell_size, d_cell_xyzf, d_cell_tdb, ci, cli, d_stencil, d_n_stencil, stencil_idx, box, d_r_cut, r_buff, ntypes, ghost_width); } else if (diameter_shift && filter_body) { unsigned int max_block_size; max_block_size = get_max_block_size_stencil(gpu_compute_nlist_stencil_kernel<3, cur_tpp>); unsigned int run_block_size = (block_size < max_block_size) ? block_size : max_block_size; dim3 grid(N / (block_size / threads_per_particle) + 1); hipLaunchKernelGGL((gpu_compute_nlist_stencil_kernel<3, cur_tpp>), dim3(grid), dim3(run_block_size), shared_size, 0, d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, d_pid_map, d_pos, d_body, d_diameter, N, d_cell_size, d_cell_xyzf, d_cell_tdb, ci, cli, d_stencil, d_n_stencil, stencil_idx, box, d_r_cut, r_buff, ntypes, ghost_width); } } else { stencil_launcher<cur_tpp / 2>(d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, d_pid_map, d_pos, d_body, d_diameter, N, d_cell_size, d_cell_xyzf, d_cell_tdb, ci, cli, d_stencil, d_n_stencil, stencil_idx, box, d_r_cut, r_buff, ntypes, ghost_width, filter_body, diameter_shift, threads_per_particle, block_size, devprop); } } //! template specialization to terminate recursion template<> inline void stencil_launcher<min_threads_per_particle / 2>(unsigned int* d_nlist, unsigned int* d_n_neigh, Scalar4* d_last_updated_pos, unsigned int* d_conditions, const unsigned int* d_Nmax, const size_t* d_head_list, const unsigned int* d_pid_map, const Scalar4* d_pos, const unsigned int* d_body, const Scalar* d_diameter, const unsigned int N, const unsigned int* d_cell_size, const Scalar4* d_cell_xyzf, const Scalar4* d_cell_tdb, const Index3D& ci, const Index2D& cli, const Scalar4* d_stencil, const unsigned int* d_n_stencil, const Index2D& stencil_idx, const BoxDim& box, const Scalar* d_r_cut, const Scalar r_buff, const unsigned int ntypes, const Scalar3& ghost_width, bool filter_body, bool diameter_shift, const unsigned int threads_per_particle, const unsigned int block_size, const hipDeviceProp_t& devprop) { } hipError_t gpu_compute_nlist_stencil(unsigned int* d_nlist, unsigned int* d_n_neigh, Scalar4* d_last_updated_pos, unsigned int* d_conditions, const unsigned int* d_Nmax, const size_t* d_head_list, const unsigned int* d_pid_map, const Scalar4* d_pos, const unsigned int* d_body, const Scalar* d_diameter, const unsigned int N, const unsigned int* d_cell_size, const Scalar4* d_cell_xyzf, const Scalar4* d_cell_tdb, const Index3D& ci, const Index2D& cli, const Scalar4* d_stencil, const unsigned int* d_n_stencil, const Index2D& stencil_idx, const BoxDim& box, const Scalar* d_r_cut, const Scalar r_buff, const unsigned int ntypes, const Scalar3& ghost_width, bool filter_body, bool diameter_shift, const unsigned int threads_per_particle, const unsigned int block_size, const hipDeviceProp_t& devprop) { stencil_launcher<max_threads_per_particle>(d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, d_pid_map, d_pos, d_body, d_diameter, N, d_cell_size, d_cell_xyzf, d_cell_tdb, ci, cli, d_stencil, d_n_stencil, stencil_idx, box, d_r_cut, r_buff, ntypes, ghost_width, filter_body, diameter_shift, threads_per_particle, block_size, devprop); return hipSuccess; } /*! 
* \param d_pids Unsorted particle indexes * \param d_types Unsorted particle types * \param d_pos Particle position array * \param N Number of particles * * \a d_pids and \a d_types are trivially initialized to their current (unsorted) values. They are * later sorted in gpu_compute_nlist_stencil_sort_types(). */ __global__ void gpu_compute_nlist_stencil_fill_types_kernel(unsigned int* d_pids, unsigned int* d_types, const Scalar4* d_pos, const unsigned int N) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; Scalar4 pos_i = d_pos[idx]; unsigned int type = __scalar_as_int(pos_i.w); d_types[idx] = type; d_pids[idx] = idx; } /*! * \param d_pids Unsorted particle indexes * \param d_types Unsorted particle types * \param d_pos Particle position array * \param N Number of particles */ hipError_t gpu_compute_nlist_stencil_fill_types(unsigned int* d_pids, unsigned int* d_types, const Scalar4* d_pos, const unsigned int N) { const unsigned int block_size = 128; hipLaunchKernelGGL((gpu_compute_nlist_stencil_fill_types_kernel), dim3(N / block_size + 1), dim3(block_size), 0, 0, d_pids, d_types, d_pos, N); return hipSuccess; } /*! * \param d_pids Array of unsorted particle indexes * \param d_pids_alt Double buffer for particle indexes * \param d_types Array of unsorted particle types * \param d_types_alt Double buffer for particle types * \param d_tmp_storage Temporary allocation for sorting * \param tmp_storage_bytes Size of temporary allocation * \param swap Flag to swap the sorted particle indexes into the correct buffer * \param N number of particles * * This wrapper calls the CUB radix sorting methods, and so it needs to be called twice. Initially, * \a d_tmp_storage should be NULL, and the necessary temporary storage is saved into \a * tmp_storage_bytes. This space must then be allocated into \a d_tmp_storage, and on the second * call, the sorting is performed. */ void gpu_compute_nlist_stencil_sort_types(unsigned int* d_pids, unsigned int* d_pids_alt, unsigned int* d_types, unsigned int* d_types_alt, void* d_tmp_storage, size_t& tmp_storage_bytes, bool& swap, const unsigned int N) { hipcub::DoubleBuffer<unsigned int> d_keys(d_types, d_types_alt); hipcub::DoubleBuffer<unsigned int> d_vals(d_pids, d_pids_alt); hipcub::DeviceRadixSort::SortPairs(d_tmp_storage, tmp_storage_bytes, d_keys, d_vals, N); if (d_tmp_storage != NULL) { swap = (d_vals.selector == 1); } } } // end namespace kernel } // end namespace md } // end namespace hoomd
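// A minimal illustrative sketch of the two-phase cub::DeviceRadixSort::SortPairs pattern that
// the comment above describes: the first call with a null temporary buffer only reports the
// required workspace size, the second call performs the sort. Function and variable names are
// invented for the example; error checking is omitted for brevity.
#include <cub/cub.cuh>
#include <cuda_runtime.h>

void sort_pairs_sketch(unsigned int* d_keys, unsigned int* d_keys_alt,
                       unsigned int* d_vals, unsigned int* d_vals_alt,
                       int num_items) {
  cub::DoubleBuffer<unsigned int> keys(d_keys, d_keys_alt);
  cub::DoubleBuffer<unsigned int> vals(d_vals, d_vals_alt);

  void* d_temp_storage = nullptr;
  size_t temp_storage_bytes = 0;
  // First call: d_temp_storage == nullptr, so CUB only computes temp_storage_bytes.
  cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, keys, vals, num_items);
  cudaMalloc(&d_temp_storage, temp_storage_bytes);
  // Second call: the actual sort; keys.Current()/vals.Current() then point at the sorted
  // buffers, and keys.selector records which of the two buffers that is.
  cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, keys, vals, num_items);
  cudaFree(d_temp_storage);
}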
615d63e5d2dc3a4e33961982fd8befbe7074ef3e.hip
// !!! This is a file automatically generated by hipify!!! #include "cp.h" #include <hip/hip_runtime.h> #include <cstdlib> #include <iostream> #include<vector> using namespace std; inline void check(hipError_t err, const char* context) { if (err != hipSuccess) { std::cerr << "CUDA error: " << context << ": " << hipGetErrorString(err) << std::endl; std::exit(EXIT_FAILURE); } } #define CHECK(x) check(x, #x) inline int static divup(int a, int b) { return (a + b - 1)/b; } inline int static roundup(int a, int b) { return divup(a, b) * b; } __global__ void mykernel(int nx, int ny, const float * data, float * result) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if ( x >= ny || y >= ny || x < y ) { return; } float sum = 0.0; for( int k = 0; k < nx; ++k ) { sum += data[ k + y*nx ] * data[ x*nx + k ]; } result[ x + ny * y ] = sum; } void correlate(int ny, int nx, const float* data, float* result) { //preprocess data into ProcessedData float * ProcessedData = (float * )calloc(nx*ny , sizeof(float)); for( int y = 0; y < ny; y++ ) { float s = 0.0; for( int x = 0; x < nx; x++ ) { s += data[ x + y * nx ]; } s /= (float)nx; float t = 0.0; for( int x = 0; x < nx; x++ ) { ProcessedData[ x + y*nx ] = (double)data[ x + y*nx ] - s; t += ProcessedData[ x + y * nx ] * ProcessedData[ x + y * nx ]; } t = sqrt( t ); for( int x = 0; x < nx; x ++ ) { ProcessedData[ x + y * nx ] = ProcessedData[ x + y * nx ] / t; } } //Allocate memory and copy data to GPU float * dGPU = NULL;//data for GPU CHECK(hipMalloc((void**)&dGPU, nx * ny * sizeof(float))); float * rGPU = NULL;//result for GPU CHECK(hipMalloc((void**)&rGPU, ny * ny * sizeof(float))); CHECK(hipMemcpy(dGPU, ProcessedData, nx * ny * sizeof(float), hipMemcpyHostToDevice)); // Run kernel dim3 dimBlock(8, 8); dim3 dimGrid(roundup(ny, dimBlock.x), roundup(ny, dimBlock.y)); hipLaunchKernelGGL(( mykernel), dim3(dimGrid), dim3(dimBlock), 0, 0, nx, ny, dGPU, rGPU); CHECK(hipGetLastError()); // Copy data back to CPU & release memory CHECK(hipMemcpy(result, rGPU, ny * ny * sizeof(float), hipMemcpyDeviceToHost )); CHECK(hipFree(dGPU)); CHECK(hipFree(rGPU)); std::free(ProcessedData); }
615d63e5d2dc3a4e33961982fd8befbe7074ef3e.cu
#include "cp.h" #include <cuda_runtime.h> #include <cstdlib> #include <iostream> #include<vector> using namespace std; inline void check(cudaError_t err, const char* context) { if (err != cudaSuccess) { std::cerr << "CUDA error: " << context << ": " << cudaGetErrorString(err) << std::endl; std::exit(EXIT_FAILURE); } } #define CHECK(x) check(x, #x) inline int static divup(int a, int b) { return (a + b - 1)/b; } inline int static roundup(int a, int b) { return divup(a, b) * b; } __global__ void mykernel(int nx, int ny, const float * data, float * result) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if ( x >= ny || y >= ny || x < y ) { return; } float sum = 0.0; for( int k = 0; k < nx; ++k ) { sum += data[ k + y*nx ] * data[ x*nx + k ]; } result[ x + ny * y ] = sum; } void correlate(int ny, int nx, const float* data, float* result) { //preprocess data into ProcessedData float * ProcessedData = (float * )calloc(nx*ny , sizeof(float)); for( int y = 0; y < ny; y++ ) { float s = 0.0; for( int x = 0; x < nx; x++ ) { s += data[ x + y * nx ]; } s /= (float)nx; float t = 0.0; for( int x = 0; x < nx; x++ ) { ProcessedData[ x + y*nx ] = (double)data[ x + y*nx ] - s; t += ProcessedData[ x + y * nx ] * ProcessedData[ x + y * nx ]; } t = sqrt( t ); for( int x = 0; x < nx; x ++ ) { ProcessedData[ x + y * nx ] = ProcessedData[ x + y * nx ] / t; } } //Allocate memory and copy data to GPU float * dGPU = NULL;//data for GPU CHECK(cudaMalloc((void**)&dGPU, nx * ny * sizeof(float))); float * rGPU = NULL;//result for GPU CHECK(cudaMalloc((void**)&rGPU, ny * ny * sizeof(float))); CHECK(cudaMemcpy(dGPU, ProcessedData, nx * ny * sizeof(float), cudaMemcpyHostToDevice)); // Run kernel dim3 dimBlock(8, 8); dim3 dimGrid(roundup(ny, dimBlock.x), roundup(ny, dimBlock.y)); mykernel<<<dimGrid, dimBlock>>>( nx, ny, dGPU, rGPU); CHECK(cudaGetLastError()); // Copy data back to CPU & release memory CHECK(cudaMemcpy(result, rGPU, ny * ny * sizeof(float), cudaMemcpyDeviceToHost )); CHECK(cudaFree(dGPU)); CHECK(cudaFree(rGPU)); std::free(ProcessedData); }
04a4b12fa3fa9db4e0c7638db544c6fcd3c652f4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe/util/math_functions.hpp" namespace caffe9 { template <typename Dtype> __global__ void SGDUpdate(int N, Dtype* g, Dtype* h, Dtype momentum, Dtype local_rate) { CUDA_KERNEL_LOOP(i, N) { g[i] = h[i] = momentum*h[i] + local_rate*g[i]; } } template <typename Dtype> void sgd_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum, Dtype local_rate) { SGDUpdate<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, g, h, momentum, local_rate); CUDA_POST_KERNEL_CHECK; } template void sgd_update_gpu<float>(int, float*, float*, float, float); template void sgd_update_gpu<double>(int, double*, double*, double, double); } // namespace caffe9
04a4b12fa3fa9db4e0c7638db544c6fcd3c652f4.cu
#include "caffe/util/math_functions.hpp" namespace caffe9 { template <typename Dtype> __global__ void SGDUpdate(int N, Dtype* g, Dtype* h, Dtype momentum, Dtype local_rate) { CUDA_KERNEL_LOOP(i, N) { g[i] = h[i] = momentum*h[i] + local_rate*g[i]; } } template <typename Dtype> void sgd_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum, Dtype local_rate) { SGDUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, g, h, momentum, local_rate); CUDA_POST_KERNEL_CHECK; } template void sgd_update_gpu<float>(int, float*, float*, float, float); template void sgd_update_gpu<double>(int, double*, double*, double, double); } // namespace caffe9
31d4ff20c612e70f800124d0110246e1e67cdc73.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "im2col_dilated.h" #include "hip/hip_runtime.h" } // src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu // You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE __global__ void im2col_dilated_gpu_kernel(const int n, const float* im_gpu, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, int dilate_rate, float *col_gpu) { int index = blockIdx.x*blockDim.x+threadIdx.x; for(; index < n; index += blockDim.x*gridDim.x){ int w_out = index % width_col; int h_index = index / width_col; int h_out = h_index % height_col; int channel_in = h_index / height_col; int channel_out = channel_in * ksize * ksize; int h_in = h_out * stride - pad; // height offset int w_in = w_out * stride - pad; // width offset float* data_col_ptr = col_gpu; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;//data_col_ptr += channel_out * width_col * height_col + h_out * width_col + w_out const float* data_im_ptr = im_gpu; data_im_ptr += (channel_in * height + h_in) * width + w_in;//data_im_ptr += channel_in * height * width + h_in * width + w_in for (int i = 0; i < ksize; ++i) { // i = row for (int j = 0; j < ksize; ++j) { // j = column int h = h_in + (i+1)*dilate_rate -1; int w = w_in + (j+1)*dilate_rate -1; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[((i+1)*dilate_rate-1)*width + (j+1) * dilate_rate -1] : 0; data_col_ptr += height_col * width_col; // window selection function } } } } void im2col_dilated_gpu(float *im_gpu, int channels, int height, int width, int ksize, int stride, int pad, int dilate_rate, float *col_gpu){ // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int dilate_ksize = (dilate_rate - 1) * (ksize + 1) + ksize; int height_col = (height + 2 * pad - dilate_ksize) / stride + 1; // convolutional layer output height int width_col = (width + 2 * pad - dilate_ksize) / stride + 1; // convolutional layer output width int num_kernels = channels * height_col * width_col; // number of elements in each kernel hipLaunchKernelGGL(( im2col_dilated_gpu_kernel), dim3((num_kernels+BLOCK-1)/BLOCK), dim3(//1girdblock), 2blockthread BLOCK, 0, 0, num_kernels, im_gpu, height, width, ksize, pad,stride, height_col,width_col, dilate_rate, col_gpu); // //hipFree(im_gpu); //hipFree(col_gpu); }
31d4ff20c612e70f800124d0110246e1e67cdc73.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "im2col_dilated.h" #include "cuda.h" } // src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu // You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE __global__ void im2col_dilated_gpu_kernel(const int n, const float* im_gpu, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, int dilate_rate, float *col_gpu) { int index = blockIdx.x*blockDim.x+threadIdx.x; for(; index < n; index += blockDim.x*gridDim.x){ int w_out = index % width_col; int h_index = index / width_col; int h_out = h_index % height_col; int channel_in = h_index / height_col; int channel_out = channel_in * ksize * ksize; int h_in = h_out * stride - pad; // height offset int w_in = w_out * stride - pad; // width offset float* data_col_ptr = col_gpu; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;//data_col_ptr += channel_out * width_col * height_col + h_out * width_col + w_out const float* data_im_ptr = im_gpu; data_im_ptr += (channel_in * height + h_in) * width + w_in;//data_im_ptr += channel_in * height * width + h_in * width + w_in for (int i = 0; i < ksize; ++i) { // i = row for (int j = 0; j < ksize; ++j) { // j = column int h = h_in + (i+1)*dilate_rate -1; int w = w_in + (j+1)*dilate_rate -1; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[((i+1)*dilate_rate-1)*width + (j+1) * dilate_rate -1] : 0; data_col_ptr += height_col * width_col; // 从这里看出这里是一列一列写,因此每次写一个window selection function的区域 } } } } void im2col_dilated_gpu(float *im_gpu, int channels, int height, int width, int ksize, int stride, int pad, int dilate_rate, float *col_gpu){ // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int dilate_ksize = (dilate_rate - 1) * (ksize + 1) + ksize; int height_col = (height + 2 * pad - dilate_ksize) / stride + 1; // convolutional layer output height int width_col = (width + 2 * pad - dilate_ksize) / stride + 1; // convolutional layer output width int num_kernels = channels * height_col * width_col; // number of elements in each kernel im2col_dilated_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK, //参数1:一个gird里有这么多block, 参数2:一个block里有这么多thread BLOCK>>>(num_kernels, im_gpu, height, width, ksize, pad,stride, height_col,width_col, dilate_rate, col_gpu); // 释放内存 //cudaFree(im_gpu); //cudaFree(col_gpu); }
2192bc072773306a9ff8b98d67dfec6a93d0a0cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void RangerUpdate(int N, const Dtype* g_gpu_data, Dtype* g_mut_gpu_diff, Dtype* m_mut_gpu_data, Dtype* v_mut_gpu_data, const Dtype* slow_gpu_data, Dtype* slow_mut_gpu_data, Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate, Dtype N_sma, const Dtype N_sma_threshold, const int t, const int k_thres, const Dtype alpha, const bool use_lookahead ) { CUDA_KERNEL_LOOP(i, N) { float gdiff = g_mut_gpu_diff[i]; float mi = m_mut_gpu_data[i] = m_mut_gpu_data[i]*beta1 + gdiff*(1-beta1); float vi = v_mut_gpu_data[i] = v_mut_gpu_data[i]*beta2 + gdiff*gdiff*(1-beta2); if (N_sma > N_sma_threshold){ g_mut_gpu_diff[i] = corrected_local_rate * mi / (sqrt(vi) + eps_hat); } else{ g_mut_gpu_diff[i] = corrected_local_rate * mi; } if (use_lookahead && ((t%k_thres) == 0)){ // set slow float slow = slow_mut_gpu_data[i] = slow_gpu_data[i] + alpha * ( g_gpu_data[i] - slow_gpu_data[i] ); //p => slow_p //p_diff = p - slow_p g_mut_gpu_diff[i] = g_gpu_data[i] - slow; } } } template <typename Dtype> void ranger_update_gpu(int N, const Dtype* g_gpu_data, Dtype* g_mut_gpu_diff, Dtype* m_mut_gpu_data, Dtype* v_mut_gpu_data, const Dtype* slow_gpu_data, Dtype* slow_mut_gpu_data, Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate, const Dtype N_sma, const Dtype N_sma_threshold, const int t, const int k_thres, const Dtype alpha, const bool use_lookahead ) { RangerUpdate<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, g_gpu_data, g_mut_gpu_diff, m_mut_gpu_data, v_mut_gpu_data, slow_gpu_data, slow_mut_gpu_data, beta1, beta2, eps_hat, corrected_local_rate, N_sma, N_sma_threshold, t, k_thres, alpha, use_lookahead ); CUDA_POST_KERNEL_CHECK; } template void ranger_update_gpu<float>(int, const float*, float*, float*, float*, const float*, float*, float, float, float, float, const float, const float, const int, const int, const float, const bool ); template void ranger_update_gpu<double>(int, const double*, double*, double*, double*, const double*, double*, double, double, double, double, const double, const double, const int, const int, const double, const bool use_lookahead ); } // namespace caffe
2192bc072773306a9ff8b98d67dfec6a93d0a0cb.cu
#include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void RangerUpdate(int N, const Dtype* g_gpu_data, Dtype* g_mut_gpu_diff, Dtype* m_mut_gpu_data, Dtype* v_mut_gpu_data, const Dtype* slow_gpu_data, Dtype* slow_mut_gpu_data, Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate, Dtype N_sma, const Dtype N_sma_threshold, const int t, const int k_thres, const Dtype alpha, const bool use_lookahead ) { CUDA_KERNEL_LOOP(i, N) { float gdiff = g_mut_gpu_diff[i]; float mi = m_mut_gpu_data[i] = m_mut_gpu_data[i]*beta1 + gdiff*(1-beta1); float vi = v_mut_gpu_data[i] = v_mut_gpu_data[i]*beta2 + gdiff*gdiff*(1-beta2); if (N_sma > N_sma_threshold){ g_mut_gpu_diff[i] = corrected_local_rate * mi / (sqrt(vi) + eps_hat); } else{ g_mut_gpu_diff[i] = corrected_local_rate * mi; } if (use_lookahead && ((t%k_thres) == 0)){ // set slow float slow = slow_mut_gpu_data[i] = slow_gpu_data[i] + alpha * ( g_gpu_data[i] - slow_gpu_data[i] ); //p => slow_p //p_diff = p - slow_p g_mut_gpu_diff[i] = g_gpu_data[i] - slow; } } } template <typename Dtype> void ranger_update_gpu(int N, const Dtype* g_gpu_data, Dtype* g_mut_gpu_diff, Dtype* m_mut_gpu_data, Dtype* v_mut_gpu_data, const Dtype* slow_gpu_data, Dtype* slow_mut_gpu_data, Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate, const Dtype N_sma, const Dtype N_sma_threshold, const int t, const int k_thres, const Dtype alpha, const bool use_lookahead ) { RangerUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, g_gpu_data, g_mut_gpu_diff, m_mut_gpu_data, v_mut_gpu_data, slow_gpu_data, slow_mut_gpu_data, beta1, beta2, eps_hat, corrected_local_rate, N_sma, N_sma_threshold, t, k_thres, alpha, use_lookahead ); CUDA_POST_KERNEL_CHECK; } template void ranger_update_gpu<float>(int, const float*, float*, float*, float*, const float*, float*, float, float, float, float, const float, const float, const int, const int, const float, const bool ); template void ranger_update_gpu<double>(int, const double*, double*, double*, double*, const double*, double*, double, double, double, double, const double, const double, const int, const int, const double, const bool use_lookahead ); } // namespace caffe
018f09946b65ec38c7fa8c9b8d90a97eb9fb2f27.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _PRESCAN_CU_ #define _PRESCAN_CU_ // includes, kernels #include <assert.h> #define NUM_BANKS 32 #define LOG_NUM_BANKS 5 // Lab4: You can use any other block size you wish. #define BLOCK_SIZE 1024 #define CONFLICT_FREE_OFFSET(n) \ ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS)) __global__ void scan(float *g_odata, float *g_idata, int blockSize); __global__ void copy(float *from_scanned,float *from_unscanned, float *to, int numCopies); __global__ void addArray(float *from, float *to, int divisor, int numElements); // **===-------- Lab4: Modify the body of this function -----------===** // You may need to make multiple kernel calls, make your own kernel // function in this file, and then call them from here. void prescanArray(float *outArray, float *inArray, int numElements, float *array1, float *array2, float *array3, float *array4) { // each block computes 1024 indices, so 2 indices per thread // do scan on inArray, write back to outArray hipLaunchKernelGGL(( scan), dim3(16384), dim3(512), 0, 0, outArray, inArray, 1024); // read every 1024 elements from outArray and write it to array1 hipLaunchKernelGGL(( copy), dim3(16), dim3(1024), 0, 0, outArray, inArray, array1, 16384); // do scan on array1, write back to array2 hipLaunchKernelGGL(( scan), dim3(16), dim3(512), 0, 0, array2, array1, 1024); // read every 1024 elements from array2 and write it to array3 hipLaunchKernelGGL(( copy), dim3(1), dim3(16), 0, 0, array2, array1, array3, 16); // do scan on array3, write back to array4 hipLaunchKernelGGL(( scan), dim3(1), dim3(8), 0, 0, array4, array3, 16); // add array2[i] to outArray[i*1048576:((i+1)*1048576)-1] hipLaunchKernelGGL(( addArray), dim3(16384), dim3(1024), 0, 0, array4, outArray, 1024*1024, numElements); // add array1[i] to outArray[i*1024:((i+1)*1024)-1] hipLaunchKernelGGL(( addArray), dim3(16384), dim3(1024), 0, 0, array2, outArray, 1024, numElements); } // **===-----------------------------------------------------------===** // read every 1024 elements from from, and write it to to __global__ void copy(float *from_scanned, float *from_unscanned, float *to, int numCopies) { int thid = blockIdx.x * blockDim.x + threadIdx.x; int numThreads = gridDim.x * blockDim.x; //if (thid < numCopies) for (int i = thid; i < numCopies; i += numThreads) { //to[i] = from[(i + 1) * 1024 - 1]; to[i] = from_scanned[(i + 1) * 1024 - 1] + from_unscanned[(i + 1) * 1024 - 1]; } } // do scan on g_idata, write back to g_odata __global__ void scan(float *g_odata, float *g_idata, int blockSize) { __shared__ float temp[1024]; int allid = blockIdx.x * blockDim.x + threadIdx.x; int thid = threadIdx.x; int offset = 1; temp[2*thid] = g_idata[2*allid]; // load input into shared memory temp[2*thid+1] = g_idata[2*allid+1]; for (int d = blockSize>>1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) { temp[blockSize - 1] = 0; } // clear the last element for (int d = 1; d < blockSize; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; float t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[2*allid] = temp[2*thid]; // write results to device memory g_odata[2*allid+1] = temp[2*thid+1]; } // add from[i] to to[i*divisor:((i+1)*divisor)-1] __global__ void 
addArray(float *from, float *to, int divisor, int numElements) { int numThreads = gridDim.x * blockDim.x; int thid = blockDim.x * blockIdx.x + threadIdx.x; for (int i = thid; i < numElements; i += numThreads) { to[i] += from[(i / divisor)]; } } /* // below is scan optimized for bank conflicts __global__ void scan(float *g_odata, float *g_idata, int n) { __shared__ float temp[1024]; int thid = blockIdx.x * blockDim.x + threadIdx.x; int offset = 1; int ai = thid; int bi = thid + (n/2); int bankOffsetA = CONFLICT_FREE_OFFSET(ai); int bankOffsetB = CONFLICT_FREE_OFFSET(bi); temp[ai + bankOffsetA] = g_idata[ai]; temp[bi + bankOffsetB] = g_idata[bi]; // load input into shared memory for (int d = n>>1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); temp[bi] += temp[ai]; } offset *= 2; } if (thid==0) { temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;} // clear the last element for (int d = 1; d < n; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); float t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[ai] = temp[ai + bankOffsetA]; g_odata[bi] = temp[bi + bankOffsetB]; // write results to device memory } */ #endif // _PRESCAN_CU_
018f09946b65ec38c7fa8c9b8d90a97eb9fb2f27.cu
#ifndef _PRESCAN_CU_ #define _PRESCAN_CU_ // includes, kernels #include <assert.h> #define NUM_BANKS 32 #define LOG_NUM_BANKS 5 // Lab4: You can use any other block size you wish. #define BLOCK_SIZE 1024 #define CONFLICT_FREE_OFFSET(n) \ ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS)) __global__ void scan(float *g_odata, float *g_idata, int blockSize); __global__ void copy(float *from_scanned,float *from_unscanned, float *to, int numCopies); __global__ void addArray(float *from, float *to, int divisor, int numElements); // **===-------- Lab4: Modify the body of this function -----------===** // You may need to make multiple kernel calls, make your own kernel // function in this file, and then call them from here. void prescanArray(float *outArray, float *inArray, int numElements, float *array1, float *array2, float *array3, float *array4) { // each block computes 1024 indices, so 2 indices per thread // do scan on inArray, write back to outArray scan<<<16384, 512>>>(outArray, inArray, 1024); // read every 1024 elements from outArray and write it to array1 copy<<<16, 1024>>>(outArray, inArray, array1, 16384); // do scan on array1, write back to array2 scan<<<16, 512>>>(array2, array1, 1024); // read every 1024 elements from array2 and write it to array3 copy<<<1, 16>>>(array2, array1, array3, 16); // do scan on array3, write back to array4 scan<<<1, 8>>>(array4, array3, 16); // add array2[i] to outArray[i*1048576:((i+1)*1048576)-1] addArray<<<16384, 1024>>>(array4, outArray, 1024*1024, numElements); // add array1[i] to outArray[i*1024:((i+1)*1024)-1] addArray<<<16384, 1024>>>(array2, outArray, 1024, numElements); } // **===-----------------------------------------------------------===** // read every 1024 elements from from, and write it to to __global__ void copy(float *from_scanned, float *from_unscanned, float *to, int numCopies) { int thid = blockIdx.x * blockDim.x + threadIdx.x; int numThreads = gridDim.x * blockDim.x; //if (thid < numCopies) for (int i = thid; i < numCopies; i += numThreads) { //to[i] = from[(i + 1) * 1024 - 1]; to[i] = from_scanned[(i + 1) * 1024 - 1] + from_unscanned[(i + 1) * 1024 - 1]; } } // do scan on g_idata, write back to g_odata __global__ void scan(float *g_odata, float *g_idata, int blockSize) { __shared__ float temp[1024]; int allid = blockIdx.x * blockDim.x + threadIdx.x; int thid = threadIdx.x; int offset = 1; temp[2*thid] = g_idata[2*allid]; // load input into shared memory temp[2*thid+1] = g_idata[2*allid+1]; for (int d = blockSize>>1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) { temp[blockSize - 1] = 0; } // clear the last element for (int d = 1; d < blockSize; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; float t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[2*allid] = temp[2*thid]; // write results to device memory g_odata[2*allid+1] = temp[2*thid+1]; } // add from[i] to to[i*divisor:((i+1)*divisor)-1] __global__ void addArray(float *from, float *to, int divisor, int numElements) { int numThreads = gridDim.x * blockDim.x; int thid = blockDim.x * blockIdx.x + threadIdx.x; for (int i = thid; i < numElements; i += numThreads) { to[i] += from[(i / divisor)]; } } /* // below is scan optimized for bank conflicts __global__ void scan(float *g_odata, float *g_idata, int 
n) { __shared__ float temp[1024]; int thid = blockIdx.x * blockDim.x + threadIdx.x; int offset = 1; int ai = thid; int bi = thid + (n/2); int bankOffsetA = CONFLICT_FREE_OFFSET(ai); int bankOffsetB = CONFLICT_FREE_OFFSET(bi); temp[ai + bankOffsetA] = g_idata[ai]; temp[bi + bankOffsetB] = g_idata[bi]; // load input into shared memory for (int d = n>>1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); temp[bi] += temp[ai]; } offset *= 2; } if (thid==0) { temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;} // clear the last element for (int d = 1; d < n; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset*(2*thid+1)-1; int bi = offset*(2*thid+2)-1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); float t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[ai] = temp[ai + bankOffsetA]; g_odata[bi] = temp[bi + bankOffsetB]; // write results to device memory } */ #endif // _PRESCAN_CU_
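// A serial exclusive prefix sum, handy as a reference when validating the three-level block
// scan performed by prescanArray() above; the function name is a placeholder.
static void exclusive_scan_cpu(const float* in, float* out, int n) {
    float running = 0.0f;
    for (int i = 0; i < n; ++i) {
        out[i] = running;   // exclusive: element i receives the sum of in[0..i-1]
        running += in[i];
    }
}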
8d76aae7bc81f59600f6666498843c788cefad4c.hip
// !!! This is a file automatically generated by hipify!!! // needed for thrust compilation ... #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/inner_product.h> #include <thrust/functional.h> #include <thrust/extrema.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <complex.h> #include <stdint.h> #include <hip/hip_runtime.h> #include <hipfft.h> #include <hip/hip_complex.h> // custom includes #include <utils/compare_thrust.cuh> #include <Coulomb/Coulomb.hpp> #define SQRT2 ((double) sqrt(2.)) #define SQRTPI ((double) 1.7724538509055159) inline __complex__ double gaussian3D(double x, double y,double z, double sigma) { return cexp( -1.*(x*x + y*y + z*z)/( 2.*sigma*sigma) ); //return 1. + I*0.; } inline void fill_array(__complex__ double *psi, double *params) { #pragma omp parallel for num_threads(8) for (int ixyz = 0; ixyz < NXYZ; ixyz++) { int ix,iy,iz,i; ixyz2ixiyiz(ixyz,ix,iy,iz,i); psi[ixyz] = pow(1./SQRTPI/params[0],1.5) * gaussian3D((double) ix-NX/2.,(double) iy-NY/2.,(double) iz-NZ/2.,params[0]) + I*0.; } } inline void analytical(double* potential, double* params) { #pragma omp parallel for num_threads(8) for (int ixyz = 0; ixyz < NXYZ; ixyz++) { int ix,iy,iz,i; ixyz2ixiyiz(ixyz,ix,iy,iz,i); const double r = sqrt( pow((double) ix-NX/2.,2) + pow((double) iy-NY/2.,2) + pow((double) iz-NZ/2.,2) ); potential[ixyz] = (r == 0) ? params[1]*params[1] / ( 2.0 * SQRTPI*SQRTPI*SQRTPI * params[0]) : params[1]*params[1] * erf( r / params[0] ) / (4. * M_PI * r); } } template<typename T> inline void save_file(const char* filename, void* data, size_t size) { FILE* file = fopen(filename,"wb"); fwrite(data,(size_t) size * sizeof(T),1,file); fclose(file); } int main(int argc, char* argv[]) { __complex__ double *h_psi; // used as a buffer on host side, be careful of 'magic' with pointer casting double* h_analytical; hipDoubleComplex *d_psi; double *d_vcoulomb, *d_analytical; cuErrCheck( hipHostMalloc((void**) &h_psi,(size_t) (NX+2*CX)*(NY+2*CY)*(NZ+2*CZ) * sizeof(__complex__ double)) ); cuErrCheck( hipHostMalloc((void**) &h_analytical,(size_t) NX*NY*NZ * sizeof(double)) ); cuErrCheck( hipMalloc((void**) &d_psi,(size_t) NX*NY*NZ * sizeof(hipDoubleComplex)) ); cuErrCheck( hipMalloc((void**) &d_vcoulomb,(size_t) NX*NY*NZ * sizeof(double)) ); cuErrCheck( hipMalloc((void**) &d_analytical,(size_t) NX*NY*NZ * sizeof(double)) ); printf("cx: %u %u %u\n",CX,CY,CZ); printf("Nx: %ux%ux%u\r%lu\n",NX,NY,NZ,NXYZ); // initialize Coulomb double sigma = 3.; // rather aho double charge = 1.0; Coulomb* c = new Coulomb(); c->set_charge(charge); double lcutoff = 0.; unsigned lattice[6] = {0}; c->get_lattice(lattice, &lcutoff); // get analytical result double params[2] = { sigma, charge }; // a_ho, charge analytical(h_analytical,params); save_file<double>("analytical.bin",(void*) h_analytical, NX*NY*NZ ); cuErrCheck( hipMemcpy(d_analytical, h_analytical, (size_t) NX*NY*NZ * sizeof(double), hipMemcpyHostToDevice) ); // init timing file char timing_filename[256]; snprintf(timing_filename, 256, "timing%ux%ux%u.dat", lattice[0], lattice[1], lattice[2]); FILE* file_timing = fopen(timing_filename,"w"); fprintf(file_timing,"nx\tny\tnz\tthreads\tenlarging\t\t\tcoulomb\t\t\tlessening\n"); unsigned threads = 1024; //for (unsigned threads = 32; threads <= 1024; threads += 32) //{ printf("Threads: %u\n",threads); // timing double mean_time_enlarging = 0.0; double mean_time_coulomb = 0.0; double mean_time_lessening = 0.0; double var_time_enlarging = 0.0; double var_time_coulomb = 0.0; 
double var_time_lessening = 0.0; unsigned iters = 100; for (unsigned it = 0; it < iters; it++) { // fill orginal array with values fill_array(h_psi,&sigma); cuErrCheck( hipMemcpy( d_psi, h_psi, (size_t) NX*NY*NZ * sizeof(hipDoubleComplex), hipMemcpyHostToDevice) ); if (it == 0) save_file<hipDoubleComplex>("orginal3D.bin",(void*) h_psi, NX*NY*NZ); // timing double time_enlarging = 0.0; double time_coulomb = 0.0; double time_lessening = 0.0; // get density from given wavefunction and save to enlarged array c->get_density_enlarged(d_psi,(double*) h_psi,&time_enlarging,threads); //if (it == 0) save_file<double>("data/resized3D.bin",(void*) h_psi, (NX+2*CX)*(NY+2*CY)*(NZ+2*CZ) ); // perform FFTs and convole with Coulomb kernel c->get_vcoulomb_enlarged((hipDoubleComplex*) h_psi, &time_coulomb,192,5); //if (it == 0) save_file<hipDoubleComplex>("data/density_transform3D.bin",(void*) h_psi, (NX+2*CX)*(NY+2*CY)*(NZ+2*CZ)); // truncate vcoulomb array to size of orginal array and save to given location c->get_vcoulomb_lessened(d_vcoulomb, (double*) h_psi, &time_lessening,threads); if (it == 0) save_file<double>("vcoulomb.bin",(void*) h_psi, NXYZ); int differences = 0; differences = thrust_compare_arrays<double,-10>(d_vcoulomb, d_analytical, (size_t) NX*NY*NZ); if (differences) printf("WARNING: Number of differences: %d\n",differences); mean_time_enlarging += time_enlarging; mean_time_coulomb += time_coulomb; mean_time_lessening += time_lessening; var_time_enlarging += time_enlarging*time_enlarging; var_time_coulomb += time_coulomb*time_coulomb; var_time_lessening += time_lessening*time_lessening; } mean_time_enlarging /= iters; mean_time_coulomb /= iters; mean_time_lessening /= iters; var_time_enlarging = (var_time_enlarging - mean_time_enlarging*mean_time_enlarging)/(iters - 1); var_time_coulomb = (var_time_coulomb - mean_time_coulomb*mean_time_coulomb)/(iters - 1); var_time_lessening = (var_time_lessening - mean_time_lessening*mean_time_lessening)/(iters - 1); fprintf(file_timing,"%u\t%u\t%u\t%u\t%.6e\t%.6e\t%.6e\t%.6e\t%.6e\t%.6e\n", lattice[0],lattice[1],lattice[2],threads, mean_time_enlarging,var_time_enlarging, mean_time_coulomb,var_time_coulomb, mean_time_lessening,var_time_lessening); //printf("\n\n"); //} c->save_info(sigma); // close files fclose(file_timing); // clean memory delete c; hipFree(d_psi); hipFree(d_vcoulomb); hipFree(d_analytical); hipHostFree(h_psi); hipHostFree(h_analytical); return EXIT_SUCCESS; }
8d76aae7bc81f59600f6666498843c788cefad4c.cu
// needed for thrust compilation ... #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/inner_product.h> #include <thrust/functional.h> #include <thrust/extrema.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <complex.h> #include <stdint.h> #include <cuda.h> #include <cufft.h> #include <cuComplex.h> // custom includes #include <utils/compare_thrust.cuh> #include <Coulomb/Coulomb.hpp> #define SQRT2 ((double) sqrt(2.)) #define SQRTPI ((double) 1.7724538509055159) inline __complex__ double gaussian3D(double x, double y,double z, double sigma) { return cexp( -1.*(x*x + y*y + z*z)/( 2.*sigma*sigma) ); //return 1. + I*0.; } inline void fill_array(__complex__ double *psi, double *params) { #pragma omp parallel for num_threads(8) for (int ixyz = 0; ixyz < NXYZ; ixyz++) { int ix,iy,iz,i; ixyz2ixiyiz(ixyz,ix,iy,iz,i); psi[ixyz] = pow(1./SQRTPI/params[0],1.5) * gaussian3D((double) ix-NX/2.,(double) iy-NY/2.,(double) iz-NZ/2.,params[0]) + I*0.; } } inline void analytical(double* potential, double* params) { #pragma omp parallel for num_threads(8) for (int ixyz = 0; ixyz < NXYZ; ixyz++) { int ix,iy,iz,i; ixyz2ixiyiz(ixyz,ix,iy,iz,i); const double r = sqrt( pow((double) ix-NX/2.,2) + pow((double) iy-NY/2.,2) + pow((double) iz-NZ/2.,2) ); potential[ixyz] = (r == 0) ? params[1]*params[1] / ( 2.0 * SQRTPI*SQRTPI*SQRTPI * params[0]) : params[1]*params[1] * erf( r / params[0] ) / (4. * M_PI * r); } } template<typename T> inline void save_file(const char* filename, void* data, size_t size) { FILE* file = fopen(filename,"wb"); fwrite(data,(size_t) size * sizeof(T),1,file); fclose(file); } int main(int argc, char* argv[]) { __complex__ double *h_psi; // used as a buffer on host side, be careful of 'magic' with pointer casting double* h_analytical; cuDoubleComplex *d_psi; double *d_vcoulomb, *d_analytical; cuErrCheck( cudaMallocHost((void**) &h_psi,(size_t) (NX+2*CX)*(NY+2*CY)*(NZ+2*CZ) * sizeof(__complex__ double)) ); cuErrCheck( cudaMallocHost((void**) &h_analytical,(size_t) NX*NY*NZ * sizeof(double)) ); cuErrCheck( cudaMalloc((void**) &d_psi,(size_t) NX*NY*NZ * sizeof(cuDoubleComplex)) ); cuErrCheck( cudaMalloc((void**) &d_vcoulomb,(size_t) NX*NY*NZ * sizeof(double)) ); cuErrCheck( cudaMalloc((void**) &d_analytical,(size_t) NX*NY*NZ * sizeof(double)) ); printf("cx: %u %u %u\n",CX,CY,CZ); printf("Nx: %ux%ux%u\r%lu\n",NX,NY,NZ,NXYZ); // initialize Coulomb double sigma = 3.; // rather aho double charge = 1.0; Coulomb* c = new Coulomb(); c->set_charge(charge); double lcutoff = 0.; unsigned lattice[6] = {0}; c->get_lattice(lattice, &lcutoff); // get analytical result double params[2] = { sigma, charge }; // a_ho, charge analytical(h_analytical,params); save_file<double>("analytical.bin",(void*) h_analytical, NX*NY*NZ ); cuErrCheck( cudaMemcpy(d_analytical, h_analytical, (size_t) NX*NY*NZ * sizeof(double), cudaMemcpyHostToDevice) ); // init timing file char timing_filename[256]; snprintf(timing_filename, 256, "timing%ux%ux%u.dat", lattice[0], lattice[1], lattice[2]); FILE* file_timing = fopen(timing_filename,"w"); fprintf(file_timing,"nx\tny\tnz\tthreads\tenlarging\t\t\tcoulomb\t\t\tlessening\n"); unsigned threads = 1024; //for (unsigned threads = 32; threads <= 1024; threads += 32) //{ printf("Threads: %u\n",threads); // timing double mean_time_enlarging = 0.0; double mean_time_coulomb = 0.0; double mean_time_lessening = 0.0; double var_time_enlarging = 0.0; double var_time_coulomb = 0.0; double var_time_lessening = 0.0; unsigned iters = 100; for (unsigned it = 
0; it < iters; it++) { // fill orginal array with values fill_array(h_psi,&sigma); cuErrCheck( cudaMemcpy( d_psi, h_psi, (size_t) NX*NY*NZ * sizeof(cuDoubleComplex), cudaMemcpyHostToDevice) ); if (it == 0) save_file<cuDoubleComplex>("orginal3D.bin",(void*) h_psi, NX*NY*NZ); // timing double time_enlarging = 0.0; double time_coulomb = 0.0; double time_lessening = 0.0; // get density from given wavefunction and save to enlarged array c->get_density_enlarged(d_psi,(double*) h_psi,&time_enlarging,threads); //if (it == 0) save_file<double>("data/resized3D.bin",(void*) h_psi, (NX+2*CX)*(NY+2*CY)*(NZ+2*CZ) ); // perform FFTs and convole with Coulomb kernel c->get_vcoulomb_enlarged((cuDoubleComplex*) h_psi, &time_coulomb,192,5); //if (it == 0) save_file<cuDoubleComplex>("data/density_transform3D.bin",(void*) h_psi, (NX+2*CX)*(NY+2*CY)*(NZ+2*CZ)); // truncate vcoulomb array to size of orginal array and save to given location c->get_vcoulomb_lessened(d_vcoulomb, (double*) h_psi, &time_lessening,threads); if (it == 0) save_file<double>("vcoulomb.bin",(void*) h_psi, NXYZ); int differences = 0; differences = thrust_compare_arrays<double,-10>(d_vcoulomb, d_analytical, (size_t) NX*NY*NZ); if (differences) printf("WARNING: Number of differences: %d\n",differences); mean_time_enlarging += time_enlarging; mean_time_coulomb += time_coulomb; mean_time_lessening += time_lessening; var_time_enlarging += time_enlarging*time_enlarging; var_time_coulomb += time_coulomb*time_coulomb; var_time_lessening += time_lessening*time_lessening; } mean_time_enlarging /= iters; mean_time_coulomb /= iters; mean_time_lessening /= iters; var_time_enlarging = (var_time_enlarging - mean_time_enlarging*mean_time_enlarging)/(iters - 1); var_time_coulomb = (var_time_coulomb - mean_time_coulomb*mean_time_coulomb)/(iters - 1); var_time_lessening = (var_time_lessening - mean_time_lessening*mean_time_lessening)/(iters - 1); fprintf(file_timing,"%u\t%u\t%u\t%u\t%.6e\t%.6e\t%.6e\t%.6e\t%.6e\t%.6e\n", lattice[0],lattice[1],lattice[2],threads, mean_time_enlarging,var_time_enlarging, mean_time_coulomb,var_time_coulomb, mean_time_lessening,var_time_lessening); //printf("\n\n"); //} c->save_info(sigma); // close files fclose(file_timing); // clean memory delete c; cudaFree(d_psi); cudaFree(d_vcoulomb); cudaFree(d_analytical); cudaFreeHost(h_psi); cudaFreeHost(h_analytical); return EXIT_SUCCESS; }
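// A small sketch of accumulating a timing mean and unbiased sample variance from per-iteration
// measurements, as is done for the enlarging/coulomb/lessening phases above; note that the
// unbiased form subtracts iters*mean^2 from the accumulated sum of squares. Names are
// placeholders.
static void timing_stats(const double* times, unsigned iters, double* mean, double* var) {
    double sum = 0.0, sum_sq = 0.0;
    for (unsigned i = 0; i < iters; ++i) {
        sum    += times[i];
        sum_sq += times[i] * times[i];
    }
    *mean = sum / iters;
    *var  = (sum_sq - iters * (*mean) * (*mean)) / (iters - 1);  // unbiased sample variance
}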
aace5ba653a8ede8bb329cc3db755b6bcbc6e40b.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <fstream> #include <iomanip> #include <iostream> #include <set> #include <sstream> #include <string> #include "../NVConv2d.cuh" #include "../cp4Conv2d.cuh" using namespace std; vector<tensor_shape> get_unique_ordered_shapes(vector<tensor_shape> input) { vector<tensor_shape> output; set<tensor_shape> seen; for (auto& shape : input) { if (seen.count(shape) == 0) output.push_back(shape); seen.insert(shape); } return output; } int main(int argc, char** argv) { ifstream tensors(argv[1]); streambuf* output_buffer = std::cout.rdbuf(); ofstream of; int device = 0; switch (argc) { case 4: device = atoi(argv[3]); case 3: of.open(argv[2]); output_buffer = of.rdbuf(); case 2: break; default: cerr << "USAGE: BenchBoth " " Tensor_file " " [Results_file] " " [device_number]" << endl; return 1; } ostream results(output_buffer); results << showpoint << setw(5); results << "N C H W pad T Y X, cuDNN, Rank 1, Rank 2, Rank 4, Rank 8, Rank 16" << endl; if (!tensors.is_open()) { cerr << "Couldn't open tensors file.\n"; return 1; } vector<tensor_shape> shapes; string line; while (getline(tensors, line)) { if (line[0] == '#' || line.empty()) continue; stringstream line_sm(line); unsigned N, H, W, C, pad, T, Y, X; line_sm >> N >> C >> H >> W >> pad >> T >> Y >> X; tensor_shape params; params.N = N; params.C = C; params.H = H; params.W = W; params.pad = pad; params.T = T; params.Y = Y; params.X = X; shapes.push_back(params); } shapes = get_unique_ordered_shapes(shapes); hipSetDevice(device); for (auto& p : shapes) { results << p.N << " " << p.C << " " << p.H << " " << p.W << " " << p.pad << " " << p.T << " " << p.Y << " " << p.X; p.Rank = 0; pair<float, unsigned> x = NV::run_convolution(p, 47); /* float us = get<0>(x); */ unsigned bytes = get<1>(x); results << ", " << bytes / 1024; for (int r = 1; r <= 16; r *= 2) { p.Rank = r; pair<float, unsigned> x = CP::run_convolution(p, 47); /* float us = get<0>(x); */ unsigned bytes = get<1>(x); results << ", " << bytes / 1024; } results << endl; } }
aace5ba653a8ede8bb329cc3db755b6bcbc6e40b.cu
#include <algorithm> #include <fstream> #include <iomanip> #include <iostream> #include <set> #include <sstream> #include <string> #include "../NVConv2d.cuh" #include "../cp4Conv2d.cuh" using namespace std; vector<tensor_shape> get_unique_ordered_shapes(vector<tensor_shape> input) { vector<tensor_shape> output; set<tensor_shape> seen; for (auto& shape : input) { if (seen.count(shape) == 0) output.push_back(shape); seen.insert(shape); } return output; } int main(int argc, char** argv) { ifstream tensors(argv[1]); streambuf* output_buffer = std::cout.rdbuf(); ofstream of; int device = 0; switch (argc) { case 4: device = atoi(argv[3]); case 3: of.open(argv[2]); output_buffer = of.rdbuf(); case 2: break; default: cerr << "USAGE: BenchBoth " " Tensor_file " " [Results_file] " " [device_number]" << endl; return 1; } ostream results(output_buffer); results << showpoint << setw(5); results << "N C H W pad T Y X, cuDNN, Rank 1, Rank 2, Rank 4, Rank 8, Rank 16" << endl; if (!tensors.is_open()) { cerr << "Couldn't open tensors file.\n"; return 1; } vector<tensor_shape> shapes; string line; while (getline(tensors, line)) { if (line[0] == '#' || line.empty()) continue; stringstream line_sm(line); unsigned N, H, W, C, pad, T, Y, X; line_sm >> N >> C >> H >> W >> pad >> T >> Y >> X; tensor_shape params; params.N = N; params.C = C; params.H = H; params.W = W; params.pad = pad; params.T = T; params.Y = Y; params.X = X; shapes.push_back(params); } shapes = get_unique_ordered_shapes(shapes); cudaSetDevice(device); for (auto& p : shapes) { results << p.N << " " << p.C << " " << p.H << " " << p.W << " " << p.pad << " " << p.T << " " << p.Y << " " << p.X; p.Rank = 0; pair<float, unsigned> x = NV::run_convolution(p, 47); /* float us = get<0>(x); */ unsigned bytes = get<1>(x); results << ", " << bytes / 1024; for (int r = 1; r <= 16; r *= 2) { p.Rank = r; pair<float, unsigned> x = CP::run_convolution(p, 47); /* float us = get<0>(x); */ unsigned bytes = get<1>(x); results << ", " << bytes / 1024; } results << endl; } }
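// The order-preserving de-duplication idiom used by get_unique_ordered_shapes() above, shown on
// plain ints: a std::set remembers what has been seen while the output vector keeps
// first-occurrence order. Names are placeholders.
#include <set>
#include <vector>
static std::vector<int> unique_ordered(const std::vector<int>& input) {
    std::vector<int> output;
    std::set<int> seen;
    for (int v : input) {
        if (seen.insert(v).second)   // insert() reports whether v was newly added
            output.push_back(v);
    }
    return output;
}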
1ca7c9323f8134046836e725e8e8270598116106.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../common/common.h" #include <stdio.h> #include <stdlib.h> /** * This example illustrates the difference between using atomic operations and * using unsafe accesses to increment a shared variable. * * In both the atomics() and unsafe() kernels, each thread repeatedly increments * a globally shared variable by 1. Each thread also stores the value it reads * from the shared location for the first increment. **/ /** * This version of the kernel uses atomic operations to safely increment a * shared variable from multiple threads. **/ __global__ void atomics(int *shared_var, int *values_read, int N, int iters) { int i; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= N) return; values_read[tid] = atomicAdd(shared_var, 1); for (i = 0; i < iters; i++) { atomicAdd(shared_var, 1); } } /** * This version of the kernel performs the same increments as atomics() but in * an unsafe manner. **/ __global__ void unsafe(int *shared_var, int *values_read, int N, int iters) { int i; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= N) return; int old = *shared_var; *shared_var = old + 1; values_read[tid] = old; for (i = 0; i < iters; i++) { int old = *shared_var; *shared_var = old + 1; } } /** * Utility function for printing the contents of an array. **/ static void print_read_results(int *h_arr, int *d_arr, int N, const char *label) { int i; int maxNumToPrint = 10; int nToPrint = N > maxNumToPrint ? maxNumToPrint : N; CHECK(hipMemcpy(h_arr, d_arr, nToPrint * sizeof(int), hipMemcpyDeviceToHost)); printf("Threads performing %s operations read values", label); for (i = 0; i < nToPrint; i++) { printf(" %d", h_arr[i]); } printf("\n"); } int main(int argc, char **argv) { int N = 64; int block = 32; int runs = 30; int iters = 100000; int r; int *d_shared_var; int h_shared_var_atomic, h_shared_var_unsafe; int *d_values_read_atomic; int *d_values_read_unsafe; int *h_values_read; CHECK(hipMalloc((void **)&d_shared_var, sizeof(int))); CHECK(hipMalloc((void **)&d_values_read_atomic, N * sizeof(int))); CHECK(hipMalloc((void **)&d_values_read_unsafe, N * sizeof(int))); h_values_read = (int *)malloc(N * sizeof(int)); double atomic_mean_time = 0; double unsafe_mean_time = 0; for (r = 0; r < runs; r++) { double start_atomic = seconds(); CHECK(hipMemset(d_shared_var, 0x00, sizeof(int))); hipLaunchKernelGGL(( atomics), dim3(N / block), dim3(block), 0, 0, d_shared_var, d_values_read_atomic, N, iters); CHECK(hipDeviceSynchronize()); atomic_mean_time += seconds() - start_atomic; CHECK(hipMemcpy(&h_shared_var_atomic, d_shared_var, sizeof(int), hipMemcpyDeviceToHost)); double start_unsafe = seconds(); CHECK(hipMemset(d_shared_var, 0x00, sizeof(int))); hipLaunchKernelGGL(( unsafe), dim3(N / block), dim3(block), 0, 0, d_shared_var, d_values_read_unsafe, N, iters); CHECK(hipDeviceSynchronize()); unsafe_mean_time += seconds() - start_unsafe; CHECK(hipMemcpy(&h_shared_var_unsafe, d_shared_var, sizeof(int), hipMemcpyDeviceToHost)); } printf("In total, %d runs using atomic operations took %f s\n", runs, atomic_mean_time); printf(" Using atomic operations also produced an output of %d\n", h_shared_var_atomic); printf("In total, %d runs using unsafe operations took %f s\n", runs, unsafe_mean_time); printf(" Using unsafe operations also produced an output of %d\n", h_shared_var_unsafe); print_read_results(h_values_read, d_values_read_atomic, N, "atomic"); print_read_results(h_values_read, d_values_read_unsafe, N, "unsafe"); 
return 0; }
1ca7c9323f8134046836e725e8e8270598116106.cu
#include "../common/common.h" #include <stdio.h> #include <stdlib.h> /** * This example illustrates the difference between using atomic operations and * using unsafe accesses to increment a shared variable. * * In both the atomics() and unsafe() kernels, each thread repeatedly increments * a globally shared variable by 1. Each thread also stores the value it reads * from the shared location for the first increment. **/ /** * This version of the kernel uses atomic operations to safely increment a * shared variable from multiple threads. **/ __global__ void atomics(int *shared_var, int *values_read, int N, int iters) { int i; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= N) return; values_read[tid] = atomicAdd(shared_var, 1); for (i = 0; i < iters; i++) { atomicAdd(shared_var, 1); } } /** * This version of the kernel performs the same increments as atomics() but in * an unsafe manner. **/ __global__ void unsafe(int *shared_var, int *values_read, int N, int iters) { int i; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= N) return; int old = *shared_var; *shared_var = old + 1; values_read[tid] = old; for (i = 0; i < iters; i++) { int old = *shared_var; *shared_var = old + 1; } } /** * Utility function for printing the contents of an array. **/ static void print_read_results(int *h_arr, int *d_arr, int N, const char *label) { int i; int maxNumToPrint = 10; int nToPrint = N > maxNumToPrint ? maxNumToPrint : N; CHECK(cudaMemcpy(h_arr, d_arr, nToPrint * sizeof(int), cudaMemcpyDeviceToHost)); printf("Threads performing %s operations read values", label); for (i = 0; i < nToPrint; i++) { printf(" %d", h_arr[i]); } printf("\n"); } int main(int argc, char **argv) { int N = 64; int block = 32; int runs = 30; int iters = 100000; int r; int *d_shared_var; int h_shared_var_atomic, h_shared_var_unsafe; int *d_values_read_atomic; int *d_values_read_unsafe; int *h_values_read; CHECK(cudaMalloc((void **)&d_shared_var, sizeof(int))); CHECK(cudaMalloc((void **)&d_values_read_atomic, N * sizeof(int))); CHECK(cudaMalloc((void **)&d_values_read_unsafe, N * sizeof(int))); h_values_read = (int *)malloc(N * sizeof(int)); double atomic_mean_time = 0; double unsafe_mean_time = 0; for (r = 0; r < runs; r++) { double start_atomic = seconds(); CHECK(cudaMemset(d_shared_var, 0x00, sizeof(int))); atomics<<<N / block, block>>>(d_shared_var, d_values_read_atomic, N, iters); CHECK(cudaDeviceSynchronize()); atomic_mean_time += seconds() - start_atomic; CHECK(cudaMemcpy(&h_shared_var_atomic, d_shared_var, sizeof(int), cudaMemcpyDeviceToHost)); double start_unsafe = seconds(); CHECK(cudaMemset(d_shared_var, 0x00, sizeof(int))); unsafe<<<N / block, block>>>(d_shared_var, d_values_read_unsafe, N, iters); CHECK(cudaDeviceSynchronize()); unsafe_mean_time += seconds() - start_unsafe; CHECK(cudaMemcpy(&h_shared_var_unsafe, d_shared_var, sizeof(int), cudaMemcpyDeviceToHost)); } printf("In total, %d runs using atomic operations took %f s\n", runs, atomic_mean_time); printf(" Using atomic operations also produced an output of %d\n", h_shared_var_atomic); printf("In total, %d runs using unsafe operations took %f s\n", runs, unsafe_mean_time); printf(" Using unsafe operations also produced an output of %d\n", h_shared_var_unsafe); print_read_results(h_values_read, d_values_read_atomic, N, "atomic"); print_read_results(h_values_read, d_values_read_unsafe, N, "unsafe"); return 0; }
3e69ddf17615dc969029b5d7ee7e8f350fba95dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/advance.h> #include <thrust/system_error.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <thrust/binary_search.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/adjacent_difference.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <math.h> //for math //#include <gmpxx.h> //for precision calculation #include <vector> //to hold search results #include <stdio.h> #include <fstream> #include <iterator> #include <iomanip> #include <algorithm> //compute max of vector #include <numeric> //compute sum of vector (accumulate) #include <omp.h> #include "structures.h" //speed not important for final statistics, so optimising this is silly #define PI 3.14159265359 //this file contains the main statistics functions //This file relies on main's importing of the logFacts data file //this number is the max number of threads in a block on //guillimin #define BLOCK_SIZE = 1024 //extern tells the compiler that this variable is global and //is already initialized elsewhere extern double *logFacts; extern int maxFact; using namespace std; //Forward declaration for use in class double log_odds_ratio(double *counts, int length, int m_max, double nu, double nudot, bool verbose); /* //Holds the result of searches, and the displaying functions //faster to have vectors of the variables within, //rather than a vector of the class struct SearchResults { //the settings of the search vector<double> nu, nudot; //The odds that this model fits vector<double> odds; //Get average and maximum odds double avg_odds(); //return max odds double max_odds() {return *max_element(odds.begin(),odds.end());}; //get the position of the max int max_odds_i() {return max_element(odds.begin(), odds.end()) - odds.begin();}; //print settings for a specific search void print_settings(int i){printf("%lf\n",nu[i]);}; //print all settings void print_stats(); }; //This object organizes the entire search struct PeakSearch { //the bounds on the nu search double nu_min,nu_max; //interval between searches w.r.t nu double d_nu; //the max number of bins int m_max; //the bounds on the nudot search //In terms of radians double nudot_min, nudot_max; //interval between searches w.r.t nudot double d_nudot; //set default parameters void default_params(); //Print all of the above parameters void print_params(); SearchResults search(double *counts, int length, bool verbose); }; //gets the average odds double SearchResults::avg_odds() { //compute the sum of the odds ratios double sum = accumulate(odds.begin(), odds.end(),0); //get the average, and return return sum/odds.size(); } //print some stats about the search void SearchResults::print_stats() { printf("\nSTATS:\n"); printf("The average odds for the search "); printf("are %lf\n",avg_odds()); //Get position of max int max_i = max_odds_i(); printf("The best odds occur for "); printf("a nu of %lf seconds ",nu[max_i]); printf("and a nudot of %lf radians, which ",nudot[max_i]); printf("give odds of %lf\n",odds[max_i]); } //set defaults void PeakSearch::default_params() { //the bounds on the nu search //in terms of Hz nu_min = 5; nu_max = 10; //interval between searches w.r.t nu d_nu = (nu_max-nu_min)/30; //the max number of bins m_max = 30; //the bounds on the nudot search //In terms of Hz/s nudot_min = 1e-100; //don't repeat first nudot nudot_max 
= 2e-100; //interval between searches w.r.t nudot d_nudot = 1e-100; } //Print out all settings void PeakSearch::print_params() { printf("The nu is tested from %lf to %lf seconds\n", nu_min,nu_max); printf("The interval of this search is %lf seconds\n",d_nu); printf("The nudot is tested from %lf to %lf radians\n", nudot_min,nudot_max); printf("The interval of this search is %lf radians\n",d_nudot); printf("The maximum number of bins in the stepwise model is %d\n" ,m_max); } //double log_odds_ratio(double *counts, int length, int m_max, // double nu, double nudot, double nu) //searches through all settings //length is the number of counts, and verbose //tells the program how much it should talk SearchResults PeakSearch::search(double *counts, int length, bool verbose) { //holds all search settings //an array of three element arrays. SearchResults searches; if(verbose) printf("Starting searches\n"); //iterate through possible nus and nudots for (double nu = nu_min; nu <= nu_max; nu += d_nu) { for (double nudot = nudot_min; nudot <= nudot_max; nudot += d_nudot) { //each setting will be in the same index, //so can be accessed later searches.nu.push_back(nu); searches.nudot.push_back(nudot); //finalodds = 1./nmvals/ log(nu_max/nu_min)/log(nudot_max/nudot_min) * dnu/nu * nudotfrac * moddsratio; double odds = 1; odds *= 1/m_max/log(nu_max/nu_min)/log(nudot_max/nudot_min)*d_nu/nu*fabs(d_nudot/nudot); odds *= log_odds_ratio(counts, length, m_max, nu, nudot, verbose); printf("Odds: %f\n", odds); searches.odds.push_back(odds); if(verbose) { printf("nu=%lf,nudot=%lf,odds=%lf\n", nu,nudot,odds); } } } //return all computed searches return searches; } */ //This function returns the choose function double log_choose(int first, int second) { //sanity check if (first > maxFact || second > maxFact || second > first) { return 0; } //the log of the choose function return logFacts[first] - logFacts[second] - logFacts[first-second]; } //this function normalizes a list of counts to start at 0 and //be in terms of seconds void normalize_counts(double *counts, int length) { double t_min = counts[0]; for (int i = 0; i < length; i ++) { counts[i] -= t_min; } } //This function gets the number of events which occur within //the specified range. 
Length is the total number of events int num_events(double *counts, int length, double start, double end) { //the number of events in this region of the counts int num = 0; //go through all counts //note that this assumes they are in order for (int i = 0; i < length; i ++) { //check if the count is in the region if (counts[i] >= start && counts[i] < end) { num ++; } //if the count is later, then we choose to not be redundant else if (counts[i] >= end) { break; } } //return the number of events return num; } /* //CUDA kernel to create n_mvals*length matrix of bins __global__ void create_binnings(double *counts, int *mvals, int n_mvals, double nu, double nudot, unsigned char **binning) { //threads=length int idx = blockIdx.x*blockDim.x+threadIdx.x; double t = counts[idx]; unsigned char tmp_bin = 0; for (int i = 0; i < n_mvals; i++) { tmp_bin = (unsigned char)((int)(fmod(t*(nu+0.5*t*nudot),1)*mvals[i])); binning[i][idx] = tmp_bin; binning[i][idx] = 54; } //n[(int)(fmod(counts[i]*(nu+0.5*counts[i]*nudot),1)*m)]++; } */ //function gets decimal portion of double __device__ double get_decimal (double x) {return x - (int)x;} __global__ void create_binnings(double *counts, int *mvals, int length, int n_mvals, double nu, double nudot, unsigned char *binning) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < length) { double t = counts[idx]; int index = idx; unsigned char tmp_bin = 0; for (int i = 0; i < n_mvals; i++) { tmp_bin = (int)(get_decimal(t*(nu+0.5*t*nudot))*mvals[i]); binning[index] = tmp_bin; index += length; } } } //function makes CUDA calls unsigned char *get_bins(double *counts_d, int length, double *counts_h, int *mvals_d, int *mvals_h, int n_mvals, double nu, double nudot) { unsigned char *binning_h; unsigned char *binning_d; //initialize thrust arrays binning_h = new unsigned char [n_mvals*length]; binning_h[0] = 100; hipError_t error; hipMalloc((void**)&counts_d,length*sizeof(double)); hipMalloc((void**)&mvals_d,n_mvals*sizeof(double)); error = hipMalloc((void**)&binning_d,n_mvals*length); if (error!=hipSuccess) {printf("Error! %s\n",hipGetErrorString(error));} printf("Copying data...\n"); hipMemcpy(counts_d,counts_h,length*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(mvals_d,mvals_h,n_mvals*sizeof(double), hipMemcpyHostToDevice); error = hipGetLastError(); if (error!=hipSuccess) {printf("Error! %s\n",hipGetErrorString(error));} else {printf("Memory Allocated!\n");} printf("Binning data...\n"); hipLaunchKernelGGL(( create_binnings), dim3(40285),dim3(1024), 0, 0, counts_d, mvals_d, length, n_mvals, nu, nudot, binning_d); thrust::sort(counts_d,counts_d+length); //error = hipDeviceSynchronize(); if (error!=hipSuccess) {printf("Error! %s\n",hipGetErrorString(error));} error = hipMemcpy(binning_h,binning_d,n_mvals*length*sizeof(unsigned char), hipMemcpyDeviceToHost); //error = hipGetLastError(); if (error!=hipSuccess) {printf("Error! %s\n",hipGetErrorString(error));} printf("Done GPU. 
Cleaning up...\n"); hipFree(binning_d); hipFree(counts_d); hipFree(mvals_d); return binning_h; } /*__global__ void t_bin_counts(thrust::device_vector<double> counts, thrust::device_vector<unsigned char> t_binning, double nu, double nudot, thrust::device_vector<int> mvals) */ __global__ void t_bin_counts(double* counts, int length, unsigned char* t_binning, double nu, double nudot, int* mvals, int n_mvals) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < length) { double t = counts[idx]; int index = idx; unsigned char tmp_bin = 0; for (int i = 0; i < n_mvals; i++) { tmp_bin = (unsigned char)(get_decimal(t*(nu+0.5*t*nudot))*mvals[i]); t_binning[index] = tmp_bin; index += length; } } } __global__ void t_bin_counts_two(double* counts, int length, unsigned char* t_binning, double nu, double nudot) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < length) { t_binning[idx] = (unsigned char)(get_decimal(counts[idx]*(nu+0.5*counts[idx]*nudot))*256); } } __global__ void count_bins(unsigned char *bins, int *histogram_ss, int length) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < length-1) { if (bins[idx] != bins[idx+1]) { //log end of one histogram_ss[256+bins[idx]] = idx; //log start of other histogram_ss[bins[idx+1]] = idx+1; } } } __global__ void get_histo (int *histogram, int *histogram_ss) { int idx = threadIdx.x; if (histogram_ss[256+idx] != -1 && histogram_ss[idx] != -1) histogram[idx] = histogram_ss[256+idx] - histogram[idx]; } //function reduces bins by a factor of two __global__ void reduce_bins_two(int* bins) { int idx = threadIdx.x; bins[idx] = bins[2*idx]+bins[2*idx+1]; } __global__ void fake_bins(unsigned char *t_binning, int length) { int idx = threadIdx.x; t_binning[length+idx] = (unsigned char) idx; } double t_odds_two(double *counts_h, int length, double nu_min, double nu_max, double nudot_min, double nudot_max, int verbosity, const char* filename) { //the entered mvals should be 2^1 up to 2^8 try { double d_nu = 1/counts_h[length-1]; //double d_nu = 1e-5; //double d_nudot = 1e-8; //printf("Length: %d\n",length); thrust::device_vector<double> counts_d(counts_h, counts_h+length); thrust::device_vector<unsigned char> t_binning(length,0); thrust::host_vector<int> binned(256,0); double *counts_d_pointer = thrust::raw_pointer_cast(counts_d.data()); unsigned char *t_binning_pointer = thrust::raw_pointer_cast(t_binning.data()); unsigned int blocks = (unsigned int)(length/1024.0 + 1.0); double odds = 0; double om1 = 0; int m; int counter = 0; double best[5][3] = {0}; /* double best[0][] = {0,0,0}; double best[1][] = {0,0,0}; double best[2][] = {0,0,0}; double best[3][] = {0,0,0}; double best[4][] = {0,0,0}; */ for (double nu = nu_min; nu <= nu_max; nu += d_nu) { // for (double // nudot = nudot_min; // nudot <= nudot_max; // nudot += d_nudot) //double d_pdot = d_nu*d_nu/(nu_max*nu); double d_nudot = nu*d_nu*d_nu/(nu_max); //nudot=-Pdot/P^2=-v^2*Pdot //d_nudot=-nu^2*d_nudot //dPdot=Pmin/T^2*P=1/(numax*T^2*nu) for (double nudot = nudot_min; nudot <= nudot_max; nudot += d_nudot) { counter ++; hipLaunchKernelGGL(( t_bin_counts_two), dim3(blocks),dim3(1024), 0, 0, counts_d_pointer, length, t_binning_pointer, nu, nudot); thrust::sort(t_binning.begin(), t_binning.end()); thrust::device_vector<int> histogram(256,0); thrust::counting_iterator<int> search_begin(0); thrust::upper_bound(t_binning.begin(), t_binning.end(), search_begin, search_begin + 256, histogram.begin()); thrust::adjacent_difference(histogram.begin(), histogram.end(), histogram.begin()); binned=histogram; m = 
256; odds = 0; om1 = 0; for (int j = 0; j < 256; j++) { om1+=logFacts[binned[j]]; } om1 += logFacts[255]-logFacts[length+255]+((double)length)*log(256); odds += exp(om1); for (int k = 1; k < 8; k++) { /* for (int x = 0; x < m; x ++) { printf("%d,",binned[x]); } printf("\n"); */ m = m >> 1; //printf("m=%d\n",m); //make the pointers int *bins_d = thrust::raw_pointer_cast(histogram.data()); hipLaunchKernelGGL(( reduce_bins_two), dim3(1),dim3(m), 0, 0, bins_d); // histogram.resize(m); // binned.resize(m); binned = histogram; om1 = 0; //for (int j = 0; j < m; j++) //printf("%d,",binned[j]); //printf("\n"); for (int j = 0; j < m; j++) { om1+=logFacts[binned[j]]; } om1 += logFacts[m-1]-logFacts[length+m-1]+((double)length)*log(m); odds += exp(om1); } //if (odds > 1e-3) odds /= 8; odds *= d_nu/nu; //results.nu.push_back(nu); //results.nudot.push_back(nudot); //results.odds.push_back(odds); //if (counter %50000==0 || odds > 1e-4) /* if (verbosity == 2 || (verbosity == 1 && odds > 1e-3) || (verbosity == 0 && odds > 1e-1)) { printf("Search %d gives odds of %e for nu %.9e and nudot -%.9e\n",counter,odds,nu,nudot); } else if (verbosity == 1 && counter%50000==0) { printf("On search %d, and nu=%.9e Hz\n",counter,nu); } */ if (odds > best[4][0]) { for (int i = 3; i >= 0; i --) { if (odds < best[i][0]) { for (int j = 3; j >= i + 1; j--) { best[j+1][0] = best[j][0]; best[j+1][1] = best[j][1]; best[j+1][2] = best[j][2]; } best[i+1][0] = odds; best[i+1][1] = nu; best[i+1][2] = nudot; break; } else if (i == 0) { for (int j = 3; j >= 0; j--) { best[j+1][0] = best[j][0]; best[j+1][1] = best[j][1]; best[j+1][2] = best[j][2]; } best[0][0] = odds; best[0][1] = nu; best[0][2] = nudot; } } } } } //clear up space counts_d.clear(); counts_d.shrink_to_fit(); ofstream file(filename, ios::app); file << "range,"; file << scientific << setprecision(10) << nu_min << "-"; file << scientific << setprecision(10) << nu_max << ","; file << scientific << nudot_min << "-"; file << scientific << nudot_max << "\n"; for (int i = 0; i < 5; i ++) { //printf("The %dth best odds are %e for a nu of %.9e and nudot -%.9e\n", //i+1,best[i][0],best[i][1],best[i][2]); file << scientific << best[i][0] << ","; file << scientific << setprecision(10) << best[i][1] << ","; file << scientific << -best[i][2]; file << "\n"; } file.close(); //results.write_best(10,filename); //best[5][3]; //keep reducing bins //int j = results.max_odds_i(); //printf("\nThe best odds are: %e, which occur for nu of %e Hz and" //" nudot of -%e Hz/s\n\n", // results.odds[j], results.nu[j], results.nudot[j]); //printf("%d searches completed\n",counter); return 0; } catch(thrust::system_error &err) { std::cerr << "Error doing this: " << err.what() << std::endl; return 1; } } double t_odds(double *counts_h, int length, double nu, double nudot, int *mvals_h, int n_mvals) { try { thrust::device_vector<double> counts_d(counts_h, counts_h+length); thrust::device_vector<unsigned char> t_binning(length*n_mvals,0); thrust::device_vector<int> mvals_d(mvals_h, mvals_h+n_mvals); double *counts_d_pointer = thrust::raw_pointer_cast(counts_d.data()); unsigned char *t_binning_pointer = thrust::raw_pointer_cast(t_binning.data()); int *mvals_d_pointer = thrust::raw_pointer_cast(mvals_d.data()); hipLaunchKernelGGL(( t_bin_counts), dim3(40285),dim3(1024), 0, 0, counts_d_pointer, length, t_binning_pointer, nu, nudot, mvals_d_pointer, n_mvals); //clear up space counts_d.clear(); counts_d.shrink_to_fit(); //iterate through segments of array thrust::device_vector<unsigned char>::iterator iter_start 
= t_binning.begin(); thrust::device_vector<unsigned char>::iterator iter_end = t_binning.begin(); //sort parts of array for (int i = 0; i < n_mvals; i++) { thrust::advance(iter_end,length); thrust::sort(iter_start, iter_end); //thrust::sort(&t_binning[i*length],&t_binning[i*length + length]); thrust::advance(iter_start,length); } double odds = 0; iter_start = t_binning.begin(); iter_end = t_binning.begin(); for (int i = 0; i < n_mvals; i++) { thrust::advance(iter_end,length); thrust::device_vector<int> histogram(mvals_h[i],0); thrust::host_vector<unsigned char> histo_vals_h(mvals_h[i],0); for (unsigned char j = 0; j < mvals_h[i]; j++) { histo_vals_h[j] = j; } thrust::device_vector<unsigned char> histo_vals=histo_vals_h; thrust::reduce_by_key(iter_start, iter_end, thrust::constant_iterator<int>(1), histo_vals.begin(), histogram.begin()); thrust::advance(iter_start,length); //load these values back to the host, as has been binned thrust::host_vector<int> binned = histogram; double om1 = 0; for (int j = 0; j < binned.size(); j++) { om1+=logFacts[binned[j]]; } om1 += logFacts[mvals_h[i]-1]-logFacts[length+mvals_h[i]-1]+((double)length)*log(mvals_h[i]); odds += exp(om1); } return odds; } catch(thrust::system_error &err) { std::cerr << "Error doing this: " << err.what() << std::endl; exit(-1); } } //function uploads static data to the GPU at start of MPI proc void upload_data(double *counts_h, double *counts_d, int length, int *mvals_h, int *mvals_d, int n_mvals) { printf("1000 toa = %f\n",counts_h[1000]); //hipMalloc((void**)&counts_d,length*sizeof(double)); //hipMalloc((void**)&mvals_d,n_mvals*sizeof(double)); //hipMemcpy(counts_d,counts_h,length*sizeof(double), // hipMemcpyHostToDevice); //hipMemcpy(mvals_d,mvals_h,n_mvals*sizeof(double), // hipMemcpyHostToDevice); hipError_t error = hipGetLastError(); if (error!=hipSuccess) {printf("Error! %s\n",hipGetErrorString(error));} else{printf("Static data uploaded!\n");} } void free_data(double *counts_d, int *mvals_d) { //hipFree(mvals_d); //hipFree(counts_d); } double bins_to_odds(unsigned char *bins, int length, int *mvals, int n_mvals) { double odds = 0; for (int i = 0; i < n_mvals; i ++) { double om1 = 0; int m = mvals[i]; int n[m]; for (int j = 0; j < m; j ++) { n[j] = 0; } for (int k = i*length; k < (i+1)*length; k++) { n[bins[k]]++; } /* #pragma omp parallel { int ni[m]; for (int r = 0; r < m; r ++) { ni[r] = 0; } #pragma omp for for (int k = i*length; k < (i+1)*length; k++) { ni[bins[k]]++; } #pragma omp critical for (int q = 0; q < m; q++) { n[q] += ni[q]; } } */ for (int l = 0; l < m; l++) { //part of odds equation om1 += logFacts[n[l]]; } om1 += logFacts[m-1]-logFacts[length+m-1]+((double)length)*log(m); odds += exp(om1); } return odds; } //Equation from gregory and loredo paper to calcluate odds ratio //of m-binned stepwise model w.r.t. 
constant model double log_m_odds_ratio(double *counts, int length, int m, double nu, double nudot, double t_max) { //create all the bins, init to zero counts unsigned int ng[m]; //init to zero for (int j = 0; j < m; j++) { ng[j] = 0; } //split up into threads #pragma omp parallel default(shared) { //create temp bins for thread unsigned int n[m]; for (int j = 0; j < m; j++) { n[j] = 0; } //variables used in binnings //gets position in nu //long double phi, d_phi; //double phi; //gets bin //int k; //bin the photons #pragma omp for for (int i = 0; i < length; i++) { //d_phi = 0.5*counts[i]*nudot*counts[i]; //get position in nu of photon //phi = fmod(counts[i]*nu+d_phi,1); //get corresponding bin //k = (int)(fmod(counts[i]*(nu+0.5*counts[i]*nudot),1)*m); //one more count n[(int)(fmod(counts[i]*(nu+0.5*counts[i]*nudot),1)*m)]++; } //combine n values #pragma omp critical for (int j = 0; j < m; j++) { ng[j] += n[j]; } } //odds to return double om1 = 0.0; //go through all bins for (int j = 0; j < m; j++) { //part of odds equation om1 += logFacts[ng[j]]; } //final parts of odds equation om1 += logFacts[m-1]-logFacts[length+m-1]+((double)length)*log(m); return om1; } //Equation from gregory and loredo paper to calcluate total odds //ratio double log_odds_ratio(double *counts, int length, int *mvals, int n_mvals, double nu, double nudot, bool verbose) { //normalize the counts with item 0 at t=0.0s normalize_counts(counts, length); //the following assumes the counts are ordered double t_max = counts[length-1]; //The total odds ratio double odds = 0.0; //go through all possible m values for (int i = 0; i <= n_mvals; i++) { if (verbose) printf("Testing %d-binned model\n",i); //Add the next om1 value to the total odds ratio. //We also have to remove the log odds += exp(log_m_odds_ratio(counts,length,mvals[i],nu, nudot,t_max)); } return odds; } //Gets the average time between counts double avg_interval(double *counts, int length) { double total_time; total_time = counts[length-1] - counts[0]; return total_time/length; } //Gets the minimum time between counts double min_interval(double *counts, int length) { double smallest; //start with first interval smallest = counts[1] - counts[0]; //go through the rest for (int i = 2; i < length; i ++) { double tmp = counts[i] - counts[i-1]; //if interval smaller, assume it is now //the smallest if (tmp < smallest) { smallest = tmp; } } return smallest; }
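// The log_m_odds_ratio routine above folds each photon time at the trial (nu, nudot),
// bins the fractional phase into m bins, and merges per-thread histograms under an
// OpenMP critical section. A minimal, self-contained sketch of that private-histogram
// pattern (toy arrival times and made-up nu/nudot, not the data handled above):
#include <cmath>
#include <cstdio>
#include <omp.h>

int main() {
    const int length = 1000000;
    const int m = 16;                       // number of phase bins
    const double nu = 7.25, nudot = 1e-10;  // hypothetical trial spin parameters
    unsigned int ng[m] = {0};               // global bin counts

    #pragma omp parallel
    {
        unsigned int n[m] = {0};            // per-thread bin counts
        #pragma omp for
        for (int i = 0; i < length; i++) {
            double t = i * 1e-3;            // stand-in for a photon arrival time (s)
            double phase = std::fmod(t * (nu + 0.5 * t * nudot), 1.0);
            n[(int)(phase * m)]++;
        }
        #pragma omp critical
        for (int j = 0; j < m; j++) ng[j] += n[j];
    }

    for (int j = 0; j < m; j++) printf("bin %d: %u\n", j, ng[j]);
    return 0;
}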
3e69ddf17615dc969029b5d7ee7e8f350fba95dd.cu
#include <thrust/advance.h> #include <thrust/system_error.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <thrust/binary_search.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/adjacent_difference.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <math.h> //for math //#include <gmpxx.h> //for precision calculation #include <vector> //to hold search results #include <stdio.h> #include <fstream> #include <iterator> #include <iomanip> #include <algorithm> //compute max of vector #include <numeric> //compute sum of vector (accumulate) #include <omp.h> #include "structures.h" //speed not important for final statistics, so optimising this is silly #define PI 3.14159265359 //this file contains the main statistics functions //This file relies on main's importing of the logFacts data file //this number is the max number of threads in a block on //guillimin #define BLOCK_SIZE = 1024 //extern tells the compiler that this variable is global and //is already initialized elsewhere extern double *logFacts; extern int maxFact; using namespace std; //Forward declaration for use in class double log_odds_ratio(double *counts, int length, int m_max, double nu, double nudot, bool verbose); /* //Holds the result of searches, and the displaying functions //faster to have vectors of the variables within, //rather than a vector of the class struct SearchResults { //the settings of the search vector<double> nu, nudot; //The odds that this model fits vector<double> odds; //Get average and maximum odds double avg_odds(); //return max odds double max_odds() {return *max_element(odds.begin(),odds.end());}; //get the position of the max int max_odds_i() {return max_element(odds.begin(), odds.end()) - odds.begin();}; //print settings for a specific search void print_settings(int i){printf("%lf\n",nu[i]);}; //print all settings void print_stats(); }; //This object organizes the entire search struct PeakSearch { //the bounds on the nu search double nu_min,nu_max; //interval between searches w.r.t nu double d_nu; //the max number of bins int m_max; //the bounds on the nudot search //In terms of radians double nudot_min, nudot_max; //interval between searches w.r.t nudot double d_nudot; //set default parameters void default_params(); //Print all of the above parameters void print_params(); SearchResults search(double *counts, int length, bool verbose); }; //gets the average odds double SearchResults::avg_odds() { //compute the sum of the odds ratios double sum = accumulate(odds.begin(), odds.end(),0); //get the average, and return return sum/odds.size(); } //print some stats about the search void SearchResults::print_stats() { printf("\nSTATS:\n"); printf("The average odds for the search "); printf("are %lf\n",avg_odds()); //Get position of max int max_i = max_odds_i(); printf("The best odds occur for "); printf("a nu of %lf seconds ",nu[max_i]); printf("and a nudot of %lf radians, which ",nudot[max_i]); printf("give odds of %lf\n",odds[max_i]); } //set defaults void PeakSearch::default_params() { //the bounds on the nu search //in terms of Hz nu_min = 5; nu_max = 10; //interval between searches w.r.t nu d_nu = (nu_max-nu_min)/30; //the max number of bins m_max = 30; //the bounds on the nudot search //In terms of Hz/s nudot_min = 1e-100; //don't repeat first nudot nudot_max = 2e-100; //interval between searches w.r.t nudot d_nudot = 1e-100; } //Print out all 
settings void PeakSearch::print_params() { printf("The nu is tested from %lf to %lf seconds\n", nu_min,nu_max); printf("The interval of this search is %lf seconds\n",d_nu); printf("The nudot is tested from %lf to %lf radians\n", nudot_min,nudot_max); printf("The interval of this search is %lf radians\n",d_nudot); printf("The maximum number of bins in the stepwise model is %d\n" ,m_max); } //double log_odds_ratio(double *counts, int length, int m_max, // double nu, double nudot, double nu) //searches through all settings //length is the number of counts, and verbose //tells the program how much it should talk SearchResults PeakSearch::search(double *counts, int length, bool verbose) { //holds all search settings //an array of three element arrays. SearchResults searches; if(verbose) printf("Starting searches\n"); //iterate through possible nus and nudots for (double nu = nu_min; nu <= nu_max; nu += d_nu) { for (double nudot = nudot_min; nudot <= nudot_max; nudot += d_nudot) { //each setting will be in the same index, //so can be accessed later searches.nu.push_back(nu); searches.nudot.push_back(nudot); //finalodds = 1./nmvals/ log(nu_max/nu_min)/log(nudot_max/nudot_min) * dnu/nu * nudotfrac * moddsratio; double odds = 1; odds *= 1/m_max/log(nu_max/nu_min)/log(nudot_max/nudot_min)*d_nu/nu*fabs(d_nudot/nudot); odds *= log_odds_ratio(counts, length, m_max, nu, nudot, verbose); printf("Odds: %f\n", odds); searches.odds.push_back(odds); if(verbose) { printf("nu=%lf,nudot=%lf,odds=%lf\n", nu,nudot,odds); } } } //return all computed searches return searches; } */ //This function returns the choose function double log_choose(int first, int second) { //sanity check if (first > maxFact || second > maxFact || second > first) { return 0; } //the log of the choose function return logFacts[first] - logFacts[second] - logFacts[first-second]; } //this function normalizes a list of counts to start at 0 and //be in terms of seconds void normalize_counts(double *counts, int length) { double t_min = counts[0]; for (int i = 0; i < length; i ++) { counts[i] -= t_min; } } //This function gets the number of events which occur within //the specified range. 
Length is the total number of events int num_events(double *counts, int length, double start, double end) { //the number of events in this region of the counts int num = 0; //go through all counts //note that this assumes they are in order for (int i = 0; i < length; i ++) { //check if the count is in the region if (counts[i] >= start && counts[i] < end) { num ++; } //if the count is later, then we choose to not be redundant else if (counts[i] >= end) { break; } } //return the number of events return num; } /* //CUDA kernel to create n_mvals*length matrix of bins __global__ void create_binnings(double *counts, int *mvals, int n_mvals, double nu, double nudot, unsigned char **binning) { //threads=length int idx = blockIdx.x*blockDim.x+threadIdx.x; double t = counts[idx]; unsigned char tmp_bin = 0; for (int i = 0; i < n_mvals; i++) { tmp_bin = (unsigned char)((int)(fmod(t*(nu+0.5*t*nudot),1)*mvals[i])); binning[i][idx] = tmp_bin; binning[i][idx] = 54; } //n[(int)(fmod(counts[i]*(nu+0.5*counts[i]*nudot),1)*m)]++; } */ //function gets decimal portion of double __device__ double get_decimal (double x) {return x - (int)x;} __global__ void create_binnings(double *counts, int *mvals, int length, int n_mvals, double nu, double nudot, unsigned char *binning) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < length) { double t = counts[idx]; int index = idx; unsigned char tmp_bin = 0; for (int i = 0; i < n_mvals; i++) { tmp_bin = (int)(get_decimal(t*(nu+0.5*t*nudot))*mvals[i]); binning[index] = tmp_bin; index += length; } } } //function makes CUDA calls unsigned char *get_bins(double *counts_d, int length, double *counts_h, int *mvals_d, int *mvals_h, int n_mvals, double nu, double nudot) { unsigned char *binning_h; unsigned char *binning_d; //initialize thrust arrays binning_h = new unsigned char [n_mvals*length]; binning_h[0] = 100; cudaError_t error; cudaMalloc((void**)&counts_d,length*sizeof(double)); cudaMalloc((void**)&mvals_d,n_mvals*sizeof(double)); error = cudaMalloc((void**)&binning_d,n_mvals*length); if (error!=cudaSuccess) {printf("Error! %s\n",cudaGetErrorString(error));} printf("Copying data...\n"); cudaMemcpy(counts_d,counts_h,length*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(mvals_d,mvals_h,n_mvals*sizeof(double), cudaMemcpyHostToDevice); error = cudaGetLastError(); if (error!=cudaSuccess) {printf("Error! %s\n",cudaGetErrorString(error));} else {printf("Memory Allocated!\n");} printf("Binning data...\n"); create_binnings<<<40285,1024>>>(counts_d, mvals_d, length, n_mvals, nu, nudot, binning_d); thrust::sort(counts_d,counts_d+length); //error = cudaThreadSynchronize(); if (error!=cudaSuccess) {printf("Error! %s\n",cudaGetErrorString(error));} error = cudaMemcpy(binning_h,binning_d,n_mvals*length*sizeof(unsigned char), cudaMemcpyDeviceToHost); //error = cudaGetLastError(); if (error!=cudaSuccess) {printf("Error! %s\n",cudaGetErrorString(error));} printf("Done GPU. 
Cleaning up...\n"); cudaFree(binning_d); cudaFree(counts_d); cudaFree(mvals_d); return binning_h; } /*__global__ void t_bin_counts(thrust::device_vector<double> counts, thrust::device_vector<unsigned char> t_binning, double nu, double nudot, thrust::device_vector<int> mvals) */ __global__ void t_bin_counts(double* counts, int length, unsigned char* t_binning, double nu, double nudot, int* mvals, int n_mvals) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < length) { double t = counts[idx]; int index = idx; unsigned char tmp_bin = 0; for (int i = 0; i < n_mvals; i++) { tmp_bin = (unsigned char)(get_decimal(t*(nu+0.5*t*nudot))*mvals[i]); t_binning[index] = tmp_bin; index += length; } } } __global__ void t_bin_counts_two(double* counts, int length, unsigned char* t_binning, double nu, double nudot) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < length) { t_binning[idx] = (unsigned char)(get_decimal(counts[idx]*(nu+0.5*counts[idx]*nudot))*256); } } __global__ void count_bins(unsigned char *bins, int *histogram_ss, int length) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < length-1) { if (bins[idx] != bins[idx+1]) { //log end of one histogram_ss[256+bins[idx]] = idx; //log start of other histogram_ss[bins[idx+1]] = idx+1; } } } __global__ void get_histo (int *histogram, int *histogram_ss) { int idx = threadIdx.x; if (histogram_ss[256+idx] != -1 && histogram_ss[idx] != -1) histogram[idx] = histogram_ss[256+idx] - histogram[idx]; } //function reduces bins by a factor of two __global__ void reduce_bins_two(int* bins) { int idx = threadIdx.x; bins[idx] = bins[2*idx]+bins[2*idx+1]; } __global__ void fake_bins(unsigned char *t_binning, int length) { int idx = threadIdx.x; t_binning[length+idx] = (unsigned char) idx; } double t_odds_two(double *counts_h, int length, double nu_min, double nu_max, double nudot_min, double nudot_max, int verbosity, const char* filename) { //the entered mvals should be 2^1 up to 2^8 try { double d_nu = 1/counts_h[length-1]; //double d_nu = 1e-5; //double d_nudot = 1e-8; //printf("Length: %d\n",length); thrust::device_vector<double> counts_d(counts_h, counts_h+length); thrust::device_vector<unsigned char> t_binning(length,0); thrust::host_vector<int> binned(256,0); double *counts_d_pointer = thrust::raw_pointer_cast(counts_d.data()); unsigned char *t_binning_pointer = thrust::raw_pointer_cast(t_binning.data()); unsigned int blocks = (unsigned int)(length/1024.0 + 1.0); double odds = 0; double om1 = 0; int m; int counter = 0; double best[5][3] = {0}; /* double best[0][] = {0,0,0}; double best[1][] = {0,0,0}; double best[2][] = {0,0,0}; double best[3][] = {0,0,0}; double best[4][] = {0,0,0}; */ for (double nu = nu_min; nu <= nu_max; nu += d_nu) { // for (double // nudot = nudot_min; // nudot <= nudot_max; // nudot += d_nudot) //double d_pdot = d_nu*d_nu/(nu_max*nu); double d_nudot = nu*d_nu*d_nu/(nu_max); //nudot=-Pdot/P^2=-v^2*Pdot //d_nudot=-nu^2*d_nudot //dPdot=Pmin/T^2*P=1/(numax*T^2*nu) for (double nudot = nudot_min; nudot <= nudot_max; nudot += d_nudot) { counter ++; t_bin_counts_two<<<blocks,1024>>>(counts_d_pointer, length, t_binning_pointer, nu, nudot); thrust::sort(t_binning.begin(), t_binning.end()); thrust::device_vector<int> histogram(256,0); thrust::counting_iterator<int> search_begin(0); thrust::upper_bound(t_binning.begin(), t_binning.end(), search_begin, search_begin + 256, histogram.begin()); thrust::adjacent_difference(histogram.begin(), histogram.end(), histogram.begin()); binned=histogram; m = 256; odds = 0; om1 = 0; for (int 
j = 0; j < 256; j++) { om1+=logFacts[binned[j]]; } om1 += logFacts[255]-logFacts[length+255]+((double)length)*log(256); odds += exp(om1); for (int k = 1; k < 8; k++) { /* for (int x = 0; x < m; x ++) { printf("%d,",binned[x]); } printf("\n"); */ m = m >> 1; //printf("m=%d\n",m); //make the pointers int *bins_d = thrust::raw_pointer_cast(histogram.data()); reduce_bins_two<<<1,m>>>(bins_d); // histogram.resize(m); // binned.resize(m); binned = histogram; om1 = 0; //for (int j = 0; j < m; j++) //printf("%d,",binned[j]); //printf("\n"); for (int j = 0; j < m; j++) { om1+=logFacts[binned[j]]; } om1 += logFacts[m-1]-logFacts[length+m-1]+((double)length)*log(m); odds += exp(om1); } //if (odds > 1e-3) odds /= 8; odds *= d_nu/nu; //results.nu.push_back(nu); //results.nudot.push_back(nudot); //results.odds.push_back(odds); //if (counter %50000==0 || odds > 1e-4) /* if (verbosity == 2 || (verbosity == 1 && odds > 1e-3) || (verbosity == 0 && odds > 1e-1)) { printf("Search %d gives odds of %e for nu %.9e and nudot -%.9e\n",counter,odds,nu,nudot); } else if (verbosity == 1 && counter%50000==0) { printf("On search %d, and nu=%.9e Hz\n",counter,nu); } */ if (odds > best[4][0]) { for (int i = 3; i >= 0; i --) { if (odds < best[i][0]) { for (int j = 3; j >= i + 1; j--) { best[j+1][0] = best[j][0]; best[j+1][1] = best[j][1]; best[j+1][2] = best[j][2]; } best[i+1][0] = odds; best[i+1][1] = nu; best[i+1][2] = nudot; break; } else if (i == 0) { for (int j = 3; j >= 0; j--) { best[j+1][0] = best[j][0]; best[j+1][1] = best[j][1]; best[j+1][2] = best[j][2]; } best[0][0] = odds; best[0][1] = nu; best[0][2] = nudot; } } } } } //clear up space counts_d.clear(); counts_d.shrink_to_fit(); ofstream file(filename, ios::app); file << "range,"; file << scientific << setprecision(10) << nu_min << "-"; file << scientific << setprecision(10) << nu_max << ","; file << scientific << nudot_min << "-"; file << scientific << nudot_max << "\n"; for (int i = 0; i < 5; i ++) { //printf("The %dth best odds are %e for a nu of %.9e and nudot -%.9e\n", //i+1,best[i][0],best[i][1],best[i][2]); file << scientific << best[i][0] << ","; file << scientific << setprecision(10) << best[i][1] << ","; file << scientific << -best[i][2]; file << "\n"; } file.close(); //results.write_best(10,filename); //best[5][3]; //keep reducing bins //int j = results.max_odds_i(); //printf("\nThe best odds are: %e, which occur for nu of %e Hz and" //" nudot of -%e Hz/s\n\n", // results.odds[j], results.nu[j], results.nudot[j]); //printf("%d searches completed\n",counter); return 0; } catch(thrust::system_error &err) { std::cerr << "Error doing this: " << err.what() << std::endl; return 1; } } double t_odds(double *counts_h, int length, double nu, double nudot, int *mvals_h, int n_mvals) { try { thrust::device_vector<double> counts_d(counts_h, counts_h+length); thrust::device_vector<unsigned char> t_binning(length*n_mvals,0); thrust::device_vector<int> mvals_d(mvals_h, mvals_h+n_mvals); double *counts_d_pointer = thrust::raw_pointer_cast(counts_d.data()); unsigned char *t_binning_pointer = thrust::raw_pointer_cast(t_binning.data()); int *mvals_d_pointer = thrust::raw_pointer_cast(mvals_d.data()); t_bin_counts<<<40285,1024>>>(counts_d_pointer, length, t_binning_pointer, nu, nudot, mvals_d_pointer, n_mvals); //clear up space counts_d.clear(); counts_d.shrink_to_fit(); //iterate through segments of array thrust::device_vector<unsigned char>::iterator iter_start = t_binning.begin(); thrust::device_vector<unsigned char>::iterator iter_end = t_binning.begin(); //sort 
parts of array for (int i = 0; i < n_mvals; i++) { thrust::advance(iter_end,length); thrust::sort(iter_start, iter_end); //thrust::sort(&t_binning[i*length],&t_binning[i*length + length]); thrust::advance(iter_start,length); } double odds = 0; iter_start = t_binning.begin(); iter_end = t_binning.begin(); for (int i = 0; i < n_mvals; i++) { thrust::advance(iter_end,length); thrust::device_vector<int> histogram(mvals_h[i],0); thrust::host_vector<unsigned char> histo_vals_h(mvals_h[i],0); for (unsigned char j = 0; j < mvals_h[i]; j++) { histo_vals_h[j] = j; } thrust::device_vector<unsigned char> histo_vals=histo_vals_h; thrust::reduce_by_key(iter_start, iter_end, thrust::constant_iterator<int>(1), histo_vals.begin(), histogram.begin()); thrust::advance(iter_start,length); //load these values back to the host, as has been binned thrust::host_vector<int> binned = histogram; double om1 = 0; for (int j = 0; j < binned.size(); j++) { om1+=logFacts[binned[j]]; } om1 += logFacts[mvals_h[i]-1]-logFacts[length+mvals_h[i]-1]+((double)length)*log(mvals_h[i]); odds += exp(om1); } return odds; } catch(thrust::system_error &err) { std::cerr << "Error doing this: " << err.what() << std::endl; exit(-1); } } //function uploads static data to the GPU at start of MPI proc void upload_data(double *counts_h, double *counts_d, int length, int *mvals_h, int *mvals_d, int n_mvals) { printf("1000 toa = %f\n",counts_h[1000]); //cudaMalloc((void**)&counts_d,length*sizeof(double)); //cudaMalloc((void**)&mvals_d,n_mvals*sizeof(double)); //cudaMemcpy(counts_d,counts_h,length*sizeof(double), // cudaMemcpyHostToDevice); //cudaMemcpy(mvals_d,mvals_h,n_mvals*sizeof(double), // cudaMemcpyHostToDevice); cudaError_t error = cudaGetLastError(); if (error!=cudaSuccess) {printf("Error! %s\n",cudaGetErrorString(error));} else{printf("Static data uploaded!\n");} } void free_data(double *counts_d, int *mvals_d) { //cudaFree(mvals_d); //cudaFree(counts_d); } double bins_to_odds(unsigned char *bins, int length, int *mvals, int n_mvals) { double odds = 0; for (int i = 0; i < n_mvals; i ++) { double om1 = 0; int m = mvals[i]; int n[m]; for (int j = 0; j < m; j ++) { n[j] = 0; } for (int k = i*length; k < (i+1)*length; k++) { n[bins[k]]++; } /* #pragma omp parallel { int ni[m]; for (int r = 0; r < m; r ++) { ni[r] = 0; } #pragma omp for for (int k = i*length; k < (i+1)*length; k++) { ni[bins[k]]++; } #pragma omp critical for (int q = 0; q < m; q++) { n[q] += ni[q]; } } */ for (int l = 0; l < m; l++) { //part of odds equation om1 += logFacts[n[l]]; } om1 += logFacts[m-1]-logFacts[length+m-1]+((double)length)*log(m); odds += exp(om1); } return odds; } //Equation from gregory and loredo paper to calcluate odds ratio //of m-binned stepwise model w.r.t. 
constant model double log_m_odds_ratio(double *counts, int length, int m, double nu, double nudot, double t_max) { //create all the bins, init to zero counts unsigned int ng[m]; //init to zero for (int j = 0; j < m; j++) { ng[j] = 0; } //split up into threads #pragma omp parallel default(shared) { //create temp bins for thread unsigned int n[m]; for (int j = 0; j < m; j++) { n[j] = 0; } //variables used in binnings //gets position in nu //long double phi, d_phi; //double phi; //gets bin //int k; //bin the photons #pragma omp for for (int i = 0; i < length; i++) { //d_phi = 0.5*counts[i]*nudot*counts[i]; //get position in nu of photon //phi = fmod(counts[i]*nu+d_phi,1); //get corresponding bin //k = (int)(fmod(counts[i]*(nu+0.5*counts[i]*nudot),1)*m); //one more count n[(int)(fmod(counts[i]*(nu+0.5*counts[i]*nudot),1)*m)]++; } //combine n values #pragma omp critical for (int j = 0; j < m; j++) { ng[j] += n[j]; } } //odds to return double om1 = 0.0; //go through all bins for (int j = 0; j < m; j++) { //part of odds equation om1 += logFacts[ng[j]]; } //final parts of odds equation om1 += logFacts[m-1]-logFacts[length+m-1]+((double)length)*log(m); return om1; } //Equation from gregory and loredo paper to calcluate total odds //ratio double log_odds_ratio(double *counts, int length, int *mvals, int n_mvals, double nu, double nudot, bool verbose) { //normalize the counts with item 0 at t=0.0s normalize_counts(counts, length); //the following assumes the counts are ordered double t_max = counts[length-1]; //The total odds ratio double odds = 0.0; //go through all possible m values for (int i = 0; i <= n_mvals; i++) { if (verbose) printf("Testing %d-binned model\n",i); //Add the next om1 value to the total odds ratio. //We also have to remove the log odds += exp(log_m_odds_ratio(counts,length,mvals[i],nu, nudot,t_max)); } return odds; } //Gets the average time between counts double avg_interval(double *counts, int length) { double total_time; total_time = counts[length-1] - counts[0]; return total_time/length; } //Gets the minimum time between counts double min_interval(double *counts, int length) { double smallest; //start with first interval smallest = counts[1] - counts[0]; //go through the rest for (int i = 2; i < length; i ++) { double tmp = counts[i] - counts[i-1]; //if interval smaller, assume it is now //the smallest if (tmp < smallest) { smallest = tmp; } } return smallest; }
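// t_odds_two above builds its 256-bin histogram with the standard Thrust idiom:
// sort the bin labels, then upper_bound against 0..255 followed by
// adjacent_difference. A stand-alone sketch of that idiom on toy labels
// (evenly spread values, not the folded photon phases used above):
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/adjacent_difference.h>
#include <thrust/iterator/counting_iterator.h>
#include <cstdio>

int main() {
    thrust::host_vector<unsigned char> h_labels(1 << 20);
    for (size_t i = 0; i < h_labels.size(); ++i) h_labels[i] = (unsigned char)(i & 0xff);
    thrust::device_vector<unsigned char> labels = h_labels;

    thrust::sort(labels.begin(), labels.end());
    thrust::device_vector<int> hist(256);
    thrust::counting_iterator<int> search(0);
    // after upper_bound, hist[k] holds the number of labels <= k;
    // adjacent_difference turns that running count into per-bin counts
    thrust::upper_bound(labels.begin(), labels.end(), search, search + 256, hist.begin());
    thrust::adjacent_difference(hist.begin(), hist.end(), hist.begin());

    int bin0 = hist[0];
    printf("count in bin 0: %d (expect %d)\n", bin0, (1 << 20) / 256);
    return 0;
}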
cae743abc22a71196364bfef98f6f525d697a213.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_kmeans/cuda_datum.hpp" __device__ float d_l2norm(float f1, float f2){ return f1-f2; } __global__ void d_cudaKMeans(float * dataset_1, float * dataset_2, float * matches, int length){ int id = blockIdx.x*blockDim.x + threadIdx.x; if(id >= length){ return; } float last_distance_matched = INT_MAX, curr_distance_matched = 0; matches[id] = 0.0; for(size_t i = 0; i < length; ++i){ if (i == id){ continue; } curr_distance_matched = d_l2norm(dataset_1[id], dataset_2[i]); if ( last_distance_matched > curr_distance_matched ){ matches[id] = i; last_distance_matched = curr_distance_matched; } } return; } void h_cudaKMeans(CUDADatum * datum_1, CUDADatum * datum_2, CUDADatum * matches){ // 32 = warp size. shared memory is // permitted between threads in a block int blockSize = 32*10; // number of threads in a block int gridSize = (datum_1->length + blockSize - 1)/blockSize; // number of blocks; hipLaunchKernelGGL(( d_cudaKMeans), dim3(gridSize), dim3(blockSize), 0, 0, datum_1->d_in_buffer, datum_2->d_in_buffer, matches->d_out_buffer, datum_1->length ); hipDeviceSynchronize(); }
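// h_cudaKMeans above sizes its grid with the usual ceiling division
// (length + blockSize - 1) / blockSize and guards the tail threads with an
// early return. A minimal sketch of that HIP launch pattern on a toy kernel
// (hypothetical names; note that a distance-style comparison such as the one in
// d_cudaKMeans would normally use fabsf or a squared difference rather than the
// signed f1 - f2 returned by d_l2norm):
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale(float* x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;                     // guard the last, partially filled block
    x[i] *= a;
}

int main() {
    const int n = 1000;
    float* d_x = nullptr;
    hipMalloc(&d_x, n * sizeof(float));
    hipMemset(d_x, 0, n * sizeof(float));
    int blockSize = 256;
    int gridSize = (n + blockSize - 1) / blockSize;   // ceiling division -> 4 blocks
    hipLaunchKernelGGL(scale, dim3(gridSize), dim3(blockSize), 0, 0, d_x, 2.0f, n);
    hipDeviceSynchronize();
    hipFree(d_x);
    return 0;
}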
cae743abc22a71196364bfef98f6f525d697a213.cu
#include "cuda_kmeans/cuda_datum.hpp" __device__ float d_l2norm(float f1, float f2){ return f1-f2; } __global__ void d_cudaKMeans(float * dataset_1, float * dataset_2, float * matches, int length){ int id = blockIdx.x*blockDim.x + threadIdx.x; if(id >= length){ return; } float last_distance_matched = INT_MAX, curr_distance_matched = 0; matches[id] = 0.0; for(size_t i = 0; i < length; ++i){ if (i == id){ continue; } curr_distance_matched = d_l2norm(dataset_1[id], dataset_2[i]); if ( last_distance_matched > curr_distance_matched ){ matches[id] = i; last_distance_matched = curr_distance_matched; } } return; } void h_cudaKMeans(CUDADatum * datum_1, CUDADatum * datum_2, CUDADatum * matches){ // 32 = warp size. shared memory is // permitted between threads in a block int blockSize = 32*10; // number of threads in a block int gridSize = (datum_1->length + blockSize - 1)/blockSize; // number of blocks; d_cudaKMeans<<<gridSize, blockSize>>>( datum_1->d_in_buffer, datum_2->d_in_buffer, matches->d_out_buffer, datum_1->length ); cudaDeviceSynchronize(); }
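// d_cudaKMeans above is effectively a brute-force nearest-neighbour match: each
// thread scans the whole second dataset and records the index of its closest
// element. A hedged sketch of the same scan with an absolute-difference metric
// and an integer output column (hypothetical names, not the author's kernel):
#include <cuda_runtime.h>
#include <cfloat>
#include <cstdio>

__global__ void nearest_match(const float* a, const float* b, int* match, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    float best = FLT_MAX;
    int best_j = 0;
    for (int j = 0; j < n; ++j) {
        float d = fabsf(a[i] - b[j]);       // 1-D distance as an absolute difference
        if (d < best) { best = d; best_j = j; }
    }
    match[i] = best_j;                       // keep the index in an int, not a float
}

int main() {
    const int n = 1024;
    float h_a[n], h_b[n];
    for (int i = 0; i < n; ++i) { h_a[i] = (float)i; h_b[i] = (float)(n - 1 - i); }
    float *d_a, *d_b; int *d_match;
    cudaMalloc(&d_a, n * sizeof(float));
    cudaMalloc(&d_b, n * sizeof(float));
    cudaMalloc(&d_match, n * sizeof(int));
    cudaMemcpy(d_a, h_a, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, n * sizeof(float), cudaMemcpyHostToDevice);
    int blockSize = 256, gridSize = (n + blockSize - 1) / blockSize;
    nearest_match<<<gridSize, blockSize>>>(d_a, d_b, d_match, n);
    int first_match = 0;
    cudaMemcpy(&first_match, d_match, sizeof(int), cudaMemcpyDeviceToHost);
    printf("a[0] matches b[%d]\n", first_match);   // expect n-1, since b is a reversed a
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_match);
    return 0;
}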
9764910748615234624a2dd6473dbe88929cfe06.hip
// !!! This is a file automatically generated by hipify!!! ///////////////////////////////////////////////////////////////////////////// // // BSD 3-Clause License // // Copyright (c) 2019, The Regents of the University of California // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////////// #include <cusp/precond/diagonal.h> #include <cusp/blas/blas.h> #include <cusp/krylov/bicgstab.h> #include "gpuSolver.h" namespace gpl { using namespace std; using utl::GPL; void GpuSolver::cudaerror(hipError_t code) { if (code != hipSuccess) { log_->error(GPL, 1, "[CUDA ERROR] {} at line {} in file {} \n", hipGetErrorString(code), __LINE__, __FILE__); } } void GpuSolver::cusparseerror(hipsparseStatus_t code) { if (code != HIPSPARSE_STATUS_SUCCESS) { log_->error(GPL, 1, "[CUSPARSE ERROR] {} at line {} in file {}\n", hipsparseGetErrorString(code), __LINE__, __FILE__); } } void GpuSolver::cusolvererror(cusolverStatus_t code) { if (code != CUSOLVER_STATUS_SUCCESS) { log_->error(GPL, 1, "[CUSOLVER ERROR] {} at line {} in file {}\n", hipGetErrorString(*(hipError_t*) &code), __LINE__, __FILE__); } } GpuSolver::GpuSolver(SMatrix& placeInstForceMatrix, Eigen::VectorXf& fixedInstForceVec, utl::Logger* logger) { // {cooRowIndex_, cooColIndex_, cooVal_} are the host vectors used to store // the sparse format of placeInstForceMatrix. 
nnz_ = placeInstForceMatrix.nonZeros(); vector<int> cooRowIndex, cooColIndex; vector<float> cooVal; cooRowIndex.reserve(nnz_); cooColIndex.reserve(nnz_); cooVal.reserve(nnz_); for (int row = 0; row < placeInstForceMatrix.outerSize(); row++) { for (typename Eigen::SparseMatrix<float, Eigen::RowMajor>::InnerIterator it( placeInstForceMatrix, row); it; ++it) { cooRowIndex.push_back(it.row()); cooColIndex.push_back(it.col()); cooVal.push_back(it.value()); } } m_ = fixedInstForceVec.size(); nnz_ = cooVal.size(); log_ = logger; d_cooRowIndex_.resize(nnz_); d_cooColIndex_.resize(nnz_); d_cooVal_.resize(nnz_); d_fixedInstForceVec_.resize(m_); d_instLocVec_.resize(m_); // Copy the COO formatted triplets to device thrust::copy(cooRowIndex.begin(), cooRowIndex.end(), d_cooRowIndex_.begin()); thrust::copy(cooColIndex.begin(), cooColIndex.end(), d_cooColIndex_.begin()); thrust::copy(cooVal.begin(), cooVal.end(), d_cooVal_.begin()); thrust::copy(&fixedInstForceVec[0], &fixedInstForceVec[m_ - 1], d_fixedInstForceVec_.begin()); // Set raw pointers to point to the triplets in the device r_cooRowIndex_ = thrust::raw_pointer_cast(d_cooRowIndex_.data()); r_cooColIndex_ = thrust::raw_pointer_cast(d_cooColIndex_.data()); r_cooVal_ = thrust::raw_pointer_cast(d_cooVal_.data()); r_fixedInstForceVec_ = thrust::raw_pointer_cast(d_fixedInstForceVec_.data()); r_instLocVec_ = thrust::raw_pointer_cast(d_instLocVec_.data()); } void GpuSolver::cusolverCal(Eigen::VectorXf& instLocVec) { // Updated CUDA solver using CUSP library thrust::device_ptr<int> p_rowInd = thrust::device_pointer_cast(r_cooRowIndex_); thrust::device_ptr<int> p_colInd = thrust::device_pointer_cast(r_cooColIndex_); thrust::device_ptr<float> p_val = thrust::device_pointer_cast(r_cooVal_); thrust::device_ptr<float> d_fixedInstForceVec_ = thrust::device_pointer_cast(r_fixedInstForceVec_); thrust::device_ptr<float> p_instLocVec_ = thrust::device_pointer_cast(r_instLocVec_); // use array1d_view to wrap the individual arrays typedef typename cusp::array1d_view<thrust::device_ptr<int>> DeviceIndexArrayView; typedef typename cusp::array1d_view<thrust::device_ptr<float>> DeviceValueArrayView; DeviceIndexArrayView row_indices(p_rowInd, p_rowInd + nnz_); DeviceIndexArrayView column_indices(p_colInd, p_colInd + nnz_); DeviceValueArrayView values(p_val, p_val + nnz_); DeviceValueArrayView d_x(p_instLocVec_, p_instLocVec_ + m_); DeviceValueArrayView d_b(d_fixedInstForceVec_, d_fixedInstForceVec_ + m_); // combine the three array1d_views into a coo_matrix_view typedef cusp::coo_matrix_view<DeviceIndexArrayView, DeviceIndexArrayView, DeviceValueArrayView> DeviceView; // construct a coo_matrix_view from the array1d_views DeviceView d_A(m_, m_, nnz_, row_indices, column_indices, values); // set stopping criteria. int iteration_limit = 100; float relative_tolerance = 1e-15; bool verbose = false; // Decide if the CUDA solver prints the iteration // details or not. 
cusp::monitor<float> monitor_( d_b, iteration_limit, relative_tolerance, verbose); // setup preconditioner cusp::precond::diagonal<float, cusp::device_memory> d_M(d_A); // solve the linear system A * x = b with the BICGSTAB method cusp::krylov::bicgstab(d_A, d_x, d_b, monitor_, d_M); // Sync and Copy data to host cudaerror(hipMemcpy(instLocVec.data(), r_instLocVec_, sizeof(float) * m_, hipMemcpyDeviceToHost)); // Calculate AX = A * X - B cusp::coo_matrix<int, float, cusp::device_memory> A(d_A); cusp::array1d<float, cusp::device_memory> X(d_x); cusp::array1d<float, cusp::device_memory> B(d_b); cusp::array1d<float, cusp::device_memory> AX(m_); cusp::multiply(A, X, AX); cusp::blas::axpy(B, AX, -1); // Calculate L1 norm of the residual vector. error_ = cusp::blas::nrm1(AX) / cusp::blas::nrm1(B); } float GpuSolver::error() { return (error_ > 0) ? error_ : -error_; } GpuSolver::~GpuSolver() { } } // namespace gpl
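// cusolverCal above wraps raw device pointers in CUSP views and then solves
// A*x = b with cusp::krylov::bicgstab plus a diagonal (Jacobi) preconditioner.
// A free-standing sketch of that solve on a generated test matrix (CUSP's
// 5-point Poisson operator, not the placer's force matrix; the monitor is
// constructed the same way as above, with an iteration limit and tolerance):
#include <cusp/csr_matrix.h>
#include <cusp/array1d.h>
#include <cusp/gallery/poisson.h>
#include <cusp/monitor.h>
#include <cusp/precond/diagonal.h>
#include <cusp/krylov/bicgstab.h>

int main() {
    cusp::csr_matrix<int, float, cusp::device_memory> A;
    cusp::gallery::poisson5pt(A, 64, 64);                      // 4096 x 4096 test system
    cusp::array1d<float, cusp::device_memory> x(A.num_rows, 0.0f);
    cusp::array1d<float, cusp::device_memory> b(A.num_rows, 1.0f);

    cusp::monitor<float> monitor(b, 100, 1e-6, false);         // limit, tolerance, quiet
    cusp::precond::diagonal<float, cusp::device_memory> M(A);  // Jacobi preconditioner
    cusp::krylov::bicgstab(A, x, b, monitor, M);               // x now approximates A \ b
    return 0;
}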
9764910748615234624a2dd6473dbe88929cfe06.cu
///////////////////////////////////////////////////////////////////////////// // // BSD 3-Clause License // // Copyright (c) 2019, The Regents of the University of California // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////////// #include <cusp/precond/diagonal.h> #include <cusp/blas/blas.h> #include <cusp/krylov/bicgstab.h> #include "gpuSolver.h" namespace gpl { using namespace std; using utl::GPL; void GpuSolver::cudaerror(cudaError_t code) { if (code != cudaSuccess) { log_->error(GPL, 1, "[CUDA ERROR] {} at line {} in file {} \n", cudaGetErrorString(code), __LINE__, __FILE__); } } void GpuSolver::cusparseerror(cusparseStatus_t code) { if (code != CUSPARSE_STATUS_SUCCESS) { log_->error(GPL, 1, "[CUSPARSE ERROR] {} at line {} in file {}\n", cusparseGetErrorString(code), __LINE__, __FILE__); } } void GpuSolver::cusolvererror(cusolverStatus_t code) { if (code != CUSOLVER_STATUS_SUCCESS) { log_->error(GPL, 1, "[CUSOLVER ERROR] {} at line {} in file {}\n", cudaGetErrorString(*(cudaError_t*) &code), __LINE__, __FILE__); } } GpuSolver::GpuSolver(SMatrix& placeInstForceMatrix, Eigen::VectorXf& fixedInstForceVec, utl::Logger* logger) { // {cooRowIndex_, cooColIndex_, cooVal_} are the host vectors used to store // the sparse format of placeInstForceMatrix. 
nnz_ = placeInstForceMatrix.nonZeros(); vector<int> cooRowIndex, cooColIndex; vector<float> cooVal; cooRowIndex.reserve(nnz_); cooColIndex.reserve(nnz_); cooVal.reserve(nnz_); for (int row = 0; row < placeInstForceMatrix.outerSize(); row++) { for (typename Eigen::SparseMatrix<float, Eigen::RowMajor>::InnerIterator it( placeInstForceMatrix, row); it; ++it) { cooRowIndex.push_back(it.row()); cooColIndex.push_back(it.col()); cooVal.push_back(it.value()); } } m_ = fixedInstForceVec.size(); nnz_ = cooVal.size(); log_ = logger; d_cooRowIndex_.resize(nnz_); d_cooColIndex_.resize(nnz_); d_cooVal_.resize(nnz_); d_fixedInstForceVec_.resize(m_); d_instLocVec_.resize(m_); // Copy the COO formatted triplets to device thrust::copy(cooRowIndex.begin(), cooRowIndex.end(), d_cooRowIndex_.begin()); thrust::copy(cooColIndex.begin(), cooColIndex.end(), d_cooColIndex_.begin()); thrust::copy(cooVal.begin(), cooVal.end(), d_cooVal_.begin()); thrust::copy(&fixedInstForceVec[0], &fixedInstForceVec[m_ - 1], d_fixedInstForceVec_.begin()); // Set raw pointers to point to the triplets in the device r_cooRowIndex_ = thrust::raw_pointer_cast(d_cooRowIndex_.data()); r_cooColIndex_ = thrust::raw_pointer_cast(d_cooColIndex_.data()); r_cooVal_ = thrust::raw_pointer_cast(d_cooVal_.data()); r_fixedInstForceVec_ = thrust::raw_pointer_cast(d_fixedInstForceVec_.data()); r_instLocVec_ = thrust::raw_pointer_cast(d_instLocVec_.data()); } void GpuSolver::cusolverCal(Eigen::VectorXf& instLocVec) { // Updated CUDA solver using CUSP library thrust::device_ptr<int> p_rowInd = thrust::device_pointer_cast(r_cooRowIndex_); thrust::device_ptr<int> p_colInd = thrust::device_pointer_cast(r_cooColIndex_); thrust::device_ptr<float> p_val = thrust::device_pointer_cast(r_cooVal_); thrust::device_ptr<float> d_fixedInstForceVec_ = thrust::device_pointer_cast(r_fixedInstForceVec_); thrust::device_ptr<float> p_instLocVec_ = thrust::device_pointer_cast(r_instLocVec_); // use array1d_view to wrap the individual arrays typedef typename cusp::array1d_view<thrust::device_ptr<int>> DeviceIndexArrayView; typedef typename cusp::array1d_view<thrust::device_ptr<float>> DeviceValueArrayView; DeviceIndexArrayView row_indices(p_rowInd, p_rowInd + nnz_); DeviceIndexArrayView column_indices(p_colInd, p_colInd + nnz_); DeviceValueArrayView values(p_val, p_val + nnz_); DeviceValueArrayView d_x(p_instLocVec_, p_instLocVec_ + m_); DeviceValueArrayView d_b(d_fixedInstForceVec_, d_fixedInstForceVec_ + m_); // combine the three array1d_views into a coo_matrix_view typedef cusp::coo_matrix_view<DeviceIndexArrayView, DeviceIndexArrayView, DeviceValueArrayView> DeviceView; // construct a coo_matrix_view from the array1d_views DeviceView d_A(m_, m_, nnz_, row_indices, column_indices, values); // set stopping criteria. int iteration_limit = 100; float relative_tolerance = 1e-15; bool verbose = false; // Decide if the CUDA solver prints the iteration // details or not. 
cusp::monitor<float> monitor_( d_b, iteration_limit, relative_tolerance, verbose); // setup preconditioner cusp::precond::diagonal<float, cusp::device_memory> d_M(d_A); // solve the linear system A * x = b with the BICGSTAB method cusp::krylov::bicgstab(d_A, d_x, d_b, monitor_, d_M); // Sync and Copy data to host cudaerror(cudaMemcpy(instLocVec.data(), r_instLocVec_, sizeof(float) * m_, cudaMemcpyDeviceToHost)); // Calculate AX = A * X - B cusp::coo_matrix<int, float, cusp::device_memory> A(d_A); cusp::array1d<float, cusp::device_memory> X(d_x); cusp::array1d<float, cusp::device_memory> B(d_b); cusp::array1d<float, cusp::device_memory> AX(m_); cusp::multiply(A, X, AX); cusp::blas::axpy(B, AX, -1); // Calculate L1 norm of the residual vector. error_ = cusp::blas::nrm1(AX) / cusp::blas::nrm1(B); } float GpuSolver::error() { return (error_ > 0) ? error_ : -error_; } GpuSolver::~GpuSolver() { } } // namespace gpl
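// After the solve, cusolverCal above reports a relative L1 residual:
// AX = A*x - b, error_ = nrm1(AX) / nrm1(b). A small sketch of just that
// residual check on a made-up consistent system (b is built as A*x, so the
// printed residual should be ~0):
#include <cusp/coo_matrix.h>
#include <cusp/array1d.h>
#include <cusp/gallery/poisson.h>
#include <cusp/multiply.h>
#include <cusp/blas/blas.h>
#include <iostream>

int main() {
    cusp::coo_matrix<int, float, cusp::device_memory> A;
    cusp::gallery::poisson5pt(A, 32, 32);
    cusp::array1d<float, cusp::device_memory> x(A.num_rows, 1.0f);  // candidate solution
    cusp::array1d<float, cusp::device_memory> b(A.num_rows, 0.0f);
    cusp::multiply(A, x, b);                                        // b = A*x, so x is exact

    cusp::array1d<float, cusp::device_memory> r(A.num_rows);
    cusp::multiply(A, x, r);                                        // r = A*x
    cusp::blas::axpy(b, r, -1.0f);                                  // r = r - b
    float error = cusp::blas::nrm1(r) / cusp::blas::nrm1(b);        // relative L1 residual
    std::cout << "relative L1 residual: " << error << std::endl;
    return 0;
}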
7d21aaa9506f0b5424505f52ef91a7618c1cc8d4.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include<cassert> struct Go { virtual ~Go(){} virtual void go() =0; inline static Go * me(Go * in=nullptr) { static Go * l = nullptr; if (in) l=in; return l; } }; #include<cstdio> __device__ uint32_t hashedIndexEE(uint32_t id); __constant__ int q[] = {0,1,2,3,4}; __device__ __forceinline__ void barf(int * i) { extern __shared__ unsigned char shared_mem[]; shared_mem[threadIdx.x]=i[threadIdx.x]*q[2]*hashedIndexEE(i[7]); __syncthreads(); printf("bar %d %d\n", shared_mem[0],q[3]); int * q = 0; if (i[2]>3) { q = new int[i[2]]; memset(q,0,4*i[2]); atomicAdd(q+2,shared_mem[0]); memcpy(i,q,min(40,4*i[2])); __syncthreads(); i[4] = q[threadIdx.x]; } delete [] q; } __global__ void bar(int * i) { barf(i); } struct Large { int v[100]; }; __global__ void huge(int * i, int * a1, int * a2, int * a3, int * a4, int * a5, int * a6, int * a7, int * a8, Large l1, Large l2, Large l3 ) { extern __shared__ unsigned char shared_mem[]; shared_mem[threadIdx.x]=i[threadIdx.x]; __syncthreads(); printf("bar %d %d\n", shared_mem[0], l1.v[3]); } __global__ void crash(int * i) { hipLaunchKernelGGL(( bar), dim3(1),dim3(1), 0, 0, i); hipDeviceSynchronize(); } /* #include <hip/hip_cooperative_groups.h> using namespace cooperative_groups; __global__ void coop(int * i) { grid_group grid = this_grid(); barf(i); grid.sync(); barf(i); grid.sync(); } */ #include "cudaCheck.h" void wrapper() { int a[10]; a[0]=4; a[2]=0; int * d; hipMalloc(&d,40); hipMemcpyAsync(d,a,40,hipMemcpyHostToDevice,0); hipLaunchKernelGGL(( bar), dim3(1),dim3(1),1024,0, d); cudaCheck(hipGetLastError()); hipDeviceSynchronize(); cudaCheck(hipGetLastError()); Large l1, l2, l3; l1.v[3]=5; hipLaunchKernelGGL(( huge), dim3(1),dim3(1),1024, 0, d, d,d,d,d, d,d,d,d,l1,l2,l3); cudaCheck(hipGetLastError()); hipDeviceSynchronize(); cudaCheck(hipGetLastError()); } #include<iostream> struct Me : private Go { Me(int a) { std::cout << "Loaded " << a << std::endl; assert(this==Go::me(this)); assert(this==Go::me()); } void go() override { std::cout << "go" << std::endl; wrapper(); std::cout << "gone" << std::endl; } }; Me me(3); struct QQ { QQ() { std::cerr << "QQ Loaded"<< std::endl; } }; QQ qq;
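// bar and huge above declare `extern __shared__` buffers whose size is supplied
// as the third launch parameter (1024 bytes in wrapper()). A minimal sketch of
// that dynamic shared-memory pattern in HIP (toy block-level reverse, not the
// file's kernels):
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void reverse(const int* in, int* out, int n) {
    extern __shared__ int tile[];          // size fixed at launch time
    int i = threadIdx.x;
    if (i < n) tile[i] = in[i];
    __syncthreads();                       // make every element visible to every thread
    if (i < n) out[i] = tile[n - 1 - i];
}

int main() {
    const int n = 64;
    int h_in[n], h_out[n];
    for (int i = 0; i < n; ++i) h_in[i] = i;
    int *d_in, *d_out;
    hipMalloc(&d_in, n * sizeof(int));
    hipMalloc(&d_out, n * sizeof(int));
    hipMemcpy(d_in, h_in, n * sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(reverse, dim3(1), dim3(n), n * sizeof(int) /* dynamic shared bytes */, 0,
                       d_in, d_out, n);
    hipMemcpy(h_out, d_out, n * sizeof(int), hipMemcpyDeviceToHost);
    printf("out[0] = %d (expect %d)\n", h_out[0], n - 1);
    hipFree(d_in); hipFree(d_out);
    return 0;
}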
7d21aaa9506f0b5424505f52ef91a7618c1cc8d4.cu
#include <cuda.h> #include <cuda_runtime.h> #include<cassert> struct Go { virtual ~Go(){} virtual void go() =0; inline static Go * me(Go * in=nullptr) { static Go * l = nullptr; if (in) l=in; return l; } }; #include<cstdio> __device__ uint32_t hashedIndexEE(uint32_t id); __constant__ int q[] = {0,1,2,3,4}; __device__ __forceinline__ void barf(int * i) { extern __shared__ unsigned char shared_mem[]; shared_mem[threadIdx.x]=i[threadIdx.x]*q[2]*hashedIndexEE(i[7]); __syncthreads(); printf("bar %d %d\n", shared_mem[0],q[3]); int * q = 0; if (i[2]>3) { q = new int[i[2]]; memset(q,0,4*i[2]); atomicAdd(q+2,shared_mem[0]); memcpy(i,q,min(40,4*i[2])); __syncthreads(); i[4] = q[threadIdx.x]; } delete [] q; } __global__ void bar(int * i) { barf(i); } struct Large { int v[100]; }; __global__ void huge(int * i, int * a1, int * a2, int * a3, int * a4, int * a5, int * a6, int * a7, int * a8, Large l1, Large l2, Large l3 ) { extern __shared__ unsigned char shared_mem[]; shared_mem[threadIdx.x]=i[threadIdx.x]; __syncthreads(); printf("bar %d %d\n", shared_mem[0], l1.v[3]); } __global__ void crash(int * i) { bar<<<1,1>>>(i); cudaDeviceSynchronize(); } /* #include <cooperative_groups.h> using namespace cooperative_groups; __global__ void coop(int * i) { grid_group grid = this_grid(); barf(i); grid.sync(); barf(i); grid.sync(); } */ #include "cudaCheck.h" void wrapper() { int a[10]; a[0]=4; a[2]=0; int * d; cudaMalloc(&d,40); cudaMemcpyAsync(d,a,40,cudaMemcpyHostToDevice,0); bar<<<1,1,1024,0>>>(d); cudaCheck(cudaGetLastError()); cudaDeviceSynchronize(); cudaCheck(cudaGetLastError()); Large l1, l2, l3; l1.v[3]=5; huge<<<1,1,1024>>>(d, d,d,d,d, d,d,d,d,l1,l2,l3); cudaCheck(cudaGetLastError()); cudaDeviceSynchronize(); cudaCheck(cudaGetLastError()); } #include<iostream> struct Me : private Go { Me(int a) { std::cout << "Loaded " << a << std::endl; assert(this==Go::me(this)); assert(this==Go::me()); } void go() override { std::cout << "go" << std::endl; wrapper(); std::cout << "gone" << std::endl; } }; Me me(3); struct QQ { QQ() { std::cerr << "QQ Loaded"<< std::endl; } }; QQ qq;
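// wrapper() above brackets each launch with cudaCheck(cudaGetLastError()) and a
// cudaDeviceSynchronize(). cudaCheck.h itself is not part of this file; a typical
// shape for such a helper looks like the macro below (an assumption about the
// helper, not the author's header, hence the distinct name):
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

#define cudaCheckSketch(call)                                          \
  do {                                                                 \
    cudaError_t err_ = (call);                                         \
    if (err_ != cudaSuccess) {                                         \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                      \
              cudaGetErrorString(err_), __FILE__, __LINE__);           \
      exit(EXIT_FAILURE);                                              \
    }                                                                  \
  } while (0)

__global__ void noop() {}

int main() {
    noop<<<1, 1>>>();
    cudaCheckSketch(cudaGetLastError());        // catches launch-configuration errors
    cudaCheckSketch(cudaDeviceSynchronize());   // catches errors raised during execution
    return 0;
}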
85e9d77cd4c4f4adf01510da5415f3294fbd6583.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/cudf.h> #include <bitmask/legacy/bit_mask.cuh> #include <cudf/legacy/bitmask.hpp> #include <cudf/copying.hpp> #include <cudf/groupby.hpp> #include <cudf/table.hpp> #include <hash/concurrent_unordered_map.cuh> #include <string/nvcategory_util.hpp> #include <table/device_table.cuh> #include <table/device_table_row_operators.cuh> #include <utilities/column_utils.hpp> #include <utilities/cuda_utils.hpp> #include <utilities/type_dispatcher.hpp> #include "aggregation_requests.hpp" #include "groupby.hpp" #include "groupby_kernels.cuh" #include "type_info.hpp" #include <rmm/thrust_rmm_allocator.h> #include <thrust/fill.h> #include <type_traits> #include <vector> namespace cudf { namespace groupby { namespace hash { namespace { /**---------------------------------------------------------------------------* * @brief Verifies the requested aggregation is valid for the type of the value * column. * * Given a table of values and a set of operators, verifies that `ops[i]` is * valid to perform on `column[i]`. * * @throw cudf::logic_error if an invalid combination of value type and operator * is requested. * * @param values The table of columns * @param ops The aggregation operators *---------------------------------------------------------------------------**/ void verify_operators(table const& values, std::vector<operators> const& ops) { CUDF_EXPECTS(static_cast<gdf_size_type>(ops.size()) == values.num_columns(), "Size mismatch between ops and value columns"); for (gdf_size_type i = 0; i < values.num_columns(); ++i) { // TODO Add more checks here, i.e., can't compute sum of non-arithemtic // types if ((ops[i] == SUM) and (values.get_column(i)->dtype == GDF_STRING_CATEGORY)) { CUDF_FAIL( "Cannot compute SUM aggregation of GDF_STRING_CATEGORY column."); } } } /**---------------------------------------------------------------------------* * @brief Determines target gdf_dtypes to use for combinations of source * gdf_dtypes and aggregation operations. * * Given vectors of source gdf_dtypes and corresponding aggregation operations * to be performed on that type, returns a vector of gdf_dtypes to use to store * the result of the aggregation operations. 
* * @param source_dtypes The source types * @param op The aggregation operations * @return Target gdf_dtypes to use for the target aggregation columns *---------------------------------------------------------------------------**/ inline std::vector<gdf_dtype> target_dtypes( std::vector<gdf_dtype> const& source_dtypes, std::vector<operators> const& ops) { std::vector<gdf_dtype> output_dtypes(source_dtypes.size()); std::transform( source_dtypes.begin(), source_dtypes.end(), ops.begin(), output_dtypes.begin(), [](gdf_dtype source_dtype, operators op) { gdf_dtype t = cudf::type_dispatcher(source_dtype, target_type_mapper{}, op); CUDF_EXPECTS( t != GDF_invalid, "Invalid combination of input type and aggregation operation."); return t; }); return output_dtypes; } /**---------------------------------------------------------------------------* * @brief Dispatched functor to initialize a column with the identity of an *aggregation operation. *---------------------------------------------------------------------------**/ struct identity_initializer { template <typename T> T get_identity(operators op) { switch (op) { case SUM: return corresponding_functor_t<SUM>::identity<T>(); case MIN: return corresponding_functor_t<MIN>::identity<T>(); case MAX: return corresponding_functor_t<MAX>::identity<T>(); case COUNT: return corresponding_functor_t<COUNT>::identity<T>(); default: CUDF_FAIL("Invalid aggregation operation."); } } template <typename T> void operator()(gdf_column const& col, operators op, hipStream_t stream = 0) { T* typed_data = static_cast<T*>(col.data); thrust::fill(rmm::exec_policy(stream)->on(stream), typed_data, typed_data + col.size, get_identity<T>(op)); // For COUNT operator, initialize column's bitmask to be all valid if ((nullptr != col.valid) and (COUNT == op)) { CUDA_TRY(hipMemsetAsync( col.valid, 0xff, sizeof(gdf_valid_type) * gdf_valid_allocation_size(col.size), stream)); } } }; /**---------------------------------------------------------------------------* * @brief Initializes each column in a table with a corresponding identity value * of an aggregation operation. * * The `i`th column will be initialized with the identity value of the `i`th * aggregation operation. * * @note The validity bitmask (if not `nullptr`) for the column corresponding to * a COUNT operator will be initialized to all valid. * * @param table The table of columns to initialize. * @param operators The aggregation operations whose identity values will be *used to initialize the columns. *---------------------------------------------------------------------------**/ void initialize_with_identity(cudf::table const& table, std::vector<operators> const& ops, hipStream_t stream = 0) { // TODO: Initialize all the columns in a single kernel instead of invoking one // kernel per column for (gdf_size_type i = 0; i < table.num_columns(); ++i) { gdf_column const* col = table.get_column(i); cudf::type_dispatcher(col->dtype, identity_initializer{}, *col, ops[i]); } } /**---------------------------------------------------------------------------* * @brief Compacts any GDF_STRING_CATEGORY columns in the output keys or values. * * After the groupby operation, any GDF_STRING_CATEGORY column in either the * keys or values may reference only a subset of the strings in the original * input category. This function will create a new associated NVCategory object * for the output GDF_STRING_CATEGORY columns whose dictionary contains only the * strings referenced in the output result. 
* * @param[in] input_keys The set of input key columns * @param[in/out] output_keys The set of output key columns * @param[in] input_values The set of input value columns * @param[in/out] output_values The set of output value columns *---------------------------------------------------------------------------**/ void update_nvcategories(table const& input_keys, table& output_keys, table const& input_values, table& output_values) { nvcategory_gather_table(input_keys, output_keys); nvcategory_gather_table(input_values, output_values); } template <bool keys_have_nulls, bool values_have_nulls> auto build_aggregation_map(table const& input_keys, table const& input_values, device_table const& d_input_keys, device_table const& d_input_values, std::vector<operators> const& ops, Options options, hipStream_t stream) { gdf_size_type constexpr unused_key{std::numeric_limits<gdf_size_type>::max()}; gdf_size_type constexpr unused_value{ std::numeric_limits<gdf_size_type>::max()}; CUDF_EXPECTS(input_keys.num_rows() < unused_key, "Groupby input size too large."); // The exact output size is unknown a priori, therefore, use the input size as // an upper bound gdf_size_type const output_size_estimate{input_keys.num_rows()}; cudf::table sparse_output_values{ output_size_estimate, target_dtypes(column_dtypes(input_values), ops), values_have_nulls, false, stream}; initialize_with_identity(sparse_output_values, ops, stream); auto d_sparse_output_values = device_table::create(sparse_output_values, stream); rmm::device_vector<operators> d_ops(ops); // If we ignore null keys, then nulls are not equivalent bool const null_keys_are_equal{not options.ignore_null_keys}; bool const skip_key_rows_with_nulls{keys_have_nulls and not null_keys_are_equal}; row_hasher<keys_have_nulls> hasher{d_input_keys}; row_equality_comparator<keys_have_nulls> rows_equal{ d_input_keys, d_input_keys, null_keys_are_equal}; using map_type = concurrent_unordered_map<gdf_size_type, gdf_size_type, decltype(hasher), decltype(rows_equal)>; auto map = std::make_unique<map_type>(compute_hash_table_size(input_keys.num_rows()), unused_key, unused_value, hasher, rows_equal); // TODO: Explore optimal block size and work per thread. 
cudf::util::cuda::grid_config_1d grid_params{input_keys.num_rows(), 256}; if (skip_key_rows_with_nulls) { auto row_bitmask{cudf::row_bitmask(input_keys, stream)}; hipLaunchKernelGGL(( build_aggregation_map<true, values_have_nulls>) , dim3(grid_params.num_blocks), dim3(grid_params.num_threads_per_block), 0, stream, map.get(), d_input_keys, d_input_values, *d_sparse_output_values, d_ops.data().get(), row_bitmask.data().get()); } else { hipLaunchKernelGGL(( build_aggregation_map<false, values_have_nulls>) , dim3(grid_params.num_blocks), dim3(grid_params.num_threads_per_block), 0, stream, map.get(), d_input_keys, d_input_values, *d_sparse_output_values, d_ops.data().get(), nullptr); } CHECK_STREAM(stream); return std::make_pair(std::move(map), sparse_output_values); } template <bool keys_have_nulls, bool values_have_nulls, typename Map> auto extract_results(table const& input_keys, table const& input_values, device_table const& d_input_keys, table const& sparse_output_values, Map* map, hipStream_t stream) { cudf::table output_keys{cudf::allocate_like(input_keys, true, stream)}; cudf::table output_values{ cudf::allocate_like(sparse_output_values, true, stream)}; auto d_sparse_output_values = device_table::create(sparse_output_values, stream); auto d_output_keys = device_table::create(output_keys, stream); auto d_output_values = device_table::create(output_values, stream); gdf_size_type* d_result_size{nullptr}; RMM_TRY(RMM_ALLOC(&d_result_size, sizeof(gdf_size_type), stream)); CUDA_TRY(hipMemsetAsync(d_result_size, 0, sizeof(gdf_size_type), stream)); cudf::util::cuda::grid_config_1d grid_params{input_keys.num_rows(), 256}; hipLaunchKernelGGL(( extract_groupby_result<keys_have_nulls, values_have_nulls>) , dim3(grid_params.num_blocks), dim3(grid_params.num_threads_per_block), 0, stream, map, d_input_keys, *d_output_keys, *d_sparse_output_values, *d_output_values, d_result_size); CHECK_STREAM(stream); gdf_size_type result_size{-1}; CUDA_TRY(hipMemcpyAsync(&result_size, d_result_size, sizeof(gdf_size_type), hipMemcpyDeviceToHost, stream)); // Update size and null count of output columns auto update_column = [result_size](gdf_column* col) { CUDF_EXPECTS(col != nullptr, "Attempt to update Null column."); col->size = result_size; set_null_count(*col); return col; }; std::transform(output_keys.begin(), output_keys.end(), output_keys.begin(), update_column); std::transform(output_values.begin(), output_values.end(), output_values.begin(), update_column); return std::make_pair(output_keys, output_values); } /**---------------------------------------------------------------------------* * @brief Computes the groupby operation for a set of keys, values, and * operators using a hash-based implementation. * * The algorithm has two primary steps: * 1.) Build a hash map * 2.) Extract the non-empty entries from the hash table * * 1.) The hash map is built by inserting every row `i` from the `keys` and * `values` tables as a single (key,value) pair. When the pair is inserted, if * the key was not already present in the map, then the corresponding value is * simply copied to the output. If the key was already present in the map, * then the inserted `values` row is aggregated with the existing row. This * aggregation is done for every element `j` in the row by applying aggregation * operation `j` between the new and existing element. * * This process yields a hash map and table holding the resulting aggregation * rows. The aggregation output table is sparse, i.e., not every row is * populated. 
This is because the size of the output is not known a priori, and * so the output aggregation table is allocated to be as large as the input (the * upper bound of the output size). * * 2.) The final result is materialized by extracting the non-empty keys from * the hash map and the non-empty rows from the sparse output aggregation table. * Every non-empty key and value row is appended to the output key and value * tables. * * @tparam keys_have_nulls Indicates keys have one or more null values * @tparam values_have_nulls Indicates values have one or more null values * @param keys Table whose rows are used as keys of the groupby * @param values Table whose rows are aggregated in the groupby * @param ops Set of aggregation operations to perform for each element in a row * in the values table * @param options Options to control behavior of the groupby operation * @param stream CUDA stream on which all memory allocations and kernels will be * executed * @return A pair of the output keys table and output values table *---------------------------------------------------------------------------**/ template <bool keys_have_nulls, bool values_have_nulls> auto compute_hash_groupby(cudf::table const& keys, cudf::table const& values, std::vector<operators> const& ops, Options options, hipStream_t stream) { CUDF_EXPECTS(values.num_columns() == static_cast<gdf_size_type>(ops.size()), "Size mismatch between number of value columns and number of " "aggregations."); // An "aggregation request" is the combination of a `gdf_column*` to a column // of values, and an aggregation operation enum indicating the aggregation // requested to be performed on the column std::vector<AggRequestType> original_requests(values.num_columns()); std::transform(values.begin(), values.end(), ops.begin(), original_requests.begin(), [](gdf_column const* col, operators op) { return std::make_pair(const_cast<gdf_column*>(col), op); }); // Some aggregations are "compound", meaning they need be satisfied via the // composition of 1 or more "simple" aggregation requests. For example, MEAN // is satisfied via the division of the SUM by the COUNT aggregation. We // translate these compound requests into simple requests, and compute the // groupby operation for these simple requests. Later, we translate the simple // requests back to compound request results. 
std::vector<AggRequestType> simple_requests = compound_to_simple(original_requests); std::vector<gdf_column*> simple_values_columns; std::vector<operators> simple_operators; for (auto const& p : simple_requests) { simple_values_columns.push_back(const_cast<gdf_column*>(p.first)); simple_operators.push_back(p.second); } cudf::table simple_values_table{simple_values_columns}; auto const d_input_keys = device_table::create(keys); auto const d_input_values = device_table::create(simple_values_table); // Step 1: Build hash map auto result = build_aggregation_map<keys_have_nulls, values_have_nulls>( keys, simple_values_table, *d_input_keys, *d_input_values, simple_operators, options, stream); auto const map{std::move(result.first)}; cudf::table sparse_output_values{result.second}; // Step 2: Extract non-empty entries cudf::table output_keys; cudf::table simple_output_values; std::tie(output_keys, simple_output_values) = extract_results<keys_have_nulls, values_have_nulls>( keys, values, *d_input_keys, sparse_output_values, map.get(), stream); // Delete intermediate results storage sparse_output_values.destroy(); // If any of the original requests were compound, compute them from the // results of simple aggregation requests cudf::table final_output_values = compute_original_requests( original_requests, simple_requests, simple_output_values, stream); return std::make_pair(output_keys, final_output_values); } /**---------------------------------------------------------------------------* * @brief Returns appropriate callable instantiation of `compute_hash_groupby` * based on presence of null values in keys and values. * * @param keys The groupby key columns * @param values The groupby value columns * @return Instantiated callable of compute_hash_groupby *---------------------------------------------------------------------------**/ auto groupby_null_specialization(table const& keys, table const& values) { if (cudf::has_nulls(keys)) { if (cudf::has_nulls(values)) { return compute_hash_groupby<true, true>; } else { return compute_hash_groupby<true, false>; } } else { if (cudf::has_nulls(values)) { return compute_hash_groupby<false, true>; } else { return compute_hash_groupby<false, false>; } } } } // namespace namespace detail { std::pair<cudf::table, cudf::table> groupby(cudf::table const& keys, cudf::table const& values, std::vector<operators> const& ops, Options options, hipStream_t stream) { CUDF_EXPECTS(keys.num_rows() == values.num_rows(), "Size mismatch between number of rows in keys and values."); verify_operators(values, ops); // Empty inputs if (keys.num_rows() == 0) { return std::make_pair( cudf::empty_like(keys), cudf::table(0, target_dtypes(column_dtypes(values), ops))); } auto compute_groupby = groupby_null_specialization(keys, values); cudf::table output_keys; cudf::table output_values; std::tie(output_keys, output_values) = compute_groupby(keys, values, ops, options, stream); update_nvcategories(keys, output_keys, values, output_values); return std::make_pair(output_keys, output_values); } } // namespace detail std::pair<cudf::table, cudf::table> groupby(cudf::table const& keys, cudf::table const& values, std::vector<operators> const& ops, Options options) { return detail::groupby(keys, values, ops, options); } } // namespace hash } // namespace groupby } // namespace cudf
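// ---------------------------------------------------------------------------
// Illustrative usage sketch -- not part of the original file. It shows how a
// caller might drive the public hash-based groupby entry point defined above.
// The groupby() signature and the cudf::table constructor taking a
// std::vector<gdf_column*> are taken from this file; the namespace placement
// of the `operators` enum and the `Options` struct is assumed from their
// unqualified use inside namespace cudf::groupby::hash above, and the
// key/value columns plus the Options instance are assumed to be prepared by
// the caller. A sketch under those assumptions, not a drop-in test.
static std::pair<cudf::table, cudf::table> sum_by_key_example(
    gdf_column* key_col, gdf_column* value_col,
    cudf::groupby::hash::Options opts) {
  cudf::table keys{std::vector<gdf_column*>{key_col}};
  cudf::table values{std::vector<gdf_column*>{value_col}};
  // One aggregation per value column; SUM on a GDF_STRING_CATEGORY column
  // would be rejected by verify_operators() above.
  std::vector<cudf::groupby::hash::operators> ops{cudf::groupby::hash::SUM};
  return cudf::groupby::hash::groupby(keys, values, ops, opts);
}
// ---------------------------------------------------------------------------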
85e9d77cd4c4f4adf01510da5415f3294fbd6583.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/cudf.h> #include <bitmask/legacy/bit_mask.cuh> #include <cudf/legacy/bitmask.hpp> #include <cudf/copying.hpp> #include <cudf/groupby.hpp> #include <cudf/table.hpp> #include <hash/concurrent_unordered_map.cuh> #include <string/nvcategory_util.hpp> #include <table/device_table.cuh> #include <table/device_table_row_operators.cuh> #include <utilities/column_utils.hpp> #include <utilities/cuda_utils.hpp> #include <utilities/type_dispatcher.hpp> #include "aggregation_requests.hpp" #include "groupby.hpp" #include "groupby_kernels.cuh" #include "type_info.hpp" #include <rmm/thrust_rmm_allocator.h> #include <thrust/fill.h> #include <type_traits> #include <vector> namespace cudf { namespace groupby { namespace hash { namespace { /**---------------------------------------------------------------------------* * @brief Verifies the requested aggregation is valid for the type of the value * column. * * Given a table of values and a set of operators, verifies that `ops[i]` is * valid to perform on `column[i]`. * * @throw cudf::logic_error if an invalid combination of value type and operator * is requested. * * @param values The table of columns * @param ops The aggregation operators *---------------------------------------------------------------------------**/ void verify_operators(table const& values, std::vector<operators> const& ops) { CUDF_EXPECTS(static_cast<gdf_size_type>(ops.size()) == values.num_columns(), "Size mismatch between ops and value columns"); for (gdf_size_type i = 0; i < values.num_columns(); ++i) { // TODO Add more checks here, i.e., can't compute sum of non-arithemtic // types if ((ops[i] == SUM) and (values.get_column(i)->dtype == GDF_STRING_CATEGORY)) { CUDF_FAIL( "Cannot compute SUM aggregation of GDF_STRING_CATEGORY column."); } } } /**---------------------------------------------------------------------------* * @brief Determines target gdf_dtypes to use for combinations of source * gdf_dtypes and aggregation operations. * * Given vectors of source gdf_dtypes and corresponding aggregation operations * to be performed on that type, returns a vector of gdf_dtypes to use to store * the result of the aggregation operations. 
* * @param source_dtypes The source types * @param op The aggregation operations * @return Target gdf_dtypes to use for the target aggregation columns *---------------------------------------------------------------------------**/ inline std::vector<gdf_dtype> target_dtypes( std::vector<gdf_dtype> const& source_dtypes, std::vector<operators> const& ops) { std::vector<gdf_dtype> output_dtypes(source_dtypes.size()); std::transform( source_dtypes.begin(), source_dtypes.end(), ops.begin(), output_dtypes.begin(), [](gdf_dtype source_dtype, operators op) { gdf_dtype t = cudf::type_dispatcher(source_dtype, target_type_mapper{}, op); CUDF_EXPECTS( t != GDF_invalid, "Invalid combination of input type and aggregation operation."); return t; }); return output_dtypes; } /**---------------------------------------------------------------------------* * @brief Dispatched functor to initialize a column with the identity of an *aggregation operation. *---------------------------------------------------------------------------**/ struct identity_initializer { template <typename T> T get_identity(operators op) { switch (op) { case SUM: return corresponding_functor_t<SUM>::identity<T>(); case MIN: return corresponding_functor_t<MIN>::identity<T>(); case MAX: return corresponding_functor_t<MAX>::identity<T>(); case COUNT: return corresponding_functor_t<COUNT>::identity<T>(); default: CUDF_FAIL("Invalid aggregation operation."); } } template <typename T> void operator()(gdf_column const& col, operators op, cudaStream_t stream = 0) { T* typed_data = static_cast<T*>(col.data); thrust::fill(rmm::exec_policy(stream)->on(stream), typed_data, typed_data + col.size, get_identity<T>(op)); // For COUNT operator, initialize column's bitmask to be all valid if ((nullptr != col.valid) and (COUNT == op)) { CUDA_TRY(cudaMemsetAsync( col.valid, 0xff, sizeof(gdf_valid_type) * gdf_valid_allocation_size(col.size), stream)); } } }; /**---------------------------------------------------------------------------* * @brief Initializes each column in a table with a corresponding identity value * of an aggregation operation. * * The `i`th column will be initialized with the identity value of the `i`th * aggregation operation. * * @note The validity bitmask (if not `nullptr`) for the column corresponding to * a COUNT operator will be initialized to all valid. * * @param table The table of columns to initialize. * @param operators The aggregation operations whose identity values will be *used to initialize the columns. *---------------------------------------------------------------------------**/ void initialize_with_identity(cudf::table const& table, std::vector<operators> const& ops, cudaStream_t stream = 0) { // TODO: Initialize all the columns in a single kernel instead of invoking one // kernel per column for (gdf_size_type i = 0; i < table.num_columns(); ++i) { gdf_column const* col = table.get_column(i); cudf::type_dispatcher(col->dtype, identity_initializer{}, *col, ops[i]); } } /**---------------------------------------------------------------------------* * @brief Compacts any GDF_STRING_CATEGORY columns in the output keys or values. * * After the groupby operation, any GDF_STRING_CATEGORY column in either the * keys or values may reference only a subset of the strings in the original * input category. This function will create a new associated NVCategory object * for the output GDF_STRING_CATEGORY columns whose dictionary contains only the * strings referenced in the output result. 
* * @param[in] input_keys The set of input key columns * @param[in/out] output_keys The set of output key columns * @param[in] input_values The set of input value columns * @param[in/out] output_values The set of output value columns *---------------------------------------------------------------------------**/ void update_nvcategories(table const& input_keys, table& output_keys, table const& input_values, table& output_values) { nvcategory_gather_table(input_keys, output_keys); nvcategory_gather_table(input_values, output_values); } template <bool keys_have_nulls, bool values_have_nulls> auto build_aggregation_map(table const& input_keys, table const& input_values, device_table const& d_input_keys, device_table const& d_input_values, std::vector<operators> const& ops, Options options, cudaStream_t stream) { gdf_size_type constexpr unused_key{std::numeric_limits<gdf_size_type>::max()}; gdf_size_type constexpr unused_value{ std::numeric_limits<gdf_size_type>::max()}; CUDF_EXPECTS(input_keys.num_rows() < unused_key, "Groupby input size too large."); // The exact output size is unknown a priori, therefore, use the input size as // an upper bound gdf_size_type const output_size_estimate{input_keys.num_rows()}; cudf::table sparse_output_values{ output_size_estimate, target_dtypes(column_dtypes(input_values), ops), values_have_nulls, false, stream}; initialize_with_identity(sparse_output_values, ops, stream); auto d_sparse_output_values = device_table::create(sparse_output_values, stream); rmm::device_vector<operators> d_ops(ops); // If we ignore null keys, then nulls are not equivalent bool const null_keys_are_equal{not options.ignore_null_keys}; bool const skip_key_rows_with_nulls{keys_have_nulls and not null_keys_are_equal}; row_hasher<keys_have_nulls> hasher{d_input_keys}; row_equality_comparator<keys_have_nulls> rows_equal{ d_input_keys, d_input_keys, null_keys_are_equal}; using map_type = concurrent_unordered_map<gdf_size_type, gdf_size_type, decltype(hasher), decltype(rows_equal)>; auto map = std::make_unique<map_type>(compute_hash_table_size(input_keys.num_rows()), unused_key, unused_value, hasher, rows_equal); // TODO: Explore optimal block size and work per thread. 
cudf::util::cuda::grid_config_1d grid_params{input_keys.num_rows(), 256}; if (skip_key_rows_with_nulls) { auto row_bitmask{cudf::row_bitmask(input_keys, stream)}; build_aggregation_map<true, values_have_nulls> <<<grid_params.num_blocks, grid_params.num_threads_per_block, 0, stream>>>(map.get(), d_input_keys, d_input_values, *d_sparse_output_values, d_ops.data().get(), row_bitmask.data().get()); } else { build_aggregation_map<false, values_have_nulls> <<<grid_params.num_blocks, grid_params.num_threads_per_block, 0, stream>>>(map.get(), d_input_keys, d_input_values, *d_sparse_output_values, d_ops.data().get(), nullptr); } CHECK_STREAM(stream); return std::make_pair(std::move(map), sparse_output_values); } template <bool keys_have_nulls, bool values_have_nulls, typename Map> auto extract_results(table const& input_keys, table const& input_values, device_table const& d_input_keys, table const& sparse_output_values, Map* map, cudaStream_t stream) { cudf::table output_keys{cudf::allocate_like(input_keys, true, stream)}; cudf::table output_values{ cudf::allocate_like(sparse_output_values, true, stream)}; auto d_sparse_output_values = device_table::create(sparse_output_values, stream); auto d_output_keys = device_table::create(output_keys, stream); auto d_output_values = device_table::create(output_values, stream); gdf_size_type* d_result_size{nullptr}; RMM_TRY(RMM_ALLOC(&d_result_size, sizeof(gdf_size_type), stream)); CUDA_TRY(cudaMemsetAsync(d_result_size, 0, sizeof(gdf_size_type), stream)); cudf::util::cuda::grid_config_1d grid_params{input_keys.num_rows(), 256}; extract_groupby_result<keys_have_nulls, values_have_nulls> <<<grid_params.num_blocks, grid_params.num_threads_per_block, 0, stream>>>(map, d_input_keys, *d_output_keys, *d_sparse_output_values, *d_output_values, d_result_size); CHECK_STREAM(stream); gdf_size_type result_size{-1}; CUDA_TRY(cudaMemcpyAsync(&result_size, d_result_size, sizeof(gdf_size_type), cudaMemcpyDeviceToHost, stream)); // Update size and null count of output columns auto update_column = [result_size](gdf_column* col) { CUDF_EXPECTS(col != nullptr, "Attempt to update Null column."); col->size = result_size; set_null_count(*col); return col; }; std::transform(output_keys.begin(), output_keys.end(), output_keys.begin(), update_column); std::transform(output_values.begin(), output_values.end(), output_values.begin(), update_column); return std::make_pair(output_keys, output_values); } /**---------------------------------------------------------------------------* * @brief Computes the groupby operation for a set of keys, values, and * operators using a hash-based implementation. * * The algorithm has two primary steps: * 1.) Build a hash map * 2.) Extract the non-empty entries from the hash table * * 1.) The hash map is built by inserting every row `i` from the `keys` and * `values` tables as a single (key,value) pair. When the pair is inserted, if * the key was not already present in the map, then the corresponding value is * simply copied to the output. If the key was already present in the map, * then the inserted `values` row is aggregated with the existing row. This * aggregation is done for every element `j` in the row by applying aggregation * operation `j` between the new and existing element. * * This process yields a hash map and table holding the resulting aggregation * rows. The aggregation output table is sparse, i.e., not every row is * populated. 
This is because the size of the output is not known a priori, and * so the output aggregation table is allocated to be as large as the input (the * upper bound of the output size). * * 2.) The final result is materialized by extracting the non-empty keys from * the hash map and the non-empty rows from the sparse output aggregation table. * Every non-empty key and value row is appended to the output key and value * tables. * * @tparam keys_have_nulls Indicates keys have one or more null values * @tparam values_have_nulls Indicates values have one or more null values * @param keys Table whose rows are used as keys of the groupby * @param values Table whose rows are aggregated in the groupby * @param ops Set of aggregation operations to perform for each element in a row * in the values table * @param options Options to control behavior of the groupby operation * @param stream CUDA stream on which all memory allocations and kernels will be * executed * @return A pair of the output keys table and output values table *---------------------------------------------------------------------------**/ template <bool keys_have_nulls, bool values_have_nulls> auto compute_hash_groupby(cudf::table const& keys, cudf::table const& values, std::vector<operators> const& ops, Options options, cudaStream_t stream) { CUDF_EXPECTS(values.num_columns() == static_cast<gdf_size_type>(ops.size()), "Size mismatch between number of value columns and number of " "aggregations."); // An "aggregation request" is the combination of a `gdf_column*` to a column // of values, and an aggregation operation enum indicating the aggregation // requested to be performed on the column std::vector<AggRequestType> original_requests(values.num_columns()); std::transform(values.begin(), values.end(), ops.begin(), original_requests.begin(), [](gdf_column const* col, operators op) { return std::make_pair(const_cast<gdf_column*>(col), op); }); // Some aggregations are "compound", meaning they need be satisfied via the // composition of 1 or more "simple" aggregation requests. For example, MEAN // is satisfied via the division of the SUM by the COUNT aggregation. We // translate these compound requests into simple requests, and compute the // groupby operation for these simple requests. Later, we translate the simple // requests back to compound request results. 
std::vector<AggRequestType> simple_requests = compound_to_simple(original_requests); std::vector<gdf_column*> simple_values_columns; std::vector<operators> simple_operators; for (auto const& p : simple_requests) { simple_values_columns.push_back(const_cast<gdf_column*>(p.first)); simple_operators.push_back(p.second); } cudf::table simple_values_table{simple_values_columns}; auto const d_input_keys = device_table::create(keys); auto const d_input_values = device_table::create(simple_values_table); // Step 1: Build hash map auto result = build_aggregation_map<keys_have_nulls, values_have_nulls>( keys, simple_values_table, *d_input_keys, *d_input_values, simple_operators, options, stream); auto const map{std::move(result.first)}; cudf::table sparse_output_values{result.second}; // Step 2: Extract non-empty entries cudf::table output_keys; cudf::table simple_output_values; std::tie(output_keys, simple_output_values) = extract_results<keys_have_nulls, values_have_nulls>( keys, values, *d_input_keys, sparse_output_values, map.get(), stream); // Delete intermediate results storage sparse_output_values.destroy(); // If any of the original requests were compound, compute them from the // results of simple aggregation requests cudf::table final_output_values = compute_original_requests( original_requests, simple_requests, simple_output_values, stream); return std::make_pair(output_keys, final_output_values); } /**---------------------------------------------------------------------------* * @brief Returns appropriate callable instantiation of `compute_hash_groupby` * based on presence of null values in keys and values. * * @param keys The groupby key columns * @param values The groupby value columns * @return Instantiated callable of compute_hash_groupby *---------------------------------------------------------------------------**/ auto groupby_null_specialization(table const& keys, table const& values) { if (cudf::has_nulls(keys)) { if (cudf::has_nulls(values)) { return compute_hash_groupby<true, true>; } else { return compute_hash_groupby<true, false>; } } else { if (cudf::has_nulls(values)) { return compute_hash_groupby<false, true>; } else { return compute_hash_groupby<false, false>; } } } } // namespace namespace detail { std::pair<cudf::table, cudf::table> groupby(cudf::table const& keys, cudf::table const& values, std::vector<operators> const& ops, Options options, cudaStream_t stream) { CUDF_EXPECTS(keys.num_rows() == values.num_rows(), "Size mismatch between number of rows in keys and values."); verify_operators(values, ops); // Empty inputs if (keys.num_rows() == 0) { return std::make_pair( cudf::empty_like(keys), cudf::table(0, target_dtypes(column_dtypes(values), ops))); } auto compute_groupby = groupby_null_specialization(keys, values); cudf::table output_keys; cudf::table output_values; std::tie(output_keys, output_values) = compute_groupby(keys, values, ops, options, stream); update_nvcategories(keys, output_keys, values, output_values); return std::make_pair(output_keys, output_values); } } // namespace detail std::pair<cudf::table, cudf::table> groupby(cudf::table const& keys, cudf::table const& values, std::vector<operators> const& ops, Options options) { return detail::groupby(keys, values, ops, options); } } // namespace hash } // namespace groupby } // namespace cudf
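// ---------------------------------------------------------------------------
// Conceptual illustration -- not part of the original file. The comments above
// explain that a compound aggregation such as MEAN is decomposed into the
// simple SUM and COUNT requests and recombined afterwards by
// compute_original_requests(). The host-side helper below shows that
// recombination on plain std::vectors (one entry per output group); it is a
// sketch of the idea only, not the library's element-wise device code.
#include <cstddef>
#include <vector>

static std::vector<double> mean_from_sum_and_count(
    std::vector<double> const& group_sums,
    std::vector<int> const& group_counts) {
  std::vector<double> means(group_sums.size());
  for (std::size_t i = 0; i < group_sums.size(); ++i) {
    // A group only appears in the dense output if at least one row mapped to
    // it, so its COUNT is positive; guard anyway for the degenerate case.
    means[i] = (group_counts[i] > 0)
                   ? group_sums[i] / static_cast<double>(group_counts[i])
                   : 0.0;
  }
  return means;
}
// ---------------------------------------------------------------------------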
02a92f1e9902a9328afcd810bea1d8f0aeb80214.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2013 @author Azzam Haidar @author Tingxing Dong @precisions normal z -> s d c */ #include "common_magma.h" #include "batched_kernel_param.h" #include "magma_templates.h" #define PRECISION_z #define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column // dynamically allocated shared memory, set to size number of threads when the kernel is launched. // See CUDA Guide B.2.3 extern __shared__ magmaDoubleComplex shared_data[]; // dynamically allocated shared memory, set to size number of threads when the kernel is launched. // See CUDA Guide B.2.3 extern __shared__ double dble_shared_data[]; ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void zdotc_kernel_batched(int n, magmaDoubleComplex **x_array, int incx, int offset, magma_int_t *info_array, int gbstep) { int tx = threadIdx.x; magmaDoubleComplex *x = x_array[blockIdx.z]+offset; double *sdata = dble_shared_data; magmaDoubleComplex res = MAGMA_Z_ZERO; if (tx < n) { res = x[tx*incx]; } sdata[tx] = MAGMA_Z_REAL(res * MAGMA_Z_CNJG(res)); __syncthreads(); for(int s = blockDim.x/2; s > 32; s >>= 1 ) { if (tx < s) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if (tx < 32) { volatile double* smem = sdata; smem[tx] += smem[tx+32]; smem[tx] += smem[tx+16]; smem[tx] += smem[tx+8]; smem[tx] += smem[tx+4]; smem[tx] += smem[tx+2]; smem[tx] += smem[tx+1]; } if (tx == 0) { double xreal = MAGMA_Z_REAL(x[n*incx]); //MAGMA_Z_SET2REAL(x[n*incx], sqrt(xreal - sdata[0])); x[n*incx] = MAGMA_Z_MAKE(sqrt(xreal - sdata[0]), 0); if(x[n*incx] == MAGMA_Z_ZERO){ info_array[blockIdx.z] = offset + gbstep + 1; } } } void magma_zpotf2_zdotc_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t offset, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount) { /* Specialized Zdotc 1) performs zdotc sum = x[0:n-1]*conj(x[0:n-1]) 2) updates x[n] = sqrt(x[n]-sum); */ if (n > MAX_NTHREADS) { printf("n = %d > %d is not supported in zpotf2_zdotc\n", (int) n, (int) MAX_NTHREADS); } int threadSize; if (n <= 1024 && n > 512) { threadSize = 1024; } else if (n <= 512 && n > 256 ) { threadSize = 512; } else if (n <= 256 && n > 128) { threadSize = 256; } else if (n <= 128 && n > 64) { threadSize = 128; } else { threadSize = 64; } dim3 grid(1, 1, batchCount); hipLaunchKernelGGL(( zdotc_kernel_batched), dim3(grid), dim3(threadSize), threadSize * sizeof(double), magma_stream, n, x_array, incx, offset, info_array, gbstep); } ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void zdscal_kernel_batched(int n, magmaDoubleComplex **x_array, int incx, int offset, magma_int_t *info_array) { // checkinfo to avoid computation of the singular matrix if(info_array[blockIdx.z] != 0 ) return; int id = threadIdx.x; magmaDoubleComplex *x = x_array[blockIdx.z]+offset; __shared__ magmaDoubleComplex factor; if (threadIdx.x == 0) { factor = MAGMA_Z_MAKE(1.0/MAGMA_Z_REAL(x[0]), 0.0); } __syncthreads(); if ( id < n && id >0) { x[id*incx] = x[id*incx] * factor; //printf("x=%f", x[id*incx]); } } void magma_zpotf2_zdscal_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t offset, magma_int_t *info_array, magma_int_t batchCount) { /* Specialized Zdscal perform x[1:n-1]/x[0] */ dim3 grid(1, 1, 
batchCount); dim3 threads(n, 1, 1); hipLaunchKernelGGL(( zdscal_kernel_batched), dim3(grid), dim3(threads), 0, magma_stream , n, x_array, incx, offset, info_array); } ///////////////////////////////////////////////////////////////////////////////////////////////// #if defined(PRECISION_z) || defined(PRECISION_c) __global__ void zlacgv_kernel_batched(int n, magmaDoubleComplex **x_array, int incx, int offset) { int id = threadIdx.x; magmaDoubleComplex *x = x_array[blockIdx.z]+offset; if ( id < n ) { x[id*incx] = MAGMA_Z_CNJG(x[id*incx]); } } void magma_zlacgv_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, int offset, int batchCount) { /* Purpose ======= ZLACGV conjugates a complex vector of length N. Arguments ========= N (input) INTEGER The length of the vector X. N >= 0. X (input/output) COMPLEX*16 array, dimension (1+(N-1)*abs(INCX)) On entry, the vector of length N to be conjugated. On exit, X is overwritten with conjg(X). INCX (input) INTEGER The spacing between successive elements of X. ===================================================================== */ dim3 grid(1, 1, batchCount); dim3 threads(n, 1, 1); hipLaunchKernelGGL(( zlacgv_kernel_batched), dim3(grid), dim3(threads), 0, magma_stream , n, x_array, incx, offset); } #endif // defined(PRECISION_z) || defined(PRECISION_c) ///////////////////////////////////////////////////////////////////////////////////////////////// static __device__ void zpotf2_device(int m, int n, magmaDoubleComplex *A, int lda, magmaDoubleComplex alpha, magmaDoubleComplex beta, magma_int_t *info, int gbstep) { /* Each thread block load entire A into shared memory factorize it and copy back. n must be small enough to fit shared memory. n is checked by a macro POTF2_TILE_SIZE before the kernel. */ // checkinfo to avoid computation of the singular matrix if(*info != 0 ) return; int tx = threadIdx.x; magmaDoubleComplex *sdata_A = shared_data; __shared__ magmaDoubleComplex factor; __shared__ double sum[POTF2_TILE_SIZE]; // load A into sdata_A if(tx < m) { for(int i=0; i<n; i++) { sdata_A[tx + i * m] = A[tx + i * lda]; } } __syncthreads(); for(int iter=0; iter<n; iter++) { double res = MAGMA_D_ZERO; magmaDoubleComplex res1 = MAGMA_Z_ZERO; //1) performs zdotc sum = A[iter, 0:iter-1]*conj(A[iter, 0:iter-1]) //2) updates A[iter,iter] = sqrt(A[iter,iter]-sum); if(tx<iter) { res = MAGMA_Z_REAL (sdata_A[iter + tx * m] * MAGMA_Z_CNJG(sdata_A[iter + tx * m])); sum[tx] = res; } else { sum[tx] = 0.0; } __syncthreads(); magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum);//tried on K40: if m=32 n=32 the overall zpotf2_device routine time is 60ms n=16 time=25 n=8 time=20ms //magma_sum_reduce_n(iter, tx, sum); //tried on K40: if m=32 n=32 the time went from 61ms to 70ms when switching to reduce_n. n=16 time=28. //magma_sum_reduce_inlined(iter, tx, sum); //tried on K40: similar to magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum); if (tx == 0) { double xreal = MAGMA_Z_REAL(sdata_A[iter + iter * m]); sdata_A[iter + iter * m] = MAGMA_Z_MAKE(sqrt(xreal - sum[0]), 0); if(sdata_A[iter + iter * m] == MAGMA_Z_ZERO){ *info = iter + gbstep + 1; } } __syncthreads(); if(sdata_A[iter + iter * m] == MAGMA_Z_ZERO) return; __syncthreads(); //zlacgv conjugates a complex vector of length iter. //TODO #if defined(PRECISION_z) || defined(PRECISION_c) if(tx < iter) { sdata_A[iter + tx * m] = MAGMA_Z_CNJG(sdata_A[iter + tx * m]); } __syncthreads(); #endif // zgemv // Compute elements iter:n-1 of column iter = A(iter:n,0:iter-1) * A(iter-1,0:iter-1) (row). 
if(tx < m && tx > iter) { for(int j=0; j < iter; j++) { res1 += sdata_A[tx + j * m] * sdata_A[iter + j * m]; // TODO move the zlacgv conj to be done automatically here implicitly. } sdata_A [tx + iter * m] = alpha * res1 + sdata_A [tx + iter * m] * beta; } __syncthreads(); //zlacgv conjugates a complex vector of length iter. #if defined(PRECISION_z) || defined(PRECISION_c) if(tx < iter) { sdata_A[iter + tx * m] = MAGMA_Z_CNJG(sdata_A[iter + tx * m]); } __syncthreads(); #endif // zdscal perform A[iter:n-1, iter]/A[iter,iter]; if (tx == 0) { factor = MAGMA_Z_MAKE(1.0/MAGMA_Z_REAL(sdata_A[iter + iter * m]), 0.0); } __syncthreads(); if ( tx < m && tx > iter) { sdata_A[ tx + iter * m ] *= factor; } __syncthreads(); }// end of iter //copy sdata_A to A if(tx < m) { for(int i=0; i<n; i++) { A[tx + i * lda] = sdata_A[tx + i * m]; } } } ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void zpotf2_kernel_batched(int m, int n, magmaDoubleComplex **dA_array, int lda, magmaDoubleComplex alpha, magmaDoubleComplex beta, magma_int_t *info_array, int gbstep) { /* Each thread block load entire dA_array[blockIdx.z] into shared memory factorize it and copy back. n must be small enough to fit shared memory. n is checked by a macro POTF2_TILE_SIZE before the kernel. */ int batchid = blockIdx.z; zpotf2_device(m, n, dA_array[batchid], lda, alpha, beta, &(info_array[batchid]), gbstep); } ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void zpotf2_kernel(int m, int n, magmaDoubleComplex *dA, int lda, magmaDoubleComplex alpha, magmaDoubleComplex beta, magma_int_t *info) { zpotf2_device(m, n, dA, lda, alpha, beta, info, 0); } ///////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- zpotf2 computes the Cholesky factorization of a real symmetric positive definite matrix A. The factorization has the form A = U**H * U, if UPLO = MagmaUpper, or A = L * L**H, if UPLO = MagmaLower, where U is an upper triangular matrix and L is lower triangular. This is the unblocked version of the algorithm, calling Level 2 BLAS. Arguments --------- @param[in] uplo magma_uplo_t Specifies whether the upper or lower triangular part of the symmetric matrix A is stored. - = MagmaUpper: Upper triangular - = MagmaLower: Lower triangular @param[in] n INTEGER The order of the matrix A. N >= 0 and N <= 512. @param[in,out] dA COMPLEX_16 array, dimension (LDDA,N) On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = MagmaLower, the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. \n On exit, if INFO = 0, the factor U or L from the Cholesky factorization A = U**H * U or A = L * L**H. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,N). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - > 0: if INFO = k, the leading minor of order k is not positive definite, and the factorization could not be completed. 
@ingroup magma_zposv_aux ********************************************************************/ extern "C" magma_int_t magma_zpotf2_tile_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaDoubleComplex **dA_array, magma_int_t lda, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount) { magma_int_t arginfo = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { arginfo = -1; } else if (m < 0 || n < 0 || m > POTF2_TILE_SIZE || n > POTF2_TILE_SIZE) { arginfo = -2; } else if (lda < max(1,m)) { arginfo = -4; } else if (m < n) { arginfo = -10; } if (uplo == MagmaUpper) { printf("Upper side is unavailable \n"); arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } // Quick return if possible if (m == 0 || n == 0) { return arginfo; } magmaDoubleComplex alpha = MAGMA_Z_NEG_ONE; magmaDoubleComplex beta = MAGMA_Z_ONE; dim3 dimGrid(1, 1, batchCount); dim3 threads(POTF2_TILE_SIZE, 1); int shared_mem_size = sizeof(magmaDoubleComplex)*m*n; // + sizeof(double)*(POTF2_TILE_SIZE+1); hipLaunchKernelGGL(( zpotf2_kernel_batched), dim3(dimGrid), dim3(threads), shared_mem_size , 0, m, n, dA_array, lda, alpha, beta, info_array, gbstep); return arginfo; } ///////////////////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_zpotf2_tile( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaDoubleComplex *dA, magma_int_t lda, magma_int_t *info) { *info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { *info = -1; } else if (m < 0 || n < 0 || m > POTF2_TILE_SIZE) { *info = -2; } else if (lda < max(1,m)) { *info = -4; } else if (m < n) { *info = -10; } if (uplo == MagmaUpper) { printf("Upper side is unavailable \n"); *info = -1; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } // Quick return if possible if (m == 0 || n == 0) { return *info; } magmaDoubleComplex alpha = MAGMA_Z_NEG_ONE; magmaDoubleComplex beta = MAGMA_Z_ONE; dim3 dimGrid(1); dim3 threads(POTF2_TILE_SIZE, 1); int shared_mem_size = sizeof(magmaDoubleComplex)*m*n; // + sizeof(double)*(POTF2_TILE_SIZE+1); hipLaunchKernelGGL(( zpotf2_kernel), dim3(dimGrid), dim3(threads), shared_mem_size , 0, m, n, dA, lda, alpha, beta, info); return *info; }
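// ---------------------------------------------------------------------------
// Illustrative call sketch -- not part of the original MAGMA source. It shows
// one way the batched tile factorization above might be invoked. The argument
// order matches magma_zpotf2_tile_batched() as defined in this file; the
// helper name is hypothetical, and d_A_array (batchCount device pointers to
// column-major n-by-n tiles, lda >= n, n <= POTF2_TILE_SIZE) and d_info
// (batchCount magma_int_t entries in device memory, zero-initialized) are
// assumed to be allocated and filled by the caller.
static magma_int_t factorize_lower_tiles(magmaDoubleComplex **d_A_array,
                                         magma_int_t n, magma_int_t lda,
                                         magma_int_t *d_info,
                                         magma_int_t batchCount)
{
    // Only MagmaLower is supported; the routine above rejects MagmaUpper.
    // gbstep = 0 because these tiles are the first panel of their matrices.
    return magma_zpotf2_tile_batched(MagmaLower, n, n, d_A_array, lda,
                                     d_info, 0, batchCount);
}
// ---------------------------------------------------------------------------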
02a92f1e9902a9328afcd810bea1d8f0aeb80214.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2013 @author Azzam Haidar @author Tingxing Dong @precisions normal z -> s d c */ #include "common_magma.h" #include "batched_kernel_param.h" #include "magma_templates.h" #define PRECISION_z #define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column // dynamically allocated shared memory, set to size number of threads when the kernel is launched. // See CUDA Guide B.2.3 extern __shared__ magmaDoubleComplex shared_data[]; // dynamically allocated shared memory, set to size number of threads when the kernel is launched. // See CUDA Guide B.2.3 extern __shared__ double dble_shared_data[]; ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void zdotc_kernel_batched(int n, magmaDoubleComplex **x_array, int incx, int offset, magma_int_t *info_array, int gbstep) { int tx = threadIdx.x; magmaDoubleComplex *x = x_array[blockIdx.z]+offset; double *sdata = dble_shared_data; magmaDoubleComplex res = MAGMA_Z_ZERO; if (tx < n) { res = x[tx*incx]; } sdata[tx] = MAGMA_Z_REAL(res * MAGMA_Z_CNJG(res)); __syncthreads(); for(int s = blockDim.x/2; s > 32; s >>= 1 ) { if (tx < s) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if (tx < 32) { volatile double* smem = sdata; smem[tx] += smem[tx+32]; smem[tx] += smem[tx+16]; smem[tx] += smem[tx+8]; smem[tx] += smem[tx+4]; smem[tx] += smem[tx+2]; smem[tx] += smem[tx+1]; } if (tx == 0) { double xreal = MAGMA_Z_REAL(x[n*incx]); //MAGMA_Z_SET2REAL(x[n*incx], sqrt(xreal - sdata[0])); x[n*incx] = MAGMA_Z_MAKE(sqrt(xreal - sdata[0]), 0); if(x[n*incx] == MAGMA_Z_ZERO){ info_array[blockIdx.z] = offset + gbstep + 1; } } } void magma_zpotf2_zdotc_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t offset, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount) { /* Specialized Zdotc 1) performs zdotc sum = x[0:n-1]*conj(x[0:n-1]) 2) updates x[n] = sqrt(x[n]-sum); */ if (n > MAX_NTHREADS) { printf("n = %d > %d is not supported in zpotf2_zdotc\n", (int) n, (int) MAX_NTHREADS); } int threadSize; if (n <= 1024 && n > 512) { threadSize = 1024; } else if (n <= 512 && n > 256 ) { threadSize = 512; } else if (n <= 256 && n > 128) { threadSize = 256; } else if (n <= 128 && n > 64) { threadSize = 128; } else { threadSize = 64; } dim3 grid(1, 1, batchCount); zdotc_kernel_batched<<< grid, threadSize, threadSize * sizeof(double), magma_stream>>> (n, x_array, incx, offset, info_array, gbstep); } ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void zdscal_kernel_batched(int n, magmaDoubleComplex **x_array, int incx, int offset, magma_int_t *info_array) { // checkinfo to avoid computation of the singular matrix if(info_array[blockIdx.z] != 0 ) return; int id = threadIdx.x; magmaDoubleComplex *x = x_array[blockIdx.z]+offset; __shared__ magmaDoubleComplex factor; if (threadIdx.x == 0) { factor = MAGMA_Z_MAKE(1.0/MAGMA_Z_REAL(x[0]), 0.0); } __syncthreads(); if ( id < n && id >0) { x[id*incx] = x[id*incx] * factor; //printf("x=%f", x[id*incx]); } } void magma_zpotf2_zdscal_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t offset, magma_int_t *info_array, magma_int_t batchCount) { /* Specialized Zdscal perform x[1:n-1]/x[0] */ dim3 grid(1, 1, batchCount); dim3 threads(n, 1, 1); zdscal_kernel_batched<<< grid, threads, 0, magma_stream >>> (n, x_array, incx, offset, 
info_array); } ///////////////////////////////////////////////////////////////////////////////////////////////// #if defined(PRECISION_z) || defined(PRECISION_c) __global__ void zlacgv_kernel_batched(int n, magmaDoubleComplex **x_array, int incx, int offset) { int id = threadIdx.x; magmaDoubleComplex *x = x_array[blockIdx.z]+offset; if ( id < n ) { x[id*incx] = MAGMA_Z_CNJG(x[id*incx]); } } void magma_zlacgv_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, int offset, int batchCount) { /* Purpose ======= ZLACGV conjugates a complex vector of length N. Arguments ========= N (input) INTEGER The length of the vector X. N >= 0. X (input/output) COMPLEX*16 array, dimension (1+(N-1)*abs(INCX)) On entry, the vector of length N to be conjugated. On exit, X is overwritten with conjg(X). INCX (input) INTEGER The spacing between successive elements of X. ===================================================================== */ dim3 grid(1, 1, batchCount); dim3 threads(n, 1, 1); zlacgv_kernel_batched<<< grid, threads, 0, magma_stream >>> (n, x_array, incx, offset); } #endif // defined(PRECISION_z) || defined(PRECISION_c) ///////////////////////////////////////////////////////////////////////////////////////////////// static __device__ void zpotf2_device(int m, int n, magmaDoubleComplex *A, int lda, magmaDoubleComplex alpha, magmaDoubleComplex beta, magma_int_t *info, int gbstep) { /* Each thread block load entire A into shared memory factorize it and copy back. n must be small enough to fit shared memory. n is checked by a macro POTF2_TILE_SIZE before the kernel. */ // checkinfo to avoid computation of the singular matrix if(*info != 0 ) return; int tx = threadIdx.x; magmaDoubleComplex *sdata_A = shared_data; __shared__ magmaDoubleComplex factor; __shared__ double sum[POTF2_TILE_SIZE]; // load A into sdata_A if(tx < m) { for(int i=0; i<n; i++) { sdata_A[tx + i * m] = A[tx + i * lda]; } } __syncthreads(); for(int iter=0; iter<n; iter++) { double res = MAGMA_D_ZERO; magmaDoubleComplex res1 = MAGMA_Z_ZERO; //1) performs zdotc sum = A[iter, 0:iter-1]*conj(A[iter, 0:iter-1]) //2) updates A[iter,iter] = sqrt(A[iter,iter]-sum); if(tx<iter) { res = MAGMA_Z_REAL (sdata_A[iter + tx * m] * MAGMA_Z_CNJG(sdata_A[iter + tx * m])); sum[tx] = res; } else { sum[tx] = 0.0; } __syncthreads(); magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum);//tried on K40: if m=32 n=32 the overall zpotf2_device routine time is 60ms n=16 time=25 n=8 time=20ms //magma_sum_reduce_n(iter, tx, sum); //tried on K40: if m=32 n=32 the time went from 61ms to 70ms when switching to reduce_n. n=16 time=28. //magma_sum_reduce_inlined(iter, tx, sum); //tried on K40: similar to magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum); if (tx == 0) { double xreal = MAGMA_Z_REAL(sdata_A[iter + iter * m]); sdata_A[iter + iter * m] = MAGMA_Z_MAKE(sqrt(xreal - sum[0]), 0); if(sdata_A[iter + iter * m] == MAGMA_Z_ZERO){ *info = iter + gbstep + 1; } } __syncthreads(); if(sdata_A[iter + iter * m] == MAGMA_Z_ZERO) return; __syncthreads(); //zlacgv conjugates a complex vector of length iter. //TODO #if defined(PRECISION_z) || defined(PRECISION_c) if(tx < iter) { sdata_A[iter + tx * m] = MAGMA_Z_CNJG(sdata_A[iter + tx * m]); } __syncthreads(); #endif // zgemv // Compute elements iter:n-1 of column iter = A(iter:n,0:iter-1) * A(iter-1,0:iter-1) (row). if(tx < m && tx > iter) { for(int j=0; j < iter; j++) { res1 += sdata_A[tx + j * m] * sdata_A[iter + j * m]; // TODO move the zlacgv conj to be done automatically here implicitly. 
} sdata_A [tx + iter * m] = alpha * res1 + sdata_A [tx + iter * m] * beta; } __syncthreads(); //zlacgv conjugates a complex vector of length iter. #if defined(PRECISION_z) || defined(PRECISION_c) if(tx < iter) { sdata_A[iter + tx * m] = MAGMA_Z_CNJG(sdata_A[iter + tx * m]); } __syncthreads(); #endif // zdscal perform A[iter:n-1, iter]/A[iter,iter]; if (tx == 0) { factor = MAGMA_Z_MAKE(1.0/MAGMA_Z_REAL(sdata_A[iter + iter * m]), 0.0); } __syncthreads(); if ( tx < m && tx > iter) { sdata_A[ tx + iter * m ] *= factor; } __syncthreads(); }// end of iter //copy sdata_A to A if(tx < m) { for(int i=0; i<n; i++) { A[tx + i * lda] = sdata_A[tx + i * m]; } } } ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void zpotf2_kernel_batched(int m, int n, magmaDoubleComplex **dA_array, int lda, magmaDoubleComplex alpha, magmaDoubleComplex beta, magma_int_t *info_array, int gbstep) { /* Each thread block load entire dA_array[blockIdx.z] into shared memory factorize it and copy back. n must be small enough to fit shared memory. n is checked by a macro POTF2_TILE_SIZE before the kernel. */ int batchid = blockIdx.z; zpotf2_device(m, n, dA_array[batchid], lda, alpha, beta, &(info_array[batchid]), gbstep); } ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void zpotf2_kernel(int m, int n, magmaDoubleComplex *dA, int lda, magmaDoubleComplex alpha, magmaDoubleComplex beta, magma_int_t *info) { zpotf2_device(m, n, dA, lda, alpha, beta, info, 0); } ///////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- zpotf2 computes the Cholesky factorization of a real symmetric positive definite matrix A. The factorization has the form A = U**H * U, if UPLO = MagmaUpper, or A = L * L**H, if UPLO = MagmaLower, where U is an upper triangular matrix and L is lower triangular. This is the unblocked version of the algorithm, calling Level 2 BLAS. Arguments --------- @param[in] uplo magma_uplo_t Specifies whether the upper or lower triangular part of the symmetric matrix A is stored. - = MagmaUpper: Upper triangular - = MagmaLower: Lower triangular @param[in] n INTEGER The order of the matrix A. N >= 0 and N <= 512. @param[in,out] dA COMPLEX_16 array, dimension (LDDA,N) On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = MagmaLower, the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. \n On exit, if INFO = 0, the factor U or L from the Cholesky factorization A = U**H * U or A = L * L**H. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,N). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - > 0: if INFO = k, the leading minor of order k is not positive definite, and the factorization could not be completed. 
@ingroup magma_zposv_aux ********************************************************************/ extern "C" magma_int_t magma_zpotf2_tile_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaDoubleComplex **dA_array, magma_int_t lda, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount) { magma_int_t arginfo = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { arginfo = -1; } else if (m < 0 || n < 0 || m > POTF2_TILE_SIZE || n > POTF2_TILE_SIZE) { arginfo = -2; } else if (lda < max(1,m)) { arginfo = -4; } else if (m < n) { arginfo = -10; } if (uplo == MagmaUpper) { printf("Upper side is unavailable \n"); arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } // Quick return if possible if (m == 0 || n == 0) { return arginfo; } magmaDoubleComplex alpha = MAGMA_Z_NEG_ONE; magmaDoubleComplex beta = MAGMA_Z_ONE; dim3 dimGrid(1, 1, batchCount); dim3 threads(POTF2_TILE_SIZE, 1); int shared_mem_size = sizeof(magmaDoubleComplex)*m*n; // + sizeof(double)*(POTF2_TILE_SIZE+1); zpotf2_kernel_batched<<<dimGrid, threads, shared_mem_size >>>(m, n, dA_array, lda, alpha, beta, info_array, gbstep); return arginfo; } ///////////////////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_zpotf2_tile( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaDoubleComplex *dA, magma_int_t lda, magma_int_t *info) { *info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { *info = -1; } else if (m < 0 || n < 0 || m > POTF2_TILE_SIZE) { *info = -2; } else if (lda < max(1,m)) { *info = -4; } else if (m < n) { *info = -10; } if (uplo == MagmaUpper) { printf("Upper side is unavailable \n"); *info = -1; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } // Quick return if possible if (m == 0 || n == 0) { return *info; } magmaDoubleComplex alpha = MAGMA_Z_NEG_ONE; magmaDoubleComplex beta = MAGMA_Z_ONE; dim3 dimGrid(1); dim3 threads(POTF2_TILE_SIZE, 1); int shared_mem_size = sizeof(magmaDoubleComplex)*m*n; // + sizeof(double)*(POTF2_TILE_SIZE+1); zpotf2_kernel<<<dimGrid, threads, shared_mem_size >>>(m, n, dA, lda, alpha, beta, info); return *info; }
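// ---------------------------------------------------------------------------
// Conceptual host-side reference -- not part of the original MAGMA source.
// zpotf2_device() above applies the unblocked lower-triangular Cholesky
// recurrence to one tile held in shared memory; the plain C++ routine below
// spells out the same recurrence for a real column-major matrix so the
// per-iteration steps (dot product, sqrt of the pivot, column update, scale)
// are easy to follow. The return value mirrors the documented info
// convention: 0 on success, or the 1-based order of the first non-positive
// leading minor.
#include <cmath>

static int cholesky_lower_unblocked_reference(double *A, int n, int lda)
{
    for (int j = 0; j < n; ++j) {
        // sum_{k<j} L(j,k)^2 -- the zdotc step of iteration j
        double diag_dot = 0.0;
        for (int k = 0; k < j; ++k) diag_dot += A[j + k*lda] * A[j + k*lda];

        double pivot = A[j + j*lda] - diag_dot;
        if (pivot <= 0.0) return j + 1;          // not positive definite
        A[j + j*lda] = std::sqrt(pivot);         // sqrt of the updated pivot

        // zgemv + zdscal steps: update column j below the diagonal, then
        // scale it by 1 / L(j,j)
        for (int i = j + 1; i < n; ++i) {
            double dot = 0.0;
            for (int k = 0; k < j; ++k) dot += A[i + k*lda] * A[j + k*lda];
            A[i + j*lda] = (A[i + j*lda] - dot) / A[j + j*lda];
        }
    }
    return 0;
}
// ---------------------------------------------------------------------------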
446848a08742a3d2d79052231d7e413f6272031d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*--------------------------------------------------------------------------*\ Copyright (c) 2008-2009, Danny Ruijters. All rights reserved. http://www.dannyruijters.nl/cubicinterpolation/ This file is part of CUDA Cubic B-Spline Interpolation (CI). Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holders nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied. 
\*--------------------------------------------------------------------------*/ #include <stdio.h> #include <cutil.h> #include <memcpy.cu> #include <cubicPrefilter3D.cu> #include <cubicFilter3D_kernel.cu> texture<uchar, 3, hipReadModeNormalizedFloat> tex; //3D texture texture<float, 3, hipReadModeElementType> coeffs; //3D texture __device__ float sampleTexture(float3 coord, uint filterMethod) { // read from 3D texture switch (filterMethod) { case 0: //nearest neighbor case 1: return interpolate_trilinear(tex, coord); //linear case 2: return interpolate_tricubic_simple(coeffs, coord); //simple cubic case 3: return interpolate_tricubic_fast(coeffs, coord); //fast cubic case 4: return interpolate_tricubic_fast(tex, coord); //non-prefiltered, fast cubic default: return 0.0f; } } __global__ void rayCast(float4* output, const float3* rayCoords0, const float3* rayCoords1, uint imageWidth, float3 volumeExtent, uint filterMethod) { uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; uint i = __umul24(y, imageWidth) + x; float3 start = volumeExtent * rayCoords0[i]; float3 end = volumeExtent * rayCoords1[i]; float3 dir = end - start; float rayLength = length(dir); float3 rayColor = make_float3(0.0f); float rayAlpha = 0.0f; if (rayLength > 0.1f) { const float alphaThreshold = 0.95f; //opacity threshold for early ray termination of saturated rays const float density = 0.1f; const float brightness = 1.5f; float sampleDistance = 0.5f / rayLength; //distance between the samples for (float t = 0.0f; t < 1.0f && rayAlpha < alphaThreshold; t += sampleDistance) { float sample = brightness * sampleTexture(start + t * dir, filterMethod); float4 lookup = make_float4(sample, sample, sample, density * sample); // Under operator rayColor += (1.0f - rayAlpha) * lookup.w * make_float3(lookup.x, lookup.y, lookup.z); rayAlpha += (1.0f - rayAlpha) * lookup.w; } } // write output color output[i] = make_float4(rayColor, rayAlpha); } // render image using CUDA extern "C" void render(float4* output, float3* rayCoords[2], uint2 imageExtent, uint3 volumeSize, uint filterMethod) { // set texture parameters tex.filterMode = (filterMethod == 0) ? 
hipFilterModePoint : hipFilterModeLinear; // call CUDA kernel, writing results to PBO const dim3 blockSize(min(PowTwoDivider(imageExtent.x), 16), min(PowTwoDivider(imageExtent.y), 8)); const dim3 gridSize(imageExtent.x / blockSize.x, imageExtent.y / blockSize.y); const float3 volumeExtent = make_float3((float)volumeSize.x, (float)volumeSize.y, (float)volumeSize.z); hipLaunchKernelGGL(( rayCast), dim3(gridSize), dim3(blockSize), 0, 0, output, rayCoords[0], rayCoords[1], imageExtent.x, volumeExtent, filterMethod); CUT_CHECK_ERROR("kernel failed"); } // intialize the textures, and calculate the cubic B-spline coefficients extern "C" void initCuda(const uchar* voxels, uint3 volumeSize) { // calculate the b-spline coefficients float* bsplineCoeffs = CastUCharVolumeHostToDevice(voxels, volumeSize.x, volumeSize.y, volumeSize.z); CubicBSplinePrefilter3DTimer(bsplineCoeffs, volumeSize.x, volumeSize.y, volumeSize.z); // create the b-spline coefficients texture hipChannelFormatDesc channelDescCoeff = hipCreateChannelDesc<float>(); hipArray *coeffArray = 0; hipExtent volumeExtent = make_hipExtent(volumeSize.x, volumeSize.y, volumeSize.z); CUDA_SAFE_CALL(hipMalloc3DArray(&coeffArray, &channelDescCoeff, volumeExtent)); // copy data to 3D array hipMemcpy3DParms copyParams = {0}; copyParams.extent = volumeExtent; copyParams.srcPtr = make_hipPitchedPtr((void*)bsplineCoeffs, volumeSize.x*sizeof(float), volumeSize.x, volumeSize.y); copyParams.dstArray = coeffArray; copyParams.kind = hipMemcpyDeviceToDevice; CUDA_SAFE_CALL(hipMemcpy3D(&copyParams)); // bind array to 3D texture CUDA_SAFE_CALL(hipBindTextureToArray(coeffs, coeffArray, channelDescCoeff)); coeffs.normalized = false; //access with absolute texture coordinates coeffs.filterMode = hipFilterModeLinear; CUDA_SAFE_CALL(hipFree(bsplineCoeffs)); //they are now in the coeffs texture, we do not need this anymore // Now create a texture with the original sample values for nearest neighbor and linear interpolation // Note that if you are going to do cubic interpolation only, you can remove the following code // create 3D array hipChannelFormatDesc channelDesc = hipCreateChannelDesc<uchar>(); hipArray *volumeArray = 0; CUDA_SAFE_CALL(hipMalloc3DArray(&volumeArray, &channelDesc, volumeExtent)); // copy data to 3D array copyParams.srcPtr = make_hipPitchedPtr((void*)voxels, volumeSize.x*sizeof(uchar), volumeSize.x, volumeSize.y); copyParams.dstArray = volumeArray; copyParams.kind = hipMemcpyHostToDevice; CUDA_SAFE_CALL(hipMemcpy3D(&copyParams)); // bind array to 3D texture CUDA_SAFE_CALL(hipBindTextureToArray(tex, volumeArray, channelDesc)); tex.normalized = false; //access with absolute texture coordinates }
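// ---------------------------------------------------------------------------
// Illustrative driver sketch -- not part of the original sample. It shows the
// expected call order for the two extern "C" entry points defined above:
// initCuda() once to upload the volume and prefilter the B-spline
// coefficients, then render() per frame. The buffers (d_output, the two
// device arrays of ray entry/exit coordinates, and the host voxel array) are
// assumed to be allocated elsewhere, and the helper name is hypothetical;
// filter method 3 selects the prefiltered fast tricubic path in
// sampleTexture().
static void render_one_frame(const uchar* h_voxels, uint3 volumeSize,
                             float4* d_output, float3* d_rayCoords[2],
                             uint2 imageExtent)
{
    static bool volume_uploaded = false;
    if (!volume_uploaded) {
        initCuda(h_voxels, volumeSize);   // one-time texture + coefficient setup
        volume_uploaded = true;
    }
    render(d_output, d_rayCoords, imageExtent, volumeSize, 3 /*fast tricubic*/);
}
// ---------------------------------------------------------------------------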
446848a08742a3d2d79052231d7e413f6272031d.cu
/*--------------------------------------------------------------------------*\ Copyright (c) 2008-2009, Danny Ruijters. All rights reserved. http://www.dannyruijters.nl/cubicinterpolation/ This file is part of CUDA Cubic B-Spline Interpolation (CI). Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holders nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied. 
\*--------------------------------------------------------------------------*/ #include <stdio.h> #include <cutil.h> #include <memcpy.cu> #include <cubicPrefilter3D.cu> #include <cubicFilter3D_kernel.cu> texture<uchar, 3, cudaReadModeNormalizedFloat> tex; //3D texture texture<float, 3, cudaReadModeElementType> coeffs; //3D texture __device__ float sampleTexture(float3 coord, uint filterMethod) { // read from 3D texture switch (filterMethod) { case 0: //nearest neighbor case 1: return interpolate_trilinear(tex, coord); //linear case 2: return interpolate_tricubic_simple(coeffs, coord); //simple cubic case 3: return interpolate_tricubic_fast(coeffs, coord); //fast cubic case 4: return interpolate_tricubic_fast(tex, coord); //non-prefiltered, fast cubic default: return 0.0f; } } __global__ void rayCast(float4* output, const float3* rayCoords0, const float3* rayCoords1, uint imageWidth, float3 volumeExtent, uint filterMethod) { uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; uint i = __umul24(y, imageWidth) + x; float3 start = volumeExtent * rayCoords0[i]; float3 end = volumeExtent * rayCoords1[i]; float3 dir = end - start; float rayLength = length(dir); float3 rayColor = make_float3(0.0f); float rayAlpha = 0.0f; if (rayLength > 0.1f) { const float alphaThreshold = 0.95f; //opacity threshold for early ray termination of saturated rays const float density = 0.1f; const float brightness = 1.5f; float sampleDistance = 0.5f / rayLength; //distance between the samples for (float t = 0.0f; t < 1.0f && rayAlpha < alphaThreshold; t += sampleDistance) { float sample = brightness * sampleTexture(start + t * dir, filterMethod); float4 lookup = make_float4(sample, sample, sample, density * sample); // Under operator rayColor += (1.0f - rayAlpha) * lookup.w * make_float3(lookup.x, lookup.y, lookup.z); rayAlpha += (1.0f - rayAlpha) * lookup.w; } } // write output color output[i] = make_float4(rayColor, rayAlpha); } // render image using CUDA extern "C" void render(float4* output, float3* rayCoords[2], uint2 imageExtent, uint3 volumeSize, uint filterMethod) { // set texture parameters tex.filterMode = (filterMethod == 0) ? 
cudaFilterModePoint : cudaFilterModeLinear; // call CUDA kernel, writing results to PBO const dim3 blockSize(min(PowTwoDivider(imageExtent.x), 16), min(PowTwoDivider(imageExtent.y), 8)); const dim3 gridSize(imageExtent.x / blockSize.x, imageExtent.y / blockSize.y); const float3 volumeExtent = make_float3((float)volumeSize.x, (float)volumeSize.y, (float)volumeSize.z); rayCast<<<gridSize, blockSize>>>(output, rayCoords[0], rayCoords[1], imageExtent.x, volumeExtent, filterMethod); CUT_CHECK_ERROR("kernel failed"); } // intialize the textures, and calculate the cubic B-spline coefficients extern "C" void initCuda(const uchar* voxels, uint3 volumeSize) { // calculate the b-spline coefficients float* bsplineCoeffs = CastUCharVolumeHostToDevice(voxels, volumeSize.x, volumeSize.y, volumeSize.z); CubicBSplinePrefilter3DTimer(bsplineCoeffs, volumeSize.x, volumeSize.y, volumeSize.z); // create the b-spline coefficients texture cudaChannelFormatDesc channelDescCoeff = cudaCreateChannelDesc<float>(); cudaArray *coeffArray = 0; cudaExtent volumeExtent = make_cudaExtent(volumeSize.x, volumeSize.y, volumeSize.z); CUDA_SAFE_CALL(cudaMalloc3DArray(&coeffArray, &channelDescCoeff, volumeExtent)); // copy data to 3D array cudaMemcpy3DParms copyParams = {0}; copyParams.extent = volumeExtent; copyParams.srcPtr = make_cudaPitchedPtr((void*)bsplineCoeffs, volumeSize.x*sizeof(float), volumeSize.x, volumeSize.y); copyParams.dstArray = coeffArray; copyParams.kind = cudaMemcpyDeviceToDevice; CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); // bind array to 3D texture CUDA_SAFE_CALL(cudaBindTextureToArray(coeffs, coeffArray, channelDescCoeff)); coeffs.normalized = false; //access with absolute texture coordinates coeffs.filterMode = cudaFilterModeLinear; CUDA_SAFE_CALL(cudaFree(bsplineCoeffs)); //they are now in the coeffs texture, we do not need this anymore // Now create a texture with the original sample values for nearest neighbor and linear interpolation // Note that if you are going to do cubic interpolation only, you can remove the following code // create 3D array cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<uchar>(); cudaArray *volumeArray = 0; CUDA_SAFE_CALL(cudaMalloc3DArray(&volumeArray, &channelDesc, volumeExtent)); // copy data to 3D array copyParams.srcPtr = make_cudaPitchedPtr((void*)voxels, volumeSize.x*sizeof(uchar), volumeSize.x, volumeSize.y); copyParams.dstArray = volumeArray; copyParams.kind = cudaMemcpyHostToDevice; CUDA_SAFE_CALL(cudaMemcpy3D(&copyParams)); // bind array to 3D texture CUDA_SAFE_CALL(cudaBindTextureToArray(tex, volumeArray, channelDesc)); tex.normalized = false; //access with absolute texture coordinates }
882febc015b0149e9648f388a1fd7a875d9d1db6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> static const int WORK_SIZE = 256; /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ hipError_t _m_cudaStat = value; \ if (_m_cudaStat != hipSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } /** * CUDA kernel function that reverses the order of bits in each element of the array. */ __global__ void vecSum_GPU3(const double* in, double* res, const unsigned long n) { //dynamic shared memory size __shared__ double tmp[1024]; unsigned long i = blockIdx.x*blockDim.x+threadIdx.x; if(i<n) tmp[threadIdx.x] = in[i]; __syncthreads(); //do reduction in shared memory for(unsigned int s=blockDim.x/2; s>0; s>>=1) { if(threadIdx.x < s) { tmp[threadIdx.x] += tmp[threadIdx.x + s]; } __syncthreads(); } if(threadIdx.x == 0) res[blockIdx.x] = tmp[0]; } /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main(int argc, char** argv) { if(argc<2) { printf("Not enough Arguments, please specify a size, Nigga!\n"); return 1; } int nDevices; hipGetDeviceCount(&nDevices); int THREADS_PER_BLOCK = 0; for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf(" Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); printf(" Max Threads Per Block: %d\n\n", prop.maxThreadsPerBlock); THREADS_PER_BLOCK = prop.maxThreadsPerBlock; } long vec_size = atol(argv[1]); printf("Size of Vector: %d\n", vec_size); int blocks = ceil((float)vec_size/THREADS_PER_BLOCK); printf("Blocks: %d\n",blocks); long vec_size_full = THREADS_PER_BLOCK * blocks; // Vector with Threads Per Block printf("Size of Block Filling Vector: %d\n", vec_size); double* vec = (double*)malloc(sizeof(double) * vec_size_full); double* res = (double*)malloc(sizeof(double) * vec_size_full); for(int i = 0; i < vec_size_full; i++) { if (i < vec_size) vec[i]=1.0f; else vec[i]=0.0f; res[i]=0; } printf("\n"); double* d_vec; double* d_res; hipMalloc((double **) &d_vec, vec_size_full * sizeof(double)); hipMalloc((double **) &d_res, vec_size_full * sizeof(double)); hipMemcpy(d_vec,vec,vec_size_full*sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( vecSum_GPU3), dim3(blocks),dim3(THREADS_PER_BLOCK), 0, 0, d_vec,d_res,vec_size); hipMemcpy(res, d_res, vec_size_full*sizeof(double), hipMemcpyDeviceToHost); for(int i = 0; i < blocks; i++) { printf("%f\n", res[i]); } CUDA_CHECK_RETURN(hipDeviceReset()); return 0; }
882febc015b0149e9648f388a1fd7a875d9d1db6.cu
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <math.h> static const int WORK_SIZE = 256; /** * This macro checks the return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ cudaError_t _m_cudaStat = value; \ if (_m_cudaStat != cudaSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } /** * CUDA kernel that computes one partial sum per block using a shared-memory tree reduction. */ __global__ void vecSum_GPU3(const double* in, double* res, const unsigned long n) { //shared-memory buffer, one element per thread (statically sized for up to 1024 threads per block) __shared__ double tmp[1024]; unsigned long i = blockIdx.x*blockDim.x+threadIdx.x; tmp[threadIdx.x] = (i < n) ? in[i] : 0.0; //out-of-range threads contribute zero __syncthreads(); //do reduction in shared memory for(unsigned int s=blockDim.x/2; s>0; s>>=1) { if(threadIdx.x < s) { tmp[threadIdx.x] += tmp[threadIdx.x + s]; } __syncthreads(); } if(threadIdx.x == 0) res[blockIdx.x] = tmp[0]; } /** * Host function that prepares the data array and passes it to the CUDA kernel. */ int main(int argc, char** argv) { if(argc<2) { printf("Not enough arguments, please specify a vector size!\n"); return 1; } int nDevices; cudaGetDeviceCount(&nDevices); int THREADS_PER_BLOCK = 0; for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf(" Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); printf(" Max Threads Per Block: %d\n\n", prop.maxThreadsPerBlock); THREADS_PER_BLOCK = prop.maxThreadsPerBlock; } long vec_size = atol(argv[1]); printf("Size of Vector: %ld\n", vec_size); int blocks = ceil((float)vec_size/THREADS_PER_BLOCK); printf("Blocks: %d\n",blocks); long vec_size_full = THREADS_PER_BLOCK * blocks; // vector padded to a whole number of blocks printf("Size of Block Filling Vector: %ld\n", vec_size_full); double* vec = (double*)malloc(sizeof(double) * vec_size_full); double* res = (double*)malloc(sizeof(double) * vec_size_full); for(int i = 0; i < vec_size_full; i++) { if (i < vec_size) vec[i]=1.0f; else vec[i]=0.0f; res[i]=0; } printf("\n"); double* d_vec; double* d_res; cudaMalloc((double **) &d_vec, vec_size_full * sizeof(double)); cudaMalloc((double **) &d_res, vec_size_full * sizeof(double)); cudaMemcpy(d_vec,vec,vec_size_full*sizeof(double), cudaMemcpyHostToDevice); vecSum_GPU3<<<blocks,THREADS_PER_BLOCK>>>(d_vec,d_res,vec_size); cudaMemcpy(res, d_res, vec_size_full*sizeof(double), cudaMemcpyDeviceToHost); for(int i = 0; i < blocks; i++) { printf("%f\n", res[i]); } CUDA_CHECK_RETURN(cudaDeviceReset()); return 0; }
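The vecSum_GPU3 kernel above leaves one partial sum per block in res, and main() only prints those partial sums. A minimal host-side finish for the reduction might look like the sketch below; it assumes the res and blocks variables from main() above and is illustrative only, not part of the original file.

#include <stdio.h>

// Hedged sketch: combine the per-block partial sums written by vecSum_GPU3
// into a single total on the host. `partial` is the host copy of d_res and
// `blocks` is the number of blocks that were launched.
static double finishReduction(const double* partial, int blocks)
{
    double total = 0.0;
    for (int i = 0; i < blocks; i++) {
        total += partial[i];   // each entry is the sum over one block's elements
    }
    return total;
}

// Possible use inside main() above, after the copy back into res:
//   printf("Total sum: %f\n", finishReduction(res, blocks));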
1e1d32048b3d427767f59f772a59a5f06c9604a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define NNEU %(nneu)d #define E_K -85 // Potassium Reverse Potential #define E_cl -30 // Chlorine Reverse Potential #define G_s 1.6 // Maximum Shaker Conductance #define G_dr 3.5 // Maximum Delayed Rectifier Conductance #define G_Cl 0.056 // Chlorine Leak Conductance #define G_K 0.082 // Potassium Leak Conductance #define C = 4 // Membrane Capacitance __global__ f_hhn( %(type)s *I, %(type)s *V, %(type)s *SA, %(type)s *SI, %(type)s *DRA, %(type)s *DRI ) { %(type)s i_ext = I; %(type)s v = V; %(type)s sa = SA; %(type)s si = SI; %(type)s dra = DRA; %(type)s dri = DRI; float sa_inf = powf(1/(1 + exp((-30-v)/13.5)), (1/3)); float sa_tau = 0.13+3.39*exp(-powf(-73-v, 2)/pow(20,2)); float si_inf = 1/(1 + exp((-55-v)/-5.5)); float si_tau = 113 * exp(-pow(-71-v, 2)/pow(29,2)); float dra_inf = powf(1/(1 + exp((-5-v))), 0.5); float dra_tau = 0.5 + 5.75 * exp(-pow(-25-v, 2)/pow(32,2)); float dri_inf = 1 / (1 + exp((-25-v) / -10.5)); float dri_tau = 890; //Runge-Kutta? }
1e1d32048b3d427767f59f772a59a5f06c9604a3.cu
#define NNEU %(nneu)d #define E_K -85 // Potassium Reverse Potential #define E_cl -30 // Chlorine Reverse Potential #define G_s 1.6 // Maximum Shaker Conductance #define G_dr 3.5 // Maximum Delayed Rectifier Conductance #define G_Cl 0.056 // Chlorine Leak Conductance #define G_K 0.082 // Potassium Leak Conductance #define C 4 // Membrane Capacitance __global__ void f_hhn( %(type)s *I, %(type)s *V, %(type)s *SA, %(type)s *SI, %(type)s *DRA, %(type)s *DRI ) { int nid = blockIdx.x * blockDim.x + threadIdx.x; //one thread per neuron if (nid >= NNEU) return; %(type)s i_ext = I[nid]; %(type)s v = V[nid]; %(type)s sa = SA[nid]; %(type)s si = SI[nid]; %(type)s dra = DRA[nid]; %(type)s dri = DRI[nid]; float sa_inf = powf(1/(1 + exp((-30-v)/13.5)), (1.0f/3.0f)); float sa_tau = 0.13+3.39*exp(-powf(-73-v, 2)/pow(20,2)); float si_inf = 1/(1 + exp((-55-v)/-5.5)); float si_tau = 113 * exp(-pow(-71-v, 2)/pow(29,2)); float dra_inf = powf(1/(1 + exp((-5-v))), 0.5); float dra_tau = 0.5 + 5.75 * exp(-pow(-25-v, 2)/pow(32,2)); float dri_inf = 1 / (1 + exp((-25-v) / -10.5)); float dri_tau = 890; //integration step (e.g. Runge-Kutta) still to be implemented }
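The kernel above stops after computing the steady-state values and time constants, leaving the integration step to be implemented. The sketch below shows one common way to advance a Hodgkin-Huxley-style gating variable, a forward-Euler relaxation toward its steady state; dt and gateStep are assumptions introduced here for illustration, not part of the original template.

// Hedged sketch: forward-Euler update of a gating variable obeying
// dx/dt = (x_inf - x) / x_tau. `dt` (assumed, in ms) is the integration step.
__device__ float gateStep(float x, float x_inf, float x_tau, float dt)
{
    return x + dt * (x_inf - x) / x_tau;
}

// e.g., inside f_hhn, after the *_inf / *_tau values are computed:
//   sa  = gateStep(sa,  sa_inf,  sa_tau,  dt);
//   si  = gateStep(si,  si_inf,  si_tau,  dt);
//   dra = gateStep(dra, dra_inf, dra_tau, dt);
//   dri = gateStep(dri, dri_inf, dri_tau, dt);
//   SA[nid] = sa; SI[nid] = si; DRA[nid] = dra; DRI[nid] = dri;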
46d16a8b92c7ea16dbb127a5cb91510f5ac75c04.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda_runtime.h> #include<iostream> #include<stdio.h> #include<sys/time.h> #include<assert.h> using namespace std; #define REAL double #define BX 128 #define BY 8 #define BZ 1 #define GZ 1 const REAL cc = 0.4; const REAL ce = 0.1; const REAL cw = 0.1; const REAL cs = 0.1; const REAL cn = 0.1; const REAL ct = 0.1; const REAL cb = 0.1; //Must be re-written, including all the parameters int stencil(REAL *A, REAL *B, int nx, int ny, int nz, int steps) { int i, j, k, s; #define IDX(i,j,k) ((i)*ny*nz+(j)*nz+(k)) for(s = 0; s < steps; s ++) { for(i = 0; i < nx; i ++) { for(j = 0; j < ny; j ++) { for(k = 0; k < nz; k ++) { REAL r = 0.4*A[IDX(i,j,k)]; if(k != 0) r += 0.1*A[IDX(i,j,k-1)]; else r += 0.1*A[IDX(i,j,k)]; if(k != nz-1) r += 0.1*A[IDX(i,j,k+1)]; else r += 0.1*A[IDX(i,j,k)]; if(j != 0) r += 0.1*A[IDX(i,j-1,k)]; else r += 0.1*A[IDX(i,j,k)]; if(j != ny-1) r += 0.1*A[IDX(i,j+1,k)]; else r += 0.1*A[IDX(i,j,k)]; if(i != 0) r += 0.1*A[IDX(i-1,j,k)]; else r += 0.1*A[IDX(i,j,k)]; if(i != nx-1) r += 0.1*A[IDX(i+1,j,k)]; else r += 0.1*A[IDX(i,j,k)]; B[IDX(i,j,k)] = r; } } } REAL *tmp = NULL; tmp = A, A = B, B = tmp; } return 0; } void check(REAL *a, REAL *b, int nx, int ny, int nz) { int slice = nx * ny; for (int z = 1; z < nz-1; ++z) { for (int y = 1; y < ny-1; ++y) { for (int x = 1; x < nz-1; ++x) { int idx = z * slice + y * nx + x; //cout << a[idx] << " " << b[idx] << endl; if (abs(a[idx]-b[idx]) > 1e-5) { //cout << a[idx] << " " << b[idx] << endl; cout << x << "," << y << "," << z << endl; //printf("%d\n", idx); printf("Wrong!!!!!!!!\n"); return; } } } } printf("Right!!!!!!\n"); return; } __global__ void baseline(REAL* A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; int k = kb; //int k = kb > 0? kb: 1; int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; if (i >= 0 && i < nx && j >= 0 && j < ny) { //#pragma unroll for (; k < ke; k++){ int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int b = (k==0)?c:c-slice; int t = (k==nz-1)?c:c+slice; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*A[t] + cb*A[b] + cc*A[c]; c += slice; //if (k > 0 && k < nz-1 && i > 0 && i < nx-1 && j > 0 && j < ny-1){ // B[idx] = ce*A[idx+1] + cw*A[idx-1] + cs*A[idx+nx] + cn*A[idx-nx] // +ct*A[idx+slice] + cb*A[idx-slice] + cc*A[idx]; // idx += slice; } } } __global__ void baseopt(REAL* A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; //int k = kb > 0? kb: 1; int k = kb; int ke = (kb+nz/gridDim.z<nz)? 
kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; int b = (k==0)?c:c-slice; int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int t; double b_b = A[b]; double b_c = A[c]; double b_t; if (i >= 0 && i < nx && j >= 0 && j < ny) { #pragma unroll for (; k < ke; k++){ t = (k==nz-1)?c:c+slice; b_t = A[t]; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*b_t + cb*b_b + cc*b_c; b_b = b_c; b_c = b_t; c += slice; //b_t = B[idx+slice]; ////A[idx] = ce*B[idx+1] + cw*B[idx-1] + cs*B[idx+nx] + cn*B[idx-nx] //// +ct*B[idx+slice] + cb*B[idx-slice] + cc*B[idx]; //A[idx] = ce*B[idx+1] + cw*B[idx-1] + cs*B[idx+nx] + cn*B[idx-nx] // +ct*b_t + cb*b_b + cc*b_c; //b_b = b_c; //b_c = b_t; //idx += slice; } } return; } __global__ void roc(const REAL* __restrict__ A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; //int k = kb > 0? kb: 1; int k = kb; int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; int b = (k==0)?c:c-slice; int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int t; double b_b = A[b]; double b_c = A[c]; double b_t; if (i >= 0 && i < nx && j >= 0 && j < ny) { #pragma unroll for (; k < ke; k++){ t = (k==nz-1)?c:c+slice; b_t = A[t]; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*b_t + cb*b_b + cc*b_c; b_b = b_c; b_c = b_t; c += slice; } } return; } int main(int argc, char **argv){ int NX = atoi(argv[2]); int NY = atoi(argv[3]); int NZ = atoi(argv[4]); int T = atoi(argv[5]); int num = 8; int NZ_ = NZ/num+2; if (NX*NY*NZ <= 600*600*600) { num = 1; NZ_ = NZ; } int p1, p2; if (NZ % num == 0) { p1 = p2 = NZ/num; } else { p1 = NZ / (num-1); p2 = NZ - p1*(num-1); } //int size = sizeof(REAL)*NX*NY*NZ; int partsize1 = NX*NY*p1; int partsize2 = NX*NY*p2; REAL *host_A, *host_B; int totalsize; if (num == 1) { totalsize = NX*NY*NZ; host_A = new REAL[totalsize]; host_B = new REAL[totalsize]; } else { totalsize = (partsize1+2*NX*NY)*(num-1)+partsize2; host_A = new REAL[totalsize]; host_B = new REAL[totalsize]; } int size_ = NZ_*NY*NX; REAL *cpu_A = new REAL[NX*NY*NZ]; REAL *result_A = new REAL[NX*NY*NZ]; REAL *cpu_B = new REAL[NX*NY*NZ]; for (int i = 0; i < totalsize; ++i) { host_A[i] = 1.0; host_B[i] = 1.0; } //for (int k = 0; k < NZ; k++) // for (int j = 0; j < NY; j++) // for (int i = 0; i < NX; i++) { // host_A[k*NY*NX+j*NX+i] = 1.0; // host_B[k*NY*NX+j*NX+i] = 1.0; // } for (int k = 0; k < NZ; k++) for (int j = 0; j < NY; j++) for (int i = 0; i < NX; i++) { //cout << k*NY*NX + j*NX + i << endl; cpu_A[k*NY*NX+j*NX+i] = 1.0; cpu_B[k*NY*NX+j*NX+i] = 1.0; result_A[k*NY*NX+j*NX+i] = 0.0; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); float elapsed_time; double flops; int index = 0; int partsize; cout << "start gpu computing..." 
<< endl; for (int i = 0; i < num; ++i) { REAL *dev_A, *dev_B; if (i == 0) { partsize = partsize1+NX*NY; NZ_ = p1+1; } else if (i < num-1) { partsize = partsize1+2*NX*NY; NZ_ = p1+2; } else { partsize = partsize2+NX*NY; NZ_ = p2+1; } if (num == 1) { partsize = NX*NY*NZ; NZ_ = NZ; } hipMalloc(&dev_A, sizeof(REAL)*partsize); hipMalloc(&dev_B, sizeof(REAL)*partsize); hipMemcpy(dev_A, host_A+index, sizeof(REAL)*partsize, hipMemcpyHostToDevice); hipMemcpy(dev_B, host_B+index, sizeof(REAL)*partsize, hipMemcpyHostToDevice); dim3 threadPerBlock(BX, BY, BZ); //128,1,1 dim3 blockPerGrid((NX+BX-1)/BX, (NY+BY-1)/BY, GZ); //512/128,512/1,1 = 4,512,1 /////////////////////////////////////////////////////////////// //baseline for (int t = 0; t < T; t++){ //baseline<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ_); hipLaunchKernelGGL(( roc), dim3(blockPerGrid), dim3(threadPerBlock), 0, 0, dev_A, dev_B, NX, NY, NZ_); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } /////////////////////////////////////////////////////////////// if (hipGetLastError() != hipSuccess) printf("cudawrong!!!\n"); hipMemcpy(host_A+index, dev_A, sizeof(REAL)*partsize, hipMemcpyDeviceToHost); index += partsize; hipFree(dev_A); hipFree(dev_B); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); /* struct timeval t1, t2; gettimeofday(&t1, NULL); stencil(cpu_A, cpu_B, NX, NY, NZ, T); gettimeofday(&t2, NULL); float cpubaseline_time = (t2.tv_sec-t1.tv_sec)*1e3 + (t2.tv_usec-t1.tv_usec)*1e-3; cout << "CPU time:" << cpubaseline_time/T << " ms" << endl; if (num == 1) { check(cpu_A, host_A, NX, NY, NZ); } else { int index=0, partsize=0, idx=0; for (int i = 0; i < num; ++i) { if (i < num-1) partsize = partsize1; else partsize = partsize2; for (int j = 0; j < partsize; ++j) { result_A[idx] = host_A[index+j]; idx++; } index += partsize+2*NX*NY; } check(cpu_A, result_A, NX, NY, NZ); }*/ //printf("baseline: Gflops = %lf\n", flops); printf("baseline: elapsed time = %f ms\n", elapsed_time); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; /* /////////////////////////////////////////////////////////////// //baseopt hipEventRecord(start, 0); for (int t = 0; t < T; t++){ baseopt<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } hipEventRecord(stop, 0); hipEventSynchronize(stop); if (hipGetLastError() != hipSuccess) printf("baseopt: wrong!!!\n"); hipEventElapsedTime(&elapsed_time, start, stop); printf("baseopt: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("baseopt: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// //read-only data cache hipEventRecord(start, 0); for (int t = 0; t < T; t++){ roc<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } hipEventRecord(stop, 0); hipEventSynchronize(stop); if (hipGetLastError() != hipSuccess) printf("read-only data cache: wrong!!!\n"); hipEventElapsedTime(&elapsed_time, start, stop); printf("read-only data cache: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("read-only data cache: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// //share memory raw hipEventRecord(start, 
0); for (int t = 0; t < T; t++){ shm_raw<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } hipEventRecord(stop, 0); hipEventSynchronize(stop); if (hipGetLastError() != hipSuccess) printf("share memory raw: wrong!!!\n"); hipEventElapsedTime(&elapsed_time, start, stop); printf("share memory raw: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("share memory raw: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// hipEventDestroy(start); hipEventDestroy(stop); */ return 0; }
46d16a8b92c7ea16dbb127a5cb91510f5ac75c04.cu
#include<cuda_runtime.h> #include<iostream> #include<stdio.h> #include<sys/time.h> #include<assert.h> using namespace std; #define REAL double #define BX 128 #define BY 8 #define BZ 1 #define GZ 1 const REAL cc = 0.4; const REAL ce = 0.1; const REAL cw = 0.1; const REAL cs = 0.1; const REAL cn = 0.1; const REAL ct = 0.1; const REAL cb = 0.1; //Must be re-written, including all the parameters int stencil(REAL *A, REAL *B, int nx, int ny, int nz, int steps) { int i, j, k, s; #define IDX(i,j,k) ((i)*ny*nz+(j)*nz+(k)) for(s = 0; s < steps; s ++) { for(i = 0; i < nx; i ++) { for(j = 0; j < ny; j ++) { for(k = 0; k < nz; k ++) { REAL r = 0.4*A[IDX(i,j,k)]; if(k != 0) r += 0.1*A[IDX(i,j,k-1)]; else r += 0.1*A[IDX(i,j,k)]; if(k != nz-1) r += 0.1*A[IDX(i,j,k+1)]; else r += 0.1*A[IDX(i,j,k)]; if(j != 0) r += 0.1*A[IDX(i,j-1,k)]; else r += 0.1*A[IDX(i,j,k)]; if(j != ny-1) r += 0.1*A[IDX(i,j+1,k)]; else r += 0.1*A[IDX(i,j,k)]; if(i != 0) r += 0.1*A[IDX(i-1,j,k)]; else r += 0.1*A[IDX(i,j,k)]; if(i != nx-1) r += 0.1*A[IDX(i+1,j,k)]; else r += 0.1*A[IDX(i,j,k)]; B[IDX(i,j,k)] = r; } } } REAL *tmp = NULL; tmp = A, A = B, B = tmp; } return 0; } void check(REAL *a, REAL *b, int nx, int ny, int nz) { int slice = nx * ny; for (int z = 1; z < nz-1; ++z) { for (int y = 1; y < ny-1; ++y) { for (int x = 1; x < nz-1; ++x) { int idx = z * slice + y * nx + x; //cout << a[idx] << " " << b[idx] << endl; if (abs(a[idx]-b[idx]) > 1e-5) { //cout << a[idx] << " " << b[idx] << endl; cout << x << "," << y << "," << z << endl; //printf("%d\n", idx); printf("Wrong!!!!!!!!\n"); return; } } } } printf("Right!!!!!!\n"); return; } __global__ void baseline(REAL* A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; int k = kb; //int k = kb > 0? kb: 1; int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; if (i >= 0 && i < nx && j >= 0 && j < ny) { //#pragma unroll for (; k < ke; k++){ int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int b = (k==0)?c:c-slice; int t = (k==nz-1)?c:c+slice; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*A[t] + cb*A[b] + cc*A[c]; c += slice; //if (k > 0 && k < nz-1 && i > 0 && i < nx-1 && j > 0 && j < ny-1){ // B[idx] = ce*A[idx+1] + cw*A[idx-1] + cs*A[idx+nx] + cn*A[idx-nx] // +ct*A[idx+slice] + cb*A[idx-slice] + cc*A[idx]; // idx += slice; } } } __global__ void baseopt(REAL* A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; //int k = kb > 0? kb: 1; int k = kb; int ke = (kb+nz/gridDim.z<nz)? 
kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; int b = (k==0)?c:c-slice; int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int t; double b_b = A[b]; double b_c = A[c]; double b_t; if (i >= 0 && i < nx && j >= 0 && j < ny) { #pragma unroll for (; k < ke; k++){ t = (k==nz-1)?c:c+slice; b_t = A[t]; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*b_t + cb*b_b + cc*b_c; b_b = b_c; b_c = b_t; c += slice; //b_t = B[idx+slice]; ////A[idx] = ce*B[idx+1] + cw*B[idx-1] + cs*B[idx+nx] + cn*B[idx-nx] //// +ct*B[idx+slice] + cb*B[idx-slice] + cc*B[idx]; //A[idx] = ce*B[idx+1] + cw*B[idx-1] + cs*B[idx+nx] + cn*B[idx-nx] // +ct*b_t + cb*b_b + cc*b_c; //b_b = b_c; //b_c = b_t; //idx += slice; } } return; } __global__ void roc(const REAL* __restrict__ A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; //int k = kb > 0? kb: 1; int k = kb; int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; int b = (k==0)?c:c-slice; int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int t; double b_b = A[b]; double b_c = A[c]; double b_t; if (i >= 0 && i < nx && j >= 0 && j < ny) { #pragma unroll for (; k < ke; k++){ t = (k==nz-1)?c:c+slice; b_t = A[t]; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*b_t + cb*b_b + cc*b_c; b_b = b_c; b_c = b_t; c += slice; } } return; } int main(int argc, char **argv){ int NX = atoi(argv[2]); int NY = atoi(argv[3]); int NZ = atoi(argv[4]); int T = atoi(argv[5]); int num = 8; int NZ_ = NZ/num+2; if (NX*NY*NZ <= 600*600*600) { num = 1; NZ_ = NZ; } int p1, p2; if (NZ % num == 0) { p1 = p2 = NZ/num; } else { p1 = NZ / (num-1); p2 = NZ - p1*(num-1); } //int size = sizeof(REAL)*NX*NY*NZ; int partsize1 = NX*NY*p1; int partsize2 = NX*NY*p2; REAL *host_A, *host_B; int totalsize; if (num == 1) { totalsize = NX*NY*NZ; host_A = new REAL[totalsize]; host_B = new REAL[totalsize]; } else { totalsize = (partsize1+2*NX*NY)*(num-1)+partsize2; host_A = new REAL[totalsize]; host_B = new REAL[totalsize]; } int size_ = NZ_*NY*NX; REAL *cpu_A = new REAL[NX*NY*NZ]; REAL *result_A = new REAL[NX*NY*NZ]; REAL *cpu_B = new REAL[NX*NY*NZ]; for (int i = 0; i < totalsize; ++i) { host_A[i] = 1.0; host_B[i] = 1.0; } //for (int k = 0; k < NZ; k++) // for (int j = 0; j < NY; j++) // for (int i = 0; i < NX; i++) { // host_A[k*NY*NX+j*NX+i] = 1.0; // host_B[k*NY*NX+j*NX+i] = 1.0; // } for (int k = 0; k < NZ; k++) for (int j = 0; j < NY; j++) for (int i = 0; i < NX; i++) { //cout << k*NY*NX + j*NX + i << endl; cpu_A[k*NY*NX+j*NX+i] = 1.0; cpu_B[k*NY*NX+j*NX+i] = 1.0; result_A[k*NY*NX+j*NX+i] = 0.0; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); float elapsed_time; double flops; int index = 0; int partsize; cout << "start gpu computing..." 
<< endl; for (int i = 0; i < num; ++i) { REAL *dev_A, *dev_B; if (i == 0) { partsize = partsize1+NX*NY; NZ_ = p1+1; } else if (i < num-1) { partsize = partsize1+2*NX*NY; NZ_ = p1+2; } else { partsize = partsize2+NX*NY; NZ_ = p2+1; } if (num == 1) { partsize = NX*NY*NZ; NZ_ = NZ; } cudaMalloc(&dev_A, sizeof(REAL)*partsize); cudaMalloc(&dev_B, sizeof(REAL)*partsize); cudaMemcpy(dev_A, host_A+index, sizeof(REAL)*partsize, cudaMemcpyHostToDevice); cudaMemcpy(dev_B, host_B+index, sizeof(REAL)*partsize, cudaMemcpyHostToDevice); dim3 threadPerBlock(BX, BY, BZ); //128,1,1 dim3 blockPerGrid((NX+BX-1)/BX, (NY+BY-1)/BY, GZ); //512/128,512/1,1 = 4,512,1 /////////////////////////////////////////////////////////////// //baseline for (int t = 0; t < T; t++){ //baseline<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ_); roc<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ_); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } /////////////////////////////////////////////////////////////// if (cudaGetLastError() != cudaSuccess) printf("cudawrong!!!\n"); cudaMemcpy(host_A+index, dev_A, sizeof(REAL)*partsize, cudaMemcpyDeviceToHost); index += partsize; cudaFree(dev_A); cudaFree(dev_B); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); /* struct timeval t1, t2; gettimeofday(&t1, NULL); stencil(cpu_A, cpu_B, NX, NY, NZ, T); gettimeofday(&t2, NULL); float cpubaseline_time = (t2.tv_sec-t1.tv_sec)*1e3 + (t2.tv_usec-t1.tv_usec)*1e-3; cout << "CPU time:" << cpubaseline_time/T << " ms" << endl; if (num == 1) { check(cpu_A, host_A, NX, NY, NZ); } else { int index=0, partsize=0, idx=0; for (int i = 0; i < num; ++i) { if (i < num-1) partsize = partsize1; else partsize = partsize2; for (int j = 0; j < partsize; ++j) { result_A[idx] = host_A[index+j]; idx++; } index += partsize+2*NX*NY; } check(cpu_A, result_A, NX, NY, NZ); }*/ //printf("baseline: Gflops = %lf\n", flops); printf("baseline: elapsed time = %f ms\n", elapsed_time); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; /* /////////////////////////////////////////////////////////////// //baseopt cudaEventRecord(start, 0); for (int t = 0; t < T; t++){ baseopt<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); if (cudaGetLastError() != cudaSuccess) printf("baseopt: wrong!!!\n"); cudaEventElapsedTime(&elapsed_time, start, stop); printf("baseopt: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("baseopt: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// //read-only data cache cudaEventRecord(start, 0); for (int t = 0; t < T; t++){ roc<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); if (cudaGetLastError() != cudaSuccess) printf("read-only data cache: wrong!!!\n"); cudaEventElapsedTime(&elapsed_time, start, stop); printf("read-only data cache: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("read-only data cache: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// //share memory raw cudaEventRecord(start, 0); for 
(int t = 0; t < T; t++){ shm_raw<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); if (cudaGetLastError() != cudaSuccess) printf("share memory raw: wrong!!!\n"); cudaEventElapsedTime(&elapsed_time, start, stop); printf("share memory raw: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("share memory raw: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// cudaEventDestroy(start); cudaEventDestroy(stop); */ return 0; }
90767d75d914927a207d059d1d354ff740dbff66.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> void hello(); __global__ void cuda_hello(); int main(void) { hipLaunchKernelGGL(cuda_hello, dim3(1), dim3(6), 0, 0); hipDeviceSynchronize(); return 0; } __global__ void cuda_hello() { printf("Good job!\n"); }
90767d75d914927a207d059d1d354ff740dbff66.cu
#include <cuda.h> #include <stdio.h> void hello(); __global__ void cuda_hello(); int main(void) { cuda_hello<<<1,6>>>(); cudaDeviceSynchronize(); return 0; } __global__ void cuda_hello() { printf("Good job!\n"); }
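Kernel launches like the one above fail silently if anything goes wrong, and output from a device-side printf is only guaranteed to appear after a synchronization. A generic post-launch check, not taken from the file above, could look like this:

#include <cstdio>
#include <cuda_runtime.h>

// Report errors from the launch itself and from kernel execution, and make
// sure device printf output is flushed before the program exits.
static void checkLaunch(const char* label)
{
    cudaError_t err = cudaGetLastError();            // launch-time errors
    if (err != cudaSuccess)
        fprintf(stderr, "%s launch failed: %s\n", label, cudaGetErrorString(err));
    err = cudaDeviceSynchronize();                   // execution-time errors
    if (err != cudaSuccess)
        fprintf(stderr, "%s execution failed: %s\n", label, cudaGetErrorString(err));
}

// Possible use: cuda_hello<<<1, 6>>>(); checkLaunch("cuda_hello");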
70b8b8f6f4cd82474d23dd0c308f57da495aea58.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "relabelUnrollKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *components = NULL; hipMalloc(&components, XSIZE*YSIZE); int previousLabel = 1; int newLabel = 1; const int colsComponents = 1; const int idx = 1; const int frameRows = 1; const int factor = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( relabelUnrollKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, components,previousLabel,newLabel,colsComponents,idx,frameRows,factor); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( relabelUnrollKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, components,previousLabel,newLabel,colsComponents,idx,frameRows,factor); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( relabelUnrollKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, components,previousLabel,newLabel,colsComponents,idx,frameRows,factor); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
70b8b8f6f4cd82474d23dd0c308f57da495aea58.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "relabelUnrollKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *components = NULL; cudaMalloc(&components, XSIZE*YSIZE); int previousLabel = 1; int newLabel = 1; const int colsComponents = 1; const int idx = 1; const int frameRows = 1; const int factor = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); relabelUnrollKernel<<<gridBlock,threadBlock>>>(components,previousLabel,newLabel,colsComponents,idx,frameRows,factor); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { relabelUnrollKernel<<<gridBlock,threadBlock>>>(components,previousLabel,newLabel,colsComponents,idx,frameRows,factor); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { relabelUnrollKernel<<<gridBlock,threadBlock>>>(components,previousLabel,newLabel,colsComponents,idx,frameRows,factor); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
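Note that the timed loop above records the end timestamp right after the last asynchronous kernel launch, so the interval mostly measures launch overhead. A variant with a device synchronization before the second timestamp, assuming the same kernel arguments and launch configuration as in the benchmark above, is sketched here for comparison:

// Hedged sketch: time the 1000 launches including their execution by
// synchronizing with the device before reading the clock again.
auto start = std::chrono::steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    relabelUnrollKernel<<<gridBlock, threadBlock>>>(components, previousLabel, newLabel,
                                                    colsComponents, idx, frameRows, factor);
}
cudaDeviceSynchronize();   // wait until all enqueued kernels have finished
auto end = std::chrono::steady_clock::now();
auto usecs = std::chrono::duration_cast<std::chrono::duration<float, std::chrono::microseconds::period> >(end - start);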
43120ed53329de6dfb6442bbae6b23ca3723dfda.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" #include "convolutional_layer.h" #include "deconvolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "im2col.h" #include "col2im.h" #include "utils.h" #include "hip/hip_runtime.h" void forward_deconvolutional_layer_gpu(layer l, network net) { int i; int m = l.size*l.size*l.n; int n = l.h*l.w; int k = l.c; fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1); for(i = 0; i < l.batch; ++i){ float *a = l.weights_gpu; float *b = net.input_gpu + i*l.c*l.h*l.w; float *c = net.workspace; gemm_gpu(1,0,m,n,k,1,a,m,b,n,0,c,n); col2im_gpu(net.workspace, l.out_c, l.out_h, l.out_w, l.size, l.stride, l.pad, l.output_gpu+i*l.outputs); } if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, net); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } activate_array_gpu(l.output_gpu, l.batch*l.n*l.out_w*l.out_h, l.activation); } void backward_deconvolutional_layer_gpu(layer l, network net) { int i; //constrain_gpu(l.outputs*l.batch, 1, l.delta_gpu, 1); gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); if(l.batch_normalize){ backward_batchnorm_layer_gpu(l, net); } else { backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); } //if(net.delta_gpu) memset(net.delta_gpu, 0, l.batch*l.h*l.w*l.c*sizeof(float)); for(i = 0; i < l.batch; ++i){ int m = l.c; int n = l.size*l.size*l.n; int k = l.h*l.w; float *a = net.input_gpu + i*m*k; float *b = net.workspace; float *c = l.weight_updates_gpu; im2col_gpu(l.delta_gpu + i*l.outputs, l.out_c, l.out_h, l.out_w, l.size, l.stride, l.pad, b); gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n); if(net.delta_gpu){ int m = l.c; int n = l.h*l.w; int k = l.size*l.size*l.n; float *a = l.weights_gpu; float *b = net.workspace; float *c = net.delta_gpu + i*n*m; gemm_gpu(0,0,m,n,k,1,a,k,b,n,1,c,n); } } } void pull_deconvolutional_layer(layer l) { cuda_pull_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size); cuda_pull_array(l.biases_gpu, l.biases, l.n); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size); cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_pull_array(l.scales_gpu, l.scales, l.n); cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void push_deconvolutional_layer(layer l) { cuda_push_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size); cuda_push_array(l.biases_gpu, l.biases, l.n); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size); cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_push_array(l.scales_gpu, l.scales, l.n); cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void update_deconvolutional_layer_gpu(layer l, update_args a) { float learning_rate = a.learning_rate*l.learning_rate_scale; float momentum = a.momentum; float decay = a.decay; int batch = a.batch; if(a.adam){ adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t); adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); if(l.scales_gpu){ adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, 
a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); } }else{ axpy_gpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1); axpy_gpu(l.nweights, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1); scal_gpu(l.nweights, momentum, l.weight_updates_gpu, 1); axpy_gpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1); scal_gpu(l.n, momentum, l.bias_updates_gpu, 1); if(l.scales_gpu){ axpy_gpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1); scal_gpu(l.n, momentum, l.scale_updates_gpu, 1); } } }
43120ed53329de6dfb6442bbae6b23ca3723dfda.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" #include "convolutional_layer.h" #include "deconvolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "im2col.h" #include "col2im.h" #include "utils.h" #include "cuda.h" void forward_deconvolutional_layer_gpu(layer l, network net) { int i; int m = l.size*l.size*l.n; int n = l.h*l.w; int k = l.c; fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1); for(i = 0; i < l.batch; ++i){ float *a = l.weights_gpu; float *b = net.input_gpu + i*l.c*l.h*l.w; float *c = net.workspace; gemm_gpu(1,0,m,n,k,1,a,m,b,n,0,c,n); col2im_gpu(net.workspace, l.out_c, l.out_h, l.out_w, l.size, l.stride, l.pad, l.output_gpu+i*l.outputs); } if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, net); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } activate_array_gpu(l.output_gpu, l.batch*l.n*l.out_w*l.out_h, l.activation); } void backward_deconvolutional_layer_gpu(layer l, network net) { int i; //constrain_gpu(l.outputs*l.batch, 1, l.delta_gpu, 1); gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); if(l.batch_normalize){ backward_batchnorm_layer_gpu(l, net); } else { backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); } //if(net.delta_gpu) memset(net.delta_gpu, 0, l.batch*l.h*l.w*l.c*sizeof(float)); for(i = 0; i < l.batch; ++i){ int m = l.c; int n = l.size*l.size*l.n; int k = l.h*l.w; float *a = net.input_gpu + i*m*k; float *b = net.workspace; float *c = l.weight_updates_gpu; im2col_gpu(l.delta_gpu + i*l.outputs, l.out_c, l.out_h, l.out_w, l.size, l.stride, l.pad, b); gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n); if(net.delta_gpu){ int m = l.c; int n = l.h*l.w; int k = l.size*l.size*l.n; float *a = l.weights_gpu; float *b = net.workspace; float *c = net.delta_gpu + i*n*m; gemm_gpu(0,0,m,n,k,1,a,k,b,n,1,c,n); } } } void pull_deconvolutional_layer(layer l) { cuda_pull_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size); cuda_pull_array(l.biases_gpu, l.biases, l.n); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size); cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_pull_array(l.scales_gpu, l.scales, l.n); cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void push_deconvolutional_layer(layer l) { cuda_push_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size); cuda_push_array(l.biases_gpu, l.biases, l.n); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size); cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_push_array(l.scales_gpu, l.scales, l.n); cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void update_deconvolutional_layer_gpu(layer l, update_args a) { float learning_rate = a.learning_rate*l.learning_rate_scale; float momentum = a.momentum; float decay = a.decay; int batch = a.batch; if(a.adam){ adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t); adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); if(l.scales_gpu){ adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); } }else{ 
axpy_gpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1); axpy_gpu(l.nweights, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1); scal_gpu(l.nweights, momentum, l.weight_updates_gpu, 1); axpy_gpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1); scal_gpu(l.n, momentum, l.bias_updates_gpu, 1); if(l.scales_gpu){ axpy_gpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1); scal_gpu(l.n, momentum, l.scale_updates_gpu, 1); } } }
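For reference, the non-Adam branch of update_deconvolutional_layer_gpu above amounts to SGD with weight decay and momentum, expressed through the axpy_gpu/scal_gpu helpers. The host-side sketch below spells out the same arithmetic as plain loops; it is an illustration of the update rule, not code from the library:

// Equivalent of the axpy/scal sequence above, written out element-wise:
//   dW += -decay * batch * W      (weight-decay term added to the gradient buffer)
//   W  += (lr / batch) * dW       (gradient step)
//   dW *= momentum                (retain a momentum-scaled copy for the next update)
static void sgd_momentum_update(float *w, float *dw, int n,
                                float lr, float momentum, float decay, int batch)
{
    for (int i = 0; i < n; ++i) {
        dw[i] += -decay * batch * w[i];
        w[i]  += (lr / batch) * dw[i];
        dw[i] *= momentum;
    }
}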
857ca678188758f2f323ded9f88ecde5937afa9e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <freeglut.h> #include <cuda_gl_interop.h> #include <cstdio> // printf() #include <math.h> __global__ void g_computeDensityPressure(float* positions, float* densities, float* pressures, int number_of_particles, float particle_diameter_squared, float particle_mass, float POLY6, float BOLTZMANN_CONSTANT, float REST_DENSITY) { int i = (blockIdx.x * blockDim.x + threadIdx.x); if (i < number_of_particles) { // Storing particle variables for readability. float x = positions[i * 2]; float y = positions[i * 2 + 1]; // Pointers as density and pressure are being manipulated. float* density = &densities[i]; float* pressure = &pressures[i]; // Reset density so it can be recalculated each iteration. *density = 0; for (int j = 0; j < number_of_particles; j++) { // Storing variables for other particle for readability. float other_x = positions[j * 2]; float other_y = positions[j * 2 + 1]; // Calculate vector between the pair of particles. float difference_x = other_x - x; float difference_y = other_y - y; // Calculate the squared distance between the pair of particles. float distance_squared = powf(difference_x, 2) + powf(difference_y, 2); // If the particles are overlapping. if (distance_squared < particle_diameter_squared) { // Add the other particle's mass, scaled by a muller kernel to this particle's density. *density += particle_mass * POLY6 * powf(particle_diameter_squared - distance_squared, 3.0f); } } // Calculate pressure of particle using an "equation of state", relating it's density to a given rest density. *pressure = BOLTZMANN_CONSTANT * (*density - REST_DENSITY); } } __global__ void g_computeForces(float* positions, float* velocities, float* forces, float* densities, float* pressures, int number_of_particles, float particle_diameter, float particle_mass, float particle_viscocity, float gravity_x, float gravity_y, float SPIKY_GRAD, float VISC_LAP) { int i = (blockIdx.x * blockDim.x + threadIdx.x); if (i < number_of_particles) { // Storing particle variables for readability. float x = positions[i * 2]; float y = positions[i * 2 + 1]; float vx = velocities[i * 2]; float vy = velocities[i * 2 + 1]; float density = densities[i]; float pressure = pressures[i]; // Pointers as force is getting manipulated. float* fx = &forces[i * 2]; float* fy = &forces[i * 2 + 1]; // Reset pressure and viscocity contributions so they can be recalculated each iteration. float pressure_contribution_x = 0.0f; float pressure_contribution_y = 0.0f; float viscocity_contribution_x = 0.0f; float viscocity_contribution_y = 0.0f; for (int j = 0; j < number_of_particles; j++) { // Do not compare particles if they are the same. if (i != j) { // Storing variables for other particle for readability. float other_x = positions[j * 2]; float other_y = positions[j * 2 + 1]; float other_vx = velocities[j * 2]; float other_vy = velocities[j * 2 + 1]; float other_density = densities[j]; float other_pressure = pressures[j]; // Calculate vector between the pair of particles. float difference_x = other_x - x; float difference_y = other_y - y; // Calculate the distance between the pair of particles. Distance needed later so no benefit from comparing squared distances. float distance = sqrt(powf(difference_x, 2.f) + powf(difference_y, 2.f)); // If particles are overlapping. if (distance != 0 && distance < particle_diameter) // <--- distance != 0 is handling case where particles are at the same position. 
{ // Calculate the direction vector of this particle. float direction_x = -difference_x / distance; float direction_y = -difference_y / distance; // Add other particle's pressure and viscocity contributions using Navier-Stokes equations, scaled by Muller kernels. pressure_contribution_x += direction_x * particle_mass * (pressure + other_pressure) / (2.f * other_density) * SPIKY_GRAD * powf(particle_diameter - distance, 2.f); pressure_contribution_y += direction_y * particle_mass * (pressure + other_pressure) / (2.f * other_density) * SPIKY_GRAD * powf(particle_diameter - distance, 2.f); viscocity_contribution_x += particle_viscocity * particle_mass * (other_vx - vx) / other_density * VISC_LAP * (particle_diameter - distance); viscocity_contribution_y += particle_viscocity * particle_mass * (other_vy - vy) / other_density * VISC_LAP * (particle_diameter - distance); } } } // Calculate gravity contribution. float gravity_contribution_x = gravity_x * density; float gravity_contribution_y = gravity_y * density; // Add all force contributions together to calculate the particle's force. *fx = pressure_contribution_x + viscocity_contribution_x + gravity_contribution_x; *fy = pressure_contribution_y + viscocity_contribution_y + gravity_contribution_y; } } __global__ void g_integrate(float scale, float* positions, float* velocities, float* forces, float* densities, int number_of_particles, float integration_timestep, float damping, float jitter) { int i = (blockIdx.x * blockDim.x + threadIdx.x); if (i < number_of_particles) { // Storing particle variables for readability. float* x = &positions[i * 2]; float* y = &positions[i * 2 + 1]; float* vx = &velocities[i * 2]; float* vy = &velocities[i * 2 + 1]; float* fx = &forces[i * 2]; float* fy = &forces[i * 2 + 1]; float density = densities[i]; // Update velocity of particle. *vx += *fx / density * integration_timestep; *vy += *fy / density * integration_timestep; // Update position of particle. *x += *vx * integration_timestep; *y += *vy * integration_timestep; // Handle collision detection at edges of environment rectangle. // LEFT if (*x < -512.0f * scale) { *vx *= damping; *x = (-512.0f * scale) + jitter; } // RIGHT if (*x > 512.0f * scale) { *vx *= damping; *x = (512.0f * scale) - jitter; } // TOP if (*y > 256.0f * scale) { *vy *= damping; *y = (256.0f * scale) - jitter; } // BOTTOM if (*y < -256.0f * scale) { *vy *= damping; *y = (-256.0f * scale) + jitter; } } } void errorCheck(const int line, const char* const file) { // Checks last cuda kernel for errors. hipError_t error = hipGetLastError(); // If there was an error. if (error != hipSuccess) { // Print error message with line number and file. printf("CUDA ERROR: %s at line %d in\n %s\n", hipGetErrorString(error), line, file); // Exit the application. exit(EXIT_FAILURE); } } void c_cudaMalloc(void** devPtr, int size) { // Allocates device memory. hipMalloc(devPtr, size); errorCheck(__LINE__, __FILE__); } void c_cudaFree(void* devPtr) { // Releases device memory. hipFree(devPtr); errorCheck(__LINE__, __FILE__); } void c_registerVBO(struct cudaGraphicsResource** cuda_vbo_resource, unsigned int vbo) { // Registers OpenGL VBO and returns a handle to it. hipGraphicsGLRegisterBuffer(cuda_vbo_resource, vbo, hipGraphicsMapFlagsNone); errorCheck(__LINE__, __FILE__); } void c_unregisterVBO(struct cudaGraphicsResource* cuda_vbo_resource) { // Unregisters VBO. 
hipGraphicsUnregisterResource(cuda_vbo_resource); errorCheck(__LINE__, __FILE__); } void* c_mapVBO(struct cudaGraphicsResource** cuda_vbo_resource) { // Maps VBO for access by CUDA. hipGraphicsMapResources(1, cuda_vbo_resource, 0); errorCheck(__LINE__, __FILE__); void* vbo_pointer; size_t bytes; // Returns a pointer to the mapped VBO. hipGraphicsResourceGetMappedPointer((void**)&vbo_pointer, &bytes, *cuda_vbo_resource); errorCheck(__LINE__, __FILE__); return vbo_pointer; } void c_unmapVBO(struct cudaGraphicsResource* cuda_vbo_resource) { // Unmaps VBO. hipGraphicsUnmapResources(1, &cuda_vbo_resource, 0); errorCheck(__LINE__, __FILE__); } void c_computeDensityPressure(int threads, int blocks, float* positions, float* densities, float* pressures, int number_of_particles, float particle_diameter_squared, float particle_mass, float POLY6, float BOLTZMANN_CONSTANT, float REST_DENSITY) { hipLaunchKernelGGL(( g_computeDensityPressure), dim3(blocks), dim3(threads), 0, 0, positions, densities, pressures, number_of_particles, particle_diameter_squared, particle_mass, POLY6, BOLTZMANN_CONSTANT, REST_DENSITY); errorCheck(__LINE__, __FILE__); // Sync threads before continuing. hipDeviceSynchronize(); errorCheck(__LINE__, __FILE__); } void c_computeForces(int threads, int blocks, float* positions, float* velocities, float* forces, float* densities, float* pressures, int number_of_particles, float particle_diameter, float particle_mass, float particle_viscocity, float gravity_x, float gravity_y, float SPIKY_GRAD, float VISC_LAP) { hipLaunchKernelGGL(( g_computeForces), dim3(blocks), dim3(threads), 0, 0, positions, velocities, forces, densities, pressures, number_of_particles, particle_diameter, particle_mass, particle_viscocity, gravity_x, gravity_y, SPIKY_GRAD, VISC_LAP); errorCheck(__LINE__, __FILE__); // Sync threads before continuing. hipDeviceSynchronize(); errorCheck(__LINE__, __FILE__); } void c_integrate(int threads, int blocks, float scale, float* positions, float* velocities, float* forces, float* densities, int number_of_particles, float integration_timestep, float damping) { // A small offest applied to particles on boundary collision to avoid stacking. float jitter = static_cast <float> (rand()) / static_cast <float> (RAND_MAX); hipLaunchKernelGGL(( g_integrate), dim3(blocks), dim3(threads), 0, 0, scale, positions, velocities, forces, densities, number_of_particles, integration_timestep, damping, jitter); errorCheck(__LINE__, __FILE__); }
857ca678188758f2f323ded9f88ecde5937afa9e.cu
#include <freeglut.h> #include <cuda_gl_interop.h> #include <cstdio> // printf() #include <math.h> __global__ void g_computeDensityPressure(float* positions, float* densities, float* pressures, int number_of_particles, float particle_diameter_squared, float particle_mass, float POLY6, float BOLTZMANN_CONSTANT, float REST_DENSITY) { int i = (blockIdx.x * blockDim.x + threadIdx.x); if (i < number_of_particles) { // Storing particle variables for readability. float x = positions[i * 2]; float y = positions[i * 2 + 1]; // Pointers as density and pressure are being manipulated. float* density = &densities[i]; float* pressure = &pressures[i]; // Reset density so it can be recalculated each iteration. *density = 0; for (int j = 0; j < number_of_particles; j++) { // Storing variables for other particle for readability. float other_x = positions[j * 2]; float other_y = positions[j * 2 + 1]; // Calculate vector between the pair of particles. float difference_x = other_x - x; float difference_y = other_y - y; // Calculate the squared distance between the pair of particles. float distance_squared = powf(difference_x, 2) + powf(difference_y, 2); // If the particles are overlapping. if (distance_squared < particle_diameter_squared) { // Add the other particle's mass, scaled by a muller kernel to this particle's density. *density += particle_mass * POLY6 * powf(particle_diameter_squared - distance_squared, 3.0f); } } // Calculate pressure of particle using an "equation of state", relating it's density to a given rest density. *pressure = BOLTZMANN_CONSTANT * (*density - REST_DENSITY); } } __global__ void g_computeForces(float* positions, float* velocities, float* forces, float* densities, float* pressures, int number_of_particles, float particle_diameter, float particle_mass, float particle_viscocity, float gravity_x, float gravity_y, float SPIKY_GRAD, float VISC_LAP) { int i = (blockIdx.x * blockDim.x + threadIdx.x); if (i < number_of_particles) { // Storing particle variables for readability. float x = positions[i * 2]; float y = positions[i * 2 + 1]; float vx = velocities[i * 2]; float vy = velocities[i * 2 + 1]; float density = densities[i]; float pressure = pressures[i]; // Pointers as force is getting manipulated. float* fx = &forces[i * 2]; float* fy = &forces[i * 2 + 1]; // Reset pressure and viscocity contributions so they can be recalculated each iteration. float pressure_contribution_x = 0.0f; float pressure_contribution_y = 0.0f; float viscocity_contribution_x = 0.0f; float viscocity_contribution_y = 0.0f; for (int j = 0; j < number_of_particles; j++) { // Do not compare particles if they are the same. if (i != j) { // Storing variables for other particle for readability. float other_x = positions[j * 2]; float other_y = positions[j * 2 + 1]; float other_vx = velocities[j * 2]; float other_vy = velocities[j * 2 + 1]; float other_density = densities[j]; float other_pressure = pressures[j]; // Calculate vector between the pair of particles. float difference_x = other_x - x; float difference_y = other_y - y; // Calculate the distance between the pair of particles. Distance needed later so no benefit from comparing squared distances. float distance = sqrt(powf(difference_x, 2.f) + powf(difference_y, 2.f)); // If particles are overlapping. if (distance != 0 && distance < particle_diameter) // <--- distance != 0 is handling case where particles are at the same position. { // Calculate the direction vector of this particle. 
float direction_x = -difference_x / distance; float direction_y = -difference_y / distance; // Add other particle's pressure and viscocity contributions using Navier-Stokes equations, scaled by Muller kernels. pressure_contribution_x += direction_x * particle_mass * (pressure + other_pressure) / (2.f * other_density) * SPIKY_GRAD * powf(particle_diameter - distance, 2.f); pressure_contribution_y += direction_y * particle_mass * (pressure + other_pressure) / (2.f * other_density) * SPIKY_GRAD * powf(particle_diameter - distance, 2.f); viscocity_contribution_x += particle_viscocity * particle_mass * (other_vx - vx) / other_density * VISC_LAP * (particle_diameter - distance); viscocity_contribution_y += particle_viscocity * particle_mass * (other_vy - vy) / other_density * VISC_LAP * (particle_diameter - distance); } } } // Calculate gravity contribution. float gravity_contribution_x = gravity_x * density; float gravity_contribution_y = gravity_y * density; // Add all force contributions together to calculate the particle's force. *fx = pressure_contribution_x + viscocity_contribution_x + gravity_contribution_x; *fy = pressure_contribution_y + viscocity_contribution_y + gravity_contribution_y; } } __global__ void g_integrate(float scale, float* positions, float* velocities, float* forces, float* densities, int number_of_particles, float integration_timestep, float damping, float jitter) { int i = (blockIdx.x * blockDim.x + threadIdx.x); if (i < number_of_particles) { // Storing particle variables for readability. float* x = &positions[i * 2]; float* y = &positions[i * 2 + 1]; float* vx = &velocities[i * 2]; float* vy = &velocities[i * 2 + 1]; float* fx = &forces[i * 2]; float* fy = &forces[i * 2 + 1]; float density = densities[i]; // Update velocity of particle. *vx += *fx / density * integration_timestep; *vy += *fy / density * integration_timestep; // Update position of particle. *x += *vx * integration_timestep; *y += *vy * integration_timestep; // Handle collision detection at edges of environment rectangle. // LEFT if (*x < -512.0f * scale) { *vx *= damping; *x = (-512.0f * scale) + jitter; } // RIGHT if (*x > 512.0f * scale) { *vx *= damping; *x = (512.0f * scale) - jitter; } // TOP if (*y > 256.0f * scale) { *vy *= damping; *y = (256.0f * scale) - jitter; } // BOTTOM if (*y < -256.0f * scale) { *vy *= damping; *y = (-256.0f * scale) + jitter; } } } void errorCheck(const int line, const char* const file) { // Checks last cuda kernel for errors. cudaError_t error = cudaGetLastError(); // If there was an error. if (error != cudaSuccess) { // Print error message with line number and file. printf("CUDA ERROR: %s at line %d in\n %s\n", cudaGetErrorString(error), line, file); // Exit the application. exit(EXIT_FAILURE); } } void c_cudaMalloc(void** devPtr, int size) { // Allocates device memory. cudaMalloc(devPtr, size); errorCheck(__LINE__, __FILE__); } void c_cudaFree(void* devPtr) { // Releases device memory. cudaFree(devPtr); errorCheck(__LINE__, __FILE__); } void c_registerVBO(struct cudaGraphicsResource** cuda_vbo_resource, unsigned int vbo) { // Registers OpenGL VBO and returns a handle to it. cudaGraphicsGLRegisterBuffer(cuda_vbo_resource, vbo, cudaGraphicsMapFlagsNone); errorCheck(__LINE__, __FILE__); } void c_unregisterVBO(struct cudaGraphicsResource* cuda_vbo_resource) { // Unregisters VBO. cudaGraphicsUnregisterResource(cuda_vbo_resource); errorCheck(__LINE__, __FILE__); } void* c_mapVBO(struct cudaGraphicsResource** cuda_vbo_resource) { // Maps VBO for access by CUDA. 
cudaGraphicsMapResources(1, cuda_vbo_resource, 0); errorCheck(__LINE__, __FILE__); void* vbo_pointer; size_t bytes; // Returns a pointer to the mapped VBO. cudaGraphicsResourceGetMappedPointer((void**)&vbo_pointer, &bytes, *cuda_vbo_resource); errorCheck(__LINE__, __FILE__); return vbo_pointer; } void c_unmapVBO(struct cudaGraphicsResource* cuda_vbo_resource) { // Unmaps VBO. cudaGraphicsUnmapResources(1, &cuda_vbo_resource, 0); errorCheck(__LINE__, __FILE__); } void c_computeDensityPressure(int threads, int blocks, float* positions, float* densities, float* pressures, int number_of_particles, float particle_diameter_squared, float particle_mass, float POLY6, float BOLTZMANN_CONSTANT, float REST_DENSITY) { g_computeDensityPressure<<<blocks, threads>>>(positions, densities, pressures, number_of_particles, particle_diameter_squared, particle_mass, POLY6, BOLTZMANN_CONSTANT, REST_DENSITY); errorCheck(__LINE__, __FILE__); // Sync threads before continuing. cudaDeviceSynchronize(); errorCheck(__LINE__, __FILE__); } void c_computeForces(int threads, int blocks, float* positions, float* velocities, float* forces, float* densities, float* pressures, int number_of_particles, float particle_diameter, float particle_mass, float particle_viscocity, float gravity_x, float gravity_y, float SPIKY_GRAD, float VISC_LAP) { g_computeForces<<<blocks, threads>>>(positions, velocities, forces, densities, pressures, number_of_particles, particle_diameter, particle_mass, particle_viscocity, gravity_x, gravity_y, SPIKY_GRAD, VISC_LAP); errorCheck(__LINE__, __FILE__); // Sync threads before continuing. cudaDeviceSynchronize(); errorCheck(__LINE__, __FILE__); } void c_integrate(int threads, int blocks, float scale, float* positions, float* velocities, float* forces, float* densities, int number_of_particles, float integration_timestep, float damping) { // A small offest applied to particles on boundary collision to avoid stacking. float jitter = static_cast <float> (rand()) / static_cast <float> (RAND_MAX); g_integrate<<<blocks, threads>>>(scale, positions, velocities, forces, densities, number_of_particles, integration_timestep, damping, jitter); errorCheck(__LINE__, __FILE__); }
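For reference, the kernels in this file implement the standard Muller et al. (2003) SPH discretization. The continuous forms are sketched below, writing h for the smoothing length (particle_diameter in the code) and r for the pair distance, and assuming the POLY6, SPIKY_GRAD and VISC_LAP constants carry the usual normalizations:

\[
\rho_i = \sum_j m \, W_{\mathrm{poly6}}(r_{ij}, h), \qquad
W_{\mathrm{poly6}}(r, h) = \frac{315}{64\pi h^{9}}\,(h^2 - r^2)^3, \quad 0 \le r \le h,
\]
\[
p_i = k\,(\rho_i - \rho_0),
\]
\[
\mathbf{f}^{\mathrm{pressure}}_i = -\sum_{j \neq i} m\,\frac{p_i + p_j}{2\rho_j}\,\nabla W_{\mathrm{spiky}}(r_{ij}, h), \qquad
\lVert \nabla W_{\mathrm{spiky}}(r, h) \rVert = \frac{45}{\pi h^{6}}\,(h - r)^2,
\]
\[
\mathbf{f}^{\mathrm{viscosity}}_i = \mu \sum_{j \neq i} m\,\frac{\mathbf{v}_j - \mathbf{v}_i}{\rho_j}\,\nabla^2 W_{\mathrm{visc}}(r_{ij}, h), \qquad
\nabla^2 W_{\mathrm{visc}}(r, h) = \frac{45}{\pi h^{6}}\,(h - r),
\]
\[
\mathbf{f}^{\mathrm{gravity}}_i = \rho_i\,\mathbf{g}, \qquad
\dot{\mathbf{v}}_i = \mathbf{f}_i / \rho_i, \qquad
\dot{\mathbf{x}}_i = \mathbf{v}_i .
\]

g_computeDensityPressure evaluates the first two lines, g_computeForces the next three, and g_integrate performs the explicit Euler update together with the boundary damping.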
abaf33f1f4e757fe125c3354a2156d50212fb846.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from magmablas/zsymmetrize_tiles.cu, normal z -> c, Wed Jan 2 14:18:51 2019 @author Mark Gates */ #include "magma_internal.h" #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ceil(m/NB) x ntile. Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void csymmetrize_tiles_lower( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaFloatComplex *dAend = dA + i*ldda; // end at diagonal dA(i,i) while( dA < dAend ) { *dAT = MAGMA_C_CONJ(*dA); // upper := lower dA += ldda; dAT += 1; } *dA = MAGMA_C_MAKE( MAGMA_C_REAL(*dA), 0 ); // make diagonal real } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void csymmetrize_tiles_upper( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaFloatComplex *dAend = dA + i*ldda; // end at diagonal dA(i,i) while( dA < dAend ) { *dA = MAGMA_C_CONJ(*dAT); // lower := upper dA += ldda; dAT += 1; } *dA = MAGMA_C_MAKE( MAGMA_C_REAL(*dA), 0 ); // make diagonal real } } /***************************************************************************//** Purpose ------- CSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa, to make some blocks of dA into general representations of a symmetric block. In Complex, it sets the diagonal to be Real. This processes NTILE blocks, typically the diagonal blocks. Each block is offset by mstride rows and nstride columns from the previous block. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows & columns of each square block of dA. M >= 0. @param[in,out] dA COMPLEX array, dimension (LDDA,N) The matrix dA. N = m + nstride*(ntile-1). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)). @param[in] ntile INTEGER Number of blocks to symmetrize. ntile >= 0. @param[in] mstride INTEGER Row offset from start of one block to start of next block. mstride >= 0. Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles from overlapping. @param[in] nstride INTEGER Column offset from start of one block to start of next block. nstride >= 0. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_symmetrize_batched *******************************************************************************/ extern "C" void magmablas_csymmetrize_tiles( magma_uplo_t uplo, magma_int_t m, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m + mstride*(ntile-1)) ) info = -5; else if ( ntile < 0 ) info = -6; else if ( mstride < 0 ) info = -7; else if ( nstride < 0 ) info = -8; else if ( mstride < m && nstride < m ) // only one must be >= m. info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || ntile == 0 ) return; dim3 threads( NB, 1 ); dim3 grid( magma_ceildiv( m, NB ), ntile ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( uplo == MagmaUpper ) { hipLaunchKernelGGL(( csymmetrize_tiles_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda, mstride, nstride ); } else { hipLaunchKernelGGL(( csymmetrize_tiles_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda, mstride, nstride ); } }
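A minimal sketch of calling the routine above from host code, assuming the public magma_v2.h header and the standard MAGMA setup/allocation helpers (magma_init, magma_queue_create, magma_cmalloc, ...); the block size, tile count, and strides are illustrative, chosen so that each 64x64 diagonal block of a 256x256 matrix gets its lower triangle mirrored into its upper triangle:

#include "magma_v2.h"   // assumed public MAGMA v2 header

int main()
{
    magma_init();

    magma_queue_t queue;
    magma_queue_create(0 /*device*/, &queue);

    // ntile diagonal blocks of size m x m along the diagonal of an N x N matrix.
    const magma_int_t m = 64, ntile = 4;
    const magma_int_t mstride = m, nstride = m;            // non-overlapping tiles: mstride >= m
    const magma_int_t N = m + nstride * (ntile - 1);       // 256
    const magma_int_t ldda = magma_roundup(m + mstride * (ntile - 1), 32);

    magmaFloatComplex_ptr dA;
    magma_cmalloc(&dA, ldda * N);
    // ... fill (at least) the lower triangles of the diagonal blocks of dA here ...

    // Copy lower -> upper inside each diagonal block and make each block's diagonal real.
    magmablas_csymmetrize_tiles(MagmaLower, m, dA, ldda, ntile, mstride, nstride, queue);

    magma_free(dA);
    magma_queue_destroy(queue);
    magma_finalize();
    return 0;
}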
abaf33f1f4e757fe125c3354a2156d50212fb846.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from magmablas/zsymmetrize_tiles.cu, normal z -> c, Wed Jan 2 14:18:51 2019 @author Mark Gates */ #include "magma_internal.h" #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ceil(m/NB) x ntile. Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void csymmetrize_tiles_lower( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaFloatComplex *dAend = dA + i*ldda; // end at diagonal dA(i,i) while( dA < dAend ) { *dAT = MAGMA_C_CONJ(*dA); // upper := lower dA += ldda; dAT += 1; } *dA = MAGMA_C_MAKE( MAGMA_C_REAL(*dA), 0 ); // make diagonal real } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void csymmetrize_tiles_upper( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaFloatComplex *dAend = dA + i*ldda; // end at diagonal dA(i,i) while( dA < dAend ) { *dA = MAGMA_C_CONJ(*dAT); // lower := upper dA += ldda; dAT += 1; } *dA = MAGMA_C_MAKE( MAGMA_C_REAL(*dA), 0 ); // make diagonal real } } /***************************************************************************//** Purpose ------- CSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa, to make some blocks of dA into general representations of a symmetric block. In Complex, it sets the diagonal to be Real. This processes NTILE blocks, typically the diagonal blocks. Each block is offset by mstride rows and nstride columns from the previous block. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows & columns of each square block of dA. M >= 0. @param[in,out] dA COMPLEX array, dimension (LDDA,N) The matrix dA. N = m + nstride*(ntile-1). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)). @param[in] ntile INTEGER Number of blocks to symmetrize. ntile >= 0. @param[in] mstride INTEGER Row offset from start of one block to start of next block. mstride >= 0. Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles from overlapping. @param[in] nstride INTEGER Column offset from start of one block to start of next block. nstride >= 0. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_symmetrize_batched *******************************************************************************/ extern "C" void magmablas_csymmetrize_tiles( magma_uplo_t uplo, magma_int_t m, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m + mstride*(ntile-1)) ) info = -5; else if ( ntile < 0 ) info = -6; else if ( mstride < 0 ) info = -7; else if ( nstride < 0 ) info = -8; else if ( mstride < m && nstride < m ) // only one must be >= m. info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || ntile == 0 ) return; dim3 threads( NB, 1 ); dim3 grid( magma_ceildiv( m, NB ), ntile ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( uplo == MagmaUpper ) { csymmetrize_tiles_upper <<< grid, threads, 0, queue->cuda_stream() >>> ( m, dA, ldda, mstride, nstride ); } else { csymmetrize_tiles_lower <<< grid, threads, 0, queue->cuda_stream() >>> ( m, dA, ldda, mstride, nstride ); } }
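As a concrete reading of the launch geometry above (a worked example with illustrative numbers, not taken from the source): with m = 100, NB = 64, ntile = 3, mstride = nstride = 100 and ldda = 300, the launch is a 2 x 3 grid of 64-thread blocks, since magma_ceildiv(100, 64) = 2. Block column blockIdx.y = t shifts dA by t*(mstride + nstride*ldda) = t*(100 + 100*300) = 30100*t elements, i.e. down 100 rows and right 100 columns per tile in column-major storage, landing on the top-left corner of the t-th 100 x 100 diagonal tile. Thread threadIdx.x = tx of block row blockIdx.x = b then handles row i = b*64 + tx of that tile, and the if (i < m) guard masks out rows 100..127 of the second block row.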
fed4f23c9dcf137931098292570b2f49841de580.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <rocblas.h> #include "cudamat_kernels.cuh" #include "cudamat.cuh" extern "C" { /* ------------------------------ CUBLAS init/shutdown ------------------------------ */ inline bool check_cublas_error() { cublasStatus status = hipblasGetError(); return status != HIPBLAS_STATUS_SUCCESS; } inline bool checkCUDAError() { hipError_t err = hipGetLastError(); if (hipSuccess != err) printf("%s\n", hipGetErrorString( err)); return hipSuccess != err; } extern const char* get_last_cuda_error() { hipError_t err = hipGetLastError(); return hipGetErrorString( err); } extern int cublas_init() { hipblasInit(); if (check_cublas_error()) return CUBLAS_ERROR; else return 0; } extern int cublas_shutdown() { hipblasShutdown(); hipDeviceReset(); } extern int cuda_set_device(int deviceId) { hipSetDevice(deviceId); if (checkCUDAError()) return CUDA_ERROR; else return 0; } extern int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) { unsigned int * host_mults; host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int)); FILE * pFile; pFile = fopen (cudamatpath,"r"); for (int i = 0; i < NUM_RND_STREAMS; i++) { fscanf (pFile, "%u", &host_mults[i]); } fclose (pFile); hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults); hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words); hipblasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1); //hipMalloc((void **)&rnd_state->dev_mults, NUM_RND_STREAMS * sizeof(unsigned int)); //hipMalloc((void **)&rnd_state->dev_words, NUM_RND_STREAMS * sizeof(unsigned long long)); //hipMemcpy(rnd_state->dev_mults, host_mults, NUM_RND_STREAMS * sizeof(unsigned int), hipMemcpyHostToDevice); hipDeviceSynchronize(); hipLaunchKernelGGL(( kSeedRandom), dim3(NUM_RND_BLOCKS), dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, seed); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } /* ------------------------------ Utility routines ------------------------------ */ extern int get_leading_dimension(cudamat* mat) { return mat->is_trans ? mat->size[1] : mat->size[0]; } extern int get_nonleading_dimension(cudamat* mat) { return mat->is_trans ? mat->size[0] : mat->size[1]; } extern void set_transpose(cudamat* mat, int is_trans) { mat->is_trans = is_trans; } inline char get_transpose_char(cudamat* mat) { return mat->is_trans ? 
't' : 'n'; } extern void cuda_sync_threads() { hipDeviceSynchronize(); } /* ------------------------------ Allocating/moving data ------------------------------ */ extern int allocate_device_memory(cudamat* mat) { int len = mat->size[0]*mat->size[1]; cublasStatus stat; stat = hipblasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device); if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) { checkCUDAError(); return CUBLAS_ERROR; } mat->on_device = 1; return 0; } extern int copy_to_host(cudamat* mat) { int len = mat->size[0]*mat->size[1]; if (mat->on_device) { hipblasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1); if (check_cublas_error()) return CUBLAS_ERROR; } else return ERROR_NOT_ON_DEVICE; return 0; } extern int copy_to_device(cudamat* mat) { int len = mat->size[0]*mat->size[1]; int err_code = 0; //if (!mat->owns_data) // return VIEW_ERROR; if (!mat->on_device) { err_code = allocate_device_memory(mat); if (err_code) return err_code; } hipblasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1); if (check_cublas_error()) return CUBLAS_ERROR; return 0; } extern int copy_on_device(cudamat* mat1, cudamat* mat2) { int len = mat1->size[0]*mat1->size[1]; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipblasScopy(len, mat1->data_device, 1, mat2->data_device, 1); if (check_cublas_error()) return CUBLAS_ERROR; else return 0; } extern int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) { int height = source->size[0]; int width = source->size[1]; if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height) return ERROR_INCOMPATIBLE_DIMENSIONS; dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1); dim3 kernelBlockDim(32, 1, 1); hipLaunchKernelGGL(( kGetRowSlice), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, start, end, width, height); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } extern int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) { int height = target->size[0]; int width = target->size[1]; if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height) return ERROR_INCOMPATIBLE_DIMENSIONS; dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1); dim3 kernelBlockDim(32, 1, 1); hipLaunchKernelGGL(( kSetRowSlice), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, start, end, width, height); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } extern int copy_transpose(cudamat* source, cudamat* target) { unsigned int height = source->size[0]; unsigned int width = source->size[1]; if (source->size[0] != target->size[1] || source->size[1] != target->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; // setup execution parameters unsigned int grid_x = height / COPY_BLOCK_SIZE; if (height % COPY_BLOCK_SIZE) grid_x++; unsigned int grid_y = width / COPY_BLOCK_SIZE; if (width % COPY_BLOCK_SIZE) grid_y++; dim3 grid(grid_x, grid_y, 1); dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1); hipLaunchKernelGGL(( kTranspose), dim3(grid), dim3(threads) , 0, 0, target->data_device, source->data_device, height, width); if (checkCUDAError()) return CUDA_ERROR; else return 0; } extern int free_device_memory(cudamat* 
mat) { if (mat->owns_data && mat->on_device) { cublasStatus stat; stat = hipblasFree(mat->data_device); mat->on_device = 0; if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) return CUBLAS_ERROR; } return 0; } extern int reshape(cudamat* mat, unsigned int m, unsigned int n) { if (mat->size[0] * mat->size[1] != m * n) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->size[0] = m; mat->size[1] = n; return 0; } extern int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) { if (source->is_trans) return ERROR_TRANSPOSED; if (!source->on_device) return ERROR_NOT_ON_DEVICE; if (last_col > source->size[1] || (first_col >= last_col)) return ERROR_INCOMPATIBLE_DIMENSIONS; int num_rows = source->size[0]; target->data_host = 0; target->data_device = source->data_device + first_col * num_rows; target->on_device = 1; target->on_host = 0; target->size[0] = source->size[0]; target->size[1] = last_col - first_col; target->is_trans = 0; target->owns_data = 0; return 0; } extern int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) { // source must be a vector if (source->size[0] > 1 && source->size[1] > 1) return ERROR_GENERIC; if (source->is_trans) return ERROR_TRANSPOSED; if (!source->on_device) return ERROR_NOT_ON_DEVICE; if (first_ind >= last_ind) return ERROR_INCOMPATIBLE_DIMENSIONS; int num_rows = source->size[0]; target->data_host = 0; target->data_device = source->data_device + first_ind * num_rows; target->on_device = 1; target->on_host = 0; target->is_trans = 0; target->owns_data = 0; if (source->size[0] > 1) { if (last_ind > source->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; target->size[0] = last_ind - first_ind; target->size[1] = 1; } else { if (last_ind > source->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; target->size[0] = 1; target->size[1] = last_ind - first_ind; } return 0; } /* ------------------------------ Initialization routines ------------------------------ */ extern void init_from_array(cudamat* mat, float* data, int m, int n) { mat->data_host = data; mat->size[0] = m; mat->size[1] = n; mat->on_device = 0; mat->on_host = 1; mat->is_trans = 0; mat->owns_data = 1; } extern int init_empty(cudamat* mat, int m, int n) { mat->size[0] = m; mat->size[1] = n; mat->on_device = 0; mat->on_host = 0; mat->is_trans = 0; mat->owns_data = 1; return allocate_device_memory(mat); } /* ------------------------------ Random number generation ------------------------------ */ extern int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) { int len = mat->size[0] * mat->size[1]; if (!mat->on_device) return ERROR_NOT_ON_DEVICE; hipLaunchKernelGGL(( kRandomUniform), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } extern int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) { int len = mat->size[0] * mat->size[1]; if (!mat->on_device) return ERROR_NOT_ON_DEVICE; hipLaunchKernelGGL(( kRandomGaussian), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } /* ------------------------------ Algebraic operations ------------------------------ */ extern int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) 
return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kAddColVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h); hipDeviceSynchronize(); if (checkCUDAError()) { return CUDA_ERROR; } return 0; } extern int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kAddColMult), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, mult, w, h); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[1] != vec->size[1] || vec->size[0] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kAddRowVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kMultByColVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[1] != vec->size[1] || vec->size[0] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kMultByRowVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kLessThan), 
dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int less_than_scalar(cudamat* mat, float val, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kLessThanScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kGreaterThan), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int greater_than_scalar(cudamat* mat, float val, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kGreaterThanScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int max_by_axis(cudamat* mat, cudamat* target, int axis) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kMaxColumnwise), dim3(w),dim3(32), 0, 0, mat->data_device, target->data_device, w, h); hipDeviceSynchronize(); } else return ERROR_UNSUPPORTED; if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int sign(cudamat* mat, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kSign), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_sigmoid(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( 
kApplySigmoid), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_tanh(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kApplyTanh), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_abs(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kApplyAbs), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_log_1_plus_exp(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kApplyLog1PlusExp), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_log(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kLog), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_exp(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kExp), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_sqrt(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kSqrt), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_pow(cudamat* mat, float pow, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return 
ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kPow), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kPowMatrix), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow->data_device, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int reciprocal(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kReciprocal), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) { if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (get_leading_dimension(mat1) != get_leading_dimension(target) || get_nonleading_dimension(mat2) != get_nonleading_dimension(target) || get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) { return ERROR_INCOMPATIBLE_DIMENSIONS; } int m = get_leading_dimension(mat1), k = get_leading_dimension(mat2), n = get_nonleading_dimension(mat2); hipblasSgemm(get_transpose_char(mat1), get_transpose_char(mat2), m, n, k, alpha, mat1->data_device, mat1->size[0], mat2->data_device, mat2->size[0], beta, target->data_device, target->size[0]); if (check_cublas_error()) return CUBLAS_ERROR; hipDeviceSynchronize(); return 0; } extern float vdot(cudamat* mat1, cudamat* mat2, int* err_code) { int len = mat1->size[0]*mat1->size[1]; float res; if (!mat1->on_device || !mat2->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) { *err_code = ERROR_TRANSPOSEDNESS; return 0; } if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) { *err_code = ERROR_INCOMPATIBLE_DIMENSIONS; return 0; } res = hipblasSdot(len, mat1->data_device, 1, mat2->data_device, 1); if (check_cublas_error()) { *err_code = CUBLAS_ERROR; return -1.; } else { *err_code = 0; return res; } } /* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must have the same transposedness. 
*/ extern int add_mult(cudamat* mat1, cudamat* mat2, float alpha) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipblasSaxpy(len, alpha, mat2->data_device, 1, mat1->data_device, 1); if (check_cublas_error()) return CUBLAS_ERROR; return 0; } extern int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kAdd), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kSubtract), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kDivide), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } /* Elementwise multiplication of 2 matrices */ extern int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kMult), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int assign_scalar(cudamat* mat, float alpha) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device) return ERROR_NOT_ON_DEVICE; hipLaunchKernelGGL(( kAssignScalar), 
dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kMultScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kDivideScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int add_scalar(cudamat* mat, float alpha, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; hipLaunchKernelGGL(( kAddScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern float euclid_norm(cudamat* mat, int* err_code) { int len = mat->size[0]*mat->size[1]; float res = hipblasSnrm2(len, mat->data_device, 1); if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (check_cublas_error()) { *err_code = CUBLAS_ERROR; return -1.; } else { *err_code = 0; return res; } } extern int selectRows(cudamat* source, cudamat* target, cudamat* indices){ const int nRetRows = indices->size[1]; if (nRetRows==0) return 0; dim3 gridDim((nRetRows+31)/32); dim3 blockDim(32); hipLaunchKernelGGL(( kSelectRows), dim3(gridDim), dim3(blockDim), 0, 0, source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } extern int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){ const int nSetRows = indices->size[1]; if (nSetRows==0) return 0; dim3 gridDim((nSetRows+31)/32); dim3 blockDim(32); hipLaunchKernelGGL(( kSetSelectedRows), dim3(gridDim), dim3(blockDim), 0, 0, target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]); hipDeviceSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } }
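A minimal sketch of driving this extern "C" interface from host code; it relies only on functions defined above and on the cudamat struct and error codes that cudamat.cuh (already included by this file) is assumed to declare, with illustrative matrix sizes and values. The same sketch works against either the HIP build or the original CUDA build, since the exported wrapper names are identical:

#include <stdio.h>
#include "cudamat.cuh"   // assumed to declare the cudamat struct and error codes used here

int main()
{
    if (cublas_init() != 0) { printf("cublas_init failed\n"); return 1; }

    // Two small column-major matrices held on the host.
    float A_host[6] = { 1, 2, 3, 4, 5, 6 };   // 2 x 3
    float B_host[6] = { 1, 0, 0, 1, 1, 1 };   // 3 x 2
    float C_host[4] = { 0 };                  // 2 x 2 result

    cudamat A, B, C;
    init_from_array(&A, A_host, 2, 3);
    init_from_array(&B, B_host, 3, 2);
    if (copy_to_device(&A) != 0 || copy_to_device(&B) != 0) return 1;

    // Allocate the result on the device and compute C = 1.0 * A * B + 0.0 * C
    // (note the (beta, alpha) argument order of dot() above).
    if (init_empty(&C, 2, 2) != 0) return 1;
    if (dot(&A, &B, &C, 0.0f, 1.0f) != 0) return 1;

    // init_empty() leaves data_host unset, so point it at a host buffer before copying back.
    C.data_host = C_host;
    C.on_host = 1;
    if (copy_to_host(&C) != 0) return 1;
    printf("C[0][0] = %f\n", C_host[0]);

    free_device_memory(&A);
    free_device_memory(&B);
    free_device_memory(&C);
    cublas_shutdown();
    return 0;
}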
fed4f23c9dcf137931098292570b2f49841de580.cu
#include <stdio.h> #include <stdlib.h> #include <cublas.h> #include "cudamat_kernels.cuh" #include "cudamat.cuh" extern "C" { /* ------------------------------ CUBLAS init/shutdown ------------------------------ */ inline bool check_cublas_error() { cublasStatus status = cublasGetError(); return status != CUBLAS_STATUS_SUCCESS; } inline bool checkCUDAError() { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) printf("%s\n", cudaGetErrorString( err)); return cudaSuccess != err; } extern const char* get_last_cuda_error() { cudaError_t err = cudaGetLastError(); return cudaGetErrorString( err); } extern int cublas_init() { cublasInit(); if (check_cublas_error()) return CUBLAS_ERROR; else return 0; } extern int cublas_shutdown() { cublasShutdown(); cudaThreadExit(); } extern int cuda_set_device(int deviceId) { cudaSetDevice(deviceId); if (checkCUDAError()) return CUDA_ERROR; else return 0; } extern int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) { unsigned int * host_mults; host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int)); FILE * pFile; pFile = fopen (cudamatpath,"r"); for (int i = 0; i < NUM_RND_STREAMS; i++) { fscanf (pFile, "%u", &host_mults[i]); } fclose (pFile); cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults); cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words); cublasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1); //cudaMalloc((void **)&rnd_state->dev_mults, NUM_RND_STREAMS * sizeof(unsigned int)); //cudaMalloc((void **)&rnd_state->dev_words, NUM_RND_STREAMS * sizeof(unsigned long long)); //cudaMemcpy(rnd_state->dev_mults, host_mults, NUM_RND_STREAMS * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaThreadSynchronize(); kSeedRandom<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, seed); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } /* ------------------------------ Utility routines ------------------------------ */ extern int get_leading_dimension(cudamat* mat) { return mat->is_trans ? mat->size[1] : mat->size[0]; } extern int get_nonleading_dimension(cudamat* mat) { return mat->is_trans ? mat->size[0] : mat->size[1]; } extern void set_transpose(cudamat* mat, int is_trans) { mat->is_trans = is_trans; } inline char get_transpose_char(cudamat* mat) { return mat->is_trans ? 
't' : 'n'; } extern void cuda_sync_threads() { cudaThreadSynchronize(); } /* ------------------------------ Allocating/moving data ------------------------------ */ extern int allocate_device_memory(cudamat* mat) { int len = mat->size[0]*mat->size[1]; cublasStatus stat; stat = cublasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device); if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) { checkCUDAError(); return CUBLAS_ERROR; } mat->on_device = 1; return 0; } extern int copy_to_host(cudamat* mat) { int len = mat->size[0]*mat->size[1]; if (mat->on_device) { cublasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1); if (check_cublas_error()) return CUBLAS_ERROR; } else return ERROR_NOT_ON_DEVICE; return 0; } extern int copy_to_device(cudamat* mat) { int len = mat->size[0]*mat->size[1]; int err_code = 0; //if (!mat->owns_data) // return VIEW_ERROR; if (!mat->on_device) { err_code = allocate_device_memory(mat); if (err_code) return err_code; } cublasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1); if (check_cublas_error()) return CUBLAS_ERROR; return 0; } extern int copy_on_device(cudamat* mat1, cudamat* mat2) { int len = mat1->size[0]*mat1->size[1]; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; cublasScopy(len, mat1->data_device, 1, mat2->data_device, 1); if (check_cublas_error()) return CUBLAS_ERROR; else return 0; } extern int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) { int height = source->size[0]; int width = source->size[1]; if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height) return ERROR_INCOMPATIBLE_DIMENSIONS; dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1); dim3 kernelBlockDim(32, 1, 1); kGetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } extern int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) { int height = target->size[0]; int width = target->size[1]; if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height) return ERROR_INCOMPATIBLE_DIMENSIONS; dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1); dim3 kernelBlockDim(32, 1, 1); kSetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } extern int copy_transpose(cudamat* source, cudamat* target) { unsigned int height = source->size[0]; unsigned int width = source->size[1]; if (source->size[0] != target->size[1] || source->size[1] != target->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; // setup execution parameters unsigned int grid_x = height / COPY_BLOCK_SIZE; if (height % COPY_BLOCK_SIZE) grid_x++; unsigned int grid_y = width / COPY_BLOCK_SIZE; if (width % COPY_BLOCK_SIZE) grid_y++; dim3 grid(grid_x, grid_y, 1); dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1); kTranspose<<< grid, threads >>>(target->data_device, source->data_device, height, width); if (checkCUDAError()) return CUDA_ERROR; else return 0; } extern int free_device_memory(cudamat* mat) { if (mat->owns_data && mat->on_device) { cublasStatus stat; stat = cublasFree(mat->data_device); 
mat->on_device = 0; if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) return CUBLAS_ERROR; } return 0; } extern int reshape(cudamat* mat, unsigned int m, unsigned int n) { if (mat->size[0] * mat->size[1] != m * n) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->size[0] = m; mat->size[1] = n; return 0; } extern int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) { if (source->is_trans) return ERROR_TRANSPOSED; if (!source->on_device) return ERROR_NOT_ON_DEVICE; if (last_col > source->size[1] || (first_col >= last_col)) return ERROR_INCOMPATIBLE_DIMENSIONS; int num_rows = source->size[0]; target->data_host = 0; target->data_device = source->data_device + first_col * num_rows; target->on_device = 1; target->on_host = 0; target->size[0] = source->size[0]; target->size[1] = last_col - first_col; target->is_trans = 0; target->owns_data = 0; return 0; } extern int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) { // source must be a vector if (source->size[0] > 1 && source->size[1] > 1) return ERROR_GENERIC; if (source->is_trans) return ERROR_TRANSPOSED; if (!source->on_device) return ERROR_NOT_ON_DEVICE; if (first_ind >= last_ind) return ERROR_INCOMPATIBLE_DIMENSIONS; int num_rows = source->size[0]; target->data_host = 0; target->data_device = source->data_device + first_ind * num_rows; target->on_device = 1; target->on_host = 0; target->is_trans = 0; target->owns_data = 0; if (source->size[0] > 1) { if (last_ind > source->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; target->size[0] = last_ind - first_ind; target->size[1] = 1; } else { if (last_ind > source->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; target->size[0] = 1; target->size[1] = last_ind - first_ind; } return 0; } /* ------------------------------ Initialization routines ------------------------------ */ extern void init_from_array(cudamat* mat, float* data, int m, int n) { mat->data_host = data; mat->size[0] = m; mat->size[1] = n; mat->on_device = 0; mat->on_host = 1; mat->is_trans = 0; mat->owns_data = 1; } extern int init_empty(cudamat* mat, int m, int n) { mat->size[0] = m; mat->size[1] = n; mat->on_device = 0; mat->on_host = 0; mat->is_trans = 0; mat->owns_data = 1; return allocate_device_memory(mat); } /* ------------------------------ Random number generation ------------------------------ */ extern int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) { int len = mat->size[0] * mat->size[1]; if (!mat->on_device) return ERROR_NOT_ON_DEVICE; kRandomUniform<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } extern int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) { int len = mat->size[0] * mat->size[1]; if (!mat->on_device) return ERROR_NOT_ON_DEVICE; kRandomGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } /* ------------------------------ Algebraic operations ------------------------------ */ extern int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] 
!= target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kAddColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h); cudaThreadSynchronize(); if (checkCUDAError()) { return CUDA_ERROR; } return 0; } extern int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kAddColMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[1] != vec->size[1] || vec->size[0] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kAddRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kMultByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[1] != vec->size[1] || vec->size[0] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kMultByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kLessThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int less_than_scalar(cudamat* mat, float val, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return 
ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kLessThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kGreaterThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int greater_than_scalar(cudamat* mat, float val, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kGreaterThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int max_by_axis(cudamat* mat, cudamat* target, int axis) { unsigned int h = mat->size[0], w = mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kMaxColumnwise<<<w,32>>>(mat->data_device, target->data_device, w, h); cudaThreadSynchronize(); } else return ERROR_UNSUPPORTED; if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int sign(cudamat* mat, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kSign<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_sigmoid(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kApplySigmoid<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_tanh(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kApplyTanh<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, 
target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_abs(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kApplyAbs<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_log_1_plus_exp(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kApplyLog1PlusExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_log(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kLog<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_exp(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_sqrt(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kSqrt<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_pow(cudamat* mat, float pow, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kPow<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kPowMatrix<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) 
return CUDA_ERROR; return 0; } extern int reciprocal(cudamat* mat, cudamat* target) { unsigned int len = mat->size[0] * mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kReciprocal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) { if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (get_leading_dimension(mat1) != get_leading_dimension(target) || get_nonleading_dimension(mat2) != get_nonleading_dimension(target) || get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) { return ERROR_INCOMPATIBLE_DIMENSIONS; } int m = get_leading_dimension(mat1), k = get_leading_dimension(mat2), n = get_nonleading_dimension(mat2); cublasSgemm(get_transpose_char(mat1), get_transpose_char(mat2), m, n, k, alpha, mat1->data_device, mat1->size[0], mat2->data_device, mat2->size[0], beta, target->data_device, target->size[0]); if (check_cublas_error()) return CUBLAS_ERROR; cudaThreadSynchronize(); return 0; } extern float vdot(cudamat* mat1, cudamat* mat2, int* err_code) { int len = mat1->size[0]*mat1->size[1]; float res; if (!mat1->on_device || !mat2->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) { *err_code = ERROR_TRANSPOSEDNESS; return 0; } if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) { *err_code = ERROR_INCOMPATIBLE_DIMENSIONS; return 0; } res = cublasSdot(len, mat1->data_device, 1, mat2->data_device, 1); if (check_cublas_error()) { *err_code = CUBLAS_ERROR; return -1.; } else { *err_code = 0; return res; } } /* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must have the same transposedness. 
*/ extern int add_mult(cudamat* mat1, cudamat* mat2, float alpha) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; cublasSaxpy(len, alpha, mat2->data_device, 1, mat1->data_device, 1); if (check_cublas_error()) return CUBLAS_ERROR; return 0; } extern int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kAdd<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kSubtract<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kDivide<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } /* Elementwise multiplication of 2 matrices */ extern int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) { int len = mat1->size[0]*mat1->size[1]; if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int assign_scalar(cudamat* mat, float alpha) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device) return ERROR_NOT_ON_DEVICE; kAssignScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int mult_by_scalar(cudamat* mat, float 
alpha, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kMultScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kDivideScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern int add_scalar(cudamat* mat, float alpha, cudamat* target) { int len = mat->size[0]*mat->size[1]; if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; kAddScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; return 0; } extern float euclid_norm(cudamat* mat, int* err_code) { int len = mat->size[0]*mat->size[1]; float res = cublasSnrm2(len, mat->data_device, 1); if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (check_cublas_error()) { *err_code = CUBLAS_ERROR; return -1.; } else { *err_code = 0; return res; } } extern int selectRows(cudamat* source, cudamat* target, cudamat* indices){ const int nRetRows = indices->size[1]; if (nRetRows==0) return 0; dim3 gridDim((nRetRows+31)/32); dim3 blockDim(32); kSelectRows<<<gridDim, blockDim>>>(source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } extern int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){ const int nSetRows = indices->size[1]; if (nSetRows==0) return 0; dim3 gridDim((nSetRows+31)/32); dim3 blockDim(32); kSetSelectedRows<<<gridDim, blockDim>>>(target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]); cudaThreadSynchronize(); if (checkCUDAError()) return CUDA_ERROR; else return 0; } }
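The cudamat routines above form a small C API over device-resident matrices, with every call returning 0 on success or one of the ERROR_/CUBLAS_ERROR codes. The following host-side sketch of how a few of them compose is illustrative only and not part of the original file; it assumes a header (here called "cudamat.cuh") declaring the cudamat struct and these extern functions, that CUBLAS has already been initialized by the caller, and that init_empty's call to allocate_device_memory marks the matrix as on_device, as in upstream cudamat.

// Hedged usage sketch (not from the original file); header name is assumed.
#include "cudamat.cuh"

int example_axpy_and_norm(void) {
    cudamat a, b;
    int err;

    // Allocate two 4x3 matrices directly on the device.
    if ((err = init_empty(&a, 4, 3)) != 0) return err;
    if ((err = init_empty(&b, 4, 3)) != 0) return err;

    // a := 1 everywhere, b := 2 everywhere.
    if ((err = assign_scalar(&a, 1.0f)) != 0) return err;
    if ((err = assign_scalar(&b, 2.0f)) != 0) return err;

    // a := a + 0.5 * b, i.e. the add_mult / cublasSaxpy path above.
    if ((err = add_mult(&a, &b, 0.5f)) != 0) return err;

    // Euclidean norm of a 4x3 matrix of 2s: sqrt(12 * 4) ~= 6.93.
    float nrm = euclid_norm(&a, &err);
    if (err != 0) return err;
    return (nrm > 6.9f && nrm < 7.0f) ? 0 : -1;
}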
23e73bfc324eecdca735f87278f694511fc06947.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //nvcc -o lab5_1_cpu lab5_1_cpu.cu //Implementacao em CPU para a conversao em tons de cinza duma imagem RGB //Autor: Pedro Silva #include <stdio.h> #include <stdlib.h> #include <time.h> #define STB_IMAGE_IMPLEMENTATION #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image.h" #include "stb_image_write.h" /*stb -> biblioteca para abrir e ler imagens: https://github.com/nothings/stb*/ #define CHANNELS 4 //(RGBA) // Device code // We have 3 channels corresponding to RGB // The input image is encoded as unsigned characters [0, 255] //__global__ void colorToGreyScaleConvertion(unsigned char* grayImage, unsigned char * rgbImage, int width, int height) { for(int col=0; col<width; col++) { for(int row=0; row<height; row++) { // get 1D coordinate for the grayscale image int grayOffset = row * width + col; // one can think of the RGB image having // CHANNEL times columns of the gray scale image int rgbOffset = grayOffset * CHANNELS; unsigned char r = rgbImage[rgbOffset]; // red value for pixel unsigned char g = rgbImage[rgbOffset + 1]; // green value for pixel unsigned char b = rgbImage[rgbOffset + 2]; // blue value for pixel // perform the rescaling and store it // We multiply by floating point constants grayImage[grayOffset] = 0.21f * r + 0.71f * g + 0.07f * b; } } } int main(int argc, char const *argv[]) { printf("Exercicio 1, Lab 5 de CHAD. Leitura e grayscaling de imagens com varias dimensoes (CPU).\n"); int N, M, channels; unsigned char * h_i_rgb, * h_i_gs; struct timespec start, end; double startTime, endTime; for(int i = 0; i < 5; i++){ printf("%i-sima iterao.\n", i); //Definir as dimensoes da imagem, N e M, de acordo com a iteracao, i, do ciclo e load da imagem //unsigned char *data = stbi_load(filename, &x, &y, &n, 0); // Standard parameters: // int *x -- outputs image width in pixels // int *y -- outputs image height in pixels // int *channels_in_file -- outputs # of image components in image file // int desired_channels -- if non-zero, # of image components requested in result //int i = 1; switch(i){ case 0: h_i_rgb = stbi_load("image_255_255.png", &N, &M, &channels, 0); if(h_i_rgb == NULL) fprintf(stderr, "Erro a carregar imagem na iterao %i.\n", i); printf("Dimensoes da imagem: %i x %i.\tNumero de canais: %i.\n", N, M, channels); break; case 1: h_i_rgb = stbi_load("image_800_600.png", &N, &M, &channels, 0); if(h_i_rgb == NULL) fprintf(stderr, "Erro a carregar imagem na iterao %i.\n", i); printf("Dimensoes da imagem: %i x %i.\tNumero de canais: %i.\n", N, M, channels); break; case 2: h_i_rgb = stbi_load("image_1920_1080.png", &N, &M, &channels, 0); if(h_i_rgb == NULL) fprintf(stderr, "Erro a carregar imagem na iterao %i.\n", i); printf("Dimensoes da imagem: %i x %i.\tNumero de canais: %i.\n", N, M, channels); break; case 3: h_i_rgb = stbi_load("image_3840_2160.png", &N, &M, &channels, 0); if(h_i_rgb == NULL) fprintf(stderr, "Erro a carregar imagem na iterao %i.\n", i); printf("Dimensoes da imagem: %i x %i.\tNumero de canais: %i.\n", N, M, channels); break; case 4: h_i_rgb = stbi_load("image_7680_4320.png", &N, &M, &channels, 0); if(h_i_rgb == NULL) fprintf(stderr, "Erro a carregar imagem na iterao %i.\n", i); printf("Dimensoes da imagem: %i x %i.\tNumero de canais: %i.\n", N, M, channels); break; default: fprintf(stderr, "Erro no switch para inicializar dimensoes da imagem.\n"); return(-1); } //Alocar memoria para imagem GS h_i_gs = (unsigned char*)malloc(N * M * 1 * sizeof(unsigned char)); //So 
temos um canal de cinzentos //comecar conversao e cronometrar clock_gettime(CLOCK_MONOTONIC, &start); colorToGreyScaleConvertion(h_i_gs, h_i_rgb, N, M); //Gravar imagem em ficheiros //int stbi_write_png(char const *filename, int w, int h, int comp, const void *data, int stride_in_bytes); switch(i){ case 0: if(stbi_write_png("image_255_255_grey_cpu.png", N, M, 1, h_i_gs, N) == 0) fprintf(stderr, "Erro a imprimir imagem de %i por %i.\n", N, M); break; case 1: if(stbi_write_png("image_800_600_grey_cpu.png", N, M, 1, h_i_gs, N) == 0) fprintf(stderr, "Erro a imprimir imagem de %i por %i.\n", N, M); break; case 2: if(stbi_write_png("image_1920_1080_grey_cpu.png", N, M, 1, h_i_gs, N) == 0) fprintf(stderr, "Erro a imprimir imagem de %i por %i.\n", N, M); break; case 3: if(stbi_write_png("image_3840_2160_grey_cpu.png", N, M, 1, h_i_gs, N) == 0) fprintf(stderr, "Erro a imprimir imagem de %i por %i.\n", N, M); break; case 4: if(stbi_write_png("image_7680_4320_grey_cpu.png", N, M, 1, h_i_gs, N) == 0) fprintf(stderr, "Erro a imprimir imagem de %i por %i.\n", N, M); break; default: fprintf(stderr, "Erro no switch de imprimir imagem.\n"); return(-1); } //Memory cleanup stbi_image_free(h_i_rgb); free(h_i_gs); clock_gettime(CLOCK_MONOTONIC, &end); startTime = (start.tv_sec * 1e3) + (start.tv_nsec * 1e-6); endTime = (end.tv_sec * 1e3) + (end.tv_nsec * 1e-6); printf("Tempo de execuo do CPU: %fms.\n", endTime - startTime); } return 0; }
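A quick worked example of the indexing used in colorToGreyScaleConvertion above, assuming the loaded PNG really has the 4 channels that the CHANNELS define expects: the image is stored row-major, so for an 800-pixel-wide image the pixel at row 2, column 5 gets grayOffset = 2 * 800 + 5 = 1605 and rgbOffset = 1605 * 4 = 6420; the function then reads bytes 6420, 6421 and 6422 for R, G and B, and the alpha byte at 6423 is ignored.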
23e73bfc324eecdca735f87278f694511fc06947.cu
//nvcc -o lab5_1_cpu lab5_1_cpu.cu //Implementacao em CPU para a conversao em tons de cinza duma imagem RGB //Autor: Pedro Silva #include <stdio.h> #include <stdlib.h> #include <time.h> #define STB_IMAGE_IMPLEMENTATION #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image.h" #include "stb_image_write.h" /*stb -> biblioteca para abrir e ler imagens: https://github.com/nothings/stb*/ #define CHANNELS 4 //(RGBA) // Device code // We have 3 channels corresponding to RGB // The input image is encoded as unsigned characters [0, 255] //__global__ void colorToGreyScaleConvertion(unsigned char* grayImage, unsigned char * rgbImage, int width, int height) { for(int col=0; col<width; col++) { for(int row=0; row<height; row++) { // get 1D coordinate for the grayscale image int grayOffset = row * width + col; // one can think of the RGB image having // CHANNEL times columns of the gray scale image int rgbOffset = grayOffset * CHANNELS; unsigned char r = rgbImage[rgbOffset]; // red value for pixel unsigned char g = rgbImage[rgbOffset + 1]; // green value for pixel unsigned char b = rgbImage[rgbOffset + 2]; // blue value for pixel // perform the rescaling and store it // We multiply by floating point constants grayImage[grayOffset] = 0.21f * r + 0.71f * g + 0.07f * b; } } } int main(int argc, char const *argv[]) { printf("Exercicio 1, Lab 5 de CHAD. Leitura e grayscaling de imagens com varias dimensoes (CPU).\n"); int N, M, channels; unsigned char * h_i_rgb, * h_i_gs; struct timespec start, end; double startTime, endTime; for(int i = 0; i < 5; i++){ printf("%i-ésima iteração.\n", i); //Definir as dimensoes da imagem, N e M, de acordo com a iteracao, i, do ciclo e load da imagem //unsigned char *data = stbi_load(filename, &x, &y, &n, 0); // Standard parameters: // int *x -- outputs image width in pixels // int *y -- outputs image height in pixels // int *channels_in_file -- outputs # of image components in image file // int desired_channels -- if non-zero, # of image components requested in result //int i = 1; switch(i){ case 0: h_i_rgb = stbi_load("image_255_255.png", &N, &M, &channels, 0); if(h_i_rgb == NULL) fprintf(stderr, "Erro a carregar imagem na iteração %i.\n", i); printf("Dimensoes da imagem: %i x %i.\tNumero de canais: %i.\n", N, M, channels); break; case 1: h_i_rgb = stbi_load("image_800_600.png", &N, &M, &channels, 0); if(h_i_rgb == NULL) fprintf(stderr, "Erro a carregar imagem na iteração %i.\n", i); printf("Dimensoes da imagem: %i x %i.\tNumero de canais: %i.\n", N, M, channels); break; case 2: h_i_rgb = stbi_load("image_1920_1080.png", &N, &M, &channels, 0); if(h_i_rgb == NULL) fprintf(stderr, "Erro a carregar imagem na iteração %i.\n", i); printf("Dimensoes da imagem: %i x %i.\tNumero de canais: %i.\n", N, M, channels); break; case 3: h_i_rgb = stbi_load("image_3840_2160.png", &N, &M, &channels, 0); if(h_i_rgb == NULL) fprintf(stderr, "Erro a carregar imagem na iteração %i.\n", i); printf("Dimensoes da imagem: %i x %i.\tNumero de canais: %i.\n", N, M, channels); break; case 4: h_i_rgb = stbi_load("image_7680_4320.png", &N, &M, &channels, 0); if(h_i_rgb == NULL) fprintf(stderr, "Erro a carregar imagem na iteração %i.\n", i); printf("Dimensoes da imagem: %i x %i.\tNumero de canais: %i.\n", N, M, channels); break; default: fprintf(stderr, "Erro no switch para inicializar dimensoes da imagem.\n"); return(-1); } //Alocar memoria para imagem GS h_i_gs = (unsigned char*)malloc(N * M * 1 * sizeof(unsigned char)); //So temos um canal de cinzentos //comecar conversao e cronometrar 
clock_gettime(CLOCK_MONOTONIC, &start); colorToGreyScaleConvertion(h_i_gs, h_i_rgb, N, M); //Gravar imagem em ficheiros //int stbi_write_png(char const *filename, int w, int h, int comp, const void *data, int stride_in_bytes); switch(i){ case 0: if(stbi_write_png("image_255_255_grey_cpu.png", N, M, 1, h_i_gs, N) == 0) fprintf(stderr, "Erro a imprimir imagem de %i por %i.\n", N, M); break; case 1: if(stbi_write_png("image_800_600_grey_cpu.png", N, M, 1, h_i_gs, N) == 0) fprintf(stderr, "Erro a imprimir imagem de %i por %i.\n", N, M); break; case 2: if(stbi_write_png("image_1920_1080_grey_cpu.png", N, M, 1, h_i_gs, N) == 0) fprintf(stderr, "Erro a imprimir imagem de %i por %i.\n", N, M); break; case 3: if(stbi_write_png("image_3840_2160_grey_cpu.png", N, M, 1, h_i_gs, N) == 0) fprintf(stderr, "Erro a imprimir imagem de %i por %i.\n", N, M); break; case 4: if(stbi_write_png("image_7680_4320_grey_cpu.png", N, M, 1, h_i_gs, N) == 0) fprintf(stderr, "Erro a imprimir imagem de %i por %i.\n", N, M); break; default: fprintf(stderr, "Erro no switch de imprimir imagem.\n"); return(-1); } //Memory cleanup stbi_image_free(h_i_rgb); free(h_i_gs); clock_gettime(CLOCK_MONOTONIC, &end); startTime = (start.tv_sec * 1e3) + (start.tv_nsec * 1e-6); endTime = (end.tv_sec * 1e3) + (end.tv_nsec * 1e-6); printf("Tempo de execução do CPU: %fms.\n", endTime - startTime); } return 0; }
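Both listings above keep the conversion loop on the CPU and leave the __global__ qualifier commented out. For reference, a device-side variant of the same loop might look like the sketch below; the kernel name, block size and launch lines are assumptions for illustration, not part of the original lab code, and d_i_gs / d_i_rgb stand for device copies of the host buffers.

// Hedged sketch of a GPU version of colorToGreyScaleConvertion (assumed, not original).
// One thread per pixel; the grayscale weights match the CPU implementation above.
__global__ void colorToGreyScaleConvertionGPU(unsigned char* grayImage,
                                              unsigned char* rgbImage,
                                              int width, int height) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < width && row < height) {
        int grayOffset = row * width + col;      // 1D index into the gray image
        int rgbOffset = grayOffset * CHANNELS;   // 4 bytes per RGBA pixel
        unsigned char r = rgbImage[rgbOffset];
        unsigned char g = rgbImage[rgbOffset + 1];
        unsigned char b = rgbImage[rgbOffset + 2];
        grayImage[grayOffset] = 0.21f * r + 0.71f * g + 0.07f * b;
    }
}

// Possible launch, with N x M pixels and device buffers allocated via cudaMalloc:
//   dim3 block(16, 16);
//   dim3 grid((N + block.x - 1) / block.x, (M + block.y - 1) / block.y);
//   colorToGreyScaleConvertionGPU<<<grid, block>>>(d_i_gs, d_i_rgb, N, M);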
d495fcf309e0769173492be14b42fdd9d28a3037.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __NVCC__ #include "hipcub/hipcub.hpp" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include <memory> #include <vector> #include "paddle/fluid/framework/ddim.h" #include "paddle/fluid/operators/layer_norm_op.h" #include "paddle/fluid/platform/float16.h" #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/cudnn_helper.h" #endif #ifdef PADDLE_WITH_HIP #include "paddle/fluid/platform/miopen_helper.h" #endif namespace paddle { namespace operators { using Tensor = framework::Tensor; using DataLayout = framework::DataLayout; template <typename T> using CudnnDataType = platform::CudnnDataType<T>; template <typename T> using LayerNormParamType = typename CudnnDataType<T>::BatchNormParamType; inline static int GetDesiredBlockDim(int block_dim) { #ifdef __HIPCC__ const int kMaxBlockDim = 256; #else const int kMaxBlockDim = 512; #endif return block_dim >= kMaxBlockDim ? kMaxBlockDim : (1 << (static_cast<int>(std::log2f(block_dim)))); } #define FIXED_BLOCK_DIM_CASE_BASE(log2_block_dim, ...) \ case (1 << (log2_block_dim)): { \ constexpr auto kBlockDim = (1 << (log2_block_dim)); \ __VA_ARGS__; \ } break #define FIXED_BLOCK_DIM_CASE(...) \ FIXED_BLOCK_DIM_CASE_BASE(9, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(8, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(7, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(6, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(5, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(4, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(3, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(2, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(1, ##__VA_ARGS__) #define FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE( \ log2_block_dim, feature_size, kMaxBlockNum, ...) \ case (1 << (log2_block_dim)): { \ for (int i = 0; i < ::ceil(feature_size / (1.0 * kMaxBlockNum)); i++) { \ int col_offset = i * kMaxBlockNum; \ int block_num = ::min(feature_size - col_offset, kMaxBlockNum); \ constexpr auto kBlockDim = (1 << (log2_block_dim)); \ __VA_ARGS__; \ } \ } break #define FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(feature_size, kMaxBlockNum, ...) 
\ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(9, feature_size, kMaxBlockNum, \ ##__VA_ARGS__); \ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(8, feature_size, kMaxBlockNum, \ ##__VA_ARGS__); \ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(7, feature_size, kMaxBlockNum, \ ##__VA_ARGS__); \ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(6, feature_size, kMaxBlockNum, \ ##__VA_ARGS__); \ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(5, feature_size, kMaxBlockNum, \ ##__VA_ARGS__); \ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(4, feature_size, kMaxBlockNum, \ ##__VA_ARGS__); \ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(3, feature_size, kMaxBlockNum, \ ##__VA_ARGS__); \ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(2, feature_size, kMaxBlockNum, \ ##__VA_ARGS__); \ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(1, feature_size, kMaxBlockNum, \ ##__VA_ARGS__) static __device__ __forceinline__ float real_sqrt(float x) { return sqrtf(x); } static __device__ __forceinline__ double real_sqrt(double x) { return sqrt(x); } template <typename T> struct PairForLayerNorm { __device__ __forceinline__ PairForLayerNorm() {} __device__ __forceinline__ PairForLayerNorm(const T &first, const T &second) : first_(first), second_(second) {} T first_; T second_; }; template <typename T> struct PairForLayerNormAddFunctor { __device__ __forceinline__ PairForLayerNorm<T> operator()( const PairForLayerNorm<T> &p1, const PairForLayerNorm<T> &p2) { return PairForLayerNorm<T>(p1.first_ + p2.first_, p1.second_ + p2.second_); } }; template <typename T> __inline__ __device__ T rsqrt_(const T val) { return static_cast<T>(1) / sqrt(val); } template <> __inline__ __device__ float rsqrt_(const float val) { return rsqrtf(val); } template <> __inline__ __device__ double rsqrt_(const double val) { return rsqrt(val); } #if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__) template <> __inline__ __device__ half rsqrt_(const half val) { return hrsqrt(val); } #endif template <typename T, typename U, int BlockDim> __global__ void LayerNormForward(const T *x, const U *scale, const U *bias, T *y, U *mean, U *var, float epsilon, int feature_size) { using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<U>, BlockDim>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ U mean_share; __shared__ U var_share; int beg_idx = blockIdx.x * feature_size + threadIdx.x; int end_idx = (blockIdx.x + 1) * feature_size; // Step 1: Reduce to calculate mean and var U mean_val = 0; U var_val = 0; for (int i = beg_idx; i < end_idx; i += BlockDim) { U tmp = static_cast<U>(x[i]); mean_val += tmp; var_val += (tmp * tmp); } auto pair = BlockReduce(temp_storage) .Reduce(PairForLayerNorm<U>(mean_val, var_val), PairForLayerNormAddFunctor<U>()); if (threadIdx.x == 0) { auto tmp = pair.first_ / feature_size; mean[blockIdx.x] = mean_share = static_cast<U>(tmp); var[blockIdx.x] = var_share = static_cast<U>(pair.second_ / feature_size - tmp * tmp); } __syncthreads(); mean_val = mean_share; U invvar = rsqrt_<U>(var_share + static_cast<U>(epsilon)); // Step 2: Calculate y if (scale != nullptr) { if (bias != nullptr) { for (int i = beg_idx, j = threadIdx.x; i < end_idx; i += BlockDim, j += BlockDim) { y[i] = static_cast<T>( scale[j] * (static_cast<U>(x[i]) - mean_val) * invvar + bias[j]); } } else { for (int i = beg_idx, j = threadIdx.x; i < end_idx; i += BlockDim, j += BlockDim) { y[i] = static_cast<T>(scale[j] * (static_cast<U>(x[i]) - mean_val) * invvar); } } } else { // scale == nullptr if (bias != nullptr) { for (int i = beg_idx, j = threadIdx.x; i < end_idx; i += BlockDim, j += 
BlockDim) { y[i] = static_cast<T>((static_cast<U>(x[i]) - mean_val) * invvar + bias[j]); } } else { for (int i = beg_idx, j = threadIdx.x; i < end_idx; i += BlockDim, j += BlockDim) { y[i] = static_cast<T>((static_cast<U>(x[i]) - mean_val) * invvar); } } } } template <typename T, typename U, int VPT> __inline__ __device__ void cuLoadAddStridedInputs( const int i1_block, const int thr_load_row_off, const int thr_load_col_off, const int i2_off, const int row_stride, U *warp_buf1, U *warp_buf2, const T *input, const T *dout, const int i1_end, const int n2, const U *__restrict__ mean, const U *__restrict__ var, const float epsilon) { const int i1 = i1_block + thr_load_row_off; if (i1 >= i1_end) return; U curr_mean = mean[i1]; U curr_invvar = rsqrt_<U>(var[i1] + epsilon); for (int k = 0; k < VPT; ++k) { const int i2 = i2_off + k; const int load_idx = i1 * n2 + i2; const int write_idx = thr_load_row_off * row_stride + thr_load_col_off + k; if (i2 < n2) { U curr_input = static_cast<U>(input[load_idx]); U curr_dout = static_cast<U>(dout[load_idx]); warp_buf1[write_idx] += curr_dout; warp_buf2[write_idx] += curr_dout * (curr_input - curr_mean) * curr_invvar; } } } template <typename T, typename U, int BDIMX, int BDIMY, int VPTX> __global__ void LayerNormBackwardPartGradGammaBeta( const T *__restrict__ dout, const T *__restrict__ input, const int n1, const int n2, const U *__restrict__ mean, const U *__restrict__ var, float epsilon, U *part_grad_gamma, U *part_grad_beta) { // VPTX -> value per thread.x, BDIMX -> blockDim.x, BDIMY -> blockDim.y, BDIMX // -> blockDim.x // template for compile time optimizations constexpr int row_stride = BDIMX + 1; const int thr_load_col_off = (threadIdx.x * VPTX) & (BDIMX - 1); const int thr_load_row_off = (threadIdx.x * VPTX) / BDIMX + threadIdx.y * BDIMY; const int i2_off = blockIdx.x * BDIMX + thr_load_col_off; constexpr int shared_cap = (BDIMX * BDIMY > 2 * VPTX * BDIMY * row_stride) ? 
BDIMX * BDIMY : 2 * VPTX * BDIMY * row_stride; __shared__ U buf[shared_cap]; U *warp_buf1 = reinterpret_cast<U *>(buf); U *warp_buf2 = warp_buf1 + VPTX * BDIMY * row_stride; for (int idx = threadIdx.y * blockDim.x + threadIdx.x; idx < 2 * VPTX * BDIMY * row_stride; idx += BDIMX * BDIMY) { buf[idx] = U(0); } __syncthreads(); for (int i1_block = blockIdx.y * BDIMY * VPTX; i1_block < n1; i1_block += VPTX * BDIMY * gridDim.y) { cuLoadAddStridedInputs<T, U, VPTX>( i1_block, thr_load_row_off, thr_load_col_off, i2_off, row_stride, warp_buf1, warp_buf2, input, dout, n1, n2, mean, var, epsilon); } __syncthreads(); // inter-warp reductions // sum within each warp U acc1 = U(0); U acc2 = U(0); for (int k = 0; k < VPTX; ++k) { int row1 = threadIdx.y + k * VPTX; int idx1 = row1 * row_stride + threadIdx.x; acc1 += warp_buf1[idx1]; acc2 += warp_buf2[idx1]; } warp_buf1[threadIdx.y * row_stride + threadIdx.x] = acc1; warp_buf2[threadIdx.y * row_stride + threadIdx.x] = acc2; __syncthreads(); // sum all warps for (int offset = VPTX >> 1; offset > 1; offset >>= 1) { if (threadIdx.y < offset) { int row1 = threadIdx.y; int row2 = threadIdx.y + offset; int idx1 = row1 * row_stride + threadIdx.x; int idx2 = row2 * row_stride + threadIdx.x; warp_buf1[idx1] += warp_buf1[idx2]; warp_buf2[idx1] += warp_buf2[idx2]; } __syncthreads(); } int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (threadIdx.y == 0 && i2 < n2) { int row1 = threadIdx.y; int row2 = threadIdx.y + 1; int idx1 = row1 * row_stride + threadIdx.x; int idx2 = row2 * row_stride + threadIdx.x; part_grad_beta[blockIdx.y * n2 + i2] = warp_buf1[idx1] + warp_buf1[idx2]; part_grad_gamma[blockIdx.y * n2 + i2] = warp_buf2[idx1] + warp_buf2[idx2]; } } template <typename T, typename U, int BDIMX, int BDIMY> __global__ void LayerNormBackwardSumGradGammaBeta( const U *part_grad_gamma, const U *part_grad_beta, const int part_size, // const int n1, const int n2, T* grad_gamma, T* grad_beta) { const int n1, const int n2, U *grad_gamma, U *grad_beta) { // sum partial gradients for gamma and beta __shared__ U buf[BDIMX * BDIMY]; int i2 = blockIdx.x * BDIMX + threadIdx.x; if (i2 < n2) { // each warp does sequential reductions until reduced part_size is num_warps int num_warp_reductions = part_size / BDIMY; U sum_gamma = U(0); U sum_beta = U(0); const U *part_grad_gamma_ptr = part_grad_gamma + threadIdx.y * num_warp_reductions * n2 + i2; const U *part_grad_beta_ptr = part_grad_beta + threadIdx.y * num_warp_reductions * n2 + i2; for (int warp_offset = 0; warp_offset < num_warp_reductions; ++warp_offset) { sum_gamma += part_grad_gamma_ptr[warp_offset * n2]; sum_beta += part_grad_beta_ptr[warp_offset * n2]; } // inter-warp reductions constexpr int nbsize3 = BDIMX * BDIMY / 2; for (int offset = BDIMY / 2; offset >= 1; offset /= 2) { // top half write to shared memory if (threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int write_idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x; buf[write_idx] = sum_gamma; buf[write_idx + nbsize3] = sum_beta; } __syncthreads(); // bottom half sums if (threadIdx.y < offset) { const int read_idx = threadIdx.y * BDIMX + threadIdx.x; sum_gamma += buf[read_idx]; sum_beta += buf[read_idx + nbsize3]; } __syncthreads(); } // write out fully summed gradients if (threadIdx.y == 0) { grad_gamma[i2] = sum_gamma; grad_beta[i2] = sum_beta; } } } template <typename T, typename U, int BDIMX, int BDIMY> __global__ void LayerNormBackwardComputeGradInput( const T *__restrict__ dout, const T *__restrict__ input, const int n1, const int n2, // const 
U* __restrict__ mean, const U* __restrict__ var, const float // epsilon, const T* gamma, const U *__restrict__ mean, const U *__restrict__ var, const float epsilon, const U *gamma, T *grad_input) { #ifdef __HIPCC__ for (auto i1 = hipBlockIdx_y; i1 < n1; i1 += hipGridDim_y) { #else for (auto i1 = blockIdx.y; i1 < n1; i1 += gridDim.y) { #endif U sum_loss1 = U(0); U sum_loss2 = U(0); const U c_mean = mean[i1]; const U c_invvar = rsqrt_<U>(var[i1] + epsilon); const T *k_input = input + i1 * n2; const T *k_dout = dout + i1 * n2; constexpr int numx = BDIMX * BDIMY; const int thrx = threadIdx.x + threadIdx.y * BDIMX; if (gamma != NULL) { int l = 4 * thrx; for (; l + 3 < n2; l += 4 * numx) { for (int k = 0; k < 4; ++k) { const U c_h = static_cast<U>(k_input[l + k]); const U c_loss = static_cast<U>(k_dout[l + k]); sum_loss1 += c_loss * gamma[l + k]; sum_loss2 += c_loss * gamma[l + k] * (c_h - c_mean) * c_invvar; } } for (; l < n2; ++l) { const U c_h = static_cast<U>(k_input[l]); const U c_loss = static_cast<U>(k_dout[l]); sum_loss1 += c_loss * gamma[l]; sum_loss2 += c_loss * gamma[l] * (c_h - c_mean) * c_invvar; } } else { int l = 4 * thrx; for (; l + 3 < n2; l += 4 * numx) { for (int k = 0; k < 4; ++k) { const U c_h = static_cast<U>(k_input[l + k]); const U c_loss = static_cast<U>(k_dout[l + k]); sum_loss1 += c_loss; sum_loss2 += c_loss * (c_h - c_mean) * c_invvar; } } for (; l < n2; ++l) { const U c_h = static_cast<U>(k_input[l]); const U c_loss = static_cast<U>(k_dout[l]); sum_loss1 += c_loss; sum_loss2 += c_loss * (c_h - c_mean) * c_invvar; } } // intra-warp reductions for (int mask = BDIMX / 2; mask > 0; mask /= 2) { #ifdef PADDLE_WITH_HIP sum_loss1 += __shfl_xor(sum_loss1, mask, warpSize); // WARP_SHFL_XOR(sum_loss1, mask); sum_loss2 += __shfl_xor(sum_loss2, mask, warpSize); // WARP_SHFL_XOR(sum_loss2, mask); #else sum_loss1 += __shfl_xor_sync(0xffffffff, sum_loss1, mask, warpSize); // WARP_SHFL_XOR(sum_loss1, mask); sum_loss2 += __shfl_xor_sync(0xffffffff, sum_loss2, mask, warpSize); // WARP_SHFL_XOR(sum_loss2, mask); #endif } // inter-warp reductions if (BDIMY > 1) { __shared__ U buf[BDIMX * BDIMY]; for (int offset = BDIMY / 2; offset > 0; offset /= 2) { // upper half of warps write to shared if (threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int wrt_i = (threadIdx.y - offset) * BDIMX + threadIdx.x; buf[2 * wrt_i] = sum_loss1; buf[2 * wrt_i + 1] = sum_loss2; } __syncthreads(); // lower half merges if (threadIdx.y < offset) { const int read_i = threadIdx.y * blockDim.x + threadIdx.x; sum_loss1 += buf[2 * read_i]; sum_loss2 += buf[2 * read_i + 1]; } __syncthreads(); } if (threadIdx.y == 0) { buf[2 * threadIdx.x] = sum_loss1; buf[2 * threadIdx.x + 1] = sum_loss2; } __syncthreads(); if (threadIdx.y != 0) { sum_loss1 = buf[2 * threadIdx.x]; sum_loss2 = buf[2 * threadIdx.x + 1]; } } // all threads now have the two sums over l U fH = (U)n2; U term1 = (U(1) / fH) * c_invvar; T *k_grad_input = grad_input + i1 * n2; if (gamma != NULL) { for (int l = thrx; l < n2; l += numx) { const U c_h = static_cast<U>(k_input[l]); const U c_loss = static_cast<U>(k_dout[l]); U f_grad_input = fH * c_loss * gamma[l]; f_grad_input -= sum_loss1; f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2; f_grad_input *= term1; k_grad_input[l] = static_cast<T>(f_grad_input); } } else { for (int l = thrx; l < n2; l += numx) { const U c_h = static_cast<U>(k_input[l]); const U c_loss = static_cast<U>(k_dout[l]); U f_grad_input = fH * c_loss; f_grad_input -= sum_loss1; f_grad_input -= (c_h - c_mean) * c_invvar * 
sum_loss2; f_grad_input *= term1; k_grad_input[l] = static_cast<T>(f_grad_input); } } } } // Make sure that d_scale != nullptr && d_bias != nullptr // Since d_scale != nullptr, scale would not be nullptr template <typename T, typename U, int BlockDim, bool HasDx> __global__ void LayerNormBackwardGradientAll(const T *x, const T *d_y, U *d_scale, U *d_bias, T *d_x, const U *mean, const U *var, const U *scale, float epsilon, int batch_size, int feature_size, int col_offset) { using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<U>, BlockDim>; __shared__ typename BlockReduce::TempStorage temp_storage; int beg_idx = threadIdx.x * feature_size + (blockIdx.x + col_offset); int end_idx = batch_size * feature_size + (blockIdx.x + col_offset); int stride = BlockDim * feature_size; U d_scale_partial = static_cast<U>(0), d_bias_partial = static_cast<U>(0); for (int i = beg_idx; i < end_idx; i += stride) { int row_idx = i / feature_size; auto var_val = real_sqrt(static_cast<U>(var[row_idx]) + epsilon); d_scale_partial += static_cast<U>(d_y[i]) * (static_cast<U>(x[i]) - mean[row_idx]) / var_val; d_bias_partial += static_cast<U>(d_y[i]); if (HasDx) { d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) * scale[blockIdx.x + col_offset] / var_val); } } auto pair = BlockReduce(temp_storage) .Reduce(PairForLayerNorm<U>(d_scale_partial, d_bias_partial), PairForLayerNormAddFunctor<U>()); if (threadIdx.x == 0) { d_scale[blockIdx.x + col_offset] = pair.first_; d_bias[blockIdx.x + col_offset] = pair.second_; } } // Make sure that there is only one true expression: d_scale != nullptr // or d_bias != nullptr // Notice: scale may be nullptr template <typename T, typename U, int BlockDim, bool HasDx, bool HasDScale> __global__ void LayerNormBackwardGradientScaleOrBias( const T *x, const T *d_y, U *d_scale, U *d_bias, T *d_x, const U *mean, const U *var, const U *scale, float epsilon, int batch_size, int feature_size, int col_offset) { using BlockReduce = hipcub::BlockReduce<U, BlockDim>; __shared__ typename BlockReduce::TempStorage temp_storage; int beg_idx = threadIdx.x * feature_size + blockIdx.x + col_offset; int end_idx = batch_size * feature_size + blockIdx.x + col_offset; int stride = BlockDim * feature_size; U d_scale_or_d_bias_partial = static_cast<U>(0); for (int i = beg_idx; i < end_idx; i += stride) { int row_idx = i / feature_size; auto var_val = static_cast<U>(real_sqrt(static_cast<float>(var[row_idx]) + epsilon)); if (HasDScale) { d_scale_or_d_bias_partial += static_cast<U>(d_y[i]) * (static_cast<U>(x[i]) - mean[row_idx]) / var_val; } else { // d_bias != nullptr d_scale_or_d_bias_partial += static_cast<U>(d_y[i]); } if (HasDx) { if (scale != nullptr) { d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) * scale[blockIdx.x + col_offset] / var_val); } else { d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) / var_val); } } } d_scale_or_d_bias_partial = BlockReduce(temp_storage).Reduce(d_scale_or_d_bias_partial, hipcub::Sum()); if (threadIdx.x == 0) { if (HasDScale) { d_scale[blockIdx.x + col_offset] = d_scale_or_d_bias_partial; } else { d_bias[blockIdx.x + col_offset] = d_scale_or_d_bias_partial; } } } template <typename T, typename U, int BlockDim> __global__ void LayerNormBackwardPostProcessToCalculateDX(const T *x, T *d_x, const U *mean, const U *var, float epsilon, int feature_size) { using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<U>, BlockDim>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ U d_x_reduce_tmp[2]; int beg_idx = blockIdx.x * feature_size + threadIdx.x; int end_idx 
= (blockIdx.x + 1) * feature_size; U block_mean = mean[blockIdx.x]; U block_var = var[blockIdx.x]; U d_x_mean_partial = static_cast<U>(0), d_x_var_partial = static_cast<U>(0); for (int i = beg_idx; i < end_idx; i += BlockDim) { d_x_mean_partial += static_cast<U>(d_x[i]); d_x_var_partial += static_cast<U>(d_x[i]) * (static_cast<U>(x[i]) - block_mean); } auto pair = BlockReduce(temp_storage) .Reduce(PairForLayerNorm<U>(d_x_mean_partial, d_x_var_partial), PairForLayerNormAddFunctor<U>()); if (threadIdx.x == 0) { d_x_reduce_tmp[0] = static_cast<float>(pair.first_) / feature_size; d_x_reduce_tmp[1] = static_cast<float>(pair.second_) / (feature_size * (static_cast<float>(block_var) + epsilon)); } __syncthreads(); d_x_mean_partial = d_x_reduce_tmp[0]; d_x_var_partial = d_x_reduce_tmp[1]; for (int i = beg_idx; i < end_idx; i += BlockDim) { d_x[i] -= static_cast<T>(d_x_mean_partial); d_x[i] -= static_cast<T>((static_cast<U>(x[i]) - block_mean) * d_x_var_partial); } } // Here, we only calculate d_x template <typename T, typename U, int BlockDim> __global__ void LayerNormBackwardGradientOnlyDX(const T *x, const T *d_y, T *d_x, const U *mean, const U *var, const U *scale, float epsilon, int feature_size) { using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<U>, BlockDim>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ U d_x_reduce_tmp[2]; int beg_idx = blockIdx.x * feature_size + threadIdx.x; int end_idx = (blockIdx.x + 1) * feature_size; U block_mean = mean[blockIdx.x], block_var = var[blockIdx.x]; U d_x_mean_partial = static_cast<U>(0), d_x_var_partial = static_cast<U>(0); for (int i = beg_idx; i < end_idx; i += BlockDim) { auto var_val = static_cast<U>(real_sqrt(static_cast<float>(block_var) + epsilon)); if (scale != nullptr) { int col_idx = i % feature_size; d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) * scale[col_idx] / var_val); } else { d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) / var_val); } d_x_mean_partial += static_cast<U>(d_x[i]); d_x_var_partial += static_cast<U>(d_x[i]) * (static_cast<U>(x[i]) - block_mean); } auto pair = BlockReduce(temp_storage) .Reduce(PairForLayerNorm<U>(d_x_mean_partial, d_x_var_partial), PairForLayerNormAddFunctor<U>()); if (threadIdx.x == 0) { d_x_reduce_tmp[0] = static_cast<float>(pair.first_) / feature_size; d_x_reduce_tmp[1] = static_cast<float>(pair.second_) / (feature_size * (static_cast<float>(block_var) + epsilon)); } __syncthreads(); d_x_mean_partial = d_x_reduce_tmp[0]; d_x_var_partial = d_x_reduce_tmp[1]; for (int i = beg_idx; i < end_idx; i += BlockDim) { d_x[i] -= static_cast<T>(d_x_mean_partial); d_x[i] -= static_cast<T>((static_cast<U>(x[i]) - block_mean) * d_x_var_partial); } } template <typename T, typename U> __global__ void LayerNormBackwardWhenBatchSizeIsOne( const T *x, const T *d_y, T *d_x, U *d_scale, U *d_bias, const U *mean, const U *var, const U *scale, float epsilon, int feature_size) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < feature_size) { auto var_val = static_cast<U>(real_sqrt(static_cast<float>(var[idx]) + epsilon)); if (d_x != nullptr) { if (d_scale == nullptr) { d_x[idx] = static_cast<T>(static_cast<U>(d_y[idx]) / var_val); } else { d_x[idx] = static_cast<T>(static_cast<U>(d_y[idx]) * scale[idx] / var_val); } } if (d_scale != nullptr) { d_scale[idx] = static_cast<U>(d_y[idx]) * (static_cast<U>(x[idx]) - mean[idx]) / var_val; } if (d_bias != nullptr) d_bias[idx] = static_cast<U>(d_y[idx]); } } template <typename T, typename U> static void LayerNormBackward(const T *x, const T 
*d_y, const U *scale, const U *mean, const U *var, T *d_x, U *d_scale, U *d_bias, float epsilon, int batch_size, int feature_size, const framework::ExecutionContext &ctx) { auto &dev_ctx = ctx.cuda_device_context(); auto stream = dev_ctx.stream(); #ifdef __HIPCC__ const int kMaxBlockDim = 256; #else const int kMaxBlockDim = 512; #endif const int kMaxBlockNum = 128; int gradient_flag = ((d_x != nullptr ? 1 : 0) << 2) | ((d_scale != nullptr ? 1 : 0) << 1) | ((d_bias != nullptr ? 1 : 0)); if (gradient_flag == 0) return; if (batch_size == 1) { hipLaunchKernelGGL(( LayerNormBackwardWhenBatchSizeIsOne< T, U>), dim3((feature_size + kMaxBlockDim - 1) / kMaxBlockDim), dim3(kMaxBlockDim), 0, stream, x, d_y, d_x, d_scale, d_bias, mean, var, scale, epsilon, feature_size); if (d_x != nullptr) { switch (GetDesiredBlockDim(feature_size)) { hipLaunchKernelGGL(( FIXED_BLOCK_DIM_CASE(LayerNormBackwardPostProcessToCalculateDX< T, U, kBlockDim>), dim3(1), dim3(kBlockDim), 0, stream, x, d_x, mean, var, epsilon, feature_size)); } } return; } auto block_dim = GetDesiredBlockDim(batch_size); switch (gradient_flag) { case 1: // d_x == nulptr, d_scale == nullptr, d_bias != nullptr switch (block_dim) { FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE( feature_size, kMaxBlockNum, hipLaunchKernelGGL(( LayerNormBackwardGradientScaleOrBias< T, U, kBlockDim, false, false>), dim3(block_num), dim3(kBlockDim), 0, stream, x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon, batch_size, feature_size, col_offset)); } break; case 2: // d_x == nullptr, d_scale != nullptr, d_bias == nullptr switch (block_dim) { FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE( feature_size, kMaxBlockNum, hipLaunchKernelGGL(( LayerNormBackwardGradientScaleOrBias< T, U, kBlockDim, false, true>), dim3(block_num), dim3(kBlockDim), 0, stream, x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon, batch_size, feature_size, col_offset)); } break; case 3: // d_x == nullptr, d_scale != nulptr, d_bias != nullptr switch (block_dim) { FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE( feature_size, kMaxBlockNum, hipLaunchKernelGGL(( LayerNormBackwardGradientAll< T, U, kBlockDim, false>), dim3(block_num), dim3(kBlockDim), 0, stream, x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon, batch_size, feature_size, col_offset)); } break; case 4: // d_x != nullptr, d_scale == nullptr, d_bias == nullptr switch (GetDesiredBlockDim(feature_size)) { FIXED_BLOCK_DIM_CASE( hipLaunchKernelGGL(( LayerNormBackwardGradientOnlyDX< T, U, kBlockDim>), dim3(batch_size), dim3(kBlockDim), 0, stream, x, d_y, d_x, mean, var, scale, epsilon, feature_size)); } break; case 5: // d_x != nulptr, d_scale == nullptr, d_bias != nullptr switch (block_dim) { FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE( feature_size, kMaxBlockNum, hipLaunchKernelGGL(( LayerNormBackwardGradientScaleOrBias< T, U, kBlockDim, true, false>), dim3(block_num), dim3(kBlockDim), 0, stream, x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon, batch_size, feature_size, col_offset)); } switch (GetDesiredBlockDim(feature_size)) { FIXED_BLOCK_DIM_CASE( hipLaunchKernelGGL(( LayerNormBackwardPostProcessToCalculateDX< T, U, kBlockDim>), dim3(batch_size), dim3(kBlockDim), 0, stream, x, d_x, mean, var, epsilon, feature_size)); } break; case 6: // d_x != nullptr, d_scale != nullptr, d_bias == nullptr switch (block_dim) { FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE( feature_size, kMaxBlockNum, hipLaunchKernelGGL(( LayerNormBackwardGradientScaleOrBias< T, U, kBlockDim, true, true>), dim3(block_num), dim3(kBlockDim), 0, stream, x, d_y, d_scale, d_bias, d_x, mean, var, 
scale, epsilon, batch_size, feature_size, col_offset)); } switch (GetDesiredBlockDim(feature_size)) { FIXED_BLOCK_DIM_CASE( hipLaunchKernelGGL(( LayerNormBackwardPostProcessToCalculateDX< T, U, kBlockDim>), dim3(batch_size), dim3(kBlockDim), 0, stream, x, d_x, mean, var, epsilon, feature_size)); } break; case 7: // d_x != nullptr, d_scale != nullptr, d_bias != nullptr { constexpr int VPT = 4; constexpr int BDIMX2 = 32; constexpr int BDIMY2 = 4; dim3 threads2(BDIMX2, BDIMY2, 1); constexpr int part_size = BDIMY2 * VPT; const dim3 blocks2((feature_size + BDIMX2 - 1) / BDIMX2, part_size, 1); auto part_grad_gamma_ptr = memory::Alloc(dev_ctx, part_size * feature_size * sizeof(U)); auto part_grad_beta_ptr = memory::Alloc(dev_ctx, part_size * feature_size * sizeof(U)); U *part_grad_gamma = reinterpret_cast<U *>(part_grad_gamma_ptr->ptr()); U *part_grad_beta = reinterpret_cast<U *>(part_grad_beta_ptr->ptr()); hipLaunchKernelGGL(( LayerNormBackwardPartGradGammaBeta<T, U, BDIMX2, BDIMY2, VPT>), dim3(blocks2), dim3(threads2), 0, stream, d_y, x, batch_size, feature_size, mean, var, epsilon, part_grad_gamma, part_grad_beta); // compute part_grad_gamma, beta constexpr int BDIMX3 = 32; constexpr int BDIMY3 = 8; dim3 threads3(BDIMX3, BDIMY3, 1); const dim3 blocks3((feature_size + BDIMX2 - 1) / BDIMX2, 1, 1); hipLaunchKernelGGL(( LayerNormBackwardSumGradGammaBeta< T, U, BDIMX3, BDIMY3>), dim3(blocks3), dim3(threads3), 0, stream, part_grad_gamma, part_grad_beta, part_size, batch_size, feature_size, d_scale, d_bias); constexpr int BDIMX1 = 32; constexpr int BDIMY1 = 4; dim3 threads1(BDIMX1, BDIMY1, 1); const dim3 blocks1(1, batch_size, 1); hipLaunchKernelGGL(( LayerNormBackwardComputeGradInput< T, U, BDIMX1, BDIMY1>), dim3(blocks1), dim3(threads1), 0, stream, d_y, x, batch_size, feature_size, mean, var, epsilon, scale, d_x); break; } default: break; } } template <typename T> void LayerNormDirectCUDAFunctor<T>::operator()(gpuStream_t stream, const T *input, std::vector<int> input_shape, const T *bias, const T *scale, T *output, T *mean, T *variance, int begin_norm_axis, float eps) { const auto x_dims = framework::make_ddim(input_shape); auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); int batch_size = static_cast<int>(matrix_dim[0]); int feature_size = static_cast<int>(matrix_dim[1]); switch (GetDesiredBlockDim(feature_size)) { FIXED_BLOCK_DIM_CASE( hipLaunchKernelGGL(( LayerNormForward<T, T, kBlockDim>), dim3(batch_size), dim3(kBlockDim), 0, stream, input, scale, bias, output, mean, variance, eps, feature_size)); default: PADDLE_THROW(platform::errors::InvalidArgument( "Product from begin_norm_axis to end in layer_norm must be larger " "than 1")); break; } } template <typename T> class LayerNormKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { using U = LayerNormParamType<T>; const float epsilon = ctx.Attr<float>("epsilon"); auto *scale = ctx.Input<Tensor>("Scale"); auto *bias = ctx.Input<Tensor>("Bias"); auto *x = ctx.Input<Tensor>("X"); auto *y = ctx.Output<Tensor>("Y"); auto *mean = ctx.Output<Tensor>("Mean"); auto *var = ctx.Output<Tensor>("Variance"); const auto begin_norm_axis = ctx.Attr<int>("begin_norm_axis"); const auto x_dims = x->dims(); auto *x_data = x->data<T>(); auto *y_data = y->mutable_data<T>(ctx.GetPlace()); auto *mean_data = mean->mutable_data<U>(ctx.GetPlace()); auto *var_data = var->mutable_data<U>(ctx.GetPlace()); auto *scale_data = (scale == nullptr ? 
nullptr : scale->data<U>()); auto *bias_data = (bias == nullptr ? nullptr : bias->data<U>()); auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); int batch_size = static_cast<int>(matrix_dim[0]); int feature_size = static_cast<int>(matrix_dim[1]); auto stream = ctx.cuda_device_context().stream(); switch (GetDesiredBlockDim(feature_size)) { FIXED_BLOCK_DIM_CASE( hipLaunchKernelGGL(( LayerNormForward<T, U, kBlockDim>), dim3(batch_size), dim3(kBlockDim), 0, stream, x_data, scale_data, bias_data, y_data, mean_data, var_data, epsilon, feature_size)); default: PADDLE_THROW(platform::errors::InvalidArgument( "Product from begin_norm_axis to end must be larger than 1")); break; } } }; template <typename T> class LayerNormGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { using U = LayerNormParamType<T>; const float epsilon = ctx.Attr<float>("epsilon"); // d_x, d_scale, d_bias may be nullptr auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X")); auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale")); auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias")); auto *x = ctx.Input<Tensor>("X"); auto *mean = ctx.Input<Tensor>("Mean"); auto *var = ctx.Input<Tensor>("Variance"); auto *scale = ctx.Input<Tensor>("Scale"); auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y")); auto *x_data = x->data<T>(); auto *d_y_data = d_y->data<T>(); auto *mean_data = mean->data<U>(); auto *var_data = var->data<U>(); auto *scale_data = (scale == nullptr ? nullptr : scale->data<U>()); auto *d_scale_data = (d_scale == nullptr ? nullptr : d_scale->mutable_data<U>(ctx.GetPlace())); auto *d_bias_data = (d_bias == nullptr ? nullptr : d_bias->mutable_data<U>(ctx.GetPlace())); auto *d_x_data = (d_x == nullptr ? 
nullptr : d_x->mutable_data<T>(ctx.GetPlace())); const auto &x_dims = x->dims(); const auto begin_norm_axis = ctx.Attr<int>("begin_norm_axis"); auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); int batch_size = static_cast<int>(matrix_dim[0]); int feature_size = static_cast<int>(matrix_dim[1]); LayerNormBackward<T, U>(x_data, d_y_data, scale_data, mean_data, var_data, d_x_data, d_scale_data, d_bias_data, epsilon, batch_size, feature_size, ctx); } }; template class LayerNormDirectCUDAFunctor<float>; #undef FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE #undef FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE #undef FIXED_BLOCK_DIM_CASE_BASE #undef FIXED_BLOCK_DIM_CASE } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; #ifdef PADDLE_WITH_HIP // MIOPEN do not support double REGISTER_OP_CUDA_KERNEL( layer_norm, ops::LayerNormKernel<paddle::platform::CUDADeviceContext, float>, ops::LayerNormKernel<paddle::platform::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( layer_norm_grad, ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, float>, ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, plat::float16>); #else REGISTER_OP_CUDA_KERNEL( layer_norm, ops::LayerNormKernel<paddle::platform::CUDADeviceContext, float>, ops::LayerNormKernel<paddle::platform::CUDADeviceContext, double>, ops::LayerNormKernel<paddle::platform::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( layer_norm_grad, ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, float>, ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, double>, ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, plat::float16>); #endif
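The hipified file above and the original CUDA file that follows are the same Paddle layer_norm operator; the main mechanical difference introduced by hipify is the kernel-launch syntax (hipLaunchKernelGGL versus the triple-chevron launch). A minimal sketch of that correspondence, using a hypothetical ScaleKernel that is not part of either file, so it stays self-contained:

#include <hip/hip_runtime.h>

// Hypothetical example kernel, only to illustrate the launch-syntax mapping.
__global__ void ScaleKernel(float *data, float alpha, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= alpha;
}

void LaunchScale(float *data, float alpha, int n, hipStream_t stream) {
  const int block = 256;
  const int grid = (n + block - 1) / block;
  // CUDA source form (as in the .cu file below):
  //   ScaleKernel<<<grid, block, 0, stream>>>(data, alpha, n);
  // HIP form emitted by hipify (as in the .hip file above); templated kernels
  // additionally get wrapped in an extra pair of parentheses by the tool.
  hipLaunchKernelGGL(ScaleKernel, dim3(grid), dim3(block), 0, stream, data, alpha, n);
}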
d495fcf309e0769173492be14b42fdd9d28a3037.cu
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __NVCC__ #include "cub/cub.cuh" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include <memory> #include <vector> #include "paddle/fluid/framework/ddim.h" #include "paddle/fluid/operators/layer_norm_op.h" #include "paddle/fluid/platform/float16.h" #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/cudnn_helper.h" #endif #ifdef PADDLE_WITH_HIP #include "paddle/fluid/platform/miopen_helper.h" #endif namespace paddle { namespace operators { using Tensor = framework::Tensor; using DataLayout = framework::DataLayout; template <typename T> using CudnnDataType = platform::CudnnDataType<T>; template <typename T> using LayerNormParamType = typename CudnnDataType<T>::BatchNormParamType; inline static int GetDesiredBlockDim(int block_dim) { #ifdef __HIPCC__ const int kMaxBlockDim = 256; #else const int kMaxBlockDim = 512; #endif return block_dim >= kMaxBlockDim ? kMaxBlockDim : (1 << (static_cast<int>(std::log2f(block_dim)))); } #define FIXED_BLOCK_DIM_CASE_BASE(log2_block_dim, ...) \ case (1 << (log2_block_dim)): { \ constexpr auto kBlockDim = (1 << (log2_block_dim)); \ __VA_ARGS__; \ } break #define FIXED_BLOCK_DIM_CASE(...) \ FIXED_BLOCK_DIM_CASE_BASE(9, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(8, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(7, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(6, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(5, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(4, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(3, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(2, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_CASE_BASE(1, ##__VA_ARGS__) #define FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE( \ log2_block_dim, feature_size, kMaxBlockNum, ...) \ case (1 << (log2_block_dim)): { \ for (int i = 0; i < std::ceil(feature_size / (1.0 * kMaxBlockNum)); i++) { \ int col_offset = i * kMaxBlockNum; \ int block_num = std::min(feature_size - col_offset, kMaxBlockNum); \ constexpr auto kBlockDim = (1 << (log2_block_dim)); \ __VA_ARGS__; \ } \ } break #define FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(feature_size, kMaxBlockNum, ...) 
\ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(9, feature_size, kMaxBlockNum, \ ##__VA_ARGS__); \ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(8, feature_size, kMaxBlockNum, \ ##__VA_ARGS__); \ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(7, feature_size, kMaxBlockNum, \ ##__VA_ARGS__); \ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(6, feature_size, kMaxBlockNum, \ ##__VA_ARGS__); \ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(5, feature_size, kMaxBlockNum, \ ##__VA_ARGS__); \ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(4, feature_size, kMaxBlockNum, \ ##__VA_ARGS__); \ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(3, feature_size, kMaxBlockNum, \ ##__VA_ARGS__); \ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(2, feature_size, kMaxBlockNum, \ ##__VA_ARGS__); \ FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(1, feature_size, kMaxBlockNum, \ ##__VA_ARGS__) static __device__ __forceinline__ float real_sqrt(float x) { return sqrtf(x); } static __device__ __forceinline__ double real_sqrt(double x) { return sqrt(x); } template <typename T> struct PairForLayerNorm { __device__ __forceinline__ PairForLayerNorm() {} __device__ __forceinline__ PairForLayerNorm(const T &first, const T &second) : first_(first), second_(second) {} T first_; T second_; }; template <typename T> struct PairForLayerNormAddFunctor { __device__ __forceinline__ PairForLayerNorm<T> operator()( const PairForLayerNorm<T> &p1, const PairForLayerNorm<T> &p2) { return PairForLayerNorm<T>(p1.first_ + p2.first_, p1.second_ + p2.second_); } }; template <typename T> __inline__ __device__ T rsqrt_(const T val) { return static_cast<T>(1) / sqrt(val); } template <> __inline__ __device__ float rsqrt_(const float val) { return rsqrtf(val); } template <> __inline__ __device__ double rsqrt_(const double val) { return rsqrt(val); } #if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__) template <> __inline__ __device__ half rsqrt_(const half val) { return hrsqrt(val); } #endif template <typename T, typename U, int BlockDim> __global__ void LayerNormForward(const T *x, const U *scale, const U *bias, T *y, U *mean, U *var, float epsilon, int feature_size) { using BlockReduce = cub::BlockReduce<PairForLayerNorm<U>, BlockDim>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ U mean_share; __shared__ U var_share; int beg_idx = blockIdx.x * feature_size + threadIdx.x; int end_idx = (blockIdx.x + 1) * feature_size; // Step 1: Reduce to calculate mean and var U mean_val = 0; U var_val = 0; for (int i = beg_idx; i < end_idx; i += BlockDim) { U tmp = static_cast<U>(x[i]); mean_val += tmp; var_val += (tmp * tmp); } auto pair = BlockReduce(temp_storage) .Reduce(PairForLayerNorm<U>(mean_val, var_val), PairForLayerNormAddFunctor<U>()); if (threadIdx.x == 0) { auto tmp = pair.first_ / feature_size; mean[blockIdx.x] = mean_share = static_cast<U>(tmp); var[blockIdx.x] = var_share = static_cast<U>(pair.second_ / feature_size - tmp * tmp); } __syncthreads(); mean_val = mean_share; U invvar = rsqrt_<U>(var_share + static_cast<U>(epsilon)); // Step 2: Calculate y if (scale != nullptr) { if (bias != nullptr) { for (int i = beg_idx, j = threadIdx.x; i < end_idx; i += BlockDim, j += BlockDim) { y[i] = static_cast<T>( scale[j] * (static_cast<U>(x[i]) - mean_val) * invvar + bias[j]); } } else { for (int i = beg_idx, j = threadIdx.x; i < end_idx; i += BlockDim, j += BlockDim) { y[i] = static_cast<T>(scale[j] * (static_cast<U>(x[i]) - mean_val) * invvar); } } } else { // scale == nullptr if (bias != nullptr) { for (int i = beg_idx, j = threadIdx.x; i < end_idx; i += BlockDim, j += BlockDim) 
{ y[i] = static_cast<T>((static_cast<U>(x[i]) - mean_val) * invvar + bias[j]); } } else { for (int i = beg_idx, j = threadIdx.x; i < end_idx; i += BlockDim, j += BlockDim) { y[i] = static_cast<T>((static_cast<U>(x[i]) - mean_val) * invvar); } } } } template <typename T, typename U, int VPT> __inline__ __device__ void cuLoadAddStridedInputs( const int i1_block, const int thr_load_row_off, const int thr_load_col_off, const int i2_off, const int row_stride, U *warp_buf1, U *warp_buf2, const T *input, const T *dout, const int i1_end, const int n2, const U *__restrict__ mean, const U *__restrict__ var, const float epsilon) { const int i1 = i1_block + thr_load_row_off; if (i1 >= i1_end) return; U curr_mean = mean[i1]; U curr_invvar = rsqrt_<U>(var[i1] + epsilon); for (int k = 0; k < VPT; ++k) { const int i2 = i2_off + k; const int load_idx = i1 * n2 + i2; const int write_idx = thr_load_row_off * row_stride + thr_load_col_off + k; if (i2 < n2) { U curr_input = static_cast<U>(input[load_idx]); U curr_dout = static_cast<U>(dout[load_idx]); warp_buf1[write_idx] += curr_dout; warp_buf2[write_idx] += curr_dout * (curr_input - curr_mean) * curr_invvar; } } } template <typename T, typename U, int BDIMX, int BDIMY, int VPTX> __global__ void LayerNormBackwardPartGradGammaBeta( const T *__restrict__ dout, const T *__restrict__ input, const int n1, const int n2, const U *__restrict__ mean, const U *__restrict__ var, float epsilon, U *part_grad_gamma, U *part_grad_beta) { // VPTX -> value per thread.x, BDIMX -> blockDim.x, BDIMY -> blockDim.y, BDIMX // -> blockDim.x // template for compile time optimizations constexpr int row_stride = BDIMX + 1; const int thr_load_col_off = (threadIdx.x * VPTX) & (BDIMX - 1); const int thr_load_row_off = (threadIdx.x * VPTX) / BDIMX + threadIdx.y * BDIMY; const int i2_off = blockIdx.x * BDIMX + thr_load_col_off; constexpr int shared_cap = (BDIMX * BDIMY > 2 * VPTX * BDIMY * row_stride) ? 
BDIMX * BDIMY : 2 * VPTX * BDIMY * row_stride; __shared__ U buf[shared_cap]; U *warp_buf1 = reinterpret_cast<U *>(buf); U *warp_buf2 = warp_buf1 + VPTX * BDIMY * row_stride; for (int idx = threadIdx.y * blockDim.x + threadIdx.x; idx < 2 * VPTX * BDIMY * row_stride; idx += BDIMX * BDIMY) { buf[idx] = U(0); } __syncthreads(); for (int i1_block = blockIdx.y * BDIMY * VPTX; i1_block < n1; i1_block += VPTX * BDIMY * gridDim.y) { cuLoadAddStridedInputs<T, U, VPTX>( i1_block, thr_load_row_off, thr_load_col_off, i2_off, row_stride, warp_buf1, warp_buf2, input, dout, n1, n2, mean, var, epsilon); } __syncthreads(); // inter-warp reductions // sum within each warp U acc1 = U(0); U acc2 = U(0); for (int k = 0; k < VPTX; ++k) { int row1 = threadIdx.y + k * VPTX; int idx1 = row1 * row_stride + threadIdx.x; acc1 += warp_buf1[idx1]; acc2 += warp_buf2[idx1]; } warp_buf1[threadIdx.y * row_stride + threadIdx.x] = acc1; warp_buf2[threadIdx.y * row_stride + threadIdx.x] = acc2; __syncthreads(); // sum all warps for (int offset = VPTX >> 1; offset > 1; offset >>= 1) { if (threadIdx.y < offset) { int row1 = threadIdx.y; int row2 = threadIdx.y + offset; int idx1 = row1 * row_stride + threadIdx.x; int idx2 = row2 * row_stride + threadIdx.x; warp_buf1[idx1] += warp_buf1[idx2]; warp_buf2[idx1] += warp_buf2[idx2]; } __syncthreads(); } int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (threadIdx.y == 0 && i2 < n2) { int row1 = threadIdx.y; int row2 = threadIdx.y + 1; int idx1 = row1 * row_stride + threadIdx.x; int idx2 = row2 * row_stride + threadIdx.x; part_grad_beta[blockIdx.y * n2 + i2] = warp_buf1[idx1] + warp_buf1[idx2]; part_grad_gamma[blockIdx.y * n2 + i2] = warp_buf2[idx1] + warp_buf2[idx2]; } } template <typename T, typename U, int BDIMX, int BDIMY> __global__ void LayerNormBackwardSumGradGammaBeta( const U *part_grad_gamma, const U *part_grad_beta, const int part_size, // const int n1, const int n2, T* grad_gamma, T* grad_beta) { const int n1, const int n2, U *grad_gamma, U *grad_beta) { // sum partial gradients for gamma and beta __shared__ U buf[BDIMX * BDIMY]; int i2 = blockIdx.x * BDIMX + threadIdx.x; if (i2 < n2) { // each warp does sequential reductions until reduced part_size is num_warps int num_warp_reductions = part_size / BDIMY; U sum_gamma = U(0); U sum_beta = U(0); const U *part_grad_gamma_ptr = part_grad_gamma + threadIdx.y * num_warp_reductions * n2 + i2; const U *part_grad_beta_ptr = part_grad_beta + threadIdx.y * num_warp_reductions * n2 + i2; for (int warp_offset = 0; warp_offset < num_warp_reductions; ++warp_offset) { sum_gamma += part_grad_gamma_ptr[warp_offset * n2]; sum_beta += part_grad_beta_ptr[warp_offset * n2]; } // inter-warp reductions constexpr int nbsize3 = BDIMX * BDIMY / 2; for (int offset = BDIMY / 2; offset >= 1; offset /= 2) { // top half write to shared memory if (threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int write_idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x; buf[write_idx] = sum_gamma; buf[write_idx + nbsize3] = sum_beta; } __syncthreads(); // bottom half sums if (threadIdx.y < offset) { const int read_idx = threadIdx.y * BDIMX + threadIdx.x; sum_gamma += buf[read_idx]; sum_beta += buf[read_idx + nbsize3]; } __syncthreads(); } // write out fully summed gradients if (threadIdx.y == 0) { grad_gamma[i2] = sum_gamma; grad_beta[i2] = sum_beta; } } } template <typename T, typename U, int BDIMX, int BDIMY> __global__ void LayerNormBackwardComputeGradInput( const T *__restrict__ dout, const T *__restrict__ input, const int n1, const int n2, // const 
U* __restrict__ mean, const U* __restrict__ var, const float // epsilon, const T* gamma, const U *__restrict__ mean, const U *__restrict__ var, const float epsilon, const U *gamma, T *grad_input) { #ifdef __HIPCC__ for (auto i1 = hipBlockIdx_y; i1 < n1; i1 += hipGridDim_y) { #else for (auto i1 = blockIdx.y; i1 < n1; i1 += gridDim.y) { #endif U sum_loss1 = U(0); U sum_loss2 = U(0); const U c_mean = mean[i1]; const U c_invvar = rsqrt_<U>(var[i1] + epsilon); const T *k_input = input + i1 * n2; const T *k_dout = dout + i1 * n2; constexpr int numx = BDIMX * BDIMY; const int thrx = threadIdx.x + threadIdx.y * BDIMX; if (gamma != NULL) { int l = 4 * thrx; for (; l + 3 < n2; l += 4 * numx) { for (int k = 0; k < 4; ++k) { const U c_h = static_cast<U>(k_input[l + k]); const U c_loss = static_cast<U>(k_dout[l + k]); sum_loss1 += c_loss * gamma[l + k]; sum_loss2 += c_loss * gamma[l + k] * (c_h - c_mean) * c_invvar; } } for (; l < n2; ++l) { const U c_h = static_cast<U>(k_input[l]); const U c_loss = static_cast<U>(k_dout[l]); sum_loss1 += c_loss * gamma[l]; sum_loss2 += c_loss * gamma[l] * (c_h - c_mean) * c_invvar; } } else { int l = 4 * thrx; for (; l + 3 < n2; l += 4 * numx) { for (int k = 0; k < 4; ++k) { const U c_h = static_cast<U>(k_input[l + k]); const U c_loss = static_cast<U>(k_dout[l + k]); sum_loss1 += c_loss; sum_loss2 += c_loss * (c_h - c_mean) * c_invvar; } } for (; l < n2; ++l) { const U c_h = static_cast<U>(k_input[l]); const U c_loss = static_cast<U>(k_dout[l]); sum_loss1 += c_loss; sum_loss2 += c_loss * (c_h - c_mean) * c_invvar; } } // intra-warp reductions for (int mask = BDIMX / 2; mask > 0; mask /= 2) { #ifdef PADDLE_WITH_HIP sum_loss1 += __shfl_xor(sum_loss1, mask, warpSize); // WARP_SHFL_XOR(sum_loss1, mask); sum_loss2 += __shfl_xor(sum_loss2, mask, warpSize); // WARP_SHFL_XOR(sum_loss2, mask); #else sum_loss1 += __shfl_xor_sync(0xffffffff, sum_loss1, mask, warpSize); // WARP_SHFL_XOR(sum_loss1, mask); sum_loss2 += __shfl_xor_sync(0xffffffff, sum_loss2, mask, warpSize); // WARP_SHFL_XOR(sum_loss2, mask); #endif } // inter-warp reductions if (BDIMY > 1) { __shared__ U buf[BDIMX * BDIMY]; for (int offset = BDIMY / 2; offset > 0; offset /= 2) { // upper half of warps write to shared if (threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int wrt_i = (threadIdx.y - offset) * BDIMX + threadIdx.x; buf[2 * wrt_i] = sum_loss1; buf[2 * wrt_i + 1] = sum_loss2; } __syncthreads(); // lower half merges if (threadIdx.y < offset) { const int read_i = threadIdx.y * blockDim.x + threadIdx.x; sum_loss1 += buf[2 * read_i]; sum_loss2 += buf[2 * read_i + 1]; } __syncthreads(); } if (threadIdx.y == 0) { buf[2 * threadIdx.x] = sum_loss1; buf[2 * threadIdx.x + 1] = sum_loss2; } __syncthreads(); if (threadIdx.y != 0) { sum_loss1 = buf[2 * threadIdx.x]; sum_loss2 = buf[2 * threadIdx.x + 1]; } } // all threads now have the two sums over l U fH = (U)n2; U term1 = (U(1) / fH) * c_invvar; T *k_grad_input = grad_input + i1 * n2; if (gamma != NULL) { for (int l = thrx; l < n2; l += numx) { const U c_h = static_cast<U>(k_input[l]); const U c_loss = static_cast<U>(k_dout[l]); U f_grad_input = fH * c_loss * gamma[l]; f_grad_input -= sum_loss1; f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2; f_grad_input *= term1; k_grad_input[l] = static_cast<T>(f_grad_input); } } else { for (int l = thrx; l < n2; l += numx) { const U c_h = static_cast<U>(k_input[l]); const U c_loss = static_cast<U>(k_dout[l]); U f_grad_input = fH * c_loss; f_grad_input -= sum_loss1; f_grad_input -= (c_h - c_mean) * c_invvar * 
sum_loss2; f_grad_input *= term1; k_grad_input[l] = static_cast<T>(f_grad_input); } } } } // Make sure that d_scale != nullptr && d_bias != nullptr // Since d_scale != nullptr, scale would not be nullptr template <typename T, typename U, int BlockDim, bool HasDx> __global__ void LayerNormBackwardGradientAll(const T *x, const T *d_y, U *d_scale, U *d_bias, T *d_x, const U *mean, const U *var, const U *scale, float epsilon, int batch_size, int feature_size, int col_offset) { using BlockReduce = cub::BlockReduce<PairForLayerNorm<U>, BlockDim>; __shared__ typename BlockReduce::TempStorage temp_storage; int beg_idx = threadIdx.x * feature_size + (blockIdx.x + col_offset); int end_idx = batch_size * feature_size + (blockIdx.x + col_offset); int stride = BlockDim * feature_size; U d_scale_partial = static_cast<U>(0), d_bias_partial = static_cast<U>(0); for (int i = beg_idx; i < end_idx; i += stride) { int row_idx = i / feature_size; auto var_val = real_sqrt(static_cast<U>(var[row_idx]) + epsilon); d_scale_partial += static_cast<U>(d_y[i]) * (static_cast<U>(x[i]) - mean[row_idx]) / var_val; d_bias_partial += static_cast<U>(d_y[i]); if (HasDx) { d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) * scale[blockIdx.x + col_offset] / var_val); } } auto pair = BlockReduce(temp_storage) .Reduce(PairForLayerNorm<U>(d_scale_partial, d_bias_partial), PairForLayerNormAddFunctor<U>()); if (threadIdx.x == 0) { d_scale[blockIdx.x + col_offset] = pair.first_; d_bias[blockIdx.x + col_offset] = pair.second_; } } // Make sure that there is only one true expression: d_scale != nullptr // or d_bias != nullptr // Notice: scale may be nullptr template <typename T, typename U, int BlockDim, bool HasDx, bool HasDScale> __global__ void LayerNormBackwardGradientScaleOrBias( const T *x, const T *d_y, U *d_scale, U *d_bias, T *d_x, const U *mean, const U *var, const U *scale, float epsilon, int batch_size, int feature_size, int col_offset) { using BlockReduce = cub::BlockReduce<U, BlockDim>; __shared__ typename BlockReduce::TempStorage temp_storage; int beg_idx = threadIdx.x * feature_size + blockIdx.x + col_offset; int end_idx = batch_size * feature_size + blockIdx.x + col_offset; int stride = BlockDim * feature_size; U d_scale_or_d_bias_partial = static_cast<U>(0); for (int i = beg_idx; i < end_idx; i += stride) { int row_idx = i / feature_size; auto var_val = static_cast<U>(real_sqrt(static_cast<float>(var[row_idx]) + epsilon)); if (HasDScale) { d_scale_or_d_bias_partial += static_cast<U>(d_y[i]) * (static_cast<U>(x[i]) - mean[row_idx]) / var_val; } else { // d_bias != nullptr d_scale_or_d_bias_partial += static_cast<U>(d_y[i]); } if (HasDx) { if (scale != nullptr) { d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) * scale[blockIdx.x + col_offset] / var_val); } else { d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) / var_val); } } } d_scale_or_d_bias_partial = BlockReduce(temp_storage).Reduce(d_scale_or_d_bias_partial, cub::Sum()); if (threadIdx.x == 0) { if (HasDScale) { d_scale[blockIdx.x + col_offset] = d_scale_or_d_bias_partial; } else { d_bias[blockIdx.x + col_offset] = d_scale_or_d_bias_partial; } } } template <typename T, typename U, int BlockDim> __global__ void LayerNormBackwardPostProcessToCalculateDX(const T *x, T *d_x, const U *mean, const U *var, float epsilon, int feature_size) { using BlockReduce = cub::BlockReduce<PairForLayerNorm<U>, BlockDim>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ U d_x_reduce_tmp[2]; int beg_idx = blockIdx.x * feature_size + threadIdx.x; int end_idx = 
(blockIdx.x + 1) * feature_size; U block_mean = mean[blockIdx.x]; U block_var = var[blockIdx.x]; U d_x_mean_partial = static_cast<U>(0), d_x_var_partial = static_cast<U>(0); for (int i = beg_idx; i < end_idx; i += BlockDim) { d_x_mean_partial += static_cast<U>(d_x[i]); d_x_var_partial += static_cast<U>(d_x[i]) * (static_cast<U>(x[i]) - block_mean); } auto pair = BlockReduce(temp_storage) .Reduce(PairForLayerNorm<U>(d_x_mean_partial, d_x_var_partial), PairForLayerNormAddFunctor<U>()); if (threadIdx.x == 0) { d_x_reduce_tmp[0] = static_cast<float>(pair.first_) / feature_size; d_x_reduce_tmp[1] = static_cast<float>(pair.second_) / (feature_size * (static_cast<float>(block_var) + epsilon)); } __syncthreads(); d_x_mean_partial = d_x_reduce_tmp[0]; d_x_var_partial = d_x_reduce_tmp[1]; for (int i = beg_idx; i < end_idx; i += BlockDim) { d_x[i] -= static_cast<T>(d_x_mean_partial); d_x[i] -= static_cast<T>((static_cast<U>(x[i]) - block_mean) * d_x_var_partial); } } // Here, we only calculate d_x template <typename T, typename U, int BlockDim> __global__ void LayerNormBackwardGradientOnlyDX(const T *x, const T *d_y, T *d_x, const U *mean, const U *var, const U *scale, float epsilon, int feature_size) { using BlockReduce = cub::BlockReduce<PairForLayerNorm<U>, BlockDim>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ U d_x_reduce_tmp[2]; int beg_idx = blockIdx.x * feature_size + threadIdx.x; int end_idx = (blockIdx.x + 1) * feature_size; U block_mean = mean[blockIdx.x], block_var = var[blockIdx.x]; U d_x_mean_partial = static_cast<U>(0), d_x_var_partial = static_cast<U>(0); for (int i = beg_idx; i < end_idx; i += BlockDim) { auto var_val = static_cast<U>(real_sqrt(static_cast<float>(block_var) + epsilon)); if (scale != nullptr) { int col_idx = i % feature_size; d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) * scale[col_idx] / var_val); } else { d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) / var_val); } d_x_mean_partial += static_cast<U>(d_x[i]); d_x_var_partial += static_cast<U>(d_x[i]) * (static_cast<U>(x[i]) - block_mean); } auto pair = BlockReduce(temp_storage) .Reduce(PairForLayerNorm<U>(d_x_mean_partial, d_x_var_partial), PairForLayerNormAddFunctor<U>()); if (threadIdx.x == 0) { d_x_reduce_tmp[0] = static_cast<float>(pair.first_) / feature_size; d_x_reduce_tmp[1] = static_cast<float>(pair.second_) / (feature_size * (static_cast<float>(block_var) + epsilon)); } __syncthreads(); d_x_mean_partial = d_x_reduce_tmp[0]; d_x_var_partial = d_x_reduce_tmp[1]; for (int i = beg_idx; i < end_idx; i += BlockDim) { d_x[i] -= static_cast<T>(d_x_mean_partial); d_x[i] -= static_cast<T>((static_cast<U>(x[i]) - block_mean) * d_x_var_partial); } } template <typename T, typename U> __global__ void LayerNormBackwardWhenBatchSizeIsOne( const T *x, const T *d_y, T *d_x, U *d_scale, U *d_bias, const U *mean, const U *var, const U *scale, float epsilon, int feature_size) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < feature_size) { auto var_val = static_cast<U>(real_sqrt(static_cast<float>(var[idx]) + epsilon)); if (d_x != nullptr) { if (d_scale == nullptr) { d_x[idx] = static_cast<T>(static_cast<U>(d_y[idx]) / var_val); } else { d_x[idx] = static_cast<T>(static_cast<U>(d_y[idx]) * scale[idx] / var_val); } } if (d_scale != nullptr) { d_scale[idx] = static_cast<U>(d_y[idx]) * (static_cast<U>(x[idx]) - mean[idx]) / var_val; } if (d_bias != nullptr) d_bias[idx] = static_cast<U>(d_y[idx]); } } template <typename T, typename U> static void LayerNormBackward(const T *x, const T *d_y, 
const U *scale, const U *mean, const U *var, T *d_x, U *d_scale, U *d_bias, float epsilon, int batch_size, int feature_size, const framework::ExecutionContext &ctx) { auto &dev_ctx = ctx.cuda_device_context(); auto stream = dev_ctx.stream(); #ifdef __HIPCC__ const int kMaxBlockDim = 256; #else const int kMaxBlockDim = 512; #endif const int kMaxBlockNum = 128; int gradient_flag = ((d_x != nullptr ? 1 : 0) << 2) | ((d_scale != nullptr ? 1 : 0) << 1) | ((d_bias != nullptr ? 1 : 0)); if (gradient_flag == 0) return; if (batch_size == 1) { LayerNormBackwardWhenBatchSizeIsOne< T, U><<<(feature_size + kMaxBlockDim - 1) / kMaxBlockDim, kMaxBlockDim, 0, stream>>>(x, d_y, d_x, d_scale, d_bias, mean, var, scale, epsilon, feature_size); if (d_x != nullptr) { switch (GetDesiredBlockDim(feature_size)) { FIXED_BLOCK_DIM_CASE(LayerNormBackwardPostProcessToCalculateDX< T, U, kBlockDim><<<1, kBlockDim, 0, stream>>>( x, d_x, mean, var, epsilon, feature_size)); } } return; } auto block_dim = GetDesiredBlockDim(batch_size); switch (gradient_flag) { case 1: // d_x == nulptr, d_scale == nullptr, d_bias != nullptr switch (block_dim) { FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE( feature_size, kMaxBlockNum, LayerNormBackwardGradientScaleOrBias< T, U, kBlockDim, false, false><<<block_num, kBlockDim, 0, stream>>>( x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon, batch_size, feature_size, col_offset)); } break; case 2: // d_x == nullptr, d_scale != nullptr, d_bias == nullptr switch (block_dim) { FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE( feature_size, kMaxBlockNum, LayerNormBackwardGradientScaleOrBias< T, U, kBlockDim, false, true><<<block_num, kBlockDim, 0, stream>>>( x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon, batch_size, feature_size, col_offset)); } break; case 3: // d_x == nullptr, d_scale != nulptr, d_bias != nullptr switch (block_dim) { FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE( feature_size, kMaxBlockNum, LayerNormBackwardGradientAll< T, U, kBlockDim, false><<<block_num, kBlockDim, 0, stream>>>( x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon, batch_size, feature_size, col_offset)); } break; case 4: // d_x != nullptr, d_scale == nullptr, d_bias == nullptr switch (GetDesiredBlockDim(feature_size)) { FIXED_BLOCK_DIM_CASE( LayerNormBackwardGradientOnlyDX< T, U, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>( x, d_y, d_x, mean, var, scale, epsilon, feature_size)); } break; case 5: // d_x != nulptr, d_scale == nullptr, d_bias != nullptr switch (block_dim) { FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE( feature_size, kMaxBlockNum, LayerNormBackwardGradientScaleOrBias< T, U, kBlockDim, true, false><<<block_num, kBlockDim, 0, stream>>>( x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon, batch_size, feature_size, col_offset)); } switch (GetDesiredBlockDim(feature_size)) { FIXED_BLOCK_DIM_CASE( LayerNormBackwardPostProcessToCalculateDX< T, U, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>( x, d_x, mean, var, epsilon, feature_size)); } break; case 6: // d_x != nullptr, d_scale != nullptr, d_bias == nullptr switch (block_dim) { FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE( feature_size, kMaxBlockNum, LayerNormBackwardGradientScaleOrBias< T, U, kBlockDim, true, true><<<block_num, kBlockDim, 0, stream>>>( x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon, batch_size, feature_size, col_offset)); } switch (GetDesiredBlockDim(feature_size)) { FIXED_BLOCK_DIM_CASE( LayerNormBackwardPostProcessToCalculateDX< T, U, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>( x, d_x, mean, var, epsilon, feature_size)); } break; 
case 7: // d_x != nullptr, d_scale != nullptr, d_bias != nullptr { constexpr int VPT = 4; constexpr int BDIMX2 = 32; constexpr int BDIMY2 = 4; dim3 threads2(BDIMX2, BDIMY2, 1); constexpr int part_size = BDIMY2 * VPT; const dim3 blocks2((feature_size + BDIMX2 - 1) / BDIMX2, part_size, 1); auto part_grad_gamma_ptr = memory::Alloc(dev_ctx, part_size * feature_size * sizeof(U)); auto part_grad_beta_ptr = memory::Alloc(dev_ctx, part_size * feature_size * sizeof(U)); U *part_grad_gamma = reinterpret_cast<U *>(part_grad_gamma_ptr->ptr()); U *part_grad_beta = reinterpret_cast<U *>(part_grad_beta_ptr->ptr()); LayerNormBackwardPartGradGammaBeta<T, U, BDIMX2, BDIMY2, VPT><<<blocks2, threads2, 0, stream>>>( d_y, x, batch_size, feature_size, mean, var, epsilon, part_grad_gamma, part_grad_beta); // compute part_grad_gamma, beta constexpr int BDIMX3 = 32; constexpr int BDIMY3 = 8; dim3 threads3(BDIMX3, BDIMY3, 1); const dim3 blocks3((feature_size + BDIMX2 - 1) / BDIMX2, 1, 1); LayerNormBackwardSumGradGammaBeta< T, U, BDIMX3, BDIMY3><<<blocks3, threads3, 0, stream>>>( part_grad_gamma, part_grad_beta, part_size, batch_size, feature_size, d_scale, d_bias); constexpr int BDIMX1 = 32; constexpr int BDIMY1 = 4; dim3 threads1(BDIMX1, BDIMY1, 1); const dim3 blocks1(1, batch_size, 1); LayerNormBackwardComputeGradInput< T, U, BDIMX1, BDIMY1><<<blocks1, threads1, 0, stream>>>( d_y, x, batch_size, feature_size, mean, var, epsilon, scale, d_x); break; } default: break; } } template <typename T> void LayerNormDirectCUDAFunctor<T>::operator()(gpuStream_t stream, const T *input, std::vector<int> input_shape, const T *bias, const T *scale, T *output, T *mean, T *variance, int begin_norm_axis, float eps) { const auto x_dims = framework::make_ddim(input_shape); auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); int batch_size = static_cast<int>(matrix_dim[0]); int feature_size = static_cast<int>(matrix_dim[1]); switch (GetDesiredBlockDim(feature_size)) { FIXED_BLOCK_DIM_CASE( LayerNormForward<T, T, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>( input, scale, bias, output, mean, variance, eps, feature_size)); default: PADDLE_THROW(platform::errors::InvalidArgument( "Product from begin_norm_axis to end in layer_norm must be larger " "than 1")); break; } } template <typename T> class LayerNormKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { using U = LayerNormParamType<T>; const float epsilon = ctx.Attr<float>("epsilon"); auto *scale = ctx.Input<Tensor>("Scale"); auto *bias = ctx.Input<Tensor>("Bias"); auto *x = ctx.Input<Tensor>("X"); auto *y = ctx.Output<Tensor>("Y"); auto *mean = ctx.Output<Tensor>("Mean"); auto *var = ctx.Output<Tensor>("Variance"); const auto begin_norm_axis = ctx.Attr<int>("begin_norm_axis"); const auto x_dims = x->dims(); auto *x_data = x->data<T>(); auto *y_data = y->mutable_data<T>(ctx.GetPlace()); auto *mean_data = mean->mutable_data<U>(ctx.GetPlace()); auto *var_data = var->mutable_data<U>(ctx.GetPlace()); auto *scale_data = (scale == nullptr ? nullptr : scale->data<U>()); auto *bias_data = (bias == nullptr ? 
nullptr : bias->data<U>()); auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); int batch_size = static_cast<int>(matrix_dim[0]); int feature_size = static_cast<int>(matrix_dim[1]); auto stream = ctx.cuda_device_context().stream(); switch (GetDesiredBlockDim(feature_size)) { FIXED_BLOCK_DIM_CASE( LayerNormForward<T, U, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>( x_data, scale_data, bias_data, y_data, mean_data, var_data, epsilon, feature_size)); default: PADDLE_THROW(platform::errors::InvalidArgument( "Product from begin_norm_axis to end must be larger than 1")); break; } } }; template <typename T> class LayerNormGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { using U = LayerNormParamType<T>; const float epsilon = ctx.Attr<float>("epsilon"); // d_x, d_scale, d_bias may be nullptr auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X")); auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale")); auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias")); auto *x = ctx.Input<Tensor>("X"); auto *mean = ctx.Input<Tensor>("Mean"); auto *var = ctx.Input<Tensor>("Variance"); auto *scale = ctx.Input<Tensor>("Scale"); auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y")); auto *x_data = x->data<T>(); auto *d_y_data = d_y->data<T>(); auto *mean_data = mean->data<U>(); auto *var_data = var->data<U>(); auto *scale_data = (scale == nullptr ? nullptr : scale->data<U>()); auto *d_scale_data = (d_scale == nullptr ? nullptr : d_scale->mutable_data<U>(ctx.GetPlace())); auto *d_bias_data = (d_bias == nullptr ? nullptr : d_bias->mutable_data<U>(ctx.GetPlace())); auto *d_x_data = (d_x == nullptr ? nullptr : d_x->mutable_data<T>(ctx.GetPlace())); const auto &x_dims = x->dims(); const auto begin_norm_axis = ctx.Attr<int>("begin_norm_axis"); auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); int batch_size = static_cast<int>(matrix_dim[0]); int feature_size = static_cast<int>(matrix_dim[1]); LayerNormBackward<T, U>(x_data, d_y_data, scale_data, mean_data, var_data, d_x_data, d_scale_data, d_bias_data, epsilon, batch_size, feature_size, ctx); } }; template class LayerNormDirectCUDAFunctor<float>; #undef FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE #undef FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE #undef FIXED_BLOCK_DIM_CASE_BASE #undef FIXED_BLOCK_DIM_CASE } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; #ifdef PADDLE_WITH_HIP // MIOPEN do not support double REGISTER_OP_CUDA_KERNEL( layer_norm, ops::LayerNormKernel<paddle::platform::CUDADeviceContext, float>, ops::LayerNormKernel<paddle::platform::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( layer_norm_grad, ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, float>, ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, plat::float16>); #else REGISTER_OP_CUDA_KERNEL( layer_norm, ops::LayerNormKernel<paddle::platform::CUDADeviceContext, float>, ops::LayerNormKernel<paddle::platform::CUDADeviceContext, double>, ops::LayerNormKernel<paddle::platform::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( layer_norm_grad, ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, float>, ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, double>, ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, plat::float16>); #endif
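For reference, the LayerNormForward kernel in the file above reduces each row of the flattened [batch_size, feature_size] matrix to a mean and biased variance, then computes y = scale * (x - mean) / sqrt(var + epsilon) + bias. Below is a minimal CPU sketch of that computation (a hypothetical float-only helper, not part of the Paddle sources) that can be used to sanity-check the GPU output:

#include <cmath>
#include <vector>

// Reference layer-norm forward pass over a [batch_size, feature_size] matrix,
// mirroring the per-row reduction and normalization done by LayerNormForward.
void LayerNormForwardRef(const std::vector<float> &x, const std::vector<float> &scale,
                         const std::vector<float> &bias, std::vector<float> *y,
                         std::vector<float> *mean, std::vector<float> *var,
                         float epsilon, int batch_size, int feature_size) {
  for (int b = 0; b < batch_size; ++b) {
    const float *row = &x[b * feature_size];
    float m = 0.f, sq = 0.f;
    for (int j = 0; j < feature_size; ++j) {  // accumulate E[x] and E[x^2]
      m += row[j];
      sq += row[j] * row[j];
    }
    m /= feature_size;
    const float v = sq / feature_size - m * m;  // biased variance, as stored by the kernel
    (*mean)[b] = m;
    (*var)[b] = v;
    const float inv_std = 1.f / std::sqrt(v + epsilon);
    for (int j = 0; j < feature_size; ++j) {
      (*y)[b * feature_size + j] = scale[j] * (row[j] - m) * inv_std + bias[j];
    }
  }
}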
8fd2126b2101849ce6b5330342635da2a8339ee1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "core.h" #include <hip/hip_fp16.h> namespace v0 //cuda baseline { static __global__ void cudaCallbackKernel( // const int width, // const int height, // const float *__restrict__ input, // float *__restrict__ output) // { const int idy = blockIdx.y * blockDim.y + threadIdx.y; // const int idx = blockIdx.x * blockDim.x + threadIdx.x; // if (idy < height && idx < width) { int cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; // for (int offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < height) for (int offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(int)input[py * width + px]]; } } double n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)), // n_inv = 1.0 / n, ans = log(n); //ans = logn - n_i/n*log(n_i) for (int i = 0; i < 16; ++i) if (cnt[i]) ans -= log((double)cnt[i]) * cnt[i] * n_inv; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; // CHECK(hipMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(hipMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(hipMemcpy(input_d, sample, sizeof(float) * width * height, hipMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X), divup(height, BLOCK_DIM_Y)); hipLaunchKernelGGL(( cudaCallbackKernel), dim3(gridDim), dim3(blockDim), 0, 0, width, height, input_d, output_d); // *result = (float *)malloc(sizeof(float) * width * height); CHECK(hipMemcpy(*result, output_d, sizeof(float) * width * height, hipMemcpyDeviceToHost)); CHECK(hipFree(input_d)); CHECK(hipFree(output_d)); } } // namespace v0 namespace v1 //cuda log { static __global__ void cudaCallbackKernel( const int width, const int height, const float *__restrict__ input, float *__restrict__ output) { const int idy = blockIdx.y * blockDim.y + threadIdx.y; const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idy < height && idx < width) { int cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (int offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < height) for (int offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(int)input[py * width + px]]; } } const double mylog[26] = { 0.0, //log 00 log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; // const int n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (int i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * cnt[i] * n_inv; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; CHECK(hipMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(hipMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(hipMemcpy(input_d, sample, sizeof(float) * width * height, hipMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, 
BLOCK_DIM_X), divup(height, BLOCK_DIM_Y)); hipLaunchKernelGGL(( cudaCallbackKernel), dim3(gridDim), dim3(blockDim), 0, 0, width, height, input_d, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(hipMemcpy(*result, output_d, sizeof(float) * width * height, hipMemcpyDeviceToHost)); CHECK(hipFree(input_d)); CHECK(hipFree(output_d)); } } // namespace v1 namespace v2 //cuda logshared memory { static __global__ void cudaCallbackKernel( const int width, const int height, const float *__restrict__ input, float *__restrict__ output) { const int idy = blockIdx.y * blockDim.y + threadIdx.y; const int idx = blockIdx.x * blockDim.x + threadIdx.x; //shared memory __shared__ double mylog[26]; if (threadIdx.y == 0 && threadIdx.x < 26) mylog[threadIdx.x] = threadIdx.x == 0 ? 0.0 : log((double)threadIdx.x); __syncthreads(); if (idy < height && idx < width) { int cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (int offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < height) for (int offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(int)input[py * width + px]]; } } const int n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (int i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * cnt[i] * n_inv; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; CHECK(hipMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(hipMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(hipMemcpy(input_d, sample, sizeof(float) * width * height, hipMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X), divup(height, BLOCK_DIM_Y)); hipLaunchKernelGGL(( cudaCallbackKernel), dim3(gridDim), dim3(blockDim), 0, 0, width, height, input_d, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(hipMemcpy(*result, output_d, sizeof(float) * width * height, hipMemcpyDeviceToHost)); CHECK(hipFree(input_d)); CHECK(hipFree(output_d)); } } // namespace v2 namespace v3 //cuda logconstant memory { static __constant__ double mylog[26]; static __global__ void cudaCallbackKernel( const int width, const int height, const float *__restrict__ input, float *__restrict__ output) { const int idy = blockIdx.y * blockDim.y + threadIdx.y; const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idy < height && idx < width) { int cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (int offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < height) for (int offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(int)input[py * width + px]]; } } const int n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (int i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * cnt[i] * n_inv; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; const double mylog_h[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), 
log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; //constant memory CHECK(hipMemcpyToSymbol(mylog, (const double *)mylog_h, sizeof(mylog_h))); CHECK(hipMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(hipMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(hipMemcpy(input_d, sample, sizeof(float) * width * height, hipMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X), divup(height, BLOCK_DIM_Y)); hipLaunchKernelGGL(( cudaCallbackKernel), dim3(gridDim), dim3(blockDim), 0, 0, width, height, input_d, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(hipMemcpy(*result, output_d, sizeof(float) * width * height, hipMemcpyDeviceToHost)); CHECK(hipFree(input_d)); CHECK(hipFree(output_d)); } } // namespace v3 namespace v4 //cuda logdevice memory { static __device__ double mylog[26]; static __global__ void cudaCallbackKernel( const int width, const int height, const float *__restrict__ input, float *__restrict__ output) { const int idy = blockIdx.y * blockDim.y + threadIdx.y; const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idy < height && idx < width) { int cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (int offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < height) for (int offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(int)input[py * width + px]]; } } const int n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (int i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * cnt[i] * n_inv; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; const double mylog_h[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; CHECK(hipMemcpyToSymbol(mylog, (const double *)mylog_h, sizeof(mylog_h))); CHECK(hipMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(hipMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(hipMemcpy(input_d, sample, sizeof(float) * width * height, hipMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X), divup(height, BLOCK_DIM_Y)); hipLaunchKernelGGL(( cudaCallbackKernel), dim3(gridDim), dim3(blockDim), 0, 0, width, height, input_d, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(hipMemcpy(*result, output_d, sizeof(float) * width * height, hipMemcpyDeviceToHost)); CHECK(hipFree(input_d)); CHECK(hipFree(output_d)); } } // namespace v4 namespace v5 //cuda logtexure memory { static texture<float> mylog_tex; static __device__ float mylog[26]; //texture4float static __global__ void cudaCallbackKernel( const int width, const int height, const float *__restrict__ input, float *__restrict__ output) { const int idy = blockIdx.y * blockDim.y + threadIdx.y; const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idy < height && idx < width) { int cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (int offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; 
if (0 <= py && py < height) for (int offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(int)input[py * width + px]]; } } const int n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = tex1Dfetch(mylog_tex, n), n_inv = 1.0 / n; for (int i = 0; i < 16; ++i) ans -= tex1Dfetch(mylog_tex, cnt[i]) * cnt[i] * n_inv; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; float mylog_h[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}, *mylog_d; hipMemcpyToSymbol(mylog, mylog_h, sizeof(float) * 26); hipGetSymbolAddress((void **)&mylog_d, mylog); hipBindTexture(0, mylog_tex, mylog_d); CHECK(hipMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(hipMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(hipMemcpy(input_d, sample, sizeof(float) * width * height, hipMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X), divup(height, BLOCK_DIM_Y)); hipLaunchKernelGGL(( cudaCallbackKernel), dim3(gridDim), dim3(blockDim), 0, 0, width, height, input_d, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(hipMemcpy(*result, output_d, sizeof(float) * width * height, hipMemcpyDeviceToHost)); CHECK(hipFree(input_d)); CHECK(hipFree(output_d)); } } // namespace v5 namespace v6 //cuda log+ { static __global__ void cudaCallbackKernel( const int width, const int height, const float *__restrict__ input, float *__restrict__ output) { const int idy = blockIdx.y * blockDim.y + threadIdx.y; const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idy < height && idx < width) { signed char cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; //intchar4 for (signed char offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < height) for (signed char offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(signed char)input[py * width + px]]; } } const double mylog[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; const signed char n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (signed char i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * cnt[i] * n_inv; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; CHECK(hipMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(hipMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(hipMemcpy(input_d, sample, sizeof(float) * width * height, hipMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X), divup(height, BLOCK_DIM_Y)); hipLaunchKernelGGL(( cudaCallbackKernel), dim3(gridDim), dim3(blockDim), 0, 0, width, height, 
input_d, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(hipMemcpy(*result, output_d, sizeof(float) * width * height, hipMemcpyDeviceToHost)); CHECK(hipFree(input_d)); CHECK(hipFree(output_d)); } } // namespace v6 namespace v7 //cuda log++ { template <int BLOCK_DIM_X> static __global__ __launch_bounds__(BLOCK_DIM_X) void cudaCallbackKernel( const int width, const int height, const float *__restrict__ input, float *__restrict__ output) { const int idy = blockIdx.y; const int idx = blockIdx.x * BLOCK_DIM_X + threadIdx.x; if (idy < height && idx < width) { __shared__ signed char cnts[16][BLOCK_DIM_X]; signed char *cnt = cnts[0] + threadIdx.x; for (signed char i = 0; i < 16; ++i) cnt[i * BLOCK_DIM_X] = 0; for (signed char offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < height) for (signed char offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(int)input[py * width + px] * BLOCK_DIM_X]; } } const float mylog[24] = { 2 * log(2.0), 3 * log(3.0), 4 * log(4.0), 5 * log(5.0), 6 * log(6.0), 7 * log(7.0), 8 * log(8.0), 9 * log(9.0), 10 * log(10.0), 11 * log(11.0), 12 * log(12.0), 13 * log(13.0), 14 * log(14.0), 15 * log(15.0), 16 * log(16.0), 17 * log(17.0), 18 * log(18.0), 19 * log(19.0), 20 * log(20.0), 21 * log(21.0), 22 * log(22.0), 23 * log(23.0), 24 * log(24.0), 25 * log(25.0)}; const signed char n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n - 2]; for (signed char i = 0; i < 16; ++i) { signed char c = cnt[i * BLOCK_DIM_X] - (signed char)2; if (c >= 0) ans -= mylog[c]; } output[idy * width + idx] = ans / n; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; CHECK(hipMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(hipMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(hipMemcpy(input_d, sample, sizeof(float) * width * height, hipMemcpyHostToDevice)); const int BLOCK_DIM_X = 512; const dim3 blockDim(BLOCK_DIM_X), gridDim(divup(width, BLOCK_DIM_X), height); hipLaunchKernelGGL(( cudaCallbackKernel<BLOCK_DIM_X>), dim3(gridDim), dim3(blockDim), 0, 0, width, height, input_d, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(hipMemcpy(*result, output_d, sizeof(float) * width * height, hipMemcpyDeviceToHost)); CHECK(hipFree(input_d)); CHECK(hipFree(output_d)); } } // namespace v7 namespace v8 //cuda log+++texure memory { static __global__ void cudaCallbackKernel( hipTextureObject_t texObj, // const int width, const int height, float *__restrict__ output) { const int idy = blockIdx.y * blockDim.y + threadIdx.y; const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idy < height && idx < width) { signed char cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (signed char offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < height) for (signed char offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(signed char)tex2D<float>(texObj, px, py)]; } } const float mylog[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; const signed char n = (min(idx, 2) + 1 + min(width - idx, 2)) 
* (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (signed char i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * n_inv * cnt[i]; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *output_d; CHECK(hipMalloc((void **)&output_d, sizeof(float) * width * height)); hipArray *cuArray; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); CHECK(hipMallocArray(&cuArray, &channelDesc, width, height)); CHECK(hipMemcpy2DToArray(cuArray, 0, 0, sample, sizeof(float) * width, sizeof(float) * width, height, hipMemcpyHostToDevice)); // cudaArray struct hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypeArray; resDesc.res.array.array = cuArray; // struct hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = hipReadModeElementType; // hipTextureObject_t texObj = 0; CHECK(hipCreateTextureObject(&texObj, &resDesc, &texDesc, NULL)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X), divup(height, BLOCK_DIM_Y)); hipLaunchKernelGGL(( cudaCallbackKernel), dim3(gridDim), dim3(blockDim), 0, 0, texObj, width, height, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(hipMemcpy(*result, output_d, sizeof(float) * width * height, hipMemcpyDeviceToHost)) CHECK(hipDestroyTextureObject(texObj)); CHECK(hipFreeArray(cuArray)); CHECK(hipFree(output_d)); } } // namespace v8 namespace v9 //cuda log+++shared memory { template < int BLOCK_DIM_X, int BLOCK_DIM_Y> static __global__ void cudaCallbackKernel( const int width, const int height, const float *__restrict__ input, float *__restrict__ output) { const int idy = blockIdx.y * (BLOCK_DIM_Y - 4) + threadIdx.y - 2; const int idx = blockIdx.x * (BLOCK_DIM_X - 4) + threadIdx.x - 2; //shared memory __shared__ char input_s[BLOCK_DIM_Y][BLOCK_DIM_X | 1]; //16 input_s[threadIdx.y][threadIdx.x] = 0 <= idy && idy < height && 0 <= idx && idx < width ? 
input[idy * width + idx] : 16; __syncthreads(); if (1 < threadIdx.y && threadIdx.y < BLOCK_DIM_Y - 2 && 1 < threadIdx.x && threadIdx.x < BLOCK_DIM_X - 2 && idy < height && idx < width) { signed char cnt[17] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; // the counter has one extra slot for the out-of-range padding value (16)
for (signed char offsety = -2; offsety <= 2; ++offsety) for (signed char offsetx = -2; offsetx <= 2; ++offsetx) ++cnt[input_s[threadIdx.y + offsety][threadIdx.x + offsetx]]; const float mylog[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; const signed char n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (signed char i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * n_inv * cnt[i]; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; CHECK(hipMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(hipMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(hipMemcpy(input_d, sample, sizeof(float) * width * height, hipMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X - 4), divup(height, BLOCK_DIM_Y - 4)); hipLaunchKernelGGL(( cudaCallbackKernel< BLOCK_DIM_X, BLOCK_DIM_Y>), dim3(gridDim), dim3(blockDim), 0, 0, width, height, input_d, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(hipMemcpy(*result, output_d, sizeof(float) * width * height, hipMemcpyDeviceToHost)); CHECK(hipFree(input_d)); CHECK(hipFree(output_d)); } } // namespace v9
namespace v10 //openmp baseline { static void cudaCallback( int width, int height, float *sample, float **result) { *result = (float *)malloc(sizeof(float) * width * height); #pragma omp parallel for // positions carry no loop dependency, so the loop parallelizes directly
for (int pos = 0; pos < width * height; ++pos) { const int idy = pos / width, idx = pos - idy * width; int cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (int offsety = -2; offsety <= 2; ++offsety) for (int offsetx = -2; offsetx <= 2; ++offsetx) { const int py = idy + offsety, px = idx + offsetx; if (0 <= py && py < height && 0 <= px && px < width) ++cnt[(int)sample[py * width + px]]; } double n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)), ans = log(n), n_inv = 1.0 / n; for (int i = 0; i < 16; ++i) if (cnt[i]) ans -= log((double)cnt[i]) * cnt[i] * n_inv; (*result)[pos] = ans; } } } // namespace v10
namespace v11 //openmp precompute the log table into registers
{ static void cudaCallback( int width, int height, float *sample, float **result) { *result = (float *)malloc(sizeof(float) * width * height); #pragma omp parallel for for (int pos = 0; pos < width * height; ++pos) { const int idy = pos / width, idx = pos - idy * width; int cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (int offsety = -2; offsety <= 2; ++offsety) for (int offsetx = -2; offsetx <= 2; ++offsetx) { const int py = idy + offsety, px = idx + offsetx; if (0 <= py && py < height && 0 <= px && px < width) ++cnt[(int)sample[py * width + px]]; } const double mylog[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; // precomputed log table; the values are already known at compile time
const int n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (int i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * n_inv * cnt[i]; (*result)[pos] = ans; } } } // namespace v11
namespace v12 //openmp precompute the log table into registers + use smaller integer types
{ static void cudaCallback( int width, int height, float *sample, float **result) { *result = (float *)malloc(sizeof(float) * width * height); #pragma omp parallel for for (int pos = 0; pos < width * height; ++pos) { const int idy = pos / width, idx = pos - idy * width; signed char cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (signed char offsety = -2; offsety <= 2; ++offsety) for (signed char offsetx = -2; offsetx <= 2; ++offsetx) { const int py = idy + offsety, px = idx + offsetx; if (0 <= py && py < height && 0 <= px && px < width) ++cnt[(signed char)sample[py * width + px]]; } const float mylog[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; const signed char n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (signed char i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * n_inv * cnt[i]; (*result)[pos] = ans; } } } // namespace v12
namespace v13 //openmp precompute the log table + smaller types + 2D prefix sums
{ static void cudaCallback( int width, int height, float *sample, float **result) { *result = (float *)malloc(sizeof(float) * width * height); int *sum[16]; #pragma omp parallel for // precompute the prefix sums of the contribution of X = x_i to the answer
for (int i = 0; i < 16; ++i) { int *p = (int *)malloc(sizeof(int) * (width + 5) * (height + 5)); for (int pos = 0; pos < (width + 5) * (height + 5); ++pos) { const int idy = pos / (width + 5), idx = pos - idy * (width + 5); if (idy && idx) { p[pos] = p[(idy - 1) * (width + 5) + idx] + p[idy * (width + 5) + idx - 1] - p[(idy - 1) * (width + 5) + (idx - 1)]; const int py = idy - 3, px = idx - 3; if (0 <= py && py < height && 0 <= px && px < width && i == sample[py * width + px]) ++p[pos]; // the element at this position equals i, so it contributes
} else p[pos] = 0; } sum[i] = p; } #pragma omp parallel for for (int pos = 0; pos < width * height; ++pos) { const int idy = pos / width, idx = pos - idy * width; const float mylog[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; const signed char n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (signed char i = 0; i < 16; ++i) { const signed char cnti = sum[i][(idy + 5) * (width + 5) + idx + 5] - sum[i][(idy + 5) * (width + 5) + idx] - sum[i][idy * (width + 5) + idx + 5] + sum[i][idy * (width + 5) + idx]; // window count obtained from the prefix sums via inclusion-exclusion
ans -= mylog[cnti] * n_inv * cnti; } (*result)[pos] = ans; } for (int i = 0; i < 16; ++i) free(sum[i]); } } // namespace v13
namespace v14 //cuda+openmp multi-GPU version, based on v7 and v12
{ static void cudaCallback( int width, int height, float *sample, float **result) { int num_gpus = 0; CHECK(hipGetDeviceCount(&num_gpus)); if (num_gpus > height - 4) // use fewer GPUs when there are more GPUs than rows that can be split
num_gpus = height - 4; if (num_gpus < 1 || width * height < (80 * 2048)) // a single V100 has 80 SMs with at most 2048 resident threads each; when the GPU cannot be filled, fall back to the CPU version
return v12::cudaCallback(width, height, sample, result); if (num_gpus < 2) // with only one GPU, call the single-GPU version directly to avoid the extra overhead
return v7::cudaCallback(width, height, sample, result); *result = (float *)malloc(sizeof(float) * width * height); #pragma omp parallel num_threads(num_gpus) { int thread_num = omp_get_thread_num(), thread_hgt = (height - 4) / num_gpus, // effective number of rows (height) handled by each thread
thread_beg = thread_hgt * thread_num + 2; if (thread_num == num_gpus - 1) // special-case the last thread, since the division may not be exact
thread_hgt = height - 2 - thread_beg; float *thread_result; CHECK(hipSetDevice(thread_num)); // each thread drives a different GPU
v7::cudaCallback( // split into subproblems, each solved by the single-GPU version
width, thread_hgt + 4, sample + width * (thread_beg - 2), &thread_result); float *dst = (*result) + width * thread_beg, *src = thread_result + width * 2; if (thread_num == 0) // thread 0's top boundary rows are valid output as well
dst -= width * 2, src -= width * 2, thread_hgt += 2; if (thread_num == num_gpus - 1) // the last thread's bottom boundary rows are valid as well
thread_hgt += 2; memcpy( // copy the subproblem result back into the full output
dst, src, sizeof(float) * width * thread_hgt); free(thread_result); // free the subproblem buffer
} } } // namespace v14
struct WarmUP { WarmUP(int W, int H) { void (*cudaCallback[])(int, int, float *, float **) = { v0::cudaCallback, v1::cudaCallback, v2::cudaCallback, v3::cudaCallback, v4::cudaCallback, v5::cudaCallback, v6::cudaCallback, v7::cudaCallback, v8::cudaCallback, v9::cudaCallback, v10::cudaCallback, v11::cudaCallback, v12::cudaCallback, v13::cudaCallback}; // the multi-GPU version only calls the single-GPU ones, so it needs no separate warm-up
float *sample = (float *)malloc(sizeof(float) * W * H); #pragma omp parallel { unsigned seed = omp_get_thread_num(); // each thread uses a different random seed
#pragma omp for for (int i = 0; i < W * H; ++i) sample[i] = rand_r(&seed) & 15; // rand_r is the thread-safe random number generator
} for (int i = 0; i < sizeof(cudaCallback) / sizeof(cudaCallback[0]); ++i) { int num_gpus = 0; CHECK(hipGetDeviceCount(&num_gpus)); #pragma omp parallel num_threads(num_gpus) // warm up every GPU
{ float *result; int thread_num = omp_get_thread_num(); CHECK(hipSetDevice(thread_num)); cudaCallback[i](W, H, sample, &result); free(result); } } free(sample); } };
struct Benchmark { Benchmark(int W, int H) { void (*cudaCallback[])(int, int, float *, float **) = { v0::cudaCallback, v1::cudaCallback, v2::cudaCallback, v3::cudaCallback, v4::cudaCallback, v5::cudaCallback, v6::cudaCallback, v7::cudaCallback, v8::cudaCallback, v9::cudaCallback, v10::cudaCallback, v11::cudaCallback, v12::cudaCallback, v13::cudaCallback, v14::cudaCallback}; float *sample = (float *)malloc(sizeof(float) * W * H); #pragma omp parallel { unsigned seed = omp_get_thread_num(); #pragma omp for for (int i = 0; i < W * H; ++i) sample[i] = rand_r(&seed) & 15; } printf("\n\nStart benchmark with matrix size %d * %d:\n\n", W, H); // start the benchmark
for (int i = 0; i < sizeof(cudaCallback) / sizeof(cudaCallback[0]); ++i) { float *result; hipEvent_t beg, end; hipEventCreate(&beg); hipEventCreate(&end); hipEventRecord(beg); cudaCallback[i](W, H, sample, &result); hipEventRecord(end); hipEventSynchronize(beg); hipEventSynchronize(end); float elapsed_time; hipEventElapsedTime( &elapsed_time, beg, end); printf("Version %d: %fms\n", i, elapsed_time); free(result); } printf("\n\nFinish benchmark with matrix size %d * %d.\n\n", W, H); free(sample); } };
static WarmUP warm_up(1, 1); static Benchmark benchmark400(400, 400), benchmark2560(2560, 2560), benchmark10240(10240, 10240); void cudaCallback( int width, int height, float *sample, float **result) { v14::cudaCallback(width, height, sample, result); }
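For reference, every version in the file above computes, per pixel, the Shannon entropy of the value histogram of its clipped 5x5 neighbourhood, using the identity H = log(n) - (1/n) * sum_i cnt[i] * log(cnt[i]); that is exactly what the `ans = mylog[n] - mylog[cnt[i]] * cnt[i] * n_inv` loops implement. The sketch below is a minimal single-threaded host reference in the spirit of v10 and is not part of the original sources; the function name `entropy_reference` is made up here, and the sketch reproduces the original `n` formula unchanged (including its `min(width - idx, 2)` border term).

#include <cmath>
#include <algorithm>

// Single-threaded reference for the per-pixel neighbourhood entropy.
// `result` must point to width * height floats allocated by the caller.
static void entropy_reference(int width, int height, const float *sample, float *result)
{
    for (int idy = 0; idy < height; ++idy)
        for (int idx = 0; idx < width; ++idx)
        {
            int cnt[16] = {0}; // histogram of the values 0..15 inside the clipped window
            for (int offsety = -2; offsety <= 2; ++offsety)
                for (int offsetx = -2; offsetx <= 2; ++offsetx)
                {
                    const int py = idy + offsety, px = idx + offsetx;
                    if (0 <= py && py < height && 0 <= px && px < width)
                        ++cnt[(int)sample[py * width + px]];
                }
            // Window size as defined by the original code.
            const double n = (std::min(idx, 2) + 1 + std::min(width - idx, 2)) *
                             (std::min(idy, 2) + 1 + std::min(height - idy, 2));
            double ans = std::log(n);
            for (int i = 0; i < 16; ++i)
                if (cnt[i])
                    ans -= std::log((double)cnt[i]) * cnt[i] / n;
            result[idy * width + idx] = (float)ans;
        }
}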
8fd2126b2101849ce6b5330342635da2a8339ee1.cu
#include "core.h" #include <cuda_fp16.h> namespace v0 //cuda baseline { static __global__ void cudaCallbackKernel( //调用的核函数 const int width, // 输入矩阵宽,下同 const int height, //输入矩阵高,下同 const float *__restrict__ input, //输入矩阵 float *__restrict__ output) //输出矩阵 { const int idy = blockIdx.y * blockDim.y + threadIdx.y; //该线程对应元素的行坐标 const int idx = blockIdx.x * blockDim.x + threadIdx.x; //该线程对应元素的列坐标 if (idy < height && idx < width) { int cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; //循环中统计每个元素的出现次数 for (int offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < height) for (int offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(int)input[py * width + px]]; } } double n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)), //当前位置邻域的大小 n_inv = 1.0 / n, ans = log(n); //ans = logn - n_i/n*log(n_i) for (int i = 0; i < 16; ++i) if (cnt[i]) ans -= log((double)cnt[i]) * cnt[i] * n_inv; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; //接下来在显卡上分配内存空间,并将输入拷贝到显卡上 CHECK(cudaMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(cudaMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(cudaMemcpy(input_d, sample, sizeof(float) * width * height, cudaMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X), divup(height, BLOCK_DIM_Y)); cudaCallbackKernel<<< gridDim, blockDim>>>( width, height, input_d, output_d); //将结果写回,并释放显存空间 *result = (float *)malloc(sizeof(float) * width * height); CHECK(cudaMemcpy(*result, output_d, sizeof(float) * width * height, cudaMemcpyDeviceToHost)); CHECK(cudaFree(input_d)); CHECK(cudaFree(output_d)); } } // namespace v0 namespace v1 //cuda 预处理log到寄存器 { static __global__ void cudaCallbackKernel( const int width, const int height, const float *__restrict__ input, float *__restrict__ output) { const int idy = blockIdx.y * blockDim.y + threadIdx.y; const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idy < height && idx < width) { int cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (int offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < height) for (int offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(int)input[py * width + px]]; } } const double mylog[26] = { 0.0, //log 0设置为0 log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; //预处理对数表到寄存器。此处计算在编译时就已经完成 const int n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (int i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * cnt[i] * n_inv; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; CHECK(cudaMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(cudaMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(cudaMemcpy(input_d, sample, sizeof(float) * width * height, cudaMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, 
BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X), divup(height, BLOCK_DIM_Y)); cudaCallbackKernel<<< gridDim, blockDim>>>( width, height, input_d, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(cudaMemcpy(*result, output_d, sizeof(float) * width * height, cudaMemcpyDeviceToHost)); CHECK(cudaFree(input_d)); CHECK(cudaFree(output_d)); } } // namespace v1 namespace v2 //cuda 预处理log到shared memory { static __global__ void cudaCallbackKernel( const int width, const int height, const float *__restrict__ input, float *__restrict__ output) { const int idy = blockIdx.y * blockDim.y + threadIdx.y; const int idx = blockIdx.x * blockDim.x + threadIdx.x; //shared memory不允许直接初始化,要在运行的时候由每个线程计算 __shared__ double mylog[26]; if (threadIdx.y == 0 && threadIdx.x < 26) mylog[threadIdx.x] = threadIdx.x == 0 ? 0.0 : log((double)threadIdx.x); __syncthreads(); if (idy < height && idx < width) { int cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (int offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < height) for (int offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(int)input[py * width + px]]; } } const int n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (int i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * cnt[i] * n_inv; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; CHECK(cudaMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(cudaMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(cudaMemcpy(input_d, sample, sizeof(float) * width * height, cudaMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X), divup(height, BLOCK_DIM_Y)); cudaCallbackKernel<<< gridDim, blockDim>>>( width, height, input_d, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(cudaMemcpy(*result, output_d, sizeof(float) * width * height, cudaMemcpyDeviceToHost)); CHECK(cudaFree(input_d)); CHECK(cudaFree(output_d)); } } // namespace v2 namespace v3 //cuda 预处理log到constant memory { static __constant__ double mylog[26]; static __global__ void cudaCallbackKernel( const int width, const int height, const float *__restrict__ input, float *__restrict__ output) { const int idy = blockIdx.y * blockDim.y + threadIdx.y; const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idy < height && idx < width) { int cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (int offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < height) for (int offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(int)input[py * width + px]]; } } const int n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (int i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * cnt[i] * n_inv; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; const double mylog_h[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), 
log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; //计算并将值发送到constant memory CHECK(cudaMemcpyToSymbol(mylog, (const double *)mylog_h, sizeof(mylog_h))); CHECK(cudaMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(cudaMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(cudaMemcpy(input_d, sample, sizeof(float) * width * height, cudaMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X), divup(height, BLOCK_DIM_Y)); cudaCallbackKernel<<< gridDim, blockDim>>>( width, height, input_d, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(cudaMemcpy(*result, output_d, sizeof(float) * width * height, cudaMemcpyDeviceToHost)); CHECK(cudaFree(input_d)); CHECK(cudaFree(output_d)); } } // namespace v3 namespace v4 //cuda 预处理log到device memory { static __device__ double mylog[26]; static __global__ void cudaCallbackKernel( const int width, const int height, const float *__restrict__ input, float *__restrict__ output) { const int idy = blockIdx.y * blockDim.y + threadIdx.y; const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idy < height && idx < width) { int cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (int offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < height) for (int offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(int)input[py * width + px]]; } } const int n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (int i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * cnt[i] * n_inv; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; const double mylog_h[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; CHECK(cudaMemcpyToSymbol(mylog, (const double *)mylog_h, sizeof(mylog_h))); CHECK(cudaMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(cudaMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(cudaMemcpy(input_d, sample, sizeof(float) * width * height, cudaMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X), divup(height, BLOCK_DIM_Y)); cudaCallbackKernel<<< gridDim, blockDim>>>( width, height, input_d, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(cudaMemcpy(*result, output_d, sizeof(float) * width * height, cudaMemcpyDeviceToHost)); CHECK(cudaFree(input_d)); CHECK(cudaFree(output_d)); } } // namespace v4 namespace v5 //cuda 预处理log到texure memory { static texture<float> mylog_tex; static __device__ float mylog[26]; //texture只允许4字节的float static __global__ void cudaCallbackKernel( const int width, const int height, const float *__restrict__ input, float *__restrict__ output) { const int idy = blockIdx.y * blockDim.y + threadIdx.y; const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idy < height && idx < width) { int cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (int offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < 
height) for (int offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(int)input[py * width + px]]; } } const int n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = tex1Dfetch(mylog_tex, n), n_inv = 1.0 / n; for (int i = 0; i < 16; ++i) ans -= tex1Dfetch(mylog_tex, cnt[i]) * cnt[i] * n_inv; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; float mylog_h[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}, *mylog_d; cudaMemcpyToSymbol(mylog, mylog_h, sizeof(float) * 26); cudaGetSymbolAddress((void **)&mylog_d, mylog); cudaBindTexture(0, mylog_tex, mylog_d); CHECK(cudaMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(cudaMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(cudaMemcpy(input_d, sample, sizeof(float) * width * height, cudaMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X), divup(height, BLOCK_DIM_Y)); cudaCallbackKernel<<< gridDim, blockDim>>>( width, height, input_d, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(cudaMemcpy(*result, output_d, sizeof(float) * width * height, cudaMemcpyDeviceToHost)); CHECK(cudaFree(input_d)); CHECK(cudaFree(output_d)); } } // namespace v5 namespace v6 //cuda 预处理log到寄存器+使用更小的整型类型 { static __global__ void cudaCallbackKernel( const int width, const int height, const float *__restrict__ input, float *__restrict__ output) { const int idy = blockIdx.y * blockDim.y + threadIdx.y; const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idy < height && idx < width) { signed char cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; //寄存器类型从int改成char,减少了4倍寄存器压力 for (signed char offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < height) for (signed char offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(signed char)input[py * width + px]]; } } const double mylog[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; const signed char n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (signed char i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * cnt[i] * n_inv; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; CHECK(cudaMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(cudaMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(cudaMemcpy(input_d, sample, sizeof(float) * width * height, cudaMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X), divup(height, BLOCK_DIM_Y)); cudaCallbackKernel<<< gridDim, blockDim>>>( width, height, input_d, output_d); *result = (float 
*)malloc(sizeof(float) * width * height); CHECK(cudaMemcpy(*result, output_d, sizeof(float) * width * height, cudaMemcpyDeviceToHost)); CHECK(cudaFree(input_d)); CHECK(cudaFree(output_d)); } } // namespace v6 namespace v7 //cuda 预处理log到寄存器+使用更小的整型类型+使用更小的浮点类型 { template <int BLOCK_DIM_X> static __global__ __launch_bounds__(BLOCK_DIM_X) void cudaCallbackKernel( const int width, const int height, const float *__restrict__ input, float *__restrict__ output) { const int idy = blockIdx.y; const int idx = blockIdx.x * BLOCK_DIM_X + threadIdx.x; if (idy < height && idx < width) { __shared__ signed char cnts[16][BLOCK_DIM_X]; signed char *cnt = cnts[0] + threadIdx.x; for (signed char i = 0; i < 16; ++i) cnt[i * BLOCK_DIM_X] = 0; for (signed char offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < height) for (signed char offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(int)input[py * width + px] * BLOCK_DIM_X]; } } const float mylog[24] = { 2 * log(2.0), 3 * log(3.0), 4 * log(4.0), 5 * log(5.0), 6 * log(6.0), 7 * log(7.0), 8 * log(8.0), 9 * log(9.0), 10 * log(10.0), 11 * log(11.0), 12 * log(12.0), 13 * log(13.0), 14 * log(14.0), 15 * log(15.0), 16 * log(16.0), 17 * log(17.0), 18 * log(18.0), 19 * log(19.0), 20 * log(20.0), 21 * log(21.0), 22 * log(22.0), 23 * log(23.0), 24 * log(24.0), 25 * log(25.0)}; const signed char n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n - 2]; for (signed char i = 0; i < 16; ++i) { signed char c = cnt[i * BLOCK_DIM_X] - (signed char)2; if (c >= 0) ans -= mylog[c]; } output[idy * width + idx] = ans / n; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; CHECK(cudaMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(cudaMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(cudaMemcpy(input_d, sample, sizeof(float) * width * height, cudaMemcpyHostToDevice)); const int BLOCK_DIM_X = 512; const dim3 blockDim(BLOCK_DIM_X), gridDim(divup(width, BLOCK_DIM_X), height); cudaCallbackKernel<BLOCK_DIM_X><<< gridDim, blockDim>>>( width, height, input_d, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(cudaMemcpy(*result, output_d, sizeof(float) * width * height, cudaMemcpyDeviceToHost)); CHECK(cudaFree(input_d)); CHECK(cudaFree(output_d)); } } // namespace v7 namespace v8 //cuda 预处理log到寄存器+使用更小的整型类型+使用更小的浮点类型+使用texure memory优化读入 { static __global__ void cudaCallbackKernel( cudaTextureObject_t texObj, //使用纹理对象 const int width, const int height, float *__restrict__ output) { const int idy = blockIdx.y * blockDim.y + threadIdx.y; const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idy < height && idx < width) { signed char cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (signed char offsety = -2; offsety <= 2; ++offsety) { const int py = idy + offsety; if (0 <= py && py < height) for (signed char offsetx = -2; offsetx <= 2; ++offsetx) { const int px = idx + offsetx; if (0 <= px && px < width) ++cnt[(signed char)tex2D<float>(texObj, px, py)]; } } const float mylog[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; const signed char n = (min(idx, 2) + 1 + min(width - idx, 
2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (signed char i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * n_inv * cnt[i]; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *output_d; CHECK(cudaMalloc((void **)&output_d, sizeof(float) * width * height)); cudaArray *cuArray; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); CHECK(cudaMallocArray(&cuArray, &channelDesc, width, height)); CHECK(cudaMemcpy2DToArray(cuArray, 0, 0, sample, sizeof(float) * width, sizeof(float) * width, height, cudaMemcpyHostToDevice)); // 绑定纹理到cudaArray上 struct cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeArray; resDesc.res.array.array = cuArray; // 设置纹理为只读 struct cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = cudaReadModeElementType; // 创建纹理对象 cudaTextureObject_t texObj = 0; CHECK(cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X), divup(height, BLOCK_DIM_Y)); cudaCallbackKernel<<< gridDim, blockDim>>>( texObj, width, height, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(cudaMemcpy(*result, output_d, sizeof(float) * width * height, cudaMemcpyDeviceToHost)) CHECK(cudaDestroyTextureObject(texObj)); CHECK(cudaFreeArray(cuArray)); CHECK(cudaFree(output_d)); } } // namespace v8 namespace v9 //cuda 预处理log到寄存器+使用更小的整型类型+使用更小的浮点类型+使用shared memory优化读入 { template < int BLOCK_DIM_X, int BLOCK_DIM_Y> static __global__ void cudaCallbackKernel( const int width, const int height, const float *__restrict__ input, float *__restrict__ output) { const int idy = blockIdx.y * (BLOCK_DIM_Y - 4) + threadIdx.y - 2; const int idx = blockIdx.x * (BLOCK_DIM_X - 4) + threadIdx.x - 2; //读入shared memory __shared__ char input_s[BLOCK_DIM_Y][BLOCK_DIM_X | 1]; //溢出的值用16代替 input_s[threadIdx.y][threadIdx.x] = 0 <= idy && idy < height && 0 <= idx && idx < width ? 
input[idy * width + idx] : 16; __syncthreads(); if (1 < threadIdx.y && threadIdx.y < BLOCK_DIM_Y - 2 && 1 < threadIdx.x && threadIdx.x < BLOCK_DIM_X - 2 && idy < height && idx < width) { signed char cnt[17] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; //此处计数器多开一位用于非法值 for (signed char offsety = -2; offsety <= 2; ++offsety) for (signed char offsetx = -2; offsetx <= 2; ++offsetx) ++cnt[input_s[threadIdx.y + offsety][threadIdx.x + offsetx]]; const float mylog[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; const signed char n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (signed char i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * n_inv * cnt[i]; output[idy * width + idx] = ans; } } static void cudaCallback( int width, int height, float *sample, float **result) { float *input_d, *output_d; CHECK(cudaMalloc((void **)&input_d, sizeof(float) * width * height)); CHECK(cudaMalloc((void **)&output_d, sizeof(float) * width * height)); CHECK(cudaMemcpy(input_d, sample, sizeof(float) * width * height, cudaMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; const dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y), gridDim(divup(width, BLOCK_DIM_X - 4), divup(height, BLOCK_DIM_Y - 4)); cudaCallbackKernel< BLOCK_DIM_X, BLOCK_DIM_Y><<< gridDim, blockDim>>>( width, height, input_d, output_d); *result = (float *)malloc(sizeof(float) * width * height); CHECK(cudaMemcpy(*result, output_d, sizeof(float) * width * height, cudaMemcpyDeviceToHost)); CHECK(cudaFree(input_d)); CHECK(cudaFree(output_d)); } } // namespace v9 namespace v10 //openmp baseline { static void cudaCallback( int width, int height, float *sample, float **result) { *result = (float *)malloc(sizeof(float) * width * height); #pragma omp parallel for //每个位置没有循环依赖,可以直接并行 for (int pos = 0; pos < width * height; ++pos) { const int idy = pos / width, idx = pos - idy * width; int cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (int offsety = -2; offsety <= 2; ++offsety) for (int offsetx = -2; offsetx <= 2; ++offsetx) { const int py = idy + offsety, px = idx + offsetx; if (0 <= py && py < height && 0 <= px && px < width) ++cnt[(int)sample[py * width + px]]; } double n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)), ans = log(n), n_inv = 1.0 / n; for (int i = 0; i < 16; ++i) if (cnt[i]) ans -= log((double)cnt[i]) * cnt[i] * n_inv; (*result)[pos] = ans; } } } // namespace v10 namespace v11 //openmp 预处理log到寄存器 { static void cudaCallback( int width, int height, float *sample, float **result) { *result = (float *)malloc(sizeof(float) * width * height); #pragma omp parallel for for (int pos = 0; pos < width * height; ++pos) { const int idy = pos / width, idx = pos - idy * width; int cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (int offsety = -2; offsety <= 2; ++offsety) for (int offsetx = -2; offsetx <= 2; ++offsetx) { const int py = idy + offsety, px = idx + offsetx; if (0 <= py && py < height && 0 <= px && px < width) ++cnt[(int)sample[py * width + px]]; } const double mylog[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), 
log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; //预处理对数表,其值在编译时已求得 const int n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (int i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * n_inv * cnt[i]; (*result)[pos] = ans; } } } // namespace v11 namespace v12 //openmp 预处理log到寄存器+使用更小的类型 { static void cudaCallback( int width, int height, float *sample, float **result) { *result = (float *)malloc(sizeof(float) * width * height); #pragma omp parallel for for (int pos = 0; pos < width * height; ++pos) { const int idy = pos / width, idx = pos - idy * width; signed char cnt[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; for (signed char offsety = -2; offsety <= 2; ++offsety) for (signed char offsetx = -2; offsetx <= 2; ++offsetx) { const int py = idy + offsety, px = idx + offsetx; if (0 <= py && py < height && 0 <= px && px < width) ++cnt[(signed char)sample[py * width + px]]; } const float mylog[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; const signed char n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (signed char i = 0; i < 16; ++i) ans -= mylog[cnt[i]] * n_inv * cnt[i]; (*result)[pos] = ans; } } } // namespace v12 namespace v13 //openmp 预处理log到寄存器+使用更小的类型+预处理前缀和 { static void cudaCallback( int width, int height, float *sample, float **result) { *result = (float *)malloc(sizeof(float) * width * height); int *sum[16]; #pragma omp parallel for //此处预处理 X =x_i 对答案的贡献的前缀和 for (int i = 0; i < 16; ++i) { int *p = (int *)malloc(sizeof(int) * (width + 5) * (height + 5)); for (int pos = 0; pos < (width + 5) * (height + 5); ++pos) { const int idy = pos / (width + 5), idx = pos - idy * (width + 5); if (idy && idx) { p[pos] = p[(idy - 1) * (width + 5) + idx] + p[idy * (width + 5) + idx - 1] - p[(idy - 1) * (width + 5) + (idx - 1)]; const int py = idy - 3, px = idx - 3; if (0 <= py && py < height && 0 <= px && px < width && i == sample[py * width + px]) ++p[pos]; //当前位置上的元素是i的话可更新 } else p[pos] = 0; } sum[i] = p; } #pragma omp parallel for for (int pos = 0; pos < width * height; ++pos) { const int idy = pos / width, idx = pos - idy * width; const float mylog[26] = { 0.0, log(1.0), log(2.0), log(3.0), log(4.0), log(5.0), log(6.0), log(7.0), log(8.0), log(9.0), log(10.0), log(11.0), log(12.0), log(13.0), log(14.0), log(15.0), log(16.0), log(17.0), log(18.0), log(19.0), log(20.0), log(21.0), log(22.0), log(23.0), log(24.0), log(25.0)}; const signed char n = (min(idx, 2) + 1 + min(width - idx, 2)) * (min(idy, 2) + 1 + min(height - idy, 2)); double ans = mylog[n], n_inv = 1.0 / n; for (signed char i = 0; i < 16; ++i) { const signed char cnti = sum[i][(idy + 5) * (width + 5) + idx + 5] - sum[i][(idy + 5) * (width + 5) + idx] - sum[i][idy * (width + 5) + idx + 5] + sum[i][idy * (width + 5) + idx]; //用前缀和公式计算 ans -= mylog[cnti] * n_inv * cnti; } (*result)[pos] = ans; } for (int i = 0; i < 16; ++i) free(sum[i]); } } // namespace v13 namespace v14 //cuda+openmp 多卡,基于v7、v12 { static void cudaCallback( int width, int height, float *sample, float **result) { int num_gpus = 0; CHECK(cudaGetDeviceCount(&num_gpus)); if (num_gpus > height - 4) //显卡远多于可划分数据时适当减少使用的显卡 num_gpus = height - 4; if (num_gpus 
< 1 || width * height < (80 * 2048)) //单张V100有80个SM,每个SM最多2048个常驻线程,不能满载时直接使用 return v12::cudaCallback(width, height, sample, result); if (num_gpus < 2) //只有一张显卡时直接调用单卡版本减少开销 return v7::cudaCallback(width, height, sample, result); *result = (float *)malloc(sizeof(float) * width * height); #pragma omp parallel num_threads(num_gpus) { int thread_num = omp_get_thread_num(), thread_hgt = (height - 4) / num_gpus, //每个线程实际有效的height长度 thread_beg = thread_hgt * thread_num + 2; if (thread_num == num_gpus - 1) //最后一个线程特判,因为不一定整除 thread_hgt = height - 2 - thread_beg; float *thread_result; CHECK(cudaSetDevice(thread_num)); //不同线程指定不同显卡 v7::cudaCallback( //划分为子问题,分别交给单卡版本 width, thread_hgt + 4, sample + width * (thread_beg - 2), &thread_result); float *dst = (*result) + width * thread_beg, *src = thread_result + width * 2; if (thread_num == 0) //0号线程输出的上边界也是有效的 dst -= width * 2, src -= width * 2, thread_hgt += 2; if (thread_num == num_gpus - 1) //最后一个线程的下边界也是有效的 thread_hgt += 2; memcpy( //将子问题的答案拷贝回原问题 dst, src, sizeof(float) * width * thread_hgt); free(thread_result); //释放子问题的内存空间 } } } // namespace v14 struct WarmUP { WarmUP(int W, int H) { void (*cudaCallback[])(int, int, float *, float **) = { v0::cudaCallback, v1::cudaCallback, v2::cudaCallback, v3::cudaCallback, v4::cudaCallback, v5::cudaCallback, v6::cudaCallback, v7::cudaCallback, v8::cudaCallback, v9::cudaCallback, v10::cudaCallback, v11::cudaCallback, v12::cudaCallback, v13::cudaCallback}; //由于多卡版本是调用单卡版本实现的,因此无需热身 float *sample = (float *)malloc(sizeof(float) * W * H); #pragma omp parallel { unsigned seed = omp_get_thread_num(); //每个线程使用不同的随机数种子 #pragma omp for for (int i = 0; i < W * H; ++i) sample[i] = rand_r(&seed) & 15; //使用线程安全的随机数函数 } for (int i = 0; i < sizeof(cudaCallback) / sizeof(cudaCallback[0]); ++i) { int num_gpus = 0; CHECK(cudaGetDeviceCount(&num_gpus)); #pragma omp parallel num_threads(num_gpus) //对于每张显卡都要优化 { float *result; int thread_num = omp_get_thread_num(); CHECK(cudaSetDevice(thread_num)); cudaCallback[i](W, H, sample, &result); free(result); } } free(sample); } }; struct Benchmark { Benchmark(int W, int H) { void (*cudaCallback[])(int, int, float *, float **) = { v0::cudaCallback, v1::cudaCallback, v2::cudaCallback, v3::cudaCallback, v4::cudaCallback, v5::cudaCallback, v6::cudaCallback, v7::cudaCallback, v8::cudaCallback, v9::cudaCallback, v10::cudaCallback, v11::cudaCallback, v12::cudaCallback, v13::cudaCallback, v14::cudaCallback}; float *sample = (float *)malloc(sizeof(float) * W * H); #pragma omp parallel { unsigned seed = omp_get_thread_num(); #pragma omp for for (int i = 0; i < W * H; ++i) sample[i] = rand_r(&seed) & 15; } printf("\n\nStart benchmark with matrix size %d * %d:\n\n", W, H); //开始benchnmark for (int i = 0; i < sizeof(cudaCallback) / sizeof(cudaCallback[0]); ++i) { float *result; cudaEvent_t beg, end; cudaEventCreate(&beg); cudaEventCreate(&end); cudaEventRecord(beg); cudaCallback[i](W, H, sample, &result); cudaEventRecord(end); cudaEventSynchronize(beg); cudaEventSynchronize(end); float elapsed_time; cudaEventElapsedTime( &elapsed_time, beg, end); printf("Version %d: %fms\n", i, elapsed_time); free(result); } printf("\n\nFinish benchmark with matrix size %d * %d.\n\n", W, H); free(sample); } }; static WarmUP warm_up(1, 1); static Benchmark benchmark400(400, 400), benchmark2560(2560, 2560), benchmark10240(10240, 10240); void cudaCallback( int width, int height, float *sample, float **result) { v14::cudaCallback(width, height, sample, result); }
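v13 in the file above replaces the per-pixel 5x5 counting loop with one 2D prefix-sum (summed-area) table per value 0..15, so the number of occurrences of a value inside any axis-aligned rectangle comes from four table lookups by inclusion-exclusion; the padded table is what lets the clipped window be queried without explicit bounds checks. A minimal self-contained sketch of that build/query pattern follows; the names `build_prefix` and `count_in_rect` are illustrative and not taken from the original code.

#include <vector>

// Build an inclusive 2D prefix-sum table S with one extra leading row/column:
// S[y][x] = number of matches in rows [0, y) and columns [0, x).
static std::vector<std::vector<int>> build_prefix(const std::vector<std::vector<int>> &match)
{
    const int H = (int)match.size(), W = H ? (int)match[0].size() : 0;
    std::vector<std::vector<int>> S(H + 1, std::vector<int>(W + 1, 0));
    for (int y = 1; y <= H; ++y)
        for (int x = 1; x <= W; ++x)
            S[y][x] = S[y - 1][x] + S[y][x - 1] - S[y - 1][x - 1] + match[y - 1][x - 1];
    return S;
}

// Count of matches in the half-open rectangle rows [y0, y1) x columns [x0, x1):
// four lookups via inclusion-exclusion, independent of the rectangle size.
static int count_in_rect(const std::vector<std::vector<int>> &S,
                         int y0, int x0, int y1, int x1)
{
    return S[y1][x1] - S[y0][x1] - S[y1][x0] + S[y0][x0];
}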
dc66b9d31407a6187713cef53d0cba5564208077.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Use grid strided loops, described here: // https://devblogs.nvidia.com/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/ // This pattern ensures that all of the loop values are visited once, no matter // what grid parameters are used for the function. // We cannot include CUDA header for mathematical constants, since it requires // that the development headers of the CUDA toolkit are installed. template <typename T> struct Constants {}; template <> struct Constants<double> { static constexpr double INV_SQRT_2 = 0.7071067811865475; static constexpr double INV_SQRT_2PI = 0.3989422804014327; }; template <> struct Constants<float> { static constexpr float INV_SQRT_2 = 0.70710677; static constexpr float INV_SQRT_2PI = 0.3989423; }; template <typename U> __global__ void gather_add(U* out_bo, const U* table_to, const int* indices_bk, int T, int O, int B, int K) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int b = _loop_start; b < B; b += _loop_stride) { for (int k = 0; k < K; ++k) { int idx = indices_bk[b * K + k]; const U* table = table_to + idx * O; U* out = out_bo + b * O; for (int o = 0; o < O; ++o) { out[o] += table[o]; } } } } template <typename T> __global__ void seq2col(T* output, const T* X, const int* lengths, int nW, int B, int I, int nL) { // Let's say nW is 1 (it usually is). Then we want to take: // 1a 1b 1c // 2a 2b 2c // 3a 3b 3c // And make // __ __ __ 1a 1b 1c 2a 2b 2c // 1a 1b 1c 2a 2b 2c 3a 3b 3c // 2a 2b 2c 3a 3b 3c __ __ __ // Where __ is padding. // Now let's say nW is 2. Then we want to take: // 1a 1b 1c // 2a 2b 2c // 3a 3b 3c // And make // __ __ __ __ __ __ 1a 1b 1c 2a 2b 2c 3a 3b 3c // __ __ __ 1a 1b 1c 2a 2b 2c 3a 3b 3c __ __ __ // 1a 1b 1c 2a 2b 2c 3a 3b 3c __ __ __ __ __ __ // * x_start=-6, x_end=9 : (0-2) * 3, (0+2+1) * 3 // * x_start=-3, x_end=13 : (1-2) * 3, (1+2+1) * 3 // * x_start=0, x_end=16 : (2-2) * 3, (2+2+1) * 3 // // If lengths > 1, then the sequence lengths dictate // the boundaries/padding rather than the begin/end // of X. int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; int nF = nW * 2 + 1; int seq = 0; int seq_start = 0; for (int b = _loop_start; b < B; b += _loop_stride) { // Find sequence sequence in which b lies. for (; seq < nL; ++seq) { if (b < seq_start + lengths[seq]) { break; } seq_start += lengths[seq]; } // Calculate the bounds of the sequence wherein b lies. int seq_end = seq_start + lengths[seq]; // Find the unconstrained window around b, which // may be out of the sequence bounds. int window_start = b - nW; int window_end = b + nW + 1; // Find the sequence-constrained window around b. int x_start = max(seq_start, window_start); int x_end = min(seq_end, window_end); int n_elems = x_end - x_start; // If the left window is cut short, we want to start by // the same amount in the output. 
int out_offset = x_start - window_start; for (int i = 0; i < n_elems * I; i++) { output[(b * I * nF) + (out_offset * I) + i] = X[(x_start * I) + i]; } } } template <typename T> __global__ void maxout(T* best, int* which, const T* cands, int B, int O, int P) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int bo = _loop_start; bo < B * O; bo += _loop_stride) { // Go to the candidates at the output we're working on const T* cands_bo = &cands[bo * P]; int best_idx = 0; T best_val = cands_bo[0]; for (int p = 1; p < P; ++p) { if (cands_bo[p] > best_val) { best_idx = p; best_val = cands_bo[p]; } } which[bo] = best_idx; best[bo] = best_val; } } template <typename T> __global__ void clipped_linear(T* Y, const T* X, double slope, double offset, double min_val, double max_val, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { T y = X[i] * slope + offset; Y[i] = min(max(y, min_val), max_val); } } template <typename T> __global__ void dish(T* Y, const T* X, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { T x = X[i]; Y[i] = 0.5 * x * (x / sqrt(1 + x * x) + 1); } } template <typename T> __global__ void gelu(T* Y, const T* X, double threshold, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { T x = X[i]; if (x >= threshold) { Y[i] = x; } else if (x <= -threshold) { Y[i] = 0.0; } else { T cdf = 0.5 * (1.0 + erf(Constants<T>::INV_SQRT_2 * x)); Y[i] = x * cdf; } } } template <typename T> __global__ void mish(T* Y, const T* X, double threshold, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; T one = 1.; for (int i = _loop_start; i < N; i += _loop_stride) { if (X[i] >= threshold) Y[i] = X[i]; else Y[i] = X[i] * tanh(log(one + exp(X[i]))); } } template <typename T> __global__ void swish(T* Y, const T* X, double threshold, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { if (X[i] >= threshold) { Y[i] = X[i]; } else if (X[i] <= -threshold) { Y[i] = 0.0; } else { T logistic_cdf = 1.0 / (1.0 + exp(-X[i])); Y[i] = X[i] * logistic_cdf; } } } template <typename U> __global__ void reduce_sum(U* output, const U* X, const int* lengths, int B, int T, int O) { // Compute sums of a batch of concatenated sequences int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int b = _loop_start; b < B; b += _loop_stride) { // Go to the regions we're working on U* output_b = &output[b*O]; // Find the sequence item we're working on int t = 0; for (int i=0; i < b; ++i) { t += lengths[i]; } int length = lengths[b]; // Each invocation of the kernel sums one batch. 
for (int i=0; i < length; ++i) // Iterate over rows { const U* X_t = &X[(t+i)*O]; for (int j=0; j < O; ++j) { output_b[j] += X_t[j]; } } } } template <typename U> __global__ void reduce_max(U* maxes, int* which, const U* X, const int* lengths, int B, int T, int O) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int b = _loop_start; b < B; b += _loop_stride) { // Go to the regions we're working on U* maxes_b = &maxes[b*O]; int* which_b = &which[b*O]; // Find the sequence item we're working on const U* X_t = X; for (int i=0; i < b; ++i) { X_t += lengths[i] * O; } // Each invocation of the kernel maxes one sequence. // Start by assuming maxes are the first element. for (int i=0; i < O; ++i) { maxes_b[i] = X_t[i]; which_b[i] = 0; } int length = lengths[b]; for (int i=1; i < length; ++i) // Iterate over rows { X_t += O; for (int j=0; j < O; ++j) { if (X_t[j] > maxes_b[j]) { maxes_b[j] = X_t[j]; which_b[j] = i; } } } } } template <typename T> __global__ void backprop_seq2col(T* d_seqs, const T* d_cols, const int* lengths, int nW, int B, int I, int nL) { // Here's what we're doing, if we had 2d indexing. //for i in range(B): // d_seq[i] += d_cols[i-2, 4] // d_seq[i] += d_cols[i-1, 3] // d_seq[i] += d_cols[i, 2] // d_seq[i] += d_cols[i+1, 1] // d_seq[i] += d_cols[i+2, 0] int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; int nF = nW * 2 + 1; int seq = 0; int seq_start = 0; for (int b = _loop_start; b < B; b += _loop_stride) { // Find sequence offset in which b lies. // Fixme: do not restart offset search for every b. for (; seq < nL; ++seq) { if (b < seq_start + lengths[seq]) { break; } seq_start += lengths[seq]; } // Calculate the bounds of the sequence wherein b lies. int seq_end = seq_start + lengths[seq]; // Find the unconstrained window around b, which // may be out of the sequence bounds. int window_start = b - nW; int window_end = b + nW + 1; // Find the sequence-constrained window around b. int d_seqs_start = max(seq_start, window_start); int d_seqs_end = min(seq_end, window_end); // The here update proceeds differently than the other seq2col // implementations. We have to do all the updates for the b in this loop // iteration, otherwise we get data races due to parallelism in CUDA. // // A batch item b occurs, given nw=1, in: // // position 0 in b - 1 (if present) <- window_start // position 1 in b // position 2 in b + 1 (if present) <- window_end // // The following loop sums the gradients for those occurrences. // b_w loops over [b - 1, b, b + 1] and computes the position // of b within the column gradients of [b - 1 ... b + 1]. 
for (int b_w = d_seqs_start; b_w < d_seqs_end; ++b_w) { int position = (2 * nW) - (b_w - window_start); int start = (b_w * I * nF) + (position * I); for (int i = 0; i < I; ++i) { d_seqs[(b*I + i)] += d_cols[start + i]; } } } } template <typename T> __global__ void backprop_clipped_linear(T* dX, const T* dY, const T* X, double slope, double offset, double min_val, double max_val, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; T low = (min_val - offset) / slope; T high = (max_val - offset) / slope; for (int i = _loop_start; i < N; i += _loop_stride) { T x = X[i]; if (low < x && x < high) { dX[i] = dY[i] * slope; } else { dX[i] = 0; } } } template <typename T> __global__ void backprop_hard_swish(T* dX, const T* dY, const T* X, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { if (X[i] > 2.5) { dX[i] = dY[i]; } else if (X[i] < -2.5) { dX[i] = 0; } else { dX[i] = dY[i] * (X[i] * 0.4 + 0.5); } } } template <typename T> __global__ void backprop_hard_swish_mobilenet(T* dX, const T* dY, const T* X, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { if (X[i] > 3.0) { dX[i] = dY[i]; } else if (X[i] < -3.0) { dX[i] = 0; } else { dX[i] = dY[i] * ((X[i] * 2.0 + 3.0) / 6.0); } } } template <typename T> __global__ void backprop_dish(T* dX, const T* dY, const T* X, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { T x = X[i]; T x_sq = x * x; T x_sq_plus_one = x_sq + 1.0; dX[i] = dY[i] * (x/sqrt(x_sq_plus_one) - (0.5 * x * x_sq) / pow(x_sq_plus_one, static_cast<T>(1.5)) + 0.5); } } template <typename T> __global__ void backprop_gelu(T* dX, const T* dY, const T* X, double threshold, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { T x = X[i]; if (x >= threshold) { dX[i] = dY[i]; } else if (x <= -threshold) { dX[i] = 0.0; } else { T cdf = 0.5 * (1.0 + erf(Constants<T>::INV_SQRT_2 * x)); T pdf = Constants<T>::INV_SQRT_2PI * exp(-0.5 * x * x); dX[i] = dY[i] * (cdf + x * pdf); } } } template <typename T> __global__ void backprop_maxout(T* dX, const T* dY, const int* which, int B, int O, int P) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int b = _loop_start; b < B; b += _loop_stride) { // Go to the regions we're working on T* dX_b = &dX[b*O*P]; const T* dY_b = &dY[b*O]; const int* which_b = &which[b*O]; for (int i=0; i < O; ++i) dX_b[(i*P)+which_b[i]] = dY_b[i]; } } template <typename T> __global__ void backprop_mish(T* dX, const T* dY, const T* X, double threshold, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { T x = X[i]; if (x >= threshold) { dX[i] = dY[i]; } else { T exp_x = exp(x); T exp_2x = exp(2*x); T exp_3x = exp(3*x); T omega = (4. 
* (x+1)) + (4 * exp_2x) + exp_3x + exp_x * (4.*x+6); T delta = 2 * exp_x + exp_2x + 2; dX[i] = dY[i] * ((exp_x * omega) / (delta * delta)); } } } template <typename T> __global__ void backprop_swish(T* dX, const T* dY, const T* X, const T* Y, double threshold, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { T x = X[i]; T y = Y[i]; if (x >= threshold) { dX[i] = dY[i]; } else if (x <= -threshold) { dX[i] = 0.0; } else { T cdf = 1.0 / (1 + exp(-x)); T d = y + cdf * (1 - y); dX[i] = dY[i] * d; } } } template <typename U> __global__ void backprop_reduce_sum(U* dX, const U* d_sum, const int* lengths, int B, int T, int O) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; int seq_start = 0; int b = 0; for (int t = _loop_start; t < T; t += _loop_stride) { // Find the sequence item we're working on while ((b < B) && (seq_start+lengths[b]) <= t) { seq_start += lengths[b]; b += 1; } if (lengths[b] == 0) continue; for (int i=0; i < O; ++i) { dX[t * O + i] = d_sum[b * O + i]; } } } template <typename U> __global__ void backprop_reduce_mean(U* dX, const U* d_mean, const int* lengths, int B, int T, int O) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; int seq_start = 0; int b = 0; for (int t = _loop_start; t < T; t += _loop_stride) { // Find the sequence item we're working on while ((b < B) && (seq_start+lengths[b]) <= t) { seq_start += lengths[b]; b += 1; } if (lengths[b] == 0) continue; U* dX_t = &dX[t * O]; const U* d_mean_b = &d_mean[b * O]; int lengths_b = lengths[b]; for (int i=0; i < O; ++i) { dX_t[i] = d_mean_b[i] / lengths_b; } } } template <typename U> __global__ void backprop_reduce_max(U* dX, const U* d_maxes, const int* which, const int* lengths, int B, int T, int O) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; int seq_start = 0; int b = 0; for (int t = _loop_start; t < T; t += _loop_stride) { // We're calculating the gradient of the unpooled sequences, from // the gradient of the maxes. In this loop, we're getting the gradient // of a single sequence item, t. We need to know the sequence index, // b. while ((b < B) && (seq_start+lengths[b]) <= t) { seq_start += lengths[b]; b += 1; } if (lengths[b] == 0) continue; // The "which" array tells us which rows were selected as the max. // So we need to find the index of our t in the sequence. int index_of_t = t-seq_start; // Get the rows we're dealing with, to avoid cluttering the loop // with the index math. U* dX_t = &dX[t*O]; const U* d_maxes_b = &d_maxes[b*O]; const int* which_b = &which[b*O]; // Now loop over our row. for (int i=0; i < O; ++i) { // If we used the value for this cell, // pass the gradient if (which_b[i] == index_of_t) dX_t[i] = d_maxes_b[i]; } } }
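Every kernel in this file uses the grid-stride-loop pattern referenced in the header comment, so the result is correct for any launch geometry; the grid size only changes how many elements each thread handles. Below is a minimal host-side launch sketch for one of the elementwise kernels, `clipped_linear<float>`; the block size, grid size, stream and argument values are illustrative choices, not values taken from this file, and the sketch assumes it is compiled in the same translation unit as the kernel templates.

#include <hip/hip_runtime.h>

// Launch clipped_linear<float> over N elements; because of the grid-stride
// loop inside the kernel, the number of blocks may also be capped to a smaller
// value without affecting correctness.
static void launch_clipped_linear(float *Y, const float *X, int N, hipStream_t stream)
{
    const int threads = 256;                        // illustrative block size
    const int blocks = (N + threads - 1) / threads; // enough blocks to cover N once
    hipLaunchKernelGGL((clipped_linear<float>), dim3(blocks), dim3(threads), 0, stream,
                       Y, X, /*slope=*/1.0, /*offset=*/0.0,
                       /*min_val=*/0.0, /*max_val=*/1.0, N);
}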
dc66b9d31407a6187713cef53d0cba5564208077.cu
// Use grid strided loops, described here: // https://devblogs.nvidia.com/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/ // This pattern ensures that all of the loop values are visited once, no matter // what grid parameters are used for the function. // We cannot include CUDA header for mathematical constants, since it requires // that the development headers of the CUDA toolkit are installed. template <typename T> struct Constants {}; template <> struct Constants<double> { static constexpr double INV_SQRT_2 = 0.7071067811865475; static constexpr double INV_SQRT_2PI = 0.3989422804014327; }; template <> struct Constants<float> { static constexpr float INV_SQRT_2 = 0.70710677; static constexpr float INV_SQRT_2PI = 0.3989423; }; template <typename U> __global__ void gather_add(U* out_bo, const U* table_to, const int* indices_bk, int T, int O, int B, int K) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int b = _loop_start; b < B; b += _loop_stride) { for (int k = 0; k < K; ++k) { int idx = indices_bk[b * K + k]; const U* table = table_to + idx * O; U* out = out_bo + b * O; for (int o = 0; o < O; ++o) { out[o] += table[o]; } } } } template <typename T> __global__ void seq2col(T* output, const T* X, const int* lengths, int nW, int B, int I, int nL) { // Let's say nW is 1 (it usually is). Then we want to take: // 1a 1b 1c // 2a 2b 2c // 3a 3b 3c // And make // __ __ __ 1a 1b 1c 2a 2b 2c // 1a 1b 1c 2a 2b 2c 3a 3b 3c // 2a 2b 2c 3a 3b 3c __ __ __ // Where __ is padding. // Now let's say nW is 2. Then we want to take: // 1a 1b 1c // 2a 2b 2c // 3a 3b 3c // And make // __ __ __ __ __ __ 1a 1b 1c 2a 2b 2c 3a 3b 3c // __ __ __ 1a 1b 1c 2a 2b 2c 3a 3b 3c __ __ __ // 1a 1b 1c 2a 2b 2c 3a 3b 3c __ __ __ __ __ __ // * x_start=-6, x_end=9 : (0-2) * 3, (0+2+1) * 3 // * x_start=-3, x_end=13 : (1-2) * 3, (1+2+1) * 3 // * x_start=0, x_end=16 : (2-2) * 3, (2+2+1) * 3 // // If lengths > 1, then the sequence lengths dictate // the boundaries/padding rather than the begin/end // of X. int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; int nF = nW * 2 + 1; int seq = 0; int seq_start = 0; for (int b = _loop_start; b < B; b += _loop_stride) { // Find sequence sequence in which b lies. for (; seq < nL; ++seq) { if (b < seq_start + lengths[seq]) { break; } seq_start += lengths[seq]; } // Calculate the bounds of the sequence wherein b lies. int seq_end = seq_start + lengths[seq]; // Find the unconstrained window around b, which // may be out of the sequence bounds. int window_start = b - nW; int window_end = b + nW + 1; // Find the sequence-constrained window around b. int x_start = max(seq_start, window_start); int x_end = min(seq_end, window_end); int n_elems = x_end - x_start; // If the left window is cut short, we want to start by // the same amount in the output. 
int out_offset = x_start - window_start; for (int i = 0; i < n_elems * I; i++) { output[(b * I * nF) + (out_offset * I) + i] = X[(x_start * I) + i]; } } } template <typename T> __global__ void maxout(T* best, int* which, const T* cands, int B, int O, int P) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int bo = _loop_start; bo < B * O; bo += _loop_stride) { // Go to the candidates at the output we're working on const T* cands_bo = &cands[bo * P]; int best_idx = 0; T best_val = cands_bo[0]; for (int p = 1; p < P; ++p) { if (cands_bo[p] > best_val) { best_idx = p; best_val = cands_bo[p]; } } which[bo] = best_idx; best[bo] = best_val; } } template <typename T> __global__ void clipped_linear(T* Y, const T* X, double slope, double offset, double min_val, double max_val, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { T y = X[i] * slope + offset; Y[i] = min(max(y, min_val), max_val); } } template <typename T> __global__ void dish(T* Y, const T* X, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { T x = X[i]; Y[i] = 0.5 * x * (x / sqrt(1 + x * x) + 1); } } template <typename T> __global__ void gelu(T* Y, const T* X, double threshold, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { T x = X[i]; if (x >= threshold) { Y[i] = x; } else if (x <= -threshold) { Y[i] = 0.0; } else { T cdf = 0.5 * (1.0 + erf(Constants<T>::INV_SQRT_2 * x)); Y[i] = x * cdf; } } } template <typename T> __global__ void mish(T* Y, const T* X, double threshold, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; T one = 1.; for (int i = _loop_start; i < N; i += _loop_stride) { if (X[i] >= threshold) Y[i] = X[i]; else Y[i] = X[i] * tanh(log(one + exp(X[i]))); } } template <typename T> __global__ void swish(T* Y, const T* X, double threshold, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { if (X[i] >= threshold) { Y[i] = X[i]; } else if (X[i] <= -threshold) { Y[i] = 0.0; } else { T logistic_cdf = 1.0 / (1.0 + exp(-X[i])); Y[i] = X[i] * logistic_cdf; } } } template <typename U> __global__ void reduce_sum(U* output, const U* X, const int* lengths, int B, int T, int O) { // Compute sums of a batch of concatenated sequences int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int b = _loop_start; b < B; b += _loop_stride) { // Go to the regions we're working on U* output_b = &output[b*O]; // Find the sequence item we're working on int t = 0; for (int i=0; i < b; ++i) { t += lengths[i]; } int length = lengths[b]; // Each invocation of the kernel sums one batch. 
for (int i=0; i < length; ++i) // Iterate over rows { const U* X_t = &X[(t+i)*O]; for (int j=0; j < O; ++j) { output_b[j] += X_t[j]; } } } } template <typename U> __global__ void reduce_max(U* maxes, int* which, const U* X, const int* lengths, int B, int T, int O) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int b = _loop_start; b < B; b += _loop_stride) { // Go to the regions we're working on U* maxes_b = &maxes[b*O]; int* which_b = &which[b*O]; // Find the sequence item we're working on const U* X_t = X; for (int i=0; i < b; ++i) { X_t += lengths[i] * O; } // Each invocation of the kernel maxes one sequence. // Start by assuming maxes are the first element. for (int i=0; i < O; ++i) { maxes_b[i] = X_t[i]; which_b[i] = 0; } int length = lengths[b]; for (int i=1; i < length; ++i) // Iterate over rows { X_t += O; for (int j=0; j < O; ++j) { if (X_t[j] > maxes_b[j]) { maxes_b[j] = X_t[j]; which_b[j] = i; } } } } } template <typename T> __global__ void backprop_seq2col(T* d_seqs, const T* d_cols, const int* lengths, int nW, int B, int I, int nL) { // Here's what we're doing, if we had 2d indexing. //for i in range(B): // d_seq[i] += d_cols[i-2, 4] // d_seq[i] += d_cols[i-1, 3] // d_seq[i] += d_cols[i, 2] // d_seq[i] += d_cols[i+1, 1] // d_seq[i] += d_cols[i+2, 0] int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; int nF = nW * 2 + 1; int seq = 0; int seq_start = 0; for (int b = _loop_start; b < B; b += _loop_stride) { // Find sequence offset in which b lies. // Fixme: do not restart offset search for every b. for (; seq < nL; ++seq) { if (b < seq_start + lengths[seq]) { break; } seq_start += lengths[seq]; } // Calculate the bounds of the sequence wherein b lies. int seq_end = seq_start + lengths[seq]; // Find the unconstrained window around b, which // may be out of the sequence bounds. int window_start = b - nW; int window_end = b + nW + 1; // Find the sequence-constrained window around b. int d_seqs_start = max(seq_start, window_start); int d_seqs_end = min(seq_end, window_end); // The here update proceeds differently than the other seq2col // implementations. We have to do all the updates for the b in this loop // iteration, otherwise we get data races due to parallelism in CUDA. // // A batch item b occurs, given nw=1, in: // // position 0 in b - 1 (if present) <- window_start // position 1 in b // position 2 in b + 1 (if present) <- window_end // // The following loop sums the gradients for those occurrences. // b_w loops over [b - 1, b, b + 1] and computes the position // of b within the column gradients of [b - 1 ... b + 1]. 
for (int b_w = d_seqs_start; b_w < d_seqs_end; ++b_w) { int position = (2 * nW) - (b_w - window_start); int start = (b_w * I * nF) + (position * I); for (int i = 0; i < I; ++i) { d_seqs[(b*I + i)] += d_cols[start + i]; } } } } template <typename T> __global__ void backprop_clipped_linear(T* dX, const T* dY, const T* X, double slope, double offset, double min_val, double max_val, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; T low = (min_val - offset) / slope; T high = (max_val - offset) / slope; for (int i = _loop_start; i < N; i += _loop_stride) { T x = X[i]; if (low < x && x < high) { dX[i] = dY[i] * slope; } else { dX[i] = 0; } } } template <typename T> __global__ void backprop_hard_swish(T* dX, const T* dY, const T* X, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { if (X[i] > 2.5) { dX[i] = dY[i]; } else if (X[i] < -2.5) { dX[i] = 0; } else { dX[i] = dY[i] * (X[i] * 0.4 + 0.5); } } } template <typename T> __global__ void backprop_hard_swish_mobilenet(T* dX, const T* dY, const T* X, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { if (X[i] > 3.0) { dX[i] = dY[i]; } else if (X[i] < -3.0) { dX[i] = 0; } else { dX[i] = dY[i] * ((X[i] * 2.0 + 3.0) / 6.0); } } } template <typename T> __global__ void backprop_dish(T* dX, const T* dY, const T* X, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { T x = X[i]; T x_sq = x * x; T x_sq_plus_one = x_sq + 1.0; dX[i] = dY[i] * (x/sqrt(x_sq_plus_one) - (0.5 * x * x_sq) / pow(x_sq_plus_one, static_cast<T>(1.5)) + 0.5); } } template <typename T> __global__ void backprop_gelu(T* dX, const T* dY, const T* X, double threshold, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { T x = X[i]; if (x >= threshold) { dX[i] = dY[i]; } else if (x <= -threshold) { dX[i] = 0.0; } else { T cdf = 0.5 * (1.0 + erf(Constants<T>::INV_SQRT_2 * x)); T pdf = Constants<T>::INV_SQRT_2PI * exp(-0.5 * x * x); dX[i] = dY[i] * (cdf + x * pdf); } } } template <typename T> __global__ void backprop_maxout(T* dX, const T* dY, const int* which, int B, int O, int P) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int b = _loop_start; b < B; b += _loop_stride) { // Go to the regions we're working on T* dX_b = &dX[b*O*P]; const T* dY_b = &dY[b*O]; const int* which_b = &which[b*O]; for (int i=0; i < O; ++i) dX_b[(i*P)+which_b[i]] = dY_b[i]; } } template <typename T> __global__ void backprop_mish(T* dX, const T* dY, const T* X, double threshold, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { T x = X[i]; if (x >= threshold) { dX[i] = dY[i]; } else { T exp_x = exp(x); T exp_2x = exp(2*x); T exp_3x = exp(3*x); T omega = (4. 
* (x+1)) + (4 * exp_2x) + exp_3x + exp_x * (4.*x+6); T delta = 2 * exp_x + exp_2x + 2; dX[i] = dY[i] * ((exp_x * omega) / (delta * delta)); } } } template <typename T> __global__ void backprop_swish(T* dX, const T* dY, const T* X, const T* Y, double threshold, int N) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; for (int i = _loop_start; i < N; i += _loop_stride) { T x = X[i]; T y = Y[i]; if (x >= threshold) { dX[i] = dY[i]; } else if (x <= -threshold) { dX[i] = 0.0; } else { T cdf = 1.0 / (1 + exp(-x)); T d = y + cdf * (1 - y); dX[i] = dY[i] * d; } } } template <typename U> __global__ void backprop_reduce_sum(U* dX, const U* d_sum, const int* lengths, int B, int T, int O) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; int seq_start = 0; int b = 0; for (int t = _loop_start; t < T; t += _loop_stride) { // Find the sequence item we're working on while ((b < B) && (seq_start+lengths[b]) <= t) { seq_start += lengths[b]; b += 1; } if (lengths[b] == 0) continue; for (int i=0; i < O; ++i) { dX[t * O + i] = d_sum[b * O + i]; } } } template <typename U> __global__ void backprop_reduce_mean(U* dX, const U* d_mean, const int* lengths, int B, int T, int O) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; int seq_start = 0; int b = 0; for (int t = _loop_start; t < T; t += _loop_stride) { // Find the sequence item we're working on while ((b < B) && (seq_start+lengths[b]) <= t) { seq_start += lengths[b]; b += 1; } if (lengths[b] == 0) continue; U* dX_t = &dX[t * O]; const U* d_mean_b = &d_mean[b * O]; int lengths_b = lengths[b]; for (int i=0; i < O; ++i) { dX_t[i] = d_mean_b[i] / lengths_b; } } } template <typename U> __global__ void backprop_reduce_max(U* dX, const U* d_maxes, const int* which, const int* lengths, int B, int T, int O) { int _loop_start = blockIdx.x * blockDim.x + threadIdx.x; int _loop_stride = blockDim.x * gridDim.x; int seq_start = 0; int b = 0; for (int t = _loop_start; t < T; t += _loop_stride) { // We're calculating the gradient of the unpooled sequences, from // the gradient of the maxes. In this loop, we're getting the gradient // of a single sequence item, t. We need to know the sequence index, // b. while ((b < B) && (seq_start+lengths[b]) <= t) { seq_start += lengths[b]; b += 1; } if (lengths[b] == 0) continue; // The "which" array tells us which rows were selected as the max. // So we need to find the index of our t in the sequence. int index_of_t = t-seq_start; // Get the rows we're dealing with, to avoid cluttering the loop // with the index math. U* dX_t = &dX[t*O]; const U* d_maxes_b = &d_maxes[b*O]; const int* which_b = &which[b*O]; // Now loop over our row. for (int i=0; i < O; ++i) { // If we used the value for this cell, // pass the gradient if (which_b[i] == index_of_t) dX_t[i] = d_maxes_b[i]; } } }
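The file above opens by describing the grid-stride loop pattern that all of its kernels share. As a standalone illustration (an editor's sketch, not part of this dataset row), the following minimal CUDA program shows the same pattern with a deliberately undersized grid; the kernel name saxpy_strided and all launch parameters are invented for the example.

#include <cstdio>
#include <cuda_runtime.h>

// Editor's sketch: a minimal grid-stride loop. Every element i is visited
// exactly once, no matter how many blocks/threads are launched.
__global__ void saxpy_strided(float* y, const float* x, float a, int n) {
    int start = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = start; i < n; i += stride) {
        y[i] = a * x[i] + y[i];
    }
}

int main() {
    const int n = 1 << 20;
    float *x = nullptr, *y = nullptr;
    cudaMallocManaged(&x, n * sizeof(float));
    cudaMallocManaged(&y, n * sizeof(float));
    for (int i = 0; i < n; ++i) { x[i] = 1.0f; y[i] = 2.0f; }
    // Deliberately launch far fewer threads than elements; the stride loop
    // still covers all n elements.
    saxpy_strided<<<64, 128>>>(y, x, 3.0f, n);
    cudaDeviceSynchronize();
    printf("y[0] = %.1f, y[n-1] = %.1f\n", y[0], y[n - 1]);  // expect 5.0 5.0
    cudaFree(x);
    cudaFree(y);
    return 0;
}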
a16028129ce78ae9edf6f516beb0e07f378fd02f.hip
// !!! This is a file automatically generated by hipify!!! // generated by gen_cuda_conv_bias_kern_impls.py #include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit.cuinl" template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit<PerChannelBiasVisitor, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>>( const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>> epilogue, const ConvParam& param, float alpha, float beta, hipStream_t stream);
a16028129ce78ae9edf6f516beb0e07f378fd02f.cu
// generated by gen_cuda_conv_bias_kern_impls.py #include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit.cuinl" template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit<PerChannelBiasVisitor, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>>( const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>> epilogue, const ConvParam& param, float alpha, float beta, cudaStream_t stream);
78224bb229a03b29d8516b3bc6a2714916af04e0.hip
// !!! This is a file automatically generated by hipify!!! // // auto-generated by ops.py// //header #define OPS_API 2 #define OPS_3D #define OPS_SOA #include "ops_lib_cpp.h" #include "ops_cuda_rt_support.h" #include "ops_cuda_reduction.h" #include <hip/hip_complex.h> #ifdef OPS_MPI #include "ops_mpi_core.h" #endif // global constants void ops_init_backend() {} void ops_decl_const_char(int dim, char const *type, int size, char *dat, char const *name){ { printf("error: unknown const name\n"); exit(1); } } //user kernel files #include "multidim_kernel_cuda_kernel.cu" #include "multidim_copy_kernel_cuda_kernel.cu" #include "multidim_reduce_kernel_cuda_kernel.cu"
78224bb229a03b29d8516b3bc6a2714916af04e0.cu
// // auto-generated by ops.py// //header #define OPS_API 2 #define OPS_3D #define OPS_SOA #include "ops_lib_cpp.h" #include "ops_cuda_rt_support.h" #include "ops_cuda_reduction.h" #include <cuComplex.h> #ifdef OPS_MPI #include "ops_mpi_core.h" #endif // global constants void ops_init_backend() {} void ops_decl_const_char(int dim, char const *type, int size, char *dat, char const *name){ { printf("error: unknown const name\n"); exit(1); } } //user kernel files #include "multidim_kernel_cuda_kernel.cu" #include "multidim_copy_kernel_cuda_kernel.cu" #include "multidim_reduce_kernel_cuda_kernel.cu"
acd2d26ff899f3b74a3cac175f50e65cd019ffa1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdint.h> #include <time.h> #define patchSize 8 #define sharedSize 256 #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" __global__ void calculateHistogram(uint8_t *imageData, int *countArray, int totalSize){ __shared__ int private_histo[256]; private_histo[threadIdx.x] = 0; int imageIndex = blockDim.x * blockIdx.x + threadIdx.x; if(imageIndex < totalSize){ atomicAdd(&private_histo[imageData[imageIndex]], 1); __syncthreads(); atomicAdd(&countArray[threadIdx.x], private_histo[threadIdx.x]); } } __global__ void maskImage(uint8_t *imageData, int *scannedArray, int totalSize){ int imageIndex = blockDim.x * blockIdx.x + threadIdx.x; if(imageIndex < totalSize){ imageData[imageIndex] = scannedArray[imageData[imageIndex]]; } } __global__ void kogge_stone_scan(int *countArray, int *resultArray) { __shared__ int XY[2*sharedSize]; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < 256){ XY[threadIdx.x] = countArray[i]; } for (unsigned int stride = 1; stride <= sharedSize; stride *= 2) { __syncthreads(); int index = (threadIdx.x+1)*stride*2 - 1; if (index < 2 * sharedSize){ XY[index] += XY[index-stride]; } } for (int stride = 2 * blockDim.x / 4; stride > 0; stride /= 2) { __syncthreads(); int index = (threadIdx.x + 1) * 2 * stride - 1; if (index + stride < 2 * blockDim.x) { XY[index + stride] += XY[index]; } } __syncthreads(); if (i < 256) resultArray[i] = XY[threadIdx.x]; } __global__ void calculateEqualization(int *scannedArray, int cdfmin, int totalSize){ scannedArray[threadIdx.x] = int( 255 * (scannedArray[threadIdx.x] - cdfmin) / (totalSize - cdfmin) ); } int main(int argc, char ** argv) { /********** I use CUDA Event to calculate time but also use Clock to calculate total time ***************/ clock_t total_start, total_end; total_start = clock(); /********* Device Information by CUDA ***********************/ hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, 0); printf("Device name: %s\n", deviceProp.name); printf("The maximum number of thread blocks: %d\n", deviceProp.maxGridSize[0] * deviceProp.maxGridSize[1]); printf("The maximum number of threads per block: %d\n\n", deviceProp.maxThreadsPerBlock); /*************** Read of Image Data, Image Width, Image Height by a library I found online ****************/ int width, height, bpp; uint8_t* imageData = stbi_load(argv[1], &width, &height, &bpp, 1); //float* data = stbi_loadf(argv[1], &width, &height, &bpp, 1); printf("Width: %d Height: %d BPP: %d\n\n", width, height, bpp); /*************** CUDA Memory Allocation and Memory Copy for Image Data and also array to calculate histogram named arrayCount *****************/ uint8_t *rim; const int imsize = width*height*sizeof(uint8_t); hipMalloc( (void**)&rim, imsize ); hipMemcpy( rim, imageData , imsize, hipMemcpyHostToDevice ); int countArray[256]; for(int i=0;i<256;i++){ countArray[i] = 0; } int *cim; const int graysize = 256 * sizeof(int); hipMalloc( (void**)&cim, graysize ); hipMemcpy( cim, countArray , graysize, hipMemcpyHostToDevice ); /******************* CUDA Grid Creation *******************************/ int block = (width * height)/256 + ((width * height)/256.0 != 0.0); /******************* Calculating Histogram and Measure the Execution Time *****************/ hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchKernelGGL(( calculateHistogram), dim3(block), 
dim3(sharedSize), 0, 0, rim, cim, width * height); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); hipDeviceSynchronize(); printf("Execution Time for Calculating Histogram: %f milliseconds\n\n", milliseconds); /****************** countArray is a small array (256 sized) so CPU is enough for its operation ************************/ /****************** Here I calculate CDF ****************************/ int scannedArray[256]; int *sim; hipMalloc( (void**)&sim, graysize ); hipMemcpy( sim, scannedArray , graysize, hipMemcpyHostToDevice ); int blockSize = 256/sharedSize + (256.0/sharedSize != 0.0); hipLaunchKernelGGL(( kogge_stone_scan), dim3(blockSize) ,dim3(sharedSize), 0, 0, cim, sim); hipDeviceSynchronize(); hipMemcpy(scannedArray, sim, graysize, hipMemcpyDeviceToHost); /****************** I store for range 0 to 255 but only use the range from minimum in image to maximum in image ****************/ /****************** So minimum may not be in first place in array. So I search for it. It is found in few steps most probably ****************/ int cdfmin; for(int i=0;i<256;i++){ if(scannedArray[i] != 0){ cdfmin = scannedArray[i]; break; } } /****************** countArray is a small array (256 sized) so CPU is enough for its operation ************************/ /****************** Here I calculate Equalization ****************************/ hipLaunchKernelGGL(( calculateEqualization), dim3(1),dim3(256), 0, 0, sim, cdfmin, width * height); /***************** Memory Allocation and Memory Copy for my new countArray which is used for CDF and Equalization then ******************/ /***************** Mask Image means convert value of image to new value of after CDF and Equalization ********************/ hipEventRecord(start); hipLaunchKernelGGL(( maskImage), dim3(block), dim3(sharedSize), 0, 0, rim, sim, width * height); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); hipDeviceSynchronize(); hipMemcpy(imageData, rim, imsize, hipMemcpyDeviceToHost); printf("Execution Time for Masking Image: %f milliseconds\n\n", milliseconds); /***************** Here create new image after CDF and Equalization **************/ /***************** You can give PNG or PGM files as input. But target image file format is PNG ************************/ /***************** So after compile run should be like ./main file.pgm file2.png ************************/ stbi_write_png(argv[2], width, height, 1, imageData, width*1); total_end = clock(); float total_time = ((double) (total_end - total_start)) / (CLOCKS_PER_SEC/1000); printf("Total Time: %f milliseconds\n", total_time); return 0; }
acd2d26ff899f3b74a3cac175f50e65cd019ffa1.cu
#include <stdint.h> #include <time.h> #define patchSize 8 #define sharedSize 256 #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" __global__ void calculateHistogram(uint8_t *imageData, int *countArray, int totalSize){ __shared__ int private_histo[256]; private_histo[threadIdx.x] = 0; int imageIndex = blockDim.x * blockIdx.x + threadIdx.x; if(imageIndex < totalSize){ atomicAdd(&private_histo[imageData[imageIndex]], 1); __syncthreads(); atomicAdd(&countArray[threadIdx.x], private_histo[threadIdx.x]); } } __global__ void maskImage(uint8_t *imageData, int *scannedArray, int totalSize){ int imageIndex = blockDim.x * blockIdx.x + threadIdx.x; if(imageIndex < totalSize){ imageData[imageIndex] = scannedArray[imageData[imageIndex]]; } } __global__ void kogge_stone_scan(int *countArray, int *resultArray) { __shared__ int XY[2*sharedSize]; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < 256){ XY[threadIdx.x] = countArray[i]; } for (unsigned int stride = 1; stride <= sharedSize; stride *= 2) { __syncthreads(); int index = (threadIdx.x+1)*stride*2 - 1; if (index < 2 * sharedSize){ XY[index] += XY[index-stride]; } } for (int stride = 2 * blockDim.x / 4; stride > 0; stride /= 2) { __syncthreads(); int index = (threadIdx.x + 1) * 2 * stride - 1; if (index + stride < 2 * blockDim.x) { XY[index + stride] += XY[index]; } } __syncthreads(); if (i < 256) resultArray[i] = XY[threadIdx.x]; } __global__ void calculateEqualization(int *scannedArray, int cdfmin, int totalSize){ scannedArray[threadIdx.x] = int( 255 * (scannedArray[threadIdx.x] - cdfmin) / (totalSize - cdfmin) ); } int main(int argc, char ** argv) { /********** I use CUDA Event to calculate time but also use Clock to calculate total time ***************/ clock_t total_start, total_end; total_start = clock(); /********* Device Information by CUDA ***********************/ cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, 0); printf("Device name: %s\n", deviceProp.name); printf("The maximum number of thread blocks: %d\n", deviceProp.maxGridSize[0] * deviceProp.maxGridSize[1]); printf("The maximum number of threads per block: %d\n\n", deviceProp.maxThreadsPerBlock); /*************** Read of Image Data, Image Width, Image Height by a library I found online ****************/ int width, height, bpp; uint8_t* imageData = stbi_load(argv[1], &width, &height, &bpp, 1); //float* data = stbi_loadf(argv[1], &width, &height, &bpp, 1); printf("Width: %d Height: %d BPP: %d\n\n", width, height, bpp); /*************** CUDA Memory Allocation and Memory Copy for Image Data and also array to calculate histogram named arrayCount *****************/ uint8_t *rim; const int imsize = width*height*sizeof(uint8_t); cudaMalloc( (void**)&rim, imsize ); cudaMemcpy( rim, imageData , imsize, cudaMemcpyHostToDevice ); int countArray[256]; for(int i=0;i<256;i++){ countArray[i] = 0; } int *cim; const int graysize = 256 * sizeof(int); cudaMalloc( (void**)&cim, graysize ); cudaMemcpy( cim, countArray , graysize, cudaMemcpyHostToDevice ); /******************* CUDA Grid Creation *******************************/ int block = (width * height)/256 + ((width * height)/256.0 != 0.0); /******************* Calculating Histogram and Measure the Execution Time *****************/ cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); calculateHistogram<<<block, sharedSize>>>(rim, cim, width * height); cudaEventRecord(stop); cudaEventSynchronize(stop); float 
milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cudaDeviceSynchronize(); printf("Execution Time for Calculating Histogram: %f milliseconds\n\n", milliseconds); /****************** countArray is a small array (256 sized) so CPU is enough for its operation ************************/ /****************** Here I calculate CDF ****************************/ int scannedArray[256]; int *sim; cudaMalloc( (void**)&sim, graysize ); cudaMemcpy( sim, scannedArray , graysize, cudaMemcpyHostToDevice ); int blockSize = 256/sharedSize + (256.0/sharedSize != 0.0); kogge_stone_scan<<<blockSize ,sharedSize>>>(cim, sim); cudaDeviceSynchronize(); cudaMemcpy(scannedArray, sim, graysize, cudaMemcpyDeviceToHost); /****************** I store for range 0 to 255 but only use the range from minimum in image to maximum in image ****************/ /****************** So minimum may not be in first place in array. So I search for it. It is found in few steps most probably ****************/ int cdfmin; for(int i=0;i<256;i++){ if(scannedArray[i] != 0){ cdfmin = scannedArray[i]; break; } } /****************** countArray is a small array (256 sized) so CPU is enough for its operation ************************/ /****************** Here I calculate Equalization ****************************/ calculateEqualization<<<1,256>>>(sim, cdfmin, width * height); /***************** Memory Allocation and Memory Copy for my new countArray which is used for CDF and Equalization then ******************/ /***************** Mask Image means convert value of image to new value of after CDF and Equalization ********************/ cudaEventRecord(start); maskImage<<<block, sharedSize>>>(rim, sim, width * height); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cudaDeviceSynchronize(); cudaMemcpy(imageData, rim, imsize, cudaMemcpyDeviceToHost); printf("Execution Time for Masking Image: %f milliseconds\n\n", milliseconds); /***************** Here create new image after CDF and Equalization **************/ /***************** You can give PNG or PGM files as input. But target image file format is PNG ************************/ /***************** So after compile run should be like ./main file.pgm file2.png ************************/ stbi_write_png(argv[2], width, height, 1, imageData, width*1); total_end = clock(); float total_time = ((double) (total_end - total_start)) / (CLOCKS_PER_SEC/1000); printf("Total Time: %f milliseconds\n", total_time); return 0; }
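The row above implements histogram equalization in three kernels (per-block histogram, scan to a CDF, LUT-based remap). A small host-side reference of the same mapping is a convenient way to sanity-check the GPU output on a tiny image; this is an editor's sketch that only roughly mirrors the kernels above, and the function name equalize_reference is invented here.

#include <cstddef>
#include <cstdint>
#include <vector>

// Editor's sketch: CPU reference for the equalization LUT used above,
//   lut[v] = 255 * (cdf[v] - cdf_min) / (N - cdf_min)   (integer division),
// where cdf is the inclusive scan of the 256-bin histogram and cdf_min is
// its first non-zero entry.
std::vector<uint8_t> equalize_reference(const std::vector<uint8_t>& img) {
    const long long N = static_cast<long long>(img.size());
    long long hist[256] = {0};
    for (uint8_t v : img) hist[v]++;
    long long cdf[256];
    long long running = 0;
    for (int i = 0; i < 256; ++i) { running += hist[i]; cdf[i] = running; }
    long long cdf_min = 0;
    for (int i = 0; i < 256; ++i) { if (cdf[i] != 0) { cdf_min = cdf[i]; break; } }
    if (N == cdf_min) return img;  // flat image: nothing to equalize
    std::vector<uint8_t> out(img.size());
    for (std::size_t i = 0; i < img.size(); ++i) {
        out[i] = static_cast<uint8_t>(255 * (cdf[img[i]] - cdf_min) / (N - cdf_min));
    }
    return out;
}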
c7d88466663b352ca7008a458a5cbe255c6e6fed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* Element-wise vector addition of a and b into c; N is expected to be defined in includes.h. Note that every launched thread runs the full serial loop, so extra threads only repeat the same writes. */ __global__ void sum(int *a, int *b, int *c) { int i; for(i = 0; i < N; i++) { c[i] = a[i] + b[i]; } }
c7d88466663b352ca7008a458a5cbe255c6e6fed.cu
#include "includes.h" __global__ void sum(int *a, int *b, int *c) { int i; for(i = 0; i < N; i++) { c[i] = a[i] + b[i]; } }
39f4564a25069600d46d806e37f70fd12f8902c6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" %%cuda --name student_func.cu /* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include "utils.h" __global__ void findMinMaxLogLumPerBlock(const float* const d_logLuminance, const size_t numRows, const size_t numCols, float* d_minLogLum, float* d_maxLogLum) { unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int j = (blockIdx.y * blockDim.y) + threadIdx.y; if (i >= numCols || j >= numRows) return; unsigned int g_oneDOffset = j * numCols + i; unsigned int s_oneDOffset = threadIdx.y * blockDim.x + threadIdx.x; const unsigned int threadsPerBlock = blockDim.x * blockDim.y; extern __shared__ float s_minMaxLogLum[]; s_minMaxLogLum[s_oneDOffset] = d_logLuminance[g_oneDOffset]; s_minMaxLogLum[threadsPerBlock + s_oneDOffset] = d_logLuminance[g_oneDOffset]; __syncthreads(); for (size_t it = threadsPerBlock / 2; it > 0; it >>= 1) { if (s_oneDOffset < it) s_minMaxLogLum[s_oneDOffset] = min(s_minMaxLogLum[s_oneDOffset], s_minMaxLogLum[s_oneDOffset + it]); __syncthreads(); } if(s_oneDOffset == 0) d_minLogLum[blockIdx.y * gridDim.x + blockIdx.x] = s_minMaxLogLum[0]; __syncthreads(); for (size_t it = threadsPerBlock / 2; it > 0; it >>= 1) { if (s_oneDOffset < it) s_minMaxLogLum[threadsPerBlock + s_oneDOffset] = max(s_minMaxLogLum[threadsPerBlock + s_oneDOffset], s_minMaxLogLum[threadsPerBlock + s_oneDOffset + it]); __syncthreads(); } if(s_oneDOffset == 0) d_maxLogLum[blockIdx.y * gridDim.x + blockIdx.x] = s_minMaxLogLum[threadsPerBlock]; } __global__ void reduceMinMaxLumPerBlock(float* const d_minLogLumArray, float* const d_maxLogLumArray, const size_t numRows, const size_t numCols, float* d_minLogLum, float* d_maxLogLum) { unsigned int i = threadIdx.x; if (i >= (numCols * numRows)) return; const unsigned int blocksPerGrid = numRows * numCols; extern __shared__ float s_minMaxLogLumArray[]; s_minMaxLogLumArray[i] = d_minLogLumArray[i]; s_minMaxLogLumArray[i + blocksPerGrid] = d_maxLogLumArray[i]; __syncthreads(); for (size_t it = blocksPerGrid / 2; it > 0; it >>= 1) { if (i < it) s_minMaxLogLumArray[i] = min(s_minMaxLogLumArray[i], s_minMaxLogLumArray[i + it]); __syncthreads(); } if(i == 0) *d_minLogLum = s_minMaxLogLumArray[0]; __syncthreads(); for (size_t it = blocksPerGrid / 2; it > 0; it >>= 1) { if (i < it) s_minMaxLogLumArray[i + blocksPerGrid] = max(s_minMaxLogLumArray[i + blocksPerGrid], s_minMaxLogLumArray[i + blocksPerGrid + it]); __syncthreads(); } if(i == 0) *d_maxLogLum = s_minMaxLogLumArray[blocksPerGrid]; } __global__ void calculateHisto(const float* const d_logLuminance, const size_t numRows, const size_t numCols, const size_t numBins, float* d_minLogLum, float* d_rangeLogLum, unsigned int* d_histo) { unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int j = (blockIdx.y * blockDim.y) + threadIdx.y; if (i >= numCols || j >= numRows) return; unsigned int g_oneDOffset = j * numCols + i; unsigned int binNum = min(static_cast<unsigned int>(numBins - 1), static_cast<unsigned int>(((d_logLuminance[g_oneDOffset] - (*d_minLogLum)) / (*d_rangeLogLum)) * numBins)); atomicAdd(&(d_histo[binNum]), 1); } __global__ void hellisAndSteeleCDF(unsigned int* d_histo, const size_t numBins, unsigned int* d_cdf) { extern __shared__ unsigned int temp[]; unsigned int g_oneDOffset = (blockIdx.x * blockDim.x) + threadIdx.x; if (g_oneDOffset >= numBins) return; unsigned int pout = 0,pin=1; if(g_oneDOffset != 0) temp[g_oneDOffset] = d_histo[g_oneDOffset-1]; //exclusive scan else temp[g_oneDOffset] = 0; __syncthreads(); for (size_t off = 1; off < numBins; off <<= 1) { pout = 1 - pout; pin = 1 - pout; if (g_oneDOffset >= off) temp[numBins * pout + g_oneDOffset] = 
temp[numBins * pin + g_oneDOffset] + temp[numBins * pin + g_oneDOffset - off]; else temp[numBins * pout + g_oneDOffset] = temp[numBins * pin + g_oneDOffset]; __syncthreads(); } d_cdf[g_oneDOffset] = temp[pout * numBins + g_oneDOffset]; } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { //TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ // Calculate min and max logLum per block and copy it back to global memory float* d_minLogLumPtr = nullptr; float* d_maxLogLumPtr = nullptr; // Number of threads per block (32 * 32) const unsigned int threads = 32; // Number of blocks per grid unsigned int blocksX = (numCols + threads - 1) / threads; unsigned int blocksY = (numRows + threads - 1) / threads; // Allocate memory for min and max logLum checkCudaErrors(hipMalloc(&d_minLogLumPtr, sizeof(float) * blocksX * blocksY)); checkCudaErrors(hipMalloc(&d_maxLogLumPtr, sizeof(float) * blocksX * blocksY)); checkCudaErrors(hipMemset(d_minLogLumPtr, 0, sizeof(float) * blocksX * blocksY)); checkCudaErrors(hipMemset(d_maxLogLumPtr, 0, sizeof(float) * blocksX * blocksY)); dim3 threadsPerBlock(threads, threads, 1); dim3 blocksPerGrid(blocksX, blocksY, 1); const unsigned int numThreadsPerBlock = threads * threads; hipLaunchKernelGGL(( findMinMaxLogLumPerBlock), dim3(blocksPerGrid), dim3(threadsPerBlock), 2 * numThreadsPerBlock * sizeof(float), 0, d_logLuminance, numRows, numCols, d_minLogLumPtr, d_maxLogLumPtr); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); float* d_minLogLum = nullptr; float* d_maxLogLum = nullptr; checkCudaErrors(hipMalloc(&d_minLogLum, sizeof(float))); checkCudaErrors(hipMalloc(&d_maxLogLum, sizeof(float))); checkCudaErrors(hipMemset(d_minLogLum, 0, sizeof(float))); checkCudaErrors(hipMemset(d_maxLogLum, 0, sizeof(float))); const unsigned int numblocksPerGrid = blocksY * blocksX; hipLaunchKernelGGL(( reduceMinMaxLumPerBlock), dim3(1), dim3(blocksX * blocksY), 2 * numblocksPerGrid * sizeof(float), 0, d_minLogLumPtr, d_maxLogLumPtr, blocksY, blocksX, d_minLogLum, d_maxLogLum); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipMemcpy(&min_logLum, d_minLogLum, sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&max_logLum, d_maxLogLum, sizeof(float), hipMemcpyDeviceToHost)); float range_logLum = max_logLum - min_logLum; float* d_rangeLogLum = nullptr; unsigned int* d_histo = nullptr; checkCudaErrors(hipMalloc(&d_rangeLogLum, sizeof(float))); checkCudaErrors(hipMalloc(&d_histo, numBins * sizeof(unsigned int))); checkCudaErrors(hipMemcpy(d_rangeLogLum, &range_logLum, sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMemset(d_histo, 0, numBins * sizeof(unsigned int))); hipLaunchKernelGGL(( calculateHisto), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_logLuminance, numRows, numCols, numBins, d_minLogLum, d_rangeLogLum, d_histo); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); 
checkCudaErrors(hipMemset(d_cdf, 0, numBins * sizeof(unsigned int))); unsigned int threadsPerBlockCDF = threads * threads; unsigned int blocksPerGridCDF = (numBins + ((threads * threads) - 1)) / (threads * threads); hipLaunchKernelGGL(( hellisAndSteeleCDF), dim3(blocksPerGridCDF), dim3(threadsPerBlockCDF), 2 * numBins * sizeof(unsigned int), 0, d_histo, numBins, d_cdf); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Free allocated memory checkCudaErrors(hipFree(d_minLogLumPtr)); checkCudaErrors(hipFree(d_maxLogLumPtr)); checkCudaErrors(hipFree(d_minLogLum)); checkCudaErrors(hipFree(d_maxLogLum)); checkCudaErrors(hipFree(d_rangeLogLum)); checkCudaErrors(hipFree(d_histo)); }
39f4564a25069600d46d806e37f70fd12f8902c6.cu
%%cuda --name student_func.cu /* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include "utils.h" __global__ void findMinMaxLogLumPerBlock(const float* const d_logLuminance, const size_t numRows, const size_t numCols, float* d_minLogLum, float* d_maxLogLum) { unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int j = (blockIdx.y * blockDim.y) + threadIdx.y; if (i >= numCols || j >= numRows) return; unsigned int g_oneDOffset = j * numCols + i; unsigned int s_oneDOffset = threadIdx.y * blockDim.x + threadIdx.x; const unsigned int threadsPerBlock = blockDim.x * blockDim.y; extern __shared__ float s_minMaxLogLum[]; s_minMaxLogLum[s_oneDOffset] = d_logLuminance[g_oneDOffset]; s_minMaxLogLum[threadsPerBlock + s_oneDOffset] = d_logLuminance[g_oneDOffset]; __syncthreads(); for (size_t it = threadsPerBlock / 2; it > 0; it >>= 1) { if (s_oneDOffset < it) s_minMaxLogLum[s_oneDOffset] = min(s_minMaxLogLum[s_oneDOffset], s_minMaxLogLum[s_oneDOffset + it]); __syncthreads(); } if(s_oneDOffset == 0) d_minLogLum[blockIdx.y * gridDim.x + blockIdx.x] = s_minMaxLogLum[0]; __syncthreads(); for (size_t it = threadsPerBlock / 2; it > 0; it >>= 1) { if (s_oneDOffset < it) s_minMaxLogLum[threadsPerBlock + s_oneDOffset] = max(s_minMaxLogLum[threadsPerBlock + s_oneDOffset], s_minMaxLogLum[threadsPerBlock + s_oneDOffset + it]); __syncthreads(); } if(s_oneDOffset == 0) d_maxLogLum[blockIdx.y * gridDim.x + blockIdx.x] = s_minMaxLogLum[threadsPerBlock]; } __global__ void reduceMinMaxLumPerBlock(float* const d_minLogLumArray, float* const d_maxLogLumArray, const size_t numRows, const size_t numCols, float* d_minLogLum, float* d_maxLogLum) { unsigned int i = threadIdx.x; if (i >= (numCols * numRows)) return; const unsigned int blocksPerGrid = numRows * numCols; extern __shared__ float s_minMaxLogLumArray[]; s_minMaxLogLumArray[i] = d_minLogLumArray[i]; s_minMaxLogLumArray[i + blocksPerGrid] = d_maxLogLumArray[i]; __syncthreads(); for (size_t it = blocksPerGrid / 2; it > 0; it >>= 1) { if (i < it) s_minMaxLogLumArray[i] = min(s_minMaxLogLumArray[i], s_minMaxLogLumArray[i + it]); __syncthreads(); } if(i == 0) *d_minLogLum = s_minMaxLogLumArray[0]; __syncthreads(); for (size_t it = blocksPerGrid / 2; it > 0; it >>= 1) { if (i < it) s_minMaxLogLumArray[i + blocksPerGrid] = max(s_minMaxLogLumArray[i + blocksPerGrid], s_minMaxLogLumArray[i + blocksPerGrid + it]); __syncthreads(); } if(i == 0) *d_maxLogLum = s_minMaxLogLumArray[blocksPerGrid]; } __global__ void calculateHisto(const float* const d_logLuminance, const size_t numRows, const size_t numCols, const size_t numBins, float* d_minLogLum, float* d_rangeLogLum, unsigned int* d_histo) { unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int j = (blockIdx.y * blockDim.y) + threadIdx.y; if (i >= numCols || j >= numRows) return; unsigned int g_oneDOffset = j * numCols + i; unsigned int binNum = min(static_cast<unsigned int>(numBins - 1), static_cast<unsigned int>(((d_logLuminance[g_oneDOffset] - (*d_minLogLum)) / (*d_rangeLogLum)) * numBins)); atomicAdd(&(d_histo[binNum]), 1); } __global__ void hellisAndSteeleCDF(unsigned int* d_histo, const size_t numBins, unsigned int* d_cdf) { extern __shared__ unsigned int temp[]; unsigned int g_oneDOffset = (blockIdx.x * blockDim.x) + threadIdx.x; if (g_oneDOffset >= numBins) return; unsigned int pout = 0,pin=1; if(g_oneDOffset != 0) temp[g_oneDOffset] = d_histo[g_oneDOffset-1]; //exclusive scan else temp[g_oneDOffset] = 0; __syncthreads(); for (size_t off = 1; off < numBins; off <<= 1) { pout = 1 - pout; pin = 1 - pout; if (g_oneDOffset >= off) temp[numBins * pout + g_oneDOffset] = 
temp[numBins * pin + g_oneDOffset] + temp[numBins * pin + g_oneDOffset - off]; else temp[numBins * pout + g_oneDOffset] = temp[numBins * pin + g_oneDOffset]; __syncthreads(); } d_cdf[g_oneDOffset] = temp[pout * numBins + g_oneDOffset]; } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { //TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ // Calculate min and max logLum per block and copy it back to global memory float* d_minLogLumPtr = nullptr; float* d_maxLogLumPtr = nullptr; // Number of threads per block (32 * 32) const unsigned int threads = 32; // Number of blocks per grid unsigned int blocksX = (numCols + threads - 1) / threads; unsigned int blocksY = (numRows + threads - 1) / threads; // Allocate memory for min and max logLum checkCudaErrors(cudaMalloc(&d_minLogLumPtr, sizeof(float) * blocksX * blocksY)); checkCudaErrors(cudaMalloc(&d_maxLogLumPtr, sizeof(float) * blocksX * blocksY)); checkCudaErrors(cudaMemset(d_minLogLumPtr, 0, sizeof(float) * blocksX * blocksY)); checkCudaErrors(cudaMemset(d_maxLogLumPtr, 0, sizeof(float) * blocksX * blocksY)); dim3 threadsPerBlock(threads, threads, 1); dim3 blocksPerGrid(blocksX, blocksY, 1); const unsigned int numThreadsPerBlock = threads * threads; findMinMaxLogLumPerBlock<<<blocksPerGrid, threadsPerBlock, 2 * numThreadsPerBlock * sizeof(float)>>>(d_logLuminance, numRows, numCols, d_minLogLumPtr, d_maxLogLumPtr); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); float* d_minLogLum = nullptr; float* d_maxLogLum = nullptr; checkCudaErrors(cudaMalloc(&d_minLogLum, sizeof(float))); checkCudaErrors(cudaMalloc(&d_maxLogLum, sizeof(float))); checkCudaErrors(cudaMemset(d_minLogLum, 0, sizeof(float))); checkCudaErrors(cudaMemset(d_maxLogLum, 0, sizeof(float))); const unsigned int numblocksPerGrid = blocksY * blocksX; reduceMinMaxLumPerBlock<<<1, blocksX * blocksY, 2 * numblocksPerGrid * sizeof(float)>>>(d_minLogLumPtr, d_maxLogLumPtr, blocksY, blocksX, d_minLogLum, d_maxLogLum); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaMemcpy(&min_logLum, d_minLogLum, sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&max_logLum, d_maxLogLum, sizeof(float), cudaMemcpyDeviceToHost)); float range_logLum = max_logLum - min_logLum; float* d_rangeLogLum = nullptr; unsigned int* d_histo = nullptr; checkCudaErrors(cudaMalloc(&d_rangeLogLum, sizeof(float))); checkCudaErrors(cudaMalloc(&d_histo, numBins * sizeof(unsigned int))); checkCudaErrors(cudaMemcpy(d_rangeLogLum, &range_logLum, sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemset(d_histo, 0, numBins * sizeof(unsigned int))); calculateHisto<<<blocksPerGrid, threadsPerBlock>>>(d_logLuminance, numRows, numCols, numBins, d_minLogLum, d_rangeLogLum, d_histo); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaMemset(d_cdf, 0, numBins * sizeof(unsigned int))); unsigned int threadsPerBlockCDF = 
threads * threads; unsigned int blocksPerGridCDF = (numBins + ((threads * threads) - 1)) / (threads * threads); hellisAndSteeleCDF<<<blocksPerGridCDF, threadsPerBlockCDF, 2 * numBins * sizeof(unsigned int)>>>(d_histo, numBins, d_cdf); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Free allocated memory checkCudaErrors(cudaFree(d_minLogLumPtr)); checkCudaErrors(cudaFree(d_maxLogLumPtr)); checkCudaErrors(cudaFree(d_minLogLum)); checkCudaErrors(cudaFree(d_maxLogLum)); checkCudaErrors(cudaFree(d_rangeLogLum)); checkCudaErrors(cudaFree(d_histo)); }
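The comment block in this row walks through a concrete example (input [2 4 3 3 1 7 4 5 7 0 9 4 3 2], 3 bins, histogram [4 7 3], cdf [4 11 14]). The short host program below, an editor's sketch rather than part of the assignment, reproduces those numbers with the same binning formula, which makes the kernels easier to verify.

#include <algorithm>
#include <cstdio>

int main() {
    const float lum[] = {2, 4, 3, 3, 1, 7, 4, 5, 7, 0, 9, 4, 3, 2};
    const int n = sizeof(lum) / sizeof(lum[0]);
    const int numBins = 3;
    float lumMin = lum[0], lumMax = lum[0];
    for (int i = 1; i < n; ++i) {
        lumMin = std::min(lumMin, lum[i]);
        lumMax = std::max(lumMax, lum[i]);
    }
    const float lumRange = lumMax - lumMin;  // 9 for this input
    int histo[numBins] = {0};
    for (int i = 0; i < n; ++i) {
        // Same formula as step 3 above: bin = (lum - lumMin) / lumRange * numBins,
        // clamped to the last bin.
        int bin = std::min(numBins - 1,
                           static_cast<int>((lum[i] - lumMin) / lumRange * numBins));
        histo[bin]++;
    }
    // The inclusive scan reproduces the cdf [4 11 14] quoted in the comment;
    // the exclusive scan computed by the CDF kernel above would be [0 4 11].
    int cdf[numBins];
    int running = 0;
    for (int b = 0; b < numBins; ++b) { running += histo[b]; cdf[b] = running; }
    printf("histo = [%d %d %d], cdf = [%d %d %d]\n",
           histo[0], histo[1], histo[2], cdf[0], cdf[1], cdf[2]);
    return 0;
}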
5c36af60d208ba4d37b3ab85df2993f7214170e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* [t]exture [o]bject */ #include <stdio.h> #include "u.h" /* utils */ /* ceiling `m' to `n' (returns the smallest `A' such n*A is not less than `m') */ #define ceiln(m, n) ( ((m) + (n) - 1)/(n) ) /* a common kernel execution configuration */ #define k_cnf(n) ceiln((n), 128), 128 #define n 5 /* number of elements */ float *d_A, *d_B; /* device */ float h_A[n], h_B[n]; /* host */ #define sz ((n)*sizeof(h_A[0])) hipTextureObject_t to; hipResourceDesc resD; hipTextureDesc texD; __global__ void plus(float *A, hipTextureObject_t to) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= n) return; float b = tex1Dfetch<float>(to, i); if (i < n) A[i] += b; } void h_ini() { /* host ini */ for (int i = 0; i < n; ++i) { h_A[i] = i; h_B[i] = 10*i; } } void d_ini() { /* device ini */ hipMalloc(&d_A, sz); hipMalloc(&d_B, sz); } void h2d() { /* host to device */ hipMemcpy(d_A, h_A, sz, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, sz, hipMemcpyHostToDevice); } void d2h() { /* device to host */ hipMemcpy(h_A, d_A, sz, hipMemcpyDeviceToHost); } void tex_ini() { memset(&resD, 0, sizeof(resD)); resD.resType = hipResourceTypeLinear; resD.res.linear.devPtr = d_B; resD.res.linear.sizeInBytes = sz; resD.res.linear.desc = hipCreateChannelDesc<float>(); memset(&texD, 0, sizeof(texD)); texD.normalizedCoords = 0; texD.readMode = hipReadModeElementType; checkCudaErrors(hipCreateTextureObject(&to, &resD, &texD, NULL)); } int main() { h_ini(); d_ini(); h2d(); /* host to device */ tex_ini(); plus<<<k_cnf(n)>>>(d_A, to); d2h(); /* device to host */ for (int i = 0; i < n; i++) printf("a[%d] = %2g\n", i, h_A[i]); }
5c36af60d208ba4d37b3ab85df2993f7214170e4.cu
/* [t]exture [o]bject */ #include <stdio.h> #include "u.h" /* utils */ /* ceiling `m' to `n' (returns the smallest `A' such n*A is not less than `m') */ #define ceiln(m, n) ( ((m) + (n) - 1)/(n) ) /* a common kernel execution configuration */ #define k_cnf(n) ceiln((n), 128), 128 #define n 5 /* number of elements */ float *d_A, *d_B; /* device */ float h_A[n], h_B[n]; /* host */ #define sz ((n)*sizeof(h_A[0])) cudaTextureObject_t to; cudaResourceDesc resD; cudaTextureDesc texD; __global__ void plus(float *A, cudaTextureObject_t to) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= n) return; float b = tex1Dfetch<float>(to, i); if (i < n) A[i] += b; } void h_ini() { /* host ini */ for (int i = 0; i < n; ++i) { h_A[i] = i; h_B[i] = 10*i; } } void d_ini() { /* device ini */ cudaMalloc(&d_A, sz); cudaMalloc(&d_B, sz); } void h2d() { /* host to device */ cudaMemcpy(d_A, h_A, sz, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, sz, cudaMemcpyHostToDevice); } void d2h() { /* device to host */ cudaMemcpy(h_A, d_A, sz, cudaMemcpyDeviceToHost); } void tex_ini() { memset(&resD, 0, sizeof(resD)); resD.resType = cudaResourceTypeLinear; resD.res.linear.devPtr = d_B; resD.res.linear.sizeInBytes = sz; resD.res.linear.desc = cudaCreateChannelDesc<float>(); memset(&texD, 0, sizeof(texD)); texD.normalizedCoords = 0; texD.readMode = cudaReadModeElementType; checkCudaErrors(cudaCreateTextureObject(&to, &resD, &texD, NULL)); } int main() { h_ini(); d_ini(); h2d(); /* host to device */ tex_ini(); plus<<<k_cnf(n)>>>(d_A, to); d2h(); /* device to host */ for (int i = 0; i < n; i++) printf("a[%d] = %2g\n", i, h_A[i]); }
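The texture-object example above allocates d_A and d_B and creates a texture object but never releases them. As an editor's sketch of the matching teardown (not part of the original file), the helper below shows the calls one would typically add; the name tex_fin is invented, and in the file above it would be called as tex_fin(to, d_A, d_B) at the end of main().

#include <cuda_runtime.h>

// Editor's sketch: teardown mirroring tex_ini()/d_ini() above.
void tex_fin(cudaTextureObject_t tex, float* a, float* b) {
    cudaDestroyTextureObject(tex);  // release the texture object bound to the B buffer
    cudaFree(a);
    cudaFree(b);
}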
191072c90abd5baa927214ccef0f6567822c0442.hip
// !!! This is a file automatically generated by hipify!!! #define _USE_MATH_DEFINES #include "patch_match_cuda.h" #include <algorithm> #include <cfloat> #include <cmath> #include <cstdint> #include <sstream> #include "hip/hip_runtime.h" #include "cudacc.h" // bool bOutPutMessage = true; #include <opencv2\core.hpp> #include <opencv2\imgproc.hpp> // The number of threads per Cuda thread. Warning: Do not change this value, // since the templated window sizes rely on this value. #define THREADS_PER_BLOCK 16 // We must not include "util/math.h" to avoid any Eigen includes here, // since Visual Studio cannot compile some of the Eigen/Boost expressions. #ifndef DEG2RAD #define DEG2RAD(deg) deg * 0.0174532925199432 #endif namespace colmap { namespace mvs { //cudaReadModeNormalizedFloattex2D01 //cudaReadModeElementType //P63 texture<uint8_t, hipTextureType2D, hipReadModeNormalizedFloat> ref_image_texture; // texture<uint8_t, hipTextureType2DLayered, hipReadModeNormalizedFloat> src_images_texture; texture<float, hipTextureType2DLayered, hipReadModeElementType> src_depth_maps_texture; texture<float, hipTextureType2D, hipReadModeElementType> poses_texture; // Calibration of reference image as {fx, cx, fy, cy}. __constant__ float ref_K[4]; // Calibration of reference image as {1/fx, -cx/fx, 1/fy, -cy/fy}. __constant__ float ref_inv_K[4]; __device__ inline void Mat33DotVec3(const float mat[9], const float vec[3], float result[3]) { result[0] = mat[0] * vec[0] + mat[1] * vec[1] + mat[2] * vec[2]; result[1] = mat[3] * vec[0] + mat[4] * vec[1] + mat[5] * vec[2]; result[2] = mat[6] * vec[0] + mat[7] * vec[1] + mat[8] * vec[2]; } __device__ inline void Mat33DotVec3Homogeneous(const float mat[9], const float vec[2], float result[2]) { const float inv_z = 1.0f / (mat[6] * vec[0] + mat[7] * vec[1] + mat[8]); result[0] = inv_z * (mat[0] * vec[0] + mat[1] * vec[1] + mat[2]); result[1] = inv_z * (mat[3] * vec[0] + mat[4] * vec[1] + mat[5]); } __device__ inline float DotProduct3(const float vec1[3], const float vec2[3]) { return vec1[0] * vec2[0] + vec1[1] * vec2[1] + vec1[2] * vec2[2]; } __device__ inline float GenerateRandomDepth(const float depth_min, const float depth_max, hiprandState_t* rand_state) { return hiprand_uniform(rand_state) * (depth_max - depth_min) + depth_min; } __device__ inline void GenerateRandomNormal(const int row, const int col, hiprandState_t* rand_state, float normal[3]) { // Unbiased sampling of normal, according to George Marsaglia, "Choosing a // Point from the Surface of a Sphere", 1972. float v1 = 0.0f; float v2 = 0.0f; float s = 2.0f; while (s >= 1.0f) { v1 = 2.0f * hiprand_uniform(rand_state) - 1.0f; v2 = 2.0f * hiprand_uniform(rand_state) - 1.0f; s = v1 * v1 + v2 * v2; } const float s_norm = sqrt(1.0f - s); normal[0] = 2.0f * v1 * s_norm; normal[1] = 2.0f * v2 * s_norm; normal[2] = 1.0f - 2.0f * s; // Make sure normal is looking away from camera. 
const float view_ray[3] = { ref_inv_K[0] * col + ref_inv_K[1], ref_inv_K[2] * row + ref_inv_K[3], 1.0f }; if (DotProduct3(normal, view_ray) > 0) { normal[0] = -normal[0]; normal[1] = -normal[1]; normal[2] = -normal[2]; } } __device__ inline float PerturbDepth(const float perturbation, const float depth, hiprandState_t* rand_state) { const float depth_min = (1.0f - perturbation) * depth; const float depth_max = (1.0f + perturbation) * depth; return GenerateRandomDepth(depth_min, depth_max, rand_state); } __device__ inline void PerturbNormal(const int row, const int col, const float perturbation, const float normal[3], hiprandState_t* rand_state, float perturbed_normal[3], const int num_trials = 0) { // Perturbation rotation angles. const float a1 = (hiprand_uniform(rand_state) - 0.5f) * perturbation; const float a2 = (hiprand_uniform(rand_state) - 0.5f) * perturbation; const float a3 = (hiprand_uniform(rand_state) - 0.5f) * perturbation; const float sin_a1 = sin(a1); const float sin_a2 = sin(a2); const float sin_a3 = sin(a3); const float cos_a1 = cos(a1); const float cos_a2 = cos(a2); const float cos_a3 = cos(a3); // R = Rx * Ry * Rz float R[9]; R[0] = cos_a2 * cos_a3; R[1] = -cos_a2 * sin_a3; R[2] = sin_a2; R[3] = cos_a1 * sin_a3 + cos_a3 * sin_a1 * sin_a2; R[4] = cos_a1 * cos_a3 - sin_a1 * sin_a2 * sin_a3; R[5] = -cos_a2 * sin_a1; R[6] = sin_a1 * sin_a3 - cos_a1 * cos_a3 * sin_a2; R[7] = cos_a3 * sin_a1 + cos_a1 * sin_a2 * sin_a3; R[8] = cos_a1 * cos_a2; // Perturb the normal vector. Mat33DotVec3(R, normal, perturbed_normal); // Make sure the perturbed normal is still looking in the same direction as // the viewing direction. const float view_ray[3] = { ref_inv_K[0] * col + ref_inv_K[1], ref_inv_K[2] * row + ref_inv_K[3], 1.0f }; if (DotProduct3(perturbed_normal, view_ray) >= 0.0f) { const int kMaxNumTrials = 3; if (num_trials < kMaxNumTrials) { PerturbNormal(row, col, 0.5f*perturbation, normal, rand_state, perturbed_normal, num_trials + 1); return; } else { perturbed_normal[0] = normal[0]; perturbed_normal[1] = normal[1]; perturbed_normal[2] = normal[2]; return; } } // Make sure normal has unit norm. const float inv_norm = rsqrt(DotProduct3(perturbed_normal, perturbed_normal)); perturbed_normal[0] *= inv_norm; perturbed_normal[1] *= inv_norm; perturbed_normal[2] *= inv_norm; } __device__ inline void ComputePointAtDepth(const float row, const float col, const float depth, float point[3]) { point[0] = depth * (ref_inv_K[0] * col + ref_inv_K[1]); point[1] = depth * (ref_inv_K[2] * row + ref_inv_K[3]); point[2] = depth; } // Transfer depth on plane from viewing ray at row1 to row2. The returned // depth is the intersection of the viewing ray through row2 with the plane // at row1 defined by the given depth and normal. __device__ inline float PropagateDepth(const float depth1, const float normal1[3], const float row1, const float row2) { // Point along first viewing ray. const float x1 = depth1 * (ref_inv_K[2] * row1 + ref_inv_K[3]); const float y1 = depth1; // Point on plane defined by point along first viewing ray and plane normal1. const float x2 = x1 + normal1[2]; const float y2 = y1 - normal1[1]; // Origin of second viewing ray. // const float x3 = 0.0f; // const float y3 = 0.0f; // Point on second viewing ray. const float x4 = ref_inv_K[2] * row2 + ref_inv_K[3]; // const float y4 = 1.0f; // Intersection of the lines ((x1, y1), (x2, y2)) and ((x3, y3), (x4, y4)). 
const float denom = x2 - x1 + x4 * (y1 - y2); const float kEps = 1e-5f; if (abs(denom) < kEps) { return depth1; } const float nom = y1 * x2 - x1 * y2; return nom / denom; } // First, compute triangulation angle between reference and source image for 3D // point. Second, compute incident angle between viewing direction of source // image and normal direction of 3D point. Both angles are cosine distances. __device__ inline void ComputeViewingAngles(const float point[3], const float normal[3], const int image_id, float* cos_triangulation_angle, float* cos_incident_angle) { *cos_triangulation_angle = 0.0f; *cos_incident_angle = 0.0f; // Projection center of source image. float C[3]; for (int i = 0; i < 3; ++i) { C[i] = tex2D(poses_texture, i + 16, image_id); } // Ray from point to camera. const float SX[3] = { C[0] - point[0], C[1] - point[1], C[2] - point[2] }; // Length of ray from reference image to point. const float RX_inv_norm = rsqrt(DotProduct3(point, point)); // Length of ray from source image to point. const float SX_inv_norm = rsqrt(DotProduct3(SX, SX)); *cos_incident_angle = DotProduct3(SX, normal) * SX_inv_norm; *cos_triangulation_angle = DotProduct3(SX, point) * RX_inv_norm * SX_inv_norm; } __device__ inline void ComposeHomography(const int image_id, const int row, const int col, const float depth, const float normal[3], float H[9]) { // Calibration of source image. float K[4]; for (int i = 0; i < 4; ++i) { K[i] = tex2D(poses_texture, i, image_id); } // Relative rotation between reference and source image. float R[9]; for (int i = 0; i < 9; ++i) { R[i] = tex2D(poses_texture, i + 4, image_id); } // Relative translation between reference and source image. float T[3]; for (int i = 0; i < 3; ++i) { T[i] = tex2D(poses_texture, i + 13, image_id); } // Distance to the plane. const float dist = depth * (normal[0] * (ref_inv_K[0] * col + ref_inv_K[1]) + normal[1] * (ref_inv_K[2] * row + ref_inv_K[3]) + normal[2]); const float inv_dist = 1.0f / dist; const float inv_dist_N0 = inv_dist * normal[0]; const float inv_dist_N1 = inv_dist * normal[1]; const float inv_dist_N2 = inv_dist * normal[2]; // Homography as H = K * (R - T * n' / d) * Kref^-1. H[0] = ref_inv_K[0] * (K[0] * (R[0] + inv_dist_N0 * T[0]) + K[1] * (R[6] + inv_dist_N0 * T[2])); H[1] = ref_inv_K[2] * (K[0] * (R[1] + inv_dist_N1 * T[0]) + K[1] * (R[7] + inv_dist_N1 * T[2])); H[2] = K[0] * (R[2] + inv_dist_N2 * T[0]) + K[1] * (R[8] + inv_dist_N2 * T[2]) + ref_inv_K[1] * (K[0] * (R[0] + inv_dist_N0 * T[0]) + K[1] * (R[6] + inv_dist_N0 * T[2])) + ref_inv_K[3] * (K[0] * (R[1] + inv_dist_N1 * T[0]) + K[1] * (R[7] + inv_dist_N1 * T[2])); H[3] = ref_inv_K[0] * (K[2] * (R[3] + inv_dist_N0 * T[1]) + K[3] * (R[6] + inv_dist_N0 * T[2])); H[4] = ref_inv_K[2] * (K[2] * (R[4] + inv_dist_N1 * T[1]) + K[3] * (R[7] + inv_dist_N1 * T[2])); H[5] = K[2] * (R[5] + inv_dist_N2 * T[1]) + K[3] * (R[8] + inv_dist_N2 * T[2]) + ref_inv_K[1] * (K[2] * (R[3] + inv_dist_N0 * T[1]) + K[3] * (R[6] + inv_dist_N0 * T[2])) + ref_inv_K[3] * (K[2] * (R[4] + inv_dist_N1 * T[1]) + K[3] * (R[7] + inv_dist_N1 * T[2])); H[6] = ref_inv_K[0] * (R[6] + inv_dist_N0 * T[2]); H[7] = ref_inv_K[2] * (R[7] + inv_dist_N1 * T[2]); H[8] = R[8] + ref_inv_K[1] * (R[6] + inv_dist_N0 * T[2]) + ref_inv_K[3] * (R[7] + inv_dist_N1 * T[2]) + inv_dist_N2 * T[2]; } // The return values is 1 - NCC, so the range is [0, 2], the smaller the // value, the better the color consistency. template <int kWindowSize> struct PhotoConsistencyCostComputer { // Image data in local window around patch. 
const float* local_ref_image = nullptr; // Precomputed sum of raw and squared image intensities. float local_ref_sum = 0.0f; float local_ref_squared_sum = 0.0f; // Identifier of source image. int src_image_id = -1; // Center position of patch in reference image. int row = -1; int col = -1; // Parameters for bilateral weighting. float sigma_spatial = 3.0f; float sigma_color = 0.3f; // Depth and normal for which to warp patch. float depth = 0.0f; const float* normal = nullptr; // Dimensions of reference image. int ref_image_width = 0; int ref_image_height = 0; __device__ inline float Compute() const { const float kMaxCost = 2.0f; const int kWindowRadius = kWindowSize / 2; const int thread_id = threadIdx.x; const int row_start = row - kWindowRadius; const int col_start = col - kWindowRadius; const int row_end = row + kWindowRadius; const int col_end = col + kWindowRadius; if (row_start < 0 || col_start < 0 || row_end >= ref_image_height || col_end >= ref_image_width) { return kMaxCost; } float tform[9]; ComposeHomography(src_image_id, row, col, depth, normal, tform); float col_src = tform[0] * col_start + tform[1] * row_start + tform[2]; float row_src = tform[3] * col_start + tform[4] * row_start + tform[5]; float z = tform[6] * col_start + tform[7] * row_start + tform[8]; float base_col_src = col_src; float base_row_src = row_src; float base_z = z; int ref_image_idx = THREADS_PER_BLOCK - kWindowRadius + thread_id; int ref_image_base_idx = ref_image_idx; const float center_ref = local_ref_image[ref_image_idx + kWindowRadius * 3 * THREADS_PER_BLOCK + kWindowRadius]; const float sum_ref = local_ref_sum; const float sum_ref_ref = local_ref_squared_sum; float sum_src = 0.0f; float sum_src_src = 0.0f; float sum_ref_src = 0.0f; float bilateral_weight_sum = 0.0f; for (int row = 0; row < kWindowSize; row++) { // Accumulate values per row to reduce numerical errors. float sum_src_row = 0.0f; float sum_src_src_row = 0.0f; float sum_ref_src_row = 0.0f; float bilateral_weight_sum_row = 0.0f; for (int col = 0; col < kWindowSize; col++) { const float inv_z = 1.0f / z; const float norm_col_src = inv_z * col_src + 0.5f; const float norm_row_src = inv_z * row_src + 0.5f; const float ref = local_ref_image[ref_image_idx]; const float src = tex2DLayered(src_images_texture, norm_col_src, norm_row_src, src_image_id); const float bilateral_weight = ComputeBilateralWeight(kWindowRadius, kWindowRadius, row, col, center_ref, ref, sigma_spatial, sigma_color); sum_src_row += bilateral_weight * src; sum_src_src_row += bilateral_weight * src * src; sum_ref_src_row += bilateral_weight * ref * src; bilateral_weight_sum_row += bilateral_weight; ref_image_idx += 1; col_src += tform[0]; row_src += tform[3]; z += tform[6]; } sum_src += sum_src_row; sum_src_src += sum_src_src_row; sum_ref_src += sum_ref_src_row; bilateral_weight_sum += bilateral_weight_sum_row; ref_image_base_idx += 3 * THREADS_PER_BLOCK; ref_image_idx = ref_image_base_idx; base_col_src += tform[1]; base_row_src += tform[4]; base_z += tform[7]; col_src = base_col_src; row_src = base_row_src; z = base_z; } const float inv_bilateral_weight_sum = 1.0f / bilateral_weight_sum; sum_src *= inv_bilateral_weight_sum; sum_src_src *= inv_bilateral_weight_sum; sum_ref_src *= inv_bilateral_weight_sum; const float var_ref = sum_ref_ref - sum_ref * sum_ref; const float var_src = sum_src_src - sum_src * sum_src; // Based on Jensen's Inequality for convex functions, the variance // should always be larger than 0. Do not make this threshold smaller. 
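    // (Illustrative note, not part of the original source: treating the
    // accumulated sums as weighted first and second moments, the expression
    // below evaluates
    //   NCC = (E[ref * src] - E[ref] * E[src]) / sqrt(Var(ref) * Var(src))
    // and returns 1 - NCC clamped to [0, kMaxCost]. The kMinVar guard rejects
    // nearly textureless patches for which the denominator is unreliable.)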
const float kMinVar = 1e-5f; if (var_ref < kMinVar || var_src < kMinVar) { return kMaxCost; } else { const float covar_src_ref = sum_ref_src - sum_ref * sum_src; const float var_ref_src = sqrt(var_ref * var_src); return max(0.0f, min(kMaxCost, 1.0f - covar_src_ref / var_ref_src)); } } }; __device__ inline float ComputeGeomConsistencyCost(const float row, const float col, const float depth, const int image_id, const float max_cost) { // Extract projection matrices for source image. float P[12]; for (int i = 0; i < 12; ++i) { P[i] = tex2D(poses_texture, i + 19, image_id); } float inv_P[12]; for (int i = 0; i < 12; ++i) { inv_P[i] = tex2D(poses_texture, i + 31, image_id); } // Project point in reference image to world. float forward_point[3]; ComputePointAtDepth(row, col, depth, forward_point); // Project world point to source image. const float inv_forward_z = 1.0f / (P[8] * forward_point[0] + P[9] * forward_point[1] + P[10] * forward_point[2] + P[11]); float src_col = inv_forward_z * (P[0] * forward_point[0] + P[1] * forward_point[1] + P[2] * forward_point[2] + P[3]); float src_row = inv_forward_z * (P[4] * forward_point[0] + P[5] * forward_point[1] + P[6] * forward_point[2] + P[7]); // Extract depth in source image. const float src_depth = tex2DLayered(src_depth_maps_texture, src_col + 0.5f, src_row + 0.5f, image_id); // Projection outside of source image. if (src_depth == 0.0f) { return max_cost; } // Project point in source image to world. src_col *= src_depth; src_row *= src_depth; const float backward_point_x = inv_P[0] * src_col + inv_P[1] * src_row + inv_P[2] * src_depth + inv_P[3]; const float backward_point_y = inv_P[4] * src_col + inv_P[5] * src_row + inv_P[6] * src_depth + inv_P[7]; const float backward_point_z = inv_P[8] * src_col + inv_P[9] * src_row + inv_P[10] * src_depth + inv_P[11]; const float inv_backward_point_z = 1.0f / backward_point_z; // Project world point back to reference image. const float backward_col = inv_backward_point_z * (ref_K[0] * backward_point_x + ref_K[1] * backward_point_z); const float backward_row = inv_backward_point_z * (ref_K[2] * backward_point_y + ref_K[3] * backward_point_z); // Return truncated reprojection error between original observation and // the forward-backward projected observation. const float diff_col = col - backward_col; const float diff_row = row - backward_row; return min(max_cost, sqrt(diff_col * diff_col + diff_row * diff_row)); } // Find index of minimum in given values. template <int kNumCosts> __device__ inline int FindMinCost(const float costs[kNumCosts]) { float min_cost = costs[0]; int min_cost_idx = 0; for (int idx = 1; idx < kNumCosts; ++idx) { if (costs[idx] <= min_cost) { min_cost = costs[idx]; min_cost_idx = idx; } } return min_cost_idx; } template <int kWindowSize> __device__ inline void ReadRefImageIntoSharedMemory(float* local_image, const int row, const int col, const int thread_id) { // For the first row, read the entire block into shared memory. For all // consecutive rows, it is only necessary to shift the rows in shared memory // up by one element and then read in a new row at the bottom of the shared // memory. Note that this assumes that the calling loop starts with the first // row and then consecutively reads in a new row. 
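  // (Illustrative note, not part of the original source: local_image is laid
  // out as kWindowSize rows of 3 * THREADS_PER_BLOCK floats, i.e. a left,
  // center and right stripe per row, so every thread can reach a window of
  // radius up to THREADS_PER_BLOCK around its own column. For
  // THREADS_PER_BLOCK = 16 and kWindowSize = 11 this amounts to
  // 11 * 48 = 528 floats of shared memory per block.)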
  if (row == 0) {
    int r = row - kWindowSize / 2;
    for (int i = 0; i < kWindowSize; ++i) {
      int c = col - THREADS_PER_BLOCK;
      // local_image holds kWindowSize rows of 3 * THREADS_PER_BLOCK pixels.
#pragma unroll
      for (int j = 0; j < 3; ++j) {
        local_image[thread_id + i * 3 * THREADS_PER_BLOCK +
                    j * THREADS_PER_BLOCK] = tex2D(ref_image_texture, c, r);
        c += THREADS_PER_BLOCK;
      }
      r += 1;
    }
  } else {
    // Move rows in shared memory up by one row.
    for (int i = 1; i < kWindowSize; ++i) {
#pragma unroll
      for (int j = 0; j < 3; ++j) {
        local_image[thread_id + (i - 1) * 3 * THREADS_PER_BLOCK +
                    j * THREADS_PER_BLOCK] =
            local_image[thread_id + i * 3 * THREADS_PER_BLOCK +
                        j * THREADS_PER_BLOCK];
      }
    }

    // Read next row into the last row of shared memory.
    const int r = row + kWindowSize / 2;
    int c = col - THREADS_PER_BLOCK;
    const int i = kWindowSize - 1;
#pragma unroll
    for (int j = 0; j < 3; ++j) {
      local_image[thread_id + i * 3 * THREADS_PER_BLOCK +
                  j * THREADS_PER_BLOCK] = tex2D(ref_image_texture, c, r);
      c += THREADS_PER_BLOCK;
    }
  }

  __syncthreads();
}

__device__ inline void TransformPDFToCDF(float* probs, const int num_probs) {
  float prob_sum = 0.0f;
  for (int i = 0; i < num_probs; ++i) {
    prob_sum += probs[i];
  }
  const float inv_prob_sum = 1.0f / prob_sum;

  float cum_prob = 0.0f;
  for (int i = 0; i < num_probs; ++i) {
    const float prob = probs[i] * inv_prob_sum;
    cum_prob += prob;
    probs[i] = cum_prob;
  }
}

class LikelihoodComputer {
 public:
  __device__ LikelihoodComputer(const float ncc_sigma,
                                const float min_triangulation_angle,
                                const float incident_angle_sigma)
      : cos_min_triangulation_angle_(cos(min_triangulation_angle)),
        inv_incident_angle_sigma_square_(
            -0.5f / (incident_angle_sigma * incident_angle_sigma)),
        inv_ncc_sigma_square_(-0.5f / (ncc_sigma * ncc_sigma)),
        ncc_norm_factor_(ComputeNCCCostNormFactor(ncc_sigma)) {}

  // Compute forward message from current cost and forward message of
  // previous / neighboring pixel.
  __device__ float ComputeForwardMessage(const float cost,
                                         const float prev) const {
    return ComputeMessage<true>(cost, prev);
  }

  // Compute backward message from current cost and backward message of
  // previous / neighboring pixel.
  __device__ float ComputeBackwardMessage(const float cost,
                                          const float prev) const {
    return ComputeMessage<false>(cost, prev);
  }

  // Compute the selection probability from the forward and backward message.
  __device__ inline float ComputeSelProb(const float alpha, const float beta,
                                         const float prev,
                                         const float prev_weight) const {
    const float zn0 = (1.0f - alpha) * (1.0f - beta);
    const float zn1 = alpha * beta;
    const float curr = zn1 / (zn0 + zn1);
    return prev_weight * prev + (1.0f - prev_weight) * curr;
  }

  // Compute NCC probability. Note that cost = 1 - NCC.
  __device__ inline float ComputeNCCProb(const float cost) const {
    return exp(cost * cost * inv_ncc_sigma_square_) * ncc_norm_factor_;
  }

  // Compute the triangulation angle probability.
  __device__ inline float ComputeTriProb(
      const float cos_triangulation_angle) const {
    const float abs_cos_triangulation_angle = abs(cos_triangulation_angle);
    if (abs_cos_triangulation_angle > cos_min_triangulation_angle_) {
      const float scaled = 1.0f - (1.0f - abs_cos_triangulation_angle) /
                                      (1.0f - cos_min_triangulation_angle_);
      const float likelihood = 1.0f - scaled * scaled;
      return min(1.0f, max(0.0f, likelihood));
    } else {
      return 1.0f;
    }
  }

  // Compute the incident angle probability.
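  // (Illustrative note, not part of the original source: with
  // inv_incident_angle_sigma_square_ = -0.5 / sigma^2, the method below
  // evaluates exp(-(1 - max(0, cos(angle)))^2 / (2 * sigma^2)), i.e. a
  // Gaussian prior that favors surfaces facing the source camera.)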
__device__ inline float ComputeIncProb(const float cos_incident_angle) const { const float x = 1.0f - max(0.0f, cos_incident_angle); return exp(x * x * inv_incident_angle_sigma_square_); } // Compute the warping/resolution prior probability. template <int kWindowSize> __device__ inline float ComputeResolutionProb(const float H[9], const float row, const float col) const { const int kWindowRadius = kWindowSize / 2; // Warp corners of patch in reference image to source image. float src1[2]; const float ref1[2] = { row - kWindowRadius, col - kWindowRadius }; Mat33DotVec3Homogeneous(H, ref1, src1); float src2[2]; const float ref2[2] = { row - kWindowRadius, col + kWindowRadius }; Mat33DotVec3Homogeneous(H, ref2, src2); float src3[2]; const float ref3[2] = { row + kWindowRadius, col + kWindowRadius }; Mat33DotVec3Homogeneous(H, ref3, src3); float src4[2]; const float ref4[2] = { row + kWindowRadius, col - kWindowRadius }; Mat33DotVec3Homogeneous(H, ref4, src4); // Compute area of patches in reference and source image. const float ref_area = kWindowSize * kWindowSize; const float src_area = abs(0.5f * (src1[0] * src2[1] - src2[0] * src1[1] - src1[0] * src4[1] + src2[0] * src3[1] - src3[0] * src2[1] + src4[0] * src1[1] + src3[0] * src4[1] - src4[0] * src3[1])); if (ref_area > src_area) { return src_area / ref_area; } else { return ref_area / src_area; } } private: // The normalization for the likelihood function, i.e. the normalization for // the prior on the matching cost. __device__ static inline float ComputeNCCCostNormFactor( const float ncc_sigma) { // A = sqrt(2pi)*sigma/2*erf(sqrt(2)/sigma) // erf(x) = 2/sqrt(pi) * integral from 0 to x of exp(-t^2) dt return 2.0f / (sqrt(2.0f * M_PI) * ncc_sigma * erff(2.0f / (ncc_sigma * 1.414213562f))); } // Compute the forward or backward message. template <bool kForward> __device__ inline float ComputeMessage(const float cost, const float prev) const { const float kUniformProb = 0.5f; const float kNoChangeProb = 0.99999f; const float kChangeProb = 1.0f - kNoChangeProb; const float emission = ComputeNCCProb(cost); float zn0; // Message for selection probability = 0. float zn1; // Message for selection probability = 1. if (kForward) { zn0 = (prev * kChangeProb + (1.0f - prev) * kNoChangeProb) * kUniformProb; zn1 = (prev * kNoChangeProb + (1.0f - prev) * kChangeProb) * emission; } else { zn0 = prev * emission * kChangeProb + (1.0f - prev) * kUniformProb * kNoChangeProb; zn1 = prev * emission * kNoChangeProb + (1.0f - prev) * kUniformProb * kChangeProb; } return zn1 / (zn0 + zn1); } float cos_min_triangulation_angle_; float inv_incident_angle_sigma_square_; float inv_ncc_sigma_square_; float ncc_norm_factor_; }; __global__ void InitNormalMap(GpuMat<float> normal_map, GpuMat<hiprandState_t> rand_state_map) { const int row = blockDim.y * blockIdx.y + threadIdx.y; const int col = blockDim.x * blockIdx.x + threadIdx.x; if (col < normal_map.GetWidth() && row < normal_map.GetHeight()) { hiprandState_t rand_state = rand_state_map.Get(row, col); float normal[3]; GenerateRandomNormal(row, col, &rand_state, normal); normal_map.SetSlice(row, col, normal); rand_state_map.Set(row, col, rand_state); } } // Rotate normals by 90deg around z-axis in counter-clockwise direction. 
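// (Illustrative note, not part of the original source: when the image grid is
// rotated by 90 degrees, a per-pixel normal (nx, ny, nz) expressed in the
// rotated camera frame becomes (ny, -nx, nz), which is the per-element update
// applied by the kernel below; the z-component along the viewing direction is
// unchanged.)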
__global__ void RotateNormalMap(GpuMat<float> normal_map) { const int row = blockDim.y * blockIdx.y + threadIdx.y; const int col = blockDim.x * blockIdx.x + threadIdx.x; if (col < normal_map.GetWidth() && row < normal_map.GetHeight()) { float normal[3]; normal_map.GetSlice(row, col, normal); float rotated_normal[3]; rotated_normal[0] = normal[1]; rotated_normal[1] = -normal[0]; rotated_normal[2] = normal[2]; normal_map.SetSlice(row, col, rotated_normal); } } template <int kWindowSize> __global__ void ComputeInitialCost(GpuMat<float> cost_map, const GpuMat<float> depth_map, const GpuMat<float> normal_map, const GpuMat<float> ref_sum_image, const GpuMat<float> ref_squared_sum_image, const float sigma_spatial, const float sigma_color) { const int thread_id = threadIdx.x; const int col = blockDim.x * blockIdx.x + threadIdx.x; __shared__ float local_ref_image[THREADS_PER_BLOCK * 3 * kWindowSize]; PhotoConsistencyCostComputer<kWindowSize> pcc_computer; pcc_computer.local_ref_image = local_ref_image; pcc_computer.ref_image_width = cost_map.GetWidth(); pcc_computer.ref_image_height = cost_map.GetHeight(); pcc_computer.row = 0; pcc_computer.col = col; pcc_computer.sigma_spatial = sigma_spatial; pcc_computer.sigma_color = sigma_color; float normal[3]; pcc_computer.normal = normal; for (int row = 0; row < cost_map.GetHeight(); ++row) { // Note that this must be executed even for pixels outside the borders, // since pixels are used in the local neighborhood of the current pixel. ReadRefImageIntoSharedMemory<kWindowSize>(local_ref_image, row, col, thread_id); if (col < cost_map.GetWidth()) { pcc_computer.depth = depth_map.Get(row, col); normal_map.GetSlice(row, col, normal); pcc_computer.local_ref_sum = ref_sum_image.Get(row, col); pcc_computer.local_ref_squared_sum = ref_squared_sum_image.Get(row, col); for (int image_id = 0; image_id < cost_map.GetDepth(); ++image_id) { pcc_computer.src_image_id = image_id; cost_map.Set(row, col, image_id, pcc_computer.Compute()); } pcc_computer.row += 1; } } } struct SweepOptions { float depth_min = 0.0f; float depth_max = 1.0f; int num_samples = 15; float sigma_spatial = 3.0f; float sigma_color = 0.3f; float ncc_sigma = 0.6f; float min_triangulation_angle = 0.5f; float incident_angle_sigma = 0.9f; float prev_sel_prob_weight = 0.0f; float geom_consistency_regularizer = 0.1f; float geom_consistency_max_cost = 5.0f; float filter_min_ncc = 0.1f; float filter_min_triangulation_angle = 3.0f; int filter_min_num_consistent = 2; float filter_geom_consistency_max_cost = 1.0f; }; template <int kWindowSize, bool kGeomConsistencyTerm = false, bool kFilterPhotoConsistency = false, bool kFilterGeomConsistency = false> __global__ void SweepFromTopToBottom( GpuMat<float> global_workspace, GpuMat<hiprandState_t> rand_state_map, GpuMat<float> cost_map, GpuMat<float> depth_map, GpuMat<float> normal_map, GpuMat<uint8_t> consistency_mask, GpuMat<float> sel_prob_map, const GpuMat<float> prev_sel_prob_map, const GpuMat<float> ref_sum_image, const GpuMat<float> ref_squared_sum_image, const SweepOptions options) { const int thread_id = threadIdx.x; const int col = blockDim.x * blockIdx.x + threadIdx.x; // Probability for boundary pixels. 
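  // (Illustrative overview, not part of the original source: per source image
  // the sweep runs a two-state chain along the column. The backward messages
  // beta are computed first for all rows, the forward messages alpha are then
  // updated row by row, and the per-image selection probability is combined
  // as alpha * beta / (alpha * beta + (1 - alpha) * (1 - beta)) inside
  // LikelihoodComputer::ComputeSelProb. The global workspace stores, per
  // column, one forward message and one sampling probability per source
  // image.)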
const float kUniformProb = 0.5f; LikelihoodComputer likelihood_computer(options.ncc_sigma, options.min_triangulation_angle, options.incident_angle_sigma); float* forward_message = &global_workspace.GetPtr()[col * global_workspace.GetHeight()]; float* sampling_probs = &global_workspace.GetPtr()[global_workspace.GetWidth() * global_workspace.GetHeight() + col * global_workspace.GetHeight()]; ////////////////////////////////////////////////////////////////////////////// // Compute backward message for all rows. Note that the backward messages are // temporarily stored in the sel_prob_map and replaced row by row as the // updated forward messages are computed further below. ////////////////////////////////////////////////////////////////////////////// if (col < cost_map.GetWidth()) { for (int image_id = 0; image_id < cost_map.GetDepth(); ++image_id) { // Compute backward message. float beta = kUniformProb; for (int row = cost_map.GetHeight() - 1; row >= 0; --row) { const float cost = cost_map.Get(row, col, image_id); beta = likelihood_computer.ComputeBackwardMessage(cost, beta); sel_prob_map.Set(row, col, image_id, beta); } // Initialize forward message. forward_message[image_id] = kUniformProb; } } ////////////////////////////////////////////////////////////////////////////// // Estimate parameters for remaining rows and compute selection probabilities. ////////////////////////////////////////////////////////////////////////////// // Shared memory holding local patch around current position for one warp. // Contains 3 vertical stripes of height kWindowSize, that are reused within // one warp for NCC computation. Note that this limits the maximum window // size to 2 * THREADS_PER_BLOCK + 1. __shared__ float local_ref_image[THREADS_PER_BLOCK * 3 * kWindowSize]; PhotoConsistencyCostComputer<kWindowSize> pcc_computer; pcc_computer.local_ref_image = local_ref_image; pcc_computer.ref_image_width = cost_map.GetWidth(); pcc_computer.ref_image_height = cost_map.GetHeight(); pcc_computer.col = col; pcc_computer.sigma_spatial = options.sigma_spatial; pcc_computer.sigma_color = options.sigma_color; struct ParamState { float depth = 0.0f; float normal[3]; }; // Parameters of previous pixel in column. ParamState prev_param_state; // Parameters of current pixel in column. ParamState curr_param_state; // Randomly sampled parameters. ParamState rand_param_state; // Cuda PRNG state for random sampling. hiprandState_t rand_state; if (col < cost_map.GetWidth()) { // Read random state for current column. rand_state = rand_state_map.Get(0, col); // Parameters for first row in column. prev_param_state.depth = depth_map.Get(0, col); normal_map.GetSlice(0, col, prev_param_state.normal); } for (int row = 0; row < cost_map.GetHeight(); ++row) { // Note that this must be executed even for pixels outside the borders, // since pixels are used in the local neighborhood of the current pixel. ReadRefImageIntoSharedMemory<kWindowSize>(local_ref_image, row, col, thread_id); if (col >= cost_map.GetWidth()) { continue; } //// //int numLessCosts = 0;//Patchcost //for (int image_id = 0; image_id < cost_map.GetDepth(); ++image_id) //{ // if (cost_map.Get(row, col, image_id) <= 0.4f){ // ++numLessCosts; // } //} //if (numLessCosts >= 2) //{ // // Update previous depth for next row. 
// prev_param_state.depth = depth_map.Get(row, col); // float curNormals[3]; normal_map.GetSlice(row, col, curNormals); // for (int i = 0; i < 3; ++i) { // prev_param_state.normal[i] = curNormals[i]; // } // continue;// //} pcc_computer.row = row; pcc_computer.local_ref_sum = ref_sum_image.Get(row, col); pcc_computer.local_ref_squared_sum = ref_squared_sum_image.Get(row, col); // Propagate the depth at which the current ray intersects with the plane // of the normal of the previous ray. This helps to better estimate // the depth of very oblique structures, i.e. pixels whose normal direction // is significantly different from their viewing direction. prev_param_state.depth = PropagateDepth( prev_param_state.depth, prev_param_state.normal, row - 1, row); // Read parameters for current pixel from previous sweep. curr_param_state.depth = depth_map.Get(row, col); normal_map.GetSlice(row, col, curr_param_state.normal); // Generate random parameters. rand_param_state.depth = GenerateRandomDepth(options.depth_min, options.depth_max, &rand_state); GenerateRandomNormal(row, col, &rand_state, rand_param_state.normal); // Read in the backward message, compute selection probabilities and // modulate selection probabilities with priors. float point[3]; ComputePointAtDepth(row, col, curr_param_state.depth, point); for (int image_id = 0; image_id < cost_map.GetDepth(); ++image_id) { const float cost = cost_map.Get(row, col, image_id); const float alpha = likelihood_computer.ComputeForwardMessage( cost, forward_message[image_id]); const float beta = sel_prob_map.Get(row, col, image_id); const float prev_prob = prev_sel_prob_map.Get(row, col, image_id); const float sel_prob = likelihood_computer.ComputeSelProb( alpha, beta, prev_prob, options.prev_sel_prob_weight); float cos_triangulation_angle; float cos_incident_angle; ComputeViewingAngles(point, curr_param_state.normal, image_id, &cos_triangulation_angle, &cos_incident_angle); const float tri_prob = likelihood_computer.ComputeTriProb(cos_triangulation_angle); const float inc_prob = likelihood_computer.ComputeIncProb(cos_incident_angle); float H[9]; ComposeHomography(image_id, row, col, curr_param_state.depth, curr_param_state.normal, H); const float res_prob = likelihood_computer.ComputeResolutionProb<kWindowSize>(H, row, col); sampling_probs[image_id] = sel_prob * tri_prob * inc_prob * res_prob; } TransformPDFToCDF(sampling_probs, cost_map.GetDepth()); // Compute matching cost using Monte Carlo sampling of source images. Images // with higher selection probability are more likely to be sampled. Hence, // if only very few source images see the reference image pixel, the same // source image is likely to be sampled many times. Instead of taking // the best K probabilities, this sampling scheme has the advantage of // being adaptive to any distribution of selection probabilities. 
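    // (Illustrative example, not part of the original source:
    // TransformPDFToCDF above turns sampling_probs = {0.2, 0.6, 0.2} into the
    // cumulative values {0.2, 0.8, 1.0}; the sampling loop below then picks
    // the first image whose cumulative value exceeds a uniform random number,
    // so source images with larger selection probability are drawn more
    // often.)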
    //const float kPerturbation = 0.02f;
    //const float perturbed_depth =
    //    PerturbDepth(kPerturbation, curr_param_state.depth, &rand_state);
    //float perturbed_normal[3];
    //PerturbNormal(row, col, kPerturbation * M_PI, curr_param_state.normal,
    //              &rand_state, perturbed_normal);

    // Candidate (depth, normal) hypotheses: (current, current),
    // (propagated, previous), (random, random), (current, random) and
    // (random, current).
    const int kNumCosts = 5;
    float costs[kNumCosts];
    const float depths[kNumCosts] = {
        curr_param_state.depth, prev_param_state.depth, rand_param_state.depth,
        curr_param_state.depth, rand_param_state.depth};
    const float* normals[kNumCosts] = {
        curr_param_state.normal, prev_param_state.normal,
        rand_param_state.normal, rand_param_state.normal,
        curr_param_state.normal};

    for (int i = 0; i < kNumCosts; ++i) {
      costs[i] = 0.0f;
    }

    for (int sample = 0; sample < options.num_samples; ++sample) {
      const float rand_prob = hiprand_uniform(&rand_state) - FLT_EPSILON;

      pcc_computer.src_image_id = -1;
      for (int image_id = 0; image_id < cost_map.GetDepth(); ++image_id) {
        const float prob = sampling_probs[image_id];
        if (prob > rand_prob) {
          pcc_computer.src_image_id = image_id;
          break;
        }
      }

      if (pcc_computer.src_image_id == -1) {
        continue;
      }

      costs[0] += cost_map.Get(row, col, pcc_computer.src_image_id);
      if (kGeomConsistencyTerm) {
        costs[0] += options.geom_consistency_regularizer *
                    ComputeGeomConsistencyCost(
                        row, col, depths[0], pcc_computer.src_image_id,
                        options.geom_consistency_max_cost);
      }

      for (int i = 1; i < kNumCosts; ++i) {
        pcc_computer.depth = depths[i];
        pcc_computer.normal = normals[i];
        costs[i] += pcc_computer.Compute();
        if (kGeomConsistencyTerm) {
          costs[i] += options.geom_consistency_regularizer *
                      ComputeGeomConsistencyCost(
                          row, col, depths[i], pcc_computer.src_image_id,
                          options.geom_consistency_max_cost);
        }
      }
    }

    // Find the parameters of the minimum cost.
    const int min_cost_idx = FindMinCost<kNumCosts>(costs);
    const float best_depth = depths[min_cost_idx];
    const float* best_normal = normals[min_cost_idx];

    // Save best new parameters.
    depth_map.Set(row, col, best_depth);
    normal_map.SetSlice(row, col, best_normal);

    // Use the new cost to recompute the updated forward message and
    // the selection probability.
    pcc_computer.depth = best_depth;
    pcc_computer.normal = best_normal;
    for (int image_id = 0; image_id < cost_map.GetDepth(); ++image_id) {
      // Determine the cost for best depth.
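      // (Illustrative note, not part of the original source: if the current
      // hypothesis wins (min_cost_idx == 0), the photometric cost cached in
      // cost_map can be reused; otherwise the cost is recomputed for the new
      // best depth/normal and written back to cost_map, so that later rows
      // and sweeps see the cost of the stored parameters.)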
float cost; if (min_cost_idx == 0) { cost = cost_map.Get(row, col, image_id); } else { pcc_computer.src_image_id = image_id; cost = pcc_computer.Compute(); cost_map.Set(row, col, image_id, cost); } const float alpha = likelihood_computer.ComputeForwardMessage( cost, forward_message[image_id]); const float beta = sel_prob_map.Get(row, col, image_id); const float prev_prob = prev_sel_prob_map.Get(row, col, image_id); const float prob = likelihood_computer.ComputeSelProb( alpha, beta, prev_prob, options.prev_sel_prob_weight); forward_message[image_id] = alpha; sel_prob_map.Set(row, col, image_id, prob); } if (kFilterPhotoConsistency || kFilterGeomConsistency) { int num_consistent = 0; float best_point[3]; ComputePointAtDepth(row, col, best_depth, best_point); const float min_ncc_prob = likelihood_computer.ComputeNCCProb(1.0f - options.filter_min_ncc); const float cos_min_triangulation_angle = cos(options.filter_min_triangulation_angle); for (int image_id = 0; image_id < cost_map.GetDepth(); ++image_id) { float cos_triangulation_angle; float cos_incident_angle; ComputeViewingAngles(best_point, best_normal, image_id, &cos_triangulation_angle, &cos_incident_angle); if (cos_triangulation_angle > cos_min_triangulation_angle || cos_incident_angle <= 0.0f) { continue; } if (!kFilterGeomConsistency) { if (sel_prob_map.Get(row, col, image_id) >= min_ncc_prob) { consistency_mask.Set(row, col, image_id, 1); num_consistent += 1; } } else if (!kFilterPhotoConsistency) { if (ComputeGeomConsistencyCost(row, col, best_depth, image_id, options.geom_consistency_max_cost) <= options.filter_geom_consistency_max_cost) { consistency_mask.Set(row, col, image_id, 1); num_consistent += 1; } } else { if (sel_prob_map.Get(row, col, image_id) >= min_ncc_prob && ComputeGeomConsistencyCost(row, col, best_depth, image_id, options.geom_consistency_max_cost) <= options.filter_geom_consistency_max_cost) { consistency_mask.Set(row, col, image_id, 1); num_consistent += 1; } } } if (num_consistent < options.filter_min_num_consistent) { const float kFilterValue = 0.0f; depth_map.Set(row, col, kFilterValue); normal_map.Set(row, col, 0, kFilterValue); normal_map.Set(row, col, 1, kFilterValue); normal_map.Set(row, col, 2, kFilterValue); for (int image_id = 0; image_id < cost_map.GetDepth(); ++image_id) { consistency_mask.Set(row, col, image_id, 0); } } } // Update previous depth for next row. 
prev_param_state.depth = best_depth; for (int i = 0; i < 3; ++i) { prev_param_state.normal[i] = best_normal[i]; } } if (col < cost_map.GetWidth()) { rand_state_map.Set(0, col, rand_state); } } PatchMatchCuda::PatchMatchCuda(const PatchMatch::Options& options, const PatchMatch::Problem& problem) : options_(options), problem_(problem), ref_width_(0), ref_height_(0), rotation_in_half_pi_(0) { SetBestCudaDevice(options_.gpu_index); InitRefImage(); InitSourceImages(); InitTransforms(); InitWorkspaceMemory(); } PatchMatchCuda::~PatchMatchCuda() { for (size_t i = 0; i < 4; ++i) { poses_device_[i].reset(); } } void PatchMatchCuda::Run() { #define CALL_RUN_FUNC(window_radius) \ case window_radius: \ RunWithWindowSize<2 * window_radius + 1>(); \ break; switch (options_.window_radius) { CALL_RUN_FUNC(1) CALL_RUN_FUNC(2) CALL_RUN_FUNC(3) CALL_RUN_FUNC(4) CALL_RUN_FUNC(5) CALL_RUN_FUNC(6) CALL_RUN_FUNC(7) CALL_RUN_FUNC(8) CALL_RUN_FUNC(9) CALL_RUN_FUNC(10) CALL_RUN_FUNC(11) CALL_RUN_FUNC(12) CALL_RUN_FUNC(13) CALL_RUN_FUNC(14) CALL_RUN_FUNC(15) CALL_RUN_FUNC(16) CALL_RUN_FUNC(17) CALL_RUN_FUNC(18) CALL_RUN_FUNC(19) CALL_RUN_FUNC(20) default: { std::cerr << "Error: Window size not supported" << std::endl; break; } } #undef CALL_RUN_FUNC } DepthMap PatchMatchCuda::GetDepthMap() const { return DepthMap(depth_map_->CopyToMat(), options_.depth_min, options_.depth_max); } NormalMap PatchMatchCuda::GetNormalMap() const { return NormalMap(normal_map_->CopyToMat()); } Mat<float> PatchMatchCuda::GetSelProbMap() const { return prev_sel_prob_map_->CopyToMat(); } std::vector<int> PatchMatchCuda::GetConsistentImageIds() const { const Mat<uint8_t> mask = consistency_mask_->CopyToMat(); std::vector<int> consistent_image_ids; std::vector<int> pixel_consistent_image_ids; pixel_consistent_image_ids.reserve(mask.GetDepth()); for (size_t r = 0; r < mask.GetHeight(); ++r) { for (size_t c = 0; c < mask.GetWidth(); ++c) { pixel_consistent_image_ids.clear(); for (size_t d = 0; d < mask.GetDepth(); ++d) { if (mask.Get(r, c, d)) { pixel_consistent_image_ids.push_back(problem_.src_img_ids[d]); } } if (pixel_consistent_image_ids.size() > 0) { consistent_image_ids.push_back(c); consistent_image_ids.push_back(r); consistent_image_ids.push_back(pixel_consistent_image_ids.size()); consistent_image_ids.insert(consistent_image_ids.end(), pixel_consistent_image_ids.begin(), pixel_consistent_image_ids.end()); } } } return consistent_image_ids; } template <int kWindowSize> void PatchMatchCuda::RunWithWindowSize() { CudaTimer total_timer; CudaTimer init_timer; ComputeCudaConfig(); ComputeInitialCost<kWindowSize> << <sweep_grid_size_, sweep_block_size_ >> > ( *cost_map_, *depth_map_, *normal_map_, *ref_image_->sum_image, *ref_image_->squared_sum_image, options_.sigma_spatial, options_.sigma_color); CUDA_CHECK_ERROR(); init_timer.Print("Initialization"); const int num_iterations = options_.geom_consistency ? 
options_.num_geometric_iterations : options_.num_photometric_iteratoins; const float total_num_steps = num_iterations * 4; SweepOptions sweep_options; sweep_options.depth_min = options_.depth_min; sweep_options.depth_max = options_.depth_max; sweep_options.sigma_spatial = options_.sigma_spatial; sweep_options.sigma_color = options_.sigma_color; sweep_options.num_samples = options_.num_samples; sweep_options.ncc_sigma = options_.ncc_sigma; sweep_options.min_triangulation_angle = DEG2RAD(options_.min_triangulation_angle); sweep_options.incident_angle_sigma = options_.incident_angle_sigma; sweep_options.geom_consistency_regularizer = options_.geom_consistency_regularizer; sweep_options.geom_consistency_max_cost = options_.geom_consistency_max_cost; sweep_options.filter_min_ncc = options_.filter_min_ncc; sweep_options.filter_min_triangulation_angle = DEG2RAD(options_.filter_min_triangulation_angle); sweep_options.filter_min_num_consistent = options_.filter_min_num_consistent; sweep_options.filter_geom_consistency_max_cost = options_.filter_geom_consistency_max_cost; // map if (bOutPutMessage && problem_.ref_img_id == 0) //if (bOutPutMessage) { if (!options_.geom_consistency) { cv::imwrite("df_0_0.jpg", GetDepthMap().ToBitmap(2, 98)); cv::imwrite("nf_0_0.jpg", GetNormalMap().ToBitmap()); } else { cv::imwrite("dg_0_0.jpg", GetDepthMap().ToBitmap(2, 98)); cv::imwrite("ng_0_0.jpg", GetNormalMap().ToBitmap()); } } for (int iter = 0; iter < num_iterations; ++iter) { CudaTimer iter_timer; for (int sweep = 0; sweep < 4; ++sweep) { CudaTimer sweep_timer; sweep_options.prev_sel_prob_weight = static_cast<float>(iter * 4 + sweep) / total_num_steps; const bool last_sweep = iter == num_iterations - 1 && sweep == 3; #define CALL_SWEEP_FUNC \ hipLaunchKernelGGL(( SweepFromTopToBottom<kWindowSize, kGeomConsistencyTerm, \ kFilterPhotoConsistency, kFilterGeomConsistency>) \ , dim3(sweep_grid_size_), dim3(sweep_block_size_), 0, 0, \ *global_workspace_, *rand_state_map_, *cost_map_, *depth_map_, \ *normal_map_, *consistency_mask_, *sel_prob_map_, \ *prev_sel_prob_map_, *ref_image_->sum_image, \ *ref_image_->squared_sum_image, sweep_options); if (last_sweep) { if (options_.filter) { consistency_mask_.reset(new GpuMat<uint8_t>(cost_map_->GetWidth(), cost_map_->GetHeight(), cost_map_->GetDepth())); consistency_mask_->FillWithScalar(0); } if (options_.geom_consistency) { const bool kGeomConsistencyTerm = true; if (options_.filter) { const bool kFilterPhotoConsistency = true; const bool kFilterGeomConsistency = true; CALL_SWEEP_FUNC } else { const bool kFilterPhotoConsistency = false; const bool kFilterGeomConsistency = false; CALL_SWEEP_FUNC } } else { const bool kGeomConsistencyTerm = false; if (options_.filter) { const bool kFilterPhotoConsistency = true; const bool kFilterGeomConsistency = false; CALL_SWEEP_FUNC } else { const bool kFilterPhotoConsistency = false; const bool kFilterGeomConsistency = false; CALL_SWEEP_FUNC } } } else { const bool kFilterPhotoConsistency = false; const bool kFilterGeomConsistency = false; if (options_.geom_consistency) { const bool kGeomConsistencyTerm = true; CALL_SWEEP_FUNC } else { const bool kGeomConsistencyTerm = false; CALL_SWEEP_FUNC } } #undef CALL_SWEEP_FUNC CUDA_CHECK_ERROR(); Rotate(); // Rotate selected image map. 
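      // (Illustrative note, not part of the original source: Rotate() turns
      // all maps by 90 degrees, so the four consecutive top-to-bottom sweeps
      // of one iteration propagate information along all four image
      // directions. This is also why the debug visualizations written below
      // are transposed/flipped back before being saved for all sweeps except
      // the last one of an iteration.)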
if (last_sweep && options_.filter) { std::unique_ptr<GpuMat<uint8_t>> rot_consistency_mask_( new GpuMat<uint8_t>(cost_map_->GetWidth(), cost_map_->GetHeight(), cost_map_->GetDepth())); consistency_mask_->Rotate(rot_consistency_mask_.get()); consistency_mask_.swap(rot_consistency_mask_); } sweep_timer.Print(" Sweep " + std::to_string(sweep + 1)); // map if (bOutPutMessage && problem_.ref_img_id == 0) //if (bOutPutMessage) { cv::Mat depthmapImage, normalmapImage; depthmapImage = GetDepthMap().ToBitmap(2, 98); normalmapImage = GetNormalMap().ToBitmap(); if (sweep != 3) { for (int i = 0; i < sweep + 1; i++) { cv::transpose(depthmapImage, depthmapImage); cv::flip(depthmapImage, depthmapImage, 1);// cv::transpose(normalmapImage, normalmapImage); cv::flip(normalmapImage, normalmapImage, 1); } } char path_1[20]; char path_2[20]; if (!options_.geom_consistency) { sprintf_s(path_1, "df_%d_%d.jpg", iter + 1, sweep + 1); sprintf_s(path_2, "nf_%d_%d.jpg", iter + 1, sweep + 1); } else { sprintf_s(path_1, "dg_%d_%d.jpg", iter + 1, sweep + 1); sprintf_s(path_2, "ng_%d_%d.jpg", iter + 1, sweep + 1); } cv::imwrite(path_1, depthmapImage); cv::imwrite(path_2, normalmapImage); } } iter_timer.Print("Iteration " + std::to_string(iter + 1)); } total_timer.Print("Total"); } void PatchMatchCuda::ComputeCudaConfig() { sweep_block_size_.x = THREADS_PER_BLOCK; sweep_block_size_.y = 1; sweep_block_size_.z = 1; sweep_grid_size_.x = (depth_map_->GetWidth() - 1) / THREADS_PER_BLOCK + 1; sweep_grid_size_.y = 1; sweep_grid_size_.z = 1; elem_wise_block_size_.x = THREADS_PER_BLOCK; elem_wise_block_size_.y = THREADS_PER_BLOCK; elem_wise_block_size_.z = 1; elem_wise_grid_size_.x = (depth_map_->GetWidth() - 1) / THREADS_PER_BLOCK + 1; elem_wise_grid_size_.y = (depth_map_->GetHeight() - 1) / THREADS_PER_BLOCK + 1; elem_wise_grid_size_.z = 1; } void PatchMatchCuda::InitRefImage() { const Image& ref_image = problem_.images->at(problem_.ref_img_id); ref_width_ = ref_image.GetWidth(); ref_height_ = ref_image.GetHeight(); // Upload to device. ref_image_.reset(new GpuMatRefImage(ref_width_, ref_height_)); cv::Mat ref_image_mat = ref_image.GetBitmap(); std::vector<uint8_t> ref_image_array(ref_height_*ref_width_); ref_image_array.assign(ref_image_mat.datastart, ref_image_mat.dataend); ref_image_->Filter(ref_image_array.data(), options_.window_radius, options_.sigma_spatial, options_.sigma_color); ref_image_device_.reset( new CudaArrayWrapper<uint8_t>(ref_width_, ref_height_, 1)); ref_image_device_->CopyFromGpuMat(*ref_image_->image); //cuda //const hipChannelFormatDesc channelDesc = hipCreateChannelDesc<uint8_t>(); // Create texture. ref_image_texture.addressMode[0] = hipAddressModeBorder; ref_image_texture.addressMode[1] = hipAddressModeBorder; ref_image_texture.addressMode[2] = hipAddressModeBorder; ref_image_texture.filterMode = hipFilterModePoint; ref_image_texture.normalized = false; checkCudaErrors(hipBindTextureToArray(ref_image_texture, ref_image_device_->GetPtr())); } void PatchMatchCuda::InitSourceImages() { // Determine maximum image size. size_t max_width = 0; size_t max_height = 0; for (const auto image_id : problem_.src_img_ids) { const Image& image = problem_.images->at(image_id); if (image.GetWidth() > max_width) { max_width = image.GetWidth(); } if (image.GetHeight() > max_height) { max_height = image.GetHeight(); } } // Upload source images to device. { // Copy source images to contiguous memory block. 
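    // (Illustrative note, not part of the original source: all source images
    // are packed into a single max_width x max_height x num_images block,
    // with each row padded to max_width, so that they can be uploaded as one
    // layered array and sampled through src_images_texture.)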
const uint8_t kDefaultValue = 0; std::vector<uint8_t> src_images_host_data( static_cast<size_t>(max_width * max_height * problem_.src_img_ids.size()), kDefaultValue); for (size_t i = 0; i < problem_.src_img_ids.size(); ++i) { const Image& image = problem_.images->at(problem_.src_img_ids[i]); const cv::Mat &bitmap = image.GetBitmap(); uint8_t* dest = src_images_host_data.data() + max_width * max_height * i; for (size_t r = 0; r < image.GetHeight(); ++r) { memcpy(dest, bitmap.ptr<uchar>(r), image.GetWidth() * sizeof(uint8_t)); dest += max_width; } } // Upload to device. src_images_device_.reset(new CudaArrayWrapper<uint8_t>( max_width, max_height, problem_.src_img_ids.size())); src_images_device_->CopyToDevice(src_images_host_data.data()); //const hipChannelFormatDesc channelDesc = hipCreateChannelDesc<uint8_t>(); // Create source images texture. src_images_texture.addressMode[0] = hipAddressModeBorder; src_images_texture.addressMode[1] = hipAddressModeBorder; src_images_texture.addressMode[2] = hipAddressModeBorder; src_images_texture.filterMode = hipFilterModeLinear; src_images_texture.normalized = false; checkCudaErrors(hipBindTextureToArray(src_images_texture, src_images_device_->GetPtr())); } // Upload source depth maps to device. if (options_.geom_consistency) { const float kDefaultValue = 0.0f; std::vector<float> src_depth_maps_host_data( static_cast<size_t>(max_width * max_height * problem_.src_img_ids.size()), kDefaultValue); for (size_t i = 0; i < problem_.src_img_ids.size(); ++i) { const DepthMap& depth_map = problem_.depth_maps->at(problem_.src_img_ids[i]); float* dest = src_depth_maps_host_data.data() + max_width * max_height * i; for (size_t r = 0; r < depth_map.GetHeight(); ++r) { memcpy(dest, depth_map.GetPtr() + r * depth_map.GetWidth(), depth_map.GetWidth() * sizeof(float)); dest += max_width; } } src_depth_maps_device_.reset(new CudaArrayWrapper<float>( max_width, max_height, problem_.src_img_ids.size())); src_depth_maps_device_->CopyToDevice(src_depth_maps_host_data.data()); //const hipChannelFormatDesc channelDesc1 = hipCreateChannelDesc<float>(); // Create source depth maps texture. src_depth_maps_texture.addressMode[0] = hipAddressModeBorder; src_depth_maps_texture.addressMode[1] = hipAddressModeBorder; src_depth_maps_texture.addressMode[2] = hipAddressModeBorder; // TODO: Check if linear interpolation improves results or not. src_depth_maps_texture.filterMode = hipFilterModePoint; src_depth_maps_texture.normalized = false; checkCudaErrors(hipBindTextureToArray(src_depth_maps_texture, src_depth_maps_device_->GetPtr())); } } void PatchMatchCuda::InitTransforms() { const Image& ref_image = problem_.images->at(problem_.ref_img_id); ////////////////////////////////////////////////////////////////////////////// // Generate rotated versions (counter-clockwise) of calibration matrix. ////////////////////////////////////////////////////////////////////////////// for (size_t i = 0; i < 4; ++i) { ref_K_host_[i][0] = ref_image.GetK()[0]; ref_K_host_[i][1] = ref_image.GetK()[2]; ref_K_host_[i][2] = ref_image.GetK()[4]; ref_K_host_[i][3] = ref_image.GetK()[5]; } // Rotated by 90 degrees. std::swap(ref_K_host_[1][0], ref_K_host_[1][2]); std::swap(ref_K_host_[1][1], ref_K_host_[1][3]); ref_K_host_[1][3] = ref_width_ - 1 - ref_K_host_[1][3]; // Rotated by 180 degrees. ref_K_host_[2][1] = ref_width_ - 1 - ref_K_host_[2][1]; ref_K_host_[2][3] = ref_height_ - 1 - ref_K_host_[2][3]; // Rotated by 270 degrees. 
std::swap(ref_K_host_[3][0], ref_K_host_[3][2]); std::swap(ref_K_host_[3][1], ref_K_host_[3][3]); ref_K_host_[3][1] = ref_height_ - 1 - ref_K_host_[3][1]; // Extract 1/fx, -cx/fx, fy, -cy/fy. for (size_t i = 0; i < 4; ++i) { ref_inv_K_host_[i][0] = 1.0f / ref_K_host_[i][0]; ref_inv_K_host_[i][1] = -ref_K_host_[i][1] / ref_K_host_[i][0]; ref_inv_K_host_[i][2] = 1.0f / ref_K_host_[i][2]; ref_inv_K_host_[i][3] = -ref_K_host_[i][3] / ref_K_host_[i][2]; } // Bind 0 degrees version to constant global memory. checkCudaErrors(hipMemcpyToSymbol(ref_K, ref_K_host_[0], sizeof(float) * 4, 0, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyToSymbol(ref_inv_K, ref_inv_K_host_[0], sizeof(float) * 4, 0, hipMemcpyHostToDevice)); ////////////////////////////////////////////////////////////////////////////// // Generate rotated versions of camera poses. ////////////////////////////////////////////////////////////////////////////// float rotated_R[9]; memcpy(rotated_R, ref_image.GetR(), 9 * sizeof(float)); float rotated_T[3]; memcpy(rotated_T, ref_image.GetT(), 3 * sizeof(float)); // Matrix for 90deg rotation around Z-axis in counter-clockwise direction. const float R_z90[9] = { 0, 1, 0, -1, 0, 0, 0, 0, 1 }; for (size_t i = 0; i < 4; ++i) { const size_t kNumTformParams = 4 + 9 + 3 + 3 + 12 + 12; std::vector<float> poses_host_data(kNumTformParams * problem_.src_img_ids.size()); int offset = 0; for (const auto image_id : problem_.src_img_ids) { const Image& image = problem_.images->at(image_id); const float K[4] = { image.GetK()[0], image.GetK()[2], image.GetK()[4], image.GetK()[5] }; memcpy(poses_host_data.data() + offset, K, 4 * sizeof(float)); offset += 4; float rel_R[9]; float rel_T[3]; ComputeRelativePose(rotated_R, rotated_T, image.GetR(), image.GetT(), rel_R, rel_T); memcpy(poses_host_data.data() + offset, rel_R, 9 * sizeof(float)); offset += 9; memcpy(poses_host_data.data() + offset, rel_T, 3 * sizeof(float)); offset += 3; float C[3]; ComputeProjectionCenter(rel_R, rel_T, C); memcpy(poses_host_data.data() + offset, C, 3 * sizeof(float)); offset += 3; float P[12]; ComposeProjectionMatrix(image.GetK(), rel_R, rel_T, P); memcpy(poses_host_data.data() + offset, P, 12 * sizeof(float)); offset += 12; float inv_P[12]; ComposeInverseProjectionMatrix(image.GetK(), rel_R, rel_T, inv_P); memcpy(poses_host_data.data() + offset, inv_P, 12 * sizeof(float)); offset += 12; } poses_device_[i].reset(new CudaArrayWrapper<float>( kNumTformParams, problem_.src_img_ids.size(), 1)); poses_device_[i]->CopyToDevice(poses_host_data.data()); RotatePose(R_z90, rotated_R, rotated_T); } //const hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); poses_texture.addressMode[0] = hipAddressModeBorder; poses_texture.addressMode[1] = hipAddressModeBorder; poses_texture.addressMode[2] = hipAddressModeBorder; poses_texture.filterMode = hipFilterModePoint; poses_texture.normalized = false; checkCudaErrors( hipBindTextureToArray(poses_texture, poses_device_[0]->GetPtr())); } __global__ void InitSparse(GpuMat<float> depth_map, GpuMat<float> normal_map, GpuMat<float> prev_sel_prob_map, float *sparsePoints, float *sparseNormals, int *tracks, const size_t num, const size_t trackNum) { for (int i = 0, j = 0; i < num; i = i + 3, j++) { const float sp[3] = { sparsePoints[i], sparsePoints[i + 1], sparsePoints[i + 2] }; const float sn[3] = { sparseNormals[i], sparseNormals[i + 1], sparseNormals[i + 2] }; depth_map.Set(sp[0], sp[1], sp[2]); normal_map.SetSlice(sp[0], sp[1], sn); for (int k = j + 1; tracks[k] != -1; k++)//-1-1-1-1 { 
prev_sel_prob_map.Set(sp[0], sp[1], tracks[k], 1.0f); } } } void PatchMatchCuda::InitWorkspaceMemory() { rand_state_map_.reset(new GpuMatPRNG(ref_width_, ref_height_)); //map depth_map_.reset(new GpuMat<float>(ref_width_, ref_height_)); if (options_.geom_consistency) { const DepthMap& init_depth_map = problem_.depth_maps->at(problem_.ref_img_id); depth_map_->CopyToDevice(init_depth_map.GetPtr(), init_depth_map.GetWidth() * sizeof(float)); } else { depth_map_->FillWithRandomNumbers(options_.depth_min, options_.depth_max, *rand_state_map_); } normal_map_.reset(new GpuMat<float>(ref_width_, ref_height_, 3)); // Note that it is not necessary to keep the selection probability map in // memory for all pixels. Theoretically, it is possible to incorporate // the temporary selection probabilities in the global_workspace_. // However, it is useful to keep the probabilities for the entire image // in memory, so that it can be exported. sel_prob_map_.reset(new GpuMat<float>(ref_width_, ref_height_, problem_.src_img_ids.size())); prev_sel_prob_map_.reset(new GpuMat<float>(ref_width_, ref_height_, problem_.src_img_ids.size())); prev_sel_prob_map_->FillWithScalar(0.5f); cost_map_.reset(new GpuMat<float>(ref_width_, ref_height_, problem_.src_img_ids.size())); const int ref_max_dim = ::max(ref_width_, ref_height_); global_workspace_.reset( new GpuMat<float>(ref_max_dim, problem_.src_img_ids.size(), 2)); consistency_mask_.reset(new GpuMat<uint8_t>(0, 0, 0)); ComputeCudaConfig(); // if (options_.geom_consistency) { const NormalMap& init_normal_map = problem_.normal_maps->at(problem_.ref_img_id); normal_map_->CopyToDevice(init_normal_map.GetPtr(), init_normal_map.GetWidth() * sizeof(float)); } else { InitNormalMap << <elem_wise_grid_size_, elem_wise_block_size_ >> > ( *normal_map_, *rand_state_map_); } if (!options_.geom_consistency && options_.bUse_sparse_points) { const size_t size = problem_.sparsePoints.size(); const size_t trackNum = problem_.tracks.size(); float *sparsePoints, *sparseNormals; int *tracks; checkCudaErrors(hipMalloc((void **)&sparsePoints, size * sizeof(float))); checkCudaErrors(hipMemcpy(sparsePoints, &problem_.sparsePoints[0], size * sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc((void **)&sparseNormals, size * sizeof(float))); checkCudaErrors(hipMemcpy(sparseNormals, &problem_.sparseNormals[0], size * sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc((void **)&tracks, trackNum * sizeof(int))); checkCudaErrors(hipMemcpy(tracks, &problem_.tracks[0], trackNum * sizeof(int), hipMemcpyHostToDevice)); dim3 block(1, 1, 1); InitSparse << <1, block >> > (*depth_map_, *normal_map_, *prev_sel_prob_map_, sparsePoints, sparseNormals, tracks, size, trackNum); //CUDA_CHECK_ERROR(); checkCudaErrors(hipFree(sparsePoints)); checkCudaErrors(hipFree(sparseNormals)); checkCudaErrors(hipFree(tracks)); } } void PatchMatchCuda::Rotate() { rotation_in_half_pi_ = (rotation_in_half_pi_ + 1) % 4; size_t width; size_t height; if (rotation_in_half_pi_ % 2 == 0) { width = ref_width_; height = ref_height_; } else { width = ref_height_; height = ref_width_; } // Rotate random map. { std::unique_ptr<GpuMatPRNG> rotated_rand_state_map( new GpuMatPRNG(width, height)); rand_state_map_->Rotate(rotated_rand_state_map.get()); rand_state_map_.swap(rotated_rand_state_map); } // Rotate depth map. { std::unique_ptr<GpuMat<float>> rotated_depth_map( new GpuMat<float>(width, height)); depth_map_->Rotate(rotated_depth_map.get()); depth_map_.swap(rotated_depth_map); } // Rotate normal map. 
{ RotateNormalMap << <elem_wise_grid_size_, elem_wise_block_size_ >> > ( *normal_map_); std::unique_ptr<GpuMat<float>> rotated_normal_map( new GpuMat<float>(width, height, 3)); normal_map_->Rotate(rotated_normal_map.get()); normal_map_.swap(rotated_normal_map); } // Rotate reference image. { std::unique_ptr<GpuMatRefImage> rotated_ref_image( new GpuMatRefImage(width, height)); ref_image_->image->Rotate(rotated_ref_image->image.get()); ref_image_->sum_image->Rotate(rotated_ref_image->sum_image.get()); ref_image_->squared_sum_image->Rotate( rotated_ref_image->squared_sum_image.get()); ref_image_.swap(rotated_ref_image); } // Bind rotated reference image to texture. ref_image_device_.reset(new CudaArrayWrapper<uint8_t>(width, height, 1)); ref_image_device_->CopyFromGpuMat(*ref_image_->image); //const hipChannelFormatDesc channelDesc = hipCreateChannelDesc<uint8_t>(); checkCudaErrors(hipUnbindTexture(ref_image_texture)); checkCudaErrors( hipBindTextureToArray(ref_image_texture, ref_image_device_->GetPtr())); // Rotate selection probability map. prev_sel_prob_map_.reset( new GpuMat<float>(width, height, problem_.src_img_ids.size())); sel_prob_map_->Rotate(prev_sel_prob_map_.get()); sel_prob_map_.reset( new GpuMat<float>(width, height, problem_.src_img_ids.size())); // Rotate cost map. { std::unique_ptr<GpuMat<float>> rotated_cost_map( new GpuMat<float>(width, height, problem_.src_img_ids.size())); cost_map_->Rotate(rotated_cost_map.get()); cost_map_.swap(rotated_cost_map); } // Rotate transformations. const hipChannelFormatDesc channelDesc1 = hipCreateChannelDesc<float>(); checkCudaErrors(hipUnbindTexture(poses_texture)); checkCudaErrors(hipBindTextureToArray( poses_texture, poses_device_[rotation_in_half_pi_]->GetPtr())); // Rotate calibration. checkCudaErrors(hipMemcpyToSymbol(ref_K, ref_K_host_[rotation_in_half_pi_], sizeof(float) * 4, 0, hipMemcpyHostToDevice)); checkCudaErrors( hipMemcpyToSymbol(ref_inv_K, ref_inv_K_host_[rotation_in_half_pi_], sizeof(float) * 4, 0, hipMemcpyHostToDevice)); // Recompute Cuda configuration for rotated reference image. ComputeCudaConfig(); } } // namespace mvs } // namespace colmap
191072c90abd5baa927214ccef0f6567822c0442.cu
#define _USE_MATH_DEFINES

#include "patch_match_cuda.h"

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstdint>
#include <sstream>

#include "cuda.h"
#include "cudacc.h"

// When debugging, output intermediate information for reference.
bool bOutPutMessage = true;

#include <opencv2\core.hpp>
#include <opencv2\imgproc.hpp>

// The number of threads per Cuda block. Warning: Do not change this value,
// since the templated window sizes rely on this value.
#define THREADS_PER_BLOCK 16

// We must not include "util/math.h" to avoid any Eigen includes here,
// since Visual Studio cannot compile some of the Eigen/Boost expressions.
#ifndef DEG2RAD
#define DEG2RAD(deg) deg * 0.0174532925199432
#endif

namespace colmap {
namespace mvs {

// cudaReadModeNormalizedFloat converts the element type, so values fetched
// with tex2D are normalized to [0, 1].
// cudaReadModeElementType returns the element type unchanged.
// Texture references (see p. 63).
texture<uint8_t, cudaTextureType2D, cudaReadModeNormalizedFloat>
    ref_image_texture;
// 2D layered textures, sampled with linear interpolation.
texture<uint8_t, cudaTextureType2DLayered, cudaReadModeNormalizedFloat>
    src_images_texture;
texture<float, cudaTextureType2DLayered, cudaReadModeElementType>
    src_depth_maps_texture;
texture<float, cudaTextureType2D, cudaReadModeElementType> poses_texture;

// Calibration of reference image as {fx, cx, fy, cy}.
__constant__ float ref_K[4];
// Calibration of reference image as {1/fx, -cx/fx, 1/fy, -cy/fy}.
__constant__ float ref_inv_K[4];

__device__ inline void Mat33DotVec3(const float mat[9], const float vec[3],
                                    float result[3]) {
  result[0] = mat[0] * vec[0] + mat[1] * vec[1] + mat[2] * vec[2];
  result[1] = mat[3] * vec[0] + mat[4] * vec[1] + mat[5] * vec[2];
  result[2] = mat[6] * vec[0] + mat[7] * vec[1] + mat[8] * vec[2];
}

__device__ inline void Mat33DotVec3Homogeneous(const float mat[9],
                                               const float vec[2],
                                               float result[2]) {
  const float inv_z = 1.0f / (mat[6] * vec[0] + mat[7] * vec[1] + mat[8]);
  result[0] = inv_z * (mat[0] * vec[0] + mat[1] * vec[1] + mat[2]);
  result[1] = inv_z * (mat[3] * vec[0] + mat[4] * vec[1] + mat[5]);
}

__device__ inline float DotProduct3(const float vec1[3], const float vec2[3]) {
  return vec1[0] * vec2[0] + vec1[1] * vec2[1] + vec1[2] * vec2[2];
}

__device__ inline float GenerateRandomDepth(const float depth_min,
                                            const float depth_max,
                                            curandState* rand_state) {
  return curand_uniform(rand_state) * (depth_max - depth_min) + depth_min;
}

__device__ inline void GenerateRandomNormal(const int row, const int col,
                                            curandState* rand_state,
                                            float normal[3]) {
  // Unbiased sampling of normal, according to George Marsaglia, "Choosing a
  // Point from the Surface of a Sphere", 1972.
  float v1 = 0.0f;
  float v2 = 0.0f;
  float s = 2.0f;
  while (s >= 1.0f) {
    v1 = 2.0f * curand_uniform(rand_state) - 1.0f;
    v2 = 2.0f * curand_uniform(rand_state) - 1.0f;
    s = v1 * v1 + v2 * v2;
  }

  const float s_norm = sqrt(1.0f - s);
  normal[0] = 2.0f * v1 * s_norm;
  normal[1] = 2.0f * v2 * s_norm;
  normal[2] = 1.0f - 2.0f * s;

  // Make sure normal is looking away from camera.
const float view_ray[3] = { ref_inv_K[0] * col + ref_inv_K[1], ref_inv_K[2] * row + ref_inv_K[3], 1.0f }; if (DotProduct3(normal, view_ray) > 0) { normal[0] = -normal[0]; normal[1] = -normal[1]; normal[2] = -normal[2]; } } __device__ inline float PerturbDepth(const float perturbation, const float depth, curandState* rand_state) { const float depth_min = (1.0f - perturbation) * depth; const float depth_max = (1.0f + perturbation) * depth; return GenerateRandomDepth(depth_min, depth_max, rand_state); } __device__ inline void PerturbNormal(const int row, const int col, const float perturbation, const float normal[3], curandState* rand_state, float perturbed_normal[3], const int num_trials = 0) { // Perturbation rotation angles. const float a1 = (curand_uniform(rand_state) - 0.5f) * perturbation; const float a2 = (curand_uniform(rand_state) - 0.5f) * perturbation; const float a3 = (curand_uniform(rand_state) - 0.5f) * perturbation; const float sin_a1 = sin(a1); const float sin_a2 = sin(a2); const float sin_a3 = sin(a3); const float cos_a1 = cos(a1); const float cos_a2 = cos(a2); const float cos_a3 = cos(a3); // R = Rx * Ry * Rz float R[9]; R[0] = cos_a2 * cos_a3; R[1] = -cos_a2 * sin_a3; R[2] = sin_a2; R[3] = cos_a1 * sin_a3 + cos_a3 * sin_a1 * sin_a2; R[4] = cos_a1 * cos_a3 - sin_a1 * sin_a2 * sin_a3; R[5] = -cos_a2 * sin_a1; R[6] = sin_a1 * sin_a3 - cos_a1 * cos_a3 * sin_a2; R[7] = cos_a3 * sin_a1 + cos_a1 * sin_a2 * sin_a3; R[8] = cos_a1 * cos_a2; // Perturb the normal vector. Mat33DotVec3(R, normal, perturbed_normal); // Make sure the perturbed normal is still looking in the same direction as // the viewing direction. const float view_ray[3] = { ref_inv_K[0] * col + ref_inv_K[1], ref_inv_K[2] * row + ref_inv_K[3], 1.0f }; if (DotProduct3(perturbed_normal, view_ray) >= 0.0f) { const int kMaxNumTrials = 3; if (num_trials < kMaxNumTrials) { PerturbNormal(row, col, 0.5f*perturbation, normal, rand_state, perturbed_normal, num_trials + 1); return; } else { perturbed_normal[0] = normal[0]; perturbed_normal[1] = normal[1]; perturbed_normal[2] = normal[2]; return; } } // Make sure normal has unit norm. const float inv_norm = rsqrt(DotProduct3(perturbed_normal, perturbed_normal)); perturbed_normal[0] *= inv_norm; perturbed_normal[1] *= inv_norm; perturbed_normal[2] *= inv_norm; } __device__ inline void ComputePointAtDepth(const float row, const float col, const float depth, float point[3]) { point[0] = depth * (ref_inv_K[0] * col + ref_inv_K[1]); point[1] = depth * (ref_inv_K[2] * row + ref_inv_K[3]); point[2] = depth; } // Transfer depth on plane from viewing ray at row1 to row2. The returned // depth is the intersection of the viewing ray through row2 with the plane // at row1 defined by the given depth and normal. __device__ inline float PropagateDepth(const float depth1, const float normal1[3], const float row1, const float row2) { // Point along first viewing ray. const float x1 = depth1 * (ref_inv_K[2] * row1 + ref_inv_K[3]); const float y1 = depth1; // Point on plane defined by point along first viewing ray and plane normal1. const float x2 = x1 + normal1[2]; const float y2 = y1 - normal1[1]; // Origin of second viewing ray. // const float x3 = 0.0f; // const float y3 = 0.0f; // Point on second viewing ray. const float x4 = ref_inv_K[2] * row2 + ref_inv_K[3]; // const float y4 = 1.0f; // Intersection of the lines ((x1, y1), (x2, y2)) and ((x3, y3), (x4, y4)). 
const float denom = x2 - x1 + x4 * (y1 - y2); const float kEps = 1e-5f; if (abs(denom) < kEps) { return depth1; } const float nom = y1 * x2 - x1 * y2; return nom / denom; } // First, compute triangulation angle between reference and source image for 3D // point. Second, compute incident angle between viewing direction of source // image and normal direction of 3D point. Both angles are cosine distances. __device__ inline void ComputeViewingAngles(const float point[3], const float normal[3], const int image_id, float* cos_triangulation_angle, float* cos_incident_angle) { *cos_triangulation_angle = 0.0f; *cos_incident_angle = 0.0f; // Projection center of source image. float C[3]; for (int i = 0; i < 3; ++i) { C[i] = tex2D(poses_texture, i + 16, image_id); } // Ray from point to camera. const float SX[3] = { C[0] - point[0], C[1] - point[1], C[2] - point[2] }; // Length of ray from reference image to point. const float RX_inv_norm = rsqrt(DotProduct3(point, point)); // Length of ray from source image to point. const float SX_inv_norm = rsqrt(DotProduct3(SX, SX)); *cos_incident_angle = DotProduct3(SX, normal) * SX_inv_norm; *cos_triangulation_angle = DotProduct3(SX, point) * RX_inv_norm * SX_inv_norm; } __device__ inline void ComposeHomography(const int image_id, const int row, const int col, const float depth, const float normal[3], float H[9]) { // Calibration of source image. float K[4]; for (int i = 0; i < 4; ++i) { K[i] = tex2D(poses_texture, i, image_id); } // Relative rotation between reference and source image. float R[9]; for (int i = 0; i < 9; ++i) { R[i] = tex2D(poses_texture, i + 4, image_id); } // Relative translation between reference and source image. float T[3]; for (int i = 0; i < 3; ++i) { T[i] = tex2D(poses_texture, i + 13, image_id); } // Distance to the plane. const float dist = depth * (normal[0] * (ref_inv_K[0] * col + ref_inv_K[1]) + normal[1] * (ref_inv_K[2] * row + ref_inv_K[3]) + normal[2]); const float inv_dist = 1.0f / dist; const float inv_dist_N0 = inv_dist * normal[0]; const float inv_dist_N1 = inv_dist * normal[1]; const float inv_dist_N2 = inv_dist * normal[2]; // Homography as H = K * (R - T * n' / d) * Kref^-1. H[0] = ref_inv_K[0] * (K[0] * (R[0] + inv_dist_N0 * T[0]) + K[1] * (R[6] + inv_dist_N0 * T[2])); H[1] = ref_inv_K[2] * (K[0] * (R[1] + inv_dist_N1 * T[0]) + K[1] * (R[7] + inv_dist_N1 * T[2])); H[2] = K[0] * (R[2] + inv_dist_N2 * T[0]) + K[1] * (R[8] + inv_dist_N2 * T[2]) + ref_inv_K[1] * (K[0] * (R[0] + inv_dist_N0 * T[0]) + K[1] * (R[6] + inv_dist_N0 * T[2])) + ref_inv_K[3] * (K[0] * (R[1] + inv_dist_N1 * T[0]) + K[1] * (R[7] + inv_dist_N1 * T[2])); H[3] = ref_inv_K[0] * (K[2] * (R[3] + inv_dist_N0 * T[1]) + K[3] * (R[6] + inv_dist_N0 * T[2])); H[4] = ref_inv_K[2] * (K[2] * (R[4] + inv_dist_N1 * T[1]) + K[3] * (R[7] + inv_dist_N1 * T[2])); H[5] = K[2] * (R[5] + inv_dist_N2 * T[1]) + K[3] * (R[8] + inv_dist_N2 * T[2]) + ref_inv_K[1] * (K[2] * (R[3] + inv_dist_N0 * T[1]) + K[3] * (R[6] + inv_dist_N0 * T[2])) + ref_inv_K[3] * (K[2] * (R[4] + inv_dist_N1 * T[1]) + K[3] * (R[7] + inv_dist_N1 * T[2])); H[6] = ref_inv_K[0] * (R[6] + inv_dist_N0 * T[2]); H[7] = ref_inv_K[2] * (R[7] + inv_dist_N1 * T[2]); H[8] = R[8] + ref_inv_K[1] * (R[6] + inv_dist_N0 * T[2]) + ref_inv_K[3] * (R[7] + inv_dist_N1 * T[2]) + inv_dist_N2 * T[2]; } // The return values is 1 - NCC, so the range is [0, 2], the smaller the // value, the better the color consistency. template <int kWindowSize> struct PhotoConsistencyCostComputer { // Image data in local window around patch. 
const float* local_ref_image = nullptr; // Precomputed sum of raw and squared image intensities. float local_ref_sum = 0.0f; float local_ref_squared_sum = 0.0f; // Identifier of source image. int src_image_id = -1; // Center position of patch in reference image. int row = -1; int col = -1; // Parameters for bilateral weighting. float sigma_spatial = 3.0f; float sigma_color = 0.3f; // Depth and normal for which to warp patch. float depth = 0.0f; const float* normal = nullptr; // Dimensions of reference image. int ref_image_width = 0; int ref_image_height = 0; __device__ inline float Compute() const { const float kMaxCost = 2.0f; const int kWindowRadius = kWindowSize / 2; const int thread_id = threadIdx.x; const int row_start = row - kWindowRadius; const int col_start = col - kWindowRadius; const int row_end = row + kWindowRadius; const int col_end = col + kWindowRadius; if (row_start < 0 || col_start < 0 || row_end >= ref_image_height || col_end >= ref_image_width) { return kMaxCost; } float tform[9]; ComposeHomography(src_image_id, row, col, depth, normal, tform); float col_src = tform[0] * col_start + tform[1] * row_start + tform[2]; float row_src = tform[3] * col_start + tform[4] * row_start + tform[5]; float z = tform[6] * col_start + tform[7] * row_start + tform[8]; float base_col_src = col_src; float base_row_src = row_src; float base_z = z; int ref_image_idx = THREADS_PER_BLOCK - kWindowRadius + thread_id; int ref_image_base_idx = ref_image_idx; const float center_ref = local_ref_image[ref_image_idx + kWindowRadius * 3 * THREADS_PER_BLOCK + kWindowRadius]; const float sum_ref = local_ref_sum; const float sum_ref_ref = local_ref_squared_sum; float sum_src = 0.0f; float sum_src_src = 0.0f; float sum_ref_src = 0.0f; float bilateral_weight_sum = 0.0f; for (int row = 0; row < kWindowSize; row++) { // Accumulate values per row to reduce numerical errors. float sum_src_row = 0.0f; float sum_src_src_row = 0.0f; float sum_ref_src_row = 0.0f; float bilateral_weight_sum_row = 0.0f; for (int col = 0; col < kWindowSize; col++) { const float inv_z = 1.0f / z; const float norm_col_src = inv_z * col_src + 0.5f; const float norm_row_src = inv_z * row_src + 0.5f; const float ref = local_ref_image[ref_image_idx]; const float src = tex2DLayered(src_images_texture, norm_col_src, norm_row_src, src_image_id); const float bilateral_weight = ComputeBilateralWeight(kWindowRadius, kWindowRadius, row, col, center_ref, ref, sigma_spatial, sigma_color); sum_src_row += bilateral_weight * src; sum_src_src_row += bilateral_weight * src * src; sum_ref_src_row += bilateral_weight * ref * src; bilateral_weight_sum_row += bilateral_weight; ref_image_idx += 1; col_src += tform[0]; row_src += tform[3]; z += tform[6]; } sum_src += sum_src_row; sum_src_src += sum_src_src_row; sum_ref_src += sum_ref_src_row; bilateral_weight_sum += bilateral_weight_sum_row; ref_image_base_idx += 3 * THREADS_PER_BLOCK; ref_image_idx = ref_image_base_idx; base_col_src += tform[1]; base_row_src += tform[4]; base_z += tform[7]; col_src = base_col_src; row_src = base_row_src; z = base_z; } const float inv_bilateral_weight_sum = 1.0f / bilateral_weight_sum; sum_src *= inv_bilateral_weight_sum; sum_src_src *= inv_bilateral_weight_sum; sum_ref_src *= inv_bilateral_weight_sum; const float var_ref = sum_ref_ref - sum_ref * sum_ref; const float var_src = sum_src_src - sum_src * sum_src; // Based on Jensen's Inequality for convex functions, the variance // should always be larger than 0. Do not make this threshold smaller. 
const float kMinVar = 1e-5f; if (var_ref < kMinVar || var_src < kMinVar) { return kMaxCost; } else { const float covar_src_ref = sum_ref_src - sum_ref * sum_src; const float var_ref_src = sqrt(var_ref * var_src); return max(0.0f, min(kMaxCost, 1.0f - covar_src_ref / var_ref_src)); } } }; __device__ inline float ComputeGeomConsistencyCost(const float row, const float col, const float depth, const int image_id, const float max_cost) { // Extract projection matrices for source image. float P[12]; for (int i = 0; i < 12; ++i) { P[i] = tex2D(poses_texture, i + 19, image_id); } float inv_P[12]; for (int i = 0; i < 12; ++i) { inv_P[i] = tex2D(poses_texture, i + 31, image_id); } // Project point in reference image to world. float forward_point[3]; ComputePointAtDepth(row, col, depth, forward_point); // Project world point to source image. const float inv_forward_z = 1.0f / (P[8] * forward_point[0] + P[9] * forward_point[1] + P[10] * forward_point[2] + P[11]); float src_col = inv_forward_z * (P[0] * forward_point[0] + P[1] * forward_point[1] + P[2] * forward_point[2] + P[3]); float src_row = inv_forward_z * (P[4] * forward_point[0] + P[5] * forward_point[1] + P[6] * forward_point[2] + P[7]); // Extract depth in source image. const float src_depth = tex2DLayered(src_depth_maps_texture, src_col + 0.5f, src_row + 0.5f, image_id); // Projection outside of source image. if (src_depth == 0.0f) { return max_cost; } // Project point in source image to world. src_col *= src_depth; src_row *= src_depth; const float backward_point_x = inv_P[0] * src_col + inv_P[1] * src_row + inv_P[2] * src_depth + inv_P[3]; const float backward_point_y = inv_P[4] * src_col + inv_P[5] * src_row + inv_P[6] * src_depth + inv_P[7]; const float backward_point_z = inv_P[8] * src_col + inv_P[9] * src_row + inv_P[10] * src_depth + inv_P[11]; const float inv_backward_point_z = 1.0f / backward_point_z; // Project world point back to reference image. const float backward_col = inv_backward_point_z * (ref_K[0] * backward_point_x + ref_K[1] * backward_point_z); const float backward_row = inv_backward_point_z * (ref_K[2] * backward_point_y + ref_K[3] * backward_point_z); // Return truncated reprojection error between original observation and // the forward-backward projected observation. const float diff_col = col - backward_col; const float diff_row = row - backward_row; return min(max_cost, sqrt(diff_col * diff_col + diff_row * diff_row)); } // Find index of minimum in given values. template <int kNumCosts> __device__ inline int FindMinCost(const float costs[kNumCosts]) { float min_cost = costs[0]; int min_cost_idx = 0; for (int idx = 1; idx < kNumCosts; ++idx) { if (costs[idx] <= min_cost) { min_cost = costs[idx]; min_cost_idx = idx; } } return min_cost_idx; } template <int kWindowSize> __device__ inline void ReadRefImageIntoSharedMemory(float* local_image, const int row, const int col, const int thread_id) { // For the first row, read the entire block into shared memory. For all // consecutive rows, it is only necessary to shift the rows in shared memory // up by one element and then read in a new row at the bottom of the shared // memory. Note that this assumes that the calling loop starts with the first // row and then consecutively reads in a new row. 
if (row == 0) { int r = row - kWindowSize / 2; for (int i = 0; i < kWindowSize; ++i) { int c = col - THREADS_PER_BLOCK; #pragma unroll // local_image can be viewed as a 2D buffer with kWindowSize rows, each row holding 3 * THREADS_PER_BLOCK values. // Each thread reads three values per row, over the full height of the window. for (int j = 0; j < 3; ++j) { local_image[thread_id + i * 3 * THREADS_PER_BLOCK + j * THREADS_PER_BLOCK] = tex2D(ref_image_texture, c, r); c += THREADS_PER_BLOCK; } r += 1; } } else { // Move rows in shared memory up by one row. for (int i = 1; i < kWindowSize; ++i) { #pragma unroll for (int j = 0; j < 3; ++j) { local_image[thread_id + (i - 1) * 3 * THREADS_PER_BLOCK + j * THREADS_PER_BLOCK] = local_image[thread_id + i * 3 * THREADS_PER_BLOCK + j * THREADS_PER_BLOCK]; } } // Read next row into the last row of shared memory. const int r = row + kWindowSize / 2; int c = col - THREADS_PER_BLOCK; const int i = kWindowSize - 1; #pragma unroll for (int j = 0; j < 3; ++j) { local_image[thread_id + i * 3 * THREADS_PER_BLOCK + j * THREADS_PER_BLOCK] = tex2D(ref_image_texture, c, r); c += THREADS_PER_BLOCK; } } __syncthreads(); } __device__ inline void TransformPDFToCDF(float* probs, const int num_probs) { float prob_sum = 0.0f; for (int i = 0; i < num_probs; ++i) { prob_sum += probs[i]; } const float inv_prob_sum = 1.0f / prob_sum; float cum_prob = 0.0f; for (int i = 0; i < num_probs; ++i) { const float prob = probs[i] * inv_prob_sum; cum_prob += prob; probs[i] = cum_prob; } } class LikelihoodComputer { public: __device__ LikelihoodComputer(const float ncc_sigma, const float min_triangulation_angle, const float incident_angle_sigma) : cos_min_triangulation_angle_(cos(min_triangulation_angle)), inv_incident_angle_sigma_square_( -0.5f / (incident_angle_sigma * incident_angle_sigma)), inv_ncc_sigma_square_(-0.5f / (ncc_sigma * ncc_sigma)), ncc_norm_factor_(ComputeNCCCostNormFactor(ncc_sigma)) {} // Compute forward message from current cost and forward message of // previous / neighboring pixel. __device__ float ComputeForwardMessage(const float cost, const float prev) const { return ComputeMessage<true>(cost, prev); } // Compute backward message from current cost and backward message of // previous / neighboring pixel. __device__ float ComputeBackwardMessage(const float cost, const float prev) const { return ComputeMessage<false>(cost, prev); } // Compute the selection probability from the forward and backward message. __device__ inline float ComputeSelProb(const float alpha, const float beta, const float prev, const float prev_weight) const { const float zn0 = (1.0f - alpha) * (1.0f - beta); const float zn1 = alpha * beta; const float curr = zn1 / (zn0 + zn1); return prev_weight * prev + (1.0f - prev_weight) * curr; } // Compute NCC probability. Note that cost = 1 - NCC. __device__ inline float ComputeNCCProb(const float cost) const { return exp(cost * cost * inv_ncc_sigma_square_) * ncc_norm_factor_; } // Compute the triangulation angle probability. __device__ inline float ComputeTriProb( const float cos_triangulation_angle) const { const float abs_cos_triangulation_angle = abs(cos_triangulation_angle); if (abs_cos_triangulation_angle > cos_min_triangulation_angle_) { const float scaled = 1.0f - (1.0f - abs_cos_triangulation_angle) / (1.0f - cos_min_triangulation_angle_); const float likelihood = 1.0f - scaled * scaled; return min(1.0f, max(0.0f, likelihood)); } else { return 1.0f; } } // Compute the incident angle probability.
__device__ inline float ComputeIncProb(const float cos_incident_angle) const { const float x = 1.0f - max(0.0f, cos_incident_angle); return exp(x * x * inv_incident_angle_sigma_square_); } // Compute the warping/resolution prior probability. template <int kWindowSize> __device__ inline float ComputeResolutionProb(const float H[9], const float row, const float col) const { const int kWindowRadius = kWindowSize / 2; // Warp corners of patch in reference image to source image. float src1[2]; const float ref1[2] = { row - kWindowRadius, col - kWindowRadius }; Mat33DotVec3Homogeneous(H, ref1, src1); float src2[2]; const float ref2[2] = { row - kWindowRadius, col + kWindowRadius }; Mat33DotVec3Homogeneous(H, ref2, src2); float src3[2]; const float ref3[2] = { row + kWindowRadius, col + kWindowRadius }; Mat33DotVec3Homogeneous(H, ref3, src3); float src4[2]; const float ref4[2] = { row + kWindowRadius, col - kWindowRadius }; Mat33DotVec3Homogeneous(H, ref4, src4); // Compute area of patches in reference and source image. const float ref_area = kWindowSize * kWindowSize; const float src_area = abs(0.5f * (src1[0] * src2[1] - src2[0] * src1[1] - src1[0] * src4[1] + src2[0] * src3[1] - src3[0] * src2[1] + src4[0] * src1[1] + src3[0] * src4[1] - src4[0] * src3[1])); if (ref_area > src_area) { return src_area / ref_area; } else { return ref_area / src_area; } } private: // The normalization for the likelihood function, i.e. the normalization for // the prior on the matching cost. __device__ static inline float ComputeNCCCostNormFactor( const float ncc_sigma) { // A = sqrt(2pi)*sigma/2*erf(sqrt(2)/sigma) // erf(x) = 2/sqrt(pi) * integral from 0 to x of exp(-t^2) dt return 2.0f / (sqrt(2.0f * M_PI) * ncc_sigma * erff(2.0f / (ncc_sigma * 1.414213562f))); } // Compute the forward or backward message. template <bool kForward> __device__ inline float ComputeMessage(const float cost, const float prev) const { const float kUniformProb = 0.5f; const float kNoChangeProb = 0.99999f; const float kChangeProb = 1.0f - kNoChangeProb; const float emission = ComputeNCCProb(cost); float zn0; // Message for selection probability = 0. float zn1; // Message for selection probability = 1. if (kForward) { zn0 = (prev * kChangeProb + (1.0f - prev) * kNoChangeProb) * kUniformProb; zn1 = (prev * kNoChangeProb + (1.0f - prev) * kChangeProb) * emission; } else { zn0 = prev * emission * kChangeProb + (1.0f - prev) * kUniformProb * kNoChangeProb; zn1 = prev * emission * kNoChangeProb + (1.0f - prev) * kUniformProb * kChangeProb; } return zn1 / (zn0 + zn1); } float cos_min_triangulation_angle_; float inv_incident_angle_sigma_square_; float inv_ncc_sigma_square_; float ncc_norm_factor_; }; __global__ void InitNormalMap(GpuMat<float> normal_map, GpuMat<curandState> rand_state_map) { const int row = blockDim.y * blockIdx.y + threadIdx.y; const int col = blockDim.x * blockIdx.x + threadIdx.x; if (col < normal_map.GetWidth() && row < normal_map.GetHeight()) { curandState rand_state = rand_state_map.Get(row, col); float normal[3]; GenerateRandomNormal(row, col, &rand_state, normal); normal_map.SetSlice(row, col, normal); rand_state_map.Set(row, col, rand_state); } } // Rotate normals by 90deg around z-axis in counter-clockwise direction. 
__global__ void RotateNormalMap(GpuMat<float> normal_map) { const int row = blockDim.y * blockIdx.y + threadIdx.y; const int col = blockDim.x * blockIdx.x + threadIdx.x; if (col < normal_map.GetWidth() && row < normal_map.GetHeight()) { float normal[3]; normal_map.GetSlice(row, col, normal); float rotated_normal[3]; rotated_normal[0] = normal[1]; rotated_normal[1] = -normal[0]; rotated_normal[2] = normal[2]; normal_map.SetSlice(row, col, rotated_normal); } } template <int kWindowSize> __global__ void ComputeInitialCost(GpuMat<float> cost_map, const GpuMat<float> depth_map, const GpuMat<float> normal_map, const GpuMat<float> ref_sum_image, const GpuMat<float> ref_squared_sum_image, const float sigma_spatial, const float sigma_color) { const int thread_id = threadIdx.x; const int col = blockDim.x * blockIdx.x + threadIdx.x; __shared__ float local_ref_image[THREADS_PER_BLOCK * 3 * kWindowSize]; PhotoConsistencyCostComputer<kWindowSize> pcc_computer; pcc_computer.local_ref_image = local_ref_image; pcc_computer.ref_image_width = cost_map.GetWidth(); pcc_computer.ref_image_height = cost_map.GetHeight(); pcc_computer.row = 0; pcc_computer.col = col; pcc_computer.sigma_spatial = sigma_spatial; pcc_computer.sigma_color = sigma_color; float normal[3]; pcc_computer.normal = normal; for (int row = 0; row < cost_map.GetHeight(); ++row) { // Note that this must be executed even for pixels outside the borders, // since pixels are used in the local neighborhood of the current pixel. ReadRefImageIntoSharedMemory<kWindowSize>(local_ref_image, row, col, thread_id); if (col < cost_map.GetWidth()) { pcc_computer.depth = depth_map.Get(row, col); normal_map.GetSlice(row, col, normal); pcc_computer.local_ref_sum = ref_sum_image.Get(row, col); pcc_computer.local_ref_squared_sum = ref_squared_sum_image.Get(row, col); for (int image_id = 0; image_id < cost_map.GetDepth(); ++image_id) { pcc_computer.src_image_id = image_id; cost_map.Set(row, col, image_id, pcc_computer.Compute()); } pcc_computer.row += 1; } } } struct SweepOptions { float depth_min = 0.0f; float depth_max = 1.0f; int num_samples = 15; float sigma_spatial = 3.0f; float sigma_color = 0.3f; float ncc_sigma = 0.6f; float min_triangulation_angle = 0.5f; float incident_angle_sigma = 0.9f; float prev_sel_prob_weight = 0.0f; float geom_consistency_regularizer = 0.1f; float geom_consistency_max_cost = 5.0f; float filter_min_ncc = 0.1f; float filter_min_triangulation_angle = 3.0f; int filter_min_num_consistent = 2; float filter_geom_consistency_max_cost = 1.0f; }; template <int kWindowSize, bool kGeomConsistencyTerm = false, bool kFilterPhotoConsistency = false, bool kFilterGeomConsistency = false> __global__ void SweepFromTopToBottom( GpuMat<float> global_workspace, GpuMat<curandState> rand_state_map, GpuMat<float> cost_map, GpuMat<float> depth_map, GpuMat<float> normal_map, GpuMat<uint8_t> consistency_mask, GpuMat<float> sel_prob_map, const GpuMat<float> prev_sel_prob_map, const GpuMat<float> ref_sum_image, const GpuMat<float> ref_squared_sum_image, const SweepOptions options) { const int thread_id = threadIdx.x; const int col = blockDim.x * blockIdx.x + threadIdx.x; // Probability for boundary pixels. 
const float kUniformProb = 0.5f; LikelihoodComputer likelihood_computer(options.ncc_sigma, options.min_triangulation_angle, options.incident_angle_sigma); float* forward_message = &global_workspace.GetPtr()[col * global_workspace.GetHeight()]; float* sampling_probs = &global_workspace.GetPtr()[global_workspace.GetWidth() * global_workspace.GetHeight() + col * global_workspace.GetHeight()]; ////////////////////////////////////////////////////////////////////////////// // Compute backward message for all rows. Note that the backward messages are // temporarily stored in the sel_prob_map and replaced row by row as the // updated forward messages are computed further below. ////////////////////////////////////////////////////////////////////////////// if (col < cost_map.GetWidth()) { for (int image_id = 0; image_id < cost_map.GetDepth(); ++image_id) { // Compute backward message. float beta = kUniformProb; for (int row = cost_map.GetHeight() - 1; row >= 0; --row) { const float cost = cost_map.Get(row, col, image_id); beta = likelihood_computer.ComputeBackwardMessage(cost, beta); sel_prob_map.Set(row, col, image_id, beta); } // Initialize forward message. forward_message[image_id] = kUniformProb; } } ////////////////////////////////////////////////////////////////////////////// // Estimate parameters for remaining rows and compute selection probabilities. ////////////////////////////////////////////////////////////////////////////// // Shared memory holding local patch around current position for one warp. // Contains 3 vertical stripes of height kWindowSize, that are reused within // one warp for NCC computation. Note that this limits the maximum window // size to 2 * THREADS_PER_BLOCK + 1. __shared__ float local_ref_image[THREADS_PER_BLOCK * 3 * kWindowSize]; PhotoConsistencyCostComputer<kWindowSize> pcc_computer; pcc_computer.local_ref_image = local_ref_image; pcc_computer.ref_image_width = cost_map.GetWidth(); pcc_computer.ref_image_height = cost_map.GetHeight(); pcc_computer.col = col; pcc_computer.sigma_spatial = options.sigma_spatial; pcc_computer.sigma_color = options.sigma_color; struct ParamState { float depth = 0.0f; float normal[3]; }; // Parameters of previous pixel in column. ParamState prev_param_state; // Parameters of current pixel in column. ParamState curr_param_state; // Randomly sampled parameters. ParamState rand_param_state; // Cuda PRNG state for random sampling. curandState rand_state; if (col < cost_map.GetWidth()) { // Read random state for current column. rand_state = rand_state_map.Get(0, col); // Parameters for first row in column. prev_param_state.depth = depth_map.Get(0, col); normal_map.GetSlice(0, col, prev_param_state.normal); } for (int row = 0; row < cost_map.GetHeight(); ++row) { // Note that this must be executed even for pixels outside the borders, // since pixels are used in the local neighborhood of the current pixel. ReadRefImageIntoSharedMemory<kWindowSize>(local_ref_image, row, col, thread_id); if (col >= cost_map.GetWidth()) { continue; } //// Threshold check (disabled experiment). //int numLessCosts = 0;// number of patch costs below a fixed threshold //for (int image_id = 0; image_id < cost_map.GetDepth(); ++image_id) //{ // if (cost_map.Get(row, col, image_id) <= 0.4f){ // ++numLessCosts; // } //} //if (numLessCosts >= 2) //{ // // Update previous depth for next row.
// prev_param_state.depth = depth_map.Get(row, col); // float curNormals[3]; normal_map.GetSlice(row, col, curNormals); // for (int i = 0; i < 3; ++i) { // prev_param_state.normal[i] = curNormals[i]; // } // continue;// skip this pixel directly and process the next row //} pcc_computer.row = row; pcc_computer.local_ref_sum = ref_sum_image.Get(row, col); pcc_computer.local_ref_squared_sum = ref_squared_sum_image.Get(row, col); // Propagate the depth at which the current ray intersects with the plane // of the normal of the previous ray. This helps to better estimate // the depth of very oblique structures, i.e. pixels whose normal direction // is significantly different from their viewing direction. prev_param_state.depth = PropagateDepth( prev_param_state.depth, prev_param_state.normal, row - 1, row); // Read parameters for current pixel from previous sweep. curr_param_state.depth = depth_map.Get(row, col); normal_map.GetSlice(row, col, curr_param_state.normal); // Generate random parameters. rand_param_state.depth = GenerateRandomDepth(options.depth_min, options.depth_max, &rand_state); GenerateRandomNormal(row, col, &rand_state, rand_param_state.normal); // Read in the backward message, compute selection probabilities and // modulate selection probabilities with priors. float point[3]; ComputePointAtDepth(row, col, curr_param_state.depth, point); for (int image_id = 0; image_id < cost_map.GetDepth(); ++image_id) { const float cost = cost_map.Get(row, col, image_id); const float alpha = likelihood_computer.ComputeForwardMessage( cost, forward_message[image_id]); const float beta = sel_prob_map.Get(row, col, image_id); const float prev_prob = prev_sel_prob_map.Get(row, col, image_id); const float sel_prob = likelihood_computer.ComputeSelProb( alpha, beta, prev_prob, options.prev_sel_prob_weight); float cos_triangulation_angle; float cos_incident_angle; ComputeViewingAngles(point, curr_param_state.normal, image_id, &cos_triangulation_angle, &cos_incident_angle); const float tri_prob = likelihood_computer.ComputeTriProb(cos_triangulation_angle); const float inc_prob = likelihood_computer.ComputeIncProb(cos_incident_angle); float H[9]; ComposeHomography(image_id, row, col, curr_param_state.depth, curr_param_state.normal, H); const float res_prob = likelihood_computer.ComputeResolutionProb<kWindowSize>(H, row, col); sampling_probs[image_id] = sel_prob * tri_prob * inc_prob * res_prob; } TransformPDFToCDF(sampling_probs, cost_map.GetDepth()); // Compute matching cost using Monte Carlo sampling of source images. Images // with higher selection probability are more likely to be sampled. Hence, // if only very few source images see the reference image pixel, the same // source image is likely to be sampled many times. Instead of taking // the best K probabilities, this sampling scheme has the advantage of // being adaptive to any distribution of selection probabilities.
//const float kPerturbation = 0.02f; //const float perturbed_depth = // PerturbDepth(kPerturbation, curr_param_state.depth, &rand_state); //float perturbed_normal[3]; //PerturbNormal(row, col, kPerturbation * M_PI, curr_param_state.normal, // &rand_state, perturbed_normal); //// Note: two (depth, normal) hypothesis pairs are omitted here, namely (current depth, perturbed normal) and (perturbed depth, current normal). const int kNumCosts = 5; float costs[kNumCosts]; const float depths[kNumCosts] = { curr_param_state.depth, prev_param_state.depth, rand_param_state.depth, curr_param_state.depth, rand_param_state.depth }; const float* normals[kNumCosts] = { curr_param_state.normal, prev_param_state.normal,rand_param_state.normal, rand_param_state.normal, curr_param_state.normal }; for (int i = 0; i < kNumCosts; ++i) { costs[i] = 0.0f; } for (int sample = 0; sample < options.num_samples; ++sample) { const float rand_prob = curand_uniform(&rand_state) - FLT_EPSILON; pcc_computer.src_image_id = -1; for (int image_id = 0; image_id < cost_map.GetDepth(); ++image_id) { const float prob = sampling_probs[image_id]; if (prob > rand_prob) { pcc_computer.src_image_id = image_id; break; } } if (pcc_computer.src_image_id == -1) { continue; } costs[0] += cost_map.Get(row, col, pcc_computer.src_image_id); if (kGeomConsistencyTerm) { costs[0] += options.geom_consistency_regularizer * ComputeGeomConsistencyCost( row, col, depths[0], pcc_computer.src_image_id, options.geom_consistency_max_cost); } for (int i = 1; i < kNumCosts; ++i) { pcc_computer.depth = depths[i]; pcc_computer.normal = normals[i]; costs[i] += pcc_computer.Compute(); if (kGeomConsistencyTerm) { costs[i] += options.geom_consistency_regularizer * ComputeGeomConsistencyCost( row, col, depths[i], pcc_computer.src_image_id, options.geom_consistency_max_cost); } } } // Find the parameters of the minimum cost. const int min_cost_idx = FindMinCost<kNumCosts>(costs); const float best_depth = depths[min_cost_idx]; const float* best_normal = normals[min_cost_idx]; // Save best new parameters. depth_map.Set(row, col, best_depth); normal_map.SetSlice(row, col, best_normal); // Use the new cost to recompute the updated forward message and // the selection probability. pcc_computer.depth = best_depth; pcc_computer.normal = best_normal; for (int image_id = 0; image_id < cost_map.GetDepth(); ++image_id) { // Determine the cost for best depth.
float cost; if (min_cost_idx == 0) { cost = cost_map.Get(row, col, image_id); } else { pcc_computer.src_image_id = image_id; cost = pcc_computer.Compute(); cost_map.Set(row, col, image_id, cost); } const float alpha = likelihood_computer.ComputeForwardMessage( cost, forward_message[image_id]); const float beta = sel_prob_map.Get(row, col, image_id); const float prev_prob = prev_sel_prob_map.Get(row, col, image_id); const float prob = likelihood_computer.ComputeSelProb( alpha, beta, prev_prob, options.prev_sel_prob_weight); forward_message[image_id] = alpha; sel_prob_map.Set(row, col, image_id, prob); } if (kFilterPhotoConsistency || kFilterGeomConsistency) { int num_consistent = 0; float best_point[3]; ComputePointAtDepth(row, col, best_depth, best_point); const float min_ncc_prob = likelihood_computer.ComputeNCCProb(1.0f - options.filter_min_ncc); const float cos_min_triangulation_angle = cos(options.filter_min_triangulation_angle); for (int image_id = 0; image_id < cost_map.GetDepth(); ++image_id) { float cos_triangulation_angle; float cos_incident_angle; ComputeViewingAngles(best_point, best_normal, image_id, &cos_triangulation_angle, &cos_incident_angle); if (cos_triangulation_angle > cos_min_triangulation_angle || cos_incident_angle <= 0.0f) { continue; } if (!kFilterGeomConsistency) { if (sel_prob_map.Get(row, col, image_id) >= min_ncc_prob) { consistency_mask.Set(row, col, image_id, 1); num_consistent += 1; } } else if (!kFilterPhotoConsistency) { if (ComputeGeomConsistencyCost(row, col, best_depth, image_id, options.geom_consistency_max_cost) <= options.filter_geom_consistency_max_cost) { consistency_mask.Set(row, col, image_id, 1); num_consistent += 1; } } else { if (sel_prob_map.Get(row, col, image_id) >= min_ncc_prob && ComputeGeomConsistencyCost(row, col, best_depth, image_id, options.geom_consistency_max_cost) <= options.filter_geom_consistency_max_cost) { consistency_mask.Set(row, col, image_id, 1); num_consistent += 1; } } } if (num_consistent < options.filter_min_num_consistent) { const float kFilterValue = 0.0f; depth_map.Set(row, col, kFilterValue); normal_map.Set(row, col, 0, kFilterValue); normal_map.Set(row, col, 1, kFilterValue); normal_map.Set(row, col, 2, kFilterValue); for (int image_id = 0; image_id < cost_map.GetDepth(); ++image_id) { consistency_mask.Set(row, col, image_id, 0); } } } // Update previous depth for next row. 
prev_param_state.depth = best_depth; for (int i = 0; i < 3; ++i) { prev_param_state.normal[i] = best_normal[i]; } } if (col < cost_map.GetWidth()) { rand_state_map.Set(0, col, rand_state); } } PatchMatchCuda::PatchMatchCuda(const PatchMatch::Options& options, const PatchMatch::Problem& problem) : options_(options), problem_(problem), ref_width_(0), ref_height_(0), rotation_in_half_pi_(0) { SetBestCudaDevice(options_.gpu_index); InitRefImage(); InitSourceImages(); InitTransforms(); InitWorkspaceMemory(); } PatchMatchCuda::~PatchMatchCuda() { for (size_t i = 0; i < 4; ++i) { poses_device_[i].reset(); } } void PatchMatchCuda::Run() { #define CALL_RUN_FUNC(window_radius) \ case window_radius: \ RunWithWindowSize<2 * window_radius + 1>(); \ break; switch (options_.window_radius) { CALL_RUN_FUNC(1) CALL_RUN_FUNC(2) CALL_RUN_FUNC(3) CALL_RUN_FUNC(4) CALL_RUN_FUNC(5) CALL_RUN_FUNC(6) CALL_RUN_FUNC(7) CALL_RUN_FUNC(8) CALL_RUN_FUNC(9) CALL_RUN_FUNC(10) CALL_RUN_FUNC(11) CALL_RUN_FUNC(12) CALL_RUN_FUNC(13) CALL_RUN_FUNC(14) CALL_RUN_FUNC(15) CALL_RUN_FUNC(16) CALL_RUN_FUNC(17) CALL_RUN_FUNC(18) CALL_RUN_FUNC(19) CALL_RUN_FUNC(20) default: { std::cerr << "Error: Window size not supported" << std::endl; break; } } #undef CALL_RUN_FUNC } DepthMap PatchMatchCuda::GetDepthMap() const { return DepthMap(depth_map_->CopyToMat(), options_.depth_min, options_.depth_max); } NormalMap PatchMatchCuda::GetNormalMap() const { return NormalMap(normal_map_->CopyToMat()); } Mat<float> PatchMatchCuda::GetSelProbMap() const { return prev_sel_prob_map_->CopyToMat(); } std::vector<int> PatchMatchCuda::GetConsistentImageIds() const { const Mat<uint8_t> mask = consistency_mask_->CopyToMat(); std::vector<int> consistent_image_ids; std::vector<int> pixel_consistent_image_ids; pixel_consistent_image_ids.reserve(mask.GetDepth()); for (size_t r = 0; r < mask.GetHeight(); ++r) { for (size_t c = 0; c < mask.GetWidth(); ++c) { pixel_consistent_image_ids.clear(); for (size_t d = 0; d < mask.GetDepth(); ++d) { if (mask.Get(r, c, d)) { pixel_consistent_image_ids.push_back(problem_.src_img_ids[d]); } } if (pixel_consistent_image_ids.size() > 0) { consistent_image_ids.push_back(c); consistent_image_ids.push_back(r); consistent_image_ids.push_back(pixel_consistent_image_ids.size()); consistent_image_ids.insert(consistent_image_ids.end(), pixel_consistent_image_ids.begin(), pixel_consistent_image_ids.end()); } } } return consistent_image_ids; } template <int kWindowSize> void PatchMatchCuda::RunWithWindowSize() { CudaTimer total_timer; CudaTimer init_timer; ComputeCudaConfig(); ComputeInitialCost<kWindowSize> << <sweep_grid_size_, sweep_block_size_ >> > ( *cost_map_, *depth_map_, *normal_map_, *ref_image_->sum_image, *ref_image_->squared_sum_image, options_.sigma_spatial, options_.sigma_color); CUDA_CHECK_ERROR(); init_timer.Print("Initialization"); const int num_iterations = options_.geom_consistency ? 
options_.num_geometric_iterations : options_.num_photometric_iteratoins; const float total_num_steps = num_iterations * 4; SweepOptions sweep_options; sweep_options.depth_min = options_.depth_min; sweep_options.depth_max = options_.depth_max; sweep_options.sigma_spatial = options_.sigma_spatial; sweep_options.sigma_color = options_.sigma_color; sweep_options.num_samples = options_.num_samples; sweep_options.ncc_sigma = options_.ncc_sigma; sweep_options.min_triangulation_angle = DEG2RAD(options_.min_triangulation_angle); sweep_options.incident_angle_sigma = options_.incident_angle_sigma; sweep_options.geom_consistency_regularizer = options_.geom_consistency_regularizer; sweep_options.geom_consistency_max_cost = options_.geom_consistency_max_cost; sweep_options.filter_min_ncc = options_.filter_min_ncc; sweep_options.filter_min_triangulation_angle = DEG2RAD(options_.filter_min_triangulation_angle); sweep_options.filter_min_num_consistent = options_.filter_min_num_consistent; sweep_options.filter_geom_consistency_max_cost = options_.filter_geom_consistency_max_cost; // Write the initial depth and normal maps to disk for inspection. if (bOutPutMessage && problem_.ref_img_id == 0) //if (bOutPutMessage) { if (!options_.geom_consistency) { cv::imwrite("df_0_0.jpg", GetDepthMap().ToBitmap(2, 98)); cv::imwrite("nf_0_0.jpg", GetNormalMap().ToBitmap()); } else { cv::imwrite("dg_0_0.jpg", GetDepthMap().ToBitmap(2, 98)); cv::imwrite("ng_0_0.jpg", GetNormalMap().ToBitmap()); } } for (int iter = 0; iter < num_iterations; ++iter) { CudaTimer iter_timer; for (int sweep = 0; sweep < 4; ++sweep) { CudaTimer sweep_timer; sweep_options.prev_sel_prob_weight = static_cast<float>(iter * 4 + sweep) / total_num_steps; const bool last_sweep = iter == num_iterations - 1 && sweep == 3; #define CALL_SWEEP_FUNC \ SweepFromTopToBottom<kWindowSize, kGeomConsistencyTerm, \ kFilterPhotoConsistency, kFilterGeomConsistency> \ <<<sweep_grid_size_, sweep_block_size_>>>( \ *global_workspace_, *rand_state_map_, *cost_map_, *depth_map_, \ *normal_map_, *consistency_mask_, *sel_prob_map_, \ *prev_sel_prob_map_, *ref_image_->sum_image, \ *ref_image_->squared_sum_image, sweep_options); if (last_sweep) { if (options_.filter) { consistency_mask_.reset(new GpuMat<uint8_t>(cost_map_->GetWidth(), cost_map_->GetHeight(), cost_map_->GetDepth())); consistency_mask_->FillWithScalar(0); } if (options_.geom_consistency) { const bool kGeomConsistencyTerm = true; if (options_.filter) { const bool kFilterPhotoConsistency = true; const bool kFilterGeomConsistency = true; CALL_SWEEP_FUNC } else { const bool kFilterPhotoConsistency = false; const bool kFilterGeomConsistency = false; CALL_SWEEP_FUNC } } else { const bool kGeomConsistencyTerm = false; if (options_.filter) { const bool kFilterPhotoConsistency = true; const bool kFilterGeomConsistency = false; CALL_SWEEP_FUNC } else { const bool kFilterPhotoConsistency = false; const bool kFilterGeomConsistency = false; CALL_SWEEP_FUNC } } } else { const bool kFilterPhotoConsistency = false; const bool kFilterGeomConsistency = false; if (options_.geom_consistency) { const bool kGeomConsistencyTerm = true; CALL_SWEEP_FUNC } else { const bool kGeomConsistencyTerm = false; CALL_SWEEP_FUNC } } #undef CALL_SWEEP_FUNC CUDA_CHECK_ERROR(); Rotate(); // Rotate selected image map.
if (last_sweep && options_.filter) { std::unique_ptr<GpuMat<uint8_t>> rot_consistency_mask_( new GpuMat<uint8_t>(cost_map_->GetWidth(), cost_map_->GetHeight(), cost_map_->GetDepth())); consistency_mask_->Rotate(rot_consistency_mask_.get()); consistency_mask_.swap(rot_consistency_mask_); } sweep_timer.Print(" Sweep " + std::to_string(sweep + 1)); // Write the depth and normal maps after every sweep to disk for inspection. if (bOutPutMessage && problem_.ref_img_id == 0) //if (bOutPutMessage) { cv::Mat depthmapImage, normalmapImage; depthmapImage = GetDepthMap().ToBitmap(2, 98); normalmapImage = GetNormalMap().ToBitmap(); if (sweep != 3) { for (int i = 0; i < sweep + 1; i++) { cv::transpose(depthmapImage, depthmapImage); cv::flip(depthmapImage, depthmapImage, 1);// rotate the image 90 degrees clockwise cv::transpose(normalmapImage, normalmapImage); cv::flip(normalmapImage, normalmapImage, 1); } } char path_1[20]; char path_2[20]; if (!options_.geom_consistency) { sprintf_s(path_1, "df_%d_%d.jpg", iter + 1, sweep + 1); sprintf_s(path_2, "nf_%d_%d.jpg", iter + 1, sweep + 1); } else { sprintf_s(path_1, "dg_%d_%d.jpg", iter + 1, sweep + 1); sprintf_s(path_2, "ng_%d_%d.jpg", iter + 1, sweep + 1); } cv::imwrite(path_1, depthmapImage); cv::imwrite(path_2, normalmapImage); } } iter_timer.Print("Iteration " + std::to_string(iter + 1)); } total_timer.Print("Total"); } void PatchMatchCuda::ComputeCudaConfig() { sweep_block_size_.x = THREADS_PER_BLOCK; sweep_block_size_.y = 1; sweep_block_size_.z = 1; sweep_grid_size_.x = (depth_map_->GetWidth() - 1) / THREADS_PER_BLOCK + 1; sweep_grid_size_.y = 1; sweep_grid_size_.z = 1; elem_wise_block_size_.x = THREADS_PER_BLOCK; elem_wise_block_size_.y = THREADS_PER_BLOCK; elem_wise_block_size_.z = 1; elem_wise_grid_size_.x = (depth_map_->GetWidth() - 1) / THREADS_PER_BLOCK + 1; elem_wise_grid_size_.y = (depth_map_->GetHeight() - 1) / THREADS_PER_BLOCK + 1; elem_wise_grid_size_.z = 1; } void PatchMatchCuda::InitRefImage() { const Image& ref_image = problem_.images->at(problem_.ref_img_id); ref_width_ = ref_image.GetWidth(); ref_height_ = ref_image.GetHeight(); // Upload to device. ref_image_.reset(new GpuMatRefImage(ref_width_, ref_height_)); cv::Mat ref_image_mat = ref_image.GetBitmap(); std::vector<uint8_t> ref_image_array(ref_height_*ref_width_); ref_image_array.assign(ref_image_mat.datastart, ref_image_mat.dataend); ref_image_->Filter(ref_image_array.data(), options_.window_radius, options_.sigma_spatial, options_.sigma_color); ref_image_device_.reset( new CudaArrayWrapper<uint8_t>(ref_width_, ref_height_, 1)); ref_image_device_->CopyFromGpuMat(*ref_image_->image); // Create the CUDA array channel format descriptor. //const cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<uint8_t>(); // Create texture. ref_image_texture.addressMode[0] = cudaAddressModeBorder; ref_image_texture.addressMode[1] = cudaAddressModeBorder; ref_image_texture.addressMode[2] = cudaAddressModeBorder; ref_image_texture.filterMode = cudaFilterModePoint; ref_image_texture.normalized = false; checkCudaErrors(cudaBindTextureToArray(ref_image_texture, ref_image_device_->GetPtr())); } void PatchMatchCuda::InitSourceImages() { // Determine maximum image size. size_t max_width = 0; size_t max_height = 0; for (const auto image_id : problem_.src_img_ids) { const Image& image = problem_.images->at(image_id); if (image.GetWidth() > max_width) { max_width = image.GetWidth(); } if (image.GetHeight() > max_height) { max_height = image.GetHeight(); } } // Upload source images to device. { // Copy source images to contiguous memory block.
const uint8_t kDefaultValue = 0; std::vector<uint8_t> src_images_host_data( static_cast<size_t>(max_width * max_height * problem_.src_img_ids.size()), kDefaultValue); for (size_t i = 0; i < problem_.src_img_ids.size(); ++i) { const Image& image = problem_.images->at(problem_.src_img_ids[i]); const cv::Mat &bitmap = image.GetBitmap(); uint8_t* dest = src_images_host_data.data() + max_width * max_height * i; for (size_t r = 0; r < image.GetHeight(); ++r) { memcpy(dest, bitmap.ptr<uchar>(r), image.GetWidth() * sizeof(uint8_t)); dest += max_width; } } // Upload to device. src_images_device_.reset(new CudaArrayWrapper<uint8_t>( max_width, max_height, problem_.src_img_ids.size())); src_images_device_->CopyToDevice(src_images_host_data.data()); //const cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<uint8_t>(); // Create source images texture. src_images_texture.addressMode[0] = cudaAddressModeBorder; src_images_texture.addressMode[1] = cudaAddressModeBorder; src_images_texture.addressMode[2] = cudaAddressModeBorder; src_images_texture.filterMode = cudaFilterModeLinear; src_images_texture.normalized = false; checkCudaErrors(cudaBindTextureToArray(src_images_texture, src_images_device_->GetPtr())); } // Upload source depth maps to device. if (options_.geom_consistency) { const float kDefaultValue = 0.0f; std::vector<float> src_depth_maps_host_data( static_cast<size_t>(max_width * max_height * problem_.src_img_ids.size()), kDefaultValue); for (size_t i = 0; i < problem_.src_img_ids.size(); ++i) { const DepthMap& depth_map = problem_.depth_maps->at(problem_.src_img_ids[i]); float* dest = src_depth_maps_host_data.data() + max_width * max_height * i; for (size_t r = 0; r < depth_map.GetHeight(); ++r) { memcpy(dest, depth_map.GetPtr() + r * depth_map.GetWidth(), depth_map.GetWidth() * sizeof(float)); dest += max_width; } } src_depth_maps_device_.reset(new CudaArrayWrapper<float>( max_width, max_height, problem_.src_img_ids.size())); src_depth_maps_device_->CopyToDevice(src_depth_maps_host_data.data()); //const cudaChannelFormatDesc channelDesc1 = cudaCreateChannelDesc<float>(); // Create source depth maps texture. src_depth_maps_texture.addressMode[0] = cudaAddressModeBorder; src_depth_maps_texture.addressMode[1] = cudaAddressModeBorder; src_depth_maps_texture.addressMode[2] = cudaAddressModeBorder; // TODO: Check if linear interpolation improves results or not. src_depth_maps_texture.filterMode = cudaFilterModePoint; src_depth_maps_texture.normalized = false; checkCudaErrors(cudaBindTextureToArray(src_depth_maps_texture, src_depth_maps_device_->GetPtr())); } } void PatchMatchCuda::InitTransforms() { const Image& ref_image = problem_.images->at(problem_.ref_img_id); ////////////////////////////////////////////////////////////////////////////// // Generate rotated versions (counter-clockwise) of calibration matrix. ////////////////////////////////////////////////////////////////////////////// for (size_t i = 0; i < 4; ++i) { ref_K_host_[i][0] = ref_image.GetK()[0]; ref_K_host_[i][1] = ref_image.GetK()[2]; ref_K_host_[i][2] = ref_image.GetK()[4]; ref_K_host_[i][3] = ref_image.GetK()[5]; } // Rotated by 90 degrees. std::swap(ref_K_host_[1][0], ref_K_host_[1][2]); std::swap(ref_K_host_[1][1], ref_K_host_[1][3]); ref_K_host_[1][3] = ref_width_ - 1 - ref_K_host_[1][3]; // Rotated by 180 degrees. ref_K_host_[2][1] = ref_width_ - 1 - ref_K_host_[2][1]; ref_K_host_[2][3] = ref_height_ - 1 - ref_K_host_[2][3]; // Rotated by 270 degrees. 
std::swap(ref_K_host_[3][0], ref_K_host_[3][2]); std::swap(ref_K_host_[3][1], ref_K_host_[3][3]); ref_K_host_[3][1] = ref_height_ - 1 - ref_K_host_[3][1]; // Extract 1/fx, -cx/fx, fy, -cy/fy. for (size_t i = 0; i < 4; ++i) { ref_inv_K_host_[i][0] = 1.0f / ref_K_host_[i][0]; ref_inv_K_host_[i][1] = -ref_K_host_[i][1] / ref_K_host_[i][0]; ref_inv_K_host_[i][2] = 1.0f / ref_K_host_[i][2]; ref_inv_K_host_[i][3] = -ref_K_host_[i][3] / ref_K_host_[i][2]; } // Bind 0 degrees version to constant global memory. checkCudaErrors(cudaMemcpyToSymbol(ref_K, ref_K_host_[0], sizeof(float) * 4, 0, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyToSymbol(ref_inv_K, ref_inv_K_host_[0], sizeof(float) * 4, 0, cudaMemcpyHostToDevice)); ////////////////////////////////////////////////////////////////////////////// // Generate rotated versions of camera poses. ////////////////////////////////////////////////////////////////////////////// float rotated_R[9]; memcpy(rotated_R, ref_image.GetR(), 9 * sizeof(float)); float rotated_T[3]; memcpy(rotated_T, ref_image.GetT(), 3 * sizeof(float)); // Matrix for 90deg rotation around Z-axis in counter-clockwise direction. const float R_z90[9] = { 0, 1, 0, -1, 0, 0, 0, 0, 1 }; for (size_t i = 0; i < 4; ++i) { const size_t kNumTformParams = 4 + 9 + 3 + 3 + 12 + 12; std::vector<float> poses_host_data(kNumTformParams * problem_.src_img_ids.size()); int offset = 0; for (const auto image_id : problem_.src_img_ids) { const Image& image = problem_.images->at(image_id); const float K[4] = { image.GetK()[0], image.GetK()[2], image.GetK()[4], image.GetK()[5] }; memcpy(poses_host_data.data() + offset, K, 4 * sizeof(float)); offset += 4; float rel_R[9]; float rel_T[3]; ComputeRelativePose(rotated_R, rotated_T, image.GetR(), image.GetT(), rel_R, rel_T); memcpy(poses_host_data.data() + offset, rel_R, 9 * sizeof(float)); offset += 9; memcpy(poses_host_data.data() + offset, rel_T, 3 * sizeof(float)); offset += 3; float C[3]; ComputeProjectionCenter(rel_R, rel_T, C); memcpy(poses_host_data.data() + offset, C, 3 * sizeof(float)); offset += 3; float P[12]; ComposeProjectionMatrix(image.GetK(), rel_R, rel_T, P); memcpy(poses_host_data.data() + offset, P, 12 * sizeof(float)); offset += 12; float inv_P[12]; ComposeInverseProjectionMatrix(image.GetK(), rel_R, rel_T, inv_P); memcpy(poses_host_data.data() + offset, inv_P, 12 * sizeof(float)); offset += 12; } poses_device_[i].reset(new CudaArrayWrapper<float>( kNumTformParams, problem_.src_img_ids.size(), 1)); poses_device_[i]->CopyToDevice(poses_host_data.data()); RotatePose(R_z90, rotated_R, rotated_T); } //const cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); poses_texture.addressMode[0] = cudaAddressModeBorder; poses_texture.addressMode[1] = cudaAddressModeBorder; poses_texture.addressMode[2] = cudaAddressModeBorder; poses_texture.filterMode = cudaFilterModePoint; poses_texture.normalized = false; checkCudaErrors( cudaBindTextureToArray(poses_texture, poses_device_[0]->GetPtr())); } __global__ void InitSparse(GpuMat<float> depth_map, GpuMat<float> normal_map, GpuMat<float> prev_sel_prob_map, float *sparsePoints, float *sparseNormals, int *tracks, const size_t num, const size_t trackNum) { for (int i = 0, j = 0; i < num; i = i + 3, j++) { const float sp[3] = { sparsePoints[i], sparsePoints[i + 1], sparsePoints[i + 2] }; const float sn[3] = { sparseNormals[i], sparseNormals[i + 1], sparseNormals[i + 2] }; depth_map.Set(sp[0], sp[1], sp[2]); normal_map.SetSlice(sp[0], sp[1], sn); for (int k = j + 1; tracks[k] != -1; 
k++)// track entries for one point are stored between consecutive -1 sentinels: -1 ... -1 ... -1 { prev_sel_prob_map.Set(sp[0], sp[1], tracks[k], 1.0f); } } } void PatchMatchCuda::InitWorkspaceMemory() { rand_state_map_.reset(new GpuMatPRNG(ref_width_, ref_height_)); // Initialize the depth map. depth_map_.reset(new GpuMat<float>(ref_width_, ref_height_)); if (options_.geom_consistency) { const DepthMap& init_depth_map = problem_.depth_maps->at(problem_.ref_img_id); depth_map_->CopyToDevice(init_depth_map.GetPtr(), init_depth_map.GetWidth() * sizeof(float)); } else { depth_map_->FillWithRandomNumbers(options_.depth_min, options_.depth_max, *rand_state_map_); } normal_map_.reset(new GpuMat<float>(ref_width_, ref_height_, 3)); // Note that it is not necessary to keep the selection probability map in // memory for all pixels. Theoretically, it is possible to incorporate // the temporary selection probabilities in the global_workspace_. // However, it is useful to keep the probabilities for the entire image // in memory, so that it can be exported. sel_prob_map_.reset(new GpuMat<float>(ref_width_, ref_height_, problem_.src_img_ids.size())); prev_sel_prob_map_.reset(new GpuMat<float>(ref_width_, ref_height_, problem_.src_img_ids.size())); prev_sel_prob_map_->FillWithScalar(0.5f); cost_map_.reset(new GpuMat<float>(ref_width_, ref_height_, problem_.src_img_ids.size())); const int ref_max_dim = std::max(ref_width_, ref_height_); global_workspace_.reset( new GpuMat<float>(ref_max_dim, problem_.src_img_ids.size(), 2)); consistency_mask_.reset(new GpuMat<uint8_t>(0, 0, 0)); ComputeCudaConfig(); // Note: this call must not be moved earlier; the reason is not understood. if (options_.geom_consistency) { const NormalMap& init_normal_map = problem_.normal_maps->at(problem_.ref_img_id); normal_map_->CopyToDevice(init_normal_map.GetPtr(), init_normal_map.GetWidth() * sizeof(float)); } else { InitNormalMap << <elem_wise_grid_size_, elem_wise_block_size_ >> > ( *normal_map_, *rand_state_map_); } if (!options_.geom_consistency && options_.bUse_sparse_points) { const size_t size = problem_.sparsePoints.size(); const size_t trackNum = problem_.tracks.size(); float *sparsePoints, *sparseNormals; int *tracks; checkCudaErrors(cudaMalloc((void **)&sparsePoints, size * sizeof(float))); checkCudaErrors(cudaMemcpy(sparsePoints, &problem_.sparsePoints[0], size * sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc((void **)&sparseNormals, size * sizeof(float))); checkCudaErrors(cudaMemcpy(sparseNormals, &problem_.sparseNormals[0], size * sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc((void **)&tracks, trackNum * sizeof(int))); checkCudaErrors(cudaMemcpy(tracks, &problem_.tracks[0], trackNum * sizeof(int), cudaMemcpyHostToDevice)); dim3 block(1, 1, 1); InitSparse << <1, block >> > (*depth_map_, *normal_map_, *prev_sel_prob_map_, sparsePoints, sparseNormals, tracks, size, trackNum); //CUDA_CHECK_ERROR(); checkCudaErrors(cudaFree(sparsePoints)); checkCudaErrors(cudaFree(sparseNormals)); checkCudaErrors(cudaFree(tracks)); } } void PatchMatchCuda::Rotate() { rotation_in_half_pi_ = (rotation_in_half_pi_ + 1) % 4; size_t width; size_t height; if (rotation_in_half_pi_ % 2 == 0) { width = ref_width_; height = ref_height_; } else { width = ref_height_; height = ref_width_; } // Rotate random map. { std::unique_ptr<GpuMatPRNG> rotated_rand_state_map( new GpuMatPRNG(width, height)); rand_state_map_->Rotate(rotated_rand_state_map.get()); rand_state_map_.swap(rotated_rand_state_map); } // Rotate depth map.
{ std::unique_ptr<GpuMat<float>> rotated_depth_map( new GpuMat<float>(width, height)); depth_map_->Rotate(rotated_depth_map.get()); depth_map_.swap(rotated_depth_map); } // Rotate normal map. { RotateNormalMap << <elem_wise_grid_size_, elem_wise_block_size_ >> > ( *normal_map_); std::unique_ptr<GpuMat<float>> rotated_normal_map( new GpuMat<float>(width, height, 3)); normal_map_->Rotate(rotated_normal_map.get()); normal_map_.swap(rotated_normal_map); } // Rotate reference image. { std::unique_ptr<GpuMatRefImage> rotated_ref_image( new GpuMatRefImage(width, height)); ref_image_->image->Rotate(rotated_ref_image->image.get()); ref_image_->sum_image->Rotate(rotated_ref_image->sum_image.get()); ref_image_->squared_sum_image->Rotate( rotated_ref_image->squared_sum_image.get()); ref_image_.swap(rotated_ref_image); } // Bind rotated reference image to texture. ref_image_device_.reset(new CudaArrayWrapper<uint8_t>(width, height, 1)); ref_image_device_->CopyFromGpuMat(*ref_image_->image); //const cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<uint8_t>(); checkCudaErrors(cudaUnbindTexture(ref_image_texture)); checkCudaErrors( cudaBindTextureToArray(ref_image_texture, ref_image_device_->GetPtr())); // Rotate selection probability map. prev_sel_prob_map_.reset( new GpuMat<float>(width, height, problem_.src_img_ids.size())); sel_prob_map_->Rotate(prev_sel_prob_map_.get()); sel_prob_map_.reset( new GpuMat<float>(width, height, problem_.src_img_ids.size())); // Rotate cost map. { std::unique_ptr<GpuMat<float>> rotated_cost_map( new GpuMat<float>(width, height, problem_.src_img_ids.size())); cost_map_->Rotate(rotated_cost_map.get()); cost_map_.swap(rotated_cost_map); } // Rotate transformations. const cudaChannelFormatDesc channelDesc1 = cudaCreateChannelDesc<float>(); checkCudaErrors(cudaUnbindTexture(poses_texture)); checkCudaErrors(cudaBindTextureToArray( poses_texture, poses_device_[rotation_in_half_pi_]->GetPtr())); // Rotate calibration. checkCudaErrors(cudaMemcpyToSymbol(ref_K, ref_K_host_[rotation_in_half_pi_], sizeof(float) * 4, 0, cudaMemcpyHostToDevice)); checkCudaErrors( cudaMemcpyToSymbol(ref_inv_K, ref_inv_K_host_[rotation_in_half_pi_], sizeof(float) * 4, 0, cudaMemcpyHostToDevice)); // Recompute Cuda configuration for rotated reference image. ComputeCudaConfig(); } } // namespace mvs } // namespace colmap
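// ---------------------------------------------------------------------------
// Editor's illustrative sketch (hypothetical, not part of the original file):
// SweepFromTopToBottom chooses a source image per Monte Carlo sample by first
// turning the per-image selection weights into a CDF (TransformPDFToCDF) and
// then picking the first image whose cumulative probability exceeds a uniform
// random draw. The host-side helpers below mirror that inverse-CDF sampling
// step so it can be reasoned about or tested in isolation; the names
// HostTransformPDFToCDF and SampleImageFromCDF are invented for this sketch.
// Example: weights {0.2f, 0.6f, 0.2f} become the CDF {0.2f, 0.8f, 1.0f}, and
// a draw of 0.5f selects image 1.
namespace {

// Convert (unnormalized) per-image weights into a cumulative distribution in
// place, mirroring the device function TransformPDFToCDF above.
inline void HostTransformPDFToCDF(float* probs, const int num_probs) {
  float prob_sum = 0.0f;
  for (int i = 0; i < num_probs; ++i) {
    prob_sum += probs[i];
  }
  const float inv_prob_sum = 1.0f / prob_sum;
  float cum_prob = 0.0f;
  for (int i = 0; i < num_probs; ++i) {
    cum_prob += probs[i] * inv_prob_sum;
    probs[i] = cum_prob;
  }
}

// Return the index of the first image whose cumulative probability exceeds
// rand_prob, or -1 if none does (the sweep kernel skips the sample in that
// case).
inline int SampleImageFromCDF(const float* cdf, const int num_images,
                              const float rand_prob) {
  for (int image_id = 0; image_id < num_images; ++image_id) {
    if (cdf[image_id] > rand_prob) {
      return image_id;
    }
  }
  return -1;
}

}  // anonymous namespace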
fdd4cb779e296124b87f8fe290941e52786c1b02.hip
// !!! This is a file automatically generated by hipify!!! #include "CudaLBFGS/lbfgs.h" #include <iostream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> #include <hip/device_functions.h> using namespace std; // f(x) = x^T A x + b^T x + c // gradf(x) = 2 A x + b class cpu_quadratic : public cpu_cost_function { public: // A is expected to point to n*n floats // b is expected to point to n floats cpu_quadratic(size_t n, float *A, float *b, float c) : cpu_cost_function(n) , m_A(A) , m_b(b) , m_c(c) {} void cpu_f(const floatdouble *h_x, floatdouble *h_y) { floatdouble xAx = 0.0f; for (size_t i = 0; i < m_numDimensions; ++i) { for (size_t j = 0; j < m_numDimensions; ++j) { xAx += m_A[i * m_numDimensions + j] * h_x[i] * h_x[j]; } } floatdouble bx = 0.0f; for (size_t i = 0; i < m_numDimensions; ++i) { bx += m_b[i] * h_x[i]; } *h_y = xAx + bx + m_c; } void cpu_gradf(const floatdouble *h_x, floatdouble *h_grad) { for (size_t i = 0; i < m_numDimensions; ++i) { h_grad[i] = 0.0f; for (size_t j = 0; j < m_numDimensions; ++j) { h_grad[i] += m_A[i * m_numDimensions + j] * h_x[j]; } h_grad[i] *= 2.0f; h_grad[i] += m_b[i]; } } void cpu_f_gradf(const floatdouble *h_x, floatdouble *h_f, floatdouble *h_gradf) { cpu_f(h_x, h_f); cpu_gradf(h_x, h_gradf); } private: float *m_A; float *m_b; float m_c; }; namespace gpu_quadratic_d { __device__ float tmp1; __device__ float tmp2; __device__ static void myAtomicAdd(float *address, float value) { #if __CUDA_ARCH__ >= 200 atomicAdd(address, value); #else // cf. https://www.sharcnet.ca/help/index.php/CUDA_tips_and_tricks int oldval, newval, readback; oldval = __float_as_int(*address); newval = __float_as_int(__int_as_float(oldval) + value); while ((readback=atomicCAS((int *)address, oldval, newval)) != oldval) { oldval = readback; newval = __float_as_int(__int_as_float(oldval) + value); } #endif } __global__ void kernelF(const float *d_xAx, const float *d_bx, const float *d_c, float *d_y) { *d_y = *d_xAx + *d_bx + *d_c; } __global__ void kernelGradf(const float *d_x, float *d_grad, float *A, float *b, const size_t len) { size_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= len) return; d_grad[index] = 0.0f; for (size_t j = 0; j < len; ++j) { d_grad[index] += A[index * len + j] * d_x[j]; } d_grad[index] *= 2.0f; d_grad[index] += b[index]; } __global__ static void xAx(const float *x, const float *A, const size_t len, float *res) { size_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= len*len) return; __shared__ float s_sum; // block local aggregate s_sum = 0.0f; __syncthreads(); // wait for all to initialize const size_t i = index / len; const size_t j = index % len; myAtomicAdd(&s_sum, A[index] * x[i] * x[j]); __syncthreads(); if (threadIdx.x == 0) myAtomicAdd(res, s_sum); } __global__ static void dot(const float *a, const float *b, const size_t len, float *res) { size_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= len) return; __shared__ float s_sum; // block local aggregate s_sum = 0.0f; __syncthreads(); // wait for all to initialize myAtomicAdd(&s_sum, a[index] * b[index]); __syncthreads(); if (threadIdx.x == 0) myAtomicAdd(res, s_sum); } } class gpu_quadratic : public cost_function { public: // A is expected to point to n*n floats // b is expected to point to n floats gpu_quadratic(size_t n, float *A, float *b, float c) : cost_function(n) { CudaSafeCall( hipMalloc(&m_d_A, n*n * sizeof(float)) ); CudaSafeCall( hipMalloc(&m_d_b, n * 
sizeof(float)) ); CudaSafeCall( hipMalloc(&m_d_c, 1 * sizeof(float)) ); CudaSafeCall( hipMemcpy(m_d_A, A, n*n * sizeof(float), hipMemcpyHostToDevice) ); CudaSafeCall( hipMemcpy(m_d_b, b, n * sizeof(float), hipMemcpyHostToDevice) ); CudaSafeCall( hipMemcpy(m_d_c, &c, 1 * sizeof(float), hipMemcpyHostToDevice)); CudaSafeCall( hipGetSymbolAddress((void**)&m_d_tmp1, gpu_quadratic_d::tmp1) ); CudaSafeCall( hipGetSymbolAddress((void**)&m_d_tmp2, gpu_quadratic_d::tmp2) ); } ~gpu_quadratic() { CudaSafeCall( hipFree(m_d_A) ); CudaSafeCall( hipFree(m_d_b) ); CudaSafeCall( hipFree(m_d_c) ); } void f(const float *d_x, float *d_y) { const size_t &NX = m_numDimensions; dim3 blockDim(512); size_t NX2 = NX * NX; dim3 gridDim_xAx((NX2 % blockDim.x) == 0 ? (NX2 / blockDim.x) : (NX2 / blockDim.x) + 1); dim3 gridDim_dot((NX % blockDim.x) == 0 ? (NX / blockDim.x) : (NX / blockDim.x) + 1); CudaSafeCall( hipMemset(m_d_tmp1, 0, sizeof(float)) ); CudaSafeCall( hipMemset(m_d_tmp2, 0, sizeof(float)) ); hipLaunchKernelGGL(( gpu_quadratic_d::xAx), dim3(gridDim_xAx), dim3(blockDim), 0, 0, d_x, m_d_A, NX, m_d_tmp1); CudaCheckError(); hipLaunchKernelGGL(( gpu_quadratic_d::dot), dim3(gridDim_dot), dim3(blockDim), 0, 0, d_x, m_d_b, NX, m_d_tmp2); CudaCheckError(); hipDeviceSynchronize(); hipLaunchKernelGGL(( gpu_quadratic_d::kernelF), dim3(1), dim3(1), 0, 0, m_d_tmp1, m_d_tmp2, m_d_c, d_y); CudaCheckError(); } void gradf(const float *d_x, float *d_grad) { const size_t &NX = m_numDimensions; dim3 blockDim(512); dim3 gridDim((NX % blockDim.x) == 0 ? (NX / blockDim.x) : (NX / blockDim.x) + 1); hipLaunchKernelGGL(( gpu_quadratic_d::kernelGradf), dim3(gridDim), dim3(blockDim), 0, 0, d_x, d_grad, m_d_A, m_d_b, NX); CudaCheckError(); } void f_gradf(const float *d_x, float *d_f, float *d_grad) { f(d_x, d_f); gradf(d_x, d_grad); } private: float *m_d_A; float *m_d_b; float *m_d_c; float *m_d_tmp1; float *m_d_tmp2; }; int main(int argc, char **argv) { // CPU size_t n = /*2*/ 8 /*200*/ /*500*/ /*5000*/; size_t maxIter = 500; float gradientEps = 1e-3f; float *A = new float[n*n]; for (size_t i = 0; i < n; ++i) { for (size_t j = 0; j < n; ++j) { //A[i * n + j] = (i == j) ? 1.0f : 0.0f; // 8 on main diagonal, 1 on side diagonals, 0 else if (i == j) A[i * n + j] = 8.0f; else if ((i == j-1) || (i == j+1)) A[i * n + j] = 1.0f; else A[i * n + j] = 0.0f; } } float *b = new float[n]; for (size_t i = 0; i < n; ++i) { b[i] = 1.0f; } float c = 42.0f; cpu_quadratic p1(n, A, b, c); lbfgs minimizer1(p1); minimizer1.setMaxIterations(maxIter); minimizer1.setGradientEpsilon(gradientEps); float *x = new float[n]; for (size_t i = 0; i < n; ++i) { x[i] = i % 2 == 0 ? 5.0f : -10.0f; } lbfgs::status stat = minimizer1.minimize_with_host_x(x); cout << lbfgs::statusToString(stat).c_str() << endl; cout << "CPU quadratic:"; for (size_t i = 0; i < n; ++i) { cout << " " << x[i]; } cout << endl; // GPU for (size_t i = 0; i < n; ++i) { x[i] = i % 2 == 0 ? 
5.0f : -10.0f; } gpu_quadratic p2(n, A, b, c); lbfgs minimizer2(p2); minimizer2.setMaxIterations(maxIter); minimizer2.setGradientEpsilon(gradientEps); float *d_x; CudaSafeCall( hipMalloc(&d_x, n * sizeof(float)) ); CudaSafeCall( hipMemcpy(d_x, x, n * sizeof(float), hipMemcpyHostToDevice) ); lbfgs::status stat2 = minimizer2.minimize(d_x); cout << lbfgs::statusToString(stat2).c_str() << endl; CudaSafeCall( hipMemcpy(x, d_x, n * sizeof(float), hipMemcpyDeviceToHost) ); CudaSafeCall( hipFree(d_x) ); cout << "GPU quadratic:"; for (size_t i = 0; i < n; ++i) { cout << " " << x[i]; } cout << endl; delete [] x; delete [] A; delete [] b; return 0; }
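// ---------------------------------------------------------------------------
// Editor's illustrative sketch (hypothetical, not part of the original file):
// for f(x) = x^T A x + b^T x + c the analytic gradient used above, 2 A x + b,
// relies on A being symmetric, which holds for the tridiagonal matrix built
// in main(). The helper below cross-checks cpu_gradf against central finite
// differences of cpu_f; CheckGradient and its parameters are invented for
// this sketch and are not part of the CudaLBFGS library.
#include <cmath>  // std::fabs, used only by this sketch

static bool CheckGradient(cpu_quadratic &problem, const size_t n,
                          const floatdouble *x,
                          const floatdouble eps = 1e-3f,
                          const floatdouble tol = 1e-2f)
{
	floatdouble *grad = new floatdouble[n];
	floatdouble *xp   = new floatdouble[n];
	floatdouble *xm   = new floatdouble[n];

	// Analytic gradient at x.
	problem.cpu_gradf(x, grad);

	bool ok = true;

	for (size_t i = 0; i < n && ok; ++i)
	{
		for (size_t j = 0; j < n; ++j)
		{
			xp[j] = x[j];
			xm[j] = x[j];
		}
		xp[i] += eps;
		xm[i] -= eps;

		floatdouble fp, fm;
		problem.cpu_f(xp, &fp);
		problem.cpu_f(xm, &fm);

		// Central finite difference approximation of df/dx_i.
		const floatdouble fd = (fp - fm) / (2.0f * eps);
		ok = std::fabs(fd - grad[i]) <= tol * (1.0f + std::fabs(fd));
	}

	delete [] grad;
	delete [] xp;
	delete [] xm;

	return ok;
}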
fdd4cb779e296124b87f8fe290941e52786c1b02.cu
#include "CudaLBFGS/lbfgs.h" #include <iostream> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include <device_functions.h> using namespace std; // f(x) = x^T A x + b^T x + c // gradf(x) = 2 A x + b class cpu_quadratic : public cpu_cost_function { public: // A is expected to point to n*n floats // b is expected to point to n floats cpu_quadratic(size_t n, float *A, float *b, float c) : cpu_cost_function(n) , m_A(A) , m_b(b) , m_c(c) {} void cpu_f(const floatdouble *h_x, floatdouble *h_y) { floatdouble xAx = 0.0f; for (size_t i = 0; i < m_numDimensions; ++i) { for (size_t j = 0; j < m_numDimensions; ++j) { xAx += m_A[i * m_numDimensions + j] * h_x[i] * h_x[j]; } } floatdouble bx = 0.0f; for (size_t i = 0; i < m_numDimensions; ++i) { bx += m_b[i] * h_x[i]; } *h_y = xAx + bx + m_c; } void cpu_gradf(const floatdouble *h_x, floatdouble *h_grad) { for (size_t i = 0; i < m_numDimensions; ++i) { h_grad[i] = 0.0f; for (size_t j = 0; j < m_numDimensions; ++j) { h_grad[i] += m_A[i * m_numDimensions + j] * h_x[j]; } h_grad[i] *= 2.0f; h_grad[i] += m_b[i]; } } void cpu_f_gradf(const floatdouble *h_x, floatdouble *h_f, floatdouble *h_gradf) { cpu_f(h_x, h_f); cpu_gradf(h_x, h_gradf); } private: float *m_A; float *m_b; float m_c; }; namespace gpu_quadratic_d { __device__ float tmp1; __device__ float tmp2; __device__ static void myAtomicAdd(float *address, float value) { #if __CUDA_ARCH__ >= 200 atomicAdd(address, value); #else // cf. https://www.sharcnet.ca/help/index.php/CUDA_tips_and_tricks int oldval, newval, readback; oldval = __float_as_int(*address); newval = __float_as_int(__int_as_float(oldval) + value); while ((readback=atomicCAS((int *)address, oldval, newval)) != oldval) { oldval = readback; newval = __float_as_int(__int_as_float(oldval) + value); } #endif } __global__ void kernelF(const float *d_xAx, const float *d_bx, const float *d_c, float *d_y) { *d_y = *d_xAx + *d_bx + *d_c; } __global__ void kernelGradf(const float *d_x, float *d_grad, float *A, float *b, const size_t len) { size_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= len) return; d_grad[index] = 0.0f; for (size_t j = 0; j < len; ++j) { d_grad[index] += A[index * len + j] * d_x[j]; } d_grad[index] *= 2.0f; d_grad[index] += b[index]; } __global__ static void xAx(const float *x, const float *A, const size_t len, float *res) { size_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= len*len) return; __shared__ float s_sum; // block local aggregate s_sum = 0.0f; __syncthreads(); // wait for all to initialize const size_t i = index / len; const size_t j = index % len; myAtomicAdd(&s_sum, A[index] * x[i] * x[j]); __syncthreads(); if (threadIdx.x == 0) myAtomicAdd(res, s_sum); } __global__ static void dot(const float *a, const float *b, const size_t len, float *res) { size_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= len) return; __shared__ float s_sum; // block local aggregate s_sum = 0.0f; __syncthreads(); // wait for all to initialize myAtomicAdd(&s_sum, a[index] * b[index]); __syncthreads(); if (threadIdx.x == 0) myAtomicAdd(res, s_sum); } } class gpu_quadratic : public cost_function { public: // A is expected to point to n*n floats // b is expected to point to n floats gpu_quadratic(size_t n, float *A, float *b, float c) : cost_function(n) { CudaSafeCall( cudaMalloc(&m_d_A, n*n * sizeof(float)) ); CudaSafeCall( cudaMalloc(&m_d_b, n * sizeof(float)) ); CudaSafeCall( cudaMalloc(&m_d_c, 1 * sizeof(float)) ); CudaSafeCall( 
cudaMemcpy(m_d_A, A, n*n * sizeof(float), cudaMemcpyHostToDevice) ); CudaSafeCall( cudaMemcpy(m_d_b, b, n * sizeof(float), cudaMemcpyHostToDevice) ); CudaSafeCall( cudaMemcpy(m_d_c, &c, 1 * sizeof(float), cudaMemcpyHostToDevice)); CudaSafeCall( cudaGetSymbolAddress((void**)&m_d_tmp1, gpu_quadratic_d::tmp1) ); CudaSafeCall( cudaGetSymbolAddress((void**)&m_d_tmp2, gpu_quadratic_d::tmp2) ); } ~gpu_quadratic() { CudaSafeCall( cudaFree(m_d_A) ); CudaSafeCall( cudaFree(m_d_b) ); CudaSafeCall( cudaFree(m_d_c) ); } void f(const float *d_x, float *d_y) { const size_t &NX = m_numDimensions; dim3 blockDim(512); size_t NX2 = NX * NX; dim3 gridDim_xAx((NX2 % blockDim.x) == 0 ? (NX2 / blockDim.x) : (NX2 / blockDim.x) + 1); dim3 gridDim_dot((NX % blockDim.x) == 0 ? (NX / blockDim.x) : (NX / blockDim.x) + 1); CudaSafeCall( cudaMemset(m_d_tmp1, 0, sizeof(float)) ); CudaSafeCall( cudaMemset(m_d_tmp2, 0, sizeof(float)) ); gpu_quadratic_d::xAx<<<gridDim_xAx, blockDim>>>(d_x, m_d_A, NX, m_d_tmp1); CudaCheckError(); gpu_quadratic_d::dot<<<gridDim_dot, blockDim>>>(d_x, m_d_b, NX, m_d_tmp2); CudaCheckError(); cudaDeviceSynchronize(); gpu_quadratic_d::kernelF<<<1, 1>>>(m_d_tmp1, m_d_tmp2, m_d_c, d_y); CudaCheckError(); } void gradf(const float *d_x, float *d_grad) { const size_t &NX = m_numDimensions; dim3 blockDim(512); dim3 gridDim((NX % blockDim.x) == 0 ? (NX / blockDim.x) : (NX / blockDim.x) + 1); gpu_quadratic_d::kernelGradf<<<gridDim, blockDim>>>(d_x, d_grad, m_d_A, m_d_b, NX); CudaCheckError(); } void f_gradf(const float *d_x, float *d_f, float *d_grad) { f(d_x, d_f); gradf(d_x, d_grad); } private: float *m_d_A; float *m_d_b; float *m_d_c; float *m_d_tmp1; float *m_d_tmp2; }; int main(int argc, char **argv) { // CPU size_t n = /*2*/ 8 /*200*/ /*500*/ /*5000*/; size_t maxIter = 500; float gradientEps = 1e-3f; float *A = new float[n*n]; for (size_t i = 0; i < n; ++i) { for (size_t j = 0; j < n; ++j) { //A[i * n + j] = (i == j) ? 1.0f : 0.0f; // 8 on main diagonal, 1 on side diagonals, 0 else if (i == j) A[i * n + j] = 8.0f; else if ((i == j-1) || (i == j+1)) A[i * n + j] = 1.0f; else A[i * n + j] = 0.0f; } } float *b = new float[n]; for (size_t i = 0; i < n; ++i) { b[i] = 1.0f; } float c = 42.0f; cpu_quadratic p1(n, A, b, c); lbfgs minimizer1(p1); minimizer1.setMaxIterations(maxIter); minimizer1.setGradientEpsilon(gradientEps); float *x = new float[n]; for (size_t i = 0; i < n; ++i) { x[i] = i % 2 == 0 ? 5.0f : -10.0f; } lbfgs::status stat = minimizer1.minimize_with_host_x(x); cout << lbfgs::statusToString(stat).c_str() << endl; cout << "CPU quadratic:"; for (size_t i = 0; i < n; ++i) { cout << " " << x[i]; } cout << endl; // GPU for (size_t i = 0; i < n; ++i) { x[i] = i % 2 == 0 ? 5.0f : -10.0f; } gpu_quadratic p2(n, A, b, c); lbfgs minimizer2(p2); minimizer2.setMaxIterations(maxIter); minimizer2.setGradientEpsilon(gradientEps); float *d_x; CudaSafeCall( cudaMalloc(&d_x, n * sizeof(float)) ); CudaSafeCall( cudaMemcpy(d_x, x, n * sizeof(float), cudaMemcpyHostToDevice) ); lbfgs::status stat2 = minimizer2.minimize(d_x); cout << lbfgs::statusToString(stat2).c_str() << endl; CudaSafeCall( cudaMemcpy(x, d_x, n * sizeof(float), cudaMemcpyDeviceToHost) ); CudaSafeCall( cudaFree(d_x) ); cout << "GPU quadratic:"; for (size_t i = 0; i < n; ++i) { cout << " " << x[i]; } cout << endl; delete [] x; delete [] A; delete [] b; return 0; }
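Illustrative sketch (not one of the corpus files above): the lbfgs pair just shown is typical of the CUDA-to-HIP mapping this corpus records — triple-chevron launches become hipLaunchKernelGGL calls and cuda* runtime calls become their hip* counterparts. The condensed, self-contained example below shows only that mapping; the file name, kernel, and sizes are invented for illustration, and it assumes a ROCm/HIP toolchain (hipcc) is available.

// saxpy_hip_example.cpp -- editorial illustration, not part of the corpus.
// Assumed build: hipcc saxpy_hip_example.cpp -o saxpy_hip_example
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void saxpy(int n, float a, const float *x, float *y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // same index math as the CUDA original would use
    if (i < n) y[i] = a * x[i] + y[i];
}

int main()
{
    const int n = 1 << 20;
    float *d_x, *d_y;
    // CUDA original form: cudaMalloc(&d_x, n * sizeof(float));
    hipMalloc(&d_x, n * sizeof(float));
    hipMalloc(&d_y, n * sizeof(float));
    hipMemset(d_x, 0, n * sizeof(float));
    hipMemset(d_y, 0, n * sizeof(float));

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);          // same ceiling division used throughout these files
    // CUDA original form: saxpy<<<grid, block, 0, 0>>>(n, 2.0f, d_x, d_y);
    hipLaunchKernelGGL(saxpy, grid, block, 0, 0, n, 2.0f, d_x, d_y);

    hipError_t err = hipDeviceSynchronize();
    if (err != hipSuccess) printf("%s\n", hipGetErrorString(err));

    hipFree(d_x);
    hipFree(d_y);
    return 0;
}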
d49cd999b7b64cf035eb6b48941ba0361918f4ce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <Environment.h> #include <loops/transform_same.h> #include <types/types.h> #include <op_boilerplate.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> using namespace simdOps; template <typename X, typename OpType> __global__ void transformSameSimple(void *x, Nd4jLong *xShapeInfo, int xRank, void *params, void *z, Nd4jLong *zShapeInfo, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { functions::transform::TransformSame<X>::template transformCuda<OpType>(x,xShapeInfo,params,z,zShapeInfo,allocationPointer,reductionPointer, tadShapeInfo, tadOffsets); } namespace functions { namespace transform { template<typename X> _CUDA_H void TransformSame<X>::executeTransformShaped(dim3 launchDims, hipStream_t *stream, int opNum, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_T(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_SAME_OPS); DEBUG_KERNEL(stream, opNum); } template<typename X> template <typename OpType> __device__ void TransformSame<X>::transformCuda(void *vx, Nd4jLong *xShapeInfo, void *vparams, void *vz, Nd4jLong *zShapeInfo, int *allocationPointer, void *vreductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto x = static_cast<X*>(vx); auto z = static_cast<X*>(vz); auto params = static_cast<X*>(vparams); auto reductionPointer = static_cast<X*>(vreductionPointer); if(OpType::requiresSpecial) { OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); return; } else { __shared__ Nd4jLong xEws; __shared__ Nd4jLong zEws; __shared__ char xOrder; __shared__ char zOrder; __shared__ Nd4jLong length; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); zEws = shape::elementWiseStride(zShapeInfo); xOrder = shape::order(xShapeInfo); zOrder = shape::order(zShapeInfo); length = shape::length(xShapeInfo); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; if(xEws > 0 && zEws > 0 && xOrder == zOrder) { for (int i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params); } else { if(vx == vz) { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); z[xOffset] = OpType::op(x[xOffset], params); } } else { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto 
xOffset = shape::getIndexOffset(i, xShapeInfo); auto zOffset = shape::getIndexOffset(i, zShapeInfo); z[zOffset] = OpType::op(x[xOffset], params); } } } } }; template<typename X> template <typename OpType> _CUDA_H void TransformSame<X>::intermediateShaped(dim3 launchDims, hipStream_t *stream, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { hipLaunchKernelGGL(( transformSameSimple<X, OpType>), dim3(launchDims.x), dim3(launchDims.x), launchDims.z, *stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); nd4j::DebugHelper::checkErrorCode(stream, "transformSame(...) failed"); } BUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT TransformSame, , LIBND4J_TYPES); } }
d49cd999b7b64cf035eb6b48941ba0361918f4ce.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <Environment.h> #include <loops/transform_same.h> #include <types/types.h> #include <op_boilerplate.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> using namespace simdOps; template <typename X, typename OpType> __global__ void transformSameSimple(void *x, Nd4jLong *xShapeInfo, int xRank, void *params, void *z, Nd4jLong *zShapeInfo, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { functions::transform::TransformSame<X>::template transformCuda<OpType>(x,xShapeInfo,params,z,zShapeInfo,allocationPointer,reductionPointer, tadShapeInfo, tadOffsets); } namespace functions { namespace transform { template<typename X> _CUDA_H void TransformSame<X>::executeTransformShaped(dim3 launchDims, cudaStream_t *stream, int opNum, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_T(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_SAME_OPS); DEBUG_KERNEL(stream, opNum); } template<typename X> template <typename OpType> __device__ void TransformSame<X>::transformCuda(void *vx, Nd4jLong *xShapeInfo, void *vparams, void *vz, Nd4jLong *zShapeInfo, int *allocationPointer, void *vreductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { auto x = static_cast<X*>(vx); auto z = static_cast<X*>(vz); auto params = static_cast<X*>(vparams); auto reductionPointer = static_cast<X*>(vreductionPointer); if(OpType::requiresSpecial) { OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); return; } else { __shared__ Nd4jLong xEws; __shared__ Nd4jLong zEws; __shared__ char xOrder; __shared__ char zOrder; __shared__ Nd4jLong length; if (threadIdx.x == 0) { xEws = shape::elementWiseStride(xShapeInfo); zEws = shape::elementWiseStride(zShapeInfo); xOrder = shape::order(xShapeInfo); zOrder = shape::order(zShapeInfo); length = shape::length(xShapeInfo); } __syncthreads(); auto tid = blockIdx.x * blockDim.x + threadIdx.x; int totalThreads = gridDim.x * blockDim.x; if(xEws > 0 && zEws > 0 && xOrder == zOrder) { for (int i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params); } else { if(vx == vz) { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); z[xOffset] = OpType::op(x[xOffset], params); } } else { for (Nd4jLong i = tid; i < length; i+= totalThreads) { auto xOffset = shape::getIndexOffset(i, xShapeInfo); auto zOffset = shape::getIndexOffset(i, 
zShapeInfo); z[zOffset] = OpType::op(x[xOffset], params); } } } } }; template<typename X> template <typename OpType> _CUDA_H void TransformSame<X>::intermediateShaped(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { transformSameSimple<X, OpType><<<launchDims.x, launchDims.x, launchDims.z, *stream>>>(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets); nd4j::DebugHelper::checkErrorCode(stream, "transformSame(...) failed"); } BUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT TransformSame, , LIBND4J_TYPES); } }
0360edbdc138a57c641720ea36bdd1122eb5bf04.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2019 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "taso/ops.h" #include "taso/cuda_helper.h" using namespace taso; void MergeGConv::map(void) { size_t outputSize = sizeof(DATATYPE) * outputs[0].volume(); checkCUDA(hipMalloc(&outputs[0].data_ptr, outputSize)); } void MergeGConv::unmap(void) { checkCUDA(hipFree(outputs[0].data_ptr)); } void MergeGConv::forward(bool block) { //merge_gconv_kernel<<<GET_BLOCKS(outputs[0].volume()), CUDA_NUM_THREADS>>>( // (DATATYPE*)outputs[0].data_ptr, (DATATYPE*)inputs[0].data_ptr, if (block) checkCUDA(hipDeviceSynchronize()); }
0360edbdc138a57c641720ea36bdd1122eb5bf04.cu
/* Copyright 2019 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "taso/ops.h" #include "taso/cuda_helper.h" using namespace taso; void MergeGConv::map(void) { size_t outputSize = sizeof(DATATYPE) * outputs[0].volume(); checkCUDA(cudaMalloc(&outputs[0].data_ptr, outputSize)); } void MergeGConv::unmap(void) { checkCUDA(cudaFree(outputs[0].data_ptr)); } void MergeGConv::forward(bool block) { //merge_gconv_kernel<<<GET_BLOCKS(outputs[0].volume()), CUDA_NUM_THREADS>>>( // (DATATYPE*)outputs[0].data_ptr, (DATATYPE*)inputs[0].data_ptr, if (block) checkCUDA(cudaDeviceSynchronize()); }
8f29224ceb9aad4804e5973fe79b05642225fd74.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "saber/funcs/impl/cuda/depthwise_deconv.h" #include "saber/funcs/impl/impl_macro.h" namespace anakin { namespace saber { template <typename dtype> __global__ void depthwise_deconv_2d(const int channel_in_stride, const int channel_out_stride, const int kernel_size, const dtype* const din, const int num, const int channels, const int hin, const int win, const int hout, const int wout, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, dtype* const dout, const dtype* const weight, const dtype* const bias, bool bias_flag, bool relu_flag) { int wo = blockIdx.x * blockDim.x + threadIdx.x; int w = wo + pad_w; int ho = blockIdx.y * blockDim.y + threadIdx.y; int h = ho + pad_h; int c = blockIdx.z % channels; int i = blockIdx.z; int index = i * channel_out_stride + ho * wout + wo; extern __shared__ dtype sharedw[]; int idx = threadIdx.y * blockDim.x + threadIdx.x; if (idx < kernel_size) { sharedw[idx] = weight[c * kernel_size + idx]; } __syncthreads(); if (wo < wout && ho < hout) { const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, hin); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, win); const int khstart = (h >= kernel_h) ? ((h - kernel_h) % stride_h) + (kernel_h - stride_h) : h; const int kwstart = (w >= kernel_w) ? ((w - kernel_w) % stride_w) + (kernel_w - stride_w) : w; dtype gradient = 0; const dtype* const top_diff_slice = din + i * channel_in_stride; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int kh = khstart - (ph - phstart) * stride_h; int kw = kwstart - (pw - pwstart) * stride_w; gradient += top_diff_slice[ph * win + pw] * sharedw[kh * kernel_w + kw]; } } if (bias_flag) { gradient += bias[c]; } if (relu_flag) { gradient = gradient > (dtype)0 ? 
gradient : (dtype)0; } dout[index] = gradient; } } template <> SaberStatus DepthwiseDeconv<NV, AK_FLOAT>::create( const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, ConvParam<NV>& param, Context<NV> &ctx) { return SaberSuccess; } template <> SaberStatus DepthwiseDeconv<NV, AK_FLOAT>::init( const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, ConvParam<NV>& param, Context<NV>& ctx) { this->_ctx = &ctx; return create(inputs, outputs, param, ctx); } template <> SaberStatus DepthwiseDeconv<NV, AK_FLOAT>::dispatch(\ const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, ConvParam<NV>& param) { hipStream_t stream = this->_ctx->get_compute_stream(); const float* din = (const float*)inputs[0]->data(); float* dout = (float*)outputs[0]->mutable_data(); const float* weight = (const float*)param.weight()->data(); const float* bias = (const float*)param.bias()->data(); int win = inputs[0]->width(); int hin = inputs[0]->height(); int num = inputs[0]->num(); int ch_in = inputs[0]->channel(); int wout = outputs[0]->width(); int hout = outputs[0]->height(); int ch_out = outputs[0]->channel(); int kernel_h = param.weight()->height(); int kernel_w = param.weight()->width(); dim3 block(32, 32); int gx = (wout + block.x - 1) / block.x; int gy = (hout + block.y - 1) / block.y; dim3 grid(gx, gy, num * ch_out); int channel_in_stride = hin * win; int channel_out_stride = hout * wout; int kernel_size = kernel_h * kernel_w; int shared_mem_size = kernel_size * sizeof(float); bool bias_flag = param.bias()->valid_size() > 0; bool relu_flag = param.activation_param.has_active; hipLaunchKernelGGL(( depthwise_deconv_2d<float>), dim3(grid), dim3(block), shared_mem_size, stream, channel_in_stride, channel_out_stride, kernel_size, \ din, num, ch_in, hin, win, hout, wout, kernel_h, \ kernel_w, param.stride_h, param.stride_w, \ param.pad_h, param.pad_w, \ dout, weight, bias, bias_flag, relu_flag); return SaberSuccess; } template class DepthwiseDeconv<NV, AK_FLOAT>; DEFINE_OP_TEMPLATE(DepthwiseDeconv, ConvParam, NV, AK_HALF); DEFINE_OP_TEMPLATE(DepthwiseDeconv, ConvParam, NV, AK_INT8); } //namespace anakin } //namespace anakin
8f29224ceb9aad4804e5973fe79b05642225fd74.cu
#include "saber/funcs/impl/cuda/depthwise_deconv.h" #include "saber/funcs/impl/impl_macro.h" namespace anakin { namespace saber { template <typename dtype> __global__ void depthwise_deconv_2d(const int channel_in_stride, const int channel_out_stride, const int kernel_size, const dtype* const din, const int num, const int channels, const int hin, const int win, const int hout, const int wout, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, dtype* const dout, const dtype* const weight, const dtype* const bias, bool bias_flag, bool relu_flag) { int wo = blockIdx.x * blockDim.x + threadIdx.x; int w = wo + pad_w; int ho = blockIdx.y * blockDim.y + threadIdx.y; int h = ho + pad_h; int c = blockIdx.z % channels; int i = blockIdx.z; int index = i * channel_out_stride + ho * wout + wo; extern __shared__ dtype sharedw[]; int idx = threadIdx.y * blockDim.x + threadIdx.x; if (idx < kernel_size) { sharedw[idx] = weight[c * kernel_size + idx]; } __syncthreads(); if (wo < wout && ho < hout) { const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, hin); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, win); const int khstart = (h >= kernel_h) ? ((h - kernel_h) % stride_h) + (kernel_h - stride_h) : h; const int kwstart = (w >= kernel_w) ? ((w - kernel_w) % stride_w) + (kernel_w - stride_w) : w; dtype gradient = 0; const dtype* const top_diff_slice = din + i * channel_in_stride; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int kh = khstart - (ph - phstart) * stride_h; int kw = kwstart - (pw - pwstart) * stride_w; gradient += top_diff_slice[ph * win + pw] * sharedw[kh * kernel_w + kw]; } } if (bias_flag) { gradient += bias[c]; } if (relu_flag) { gradient = gradient > (dtype)0 ? 
gradient : (dtype)0; } dout[index] = gradient; } } template <> SaberStatus DepthwiseDeconv<NV, AK_FLOAT>::create( const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, ConvParam<NV>& param, Context<NV> &ctx) { return SaberSuccess; } template <> SaberStatus DepthwiseDeconv<NV, AK_FLOAT>::init( const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, ConvParam<NV>& param, Context<NV>& ctx) { this->_ctx = &ctx; return create(inputs, outputs, param, ctx); } template <> SaberStatus DepthwiseDeconv<NV, AK_FLOAT>::dispatch(\ const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, ConvParam<NV>& param) { cudaStream_t stream = this->_ctx->get_compute_stream(); const float* din = (const float*)inputs[0]->data(); float* dout = (float*)outputs[0]->mutable_data(); const float* weight = (const float*)param.weight()->data(); const float* bias = (const float*)param.bias()->data(); int win = inputs[0]->width(); int hin = inputs[0]->height(); int num = inputs[0]->num(); int ch_in = inputs[0]->channel(); int wout = outputs[0]->width(); int hout = outputs[0]->height(); int ch_out = outputs[0]->channel(); int kernel_h = param.weight()->height(); int kernel_w = param.weight()->width(); dim3 block(32, 32); int gx = (wout + block.x - 1) / block.x; int gy = (hout + block.y - 1) / block.y; dim3 grid(gx, gy, num * ch_out); int channel_in_stride = hin * win; int channel_out_stride = hout * wout; int kernel_size = kernel_h * kernel_w; int shared_mem_size = kernel_size * sizeof(float); bool bias_flag = param.bias()->valid_size() > 0; bool relu_flag = param.activation_param.has_active; depthwise_deconv_2d<float><<<grid, block, shared_mem_size, stream>>>( channel_in_stride, channel_out_stride, kernel_size, \ din, num, ch_in, hin, win, hout, wout, kernel_h, \ kernel_w, param.stride_h, param.stride_w, \ param.pad_h, param.pad_w, \ dout, weight, bias, bias_flag, relu_flag); return SaberSuccess; } template class DepthwiseDeconv<NV, AK_FLOAT>; DEFINE_OP_TEMPLATE(DepthwiseDeconv, ConvParam, NV, AK_HALF); DEFINE_OP_TEMPLATE(DepthwiseDeconv, ConvParam, NV, AK_INT8); } //namespace anakin } //namespace anakin
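Illustrative sketch (not one of the corpus files above): the depthwise-deconvolution pair just shown launches its kernel with a runtime-sized dynamic shared-memory buffer and an explicit stream. The short, hedged example below isolates only that launch pattern in HIP form; the kernel, buffer sizes, and names are invented for illustration.

// dynamic_shmem_stream_example.cpp -- editorial illustration, not part of the corpus.
#include <hip/hip_runtime.h>
#include <cstdio>

// Each block sums its elements into a dynamically sized shared buffer.
__global__ void block_sum(const float *in, float *out, int n)
{
    extern __shared__ float tile[];                     // size supplied at launch time
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    tile[threadIdx.x] = (gid < n) ? in[gid] : 0.0f;
    __syncthreads();
    if (threadIdx.x == 0)
    {
        float s = 0.0f;
        for (int i = 0; i < blockDim.x; ++i) s += tile[i];
        out[blockIdx.x] = s;
    }
}

int main()
{
    const int n = 1 << 16;
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    size_t shmem = block.x * sizeof(float);             // runtime-sized shared memory, like shared_mem_size above

    float *d_in, *d_out;
    hipMalloc(&d_in, n * sizeof(float));
    hipMalloc(&d_out, grid.x * sizeof(float));
    hipMemset(d_in, 0, n * sizeof(float));

    hipStream_t stream;
    hipStreamCreate(&stream);
    // CUDA original form: block_sum<<<grid, block, shmem, stream>>>(d_in, d_out, n);
    hipLaunchKernelGGL(block_sum, grid, block, shmem, stream, d_in, d_out, n);
    hipStreamSynchronize(stream);

    hipStreamDestroy(stream);
    hipFree(d_in);
    hipFree(d_out);
    return 0;
}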
cbea231144d60100d8a3199109d6bf1c1773f57e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition (HOOMD-blue) Open Source Software License Copyright 2009-2014 The Regents of the University of Michigan All rights reserved. HOOMD-blue may contain modifications ("Contributions") provided, and to which copyright is held, by various Contributors who have granted The Regents of the University of Michigan the right to modify and/or distribute such Contributions. You may redistribute, use, and create derivate works of HOOMD-blue, in source and binary forms, provided you abide by the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer both in the code and prominently in any materials provided with the distribution. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * All publications and presentations based on HOOMD-blue, including any reports or published results obtained, in whole or in part, with HOOMD-blue, will acknowledge its use according to the terms posted at the time of submission on: http://codeblue.umich.edu/hoomd-blue/citations.html * Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website: http://codeblue.umich.edu/hoomd-blue/ * Apart from the above required attributions, neither the name of the copyright holder nor the names of HOOMD-blue's contributors may be used to endorse or promote products derived from this software without specific prior written permission. Disclaimer THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Maintainer: jglaser #include "ParticleData.cuh" #include "BondedGroupData.cuh" #include "kernels/scan.cuh" #include "kernels/mergesort.cuh" #include "kernels/intervalmove.cuh" /*! 
\file BondedGroupData.cu \brief Implements the helper functions (GPU version) for updating the GPU bonded group tables */ template<unsigned int group_size, typename group_t> __global__ void gpu_count_groups_kernel( const unsigned int n_groups, const group_t *d_group_table, const unsigned int *d_rtag, unsigned int *d_scratch_idx, unsigned int *d_scratch_g, unsigned int *d_n_groups, unsigned int max_n_groups, unsigned int *d_condition, unsigned int next_flag ) { unsigned int group_idx = blockIdx.x*blockDim.x + threadIdx.x; if (group_idx >= n_groups) return; group_t g = d_group_table[group_idx]; #pragma unroll for (unsigned int i = 0; i < group_size; ++i) { unsigned int tag_i = g.tag[i]; unsigned int pidx_i = d_rtag[tag_i]; // detect incomplete groups if (pidx_i == NOT_LOCAL) atomicMax(d_condition, next_flag+1+group_idx); // write out group_idx to temporary array d_scratch_g[i*n_groups+group_idx] = group_idx; d_scratch_idx[i*n_groups+group_idx] = pidx_i; // atomically increment number of groups unsigned int n = 0; if (pidx_i != NOT_LOCAL) n = atomicInc(&d_n_groups[pidx_i],0xffffffff); if (n >= max_n_groups) // set flag to indicate we need to grow the output array atomicMax(d_condition,next_flag); } } template<unsigned int group_size, typename group_t> __global__ void gpu_group_scatter_kernel( unsigned int n_scratch, const unsigned int *d_scratch_g, const unsigned int *d_scratch_idx, const unsigned int *d_offset, const group_t *d_members, const unsigned int *d_group_type, const unsigned int *d_rtag, group_t *d_pidx_group_table, unsigned int *d_pidx_gpos_table, unsigned int pidx_group_table_pitch ) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n_scratch) return; unsigned int pidx = d_scratch_idx[i]; unsigned int offset = d_offset[i]*pidx_group_table_pitch + pidx; // load group unsigned int group_idx = d_scratch_g[i]; group_t g = d_members[group_idx]; // construct compact group representation, excluding particle pidx group_t p; // last element = group type p.idx[group_size-1] = d_group_type[group_idx]; unsigned int j = 0; // position in group unsigned int gpos = 0; #pragma unroll for (unsigned int k = 0; k < group_size; ++k) { unsigned int tag_k = g.tag[k]; unsigned int pidx_k = d_rtag[tag_k]; if (pidx_k == pidx) { gpos = k; continue; } p.idx[j++] = pidx_k; } d_pidx_group_table[offset] = p; d_pidx_gpos_table[offset] = gpos; } template<unsigned int group_size, typename group_t> void gpu_update_group_table( const unsigned int n_groups, const unsigned int N, const group_t* d_group_table, const unsigned int *d_group_type, const unsigned int *d_rtag, unsigned int *d_n_groups, unsigned int max_n_groups, unsigned int *d_condition, unsigned int next_flag, unsigned int &flag, group_t *d_pidx_group_table, unsigned int *d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int *d_scratch_g, unsigned int *d_scratch_idx, unsigned int *d_offsets, unsigned int *d_seg_offsets, mgpu::ContextPtr mgpu_context ) { // construct scratch table by expanding the group table by particle index unsigned int block_size = 512; unsigned n_blocks = n_groups / block_size + 1; // reset number of groups hipMemsetAsync(d_n_groups, 0, sizeof(unsigned int)*N); hipLaunchKernelGGL(( gpu_count_groups_kernel<group_size>), dim3(n_blocks), dim3(block_size), 0, 0, n_groups, d_group_table, d_rtag, d_scratch_idx, d_scratch_g, d_n_groups, max_n_groups, d_condition, next_flag); // read back flag hipMemcpyAsync(&flag, d_condition, sizeof(unsigned int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); if (! 
(flag >= next_flag) && n_groups) { // we are good, fill group table // sort groups by particle index mgpu::MergesortPairs(d_scratch_idx, d_scratch_g, group_size*n_groups, *mgpu_context); mgpu::Scan<mgpu::MgpuScanTypeExc>(d_n_groups, N, (unsigned int) 0, mgpu::plus<unsigned int>(), (unsigned int *) NULL, (unsigned int *)NULL, d_seg_offsets,*mgpu_context); // use IntervalMove to perform a segmented scan of d_scratch_idx, // using segment offsets as input mgpu::constant_iterator<unsigned int> const_it(0); mgpu::counting_iterator<unsigned int> count_it(0); mgpu::IntervalMove(group_size*n_groups, const_it, d_seg_offsets, d_seg_offsets, N, count_it, d_offsets, *mgpu_context); // scatter groups to destinations block_size = 512; n_blocks = group_size*n_groups/block_size + 1; hipLaunchKernelGGL(( gpu_group_scatter_kernel<group_size>), dim3(n_blocks), dim3(block_size), 0, 0, n_groups*group_size, d_scratch_g, d_scratch_idx, d_offsets, d_group_table, d_group_type, d_rtag, d_pidx_group_table, d_pidx_gpos_table, pidx_group_table_pitch); } } /* * Explicit template instantiations */ //! BondData template void gpu_update_group_table<2>( const unsigned int n_groups, const unsigned int N, const union group_storage<2> *d_group_table, const unsigned int *d_group_type, const unsigned int *d_rtag, unsigned int *d_n_groups, unsigned int max_n_groups, unsigned int *d_condition, unsigned int next_flag, unsigned int &flag, group_storage<2> *d_pidx_group_table, unsigned int *d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int *d_scratch_g, unsigned int *d_scratch_idx, unsigned int *d_offsets, unsigned int *d_seg_offsets, mgpu::ContextPtr mgpu_context ); //! AngleData template void gpu_update_group_table<3>( const unsigned int n_groups, const unsigned int N, const union group_storage<3> *d_group_table, const unsigned int *d_group_type, const unsigned int *d_rtag, unsigned int *d_n_groups, unsigned int max_n_groups, unsigned int *d_condition, unsigned int next_flag, unsigned int &flag, group_storage<3> *d_pidx_group_table, unsigned int *d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int *d_scratch_g, unsigned int *d_scratch_idx, unsigned int *d_offsets, unsigned int *d_seg_offsets, mgpu::ContextPtr mgpu_context ); //! DihedralData and ImproperData template void gpu_update_group_table<4>( const unsigned int n_groups, const unsigned int N, const union group_storage<4> *d_group_table, const unsigned int *d_group_type, const unsigned int *d_rtag, unsigned int *d_n_groups, unsigned int max_n_groups, unsigned int *d_condition, unsigned int next_flag, unsigned int &flag, group_storage<4> *d_pidx_group_table, unsigned int *d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int *d_scratch_g, unsigned int *d_scratch_idx, unsigned int *d_offsets, unsigned int *d_seg_offsets, mgpu::ContextPtr mgpu_context );
cbea231144d60100d8a3199109d6bf1c1773f57e.cu
/* Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition (HOOMD-blue) Open Source Software License Copyright 2009-2014 The Regents of the University of Michigan All rights reserved. HOOMD-blue may contain modifications ("Contributions") provided, and to which copyright is held, by various Contributors who have granted The Regents of the University of Michigan the right to modify and/or distribute such Contributions. You may redistribute, use, and create derivate works of HOOMD-blue, in source and binary forms, provided you abide by the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer both in the code and prominently in any materials provided with the distribution. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * All publications and presentations based on HOOMD-blue, including any reports or published results obtained, in whole or in part, with HOOMD-blue, will acknowledge its use according to the terms posted at the time of submission on: http://codeblue.umich.edu/hoomd-blue/citations.html * Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website: http://codeblue.umich.edu/hoomd-blue/ * Apart from the above required attributions, neither the name of the copyright holder nor the names of HOOMD-blue's contributors may be used to endorse or promote products derived from this software without specific prior written permission. Disclaimer THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Maintainer: jglaser #include "ParticleData.cuh" #include "BondedGroupData.cuh" #include "kernels/scan.cuh" #include "kernels/mergesort.cuh" #include "kernels/intervalmove.cuh" /*! 
\file BondedGroupData.cu \brief Implements the helper functions (GPU version) for updating the GPU bonded group tables */ template<unsigned int group_size, typename group_t> __global__ void gpu_count_groups_kernel( const unsigned int n_groups, const group_t *d_group_table, const unsigned int *d_rtag, unsigned int *d_scratch_idx, unsigned int *d_scratch_g, unsigned int *d_n_groups, unsigned int max_n_groups, unsigned int *d_condition, unsigned int next_flag ) { unsigned int group_idx = blockIdx.x*blockDim.x + threadIdx.x; if (group_idx >= n_groups) return; group_t g = d_group_table[group_idx]; #pragma unroll for (unsigned int i = 0; i < group_size; ++i) { unsigned int tag_i = g.tag[i]; unsigned int pidx_i = d_rtag[tag_i]; // detect incomplete groups if (pidx_i == NOT_LOCAL) atomicMax(d_condition, next_flag+1+group_idx); // write out group_idx to temporary array d_scratch_g[i*n_groups+group_idx] = group_idx; d_scratch_idx[i*n_groups+group_idx] = pidx_i; // atomically increment number of groups unsigned int n = 0; if (pidx_i != NOT_LOCAL) n = atomicInc(&d_n_groups[pidx_i],0xffffffff); if (n >= max_n_groups) // set flag to indicate we need to grow the output array atomicMax(d_condition,next_flag); } } template<unsigned int group_size, typename group_t> __global__ void gpu_group_scatter_kernel( unsigned int n_scratch, const unsigned int *d_scratch_g, const unsigned int *d_scratch_idx, const unsigned int *d_offset, const group_t *d_members, const unsigned int *d_group_type, const unsigned int *d_rtag, group_t *d_pidx_group_table, unsigned int *d_pidx_gpos_table, unsigned int pidx_group_table_pitch ) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n_scratch) return; unsigned int pidx = d_scratch_idx[i]; unsigned int offset = d_offset[i]*pidx_group_table_pitch + pidx; // load group unsigned int group_idx = d_scratch_g[i]; group_t g = d_members[group_idx]; // construct compact group representation, excluding particle pidx group_t p; // last element = group type p.idx[group_size-1] = d_group_type[group_idx]; unsigned int j = 0; // position in group unsigned int gpos = 0; #pragma unroll for (unsigned int k = 0; k < group_size; ++k) { unsigned int tag_k = g.tag[k]; unsigned int pidx_k = d_rtag[tag_k]; if (pidx_k == pidx) { gpos = k; continue; } p.idx[j++] = pidx_k; } d_pidx_group_table[offset] = p; d_pidx_gpos_table[offset] = gpos; } template<unsigned int group_size, typename group_t> void gpu_update_group_table( const unsigned int n_groups, const unsigned int N, const group_t* d_group_table, const unsigned int *d_group_type, const unsigned int *d_rtag, unsigned int *d_n_groups, unsigned int max_n_groups, unsigned int *d_condition, unsigned int next_flag, unsigned int &flag, group_t *d_pidx_group_table, unsigned int *d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int *d_scratch_g, unsigned int *d_scratch_idx, unsigned int *d_offsets, unsigned int *d_seg_offsets, mgpu::ContextPtr mgpu_context ) { // construct scratch table by expanding the group table by particle index unsigned int block_size = 512; unsigned n_blocks = n_groups / block_size + 1; // reset number of groups cudaMemsetAsync(d_n_groups, 0, sizeof(unsigned int)*N); gpu_count_groups_kernel<group_size><<<n_blocks, block_size>>>( n_groups, d_group_table, d_rtag, d_scratch_idx, d_scratch_g, d_n_groups, max_n_groups, d_condition, next_flag); // read back flag cudaMemcpyAsync(&flag, d_condition, sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); if (! 
(flag >= next_flag) && n_groups) { // we are good, fill group table // sort groups by particle index mgpu::MergesortPairs(d_scratch_idx, d_scratch_g, group_size*n_groups, *mgpu_context); mgpu::Scan<mgpu::MgpuScanTypeExc>(d_n_groups, N, (unsigned int) 0, mgpu::plus<unsigned int>(), (unsigned int *) NULL, (unsigned int *)NULL, d_seg_offsets,*mgpu_context); // use IntervalMove to perform a segmented scan of d_scratch_idx, // using segment offsets as input mgpu::constant_iterator<unsigned int> const_it(0); mgpu::counting_iterator<unsigned int> count_it(0); mgpu::IntervalMove(group_size*n_groups, const_it, d_seg_offsets, d_seg_offsets, N, count_it, d_offsets, *mgpu_context); // scatter groups to destinations block_size = 512; n_blocks = group_size*n_groups/block_size + 1; gpu_group_scatter_kernel<group_size><<<n_blocks, block_size>>>( n_groups*group_size, d_scratch_g, d_scratch_idx, d_offsets, d_group_table, d_group_type, d_rtag, d_pidx_group_table, d_pidx_gpos_table, pidx_group_table_pitch); } } /* * Explicit template instantiations */ //! BondData template void gpu_update_group_table<2>( const unsigned int n_groups, const unsigned int N, const union group_storage<2> *d_group_table, const unsigned int *d_group_type, const unsigned int *d_rtag, unsigned int *d_n_groups, unsigned int max_n_groups, unsigned int *d_condition, unsigned int next_flag, unsigned int &flag, group_storage<2> *d_pidx_group_table, unsigned int *d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int *d_scratch_g, unsigned int *d_scratch_idx, unsigned int *d_offsets, unsigned int *d_seg_offsets, mgpu::ContextPtr mgpu_context ); //! AngleData template void gpu_update_group_table<3>( const unsigned int n_groups, const unsigned int N, const union group_storage<3> *d_group_table, const unsigned int *d_group_type, const unsigned int *d_rtag, unsigned int *d_n_groups, unsigned int max_n_groups, unsigned int *d_condition, unsigned int next_flag, unsigned int &flag, group_storage<3> *d_pidx_group_table, unsigned int *d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int *d_scratch_g, unsigned int *d_scratch_idx, unsigned int *d_offsets, unsigned int *d_seg_offsets, mgpu::ContextPtr mgpu_context ); //! DihedralData and ImproperData template void gpu_update_group_table<4>( const unsigned int n_groups, const unsigned int N, const union group_storage<4> *d_group_table, const unsigned int *d_group_type, const unsigned int *d_rtag, unsigned int *d_n_groups, unsigned int max_n_groups, unsigned int *d_condition, unsigned int next_flag, unsigned int &flag, group_storage<4> *d_pidx_group_table, unsigned int *d_pidx_gpos_table, const unsigned int pidx_group_table_pitch, unsigned int *d_scratch_g, unsigned int *d_scratch_idx, unsigned int *d_offsets, unsigned int *d_seg_offsets, mgpu::ContextPtr mgpu_context );
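Illustrative sketch (not one of the corpus files above): the BondedGroupData pair just shown keeps a device-side condition word that kernels raise with atomicMax and the host reads back to decide whether a table must grow. The minimal example below reproduces only that round trip; the kernel, capacity value, and names are invented for illustration and are not taken from HOOMD-blue.

// condition_flag_example.cpp -- editorial illustration, not part of the corpus.
#include <hip/hip_runtime.h>
#include <cstdio>

// Raise the condition word when a per-slot count exceeds the allowed capacity.
__global__ void check_capacity(const unsigned int *counts, int n,
                               unsigned int capacity, unsigned int *d_condition)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && counts[i] > capacity)
        atomicMax(d_condition, counts[i]);              // same idea as the d_condition/next_flag scheme above
}

int main()
{
    const int n = 1024;
    const unsigned int capacity = 4;

    unsigned int *d_counts, *d_condition;
    hipMalloc(&d_counts, n * sizeof(unsigned int));
    hipMalloc(&d_condition, sizeof(unsigned int));
    hipMemset(d_counts, 0, n * sizeof(unsigned int));
    hipMemset(d_condition, 0, sizeof(unsigned int));

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    hipLaunchKernelGGL(check_capacity, grid, block, 0, 0, d_counts, n, capacity, d_condition);

    unsigned int flag = 0;
    // Read the flag back, then synchronize before trusting it -- mirroring the async copy + sync above.
    hipMemcpyAsync(&flag, d_condition, sizeof(unsigned int), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    if (flag != 0)
        printf("capacity exceeded: max count = %u, the real code would grow its table here\n", flag);

    hipFree(d_counts);
    hipFree(d_condition);
    return 0;
}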
5862cde8591501f9f2fc33001ff4990292b25e34.hip
// !!! This is a file automatically generated by hipify!!! // includes system #include <sstream> // std::ostringstream // include CUDA #include "hip/hip_runtime.h" #include "device_launch_parameters.h" // include project #include "integrator_exception.h" #include "rkn76.h" #include "util.h" #define THREADS_PER_BLOCK 256 static hipError_t HandleError(hipError_t cudaStatus, const char *file, int line) { if (hipSuccess != cudaStatus) { printf( "%s in %s at line %d\n", hipGetErrorString( cudaStatus ), file, line ); return cudaStatus; } return cudaStatus; } #define HANDLE_ERROR(cudaStatus) (HandleError(cudaStatus, __FILE__, __LINE__)) #define LAMBDA 1.0/20.0 #define sQ sqrt(21.0) ttt_t rkn76::c[] = { 0.0, 1.0/10.0, 1.0/5.0, 3.0/8.0, 1.0/2.0, (7.0-sQ)/14.0, (7.0+sQ)/14.0, 1.0, 1.0 }; var_t rkn76::a[] = { 1.0/200.0, 1.0/150.0, 1.0/75.0, 171.0/8192.0, 45.0/4096.0, 315.0/8192.0, 5.0/288.0, 25.0/528.0, 25.0/672.0, 16.0/693.0, (1003.0-205.0*sQ)/12348.0,-25.0*(751.0-173.0*sQ)/90552.0, 25.0*(624.0-137.0*sQ)/43218.0, -128.0*(361.0-79.0*sQ)/237699.0, (3411.0-745.0*sQ)/24696.0, (793.0+187.0*sQ)/12348.0, -25.0*(331.0+113.0*sQ)/90552.0, 25.0*(1044.0+247.0*sQ)/43218.0, -128.0*(14885.0+3779.0*sQ)/9745659.0, (3327.0+797.0*sQ)/24696.0, -(581.0+127.0*sQ)/1722.0, -(157.0-3.0*sQ)/378.0, 25.0*(143.0-10.0*sQ)/2772.0, -25.0*(876.0+55.0*sQ)/3969.0, 1280.0*(913.0+18.0*sQ)/596673.0, -(1353.0+26.0*sQ)/2268.0, 7.0*(1777.0+377.0*sQ)/4428.0, 7.0*(5.0-sQ)/36.0, 1.0/20.0, 0.0, 0.0, 0.0, 8.0/45.0, 7.0*(7.0+sQ)/360.0, 7.0*(7.0-sQ)/360.0, 0.0 }; var_t rkn76::bh[]= { 1.0/20.0, 0.0, 0.0, 0.0, 8.0/45.0, 7.0*(7.0+sQ)/360.0, 7.0*(7.0-sQ)/360.0, 0.0, 0.0 }; var_t rkn76::b[] = { 1.0/20.0, 0.0, 0.0, 0.0, 8.0/45.0, 7.0*(7.0+sQ)/360.0, 7.0*(7.0-sQ)/360.0, -LAMBDA, LAMBDA }; #undef sQ // ytemp = y_n + dt*(a21*k1) static __global__ void calc_ytemp_for_k2_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *k1, var_t k1f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + k1f * k1[tid]; tid += stride; } } // ytemp = y_n + dt*(a31*k1 + a32*k2) static __global__ void calc_ytemp_for_k3_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *k1, const var_t *k2, var_t k1f, var_t k2f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + k1f * k1[tid] + k2f * k2[tid]; tid += stride; } } // ytemp = y_n + dt*(a41*k1 + a42*k2 + a43*k3) static __global__ void calc_ytemp_for_k4_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *k1, const var_t *k2, const var_t *k3, var_t k1f, var_t k2f, var_t k3f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + k1f * k1[tid] + k2f * k2[tid] + k3f * k3[tid]; tid += stride; } } // ytemp = y_n + dt*(a51*k1 + a52*k2 + a53*k3 + a54*k4) static __global__ void calc_ytemp_for_k5_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *k1, const var_t *k2, const var_t *k3, const var_t *k4, var_t k1f, var_t k2f, var_t k3f, var_t k4f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + k1f * k1[tid] + k2f * k2[tid] + k3f * k3[tid] + k4f * k4[tid]; tid += stride; } } // ytemp = y_n + dt*(a61*k1 + a62*k2 + a63*k3 + a64*k4 + a65*k5) static __global__ void calc_ytemp_for_k6_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *k1, const var_t *k2, const var_t *k3, const var_t *k4, const 
var_t *k5, var_t k1f, var_t k2f, var_t k3f, var_t k4f, var_t k5f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + k1f * k1[tid] + k2f * k2[tid] + k3f * k3[tid] + k4f * k4[tid] + k5f * k5[tid]; tid += stride; } } // ytemp = y_n + dt*(a71*k1 + a72*k2 + a73*k3 + a74*k4 + a75*k5 + a76*k6) static __global__ void calc_ytemp_for_k7_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *k1, const var_t *k2, const var_t *k3, const var_t *k4, const var_t *k5, const var_t *k6, var_t k1f, var_t k2f, var_t k3f, var_t k4f, var_t k5f, var_t k6f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + k1f * k1[tid] + k2f * k2[tid] + k3f * k3[tid] + k4f * k4[tid] + k5f * k5[tid] + k6f * k6[tid]; tid += stride; } } // ytemp = y_n + dt*(a81*k1 + a82*k2 + a83*k3 + a84*k4 + a85*k5 + a86*k6 + a87*k7) static __global__ void calc_ytemp_for_k8_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *k1, const var_t *k2, const var_t *k3, const var_t *k4, const var_t *k5, const var_t *k6, const var_t *k7, var_t k1f, var_t k2f, var_t k3f, var_t k4f, var_t k5f, var_t k6f, var_t k7f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + k1f * k1[tid] + k2f * k2[tid] + k3f * k3[tid] + k4f * k4[tid] + k5f * k5[tid] + k6f * k6[tid] + k7f * k7[tid]; tid += stride; } } // ytemp = y_n + dt*(a91*k1 + a95*k5 + a96*k6 + a97*k7) static __global__ void calc_ytemp_for_k9_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *k1, const var_t *k5, const var_t *k6, const var_t *k7, var_t k1f, var_t k5f, var_t k6f, var_t k7f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + k1f * k1[tid] + k5f * k5[tid] + k6f * k6[tid] + k7f * k7[tid]; tid += stride; } } // y = y_n + dt*(bh1*k1 + bh5*k5 + bh6*k6 + bh7*k7) static __global__ void calc_y_kernel(int_t n, var_t *y, const var_t *y_n, const var_t *k1, const var_t *k5, const var_t *k6, const var_t *k7, var_t k1f, var_t k5f, var_t k6f, var_t k7f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { y[tid] = y_n[tid] + k1f * k1[tid] + k5f * k5[tid] + k6f * k6[tid] + k7f * k7[tid]; tid += stride; } } static __global__ void calc_f8_sub_f9_kernel(int_t n, var_t* result, const var_t* f8, const var_t* f9) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { result[tid] = f8[tid] - f9[tid]; tid += stride; } } void rkn76::call_calc_k8_sub_k9_kernel() { for (int i = 0; i < f.get_order(); i++) { int n = f.d_y[i].size(); var_t *err = d_err[i].data().get(); var_t* k8 = d_k[i][7].data().get(); var_t* k9 = d_k[i][8].data().get(); calculate_grid(n, THREADS_PER_BLOCK); hipLaunchKernelGGL(( calc_f8_sub_f9_kernel), dim3(grid), dim3(block), 0, 0, n, err, k8, k9); hipError_t cudaStatus = HANDLE_ERROR(hipGetLastError()); if (hipSuccess != cudaStatus) { throw integrator_exception("calc_f8_sub_f9_kernel failed"); } } } void rkn76::call_calc_ytemp_for_kr_kernel(int r) { int idx = 0; for (int i = 0; i < f.get_order(); i++) { int n = f.d_y[i].size(); calculate_grid(n, THREADS_PER_BLOCK); var_t* y_n= f.d_y[i].data().get(); var_t* k1 = d_k[i][0].data().get(); var_t* k2 = d_k[i][1].data().get(); var_t* k3 = d_k[i][2].data().get(); var_t* k4 = d_k[i][3].data().get(); var_t* k5 = 
d_k[i][4].data().get(); var_t* k6 = d_k[i][5].data().get(); var_t* k7 = d_k[i][6].data().get(); var_t* k8; if (adaptive) { k8 = d_k[i][7].data().get(); } switch (r) { case 1: idx = 0; hipLaunchKernelGGL(( calc_ytemp_for_k2_kernel), dim3(grid), dim3(block), 0, 0, n, d_ytemp[i].data().get(), y_n, k1, a[idx]*dt_try); break; case 2: idx = 1; hipLaunchKernelGGL(( calc_ytemp_for_k3_kernel), dim3(grid), dim3(block), 0, 0, n, d_ytemp[i].data().get(), y_n, k1, k2, a[idx]*dt_try, a[idx+1]*dt_try); break; case 3: idx = 3; hipLaunchKernelGGL(( calc_ytemp_for_k4_kernel), dim3(grid), dim3(block), 0, 0, n, d_ytemp[i].data().get(), y_n, k1, k2, k3, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try); break; case 4: idx = 6; hipLaunchKernelGGL(( calc_ytemp_for_k5_kernel), dim3(grid), dim3(block), 0, 0, n, d_ytemp[i].data().get(), y_n, k1, k2, k3, k4, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try, a[idx+3]*dt_try); break; case 5: idx = 10; hipLaunchKernelGGL(( calc_ytemp_for_k6_kernel), dim3(grid), dim3(block), 0, 0, n, d_ytemp[i].data().get(), y_n, k1, k2, k3, k4, k5, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try, a[idx+3]*dt_try, a[idx+4]*dt_try); break; case 6: idx = 15; hipLaunchKernelGGL(( calc_ytemp_for_k7_kernel), dim3(grid), dim3(block), 0, 0, n, d_ytemp[i].data().get(), y_n, k1, k2, k3, k4, k5, k6, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try, a[idx+3]*dt_try, a[idx+4]*dt_try, a[idx+5]*dt_try); break; case 7: idx = 21; hipLaunchKernelGGL(( calc_ytemp_for_k8_kernel), dim3(grid), dim3(block), 0, 0, n, d_ytemp[i].data().get(), y_n, k1, k2, k3, k4, k5, k6, k7, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try, a[idx+3]*dt_try, a[idx+4]*dt_try, a[idx+5]*dt_try, a[idx+6]*dt_try); break; case 8: idx = 28; hipLaunchKernelGGL(( calc_ytemp_for_k9_kernel), dim3(grid), dim3(block), 0, 0, n, d_ytemp[i].data().get(), y_n, k1, k5, k6, k7, a[idx]*dt_try, a[idx+4]*dt_try, a[idx+5]*dt_try, a[idx+6]*dt_try); break; default: ostringstream msg("call_calc_ytemp_for_kr_kernel() function was called with invalid parameter: ", ostringstream::ate); msg << r+1 << "!"; throw integrator_exception(msg.str()); } hipError_t cudaStatus = HANDLE_ERROR(hipGetLastError()); if (hipSuccess != cudaStatus) { ostringstream msg("calc_ytemp_for_k", ostringstream::ate); msg << r+1 << "_kernel failed"; throw integrator_exception(msg.str()); } } } void rkn76::call_calc_y_kernel() { for (int i = 0; i < f.get_order(); i++) { int n = f.d_y[i].size(); calculate_grid(n, THREADS_PER_BLOCK); var_t* y_n= f.d_y[i].data().get(); var_t *y = f.d_yout[i].data().get(); var_t* k1 = d_k[i][0].data().get(); var_t* k5 = d_k[i][4].data().get(); var_t* k6 = d_k[i][5].data().get(); var_t* k7 = d_k[i][6].data().get(); hipLaunchKernelGGL(( calc_y_kernel), dim3(grid), dim3(block), 0, 0, n, y, y_n, k1, k5, k6, k7, b[0]*dt_try, b[4]*dt_try, b[5]*dt_try, b[6]*dt_try); hipError_t cudaStatus = HANDLE_ERROR(hipGetLastError()); if (hipSuccess != cudaStatus) { throw integrator_exception("calc_y_kernel failed"); } } } rkn76::rkn76(ode& f, ttt_t dt, bool adaptive, var_t tolerance) : integrator(f, dt), adaptive(adaptive), tolerance(tolerance), d_k(f.get_order()), d_ytemp(f.get_order(), d_var_t()), d_err(f.get_order(), d_var_t()) { RKOrder = 7; r_max = adaptive ? 
RKOrder + 2 : RKOrder; int forder = f.get_order(); for (int i = 0; i < forder; i++) { int size = f.d_y[i].size(); d_ytemp[i].resize(size); if (adaptive) { d_err[i].resize(size); } d_k[i].resize(r_max); for (int r = 0; r < r_max; r++) { d_k[i][r].resize(size); } } } void rkn76::calculate_grid(int nData, int threads_per_block) { int nThread = ::min(threads_per_block, nData); int nBlock = (nData + nThread - 1)/nThread; grid.x = nBlock; block.x = nThread; } ttt_t rkn76::step() { int forder = f.get_order(); int r = 0; // Calculate k1 = f(tn, yn) = d_k[][0] ttt_t ttemp = f.t + c[r] * dt; for (int i = 0; i < forder; i++) { f.calculate_dy(i, r, ttemp, f.d_p, f.d_y, d_k[i][r]); } dt_try = dt; var_t max_err = 0.0; int_t iter = 0; do { // Calculate k2 = f(tn + c2 * dt, yn + a21 * dt * k1) = d_k[][1] // Calculate k3 = f(tn + c3 * dt, yn + a31 * dt * k1 + ...) = d_k[][2] // Calculate k4 = f(tn + c4 * dt, yn + a41 * dt * k1 + ...) = d_k[][3] // ... // Calculate k7 = f(tn + c7 * dt, yn + a71 * dt * k1 + ...) = d_k[][6] for (r = 1; r < RKOrder; r++) { ttemp = f.t + c[r] * dt; call_calc_ytemp_for_kr_kernel(r); for (int i = 0; i < forder; i++) { f.calculate_dy(i, r, ttemp, f.d_p, d_ytemp, d_k[i][r]); } } dt_did = dt_try; if (adaptive) { // Calculate k8 = f(tn + c8 * dt, yn + a81 * dt * k1 + ...) = d_k[][7] // Calculate k9 = f(tn + c9 * dt, yn + a91 * dt * k1 + ...) = d_k[][8] for (r = RKOrder; r < r_max; r++) { ttemp = f.t + c[r] * dt; call_calc_ytemp_for_kr_kernel(r); for (int i = 0; i < forder; i++) { f.calculate_dy(i, r, ttemp, f.d_p, r == r_max - 1 ? f.d_yout : d_ytemp, d_k[i][r]); } } // calculate d_err = f8 - f9 call_calc_k8_sub_k9_kernel(); max_err = fabs(dt_try*LAMBDA*::max(max_vec(d_err[0]), max_vec(d_err[1]))); dt_try *= 0.9 * pow(tolerance / max_err, 1.0/8.0); } else { call_calc_y_kernel(); } iter++; } while(adaptive && max_err > tolerance); n_failed_step += (iter - 1); n_step++; // Set the next step size dt = dt_try; f.tout = f.t + dt_did; f.swap_in_out(); return dt_did; } #undef LAMBDA
5862cde8591501f9f2fc33001ff4990292b25e34.cu
// includes system #include <sstream> // std::ostringstream // include CUDA #include "cuda_runtime.h" #include "device_launch_parameters.h" // include project #include "integrator_exception.h" #include "rkn76.h" #include "util.h" #define THREADS_PER_BLOCK 256 static cudaError_t HandleError(cudaError_t cudaStatus, const char *file, int line) { if (cudaSuccess != cudaStatus) { printf( "%s in %s at line %d\n", cudaGetErrorString( cudaStatus ), file, line ); return cudaStatus; } return cudaStatus; } #define HANDLE_ERROR(cudaStatus) (HandleError(cudaStatus, __FILE__, __LINE__)) #define LAMBDA 1.0/20.0 #define sQ sqrt(21.0) ttt_t rkn76::c[] = { 0.0, 1.0/10.0, 1.0/5.0, 3.0/8.0, 1.0/2.0, (7.0-sQ)/14.0, (7.0+sQ)/14.0, 1.0, 1.0 }; var_t rkn76::a[] = { 1.0/200.0, 1.0/150.0, 1.0/75.0, 171.0/8192.0, 45.0/4096.0, 315.0/8192.0, 5.0/288.0, 25.0/528.0, 25.0/672.0, 16.0/693.0, (1003.0-205.0*sQ)/12348.0,-25.0*(751.0-173.0*sQ)/90552.0, 25.0*(624.0-137.0*sQ)/43218.0, -128.0*(361.0-79.0*sQ)/237699.0, (3411.0-745.0*sQ)/24696.0, (793.0+187.0*sQ)/12348.0, -25.0*(331.0+113.0*sQ)/90552.0, 25.0*(1044.0+247.0*sQ)/43218.0, -128.0*(14885.0+3779.0*sQ)/9745659.0, (3327.0+797.0*sQ)/24696.0, -(581.0+127.0*sQ)/1722.0, -(157.0-3.0*sQ)/378.0, 25.0*(143.0-10.0*sQ)/2772.0, -25.0*(876.0+55.0*sQ)/3969.0, 1280.0*(913.0+18.0*sQ)/596673.0, -(1353.0+26.0*sQ)/2268.0, 7.0*(1777.0+377.0*sQ)/4428.0, 7.0*(5.0-sQ)/36.0, 1.0/20.0, 0.0, 0.0, 0.0, 8.0/45.0, 7.0*(7.0+sQ)/360.0, 7.0*(7.0-sQ)/360.0, 0.0 }; var_t rkn76::bh[]= { 1.0/20.0, 0.0, 0.0, 0.0, 8.0/45.0, 7.0*(7.0+sQ)/360.0, 7.0*(7.0-sQ)/360.0, 0.0, 0.0 }; var_t rkn76::b[] = { 1.0/20.0, 0.0, 0.0, 0.0, 8.0/45.0, 7.0*(7.0+sQ)/360.0, 7.0*(7.0-sQ)/360.0, -LAMBDA, LAMBDA }; #undef sQ // ytemp = y_n + dt*(a21*k1) static __global__ void calc_ytemp_for_k2_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *k1, var_t k1f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + k1f * k1[tid]; tid += stride; } } // ytemp = y_n + dt*(a31*k1 + a32*k2) static __global__ void calc_ytemp_for_k3_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *k1, const var_t *k2, var_t k1f, var_t k2f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + k1f * k1[tid] + k2f * k2[tid]; tid += stride; } } // ytemp = y_n + dt*(a41*k1 + a42*k2 + a43*k3) static __global__ void calc_ytemp_for_k4_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *k1, const var_t *k2, const var_t *k3, var_t k1f, var_t k2f, var_t k3f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + k1f * k1[tid] + k2f * k2[tid] + k3f * k3[tid]; tid += stride; } } // ytemp = y_n + dt*(a51*k1 + a52*k2 + a53*k3 + a54*k4) static __global__ void calc_ytemp_for_k5_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *k1, const var_t *k2, const var_t *k3, const var_t *k4, var_t k1f, var_t k2f, var_t k3f, var_t k4f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + k1f * k1[tid] + k2f * k2[tid] + k3f * k3[tid] + k4f * k4[tid]; tid += stride; } } // ytemp = y_n + dt*(a61*k1 + a62*k2 + a63*k3 + a64*k4 + a65*k5) static __global__ void calc_ytemp_for_k6_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *k1, const var_t *k2, const var_t *k3, const var_t *k4, const var_t *k5, var_t k1f, var_t k2f, var_t k3f, var_t k4f, var_t 
k5f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + k1f * k1[tid] + k2f * k2[tid] + k3f * k3[tid] + k4f * k4[tid] + k5f * k5[tid]; tid += stride; } } // ytemp = y_n + dt*(a71*k1 + a72*k2 + a73*k3 + a74*k4 + a75*k5 + a76*k6) static __global__ void calc_ytemp_for_k7_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *k1, const var_t *k2, const var_t *k3, const var_t *k4, const var_t *k5, const var_t *k6, var_t k1f, var_t k2f, var_t k3f, var_t k4f, var_t k5f, var_t k6f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + k1f * k1[tid] + k2f * k2[tid] + k3f * k3[tid] + k4f * k4[tid] + k5f * k5[tid] + k6f * k6[tid]; tid += stride; } } // ytemp = y_n + dt*(a81*k1 + a82*k2 + a83*k3 + a84*k4 + a85*k5 + a86*k6 + a87*k7) static __global__ void calc_ytemp_for_k8_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *k1, const var_t *k2, const var_t *k3, const var_t *k4, const var_t *k5, const var_t *k6, const var_t *k7, var_t k1f, var_t k2f, var_t k3f, var_t k4f, var_t k5f, var_t k6f, var_t k7f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + k1f * k1[tid] + k2f * k2[tid] + k3f * k3[tid] + k4f * k4[tid] + k5f * k5[tid] + k6f * k6[tid] + k7f * k7[tid]; tid += stride; } } // ytemp = y_n + dt*(a91*k1 + a95*k5 + a96*k6 + a97*k7) static __global__ void calc_ytemp_for_k9_kernel(int_t n, var_t *ytemp, const var_t *y_n, const var_t *k1, const var_t *k5, const var_t *k6, const var_t *k7, var_t k1f, var_t k5f, var_t k6f, var_t k7f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { ytemp[tid] = y_n[tid] + k1f * k1[tid] + k5f * k5[tid] + k6f * k6[tid] + k7f * k7[tid]; tid += stride; } } // y = y_n + dt*(bh1*k1 + bh5*k5 + bh6*k6 + bh7*k7) static __global__ void calc_y_kernel(int_t n, var_t *y, const var_t *y_n, const var_t *k1, const var_t *k5, const var_t *k6, const var_t *k7, var_t k1f, var_t k5f, var_t k6f, var_t k7f) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { y[tid] = y_n[tid] + k1f * k1[tid] + k5f * k5[tid] + k6f * k6[tid] + k7f * k7[tid]; tid += stride; } } static __global__ void calc_f8_sub_f9_kernel(int_t n, var_t* result, const var_t* f8, const var_t* f9) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; while (n > tid) { result[tid] = f8[tid] - f9[tid]; tid += stride; } } void rkn76::call_calc_k8_sub_k9_kernel() { for (int i = 0; i < f.get_order(); i++) { int n = f.d_y[i].size(); var_t *err = d_err[i].data().get(); var_t* k8 = d_k[i][7].data().get(); var_t* k9 = d_k[i][8].data().get(); calculate_grid(n, THREADS_PER_BLOCK); calc_f8_sub_f9_kernel<<<grid, block>>>(n, err, k8, k9); cudaError cudaStatus = HANDLE_ERROR(cudaGetLastError()); if (cudaSuccess != cudaStatus) { throw integrator_exception("calc_f8_sub_f9_kernel failed"); } } } void rkn76::call_calc_ytemp_for_kr_kernel(int r) { int idx = 0; for (int i = 0; i < f.get_order(); i++) { int n = f.d_y[i].size(); calculate_grid(n, THREADS_PER_BLOCK); var_t* y_n= f.d_y[i].data().get(); var_t* k1 = d_k[i][0].data().get(); var_t* k2 = d_k[i][1].data().get(); var_t* k3 = d_k[i][2].data().get(); var_t* k4 = d_k[i][3].data().get(); var_t* k5 = d_k[i][4].data().get(); var_t* k6 = d_k[i][5].data().get(); var_t* k7 = d_k[i][6].data().get(); var_t* k8; if 
(adaptive) { k8 = d_k[i][7].data().get(); } switch (r) { case 1: idx = 0; calc_ytemp_for_k2_kernel<<<grid, block>>>(n, d_ytemp[i].data().get(), y_n, k1, a[idx]*dt_try); break; case 2: idx = 1; calc_ytemp_for_k3_kernel<<<grid, block>>>(n, d_ytemp[i].data().get(), y_n, k1, k2, a[idx]*dt_try, a[idx+1]*dt_try); break; case 3: idx = 3; calc_ytemp_for_k4_kernel<<<grid, block>>>(n, d_ytemp[i].data().get(), y_n, k1, k2, k3, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try); break; case 4: idx = 6; calc_ytemp_for_k5_kernel<<<grid, block>>>(n, d_ytemp[i].data().get(), y_n, k1, k2, k3, k4, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try, a[idx+3]*dt_try); break; case 5: idx = 10; calc_ytemp_for_k6_kernel<<<grid, block>>>(n, d_ytemp[i].data().get(), y_n, k1, k2, k3, k4, k5, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try, a[idx+3]*dt_try, a[idx+4]*dt_try); break; case 6: idx = 15; calc_ytemp_for_k7_kernel<<<grid, block>>>(n, d_ytemp[i].data().get(), y_n, k1, k2, k3, k4, k5, k6, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try, a[idx+3]*dt_try, a[idx+4]*dt_try, a[idx+5]*dt_try); break; case 7: idx = 21; calc_ytemp_for_k8_kernel<<<grid, block>>>(n, d_ytemp[i].data().get(), y_n, k1, k2, k3, k4, k5, k6, k7, a[idx]*dt_try, a[idx+1]*dt_try, a[idx+2]*dt_try, a[idx+3]*dt_try, a[idx+4]*dt_try, a[idx+5]*dt_try, a[idx+6]*dt_try); break; case 8: idx = 28; calc_ytemp_for_k9_kernel<<<grid, block>>>(n, d_ytemp[i].data().get(), y_n, k1, k5, k6, k7, a[idx]*dt_try, a[idx+4]*dt_try, a[idx+5]*dt_try, a[idx+6]*dt_try); break; default: ostringstream msg("call_calc_ytemp_for_kr_kernel() function was called with invalid parameter: ", ostringstream::ate); msg << r+1 << "!"; throw integrator_exception(msg.str()); } cudaError cudaStatus = HANDLE_ERROR(cudaGetLastError()); if (cudaSuccess != cudaStatus) { ostringstream msg("calc_ytemp_for_k", ostringstream::ate); msg << r+1 << "_kernel failed"; throw integrator_exception(msg.str()); } } } void rkn76::call_calc_y_kernel() { for (int i = 0; i < f.get_order(); i++) { int n = f.d_y[i].size(); calculate_grid(n, THREADS_PER_BLOCK); var_t* y_n= f.d_y[i].data().get(); var_t *y = f.d_yout[i].data().get(); var_t* k1 = d_k[i][0].data().get(); var_t* k5 = d_k[i][4].data().get(); var_t* k6 = d_k[i][5].data().get(); var_t* k7 = d_k[i][6].data().get(); calc_y_kernel<<<grid, block>>>(n, y, y_n, k1, k5, k6, k7, b[0]*dt_try, b[4]*dt_try, b[5]*dt_try, b[6]*dt_try); cudaError cudaStatus = HANDLE_ERROR(cudaGetLastError()); if (cudaSuccess != cudaStatus) { throw integrator_exception("calc_y_kernel failed"); } } } rkn76::rkn76(ode& f, ttt_t dt, bool adaptive, var_t tolerance) : integrator(f, dt), adaptive(adaptive), tolerance(tolerance), d_k(f.get_order()), d_ytemp(f.get_order(), d_var_t()), d_err(f.get_order(), d_var_t()) { RKOrder = 7; r_max = adaptive ? 
RKOrder + 2 : RKOrder; int forder = f.get_order(); for (int i = 0; i < forder; i++) { int size = f.d_y[i].size(); d_ytemp[i].resize(size); if (adaptive) { d_err[i].resize(size); } d_k[i].resize(r_max); for (int r = 0; r < r_max; r++) { d_k[i][r].resize(size); } } } void rkn76::calculate_grid(int nData, int threads_per_block) { int nThread = std::min(threads_per_block, nData); int nBlock = (nData + nThread - 1)/nThread; grid.x = nBlock; block.x = nThread; } ttt_t rkn76::step() { int forder = f.get_order(); int r = 0; // Calculate k1 = f(tn, yn) = d_k[][0] ttt_t ttemp = f.t + c[r] * dt; for (int i = 0; i < forder; i++) { f.calculate_dy(i, r, ttemp, f.d_p, f.d_y, d_k[i][r]); } dt_try = dt; var_t max_err = 0.0; int_t iter = 0; do { // Calculate k2 = f(tn + c2 * dt, yn + a21 * dt * k1) = d_k[][1] // Calculate k3 = f(tn + c3 * dt, yn + a31 * dt * k1 + ...) = d_k[][2] // Calculate k4 = f(tn + c4 * dt, yn + a41 * dt * k1 + ...) = d_k[][3] // ... // Calculate k7 = f(tn + c7 * dt, yn + a71 * dt * k1 + ...) = d_k[][6] for (r = 1; r < RKOrder; r++) { ttemp = f.t + c[r] * dt; call_calc_ytemp_for_kr_kernel(r); for (int i = 0; i < forder; i++) { f.calculate_dy(i, r, ttemp, f.d_p, d_ytemp, d_k[i][r]); } } dt_did = dt_try; if (adaptive) { // Calculate k8 = f(tn + c8 * dt, yn + a81 * dt * k1 + ...) = d_k[][7] // Calculate k9 = f(tn + c9 * dt, yn + a91 * dt * k1 + ...) = d_k[][8] for (r = RKOrder; r < r_max; r++) { ttemp = f.t + c[r] * dt; call_calc_ytemp_for_kr_kernel(r); for (int i = 0; i < forder; i++) { f.calculate_dy(i, r, ttemp, f.d_p, r == r_max - 1 ? f.d_yout : d_ytemp, d_k[i][r]); } } // calculate d_err = f8 - f9 call_calc_k8_sub_k9_kernel(); max_err = fabs(dt_try*LAMBDA*std::max(max_vec(d_err[0]), max_vec(d_err[1]))); dt_try *= 0.9 * pow(tolerance / max_err, 1.0/8.0); } else { call_calc_y_kernel(); } iter++; } while(adaptive && max_err > tolerance); n_failed_step += (iter - 1); n_step++; // Set the next step size dt = dt_try; f.tout = f.t + dt_did; f.swap_in_out(); return dt_did; } #undef LAMBDA
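calculate_grid() above launches ceil(n / THREADS_PER_BLOCK) blocks, and every calc_*_kernel still guards with a grid-stride while loop, so the kernels remain correct even if fewer blocks are launched. A self-contained CUDA illustration of that pairing (hypothetical saxpy-style kernel, not part of rkn76):

#include <cstdio>

__global__ void saxpy_gridstride(int n, float a, const float* x, float* y)
{
    int tid    = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
    while (tid < n) {                 // grid-stride loop: covers n elements for any grid size
        y[tid] += a * x[tid];
        tid += stride;
    }
}

int main()
{
    const int n = 1000, threads = 256;
    const int blocks = (n + threads - 1) / threads;   // same rounding as calculate_grid()
    float *x, *y;
    cudaMallocManaged(&x, n * sizeof(float));
    cudaMallocManaged(&y, n * sizeof(float));
    for (int i = 0; i < n; ++i) { x[i] = 1.0f; y[i] = 2.0f; }
    saxpy_gridstride<<<blocks, threads>>>(n, 0.5f, x, y);
    cudaDeviceSynchronize();
    printf("y[0] = %f\n", y[0]);                      // expect 2.5
    cudaFree(x); cudaFree(y);
    return 0;
}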
8dca8a75e0b43a9b9b93a6c69290c2c42c139cca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // (c) Copyright 2013 Lev Barash, Landau Institute for Theoretical Physics, Russian Academy of Sciences // This is supplement to the paper: // L.Yu. Barash, L.N. Shchur, "PRAND: GPU accelerated parallel random number generation library: Using most reliable algorithms and applying parallelism of modern GPUs and CPUs". // e-mail: barash @ itp.ac.ru (remove space) #include<stdio.h> #define gm29_CUDA_CALL(x) do { if((x) != hipSuccess) { printf("Error: %s at %s:%d\n",hipGetErrorString(hipGetLastError()),__FILE__,__LINE__); exit(1);}} while(0) #define gm29_BLOCKS 512 #define gm29_THREADS 128 #define gm29_ARRAY_SECTIONS (gm29_BLOCKS*gm29_THREADS/32) #define gm29_k 4 #define gm29_q 2 #define gm29_g 536870909U #define gm29_halfg 268435456U typedef struct{ unsigned xN[32] __attribute__ ((aligned(16))), xP[32] __attribute__ ((aligned(16))); } gm29_state; typedef gm29_state gm29_sse_state; unsigned gm29_sse_Consts[16] __attribute__ ((aligned(16))) = {536870911,536870911,536870911,536870911,1073741818,1073741818,1073741818,1073741818, 536870908,536870908,536870908,536870908,536870909,536870909,536870909,536870909}; extern "C" __host__ unsigned int gm29_sse_generate_(gm29_sse_state* state){ unsigned output1; unsigned output2 __attribute__ ((unused)); asm volatile("movaps (%4),%%xmm7\n" \ "movaps 16(%4),%%xmm6\n" \ "movaps 32(%4),%%xmm4\n" \ "movaps (%2),%%xmm0\n" \ "movaps (%3),%%xmm5\n" \ "movaps %%xmm0,(%3)\n" \ "pslld $2,%%xmm0\n" \ "paddd %%xmm6,%%xmm0\n" \ "pslld $1,%%xmm5\n" \ "psubd %%xmm5,%%xmm0\n" \ "movaps %%xmm0,%%xmm5\n" \ "psrld $29,%%xmm5\n" \ "pand %%xmm7,%%xmm0\n" \ "paddd %%xmm5,%%xmm0\n" \ "paddd %%xmm5,%%xmm0\n" \ "paddd %%xmm5,%%xmm0\n" \ "movaps %%xmm0,%%xmm5\n" \ "pcmpgtd %%xmm4,%%xmm5\n" \ "pand 48(%4),%%xmm5\n" \ "psubd %%xmm5,%%xmm0\n" \ "movaps %%xmm0,(%2)\n" \ "movaps 16(%2),%%xmm1\n" \ "movaps 16(%3),%%xmm5\n" \ "movaps %%xmm1,16(%3)\n" \ "pslld $2,%%xmm1\n" \ "paddd %%xmm6,%%xmm1\n" \ "pslld $1,%%xmm5\n" \ "psubd %%xmm5,%%xmm1\n" \ "movaps %%xmm1,%%xmm5\n" \ "psrld $29,%%xmm5\n" \ "pand %%xmm7,%%xmm1\n" \ "paddd %%xmm5,%%xmm1\n" \ "paddd %%xmm5,%%xmm1\n" \ "paddd %%xmm5,%%xmm1\n" \ "movaps %%xmm1,%%xmm5\n" \ "pcmpgtd %%xmm4,%%xmm5\n" \ "pand 48(%4),%%xmm5\n" \ "psubd %%xmm5,%%xmm1\n" \ "movaps %%xmm1,16(%2)\n" \ "movaps 32(%2),%%xmm2\n" \ "movaps 32(%3),%%xmm5\n" \ "movaps %%xmm2,32(%3)\n" \ "pslld $2,%%xmm2\n" \ "paddd %%xmm6,%%xmm2\n" \ "pslld $1,%%xmm5\n" \ "psubd %%xmm5,%%xmm2\n" \ "movaps %%xmm2,%%xmm5\n" \ "psrld $29,%%xmm5\n" \ "pand %%xmm7,%%xmm2\n" \ "paddd %%xmm5,%%xmm2\n" \ "paddd %%xmm5,%%xmm2\n" \ "paddd %%xmm5,%%xmm2\n" \ "movaps %%xmm2,%%xmm5\n" \ "pcmpgtd %%xmm4,%%xmm5\n" \ "pand 48(%4),%%xmm5\n" \ "psubd %%xmm5,%%xmm2\n" \ "movaps %%xmm2,32(%2)\n" \ "movaps 48(%2),%%xmm3\n" \ "movaps 48(%3),%%xmm5\n" \ "movaps %%xmm3,48(%3)\n" \ "pslld $2,%%xmm3\n" \ "paddd %%xmm6,%%xmm3\n" \ "pslld $1,%%xmm5\n" \ "psubd %%xmm5,%%xmm3\n" \ "movaps %%xmm3,%%xmm5\n" \ "psrld $29,%%xmm5\n" \ "pand %%xmm7,%%xmm3\n" \ "paddd %%xmm5,%%xmm3\n" \ "paddd %%xmm5,%%xmm3\n" \ "paddd %%xmm5,%%xmm3\n" \ "movaps %%xmm3,%%xmm5\n" \ "pcmpgtd %%xmm4,%%xmm5\n" \ "pand 48(%4),%%xmm5\n" \ "psubd %%xmm5,%%xmm3\n" \ "movaps %%xmm3,48(%2)\n" \ "psrld $28,%%xmm0\n" \ "psrld $28,%%xmm1\n" \ "psrld $28,%%xmm2\n" \ "psrld $28,%%xmm3\n" \ "packssdw %%xmm1,%%xmm0\n" \ "packssdw %%xmm3,%%xmm2\n" \ "packsswb %%xmm2,%%xmm0\n" \ "psllw $7,%%xmm0\n" \ "pmovmskb %%xmm0,%0\n" \ "movaps 64(%2),%%xmm0\n" \ "movaps 
64(%3),%%xmm5\n" \ "movaps %%xmm0,64(%3)\n" \ "pslld $2,%%xmm0\n" \ "paddd %%xmm6,%%xmm0\n" \ "pslld $1,%%xmm5\n" \ "psubd %%xmm5,%%xmm0\n" \ "movaps %%xmm0,%%xmm5\n" \ "psrld $29,%%xmm5\n" \ "pand %%xmm7,%%xmm0\n" \ "paddd %%xmm5,%%xmm0\n" \ "paddd %%xmm5,%%xmm0\n" \ "paddd %%xmm5,%%xmm0\n" \ "movaps %%xmm0,%%xmm5\n" \ "pcmpgtd %%xmm4,%%xmm5\n" \ "pand 48(%4),%%xmm5\n" \ "psubd %%xmm5,%%xmm0\n" \ "movaps %%xmm0,64(%2)\n" \ "movaps 80(%2),%%xmm1\n" \ "movaps 80(%3),%%xmm5\n" \ "movaps %%xmm1,80(%3)\n" \ "pslld $2,%%xmm1\n" \ "paddd %%xmm6,%%xmm1\n" \ "pslld $1,%%xmm5\n" \ "psubd %%xmm5,%%xmm1\n" \ "movaps %%xmm1,%%xmm5\n" \ "psrld $29,%%xmm5\n" \ "pand %%xmm7,%%xmm1\n" \ "paddd %%xmm5,%%xmm1\n" \ "paddd %%xmm5,%%xmm1\n" \ "paddd %%xmm5,%%xmm1\n" \ "movaps %%xmm1,%%xmm5\n" \ "pcmpgtd %%xmm4,%%xmm5\n" \ "pand 48(%4),%%xmm5\n" \ "psubd %%xmm5,%%xmm1\n" \ "movaps %%xmm1,80(%2)\n" \ "movaps 96(%2),%%xmm2\n" \ "movaps 96(%3),%%xmm5\n" \ "movaps %%xmm2,96(%3)\n" \ "pslld $2,%%xmm2\n" \ "paddd %%xmm6,%%xmm2\n" \ "pslld $1,%%xmm5\n" \ "psubd %%xmm5,%%xmm2\n" \ "movaps %%xmm2,%%xmm5\n" \ "psrld $29,%%xmm5\n" \ "pand %%xmm7,%%xmm2\n" \ "paddd %%xmm5,%%xmm2\n" \ "paddd %%xmm5,%%xmm2\n" \ "paddd %%xmm5,%%xmm2\n" \ "movaps %%xmm2,%%xmm5\n" \ "pcmpgtd %%xmm4,%%xmm5\n" \ "pand 48(%4),%%xmm5\n" \ "psubd %%xmm5,%%xmm2\n" \ "movaps %%xmm2,96(%2)\n" \ "movaps 112(%2),%%xmm3\n" \ "movaps 112(%3),%%xmm5\n" \ "movaps %%xmm3,112(%3)\n" \ "pslld $2,%%xmm3\n" \ "paddd %%xmm6,%%xmm3\n" \ "pslld $1,%%xmm5\n" \ "psubd %%xmm5,%%xmm3\n" \ "movaps %%xmm3,%%xmm5\n" \ "psrld $29,%%xmm5\n" \ "pand %%xmm7,%%xmm3\n" \ "paddd %%xmm5,%%xmm3\n" \ "paddd %%xmm5,%%xmm3\n" \ "paddd %%xmm5,%%xmm3\n" \ "movaps %%xmm3,%%xmm5\n" \ "pcmpgtd %%xmm4,%%xmm5\n" \ "pand 48(%4),%%xmm5\n" \ "psubd %%xmm5,%%xmm3\n" \ "movaps %%xmm3,112(%2)\n" \ "psrld $28,%%xmm0\n" \ "psrld $28,%%xmm1\n" \ "psrld $28,%%xmm2\n" \ "psrld $28,%%xmm3\n" \ "packssdw %%xmm1,%%xmm0\n" \ "packssdw %%xmm3,%%xmm2\n" \ "packsswb %%xmm2,%%xmm0\n" \ "psllw $7,%%xmm0\n" \ "pmovmskb %%xmm0,%1\n" \ "shll $16,%1\n" \ "addl %1,%0\n" \ "":"=&r"(output1),"=&r"(output2):"r"(state->xN),"r"(state->xP),"r"(gm29_sse_Consts)); return output1; } extern "C" __device__ __host__ void gm29_get_sse_state_(gm29_state* state,gm29_sse_state* sse_state){ int i; for(i=0;i<32;i++) {sse_state->xN[i]=state->xN[i]; sse_state->xP[i]=state->xP[i];} } extern "C" __device__ __host__ unsigned gm29_CNext(unsigned N,unsigned P){ return (gm29_k*N+gm29_q*(gm29_g-P))%gm29_g; } extern "C" __device__ __host__ unsigned gm29_CNext2(unsigned N,unsigned P,unsigned myk,unsigned myq){ unsigned long long NNN,PP,kk,qq,gg,rr; // returns (myk*N-myq*P) (mod gm29_g) NNN=N; PP=P; kk=myk; qq=myq; gg=gm29_g; rr=(kk*NNN+qq*(gg-PP)); NNN=rr>>29; PP=rr-(NNN*gg); PP-=((PP>>29)*gg); return (unsigned)PP; } extern "C" __device__ __host__ unsigned gm29_GetNextN(unsigned x0,unsigned x1,unsigned n){ //returns x_{2^n} unsigned myk=gm29_k,myq=gm29_q,i,x=x1; for(i=0;i<n;i++){ x=gm29_CNext2(x,x0,myk,myq); myk=gm29_CNext2(myk,2,myk,myq); myq=gm29_CNext2(myq,0,myq,0); } return x; } extern "C" __device__ __host__ unsigned gm29_GetNextAny(unsigned x0,unsigned x1,unsigned long long N){ // returns x_N unsigned long long i; unsigned xp=x0,xn=x1,xpnew,xnnew,shift=0; i=N; while(i>0){ if(i%2==1){ // xp,xn ----> 2^shift xpnew=gm29_GetNextN(xp,xn,shift); xnnew=gm29_GetNextN(xn,gm29_CNext(xn,xp),shift); xp=xpnew; xn=xnnew; } i/=2; shift++; } return xp; } extern "C" __device__ __host__ void gm29_skipahead_(gm29_state* state, unsigned long long offset){ 
unsigned xn,xp,j; for(j=0;j<32;j++){ xp=gm29_GetNextAny(state->xP[j],state->xN[j],offset); xn=gm29_GetNextAny(state->xP[j],state->xN[j],offset+1); state->xP[j]=xp; state->xN[j]=xn; } } extern "C" __device__ __host__ void gm29_init_(gm29_state* state){ unsigned x0=514932,x1=127293,xp,xn,j; for(j=0;j<32;j++){ xp=gm29_GetNextAny(x0,x1,9007198285571818UL); xn=gm29_GetNextAny(x0,x1,9007198285571819UL); state->xP[j]=xp; state->xN[j]=xn; x0=xp; x1=xn; } } extern "C" __device__ __host__ void gm29_init_short_sequence_(gm29_state* state,unsigned SequenceNumber){ gm29_init_(state); // 0 <= SequenceNumber < 10^8; length of each sequence <= 8*10^7 gm29_skipahead_(state,82927047ULL*(unsigned long long)SequenceNumber); } extern "C" __device__ __host__ void gm29_init_medium_sequence_(gm29_state* state,unsigned SequenceNumber){ gm29_init_(state); // 0 <= SequenceNumber < 10^6; length of each sequence <= 8*10^9 gm29_skipahead_(state,8799201913ULL*(unsigned long long)SequenceNumber); } extern "C" __device__ __host__ void gm29_init_long_sequence_(gm29_state* state,unsigned SequenceNumber){ gm29_init_(state); // 0 <= SequenceNumber < 10^4; length of each sequence <= 8*10^11 gm29_skipahead_(state,828317697521ULL*(unsigned long long)SequenceNumber); } extern "C" __device__ __host__ unsigned int gm29_generate_(gm29_state* state){ unsigned sum=0, i, temp, bit=1; for(i=0;i<32;i++){ temp=(gm29_k*state->xN[i]+gm29_q*(gm29_g-state->xP[i]))%gm29_g; state->xP[i]=state->xN[i]; state->xN[i]=temp; sum+= ((temp<gm29_halfg)?0:bit); bit*=2; } return sum; } extern "C" __device__ __host__ float gm29_generate_uniform_float_(gm29_state* state){ unsigned sum=0, i, temp,bit=1; for(i=0;i<32;i++){ temp=(gm29_k*state->xN[i]+gm29_q*(gm29_g-state->xP[i]))%gm29_g; state->xP[i]=state->xN[i]; state->xN[i]=temp; sum+= ((temp<gm29_halfg)?0:bit); bit*=2; } return ((float) sum) * 2.3283064365386963e-10; } extern "C" __host__ void gm29_print_state_(gm29_state* state){int i; printf("Generator State:\nxN={"); for(i=0;i<32;i++) {printf("%u",state->xN[i]%gm29_g); printf((i<31)?",":"}\nxP={");} for(i=0;i<32;i++) {printf("%u",state->xP[i]%gm29_g); printf((i<31)?",":"}\n\n");} } extern "C" __host__ void gm29_print_sse_state_(gm29_sse_state* state){int i; printf("Generator State:\nxN={"); for(i=0;i<32;i++) {printf("%u",state->xN[i]%gm29_g); printf((i<31)?",":"}\nxP={");} for(i=0;i<32;i++) {printf("%u",state->xP[i]%gm29_g); printf((i<31)?",":"}\n\n");} } __global__ void gm29_kernel_generate_array(gm29_state* state, unsigned int* out, long* length) { unsigned temp,sum,i,orbit,seqNum; long offset; __shared__ unsigned xP[gm29_THREADS]; // one generator per s=32 threads, i.e. one orbit __shared__ unsigned xN[gm29_THREADS]; // per thread, i.e. blockDim.x orbits per block __shared__ unsigned a[gm29_THREADS]; // array "a" contains corresponding parts of output orbit = threadIdx.x % 32; seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index offset = seqNum*(*length); // start of the section in the output array xP[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset); xN[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1); for(i=0;i<(*length);i++){ temp = gm29_CNext( xN[threadIdx.x], xP[threadIdx.x] ); xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp; a[threadIdx.x] = (temp < gm29_halfg ? 
0 : (1<<orbit) ); __syncthreads(); // each s=32 threads result in "length" values in the output array if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3]; __syncthreads(); if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12]; __syncthreads(); if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+16]; out[offset+i]=sum; } } } extern "C" __host__ void gm29_generate_gpu_array_(gm29_state* state, unsigned int* dev_out, unsigned int* length){ long mylength = (*length)/gm29_ARRAY_SECTIONS; gm29_state* dev_state; long* dev_length; if((mylength*gm29_ARRAY_SECTIONS)<(*length)) mylength++; gm29_CUDA_CALL(hipMalloc((void**)&dev_state,sizeof(gm29_state))); gm29_CUDA_CALL(hipMalloc((void**)&dev_length,sizeof(long))); gm29_CUDA_CALL(hipMemcpy(dev_state,state,sizeof(gm29_state),hipMemcpyHostToDevice)); gm29_CUDA_CALL(hipMemcpy(dev_length,&mylength,sizeof(long),hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gm29_kernel_generate_array), dim3(gm29_BLOCKS),dim3(gm29_THREADS), 0, 0, dev_state,dev_out,dev_length); gm29_CUDA_CALL(hipGetLastError()); gm29_CUDA_CALL(hipFree(dev_state)); gm29_CUDA_CALL(hipFree(dev_length)); } __global__ void gm29_kernel_generate_array_float(gm29_state* state, float* out, long* length) { unsigned temp,sum,i,orbit,seqNum; long offset; __shared__ unsigned xP[gm29_THREADS]; // one generator per s=32 threads, i.e. one orbit __shared__ unsigned xN[gm29_THREADS]; // per thread, i.e. blockDim.x orbits per block __shared__ unsigned a[gm29_THREADS]; // array "a" contains corresponding parts of output orbit = threadIdx.x % 32; seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index offset = seqNum*(*length); // start of the section in the output array xP[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset); xN[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1); for(i=0;i<(*length);i++){ // each s=32 threads result in "length" values in the output array temp = gm29_CNext( xN[threadIdx.x], xP[threadIdx.x] ); xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp; a[threadIdx.x] = (temp < gm29_halfg ? 
0 : (1<<orbit) ); __syncthreads(); if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3]; __syncthreads(); if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12]; __syncthreads(); if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+16]; out[offset+i]=((float)sum) * 2.3283064365386963e-10; } } } extern "C" __host__ void gm29_generate_gpu_array_float_(gm29_state* state, float* dev_out, unsigned int* length){ long mylength = (*length)/gm29_ARRAY_SECTIONS; gm29_state* dev_state; long* dev_length; if((mylength*gm29_ARRAY_SECTIONS)<(*length)) mylength++; gm29_CUDA_CALL(hipMalloc((void**)&dev_state,sizeof(gm29_state))); gm29_CUDA_CALL(hipMalloc((void**)&dev_length,sizeof(long))); gm29_CUDA_CALL(hipMemcpy(dev_state,state,sizeof(gm29_state),hipMemcpyHostToDevice)); gm29_CUDA_CALL(hipMemcpy(dev_length,&mylength,sizeof(long),hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gm29_kernel_generate_array_float), dim3(gm29_BLOCKS),dim3(gm29_THREADS), 0, 0, dev_state,dev_out,dev_length); gm29_CUDA_CALL(hipGetLastError()); gm29_CUDA_CALL(hipFree(dev_state)); gm29_CUDA_CALL(hipFree(dev_length)); } __global__ void gm29_kernel_generate_array_double(gm29_state* state, double* out, long* length) { unsigned temp,sum,i,orbit,seqNum; long offset; __shared__ unsigned xP[gm29_THREADS]; // one generator per s=32 threads, i.e. one orbit __shared__ unsigned xN[gm29_THREADS]; // per thread, i.e. blockDim.x orbits per block __shared__ unsigned a[gm29_THREADS]; // array "a" contains corresponding parts of output orbit = threadIdx.x % 32; seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index offset = seqNum*(*length); // start of the section in the output array xP[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset); xN[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1); for(i=0;i<(*length);i++){ // each s=32 threads result in "length" values in the output array temp = gm29_CNext( xN[threadIdx.x], xP[threadIdx.x] ); xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp; a[threadIdx.x] = (temp < gm29_halfg ? 
0 : (1<<orbit) ); __syncthreads(); if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3]; __syncthreads(); if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12]; __syncthreads(); if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+16]; out[offset+i]=((double)sum) * 2.3283064365386963e-10; } } } extern "C" __host__ void gm29_generate_gpu_array_double_(gm29_state* state, double* dev_out, unsigned int* length){ long mylength = (*length)/gm29_ARRAY_SECTIONS; gm29_state* dev_state; long* dev_length; if((mylength*gm29_ARRAY_SECTIONS)<(*length)) mylength++; gm29_CUDA_CALL(hipMalloc((void**)&dev_state,sizeof(gm29_state))); gm29_CUDA_CALL(hipMalloc((void**)&dev_length,sizeof(long))); gm29_CUDA_CALL(hipMemcpy(dev_state,state,sizeof(gm29_state),hipMemcpyHostToDevice)); gm29_CUDA_CALL(hipMemcpy(dev_length,&mylength,sizeof(long),hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gm29_kernel_generate_array_double), dim3(gm29_BLOCKS),dim3(gm29_THREADS), 0, 0, dev_state,dev_out,dev_length); gm29_CUDA_CALL(hipGetLastError()); gm29_CUDA_CALL(hipFree(dev_state)); gm29_CUDA_CALL(hipFree(dev_length)); } extern "C" __host__ void gm29_generate_array_(gm29_state* state, unsigned int* out, unsigned int* length){ long mylength = (*length)/gm29_ARRAY_SECTIONS; gm29_state* dev_state; unsigned int* dev_out; long* dev_length; if((mylength*gm29_ARRAY_SECTIONS)<(*length)) mylength++; gm29_CUDA_CALL(hipMalloc((void**)&dev_state,sizeof(gm29_state))); gm29_CUDA_CALL(hipMalloc((void**)&dev_out,mylength*gm29_ARRAY_SECTIONS*sizeof(unsigned int))); gm29_CUDA_CALL(hipMalloc((void**)&dev_length,sizeof(long))); gm29_CUDA_CALL(hipMemcpy(dev_state,state,sizeof(gm29_state),hipMemcpyHostToDevice)); gm29_CUDA_CALL(hipMemcpy(dev_length,&mylength,sizeof(long),hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gm29_kernel_generate_array), dim3(gm29_BLOCKS),dim3(gm29_THREADS), 0, 0, dev_state,dev_out,dev_length); gm29_CUDA_CALL(hipGetLastError()); gm29_CUDA_CALL(hipMemcpy(out,dev_out,(*length)*sizeof(unsigned int),hipMemcpyDeviceToHost)); gm29_CUDA_CALL(hipFree(dev_state)); gm29_CUDA_CALL(hipFree(dev_out)); gm29_CUDA_CALL(hipFree(dev_length)); }
8dca8a75e0b43a9b9b93a6c69290c2c42c139cca.cu
// (c) Copyright 2013 Lev Barash, Landau Institute for Theoretical Physics, Russian Academy of Sciences // This is supplement to the paper: // L.Yu. Barash, L.N. Shchur, "PRAND: GPU accelerated parallel random number generation library: Using most reliable algorithms and applying parallelism of modern GPUs and CPUs". // e-mail: barash @ itp.ac.ru (remove space) #include<stdio.h> #define gm29_CUDA_CALL(x) do { if((x) != cudaSuccess) { printf("Error: %s at %s:%d\n",cudaGetErrorString(cudaGetLastError()),__FILE__,__LINE__); exit(1);}} while(0) #define gm29_BLOCKS 512 #define gm29_THREADS 128 #define gm29_ARRAY_SECTIONS (gm29_BLOCKS*gm29_THREADS/32) #define gm29_k 4 #define gm29_q 2 #define gm29_g 536870909U #define gm29_halfg 268435456U typedef struct{ unsigned xN[32] __attribute__ ((aligned(16))), xP[32] __attribute__ ((aligned(16))); } gm29_state; typedef gm29_state gm29_sse_state; unsigned gm29_sse_Consts[16] __attribute__ ((aligned(16))) = {536870911,536870911,536870911,536870911,1073741818,1073741818,1073741818,1073741818, 536870908,536870908,536870908,536870908,536870909,536870909,536870909,536870909}; extern "C" __host__ unsigned int gm29_sse_generate_(gm29_sse_state* state){ unsigned output1; unsigned output2 __attribute__ ((unused)); asm volatile("movaps (%4),%%xmm7\n" \ "movaps 16(%4),%%xmm6\n" \ "movaps 32(%4),%%xmm4\n" \ "movaps (%2),%%xmm0\n" \ "movaps (%3),%%xmm5\n" \ "movaps %%xmm0,(%3)\n" \ "pslld $2,%%xmm0\n" \ "paddd %%xmm6,%%xmm0\n" \ "pslld $1,%%xmm5\n" \ "psubd %%xmm5,%%xmm0\n" \ "movaps %%xmm0,%%xmm5\n" \ "psrld $29,%%xmm5\n" \ "pand %%xmm7,%%xmm0\n" \ "paddd %%xmm5,%%xmm0\n" \ "paddd %%xmm5,%%xmm0\n" \ "paddd %%xmm5,%%xmm0\n" \ "movaps %%xmm0,%%xmm5\n" \ "pcmpgtd %%xmm4,%%xmm5\n" \ "pand 48(%4),%%xmm5\n" \ "psubd %%xmm5,%%xmm0\n" \ "movaps %%xmm0,(%2)\n" \ "movaps 16(%2),%%xmm1\n" \ "movaps 16(%3),%%xmm5\n" \ "movaps %%xmm1,16(%3)\n" \ "pslld $2,%%xmm1\n" \ "paddd %%xmm6,%%xmm1\n" \ "pslld $1,%%xmm5\n" \ "psubd %%xmm5,%%xmm1\n" \ "movaps %%xmm1,%%xmm5\n" \ "psrld $29,%%xmm5\n" \ "pand %%xmm7,%%xmm1\n" \ "paddd %%xmm5,%%xmm1\n" \ "paddd %%xmm5,%%xmm1\n" \ "paddd %%xmm5,%%xmm1\n" \ "movaps %%xmm1,%%xmm5\n" \ "pcmpgtd %%xmm4,%%xmm5\n" \ "pand 48(%4),%%xmm5\n" \ "psubd %%xmm5,%%xmm1\n" \ "movaps %%xmm1,16(%2)\n" \ "movaps 32(%2),%%xmm2\n" \ "movaps 32(%3),%%xmm5\n" \ "movaps %%xmm2,32(%3)\n" \ "pslld $2,%%xmm2\n" \ "paddd %%xmm6,%%xmm2\n" \ "pslld $1,%%xmm5\n" \ "psubd %%xmm5,%%xmm2\n" \ "movaps %%xmm2,%%xmm5\n" \ "psrld $29,%%xmm5\n" \ "pand %%xmm7,%%xmm2\n" \ "paddd %%xmm5,%%xmm2\n" \ "paddd %%xmm5,%%xmm2\n" \ "paddd %%xmm5,%%xmm2\n" \ "movaps %%xmm2,%%xmm5\n" \ "pcmpgtd %%xmm4,%%xmm5\n" \ "pand 48(%4),%%xmm5\n" \ "psubd %%xmm5,%%xmm2\n" \ "movaps %%xmm2,32(%2)\n" \ "movaps 48(%2),%%xmm3\n" \ "movaps 48(%3),%%xmm5\n" \ "movaps %%xmm3,48(%3)\n" \ "pslld $2,%%xmm3\n" \ "paddd %%xmm6,%%xmm3\n" \ "pslld $1,%%xmm5\n" \ "psubd %%xmm5,%%xmm3\n" \ "movaps %%xmm3,%%xmm5\n" \ "psrld $29,%%xmm5\n" \ "pand %%xmm7,%%xmm3\n" \ "paddd %%xmm5,%%xmm3\n" \ "paddd %%xmm5,%%xmm3\n" \ "paddd %%xmm5,%%xmm3\n" \ "movaps %%xmm3,%%xmm5\n" \ "pcmpgtd %%xmm4,%%xmm5\n" \ "pand 48(%4),%%xmm5\n" \ "psubd %%xmm5,%%xmm3\n" \ "movaps %%xmm3,48(%2)\n" \ "psrld $28,%%xmm0\n" \ "psrld $28,%%xmm1\n" \ "psrld $28,%%xmm2\n" \ "psrld $28,%%xmm3\n" \ "packssdw %%xmm1,%%xmm0\n" \ "packssdw %%xmm3,%%xmm2\n" \ "packsswb %%xmm2,%%xmm0\n" \ "psllw $7,%%xmm0\n" \ "pmovmskb %%xmm0,%0\n" \ "movaps 64(%2),%%xmm0\n" \ "movaps 64(%3),%%xmm5\n" \ "movaps %%xmm0,64(%3)\n" \ "pslld $2,%%xmm0\n" \ "paddd %%xmm6,%%xmm0\n" \ 
"pslld $1,%%xmm5\n" \ "psubd %%xmm5,%%xmm0\n" \ "movaps %%xmm0,%%xmm5\n" \ "psrld $29,%%xmm5\n" \ "pand %%xmm7,%%xmm0\n" \ "paddd %%xmm5,%%xmm0\n" \ "paddd %%xmm5,%%xmm0\n" \ "paddd %%xmm5,%%xmm0\n" \ "movaps %%xmm0,%%xmm5\n" \ "pcmpgtd %%xmm4,%%xmm5\n" \ "pand 48(%4),%%xmm5\n" \ "psubd %%xmm5,%%xmm0\n" \ "movaps %%xmm0,64(%2)\n" \ "movaps 80(%2),%%xmm1\n" \ "movaps 80(%3),%%xmm5\n" \ "movaps %%xmm1,80(%3)\n" \ "pslld $2,%%xmm1\n" \ "paddd %%xmm6,%%xmm1\n" \ "pslld $1,%%xmm5\n" \ "psubd %%xmm5,%%xmm1\n" \ "movaps %%xmm1,%%xmm5\n" \ "psrld $29,%%xmm5\n" \ "pand %%xmm7,%%xmm1\n" \ "paddd %%xmm5,%%xmm1\n" \ "paddd %%xmm5,%%xmm1\n" \ "paddd %%xmm5,%%xmm1\n" \ "movaps %%xmm1,%%xmm5\n" \ "pcmpgtd %%xmm4,%%xmm5\n" \ "pand 48(%4),%%xmm5\n" \ "psubd %%xmm5,%%xmm1\n" \ "movaps %%xmm1,80(%2)\n" \ "movaps 96(%2),%%xmm2\n" \ "movaps 96(%3),%%xmm5\n" \ "movaps %%xmm2,96(%3)\n" \ "pslld $2,%%xmm2\n" \ "paddd %%xmm6,%%xmm2\n" \ "pslld $1,%%xmm5\n" \ "psubd %%xmm5,%%xmm2\n" \ "movaps %%xmm2,%%xmm5\n" \ "psrld $29,%%xmm5\n" \ "pand %%xmm7,%%xmm2\n" \ "paddd %%xmm5,%%xmm2\n" \ "paddd %%xmm5,%%xmm2\n" \ "paddd %%xmm5,%%xmm2\n" \ "movaps %%xmm2,%%xmm5\n" \ "pcmpgtd %%xmm4,%%xmm5\n" \ "pand 48(%4),%%xmm5\n" \ "psubd %%xmm5,%%xmm2\n" \ "movaps %%xmm2,96(%2)\n" \ "movaps 112(%2),%%xmm3\n" \ "movaps 112(%3),%%xmm5\n" \ "movaps %%xmm3,112(%3)\n" \ "pslld $2,%%xmm3\n" \ "paddd %%xmm6,%%xmm3\n" \ "pslld $1,%%xmm5\n" \ "psubd %%xmm5,%%xmm3\n" \ "movaps %%xmm3,%%xmm5\n" \ "psrld $29,%%xmm5\n" \ "pand %%xmm7,%%xmm3\n" \ "paddd %%xmm5,%%xmm3\n" \ "paddd %%xmm5,%%xmm3\n" \ "paddd %%xmm5,%%xmm3\n" \ "movaps %%xmm3,%%xmm5\n" \ "pcmpgtd %%xmm4,%%xmm5\n" \ "pand 48(%4),%%xmm5\n" \ "psubd %%xmm5,%%xmm3\n" \ "movaps %%xmm3,112(%2)\n" \ "psrld $28,%%xmm0\n" \ "psrld $28,%%xmm1\n" \ "psrld $28,%%xmm2\n" \ "psrld $28,%%xmm3\n" \ "packssdw %%xmm1,%%xmm0\n" \ "packssdw %%xmm3,%%xmm2\n" \ "packsswb %%xmm2,%%xmm0\n" \ "psllw $7,%%xmm0\n" \ "pmovmskb %%xmm0,%1\n" \ "shll $16,%1\n" \ "addl %1,%0\n" \ "":"=&r"(output1),"=&r"(output2):"r"(state->xN),"r"(state->xP),"r"(gm29_sse_Consts)); return output1; } extern "C" __device__ __host__ void gm29_get_sse_state_(gm29_state* state,gm29_sse_state* sse_state){ int i; for(i=0;i<32;i++) {sse_state->xN[i]=state->xN[i]; sse_state->xP[i]=state->xP[i];} } extern "C" __device__ __host__ unsigned gm29_CNext(unsigned N,unsigned P){ return (gm29_k*N+gm29_q*(gm29_g-P))%gm29_g; } extern "C" __device__ __host__ unsigned gm29_CNext2(unsigned N,unsigned P,unsigned myk,unsigned myq){ unsigned long long NNN,PP,kk,qq,gg,rr; // returns (myk*N-myq*P) (mod gm29_g) NNN=N; PP=P; kk=myk; qq=myq; gg=gm29_g; rr=(kk*NNN+qq*(gg-PP)); NNN=rr>>29; PP=rr-(NNN*gg); PP-=((PP>>29)*gg); return (unsigned)PP; } extern "C" __device__ __host__ unsigned gm29_GetNextN(unsigned x0,unsigned x1,unsigned n){ //returns x_{2^n} unsigned myk=gm29_k,myq=gm29_q,i,x=x1; for(i=0;i<n;i++){ x=gm29_CNext2(x,x0,myk,myq); myk=gm29_CNext2(myk,2,myk,myq); myq=gm29_CNext2(myq,0,myq,0); } return x; } extern "C" __device__ __host__ unsigned gm29_GetNextAny(unsigned x0,unsigned x1,unsigned long long N){ // returns x_N unsigned long long i; unsigned xp=x0,xn=x1,xpnew,xnnew,shift=0; i=N; while(i>0){ if(i%2==1){ // xp,xn ----> 2^shift xpnew=gm29_GetNextN(xp,xn,shift); xnnew=gm29_GetNextN(xn,gm29_CNext(xn,xp),shift); xp=xpnew; xn=xnnew; } i/=2; shift++; } return xp; } extern "C" __device__ __host__ void gm29_skipahead_(gm29_state* state, unsigned long long offset){ unsigned xn,xp,j; for(j=0;j<32;j++){ xp=gm29_GetNextAny(state->xP[j],state->xN[j],offset); 
xn=gm29_GetNextAny(state->xP[j],state->xN[j],offset+1); state->xP[j]=xp; state->xN[j]=xn; } } extern "C" __device__ __host__ void gm29_init_(gm29_state* state){ unsigned x0=514932,x1=127293,xp,xn,j; for(j=0;j<32;j++){ xp=gm29_GetNextAny(x0,x1,9007198285571818UL); xn=gm29_GetNextAny(x0,x1,9007198285571819UL); state->xP[j]=xp; state->xN[j]=xn; x0=xp; x1=xn; } } extern "C" __device__ __host__ void gm29_init_short_sequence_(gm29_state* state,unsigned SequenceNumber){ gm29_init_(state); // 0 <= SequenceNumber < 10^8; length of each sequence <= 8*10^7 gm29_skipahead_(state,82927047ULL*(unsigned long long)SequenceNumber); } extern "C" __device__ __host__ void gm29_init_medium_sequence_(gm29_state* state,unsigned SequenceNumber){ gm29_init_(state); // 0 <= SequenceNumber < 10^6; length of each sequence <= 8*10^9 gm29_skipahead_(state,8799201913ULL*(unsigned long long)SequenceNumber); } extern "C" __device__ __host__ void gm29_init_long_sequence_(gm29_state* state,unsigned SequenceNumber){ gm29_init_(state); // 0 <= SequenceNumber < 10^4; length of each sequence <= 8*10^11 gm29_skipahead_(state,828317697521ULL*(unsigned long long)SequenceNumber); } extern "C" __device__ __host__ unsigned int gm29_generate_(gm29_state* state){ unsigned sum=0, i, temp, bit=1; for(i=0;i<32;i++){ temp=(gm29_k*state->xN[i]+gm29_q*(gm29_g-state->xP[i]))%gm29_g; state->xP[i]=state->xN[i]; state->xN[i]=temp; sum+= ((temp<gm29_halfg)?0:bit); bit*=2; } return sum; } extern "C" __device__ __host__ float gm29_generate_uniform_float_(gm29_state* state){ unsigned sum=0, i, temp,bit=1; for(i=0;i<32;i++){ temp=(gm29_k*state->xN[i]+gm29_q*(gm29_g-state->xP[i]))%gm29_g; state->xP[i]=state->xN[i]; state->xN[i]=temp; sum+= ((temp<gm29_halfg)?0:bit); bit*=2; } return ((float) sum) * 2.3283064365386963e-10; } extern "C" __host__ void gm29_print_state_(gm29_state* state){int i; printf("Generator State:\nxN={"); for(i=0;i<32;i++) {printf("%u",state->xN[i]%gm29_g); printf((i<31)?",":"}\nxP={");} for(i=0;i<32;i++) {printf("%u",state->xP[i]%gm29_g); printf((i<31)?",":"}\n\n");} } extern "C" __host__ void gm29_print_sse_state_(gm29_sse_state* state){int i; printf("Generator State:\nxN={"); for(i=0;i<32;i++) {printf("%u",state->xN[i]%gm29_g); printf((i<31)?",":"}\nxP={");} for(i=0;i<32;i++) {printf("%u",state->xP[i]%gm29_g); printf((i<31)?",":"}\n\n");} } __global__ void gm29_kernel_generate_array(gm29_state* state, unsigned int* out, long* length) { unsigned temp,sum,i,orbit,seqNum; long offset; __shared__ unsigned xP[gm29_THREADS]; // one generator per s=32 threads, i.e. one orbit __shared__ unsigned xN[gm29_THREADS]; // per thread, i.e. blockDim.x orbits per block __shared__ unsigned a[gm29_THREADS]; // array "a" contains corresponding parts of output orbit = threadIdx.x % 32; seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index offset = seqNum*(*length); // start of the section in the output array xP[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset); xN[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1); for(i=0;i<(*length);i++){ temp = gm29_CNext( xN[threadIdx.x], xP[threadIdx.x] ); xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp; a[threadIdx.x] = (temp < gm29_halfg ? 
0 : (1<<orbit) ); __syncthreads(); // each s=32 threads result in "length" values in the output array if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3]; __syncthreads(); if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12]; __syncthreads(); if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+16]; out[offset+i]=sum; } } } extern "C" __host__ void gm29_generate_gpu_array_(gm29_state* state, unsigned int* dev_out, unsigned int* length){ long mylength = (*length)/gm29_ARRAY_SECTIONS; gm29_state* dev_state; long* dev_length; if((mylength*gm29_ARRAY_SECTIONS)<(*length)) mylength++; gm29_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gm29_state))); gm29_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long))); gm29_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gm29_state),cudaMemcpyHostToDevice)); gm29_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice)); gm29_kernel_generate_array<<<gm29_BLOCKS,gm29_THREADS>>>(dev_state,dev_out,dev_length); gm29_CUDA_CALL(cudaGetLastError()); gm29_CUDA_CALL(cudaFree(dev_state)); gm29_CUDA_CALL(cudaFree(dev_length)); } __global__ void gm29_kernel_generate_array_float(gm29_state* state, float* out, long* length) { unsigned temp,sum,i,orbit,seqNum; long offset; __shared__ unsigned xP[gm29_THREADS]; // one generator per s=32 threads, i.e. one orbit __shared__ unsigned xN[gm29_THREADS]; // per thread, i.e. blockDim.x orbits per block __shared__ unsigned a[gm29_THREADS]; // array "a" contains corresponding parts of output orbit = threadIdx.x % 32; seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index offset = seqNum*(*length); // start of the section in the output array xP[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset); xN[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1); for(i=0;i<(*length);i++){ // each s=32 threads result in "length" values in the output array temp = gm29_CNext( xN[threadIdx.x], xP[threadIdx.x] ); xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp; a[threadIdx.x] = (temp < gm29_halfg ? 0 : (1<<orbit) ); __syncthreads(); if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3]; __syncthreads(); if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12]; __syncthreads(); if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+16]; out[offset+i]=((float)sum) * 2.3283064365386963e-10; } } } extern "C" __host__ void gm29_generate_gpu_array_float_(gm29_state* state, float* dev_out, unsigned int* length){ long mylength = (*length)/gm29_ARRAY_SECTIONS; gm29_state* dev_state; long* dev_length; if((mylength*gm29_ARRAY_SECTIONS)<(*length)) mylength++; gm29_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gm29_state))); gm29_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long))); gm29_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gm29_state),cudaMemcpyHostToDevice)); gm29_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice)); gm29_kernel_generate_array_float<<<gm29_BLOCKS,gm29_THREADS>>>(dev_state,dev_out,dev_length); gm29_CUDA_CALL(cudaGetLastError()); gm29_CUDA_CALL(cudaFree(dev_state)); gm29_CUDA_CALL(cudaFree(dev_length)); } __global__ void gm29_kernel_generate_array_double(gm29_state* state, double* out, long* length) { unsigned temp,sum,i,orbit,seqNum; long offset; __shared__ unsigned xP[gm29_THREADS]; // one generator per s=32 threads, i.e. 
one orbit __shared__ unsigned xN[gm29_THREADS]; // per thread, i.e. blockDim.x orbits per block __shared__ unsigned a[gm29_THREADS]; // array "a" contains corresponding parts of output orbit = threadIdx.x % 32; seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index offset = seqNum*(*length); // start of the section in the output array xP[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset); xN[threadIdx.x]=gm29_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1); for(i=0;i<(*length);i++){ // each s=32 threads result in "length" values in the output array temp = gm29_CNext( xN[threadIdx.x], xP[threadIdx.x] ); xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp; a[threadIdx.x] = (temp < gm29_halfg ? 0 : (1<<orbit) ); __syncthreads(); if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3]; __syncthreads(); if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12]; __syncthreads(); if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+16]; out[offset+i]=((double)sum) * 2.3283064365386963e-10; } } } extern "C" __host__ void gm29_generate_gpu_array_double_(gm29_state* state, double* dev_out, unsigned int* length){ long mylength = (*length)/gm29_ARRAY_SECTIONS; gm29_state* dev_state; long* dev_length; if((mylength*gm29_ARRAY_SECTIONS)<(*length)) mylength++; gm29_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gm29_state))); gm29_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long))); gm29_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gm29_state),cudaMemcpyHostToDevice)); gm29_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice)); gm29_kernel_generate_array_double<<<gm29_BLOCKS,gm29_THREADS>>>(dev_state,dev_out,dev_length); gm29_CUDA_CALL(cudaGetLastError()); gm29_CUDA_CALL(cudaFree(dev_state)); gm29_CUDA_CALL(cudaFree(dev_length)); } extern "C" __host__ void gm29_generate_array_(gm29_state* state, unsigned int* out, unsigned int* length){ long mylength = (*length)/gm29_ARRAY_SECTIONS; gm29_state* dev_state; unsigned int* dev_out; long* dev_length; if((mylength*gm29_ARRAY_SECTIONS)<(*length)) mylength++; gm29_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gm29_state))); gm29_CUDA_CALL(cudaMalloc((void**)&dev_out,mylength*gm29_ARRAY_SECTIONS*sizeof(unsigned int))); gm29_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long))); gm29_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gm29_state),cudaMemcpyHostToDevice)); gm29_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice)); gm29_kernel_generate_array<<<gm29_BLOCKS,gm29_THREADS>>>(dev_state,dev_out,dev_length); gm29_CUDA_CALL(cudaGetLastError()); gm29_CUDA_CALL(cudaMemcpy(out,dev_out,(*length)*sizeof(unsigned int),cudaMemcpyDeviceToHost)); gm29_CUDA_CALL(cudaFree(dev_state)); gm29_CUDA_CALL(cudaFree(dev_out)); gm29_CUDA_CALL(cudaFree(dev_length)); }
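gm29_generate_gpu_array_float_ expects the caller to supply the device buffer, and the kernel writes ceil(n / gm29_ARRAY_SECTIONS) values per section, so the allocation has to be rounded up exactly as gm29_generate_array_ rounds up its internal buffer. A hedged helper sketch (hypothetical function, assumed to live in the same translation unit so the gm29_CUDA_CALL macro and the API above are visible):

// Fills a caller-owned device buffer with at least n uniform floats.
float* demo_gm29_device_uniforms(gm29_state* state, unsigned int n)
{
    unsigned int per_section = (n + gm29_ARRAY_SECTIONS - 1) / gm29_ARRAY_SECTIONS;
    size_t padded = (size_t)per_section * gm29_ARRAY_SECTIONS;    // what the kernel actually writes
    float* dev_out = NULL;
    gm29_CUDA_CALL(cudaMalloc((void**)&dev_out, padded * sizeof(float)));
    gm29_generate_gpu_array_float_(state, dev_out, &n);           // n is read, not modified
    return dev_out;                                               // caller releases with cudaFree
}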
a99287ccb04d8b4abd4339decad1a6afb922bfef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <rocblas.h> #include <stdio.h> __global__ void add(int *a, int *b, int *c) { *c = *a + *b; } int test(void) { int a, b, c; int *d_a, *d_b, *d_c; hipMalloc((void **)&d_a, sizeof(int)); hipMalloc((void **)&d_b, sizeof(int)); hipMalloc((void **)&d_c, sizeof(int)); a = 3; b = 4; hipMemcpy(d_a, &a, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_b, &b, sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c); hipMemcpy(&c, d_c, sizeof(int), hipMemcpyDeviceToHost); printf("ANS %d + %d = %d\n",a,b,c); hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
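For comparison with the original .cu entry that follows, the only structural change hipify makes to this small test is the kernel launch: the CUDA triple-chevron syntax becomes an explicit hipLaunchKernelGGL call whose grid, block, dynamic shared memory and stream arguments are spelled out. Side by side:

// CUDA form (see the .cu file below):
add<<<1, 1>>>(d_a, d_b, d_c);

// HIP form emitted by hipify (grid, block, shared-memory bytes, stream, then kernel arguments):
hipLaunchKernelGGL(add, dim3(1), dim3(1), 0, 0, d_a, d_b, d_c);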
a99287ccb04d8b4abd4339decad1a6afb922bfef.cu
#include "cuda_runtime.h" #include <cublas_v2.h> #include <stdio.h> __global__ void add(int *a, int *b, int *c) { *c = *a + *b; } int test(void) { int a, b, c; int *d_a, *d_b, *d_c; cudaMalloc((void **)&d_a, sizeof(int)); cudaMalloc((void **)&d_b, sizeof(int)); cudaMalloc((void **)&d_c, sizeof(int)); a = 3; b = 4; cudaMemcpy(d_a, &a, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, &b, sizeof(int), cudaMemcpyHostToDevice); add<<<1,1>>>(d_a, d_b, d_c); cudaMemcpy(&c, d_c, sizeof(int), cudaMemcpyDeviceToHost); printf("ANS %d + %d = %d\n",a,b,c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
518885225a184febd265933f149c95fa3a5730cc.hip
// !!! This is a file automatically generated by hipify!!! #include "pbf_add.h" #include <device_launch_parameters.h> namespace { } // end of unnamed ns namespace pbf { namespace cuda { void addParticle( pbf_particle& particle, const glm::vec3* adding_position, const glm::vec3* adding_velocity, uint32_t adding_num) { auto x = particle.phase.x + particle.phase.num; auto v = particle.phase.v + particle.phase.num; hipMemcpy(x, adding_position, adding_num * sizeof(dom_dim), hipMemcpyDeviceToDevice); hipMemcpy(v, adding_velocity, adding_num * sizeof(dom_dim), hipMemcpyDeviceToDevice); particle.phase.num += adding_num; #ifdef _DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif } } // end of cuda ns } // end of pbf ns
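addParticle() above appends by offsetting into the existing phase arrays and doing device-to-device copies, silently assuming those arrays were allocated with enough headroom for the new particles. A simplified, self-contained CUDA analogue with an explicit capacity check (plain float3 buffers and hypothetical names; the real pbf_particle / dom_dim types come from the project headers):

#include <cuda_runtime.h>

// Appends 'adding_num' device-resident particles into device buffers that hold
// 'num' of 'capacity' slots; refuses to overflow instead of assuming headroom.
bool append_particles(float3* d_pos, float3* d_vel, unsigned& num, unsigned capacity,
                      const float3* d_add_pos, const float3* d_add_vel, unsigned adding_num)
{
    if (num + adding_num > capacity) return false;
    cudaMemcpy(d_pos + num, d_add_pos, adding_num * sizeof(float3), cudaMemcpyDeviceToDevice);
    cudaMemcpy(d_vel + num, d_add_vel, adding_num * sizeof(float3), cudaMemcpyDeviceToDevice);
    num += adding_num;
    return true;
}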
518885225a184febd265933f149c95fa3a5730cc.cu
#include "pbf_add.h" #include <device_launch_parameters.h> namespace { } // end of unnamed ns namespace pbf { namespace cuda { void addParticle( pbf_particle& particle, const glm::vec3* adding_position, const glm::vec3* adding_velocity, uint32_t adding_num) { auto x = particle.phase.x + particle.phase.num; auto v = particle.phase.v + particle.phase.num; cudaMemcpy(x, adding_position, adding_num * sizeof(dom_dim), cudaMemcpyDeviceToDevice); cudaMemcpy(v, adding_velocity, adding_num * sizeof(dom_dim), cudaMemcpyDeviceToDevice); particle.phase.num += adding_num; #ifdef _DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif } } // end of cuda ns } // end of pbf ns
bd9a8efeb2c1ad96e26559c7df54c5136dfe9016.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossBDKLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { this->softmax_layer_->Forward(this->softmax_bottom_vec_, this->softmax_top_vec_); const Dtype* prob_data = this->prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = this->prob_.count() / this->outer_num_; const int nthreads = this->outer_num_ * this->inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = this->prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data, this->outer_num_, dim, this->inner_num_, this->has_ignore_label_, this->ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); if (this->normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); loss /= count; } else { loss /= this->outer_num_; } top[0]->mutable_cpu_data()[0] = loss; if (top.size() == 2) { top[1]->ShareData(this->prob_); } this->softmax_layer_1_->Forward(this->softmax_bottom_vec_1_, this->softmax_top_vec_1_); } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossBDKLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { const int nthreads = 
this->outer_num_ * this->inner_num_; Dtype* counts = this->prob_.mutable_gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* bottom_diff1 = bottom[2]->mutable_gpu_diff(); const Dtype* prob_data = this->prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(this->prob_.count() * sizeof(Dtype), prob_data, bottom_diff); caffe_gpu_memcpy(this->prob_.count() * sizeof(Dtype), prob_data, bottom_diff1); if(this->layer_param_.loss_param().down_sgld() == 0){ caffe_gpu_scal(this->prob_.count(), Dtype(0), bottom_diff); }else{ const Dtype* label = bottom[1]->gpu_data(); const int dim = this->prob_.count() / this->outer_num_; //const int nthreads = this->outer_num_ * this->inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. //Dtype* counts = this->prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff, this->outer_num_, dim, this->inner_num_, this->has_ignore_label_, this->ignore_label_, counts);} const Dtype loss_weight = top[0]->cpu_diff()[0]; caffe_gpu_axpby(this->prob_1_.count(), Dtype(1), this->prob_1_.gpu_data(), Dtype(-1), bottom_diff1); if (this->normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); if(this->layer_param_.loss_param().down_sgld() == 1){//LOG(INFO) << "loss_weight = " << loss_weight << ", count = " << count; caffe_gpu_scal(this->prob_.count(), loss_weight / count, bottom_diff);} caffe_gpu_scal(this->prob_.count(), loss_weight / count, bottom_diff1); //caffe_gpu_memcpy(this->prob_.count() * sizeof(Dtype), bottom_diff, bottom_diff1); } else {//LOG(INFO) << "loss_weight = " << loss_weight << ", outer_num = " << this->outer_num_; if(this->layer_param_.loss_param().down_sgld() == 1){ caffe_gpu_scal(this->prob_.count(), loss_weight / this->outer_num_, bottom_diff);} caffe_gpu_scal(this->prob_.count(), loss_weight / this->outer_num_, bottom_diff1); //caffe_gpu_memcpy(this->prob_.count() * sizeof(Dtype), bottom_diff, bottom_diff1); } //LOG(INFO) << "count = " << this->prob_.count(); //for(int i = 0; i < 1; i++){LOG(INFO) << top[0]->cpu_diff()[0];} } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossBDKLayer); } // namespace caffe
bd9a8efeb2c1ad96e26559c7df54c5136dfe9016.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossBDKLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { this->softmax_layer_->Forward(this->softmax_bottom_vec_, this->softmax_top_vec_); const Dtype* prob_data = this->prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = this->prob_.count() / this->outer_num_; const int nthreads = this->outer_num_ * this->inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = this->prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data, this->outer_num_, dim, this->inner_num_, this->has_ignore_label_, this->ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); if (this->normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); loss /= count; } else { loss /= this->outer_num_; } top[0]->mutable_cpu_data()[0] = loss; if (top.size() == 2) { top[1]->ShareData(this->prob_); } this->softmax_layer_1_->Forward(this->softmax_bottom_vec_1_, this->softmax_top_vec_1_); } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossBDKLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { const int nthreads = this->outer_num_ * this->inner_num_; Dtype* counts = this->prob_.mutable_gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 
Dtype* bottom_diff1 = bottom[2]->mutable_gpu_diff(); const Dtype* prob_data = this->prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(this->prob_.count() * sizeof(Dtype), prob_data, bottom_diff); caffe_gpu_memcpy(this->prob_.count() * sizeof(Dtype), prob_data, bottom_diff1); if(this->layer_param_.loss_param().down_sgld() == 0){ caffe_gpu_scal(this->prob_.count(), Dtype(0), bottom_diff); }else{ const Dtype* label = bottom[1]->gpu_data(); const int dim = this->prob_.count() / this->outer_num_; //const int nthreads = this->outer_num_ * this->inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. //Dtype* counts = this->prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff, this->outer_num_, dim, this->inner_num_, this->has_ignore_label_, this->ignore_label_, counts);} const Dtype loss_weight = top[0]->cpu_diff()[0]; caffe_gpu_axpby(this->prob_1_.count(), Dtype(1), this->prob_1_.gpu_data(), Dtype(-1), bottom_diff1); if (this->normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); if(this->layer_param_.loss_param().down_sgld() == 1){//LOG(INFO) << "loss_weight = " << loss_weight << ", count = " << count; caffe_gpu_scal(this->prob_.count(), loss_weight / count, bottom_diff);} caffe_gpu_scal(this->prob_.count(), loss_weight / count, bottom_diff1); //caffe_gpu_memcpy(this->prob_.count() * sizeof(Dtype), bottom_diff, bottom_diff1); } else {//LOG(INFO) << "loss_weight = " << loss_weight << ", outer_num = " << this->outer_num_; if(this->layer_param_.loss_param().down_sgld() == 1){ caffe_gpu_scal(this->prob_.count(), loss_weight / this->outer_num_, bottom_diff);} caffe_gpu_scal(this->prob_.count(), loss_weight / this->outer_num_, bottom_diff1); //caffe_gpu_memcpy(this->prob_.count() * sizeof(Dtype), bottom_diff, bottom_diff1); } //LOG(INFO) << "count = " << this->prob_.count(); //for(int i = 0; i < 1; i++){LOG(INFO) << top[0]->cpu_diff()[0];} } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossBDKLayer); } // namespace caffe
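/* -------------------------------------------------------------------------
 * Editor's note: a minimal host-side sketch, not part of the original Caffe
 * layer above. It mirrors the indexing used by SoftmaxLossForwardGPU
 * (prob_data[n * dim + c * spatial_dim + s] with dim = channels * spatial_dim)
 * so the kernel can be spot-checked on small inputs. The helper name
 * softmax_loss_reference, its std::vector interface, and the omission of
 * ignore_label handling are assumptions introduced here for illustration only.
 * ------------------------------------------------------------------------- */
#include <cfloat>
#include <cmath>
#include <vector>
static double softmax_loss_reference(const std::vector<float>& prob,
                                     const std::vector<int>& label,
                                     int num, int channels, int spatial_dim) {
  const int dim = channels * spatial_dim;   // elements per sample, as in the kernel
  double loss = 0.0;
  int count = 0;
  for (int n = 0; n < num; ++n) {
    for (int s = 0; s < spatial_dim; ++s) {
      const int c = label[n * spatial_dim + s];             // ground-truth channel
      const float p = prob[n * dim + c * spatial_dim + s];  // same offset as the kernel
      loss -= std::log(std::fmax(p, FLT_MIN));
      ++count;                                              // no ignore_label handling here
    }
  }
  return count > 0 ? loss / count : 0.0;  // corresponds to the normalize_ == true branch
}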
3dd85f4ec069cf8bb8eb50a1e5c9b3dfc8f65b1a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * \file att_sampler.cu * \author Heliang Zheng * \adapted from https://github.com/apache/incubator-mxnet/blob/master/src/operator/bilinear_sampler.cu */ #include "./att_sampler-inl.h" #include <algorithm> #include "../../common/cuda_utils.h" namespace mshadow { namespace cuda { template<typename DType> __device__ bool between(DType value, int lowerBound, int upperBound) { return (value >= lowerBound && value <= upperBound); } template<typename DType> __global__ void AttSamplerForwardKernel(const int i_c, const int i_h, const int i_w, const DType* data, const DType* grid, const int o_n, const int o_c, const int o_h, const int o_w, DType* out) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < o_n * o_c * o_h * o_w; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) is the element in out int w = index % o_w; int h = (index / o_w) % o_h; int c = (index / o_w / o_h) % o_c; int n = index / o_w / o_h / o_c; DType y_real; DType x_real; y_real = (*(grid + n * o_h * o_w * 2 + h * o_w + w + o_h * o_w) + 1) * (i_h - 1) / 2; x_real = (*(grid + n * o_h * o_w * 2 + h * o_w + w) + 1) * (i_w - 1) / 2; int top_left_y = static_cast<int>(floor(y_real)); int top_left_x = static_cast<int>(floor(x_real)); DType top_left_y_w = 1.0 - (y_real - top_left_y); DType top_left_x_w = 1.0 - (x_real - top_left_x); DType top_left_v = 0; DType top_right_v = 0; DType bottom_left_v = 0; DType bottom_right_v = 0; index_t out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w; int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x; if (between(top_left_x, 0, i_w - 1) && between(top_left_y, 0, i_h - 1)) top_left_v = *(data + data_index); if (between(top_left_x + 1, 0, i_w - 1) && between(top_left_y, 0, i_h - 1)) top_right_v = *(data + data_index + 1); if (between(top_left_x, 0, i_w - 1) && between(top_left_y + 1, 0, i_h - 1)) bottom_left_v = *(data + data_index + i_w); if (between(top_left_x + 1, 0, i_w - 1) && between(top_left_y + 1, 0, i_h - 1)) bottom_right_v = *(data + data_index + i_w + 1); *(out + out_index) = top_left_v * top_left_y_w * top_left_x_w + top_right_v * top_left_y_w * (1.0 - top_left_x_w) + bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w + bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w); } } template<typename DType> __global__ void AttSamplerBackwardKernel(const int i_c, const int i_h, const int i_w, const DType* grad, const DType* data, const int o_n, const int o_c, const int o_h, const int o_w, DType* g_input, const DType* grid_src) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < o_n * o_h * o_w; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) is the element in grad int w = index % o_w; int h = (index / o_w) % o_h; int n = index / o_w / o_h; DType y_real; DType x_real; y_real = (*(grid_src + n * o_h * o_w * 2 + h * o_w + w + o_h * o_w) + 1) * (i_h - 1) / 2; x_real = (*(grid_src + n * o_h * o_w * 2 + h * o_w + w) + 1) * (i_w - 1) / 2; int top_left_y = static_cast<int>(floor(y_real)); int top_left_x = static_cast<int>(floor(x_real)); DType top_left_y_w = 1.0 - (y_real - top_left_y); DType top_left_x_w = 1.0 - (x_real - top_left_x); for (index_t c = 0; c < o_c; ++c) { index_t grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w; int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x; if (between(top_left_x, 0, i_w - 
1) && between(top_left_y, 0, i_h - 1)) { atomicAdd(&g_input[data_index], *(grad + grad_index) * top_left_y_w * top_left_x_w); } if (between(top_left_x + 1, 0, i_w - 1) && between(top_left_y, 0, i_h - 1)) { atomicAdd(&g_input[data_index + 1], *(grad + grad_index) * top_left_y_w * (1.0 - top_left_x_w)); } if (between(top_left_x, 0, i_w - 1) && between(top_left_y + 1, 0, i_h - 1)) { atomicAdd(&g_input[data_index + i_w], *(grad + grad_index) * (1.0 - top_left_y_w) * top_left_x_w); } if (between(top_left_x + 1, 0, i_w - 1) && between(top_left_y + 1, 0, i_h - 1)) { atomicAdd(&g_input[data_index + i_w + 1], *(grad + grad_index) * (1.0 - top_left_y_w) * (1.0 - top_left_x_w)); } } } } } // namespace cuda template<typename DType> inline void AttSamplerForward(const Tensor<gpu, 4, DType> &output, const Tensor<gpu, 4, DType> &input, const Tensor<gpu, 4, DType> &grid_src) { DType *out = output.dptr_; const DType *data = input.dptr_; const DType *grid = grid_src.dptr_; int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3); int i_c = input.size(1), i_h = input.size(2), i_w = input.size(3); using namespace cuda; const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block; const int grid_dim_y = (max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1; dim3 num_blocks(grid_dim_x, grid_dim_y); dim3 threads_per_block(kMaxThreadsPerBlock); CheckLaunchParam(num_blocks, threads_per_block, "attention sampler forward"); hipStream_t stream = Stream<gpu>::GetStream(output.stream_); cuda::AttSamplerForwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >( i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out); // post kernel check hipError_t err = hipPeekAtLastError(); CHECK_EQ(err, hipSuccess) << hipGetErrorString(err); } template<typename DType> inline void AttSamplerBackward(const Tensor<gpu, 4, DType> &input_grad, const Tensor<gpu, 4, DType> &output_grad, const Tensor<gpu, 4, DType> &input_data, const Tensor<gpu, 4, DType> &grid) { DType *g_input = input_grad.dptr_; const DType *grid_src = grid.dptr_; const DType *grad = output_grad.dptr_; const DType *data = input_data.dptr_; int o_n = output_grad.size(0), o_c = output_grad.size(1), o_h = output_grad.size(2), o_w = output_grad.size(3); int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3); using namespace cuda; const int max_block = (output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block; const int grid_dim_y = (max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1; dim3 num_blocks(grid_dim_x, grid_dim_y); dim3 threads_per_block(kMaxThreadsPerBlock); CheckLaunchParam(num_blocks, threads_per_block, "att sampler backward"); hipStream_t stream = Stream<gpu>::GetStream(input_grad.stream_); cuda::AttSamplerBackwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >( i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src); // post kernel check hipError_t err = hipPeekAtLastError(); CHECK_EQ(err, hipSuccess) << hipGetErrorString(err); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(AttSamplerParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new AttSamplerOp<gpu, DType>(param); }) return op; } } // namespace op } // namespace mxnet
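/* -------------------------------------------------------------------------
 * Editor's note: an illustrative scalar reference, not part of the original
 * operator. It reproduces the bilinear-sampling math of
 * AttSamplerForwardKernel above for a single output pixel: grid coordinates
 * in [-1, 1] are mapped with (g + 1) * (size - 1) / 2 and out-of-range taps
 * contribute zero. The function name bilinear_sample_ref is a hypothetical
 * helper added only for host-side unit-testing of the kernels.
 * ------------------------------------------------------------------------- */
#include <cmath>
static float bilinear_sample_ref(const float* data, int i_h, int i_w,
                                 float gx, float gy) {
  const float x = (gx + 1.0f) * (i_w - 1) / 2.0f;   // same transform as the kernel
  const float y = (gy + 1.0f) * (i_h - 1) / 2.0f;
  const int x0 = static_cast<int>(std::floor(x));
  const int y0 = static_cast<int>(std::floor(y));
  const float wx = 1.0f - (x - x0);   // weight of the left (top_left_x) column
  const float wy = 1.0f - (y - y0);   // weight of the top (top_left_y) row
  auto at = [&](int yy, int xx) -> float {
    return (xx >= 0 && xx <= i_w - 1 && yy >= 0 && yy <= i_h - 1)
               ? data[yy * i_w + xx] : 0.0f;        // zero padding outside the image
  };
  return at(y0, x0) * wy * wx +
         at(y0, x0 + 1) * wy * (1.0f - wx) +
         at(y0 + 1, x0) * (1.0f - wy) * wx +
         at(y0 + 1, x0 + 1) * (1.0f - wy) * (1.0f - wx);
}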
3dd85f4ec069cf8bb8eb50a1e5c9b3dfc8f65b1a.cu
/*! * \file att_sampler.cu * \author Heliang Zheng * \adapted from https://github.com/apache/incubator-mxnet/blob/master/src/operator/bilinear_sampler.cu */ #include "./att_sampler-inl.h" #include <algorithm> #include "../../common/cuda_utils.h" namespace mshadow { namespace cuda { template<typename DType> __device__ bool between(DType value, int lowerBound, int upperBound) { return (value >= lowerBound && value <= upperBound); } template<typename DType> __global__ void AttSamplerForwardKernel(const int i_c, const int i_h, const int i_w, const DType* data, const DType* grid, const int o_n, const int o_c, const int o_h, const int o_w, DType* out) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < o_n * o_c * o_h * o_w; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) is the element in out int w = index % o_w; int h = (index / o_w) % o_h; int c = (index / o_w / o_h) % o_c; int n = index / o_w / o_h / o_c; DType y_real; DType x_real; y_real = (*(grid + n * o_h * o_w * 2 + h * o_w + w + o_h * o_w) + 1) * (i_h - 1) / 2; x_real = (*(grid + n * o_h * o_w * 2 + h * o_w + w) + 1) * (i_w - 1) / 2; int top_left_y = static_cast<int>(floor(y_real)); int top_left_x = static_cast<int>(floor(x_real)); DType top_left_y_w = 1.0 - (y_real - top_left_y); DType top_left_x_w = 1.0 - (x_real - top_left_x); DType top_left_v = 0; DType top_right_v = 0; DType bottom_left_v = 0; DType bottom_right_v = 0; index_t out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w; int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x; if (between(top_left_x, 0, i_w - 1) && between(top_left_y, 0, i_h - 1)) top_left_v = *(data + data_index); if (between(top_left_x + 1, 0, i_w - 1) && between(top_left_y, 0, i_h - 1)) top_right_v = *(data + data_index + 1); if (between(top_left_x, 0, i_w - 1) && between(top_left_y + 1, 0, i_h - 1)) bottom_left_v = *(data + data_index + i_w); if (between(top_left_x + 1, 0, i_w - 1) && between(top_left_y + 1, 0, i_h - 1)) bottom_right_v = *(data + data_index + i_w + 1); *(out + out_index) = top_left_v * top_left_y_w * top_left_x_w + top_right_v * top_left_y_w * (1.0 - top_left_x_w) + bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w + bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w); } } template<typename DType> __global__ void AttSamplerBackwardKernel(const int i_c, const int i_h, const int i_w, const DType* grad, const DType* data, const int o_n, const int o_c, const int o_h, const int o_w, DType* g_input, const DType* grid_src) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < o_n * o_h * o_w; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) is the element in grad int w = index % o_w; int h = (index / o_w) % o_h; int n = index / o_w / o_h; DType y_real; DType x_real; y_real = (*(grid_src + n * o_h * o_w * 2 + h * o_w + w + o_h * o_w) + 1) * (i_h - 1) / 2; x_real = (*(grid_src + n * o_h * o_w * 2 + h * o_w + w) + 1) * (i_w - 1) / 2; int top_left_y = static_cast<int>(floor(y_real)); int top_left_x = static_cast<int>(floor(x_real)); DType top_left_y_w = 1.0 - (y_real - top_left_y); DType top_left_x_w = 1.0 - (x_real - top_left_x); for (index_t c = 0; c < o_c; ++c) { index_t grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w; int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x; if (between(top_left_x, 0, i_w - 1) && between(top_left_y, 0, i_h - 1)) { atomicAdd(&g_input[data_index], *(grad + 
grad_index) * top_left_y_w * top_left_x_w); } if (between(top_left_x + 1, 0, i_w - 1) && between(top_left_y, 0, i_h - 1)) { atomicAdd(&g_input[data_index + 1], *(grad + grad_index) * top_left_y_w * (1.0 - top_left_x_w)); } if (between(top_left_x, 0, i_w - 1) && between(top_left_y + 1, 0, i_h - 1)) { atomicAdd(&g_input[data_index + i_w], *(grad + grad_index) * (1.0 - top_left_y_w) * top_left_x_w); } if (between(top_left_x + 1, 0, i_w - 1) && between(top_left_y + 1, 0, i_h - 1)) { atomicAdd(&g_input[data_index + i_w + 1], *(grad + grad_index) * (1.0 - top_left_y_w) * (1.0 - top_left_x_w)); } } } } } // namespace cuda template<typename DType> inline void AttSamplerForward(const Tensor<gpu, 4, DType> &output, const Tensor<gpu, 4, DType> &input, const Tensor<gpu, 4, DType> &grid_src) { DType *out = output.dptr_; const DType *data = input.dptr_; const DType *grid = grid_src.dptr_; int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3); int i_c = input.size(1), i_h = input.size(2), i_w = input.size(3); using namespace cuda; const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block; const int grid_dim_y = (max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1; dim3 num_blocks(grid_dim_x, grid_dim_y); dim3 threads_per_block(kMaxThreadsPerBlock); CheckLaunchParam(num_blocks, threads_per_block, "attention sampler forward"); cudaStream_t stream = Stream<gpu>::GetStream(output.stream_); cuda::AttSamplerForwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >( i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out); // post kernel check cudaError err = cudaPeekAtLastError(); CHECK_EQ(err, cudaSuccess) << cudaGetErrorString(err); } template<typename DType> inline void AttSamplerBackward(const Tensor<gpu, 4, DType> &input_grad, const Tensor<gpu, 4, DType> &output_grad, const Tensor<gpu, 4, DType> &input_data, const Tensor<gpu, 4, DType> &grid) { DType *g_input = input_grad.dptr_; const DType *grid_src = grid.dptr_; const DType *grad = output_grad.dptr_; const DType *data = input_data.dptr_; int o_n = output_grad.size(0), o_c = output_grad.size(1), o_h = output_grad.size(2), o_w = output_grad.size(3); int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3); using namespace cuda; const int max_block = (output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block; const int grid_dim_y = (max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1; dim3 num_blocks(grid_dim_x, grid_dim_y); dim3 threads_per_block(kMaxThreadsPerBlock); CheckLaunchParam(num_blocks, threads_per_block, "att sampler backward"); cudaStream_t stream = Stream<gpu>::GetStream(input_grad.stream_); cuda::AttSamplerBackwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >( i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src); // post kernel check cudaError err = cudaPeekAtLastError(); CHECK_EQ(err, cudaSuccess) << cudaGetErrorString(err); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(AttSamplerParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new AttSamplerOp<gpu, DType>(param); }) return op; } } // namespace op } // namespace mxnet
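/* -------------------------------------------------------------------------
 * Editor's note: an illustrative refactoring sketch, not part of the original
 * file. Both AttSamplerForward and AttSamplerBackward above compute a 2-D
 * grid by splitting the required block count across gridDim.x and gridDim.y
 * once it exceeds kMaxGridDim. The helper below factors out that arithmetic;
 * the name make_att_sampler_grid and its plain integer parameters are
 * assumptions made for this example (it assumes a CUDA translation unit so
 * that dim3 is in scope).
 * ------------------------------------------------------------------------- */
static dim3 make_att_sampler_grid(long long work_items, int threads_per_block,
                                  int max_grid_dim) {
  const long long max_block =
      (work_items + threads_per_block - 1) / threads_per_block;   // total blocks needed
  const int grid_x = (max_block > max_grid_dim) ? max_grid_dim
                                                : static_cast<int>(max_block);
  const int grid_y = (max_block > max_grid_dim)
                         ? static_cast<int>((max_block + max_grid_dim - 1) / max_grid_dim)
                         : 1;                                      // second dimension only when needed
  return dim3(grid_x, grid_y);
}
// Usage mirroring the forward wrapper above:
//   dim3 num_blocks = make_att_sampler_grid(output.shape_.Size(),
//                                           kMaxThreadsPerBlock, kMaxGridDim);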
6db5374736f0d0cea6c0513af53ea1ae14b9578b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void orcu_kernel28265(const int nrows, const int ndiags, int sbdiag, int ndofs, int* offsets, double* A, double* x, double* y) { const int tid=blockIdx.x*blockDim.x+threadIdx.x; const int gsize=gridDim.x*blockDim.x; double ysum; int j, k, col, row; for (int i=tid; i<=nrows-1; i+=gsize) { { ysum=0.0; for (j=0; j<=ndiags-1; j++ ) { row=i+j*sbdiag; col=(floor((float)i/ndofs)+offsets[j])*ndofs; if (col>=0&&col<nrows) for (k=0; k<=ndofs-1; k++ ) ysum=ysum+A[row+k*nrows]*x[col+k]; } y[i]=ysum; } } } void MatMult_SeqDIA(double* A, double* x, double* y, int M, int N, int P, int NOS, int DOF) { register int i,j,k; int col,row; double ysum; /*@ begin PerfTuning ( def performance_params { param TC[] = range(32,1025,32); param BC[] = range(14,105,14); param PL[] = [16,32,48]; } def input_params { param M[] = [32]; param N[] = [32]; param P[] = [32]; param NOS = 7; param DOF[] = range(1,17); constraint c1 = (M==N); constraint c2 = (N==P); } def input_vars { decl dynamic double A[M*N*P*DOF*DOF*NOS] = random; decl dynamic double x[M*N*P*DOF] = random; decl dynamic double y[M*N*P*DOF] = 0; decl static int offsets[NOS] = {-M*N*DOF,-M*DOF,-DOF,0,DOF,M*DOF,M*N*DOF}; } ) @*/ /**-- (Generated by Orio) Best performance cost: [2.0805400000000001, 2.0489600000000001, 2.0615999999999999, 2.0588799999999998, 2.0479400000000001] Tuned for specific problem sizes: DOF = 14 M = 32 N = 32 NOS = 7 P = 32 Best performance parameters: BC = 98 PL = 32 TC = 1024 --**/ int nrows=M*N*P*DOF; int ndiags=NOS; int ndofs=DOF; int sbdiag=M*N*P*DOF*DOF; /*@ begin Loop(transform CUDA(threadCount=TC, blockCount=BC, preferL1Size=PL) for(i=0; i<=nrows-1; i++){ ysum = 0.0; for(j=0; j<=ndiags-1; j++){ row = i+j*sbdiag; col = (floor((float)i/ndofs)+offsets[j])*ndofs; if(col>=0&&col<nrows) for(k=0; k<=ndofs-1; k++) ysum += A[row+k*nrows] * x[col+k]; } y[i] = ysum; } ) @*/ { hipDeviceSynchronize(); /*declare variables*/ double *dev_A, *dev_x, *dev_y; int *dev_offsets; int nthreads=1024; /*calculate device dimensions*/ dim3 dimGrid, dimBlock; dimBlock.x=nthreads; dimGrid.x=98; /*allocate device memory*/ hipMalloc(&dev_A,M *N *P *DOF *DOF *NOS*sizeof(double)); hipMalloc(&dev_x,M *N *P *DOF*sizeof(double)); hipMalloc(&dev_y,M *N *P *DOF*sizeof(double)); hipMalloc(&dev_offsets,NOS*sizeof(int)); hipDeviceSetCacheConfig(hipFuncCachePreferEqual); /*copy data from host to device*/ hipEventRecord(tstart,0); hipMemcpy(dev_A,A,M *N *P *DOF *DOF *NOS*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_x,x,M *N *P *DOF*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(dev_offsets,offsets,NOS*sizeof(int),hipMemcpyHostToDevice); hipEventRecord(tstop,0); hipEventSynchronize(tstop); hipEventElapsedTime(&orcu_transfer,tstart,tstop); hipEventRecord(start,0); /*invoke device kernel*/ hipLaunchKernelGGL(( orcu_kernel28265), dim3(dimGrid),dim3(dimBlock), 0, 0, nrows,ndiags,sbdiag,ndofs,dev_offsets,dev_A,dev_x,dev_y); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&orcu_elapsed,start,stop); /*copy data from device to host*/ hipMemcpy(y,dev_y,M *N *P *DOF*sizeof(double),hipMemcpyDeviceToHost); hipDeviceSetCacheConfig(hipFuncCachePreferNone); /*free allocated memory*/ hipFree(dev_A); hipFree(dev_x); hipFree(dev_y); hipFree(dev_offsets); hipError_t err=hipGetLastError(); if (hipSuccess!=err) printf("CUDA runtime error: %s@",hipGetErrorString(err)); } /*@ end @*/ /*@ end @*/ }
6db5374736f0d0cea6c0513af53ea1ae14b9578b.cu
__global__ void orcu_kernel28265(const int nrows, const int ndiags, int sbdiag, int ndofs, int* offsets, double* A, double* x, double* y) { const int tid=blockIdx.x*blockDim.x+threadIdx.x; const int gsize=gridDim.x*blockDim.x; double ysum; int j, k, col, row; for (int i=tid; i<=nrows-1; i+=gsize) { { ysum=0.0; for (j=0; j<=ndiags-1; j++ ) { row=i+j*sbdiag; col=(floor((float)i/ndofs)+offsets[j])*ndofs; if (col>=0&&col<nrows) for (k=0; k<=ndofs-1; k++ ) ysum=ysum+A[row+k*nrows]*x[col+k]; } y[i]=ysum; } } } void MatMult_SeqDIA(double* A, double* x, double* y, int M, int N, int P, int NOS, int DOF) { register int i,j,k; int col,row; double ysum; /*@ begin PerfTuning ( def performance_params { param TC[] = range(32,1025,32); param BC[] = range(14,105,14); param PL[] = [16,32,48]; } def input_params { param M[] = [32]; param N[] = [32]; param P[] = [32]; param NOS = 7; param DOF[] = range(1,17); constraint c1 = (M==N); constraint c2 = (N==P); } def input_vars { decl dynamic double A[M*N*P*DOF*DOF*NOS] = random; decl dynamic double x[M*N*P*DOF] = random; decl dynamic double y[M*N*P*DOF] = 0; decl static int offsets[NOS] = {-M*N*DOF,-M*DOF,-DOF,0,DOF,M*DOF,M*N*DOF}; } ) @*/ /**-- (Generated by Orio) Best performance cost: [2.0805400000000001, 2.0489600000000001, 2.0615999999999999, 2.0588799999999998, 2.0479400000000001] Tuned for specific problem sizes: DOF = 14 M = 32 N = 32 NOS = 7 P = 32 Best performance parameters: BC = 98 PL = 32 TC = 1024 --**/ int nrows=M*N*P*DOF; int ndiags=NOS; int ndofs=DOF; int sbdiag=M*N*P*DOF*DOF; /*@ begin Loop(transform CUDA(threadCount=TC, blockCount=BC, preferL1Size=PL) for(i=0; i<=nrows-1; i++){ ysum = 0.0; for(j=0; j<=ndiags-1; j++){ row = i+j*sbdiag; col = (floor((float)i/ndofs)+offsets[j])*ndofs; if(col>=0&&col<nrows) for(k=0; k<=ndofs-1; k++) ysum += A[row+k*nrows] * x[col+k]; } y[i] = ysum; } ) @*/ { cudaDeviceSynchronize(); /*declare variables*/ double *dev_A, *dev_x, *dev_y; int *dev_offsets; int nthreads=1024; /*calculate device dimensions*/ dim3 dimGrid, dimBlock; dimBlock.x=nthreads; dimGrid.x=98; /*allocate device memory*/ cudaMalloc(&dev_A,M *N *P *DOF *DOF *NOS*sizeof(double)); cudaMalloc(&dev_x,M *N *P *DOF*sizeof(double)); cudaMalloc(&dev_y,M *N *P *DOF*sizeof(double)); cudaMalloc(&dev_offsets,NOS*sizeof(int)); cudaDeviceSetCacheConfig(cudaFuncCachePreferEqual); /*copy data from host to device*/ cudaEventRecord(tstart,0); cudaMemcpy(dev_A,A,M *N *P *DOF *DOF *NOS*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(dev_x,x,M *N *P *DOF*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(dev_offsets,offsets,NOS*sizeof(int),cudaMemcpyHostToDevice); cudaEventRecord(tstop,0); cudaEventSynchronize(tstop); cudaEventElapsedTime(&orcu_transfer,tstart,tstop); cudaEventRecord(start,0); /*invoke device kernel*/ orcu_kernel28265<<<dimGrid,dimBlock>>>(nrows,ndiags,sbdiag,ndofs,dev_offsets,dev_A,dev_x,dev_y); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&orcu_elapsed,start,stop); /*copy data from device to host*/ cudaMemcpy(y,dev_y,M *N *P *DOF*sizeof(double),cudaMemcpyDeviceToHost); cudaDeviceSetCacheConfig(cudaFuncCachePreferNone); /*free allocated memory*/ cudaFree(dev_A); cudaFree(dev_x); cudaFree(dev_y); cudaFree(dev_offsets); cudaError_t err=cudaGetLastError(); if (cudaSuccess!=err) printf("CUDA runtime error: %s@",cudaGetErrorString(err)); } /*@ end @*/ /*@ end @*/ }
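/* -------------------------------------------------------------------------
 * Editor's note: a plain CPU reference, not generated by Orio and not part of
 * the original file. It repeats the index arithmetic of orcu_kernel28265
 * above (nrows = M*N*P*DOF, sbdiag = M*N*P*DOF*DOF, DIA offsets in `offsets`)
 * so the tuned GPU kernel can be validated against it on small problems. The
 * function name MatMult_SeqDIA_ref is a hypothetical helper.
 * ------------------------------------------------------------------------- */
#include <math.h>
void MatMult_SeqDIA_ref(const double* A, const double* x, double* y,
                        const int* offsets, int nrows, int ndiags,
                        int ndofs, int sbdiag) {
  for (int i = 0; i < nrows; i++) {
    double ysum = 0.0;
    for (int j = 0; j < ndiags; j++) {
      const int row = i + j * sbdiag;                               // row offset of diagonal j
      const int col = ((int)floor((float)i / ndofs) + offsets[j]) * ndofs;
      if (col >= 0 && col < nrows)
        for (int k = 0; k < ndofs; k++)
          ysum += A[row + k * nrows] * x[col + k];                  // same layout as the kernel
    }
    y[i] = ysum;
  }
}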
9e871b6a17b965342d9899496faf6e5ee2d429a6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // N-queen for CUDA // // Copyright(c) 2008 Ping-Che Chen //#define WIN32_LEAN_AND_MEAN //#include <windows.h> #include <stdio.h> #include <cutil.h> #include <iostream> using namespace std; #define THREAD_NUM 96 int bunk = 0; // this is a dummy variable used for making sure clock() are not optimized out /* * ---------------------------------------------------------------- * This is a recursive version of n-queen backtracking solver. * A non-recursive version is used instead. * ---------------------------------------------------------------- long long solve_nqueen_internal(int n, unsigned int mask, unsigned int l_mask, unsigned int r_mask, unsigned int t_mask) { if(mask == t_mask) { return 1; } unsigned int m = (mask | l_mask | r_mask); if((m & t_mask) == t_mask) { return 0; } long long total = 0; unsigned int index = (m + 1) & ~m; while((index & t_mask) != 0) { total += solve_nqueen_internal(mask | index, (l_mask | index) << 1, (r_mask | index) >> 1, t_mask); m |= index; index = (m + 1) & ~m; } return total; } long long solve_nqueen(int n) { return solve_nqueen_internal(0, 0, 0, (1 << n) - 1); } */ /* ------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver. * This provides the basis for the CUDA version. * ------------------------------------------------------------------- */ long long solve_nqueen(int n) { unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; if(n <= 0 || n > 32) { return 0; } const unsigned int t_mask = (1 << n) - 1; long long total = 0; long long upper_total = 0; int i = 0, j; unsigned int index; mask[0] = 0; l_mask[0] = 0; r_mask[0] = 0; m[0] = 0; for(j = 0; j < (n + 1) / 2; j++) { index = (1 << j); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; if(n % 2 == 1 && j == (n + 1) / 2 - 1) { upper_total = total; total = 0; } while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = ((m[i] + 1) ^ m[i]) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { if(i + 1 == n) { total++; i--; } else { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; } } else { i --; } } } } bunk = 2; if(n % 2 == 0) { return total * 2; } else { return upper_total * 2 + total; } } /* ------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver * with multi-thread support. 
* ------------------------------------------------------------------- */ /* struct thread_context { HANDLE thread; bool stop; long long total; int n; unsigned int mask; unsigned int l_mask; unsigned int r_mask; unsigned int t_mask; HANDLE ready; HANDLE complete; }; DWORD WINAPI solve_nqueen_proc(LPVOID param) { thread_context* ctx = (thread_context*) param; unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; unsigned int t_mask; long long total; unsigned int index; unsigned int mark; for(;;) { WaitForSingleObject(ctx->ready, INFINITE); if(ctx->stop) { break; } int i = 0; mask[0] = ctx->mask; l_mask[0] = ctx->l_mask; r_mask[0] = ctx->r_mask; m[0] = mask[0] | l_mask[0] | r_mask[0]; total = 0; t_mask = ctx->t_mask; mark = ctx->n; while(i >= 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { if(i + 1 == mark) { total++; i--; } else { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; } } else { i --; } } } ctx->total = total; SetEvent(ctx->complete); } return 0; } long long solve_nqueen_mcpu(int n) { if(n <= 0 || n > 32) { return 0; } SYSTEM_INFO info; thread_context* threads; int num_threads; GetSystemInfo(&info); num_threads = info.dwNumberOfProcessors; if(num_threads == 1) { // only one cpu found, use single thread version return solve_nqueen(n); } threads = new thread_context[num_threads]; int j; for(j = 0; j < num_threads; j++) { threads[j].stop = false; threads[j].ready = CreateEvent(0, FALSE, FALSE, 0); threads[j].complete = CreateEvent(0, FALSE, TRUE, 0); threads[j].thread = CreateThread(0, 0, solve_nqueen_proc, threads + j, 0, 0); threads[j].total = 0; } int thread_idx = 0; const unsigned int t_mask = (1 << n) - 1; long long total = 0; unsigned int index; unsigned int m_mask = 0; if(n % 2 == 1) { m_mask = 1 << ((n + 1) / 2 - 1); } for(j = 0; j < (n + 1) / 2; j++) { index = 1 << j; WaitForSingleObject(threads[thread_idx].complete, INFINITE); if(threads[thread_idx].mask != m_mask) { total += threads[thread_idx].total * 2; } else { total += threads[thread_idx].total; } threads[thread_idx].mask = index; threads[thread_idx].l_mask = index << 1; threads[thread_idx].r_mask = index >> 1; threads[thread_idx].t_mask = t_mask; threads[thread_idx].total = 0; threads[thread_idx].n = n - 1; SetEvent(threads[thread_idx].ready); thread_idx = (thread_idx + 1) % num_threads; } // collect all threads... HANDLE* events = new HANDLE[num_threads]; for(j = 0; j < num_threads; j++) { events[j] = threads[j].complete; } WaitForMultipleObjects(num_threads, events, TRUE, INFINITE); for(j = 0; j < num_threads; j++) { if(threads[j].mask != m_mask) { total += threads[j].total * 2; } else { total += threads[j].total; } threads[j].stop = true; SetEvent(threads[j].ready); events[j] = threads[j].thread; } WaitForMultipleObjects(num_threads, events, TRUE, INFINITE); for(j = 0; j < num_threads; j++) { CloseHandle(threads[j].thread); CloseHandle(threads[j].ready); CloseHandle(threads[j].complete); } delete[] threads; delete[] events; bunk = 3; return total; } */ /* -------------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver for CUDA. * It receives multiple initial conditions from a CPU iterator, and count * each conditions. 
* -------------------------------------------------------------------------- */ __global__ void solve_nqueen_cuda_kernel(int n, int mark, unsigned int* total_masks, unsigned int* total_l_masks, unsigned int* total_r_masks, unsigned int* results, int total_conditions) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int idx = bid * blockDim.x + tid; __shared__ unsigned int mask[THREAD_NUM][10]; __shared__ unsigned int l_mask[THREAD_NUM][10]; __shared__ unsigned int r_mask[THREAD_NUM][10]; __shared__ unsigned int m[THREAD_NUM][10]; __shared__ unsigned int sum[THREAD_NUM]; const unsigned int t_mask = (1 << n) - 1; int total = 0; int i = 0; unsigned int index; if(idx < total_conditions) { mask[tid][i] = total_masks[idx]; l_mask[tid][i] = total_l_masks[idx]; r_mask[tid][i] = total_r_masks[idx]; m[tid][i] = mask[tid][i] | l_mask[tid][i] | r_mask[tid][i]; while(i >= 0) { if((m[tid][i] & t_mask) == t_mask) { i--; } else { index = (m[tid][i] + 1) & ~m[tid][i]; m[tid][i] |= index; if((index & t_mask) != 0) { if(i + 1 == mark) { total++; i--; } else { mask[tid][i + 1] = mask[tid][i] | index; l_mask[tid][i + 1] = (l_mask[tid][i] | index) << 1; r_mask[tid][i + 1] = (r_mask[tid][i] | index) >> 1; m[tid][i + 1] = (mask[tid][i + 1] | l_mask[tid][i + 1] | r_mask[tid][i + 1]); i++; } } else { i --; } } } sum[tid] = total; } else { sum[tid] = 0; } __syncthreads(); // reduction if(tid < 64 && tid + 64 < THREAD_NUM) { sum[tid] += sum[tid + 64]; } __syncthreads(); if(tid < 32) { sum[tid] += sum[tid + 32]; } __syncthreads(); if(tid < 16) { sum[tid] += sum[tid + 16]; } __syncthreads(); if(tid < 8) { sum[tid] += sum[tid + 8]; } __syncthreads(); if(tid < 4) { sum[tid] += sum[tid + 4]; } __syncthreads(); if(tid < 2) { sum[tid] += sum[tid + 2]; } __syncthreads(); if(tid < 1) { sum[tid] += sum[tid + 1]; } __syncthreads(); if(tid == 0) { results[bid] = sum[0]; } } long long solve_nqueen_cuda(int n, int steps) { // generating start conditions unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; unsigned int index; if(n <= 0 || n > 32) { return 0; } unsigned int* total_masks = new unsigned int[steps]; unsigned int* total_l_masks = new unsigned int[steps]; unsigned int* total_r_masks = new unsigned int[steps]; unsigned int* results = new unsigned int[steps]; unsigned int* masks_cuda; unsigned int* l_masks_cuda; unsigned int* r_masks_cuda; unsigned int* results_cuda; hipMalloc((void**) &masks_cuda, sizeof(int) * steps); hipMalloc((void**) &l_masks_cuda, sizeof(int) * steps); hipMalloc((void**) &r_masks_cuda, sizeof(int) * steps); hipMalloc((void**) &results_cuda, sizeof(int) * steps / THREAD_NUM); const unsigned int t_mask = (1 << n) - 1; const unsigned int mark = n > 11 ? 
n - 10 : 2; long long total = 0; int total_conditions = 0; int i = 0, j; mask[0] = 0; l_mask[0] = 0; r_mask[0] = 0; m[0] = 0; bool computed = false; for(j = 0; j < n / 2; j++) { index = (1 << j); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; if(i == mark) { total_masks[total_conditions] = mask[i]; total_l_masks[total_conditions] = l_mask[i]; total_r_masks[total_conditions] = r_mask[i]; total_conditions++; if(total_conditions == steps) { if(computed) { hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } // start computation hipMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); printf("%d %d\n",steps/THREAD_NUM,THREAD_NUM); hipLaunchKernelGGL(( solve_nqueen_cuda_kernel), dim3(steps/THREAD_NUM), dim3(THREAD_NUM), 0, 0, n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); computed = true; total_conditions = 0; } i--; } } else { i --; } } } } if(computed) { hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } hipMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); printf("%d %d\n",steps/THREAD_NUM,THREAD_NUM); hipLaunchKernelGGL(( solve_nqueen_cuda_kernel), dim3(steps/THREAD_NUM), dim3(THREAD_NUM), 0, 0, n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } total *= 2; if(n % 2 == 1) { computed = false; total_conditions = 0; index = (1 << (n - 1) / 2); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; if(i == mark) { total_masks[total_conditions] = mask[i]; total_l_masks[total_conditions] = l_mask[i]; total_r_masks[total_conditions] = r_mask[i]; total_conditions++; if(total_conditions == steps) { if(computed) { hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } // start computation hipMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); 
hipMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); printf("%d %d\n",steps/THREAD_NUM,THREAD_NUM); hipLaunchKernelGGL(( solve_nqueen_cuda_kernel), dim3(steps/THREAD_NUM), dim3(THREAD_NUM), 0, 0, n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); computed = true; total_conditions = 0; } i--; } } else { i --; } } } if(computed) { hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } hipMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); hipMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, hipMemcpyHostToDevice); printf("%d %d\n",steps/THREAD_NUM,THREAD_NUM); hipLaunchKernelGGL(( solve_nqueen_cuda_kernel), dim3(steps/THREAD_NUM), dim3(THREAD_NUM), 0, 0, n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); hipMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, hipMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } } hipFree(masks_cuda); hipFree(l_masks_cuda); hipFree(r_masks_cuda); hipFree(results_cuda); delete[] total_masks; delete[] total_l_masks; delete[] total_r_masks; delete[] results; bunk = 1; return total; } bool InitCUDA() { int count; hipGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } int i; for(i = 0; i < count; i++) { hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop, i) == hipSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } hipSetDevice(i); return true; } int main(int argc, char** argv) { unsigned int hTimer; double gpuTime; // initialise card and timer int deviceCount; CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "There is no device.\n"); exit(EXIT_FAILURE); } int dev; for (dev = 0; dev < deviceCount; ++dev) { hipDeviceProp_t deviceProp; CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceProperties(&deviceProp, dev)); if (deviceProp.major >= 1) break; } if (dev == deviceCount) { fprintf(stderr, "There is no device supporting CUDA.\n"); exit(EXIT_FAILURE); } else CUDA_SAFE_CALL(hipSetDevice(dev)); CUT_SAFE_CALL( cutCreateTimer(&hTimer) ); int n = 11;//11; clock_t start, end; long long solution; bool cpu = true, gpu = true; int argstart = 1, steps = 24576; if(argc >= 2 && argv[1][0] == '-') { if(argv[1][1] == 'c' || argv[1][1] == 'C') { gpu = false; } else if(argv[1][1] == 'g' || argv[1][1] == 'G') { cpu = false; } argstart = 2; } if(argc < argstart + 1) { printf("Usage: %s [-c|-g] n steps\n", argv[0]); printf(" -c: CPU only\n"); printf(" -g: GPU only\n"); printf(" n: n-queen\n"); printf(" steps: step for GPU\n"); printf("Default to 8 queen\n"); } else { n = atoi(argv[argstart]); if(n <= 1 || n > 32) { printf("Invalid n, n should be > 1 and <= 32\n"); printf("Note: n > 18 will require a very very long time to compute!\n"); return 0; } if(argc >= argstart + 2) { steps = atoi(argv[argstart + 1]); if(steps <= THREAD_NUM || steps % THREAD_NUM != 0) { printf("Invalid step, step should be multiple of %d\n", THREAD_NUM); return 0; } } } if(gpu) { if(!InitCUDA()) { return 0; } printf("CUDA 
initialized.\n"); } if(cpu) { CUDA_SAFE_CALL( hipDeviceSynchronize() ); CUT_SAFE_CALL( cutResetTimer(hTimer) ); CUT_SAFE_CALL( cutStartTimer(hTimer) ); //start = clock(); solution = solve_nqueen(n); //solve_nqueen_mcpu(n); //solution = solve_nqueen(n); //end = clock(); CUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("CPU: %d queen = %lld time = %f msec\n", n, solution, gpuTime); } if(gpu) { //start = clock(); CUDA_SAFE_CALL( hipDeviceSynchronize() ); CUT_SAFE_CALL( cutResetTimer(hTimer) ); CUT_SAFE_CALL( cutStartTimer(hTimer) ); solution = solve_nqueen_cuda(n, steps); //end = clock(); CUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("GPU: %d queen = %lld time = %f msec\n", n, solution, gpuTime); } return 0; }
9e871b6a17b965342d9899496faf6e5ee2d429a6.cu
// N-queen for CUDA // // Copyright(c) 2008 Ping-Che Chen //#define WIN32_LEAN_AND_MEAN //#include <windows.h> #include <stdio.h> #include <cutil.h> #include <iostream> using namespace std; #define THREAD_NUM 96 int bunk = 0; // this is a dummy variable used for making sure clock() are not optimized out /* * ---------------------------------------------------------------- * This is a recursive version of n-queen backtracking solver. * A non-recursive version is used instead. * ---------------------------------------------------------------- long long solve_nqueen_internal(int n, unsigned int mask, unsigned int l_mask, unsigned int r_mask, unsigned int t_mask) { if(mask == t_mask) { return 1; } unsigned int m = (mask | l_mask | r_mask); if((m & t_mask) == t_mask) { return 0; } long long total = 0; unsigned int index = (m + 1) & ~m; while((index & t_mask) != 0) { total += solve_nqueen_internal(mask | index, (l_mask | index) << 1, (r_mask | index) >> 1, t_mask); m |= index; index = (m + 1) & ~m; } return total; } long long solve_nqueen(int n) { return solve_nqueen_internal(0, 0, 0, (1 << n) - 1); } */ /* ------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver. * This provides the basis for the CUDA version. * ------------------------------------------------------------------- */ long long solve_nqueen(int n) { unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; if(n <= 0 || n > 32) { return 0; } const unsigned int t_mask = (1 << n) - 1; long long total = 0; long long upper_total = 0; int i = 0, j; unsigned int index; mask[0] = 0; l_mask[0] = 0; r_mask[0] = 0; m[0] = 0; for(j = 0; j < (n + 1) / 2; j++) { index = (1 << j); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; if(n % 2 == 1 && j == (n + 1) / 2 - 1) { upper_total = total; total = 0; } while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = ((m[i] + 1) ^ m[i]) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { if(i + 1 == n) { total++; i--; } else { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; } } else { i --; } } } } bunk = 2; if(n % 2 == 0) { return total * 2; } else { return upper_total * 2 + total; } } /* ------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver * with multi-thread support. 
* ------------------------------------------------------------------- */ /* struct thread_context { HANDLE thread; bool stop; long long total; int n; unsigned int mask; unsigned int l_mask; unsigned int r_mask; unsigned int t_mask; HANDLE ready; HANDLE complete; }; DWORD WINAPI solve_nqueen_proc(LPVOID param) { thread_context* ctx = (thread_context*) param; unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; unsigned int t_mask; long long total; unsigned int index; unsigned int mark; for(;;) { WaitForSingleObject(ctx->ready, INFINITE); if(ctx->stop) { break; } int i = 0; mask[0] = ctx->mask; l_mask[0] = ctx->l_mask; r_mask[0] = ctx->r_mask; m[0] = mask[0] | l_mask[0] | r_mask[0]; total = 0; t_mask = ctx->t_mask; mark = ctx->n; while(i >= 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { if(i + 1 == mark) { total++; i--; } else { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; } } else { i --; } } } ctx->total = total; SetEvent(ctx->complete); } return 0; } long long solve_nqueen_mcpu(int n) { if(n <= 0 || n > 32) { return 0; } SYSTEM_INFO info; thread_context* threads; int num_threads; GetSystemInfo(&info); num_threads = info.dwNumberOfProcessors; if(num_threads == 1) { // only one cpu found, use single thread version return solve_nqueen(n); } threads = new thread_context[num_threads]; int j; for(j = 0; j < num_threads; j++) { threads[j].stop = false; threads[j].ready = CreateEvent(0, FALSE, FALSE, 0); threads[j].complete = CreateEvent(0, FALSE, TRUE, 0); threads[j].thread = CreateThread(0, 0, solve_nqueen_proc, threads + j, 0, 0); threads[j].total = 0; } int thread_idx = 0; const unsigned int t_mask = (1 << n) - 1; long long total = 0; unsigned int index; unsigned int m_mask = 0; if(n % 2 == 1) { m_mask = 1 << ((n + 1) / 2 - 1); } for(j = 0; j < (n + 1) / 2; j++) { index = 1 << j; WaitForSingleObject(threads[thread_idx].complete, INFINITE); if(threads[thread_idx].mask != m_mask) { total += threads[thread_idx].total * 2; } else { total += threads[thread_idx].total; } threads[thread_idx].mask = index; threads[thread_idx].l_mask = index << 1; threads[thread_idx].r_mask = index >> 1; threads[thread_idx].t_mask = t_mask; threads[thread_idx].total = 0; threads[thread_idx].n = n - 1; SetEvent(threads[thread_idx].ready); thread_idx = (thread_idx + 1) % num_threads; } // collect all threads... HANDLE* events = new HANDLE[num_threads]; for(j = 0; j < num_threads; j++) { events[j] = threads[j].complete; } WaitForMultipleObjects(num_threads, events, TRUE, INFINITE); for(j = 0; j < num_threads; j++) { if(threads[j].mask != m_mask) { total += threads[j].total * 2; } else { total += threads[j].total; } threads[j].stop = true; SetEvent(threads[j].ready); events[j] = threads[j].thread; } WaitForMultipleObjects(num_threads, events, TRUE, INFINITE); for(j = 0; j < num_threads; j++) { CloseHandle(threads[j].thread); CloseHandle(threads[j].ready); CloseHandle(threads[j].complete); } delete[] threads; delete[] events; bunk = 3; return total; } */ /* -------------------------------------------------------------------------- * This is a non-recursive version of n-queen backtracking solver for CUDA. * It receives multiple initial conditions from a CPU iterator, and count * each conditions. 
* -------------------------------------------------------------------------- */ __global__ void solve_nqueen_cuda_kernel(int n, int mark, unsigned int* total_masks, unsigned int* total_l_masks, unsigned int* total_r_masks, unsigned int* results, int total_conditions) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int idx = bid * blockDim.x + tid; __shared__ unsigned int mask[THREAD_NUM][10]; __shared__ unsigned int l_mask[THREAD_NUM][10]; __shared__ unsigned int r_mask[THREAD_NUM][10]; __shared__ unsigned int m[THREAD_NUM][10]; __shared__ unsigned int sum[THREAD_NUM]; const unsigned int t_mask = (1 << n) - 1; int total = 0; int i = 0; unsigned int index; if(idx < total_conditions) { mask[tid][i] = total_masks[idx]; l_mask[tid][i] = total_l_masks[idx]; r_mask[tid][i] = total_r_masks[idx]; m[tid][i] = mask[tid][i] | l_mask[tid][i] | r_mask[tid][i]; while(i >= 0) { if((m[tid][i] & t_mask) == t_mask) { i--; } else { index = (m[tid][i] + 1) & ~m[tid][i]; m[tid][i] |= index; if((index & t_mask) != 0) { if(i + 1 == mark) { total++; i--; } else { mask[tid][i + 1] = mask[tid][i] | index; l_mask[tid][i + 1] = (l_mask[tid][i] | index) << 1; r_mask[tid][i + 1] = (r_mask[tid][i] | index) >> 1; m[tid][i + 1] = (mask[tid][i + 1] | l_mask[tid][i + 1] | r_mask[tid][i + 1]); i++; } } else { i --; } } } sum[tid] = total; } else { sum[tid] = 0; } __syncthreads(); // reduction if(tid < 64 && tid + 64 < THREAD_NUM) { sum[tid] += sum[tid + 64]; } __syncthreads(); if(tid < 32) { sum[tid] += sum[tid + 32]; } __syncthreads(); if(tid < 16) { sum[tid] += sum[tid + 16]; } __syncthreads(); if(tid < 8) { sum[tid] += sum[tid + 8]; } __syncthreads(); if(tid < 4) { sum[tid] += sum[tid + 4]; } __syncthreads(); if(tid < 2) { sum[tid] += sum[tid + 2]; } __syncthreads(); if(tid < 1) { sum[tid] += sum[tid + 1]; } __syncthreads(); if(tid == 0) { results[bid] = sum[0]; } } long long solve_nqueen_cuda(int n, int steps) { // generating start conditions unsigned int mask[32]; unsigned int l_mask[32]; unsigned int r_mask[32]; unsigned int m[32]; unsigned int index; if(n <= 0 || n > 32) { return 0; } unsigned int* total_masks = new unsigned int[steps]; unsigned int* total_l_masks = new unsigned int[steps]; unsigned int* total_r_masks = new unsigned int[steps]; unsigned int* results = new unsigned int[steps]; unsigned int* masks_cuda; unsigned int* l_masks_cuda; unsigned int* r_masks_cuda; unsigned int* results_cuda; cudaMalloc((void**) &masks_cuda, sizeof(int) * steps); cudaMalloc((void**) &l_masks_cuda, sizeof(int) * steps); cudaMalloc((void**) &r_masks_cuda, sizeof(int) * steps); cudaMalloc((void**) &results_cuda, sizeof(int) * steps / THREAD_NUM); const unsigned int t_mask = (1 << n) - 1; const unsigned int mark = n > 11 ? 
n - 10 : 2; long long total = 0; int total_conditions = 0; int i = 0, j; mask[0] = 0; l_mask[0] = 0; r_mask[0] = 0; m[0] = 0; bool computed = false; for(j = 0; j < n / 2; j++) { index = (1 << j); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; if(i == mark) { total_masks[total_conditions] = mask[i]; total_l_masks[total_conditions] = l_mask[i]; total_r_masks[total_conditions] = r_mask[i]; total_conditions++; if(total_conditions == steps) { if(computed) { cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } // start computation cudaMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); printf("%d %d\n",steps/THREAD_NUM,THREAD_NUM); solve_nqueen_cuda_kernel<<<steps/THREAD_NUM, THREAD_NUM>>>(n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); computed = true; total_conditions = 0; } i--; } } else { i --; } } } } if(computed) { cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } cudaMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); printf("%d %d\n",steps/THREAD_NUM,THREAD_NUM); solve_nqueen_cuda_kernel<<<steps/THREAD_NUM, THREAD_NUM>>>(n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } total *= 2; if(n % 2 == 1) { computed = false; total_conditions = 0; index = (1 << (n - 1) / 2); m[0] |= index; mask[1] = index; l_mask[1] = index << 1; r_mask[1] = index >> 1; m[1] = (mask[1] | l_mask[1] | r_mask[1]); i = 1; while(i > 0) { if((m[i] & t_mask) == t_mask) { i--; } else { index = (m[i] + 1) & ~m[i]; m[i] |= index; if((index & t_mask) != 0) { mask[i + 1] = mask[i] | index; l_mask[i + 1] = (l_mask[i] | index) << 1; r_mask[i + 1] = (r_mask[i] | index) >> 1; m[i + 1] = (mask[i + 1] | l_mask[i + 1] | r_mask[i + 1]); i++; if(i == mark) { total_masks[total_conditions] = mask[i]; total_l_masks[total_conditions] = l_mask[i]; total_r_masks[total_conditions] = r_mask[i]; total_conditions++; if(total_conditions == steps) { if(computed) { cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } // start computation cudaMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * 
total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); printf("%d %d\n",steps/THREAD_NUM,THREAD_NUM); solve_nqueen_cuda_kernel<<<steps/THREAD_NUM, THREAD_NUM>>>(n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); computed = true; total_conditions = 0; } i--; } } else { i --; } } } if(computed) { cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } computed = false; } cudaMemcpy(masks_cuda, total_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(l_masks_cuda, total_l_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); cudaMemcpy(r_masks_cuda, total_r_masks, sizeof(int) * total_conditions, cudaMemcpyHostToDevice); printf("%d %d\n",steps/THREAD_NUM,THREAD_NUM); solve_nqueen_cuda_kernel<<<steps/THREAD_NUM, THREAD_NUM>>>(n, n - mark, masks_cuda, l_masks_cuda, r_masks_cuda, results_cuda, total_conditions); cudaMemcpy(results, results_cuda, sizeof(int) * steps / THREAD_NUM, cudaMemcpyDeviceToHost); for(int j = 0; j < steps / THREAD_NUM; j++) { total += results[j]; } } cudaFree(masks_cuda); cudaFree(l_masks_cuda); cudaFree(r_masks_cuda); cudaFree(results_cuda); delete[] total_masks; delete[] total_l_masks; delete[] total_r_masks; delete[] results; bunk = 1; return total; } bool InitCUDA() { int count; cudaGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } int i; for(i = 0; i < count; i++) { cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } cudaSetDevice(i); return true; } int main(int argc, char** argv) { unsigned int hTimer; double gpuTime; // initialise card and timer int deviceCount; CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "There is no device.\n"); exit(EXIT_FAILURE); } int dev; for (dev = 0; dev < deviceCount; ++dev) { cudaDeviceProp deviceProp; CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceProperties(&deviceProp, dev)); if (deviceProp.major >= 1) break; } if (dev == deviceCount) { fprintf(stderr, "There is no device supporting CUDA.\n"); exit(EXIT_FAILURE); } else CUDA_SAFE_CALL(cudaSetDevice(dev)); CUT_SAFE_CALL( cutCreateTimer(&hTimer) ); int n = 11;//11; clock_t start, end; long long solution; bool cpu = true, gpu = true; int argstart = 1, steps = 24576; if(argc >= 2 && argv[1][0] == '-') { if(argv[1][1] == 'c' || argv[1][1] == 'C') { gpu = false; } else if(argv[1][1] == 'g' || argv[1][1] == 'G') { cpu = false; } argstart = 2; } if(argc < argstart + 1) { printf("Usage: %s [-c|-g] n steps\n", argv[0]); printf(" -c: CPU only\n"); printf(" -g: GPU only\n"); printf(" n: n-queen\n"); printf(" steps: step for GPU\n"); printf("Default to 8 queen\n"); } else { n = atoi(argv[argstart]); if(n <= 1 || n > 32) { printf("Invalid n, n should be > 1 and <= 32\n"); printf("Note: n > 18 will require a very very long time to compute!\n"); return 0; } if(argc >= argstart + 2) { steps = atoi(argv[argstart + 1]); if(steps <= THREAD_NUM || steps % THREAD_NUM != 0) { printf("Invalid step, step should be multiple of %d\n", THREAD_NUM); return 0; } } } if(gpu) { if(!InitCUDA()) { return 0; } printf("CUDA initialized.\n"); } if(cpu) { CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_SAFE_CALL( 
cutResetTimer(hTimer) ); CUT_SAFE_CALL( cutStartTimer(hTimer) ); //start = clock(); solution = solve_nqueen(n); //solve_nqueen_mcpu(n); //solution = solve_nqueen(n); //end = clock(); CUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("CPU: %d queen = %lld time = %f msec\n", n, solution, gpuTime); } if(gpu) { //start = clock(); CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_SAFE_CALL( cutResetTimer(hTimer) ); CUT_SAFE_CALL( cutStartTimer(hTimer) ); solution = solve_nqueen_cuda(n, steps); //end = clock(); CUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printf("GPU: %d queen = %lld time = %f msec\n", n, solution, gpuTime); } return 0; }
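/* -------------------------------------------------------------------------
 * Editor's note: an equivalent, loop-based form of the in-block reduction at
 * the end of solve_nqueen_cuda_kernel above, shown only for illustration; it
 * is not a drop-in change to the original kernel. It assumes THREAD_NUM <= 128
 * (the file uses 96) and a shared-memory array of partial sums.
 * ------------------------------------------------------------------------- */
__device__ void block_reduce_sum(unsigned int* sum, int tid) {
  for (int stride = 64; stride >= 1; stride >>= 1) {
    if (tid < stride && tid + stride < THREAD_NUM) {
      sum[tid] += sum[tid + stride];   // same pairwise adds as the unrolled version
    }
    __syncthreads();                   // keep all threads in step between rounds
  }
  // After the loop, sum[0] holds the block total, as in results[bid] = sum[0].
}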
08e754163913706c51121d26fe7fff9588b8c40f.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THHNumerics.cuh> #include "THH/THH.h" #include <hip/hip_runtime.h> #include "compat.h" #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, hipGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) template<> struct std::hash<hipIpcMemHandle_t> { size_t operator() (const hipIpcMemHandle_t& handle) const { size_t hash = 0; uint8_t* ptr = (uint8_t*)&handle; assert(sizeof(uint8_t) == 1); for (int i=0; i<sizeof(hipIpcMemHandle_t); i++) { hash += *ptr; ptr++; } return hash; } }; template<> struct std::equal_to<hipIpcMemHandle_t> { bool operator() (const hipIpcMemHandle_t &lhs, const hipIpcMemHandle_t &rhs) const { return (std::memcmp((void*) &lhs, (void*) &rhs, sizeof(hipIpcMemHandle_t)) == 0); } }; namespace { namespace gpuipc { //from: src/operator/nn/cudnn/nhwc_batch_norm_kernel.h // The number of threads per pixel. const int THREADS_PER_PIXEL = 16; // The number of elements per ldg. const int ELEMENTS_PER_LDG = 4; // The number of reducing ops, each uses its own space : mean, var, dscale, dbias const int REDUCE_OPS = 4; // Maximum block.y supported - limited due to buffer allocation const int MAX_BLOCK_Y = 256; const int MAX_OFFSET = REDUCE_OPS*MAX_BLOCK_Y; const int BYTES_PER_ELEM = 4; // Buffer size per sync step const int SINGLE_SYNC_BUFFER_BYTES = MAX_OFFSET*THREADS_PER_PIXEL*2*ELEMENTS_PER_LDG*BYTES_PER_ELEM; }; class IpcMemHandleRegistry { public: void* getPtr(const hipIpcMemHandle_t& handle, int64_t offset) { if (registry_.count(handle) == 0) { registry_.insert(std::make_pair(handle, RegistryEntry())); registry_[handle].dev_ptr = ipcOpenMem(handle); } registry_[handle].ref_count++; return (((uint8_t*)registry_[handle].dev_ptr) + offset); } void releasePtr(const hipIpcMemHandle_t& handle) { if (registry_.count(handle) == 0) { } if (--registry_[handle].ref_count == 0) { ipcCloseMem(registry_[handle].dev_ptr); registry_.erase(handle); } } struct RegistryEntry { void* dev_ptr; int ref_count; RegistryEntry() : dev_ptr(NULL) , ref_count(0) {} }; protected: std::unordered_map<hipIpcMemHandle_t, RegistryEntry> registry_; void* ipcOpenMem(const hipIpcMemHandle_t& handle) { void *data; hipIpcOpenMemHandle(&data, handle, hipIpcMemLazyEnablePeerAccess); cudaCheckErrors("ipc init"); return data; } void ipcCloseMem(void* dev_ptr) { hipIpcCloseMemHandle(dev_ptr); cudaCheckErrors("ipc close"); } }; } static IpcMemHandleRegistry ipc_mem_registry; int64_t get_buffer_size(const int bn_sync_steps) { return bn_sync_steps * gpuipc::SINGLE_SYNC_BUFFER_BYTES; } void* get_remote_data_ptr(const at::Tensor& handle, const int64_t offset) { hipIpcMemHandle_t my_handle; memcpy((unsigned char *)(&my_handle), handle.DATA_PTR<uint8_t>(), sizeof(my_handle)); return ipc_mem_registry.getPtr(my_handle, offset); } void close_remote_data(const at::Tensor& handle) { hipIpcMemHandle_t my_handle; memcpy((unsigned char *)(&my_handle), handle.DATA_PTR<uint8_t>(), sizeof(my_handle)); ipc_mem_registry.releasePtr(my_handle); } void* get_data_ptr( const at::Tensor& data) { return data.DATA_PTR<uint8_t>(); }
08e754163913706c51121d26fe7fff9588b8c40f.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THCNumerics.cuh> #include "THC/THC.h" #include <cuda.h> #include "compat.h" #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) template<> struct std::hash<cudaIpcMemHandle_t> { size_t operator() (const cudaIpcMemHandle_t& handle) const { size_t hash = 0; uint8_t* ptr = (uint8_t*)&handle; assert(sizeof(uint8_t) == 1); for (int i=0; i<sizeof(cudaIpcMemHandle_t); i++) { hash += *ptr; ptr++; } return hash; } }; template<> struct std::equal_to<cudaIpcMemHandle_t> { bool operator() (const cudaIpcMemHandle_t &lhs, const cudaIpcMemHandle_t &rhs) const { return (std::memcmp((void*) &lhs, (void*) &rhs, sizeof(cudaIpcMemHandle_t)) == 0); } }; namespace { namespace gpuipc { //from: src/operator/nn/cudnn/nhwc_batch_norm_kernel.h // The number of threads per pixel. const int THREADS_PER_PIXEL = 16; // The number of elements per ldg. const int ELEMENTS_PER_LDG = 4; // The number of reducing ops, each uses its own space : mean, var, dscale, dbias const int REDUCE_OPS = 4; // Maximum block.y supported - limited due to buffer allocation const int MAX_BLOCK_Y = 256; const int MAX_OFFSET = REDUCE_OPS*MAX_BLOCK_Y; const int BYTES_PER_ELEM = 4; // Buffer size per sync step const int SINGLE_SYNC_BUFFER_BYTES = MAX_OFFSET*THREADS_PER_PIXEL*2*ELEMENTS_PER_LDG*BYTES_PER_ELEM; }; class IpcMemHandleRegistry { public: void* getPtr(const cudaIpcMemHandle_t& handle, int64_t offset) { if (registry_.count(handle) == 0) { registry_.insert(std::make_pair(handle, RegistryEntry())); registry_[handle].dev_ptr = ipcOpenMem(handle); } registry_[handle].ref_count++; return (((uint8_t*)registry_[handle].dev_ptr) + offset); } void releasePtr(const cudaIpcMemHandle_t& handle) { if (registry_.count(handle) == 0) { } if (--registry_[handle].ref_count == 0) { ipcCloseMem(registry_[handle].dev_ptr); registry_.erase(handle); } } struct RegistryEntry { void* dev_ptr; int ref_count; RegistryEntry() : dev_ptr(NULL) , ref_count(0) {} }; protected: std::unordered_map<cudaIpcMemHandle_t, RegistryEntry> registry_; void* ipcOpenMem(const cudaIpcMemHandle_t& handle) { void *data; cudaIpcOpenMemHandle(&data, handle, cudaIpcMemLazyEnablePeerAccess); cudaCheckErrors("ipc init"); return data; } void ipcCloseMem(void* dev_ptr) { cudaIpcCloseMemHandle(dev_ptr); cudaCheckErrors("ipc close"); } }; } static IpcMemHandleRegistry ipc_mem_registry; int64_t get_buffer_size(const int bn_sync_steps) { return bn_sync_steps * gpuipc::SINGLE_SYNC_BUFFER_BYTES; } void* get_remote_data_ptr(const at::Tensor& handle, const int64_t offset) { cudaIpcMemHandle_t my_handle; memcpy((unsigned char *)(&my_handle), handle.DATA_PTR<uint8_t>(), sizeof(my_handle)); return ipc_mem_registry.getPtr(my_handle, offset); } void close_remote_data(const at::Tensor& handle) { cudaIpcMemHandle_t my_handle; memcpy((unsigned char *)(&my_handle), handle.DATA_PTR<uint8_t>(), sizeof(my_handle)); ipc_mem_registry.releasePtr(my_handle); } void* get_data_ptr( const at::Tensor& data) { return data.DATA_PTR<uint8_t>(); }
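For context on where the IPC handle consumed by get_remote_data_ptr comes from, a minimal producer-side sketch (assumed usage, not part of this file) allocates the sync buffer and exports a handle with cudaIpcGetMemHandle; a peer process can then open it exactly as the registry above does.

#include <cuda_runtime.h>
#include <cstring>
#include <cstdint>

// Illustrative sketch: allocate the shared sync buffer in this process and
// serialize its IPC handle into a byte buffer (e.g. a uint8 tensor) that is
// exchanged with peer processes. Error handling is elided for brevity.
void export_sync_buffer(size_t buffer_bytes, void** dev_ptr, uint8_t* handle_bytes) {
    cudaMalloc(dev_ptr, buffer_bytes);          // e.g. get_buffer_size(bn_sync_steps)
    cudaMemset(*dev_ptr, 0, buffer_bytes);
    cudaIpcMemHandle_t handle;
    cudaIpcGetMemHandle(&handle, *dev_ptr);     // handle is CUDA_IPC_HANDLE_SIZE bytes
    std::memcpy(handle_bytes, &handle, sizeof(handle));
}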
510012ce57cb2bfd2e8c02f726d657710149d79f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_update_halo_kernel3_minus_2_b [3][2]; static int dims_update_halo_kernel3_minus_2_b_h [3][2] = {0}; //user function __device__ inline void update_halo_kernel3_minus_2_b_gpu(ACC<double> &vol_flux_x, ACC<double> &mass_flux_x, const int* fields) { if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x(0,0,0) = -(vol_flux_x(-2,0,0)); if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x(0,0,0) = -(mass_flux_x(-2,0,0)); } __global__ void ops_update_halo_kernel3_minus_2_b( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_minus_2_b[0][0] + idx_z * 1*1 * dims_update_halo_kernel3_minus_2_b[0][0] * dims_update_halo_kernel3_minus_2_b[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_minus_2_b[1][0] + idx_z * 1*1 * dims_update_halo_kernel3_minus_2_b[1][0] * dims_update_halo_kernel3_minus_2_b[1][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel3_minus_2_b[0][0], dims_update_halo_kernel3_minus_2_b[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel3_minus_2_b[1][0], dims_update_halo_kernel3_minus_2_b[1][1], arg1); update_halo_kernel3_minus_2_b_gpu(argp0, argp1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel3_minus_2_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel3_minus_2_b_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,66)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(66,"update_halo_kernel3_minus_2_b"); OPS_kernels[66].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != dims_update_halo_kernel3_minus_2_b_h[0][0] || ydim0 != dims_update_halo_kernel3_minus_2_b_h[0][1] || xdim1 != dims_update_halo_kernel3_minus_2_b_h[1][0] || ydim1 != dims_update_halo_kernel3_minus_2_b_h[1][1]) { dims_update_halo_kernel3_minus_2_b_h[0][0] = xdim0; dims_update_halo_kernel3_minus_2_b_h[0][1] = ydim0; dims_update_halo_kernel3_minus_2_b_h[1][0] = xdim1; dims_update_halo_kernel3_minus_2_b_h[1][1] = ydim1; cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel3_minus_2_b, dims_update_halo_kernel3_minus_2_b_h, sizeof(dims_update_halo_kernel3_minus_2_b))); } int *arg2h = (int *)arg2.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int 
z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[66].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel3_minus_2_b), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[66].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[66].mpi_time += t2-t1; OPS_kernels[66].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[66].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel3_minus_2_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 66; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 66; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel3_minus_2_b_execute; if (OPS_diags > 1) { ops_timing_realloc(66,"update_halo_kernel3_minus_2_b"); } ops_enqueue_kernel(desc); } #endif
510012ce57cb2bfd2e8c02f726d657710149d79f.cu
// // auto-generated by ops.py // __constant__ int dims_update_halo_kernel3_minus_2_b [3][2]; static int dims_update_halo_kernel3_minus_2_b_h [3][2] = {0}; //user function __device__ inline void update_halo_kernel3_minus_2_b_gpu(ACC<double> &vol_flux_x, ACC<double> &mass_flux_x, const int* fields) { if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x(0,0,0) = -(vol_flux_x(-2,0,0)); if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x(0,0,0) = -(mass_flux_x(-2,0,0)); } __global__ void ops_update_halo_kernel3_minus_2_b( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_minus_2_b[0][0] + idx_z * 1*1 * dims_update_halo_kernel3_minus_2_b[0][0] * dims_update_halo_kernel3_minus_2_b[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_minus_2_b[1][0] + idx_z * 1*1 * dims_update_halo_kernel3_minus_2_b[1][0] * dims_update_halo_kernel3_minus_2_b[1][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel3_minus_2_b[0][0], dims_update_halo_kernel3_minus_2_b[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel3_minus_2_b[1][0], dims_update_halo_kernel3_minus_2_b[1][1], arg1); update_halo_kernel3_minus_2_b_gpu(argp0, argp1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel3_minus_2_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel3_minus_2_b_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,66)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(66,"update_halo_kernel3_minus_2_b"); OPS_kernels[66].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != dims_update_halo_kernel3_minus_2_b_h[0][0] || ydim0 != dims_update_halo_kernel3_minus_2_b_h[0][1] || xdim1 != dims_update_halo_kernel3_minus_2_b_h[1][0] || ydim1 != dims_update_halo_kernel3_minus_2_b_h[1][1]) { dims_update_halo_kernel3_minus_2_b_h[0][0] = xdim0; dims_update_halo_kernel3_minus_2_b_h[0][1] = ydim0; dims_update_halo_kernel3_minus_2_b_h[1][0] = xdim1; dims_update_halo_kernel3_minus_2_b_h[1][1] = ydim1; cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel3_minus_2_b, dims_update_halo_kernel3_minus_2_b_h, sizeof(dims_update_halo_kernel3_minus_2_b))); } int *arg2h = (int *)arg2.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 
(y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[66].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel3_minus_2_b<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[66].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[66].mpi_time += t2-t1; OPS_kernels[66].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[66].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel3_minus_2_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 66; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 66; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel3_minus_2_b_execute; if (OPS_diags > 1) { ops_timing_realloc(66,"update_halo_kernel3_minus_2_b"); } ops_enqueue_kernel(desc); } #endif
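Stripped of the OPS indexing and launch plumbing, the operation this kernel pair performs is a negate-and-reflect copy from two cells inside the boundary into the halo. A standalone sketch of that core update (illustrative only, using a plain flat layout rather than OPS's ACC accessors) could be:

// Illustrative sketch: for a flat nx*ny*nz field, write the negated value from
// two cells inward into the outer x halo cell, as update_halo_kernel3_minus_2_b_gpu
// does via its (-2,0,0) stencil access.
__global__ void reflect_x_halo(double* field, int nx, int ny, int nz) {
    int y = blockIdx.x * blockDim.x + threadIdx.x;
    int z = blockIdx.y * blockDim.y + threadIdx.y;
    if (y >= ny || z >= nz) return;
    size_t row = ((size_t)z * ny + y) * nx;
    field[row + (nx - 1)] = -field[row + (nx - 3)];
}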
25d80fc2179cd88aaaa53cd894529b94446646bf.hip
// !!! This is a file automatically generated by hipify!!!
#include "errorHandle.h"

void checkError(hipError_t error) {
    if (error == hipSuccess)
        return;
    fprintf(stderr, "CUDA_ERROR(%d) %s: %s\n", error, hipGetErrorName(error), hipGetErrorString(error));
    abort();
}
25d80fc2179cd88aaaa53cd894529b94446646bf.cu
#include "errorHandle.h"

void checkError(cudaError_t error) {
    if (error == cudaSuccess)
        return;
    fprintf(stderr, "CUDA_ERROR(%d) %s: %s\n", error, cudaGetErrorName(error), cudaGetErrorString(error));
    abort();
}
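A typical call site for this helper (assumed usage, not part of the file) simply wraps each runtime API call so failures abort with a readable message:

#include <cuda_runtime.h>
#include "errorHandle.h"

int main() {
    float* d_buf = nullptr;
    checkError(cudaMalloc(&d_buf, 1024 * sizeof(float)));   // aborts on failure
    checkError(cudaMemset(d_buf, 0, 1024 * sizeof(float)));
    checkError(cudaFree(d_buf));
    return 0;
}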
f7ce41b165e9b5cfbf9f21a972302cec98bd6d1c.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/GpuIndex.h> #include <faiss/gpu/GpuResources.h> #include <faiss/gpu/impl/InterleavedCodes.h> #include <faiss/gpu/impl/RemapIndices.h> #include <faiss/gpu/utils/DeviceUtils.h> #include <thrust/host_vector.h> #include <faiss/gpu/impl/FlatIndex.cuh> #include <faiss/gpu/impl/IVFAppend.cuh> #include <faiss/gpu/impl/IVFFlat.cuh> #include <faiss/gpu/impl/IVFFlatScan.cuh> #include <faiss/gpu/impl/IVFInterleaved.cuh> #include <faiss/gpu/utils/ConversionOperators.cuh> #include <faiss/gpu/utils/CopyUtils.cuh> #include <faiss/gpu/utils/DeviceDefs.cuh> #include <faiss/gpu/utils/Float16.cuh> #include <faiss/gpu/utils/HostTensor.cuh> #include <faiss/gpu/utils/Transpose.cuh> #include <limits> #include <unordered_map> namespace faiss { namespace gpu { IVFFlat::IVFFlat( GpuResources* res, int dim, int nlist, faiss::MetricType metric, float metricArg, bool useResidual, faiss::ScalarQuantizer* scalarQ, bool interleavedLayout, IndicesOptions indicesOptions, MemorySpace space) : IVFBase(res, dim, nlist, metric, metricArg, useResidual, interleavedLayout, indicesOptions, space), scalarQ_(scalarQ ? new GpuScalarQuantizer(res, *scalarQ) : nullptr) {} IVFFlat::~IVFFlat() {} size_t IVFFlat::getGpuVectorsEncodingSize_(int numVecs) const { if (interleavedLayout_) { // bits per scalar code int bits = scalarQ_ ? scalarQ_->bits : 32 /* float */; // bytes to encode a block of 32 vectors (single dimension) int bytesPerDimBlock = bits * 32 / 8; // bytes to fully encode 32 vectors int bytesPerBlock = bytesPerDimBlock * dim_; // number of blocks of 32 vectors we have int numBlocks = utils::divUp(numVecs, 32); // total size to encode numVecs return bytesPerBlock * numBlocks; } else { size_t sizePerVector = (scalarQ_ ? scalarQ_->code_size : sizeof(float) * dim_); return (size_t)numVecs * sizePerVector; } } size_t IVFFlat::getCpuVectorsEncodingSize_(int numVecs) const { size_t sizePerVector = (scalarQ_ ? scalarQ_->code_size : sizeof(float) * dim_); return (size_t)numVecs * sizePerVector; } std::vector<uint8_t> IVFFlat::translateCodesToGpu_( std::vector<uint8_t> codes, size_t numVecs) const { if (!interleavedLayout_) { // same format return codes; } int bitsPerCode = scalarQ_ ? scalarQ_->bits : 32; auto up = unpackNonInterleaved(std::move(codes), numVecs, dim_, bitsPerCode); return packInterleaved(std::move(up), numVecs, dim_, bitsPerCode); } std::vector<uint8_t> IVFFlat::translateCodesFromGpu_( std::vector<uint8_t> codes, size_t numVecs) const { if (!interleavedLayout_) { // same format return codes; } int bitsPerCode = scalarQ_ ? 
scalarQ_->bits : 32; auto up = unpackInterleaved(std::move(codes), numVecs, dim_, bitsPerCode); return packNonInterleaved(std::move(up), numVecs, dim_, bitsPerCode); } void IVFFlat::appendVectors_( Tensor<float, 2, true>& vecs, Tensor<float, 2, true>& ivfCentroidResiduals, Tensor<idx_t, 1, true>& indices, Tensor<idx_t, 1, true>& uniqueLists, Tensor<int, 1, true>& vectorsByUniqueList, Tensor<int, 1, true>& uniqueListVectorStart, Tensor<int, 1, true>& uniqueListStartOffset, Tensor<idx_t, 1, true>& listIds, Tensor<int, 1, true>& listOffset, hipStream_t stream) { // // Append the new encodings // // Append indices to the IVF lists runIVFIndicesAppend( listIds, listOffset, indices, indicesOptions_, deviceListIndexPointers_, stream); // Append the encoded vectors to the IVF lists if (interleavedLayout_) { runIVFFlatInterleavedAppend( listIds, listOffset, uniqueLists, vectorsByUniqueList, uniqueListVectorStart, uniqueListStartOffset, useResidual_ ? ivfCentroidResiduals : vecs, scalarQ_.get(), deviceListDataPointers_, resources_, stream); } else { runIVFFlatAppend( listIds, listOffset, useResidual_ ? ivfCentroidResiduals : vecs, scalarQ_.get(), deviceListDataPointers_, stream); } } void IVFFlat::search( Index* coarseQuantizer, Tensor<float, 2, true>& queries, int nprobe, int k, Tensor<float, 2, true>& outDistances, Tensor<idx_t, 2, true>& outIndices) { auto stream = resources_->getDefaultStreamCurrentDevice(); // These are caught at a higher level FAISS_ASSERT(nprobe <= GPU_MAX_SELECTION_K); FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); nprobe = ::min(nprobe, (int)getNumLists()); FAISS_ASSERT(queries.getSize(1) == dim_); FAISS_ASSERT(outDistances.getSize(0) == queries.getSize(0)); FAISS_ASSERT(outIndices.getSize(0) == queries.getSize(0)); // Reserve space for the quantized information DeviceTensor<float, 2, true> coarseDistances( resources_, makeTempAlloc(AllocType::Other, stream), {queries.getSize(0), nprobe}); DeviceTensor<idx_t, 2, true> coarseIndices( resources_, makeTempAlloc(AllocType::Other, stream), {queries.getSize(0), nprobe}); // in case we also want/need residuals, we need the original centroids as // well // FIXME: why centroids instead of calculating residuals in one go? DeviceTensor<float, 3, true> residualBase( resources_, makeTempAlloc(AllocType::Other, stream), {queries.getSize(0), nprobe, dim_}); searchCoarseQuantizer_( coarseQuantizer, nprobe, queries, coarseDistances, coarseIndices, nullptr, // we need the IVF centroids to which vectors were assigned if // vectors are encoded using the residual useResidual_ ? 
&residualBase : nullptr); searchImpl_( queries, coarseDistances, coarseIndices, residualBase, k, outDistances, outIndices, false); } void IVFFlat::searchPreassigned( Index* coarseQuantizer, Tensor<float, 2, true>& vecs, Tensor<float, 2, true>& ivfDistances, Tensor<idx_t, 2, true>& ivfAssignments, int k, Tensor<float, 2, true>& outDistances, Tensor<idx_t, 2, true>& outIndices, bool storePairs) { FAISS_ASSERT(ivfDistances.getSize(0) == vecs.getSize(0)); FAISS_ASSERT(ivfAssignments.getSize(0) == vecs.getSize(0)); FAISS_ASSERT(outDistances.getSize(0) == vecs.getSize(0)); FAISS_ASSERT(outIndices.getSize(0) == vecs.getSize(0)); FAISS_ASSERT(vecs.getSize(1) == dim_); auto stream = resources_->getDefaultStreamCurrentDevice(); auto nprobe = ivfAssignments.getSize(1); FAISS_ASSERT(nprobe <= numLists_); // Based on the IVF assignments, we need the IVF centroids to which vectors // were assigned // FIXME: IVFPQ doesn't need this information as it has direct reference to // all IVF centroids and within the various kernels can look it up by index // as needed. Can we convert IVFFlat to do the same thing? DeviceTensor<float, 3, true> ivfCentroids( resources_, makeTempAlloc(AllocType::Other, stream), {vecs.getSize(0), nprobe, dim_}); auto gpuQuantizer = tryCastGpuIndex(coarseQuantizer); if (gpuQuantizer) { // We can pass device pointers directly gpuQuantizer->reconstruct_batch( vecs.getSize(0) * nprobe, ivfAssignments.data(), ivfCentroids.data()); } else { // CPU coarse quantizer auto cpuIVFCentroids = std::vector<float>(vecs.getSize(0) * nprobe * dim_); // We need to copy `ivfAssignments` to the CPU, in order to pass to a // CPU index auto cpuIVFAssignments = ivfAssignments.copyToVector(stream); coarseQuantizer->reconstruct_batch( vecs.getSize(0) * nprobe, cpuIVFAssignments.data(), cpuIVFCentroids.data()); ivfCentroids.copyFrom(cpuIVFCentroids, stream); } searchImpl_( vecs, ivfDistances, ivfAssignments, ivfCentroids, k, outDistances, outIndices, storePairs); } void IVFFlat::searchImpl_( Tensor<float, 2, true>& queries, Tensor<float, 2, true>& coarseDistances, Tensor<idx_t, 2, true>& coarseIndices, Tensor<float, 3, true>& ivfCentroids, int k, Tensor<float, 2, true>& outDistances, Tensor<idx_t, 2, true>& outIndices, bool storePairs) { FAISS_ASSERT(storePairs == false); auto stream = resources_->getDefaultStreamCurrentDevice(); if (interleavedLayout_) { runIVFInterleavedScan( queries, coarseIndices, deviceListDataPointers_, deviceListIndexPointers_, indicesOptions_, deviceListLengths_, k, metric_, useResidual_, ivfCentroids, scalarQ_.get(), outDistances, outIndices, resources_); } else { runIVFFlatScan( queries, coarseIndices, deviceListDataPointers_, deviceListIndexPointers_, indicesOptions_, deviceListLengths_, maxListLength_, k, metric_, useResidual_, ivfCentroids, scalarQ_.get(), outDistances, outIndices, resources_); } // If the GPU isn't storing indices (they are on the CPU side), we // need to perform the re-mapping here // FIXME: we might ultimately be calling this function with inputs // from the CPU, these are unnecessary copies if (indicesOptions_ == INDICES_CPU) { HostTensor<idx_t, 2, true> hostOutIndices(outIndices, stream); ivfOffsetToUserIndex( hostOutIndices.data(), numLists_, hostOutIndices.getSize(0), hostOutIndices.getSize(1), listOffsetToUserIndex_); // Copy back to GPU, since the input to this function is on the // GPU outIndices.copyFrom(hostOutIndices, stream); } } } // namespace gpu } // namespace faiss
f7ce41b165e9b5cfbf9f21a972302cec98bd6d1c.cu
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/GpuIndex.h> #include <faiss/gpu/GpuResources.h> #include <faiss/gpu/impl/InterleavedCodes.h> #include <faiss/gpu/impl/RemapIndices.h> #include <faiss/gpu/utils/DeviceUtils.h> #include <thrust/host_vector.h> #include <faiss/gpu/impl/FlatIndex.cuh> #include <faiss/gpu/impl/IVFAppend.cuh> #include <faiss/gpu/impl/IVFFlat.cuh> #include <faiss/gpu/impl/IVFFlatScan.cuh> #include <faiss/gpu/impl/IVFInterleaved.cuh> #include <faiss/gpu/utils/ConversionOperators.cuh> #include <faiss/gpu/utils/CopyUtils.cuh> #include <faiss/gpu/utils/DeviceDefs.cuh> #include <faiss/gpu/utils/Float16.cuh> #include <faiss/gpu/utils/HostTensor.cuh> #include <faiss/gpu/utils/Transpose.cuh> #include <limits> #include <unordered_map> namespace faiss { namespace gpu { IVFFlat::IVFFlat( GpuResources* res, int dim, int nlist, faiss::MetricType metric, float metricArg, bool useResidual, faiss::ScalarQuantizer* scalarQ, bool interleavedLayout, IndicesOptions indicesOptions, MemorySpace space) : IVFBase(res, dim, nlist, metric, metricArg, useResidual, interleavedLayout, indicesOptions, space), scalarQ_(scalarQ ? new GpuScalarQuantizer(res, *scalarQ) : nullptr) {} IVFFlat::~IVFFlat() {} size_t IVFFlat::getGpuVectorsEncodingSize_(int numVecs) const { if (interleavedLayout_) { // bits per scalar code int bits = scalarQ_ ? scalarQ_->bits : 32 /* float */; // bytes to encode a block of 32 vectors (single dimension) int bytesPerDimBlock = bits * 32 / 8; // bytes to fully encode 32 vectors int bytesPerBlock = bytesPerDimBlock * dim_; // number of blocks of 32 vectors we have int numBlocks = utils::divUp(numVecs, 32); // total size to encode numVecs return bytesPerBlock * numBlocks; } else { size_t sizePerVector = (scalarQ_ ? scalarQ_->code_size : sizeof(float) * dim_); return (size_t)numVecs * sizePerVector; } } size_t IVFFlat::getCpuVectorsEncodingSize_(int numVecs) const { size_t sizePerVector = (scalarQ_ ? scalarQ_->code_size : sizeof(float) * dim_); return (size_t)numVecs * sizePerVector; } std::vector<uint8_t> IVFFlat::translateCodesToGpu_( std::vector<uint8_t> codes, size_t numVecs) const { if (!interleavedLayout_) { // same format return codes; } int bitsPerCode = scalarQ_ ? scalarQ_->bits : 32; auto up = unpackNonInterleaved(std::move(codes), numVecs, dim_, bitsPerCode); return packInterleaved(std::move(up), numVecs, dim_, bitsPerCode); } std::vector<uint8_t> IVFFlat::translateCodesFromGpu_( std::vector<uint8_t> codes, size_t numVecs) const { if (!interleavedLayout_) { // same format return codes; } int bitsPerCode = scalarQ_ ? 
scalarQ_->bits : 32; auto up = unpackInterleaved(std::move(codes), numVecs, dim_, bitsPerCode); return packNonInterleaved(std::move(up), numVecs, dim_, bitsPerCode); } void IVFFlat::appendVectors_( Tensor<float, 2, true>& vecs, Tensor<float, 2, true>& ivfCentroidResiduals, Tensor<idx_t, 1, true>& indices, Tensor<idx_t, 1, true>& uniqueLists, Tensor<int, 1, true>& vectorsByUniqueList, Tensor<int, 1, true>& uniqueListVectorStart, Tensor<int, 1, true>& uniqueListStartOffset, Tensor<idx_t, 1, true>& listIds, Tensor<int, 1, true>& listOffset, cudaStream_t stream) { // // Append the new encodings // // Append indices to the IVF lists runIVFIndicesAppend( listIds, listOffset, indices, indicesOptions_, deviceListIndexPointers_, stream); // Append the encoded vectors to the IVF lists if (interleavedLayout_) { runIVFFlatInterleavedAppend( listIds, listOffset, uniqueLists, vectorsByUniqueList, uniqueListVectorStart, uniqueListStartOffset, useResidual_ ? ivfCentroidResiduals : vecs, scalarQ_.get(), deviceListDataPointers_, resources_, stream); } else { runIVFFlatAppend( listIds, listOffset, useResidual_ ? ivfCentroidResiduals : vecs, scalarQ_.get(), deviceListDataPointers_, stream); } } void IVFFlat::search( Index* coarseQuantizer, Tensor<float, 2, true>& queries, int nprobe, int k, Tensor<float, 2, true>& outDistances, Tensor<idx_t, 2, true>& outIndices) { auto stream = resources_->getDefaultStreamCurrentDevice(); // These are caught at a higher level FAISS_ASSERT(nprobe <= GPU_MAX_SELECTION_K); FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); nprobe = std::min(nprobe, (int)getNumLists()); FAISS_ASSERT(queries.getSize(1) == dim_); FAISS_ASSERT(outDistances.getSize(0) == queries.getSize(0)); FAISS_ASSERT(outIndices.getSize(0) == queries.getSize(0)); // Reserve space for the quantized information DeviceTensor<float, 2, true> coarseDistances( resources_, makeTempAlloc(AllocType::Other, stream), {queries.getSize(0), nprobe}); DeviceTensor<idx_t, 2, true> coarseIndices( resources_, makeTempAlloc(AllocType::Other, stream), {queries.getSize(0), nprobe}); // in case we also want/need residuals, we need the original centroids as // well // FIXME: why centroids instead of calculating residuals in one go? DeviceTensor<float, 3, true> residualBase( resources_, makeTempAlloc(AllocType::Other, stream), {queries.getSize(0), nprobe, dim_}); searchCoarseQuantizer_( coarseQuantizer, nprobe, queries, coarseDistances, coarseIndices, nullptr, // we need the IVF centroids to which vectors were assigned if // vectors are encoded using the residual useResidual_ ? 
&residualBase : nullptr); searchImpl_( queries, coarseDistances, coarseIndices, residualBase, k, outDistances, outIndices, false); } void IVFFlat::searchPreassigned( Index* coarseQuantizer, Tensor<float, 2, true>& vecs, Tensor<float, 2, true>& ivfDistances, Tensor<idx_t, 2, true>& ivfAssignments, int k, Tensor<float, 2, true>& outDistances, Tensor<idx_t, 2, true>& outIndices, bool storePairs) { FAISS_ASSERT(ivfDistances.getSize(0) == vecs.getSize(0)); FAISS_ASSERT(ivfAssignments.getSize(0) == vecs.getSize(0)); FAISS_ASSERT(outDistances.getSize(0) == vecs.getSize(0)); FAISS_ASSERT(outIndices.getSize(0) == vecs.getSize(0)); FAISS_ASSERT(vecs.getSize(1) == dim_); auto stream = resources_->getDefaultStreamCurrentDevice(); auto nprobe = ivfAssignments.getSize(1); FAISS_ASSERT(nprobe <= numLists_); // Based on the IVF assignments, we need the IVF centroids to which vectors // were assigned // FIXME: IVFPQ doesn't need this information as it has direct reference to // all IVF centroids and within the various kernels can look it up by index // as needed. Can we convert IVFFlat to do the same thing? DeviceTensor<float, 3, true> ivfCentroids( resources_, makeTempAlloc(AllocType::Other, stream), {vecs.getSize(0), nprobe, dim_}); auto gpuQuantizer = tryCastGpuIndex(coarseQuantizer); if (gpuQuantizer) { // We can pass device pointers directly gpuQuantizer->reconstruct_batch( vecs.getSize(0) * nprobe, ivfAssignments.data(), ivfCentroids.data()); } else { // CPU coarse quantizer auto cpuIVFCentroids = std::vector<float>(vecs.getSize(0) * nprobe * dim_); // We need to copy `ivfAssignments` to the CPU, in order to pass to a // CPU index auto cpuIVFAssignments = ivfAssignments.copyToVector(stream); coarseQuantizer->reconstruct_batch( vecs.getSize(0) * nprobe, cpuIVFAssignments.data(), cpuIVFCentroids.data()); ivfCentroids.copyFrom(cpuIVFCentroids, stream); } searchImpl_( vecs, ivfDistances, ivfAssignments, ivfCentroids, k, outDistances, outIndices, storePairs); } void IVFFlat::searchImpl_( Tensor<float, 2, true>& queries, Tensor<float, 2, true>& coarseDistances, Tensor<idx_t, 2, true>& coarseIndices, Tensor<float, 3, true>& ivfCentroids, int k, Tensor<float, 2, true>& outDistances, Tensor<idx_t, 2, true>& outIndices, bool storePairs) { FAISS_ASSERT(storePairs == false); auto stream = resources_->getDefaultStreamCurrentDevice(); if (interleavedLayout_) { runIVFInterleavedScan( queries, coarseIndices, deviceListDataPointers_, deviceListIndexPointers_, indicesOptions_, deviceListLengths_, k, metric_, useResidual_, ivfCentroids, scalarQ_.get(), outDistances, outIndices, resources_); } else { runIVFFlatScan( queries, coarseIndices, deviceListDataPointers_, deviceListIndexPointers_, indicesOptions_, deviceListLengths_, maxListLength_, k, metric_, useResidual_, ivfCentroids, scalarQ_.get(), outDistances, outIndices, resources_); } // If the GPU isn't storing indices (they are on the CPU side), we // need to perform the re-mapping here // FIXME: we might ultimately be calling this function with inputs // from the CPU, these are unnecessary copies if (indicesOptions_ == INDICES_CPU) { HostTensor<idx_t, 2, true> hostOutIndices(outIndices, stream); ivfOffsetToUserIndex( hostOutIndices.data(), numLists_, hostOutIndices.getSize(0), hostOutIndices.getSize(1), listOffsetToUserIndex_); // Copy back to GPU, since the input to this function is on the // GPU outIndices.copyFrom(hostOutIndices, stream); } } } // namespace gpu } // namespace faiss
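The IVFFlat implementation above sits behind faiss's public GPU index classes. A minimal sketch of driving it through that public API (assumed usage for a recent faiss release; sizes and data here are placeholders) looks roughly like:

#include <faiss/gpu/StandardGpuResources.h>
#include <faiss/gpu/GpuIndexIVFFlat.h>
#include <vector>

int main() {
    int d = 64, nlist = 100, nb = 10000, nq = 5, k = 4;
    std::vector<float> xb((size_t)nb * d, 0.5f);   // placeholder database/training vectors
    std::vector<float> xq((size_t)nq * d, 0.5f);   // placeholder query vectors

    faiss::gpu::StandardGpuResources res;
    faiss::gpu::GpuIndexIVFFlat index(&res, d, nlist, faiss::METRIC_L2);

    index.train(nb, xb.data());                    // trains the coarse quantizer (IVF centroids)
    index.add(nb, xb.data());                      // encodes vectors into the IVF lists

    std::vector<float> distances((size_t)nq * k);
    std::vector<faiss::idx_t> labels((size_t)nq * k);
    index.search(nq, xq.data(), k, distances.data(), labels.data());
    return 0;
}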
3cdbe39d95e79cc8eb478a74ae18ebb88db987ee.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cu_setAll.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *A = NULL;
            hipMalloc(&A, XSIZE*YSIZE);
            const float val = 1;
            const int n = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( cu_setAll), dim3(gridBlock), dim3(threadBlock), 0, 0, A, val, n);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( cu_setAll), dim3(gridBlock), dim3(threadBlock), 0, 0, A, val, n);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( cu_setAll), dim3(gridBlock), dim3(threadBlock), 0, 0, A, val, n);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
3cdbe39d95e79cc8eb478a74ae18ebb88db987ee.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cu_setAll.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *A = NULL;
            cudaMalloc(&A, XSIZE*YSIZE);
            const float val = 1;
            const int n = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            cu_setAll<<<gridBlock,threadBlock>>>(A, val, n);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                cu_setAll<<<gridBlock,threadBlock>>>(A, val, n);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                cu_setAll<<<gridBlock,threadBlock>>>(A, val, n);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
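The benchmark includes "cu_setAll.cu", which is not reproduced in this collection. Judging only from the call cu_setAll<<<gridBlock,threadBlock>>>(A, val, n), a plausible definition would be a simple element-set kernel; the version below is a hypothetical reconstruction for illustration, not the actual file.

// Hypothetical reconstruction: sets the first n elements of A to val,
// flattening the 2D grid/block launch used by the benchmark into a 1D index.
__global__ void cu_setAll(float* A, const float val, const int n) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int i = y * gridDim.x * blockDim.x + x;
    if (i < n) {
        A[i] = val;
    }
}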