Dataset columns (all string-typed; values give the minimum and maximum string length per column):

hip_filename: length 5 to 84
hip_content: length 79 to 9.69M
cuda_filename: length 4 to 83
cuda_content: length 19 to 9.69M
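Each record pairs a hipify-generated HIP translation with its CUDA original; in the example rows below the two filenames share the same stem and differ only in extension (`.hip` vs `.cu`). As a minimal sketch, here is one record mapped onto a host-side type. The struct name, field comments, and the idea of a loader type at all are illustrative assumptions, not part of any published API:

```cuda
#include <string>

// Illustrative layout of a single dataset record (hypothetical type name).
// hip_content is the hipify output corresponding to cuda_content.
struct HipifyPair {
    std::string hip_filename;   // e.g. "<hash>.hip", 5 to 84 characters
    std::string hip_content;    // full HIP source, 79 characters to ~9.69M
    std::string cuda_filename;  // e.g. "<hash>.cu", 4 to 83 characters
    std::string cuda_content;   // full CUDA source, 19 characters to ~9.69M
};
```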
49038b15eabae7c7629f2e8a1e06352fcd9502ec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <stdint.h> #include <tdp/cuda/cuda.h> #include <assert.h> #include <tdp/nvidia/helper_cuda.h> #include <tdp/preproc/convolutionSeparable.h> //////////////////////////////////////////////////////////////////////////////// // Convolution kernel storage //////////////////////////////////////////////////////////////////////////////// __constant__ float c_Kernel[KERNEL_LENGTH]; void setConvolutionKernel(float *h_Kernel) { hipMemcpyToSymbol(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float)); } //////////////////////////////////////////////////////////////////////////////// // Row convolution filter //////////////////////////////////////////////////////////////////////////////// #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 4 #define ROWS_RESULT_STEPS 8 #define ROWS_HALO_STEPS 1 __global__ void convolutionRowsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch ) { __shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; //Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X]; } //Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Load right halo #pragma unroll for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? 
d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } void convolutionRowsGPU( float *d_Dst, float *d_Src, int imageW, int imageH ) { assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0); assert(imageH % ROWS_BLOCKDIM_Y == 0); dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); hipLaunchKernelGGL(( convolutionRowsKernel), dim3(blocks), dim3(threads), 0, 0, d_Dst, d_Src, imageW, imageH, imageW ); //getLastCudaError("convolutionRowsKernel() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Column convolution filter //////////////////////////////////////////////////////////////////////////////// #define COLUMNS_BLOCKDIM_X 16 #define COLUMNS_BLOCKDIM_Y 8 #define COLUMNS_RESULT_STEPS 8 #define COLUMNS_HALO_STEPS 1 __global__ void convolutionColumnsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch ) { __shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1]; //Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Main data #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; } //Upper halo #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Lower halo #pragma unroll for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]; } d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; } } void convolutionColumnsGPU( float *d_Dst, float *d_Src, int imageW, int imageH ) { assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % COLUMNS_BLOCKDIM_X == 0); assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0); dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y); hipLaunchKernelGGL(( convolutionColumnsKernel), dim3(blocks), dim3(threads), 0, 0, d_Dst, d_Src, imageW, imageH, imageW ); //getLastCudaError("convolutionColumnsKernel() execution failed\n"); }
49038b15eabae7c7629f2e8a1e06352fcd9502ec.cu
/* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <stdint.h> #include <tdp/cuda/cuda.h> #include <assert.h> #include <tdp/nvidia/helper_cuda.h> #include <tdp/preproc/convolutionSeparable.h> //////////////////////////////////////////////////////////////////////////////// // Convolution kernel storage //////////////////////////////////////////////////////////////////////////////// __constant__ float c_Kernel[KERNEL_LENGTH]; void setConvolutionKernel(float *h_Kernel) { cudaMemcpyToSymbol(c_Kernel, h_Kernel, KERNEL_LENGTH * sizeof(float)); } //////////////////////////////////////////////////////////////////////////////// // Row convolution filter //////////////////////////////////////////////////////////////////////////////// #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 4 #define ROWS_RESULT_STEPS 8 #define ROWS_HALO_STEPS 1 __global__ void convolutionRowsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch ) { __shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; //Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X]; } //Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Load right halo #pragma unroll for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? 
d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } void convolutionRowsGPU( float *d_Dst, float *d_Src, int imageW, int imageH ) { assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0); assert(imageH % ROWS_BLOCKDIM_Y == 0); dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); convolutionRowsKernel<<<blocks, threads>>>( d_Dst, d_Src, imageW, imageH, imageW ); //getLastCudaError("convolutionRowsKernel() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Column convolution filter //////////////////////////////////////////////////////////////////////////////// #define COLUMNS_BLOCKDIM_X 16 #define COLUMNS_BLOCKDIM_Y 8 #define COLUMNS_RESULT_STEPS 8 #define COLUMNS_HALO_STEPS 1 __global__ void convolutionColumnsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch ) { __shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1]; //Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Main data #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; } //Upper halo #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Lower halo #pragma unroll for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]; } d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; } } void convolutionColumnsGPU( float *d_Dst, float *d_Src, int imageW, int imageH ) { assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= KERNEL_RADIUS); assert(imageW % COLUMNS_BLOCKDIM_X == 0); assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0); dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y); convolutionColumnsKernel<<<blocks, threads>>>( d_Dst, d_Src, imageW, imageH, imageW ); //getLastCudaError("convolutionColumnsKernel() execution failed\n"); }
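Aside from the hipify banner and the runtime header, the differences between this pair are mechanical: `cudaMemcpyToSymbol` becomes `hipMemcpyToSymbol` and each triple-chevron launch becomes a `hipLaunchKernelGGL` call. Below is a minimal sketch of that launch rewrite on a dummy kernel; the kernel, names, and launch shape are placeholders, not taken from the files:

```cuda
#include <hip/hip_runtime.h>

__global__ void scaleKernel(float *data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

void launchScale(float *d_data, int n) {
    dim3 blocks((n + 255) / 256), threads(256);
    // CUDA form, as it appears in the .cu column:
    //   scaleKernel<<<blocks, threads, 0, 0>>>(d_data, 2.0f, n);
    // HIP form emitted by hipify, as it appears in the .hip column:
    hipLaunchKernelGGL(scaleKernel, dim3(blocks), dim3(threads), 0 /*shared mem*/, 0 /*stream*/,
                       d_data, 2.0f, n);
}
```

The grid/block dimensions, dynamic shared-memory size, and stream move into the first five arguments of `hipLaunchKernelGGL`; the kernel arguments follow unchanged.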
3f840527d3a1f88bb97513ac79da57359dcd2b92.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * FILE: debug.cu * PURPOSE: Contain functions used to keep track of CUDA functions errors. * NAME: Vuong Pham-Duy * Faculty of Computer Science and Technology * Ho Chi Minh University of Technology, Viet Nam * [email protected] * DATE: 11/10/2016 *******************************************************************************/ #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> /****************************************************************************** * Macro: gpuErrchk * Purpose: a simple wrap macro used to detect gpu errors. ******************************************************************************/ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } /****************************************************************************** * Function: write_debug_file * Purpose: These function write an array of unsigned char or short int to a file * whose its absolute path is defined by DEBUGFILE. "debug.txt" will show us some * infomation about the errors that happened. ******************************************************************************/ int write_debug_file(char *outfilename, short int *image, int rows, int cols) { FILE *fp; errno_t err; /*************************************************************************** * Open the output image file for writing if a filename was given. If no * filename was provided, set fp to write to standard output. ***************************************************************************/ if (outfilename == NULL) fp = stdout; else{ if ((err = fopen_s(&fp, outfilename, "w")) != 0){ fprintf(stderr, "Error writing the file %s in write2file(): %d \n", outfilename, err); return(0); } } /*************************************************************************** * Write the image data to the file. ***************************************************************************/ int i, j; for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) fprintf(fp, "%10d|", image[i * cols + j]); fprintf(fp, "\n"); } printf("Finish writting %d lines and %d cols to debug.txt\n", i, j); if (fp != stdout) fclose(fp); return(1); } int write_debug_file(char *outfilename, unsigned char *image, int rows, int cols) { FILE *fp; errno_t err; /*************************************************************************** * Open the output image file for writing if a filename was given. If no * filename was provided, set fp to write to standard output. ***************************************************************************/ if (outfilename == NULL) fp = stdout; else{ if ((err = fopen_s(&fp, outfilename, "w")) != 0){ fprintf(stderr, "Error writing the file %s in write2file(): %d \n", outfilename, err); return(0); } } /*************************************************************************** * Write the image data to the file. ***************************************************************************/ int i, j; for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) fprintf(fp, "%3u|", image[i * cols + j]); fprintf(fp, "\n"); } printf("Finish writting %d lines to debug.txt\n", i); if (fp != stdout) fclose(fp); return(1); }
3f840527d3a1f88bb97513ac79da57359dcd2b92.cu
/******************************************************************************* * FILE: debug.cu * PURPOSE: Contain functions used to keep track of CUDA functions errors. * NAME: Vuong Pham-Duy * Faculty of Computer Science and Technology * Ho Chi Minh University of Technology, Viet Nam * [email protected] * DATE: 11/10/2016 *******************************************************************************/ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> /****************************************************************************** * Macro: gpuErrchk * Purpose: a simple wrap macro used to detect gpu errors. ******************************************************************************/ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } /****************************************************************************** * Function: write_debug_file * Purpose: These function write an array of unsigned char or short int to a file * whose its absolute path is defined by DEBUGFILE. "debug.txt" will show us some * infomation about the errors that happened. ******************************************************************************/ int write_debug_file(char *outfilename, short int *image, int rows, int cols) { FILE *fp; errno_t err; /*************************************************************************** * Open the output image file for writing if a filename was given. If no * filename was provided, set fp to write to standard output. ***************************************************************************/ if (outfilename == NULL) fp = stdout; else{ if ((err = fopen_s(&fp, outfilename, "w")) != 0){ fprintf(stderr, "Error writing the file %s in write2file(): %d \n", outfilename, err); return(0); } } /*************************************************************************** * Write the image data to the file. ***************************************************************************/ int i, j; for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) fprintf(fp, "%10d|", image[i * cols + j]); fprintf(fp, "\n"); } printf("Finish writting %d lines and %d cols to debug.txt\n", i, j); if (fp != stdout) fclose(fp); return(1); } int write_debug_file(char *outfilename, unsigned char *image, int rows, int cols) { FILE *fp; errno_t err; /*************************************************************************** * Open the output image file for writing if a filename was given. If no * filename was provided, set fp to write to standard output. ***************************************************************************/ if (outfilename == NULL) fp = stdout; else{ if ((err = fopen_s(&fp, outfilename, "w")) != 0){ fprintf(stderr, "Error writing the file %s in write2file(): %d \n", outfilename, err); return(0); } } /*************************************************************************** * Write the image data to the file. ***************************************************************************/ int i, j; for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) fprintf(fp, "%3u|", image[i * cols + j]); fprintf(fp, "\n"); } printf("Finish writting %d lines to debug.txt\n", i); if (fp != stdout) fclose(fp); return(1); }
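In this pair hipify only swaps the error type and runtime calls inside `gpuAssert`; the `gpuErrchk` macro itself is unchanged. A short usage sketch of that wrapper on the CUDA side follows; the buffer name and size are made up for illustration:

```cuda
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Same wrapper as in the .cu above: report file/line and abort on any runtime error.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

int main() {
    float *d_buf = nullptr;
    gpuErrchk(cudaMalloc(&d_buf, 1024 * sizeof(float)));   // wrap every runtime call
    gpuErrchk(cudaMemset(d_buf, 0, 1024 * sizeof(float)));
    gpuErrchk(cudaFree(d_buf));
    return 0;
}
```

Note that the `write_debug_file` helpers in this pair also rely on `fopen_s` and `errno_t`, which not every toolchain provides, so the record is Windows-oriented as written.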
97b119155e8d8ad72663bd0868f48e2aed981ea6.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "scc_matrix.cuh" #include "weak_cc_hip.cuh" #include <thrust/sequence.h> #include <cstdint> #include <cugraph/algorithms.hpp> #include <cugraph/graph.hpp> #include <cugraph/utilities/error.hpp> #include <iostream> #include <type_traits> #include <utilities/graph_utils.cuh> #include "topology/topology.cuh" namespace cugraph { namespace detail { /** * @brief Compute connected components. * The weak version (for undirected graphs, only) was imported from cuML. * This implementation comes from [1] and solves component labeling problem in * parallel on CSR-indexes based upon the vertex degree and adjacency graph. * * [1] Hawick, K.A et al, 2010. "Parallel graph component labelling with GPUs and CUDA" * * The strong version (for directed or undirected graphs) is based on: * [2] Gilbert, J. et al, 2011. "Graph Algorithms in the Language of Linear Algebra" * * C = I | A | A^2 |...| A^k * where matrix multiplication is via semi-ring: * (combine, reduce) == (&, |) (bitwise ops) * Then: X = C & transpose(C); and finally, apply get_labels(X); * * * @tparam IndexT the numeric type of non-floating point elements * @tparam TPB_X the threads to use per block when configuring the kernel * @param graph input graph; assumed undirected for weakly CC [in] * @param connectivity_type CUGRAPH_WEAK or CUGRAPH_STRONG [in] * @param stream the cuda stream [in] */ template <typename VT, typename ET, typename WT, int TPB_X = 32> std::enable_if_t<std::is_signed<VT>::value> connected_components_impl( GraphCSRView<VT, ET, WT> const &graph, cugraph_cc_t connectivity_type, VT *labels, hipStream_t stream) { using ByteT = unsigned char; // minimum addressable unit CUGRAPH_EXPECTS(graph.offsets != nullptr, "Invalid input argument: graph.offsets is nullptr"); CUGRAPH_EXPECTS(graph.indices != nullptr, "Invalid input argument: graph.indices is nullptr"); VT nrows = graph.number_of_vertices; if (connectivity_type == cugraph_cc_t::CUGRAPH_WEAK) { MLCommon::Sparse::weak_cc_entry<VT, ET, TPB_X>(labels, graph.offsets, graph.indices, graph.number_of_edges, graph.number_of_vertices, stream); } else { SCC_Data<ByteT, VT> sccd(nrows, graph.offsets, graph.indices); auto num_iters = sccd.run_scc(labels); } } } // namespace detail template <typename VT, typename ET, typename WT> void connected_components(GraphCSRView<VT, ET, WT> const &graph, cugraph_cc_t connectivity_type, VT *labels) { hipStream_t stream{nullptr}; CUGRAPH_EXPECTS(labels != nullptr, "Invalid input argument: labels parameter is NULL"); return detail::connected_components_impl<VT, ET, WT>(graph, connectivity_type, labels, stream); } template void connected_components<int32_t, int32_t, float>( GraphCSRView<int32_t, int32_t, float> const &, cugraph_cc_t, int32_t *); template void connected_components<int64_t, int64_t, float>( GraphCSRView<int64_t, int64_t, float> const &, cugraph_cc_t, int64_t *); } // namespace cugraph
97b119155e8d8ad72663bd0868f48e2aed981ea6.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "scc_matrix.cuh" #include "weak_cc.cuh" #include <thrust/sequence.h> #include <cstdint> #include <cugraph/algorithms.hpp> #include <cugraph/graph.hpp> #include <cugraph/utilities/error.hpp> #include <iostream> #include <type_traits> #include <utilities/graph_utils.cuh> #include "topology/topology.cuh" namespace cugraph { namespace detail { /** * @brief Compute connected components. * The weak version (for undirected graphs, only) was imported from cuML. * This implementation comes from [1] and solves component labeling problem in * parallel on CSR-indexes based upon the vertex degree and adjacency graph. * * [1] Hawick, K.A et al, 2010. "Parallel graph component labelling with GPUs and CUDA" * * The strong version (for directed or undirected graphs) is based on: * [2] Gilbert, J. et al, 2011. "Graph Algorithms in the Language of Linear Algebra" * * C = I | A | A^2 |...| A^k * where matrix multiplication is via semi-ring: * (combine, reduce) == (&, |) (bitwise ops) * Then: X = C & transpose(C); and finally, apply get_labels(X); * * * @tparam IndexT the numeric type of non-floating point elements * @tparam TPB_X the threads to use per block when configuring the kernel * @param graph input graph; assumed undirected for weakly CC [in] * @param connectivity_type CUGRAPH_WEAK or CUGRAPH_STRONG [in] * @param stream the cuda stream [in] */ template <typename VT, typename ET, typename WT, int TPB_X = 32> std::enable_if_t<std::is_signed<VT>::value> connected_components_impl( GraphCSRView<VT, ET, WT> const &graph, cugraph_cc_t connectivity_type, VT *labels, cudaStream_t stream) { using ByteT = unsigned char; // minimum addressable unit CUGRAPH_EXPECTS(graph.offsets != nullptr, "Invalid input argument: graph.offsets is nullptr"); CUGRAPH_EXPECTS(graph.indices != nullptr, "Invalid input argument: graph.indices is nullptr"); VT nrows = graph.number_of_vertices; if (connectivity_type == cugraph_cc_t::CUGRAPH_WEAK) { MLCommon::Sparse::weak_cc_entry<VT, ET, TPB_X>(labels, graph.offsets, graph.indices, graph.number_of_edges, graph.number_of_vertices, stream); } else { SCC_Data<ByteT, VT> sccd(nrows, graph.offsets, graph.indices); auto num_iters = sccd.run_scc(labels); } } } // namespace detail template <typename VT, typename ET, typename WT> void connected_components(GraphCSRView<VT, ET, WT> const &graph, cugraph_cc_t connectivity_type, VT *labels) { cudaStream_t stream{nullptr}; CUGRAPH_EXPECTS(labels != nullptr, "Invalid input argument: labels parameter is NULL"); return detail::connected_components_impl<VT, ET, WT>(graph, connectivity_type, labels, stream); } template void connected_components<int32_t, int32_t, float>( GraphCSRView<int32_t, int32_t, float> const &, cugraph_cc_t, int32_t *); template void connected_components<int64_t, int64_t, float>( GraphCSRView<int64_t, int64_t, float> const &, cugraph_cc_t, int64_t *); } // namespace cugraph
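The doc comment above describes the strong-components path as a boolean matrix closure: C = I | A | A^2 | ... over the (&, |) semiring, then X = C & transpose(C), then a labeling pass. The following is a toy host-side illustration of that formula on a 4-vertex graph; it is not the SCC_Data GPU implementation, and every name in it is made up:

```cuda
#include <cstdio>

// Toy illustration of: C = I | A | A^2 | ...  (product over the (&, |) semiring),
// then X = C & transpose(C); mutual reachability in X defines the component labels.
const int N = 4;

static void boolMul(const int a[N][N], const int b[N][N], int r[N][N]) {
    for (int i = 0; i < N; ++i)
        for (int j = 0; j < N; ++j) {
            r[i][j] = 0;
            for (int k = 0; k < N; ++k)
                r[i][j] |= a[i][k] & b[k][j];        // (combine, reduce) == (&, |)
        }
}

int main() {
    // Directed edges: 0->1, 1->0 (a 2-cycle), 1->2, 2->3.
    int A[N][N] = {{0,1,0,0}, {1,0,1,0}, {0,0,0,1}, {0,0,0,0}};
    int C[N][N] = {0}, P[N][N], T[N][N];
    for (int i = 0; i < N; ++i) { C[i][i] = 1; for (int j = 0; j < N; ++j) P[i][j] = A[i][j]; }
    for (int step = 0; step < N - 1; ++step) {       // accumulate A^1 .. A^(N-1)
        for (int i = 0; i < N; ++i)
            for (int j = 0; j < N; ++j) C[i][j] |= P[i][j];
        boolMul(P, A, T);
        for (int i = 0; i < N; ++i)
            for (int j = 0; j < N; ++j) P[i][j] = T[i][j];
    }
    for (int i = 0; i < N; ++i)
        for (int j = 0; j < N; ++j) C[i][j] |= P[i][j];  // fold in the final power
    for (int i = 0; i < N; ++i) {                    // X[i][j] = C[i][j] & C[j][i]
        int label = i;
        for (int j = 0; j < N; ++j)
            if (C[i][j] & C[j][i]) { label = j; break; } // smallest mutually reachable vertex
        printf("vertex %d -> component %d\n", i, label);
    }
    return 0;   // expected: 0->0, 1->0, 2->2, 3->3
}
```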
082c8a40fd29f58db9c0b6795bba17b723cb10bd.hip
// !!! This is a file automatically generated by hipify!!! // includes CUDA Runtime #include <hip/hip_runtime.h> #include <cstdint> #include <cstdio> #include "cuda/ParallelHuffman/histogram.cuh" using uint8__t = uint8_t; __global__ void naiveHistogram(int input_data[], int output[], int N, int symbols_per_thread) { unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; unsigned int j; if (i * symbols_per_thread < N) { // if there is a symbol to count, for (j = i * symbols_per_thread; j < (i + 1) * symbols_per_thread; j++) { if (j < N) { unsigned int item = input_data[j]; // Symbol to count atomicAdd(&output[item], 1); // update bin count by 1 } } } } // const static unsigned int WARP_SIZE = 32; #define MIN(a, b) ((a) < (b)) ? (a) : (b) template <typename T, typename Q> __global__ void p2013Histogram(T *input_data, Q *output, size_t N, int bins, int R) { extern __shared__ int Hs[/*(bins + 1) * R*/]; const unsigned int warpid = (int)(threadIdx.x / WARP_SIZE); const unsigned int lane = threadIdx.x % WARP_SIZE; const unsigned int warps_block = blockDim.x / WARP_SIZE; const unsigned int off_rep = (bins + 1) * (threadIdx.x % R); const unsigned int begin = (N / warps_block) * warpid + WARP_SIZE * blockIdx.x + lane; unsigned int end = (N / warps_block) * (warpid + 1); const unsigned int step = WARP_SIZE * gridDim.x; // final warp handles data outside of the warps_block partitions if (warpid >= warps_block - 1) end = N; for (unsigned int pos = threadIdx.x; pos < (bins + 1) * R; pos += blockDim.x) Hs[pos] = 0; __syncthreads(); for (unsigned int i = begin; i < end; i += step) { int d = input_data[i]; atomicAdd(&Hs[off_rep + d], 1); } __syncthreads(); for (unsigned int pos = threadIdx.x; pos < bins; pos += blockDim.x) { int sum = 0; for (int base = 0; base < (bins + 1) * R; base += bins + 1) { sum += Hs[base + pos]; } atomicAdd(output + pos, sum); } } template __global__ void p2013Histogram<uint8__t, unsigned int>( uint8__t *input_data, unsigned int *output, size_t N, int bins, int R); template __global__ void p2013Histogram<uint16_t, unsigned int>( uint16_t *input_data, unsigned int *output, size_t N, int bins, int R); template __global__ void p2013Histogram<uint32_t, unsigned int>( uint32_t *input_data, unsigned int *output, size_t N, int bins, int R);
082c8a40fd29f58db9c0b6795bba17b723cb10bd.cu
// includes CUDA Runtime #include <cuda_runtime.h> #include <cstdint> #include <cstdio> #include "cuda/ParallelHuffman/histogram.cuh" using uint8__t = uint8_t; __global__ void naiveHistogram(int input_data[], int output[], int N, int symbols_per_thread) { unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; unsigned int j; if (i * symbols_per_thread < N) { // if there is a symbol to count, for (j = i * symbols_per_thread; j < (i + 1) * symbols_per_thread; j++) { if (j < N) { unsigned int item = input_data[j]; // Symbol to count atomicAdd(&output[item], 1); // update bin count by 1 } } } } // const static unsigned int WARP_SIZE = 32; #define MIN(a, b) ((a) < (b)) ? (a) : (b) template <typename T, typename Q> __global__ void p2013Histogram(T *input_data, Q *output, size_t N, int bins, int R) { extern __shared__ int Hs[/*(bins + 1) * R*/]; const unsigned int warpid = (int)(threadIdx.x / WARP_SIZE); const unsigned int lane = threadIdx.x % WARP_SIZE; const unsigned int warps_block = blockDim.x / WARP_SIZE; const unsigned int off_rep = (bins + 1) * (threadIdx.x % R); const unsigned int begin = (N / warps_block) * warpid + WARP_SIZE * blockIdx.x + lane; unsigned int end = (N / warps_block) * (warpid + 1); const unsigned int step = WARP_SIZE * gridDim.x; // final warp handles data outside of the warps_block partitions if (warpid >= warps_block - 1) end = N; for (unsigned int pos = threadIdx.x; pos < (bins + 1) * R; pos += blockDim.x) Hs[pos] = 0; __syncthreads(); for (unsigned int i = begin; i < end; i += step) { int d = input_data[i]; atomicAdd(&Hs[off_rep + d], 1); } __syncthreads(); for (unsigned int pos = threadIdx.x; pos < bins; pos += blockDim.x) { int sum = 0; for (int base = 0; base < (bins + 1) * R; base += bins + 1) { sum += Hs[base + pos]; } atomicAdd(output + pos, sum); } } template __global__ void p2013Histogram<uint8__t, unsigned int>( uint8__t *input_data, unsigned int *output, size_t N, int bins, int R); template __global__ void p2013Histogram<uint16_t, unsigned int>( uint16_t *input_data, unsigned int *output, size_t N, int bins, int R); template __global__ void p2013Histogram<uint32_t, unsigned int>( uint32_t *input_data, unsigned int *output, size_t N, int bins, int R);
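`p2013Histogram` declares `extern __shared__ int Hs[]` sized as `(bins + 1) * R` ints, so a launch must pass that amount of dynamic shared memory. Below is a hedged launch sketch; the grid/block shape, `R`, and the helper name are illustrative choices, not taken from the file, and the forward declaration simply mirrors the kernel defined in the `.cu` above:

```cuda
#include <cstdint>
#include <cstddef>

// Mirrors the kernel template defined in the .cu above; only the launch is new here.
template <typename T, typename Q>
__global__ void p2013Histogram(T *input_data, Q *output, size_t N, int bins, int R);

void runP2013Histogram(uint8_t *d_in, unsigned int *d_bins, size_t N) {
    const int bins = 256;                           // one bin per byte value
    const int R    = 2;                             // shared-memory replication factor
    dim3 grid(64), block(256);                      // 8 warps per block
    size_t shmem = (bins + 1) * R * sizeof(int);    // matches extern __shared__ int Hs[(bins+1)*R]
    p2013Histogram<uint8_t, unsigned int><<<grid, block, shmem>>>(d_in, d_bins, N, bins, R);
}
```

The padded `(bins + 1)` stride and the `R` replicated sub-histograms spread shared-memory atomics across copies, which the kernel's final loop then reduces into the global output bins.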
77accb7ff656e046fb825bc7a1f4f70440e23ca6.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2008-2013 NVIDIA Corporation * Modifications Copyright 2019 Advanced Micro Devices, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <unittest/unittest.h> #include <thrust/sequence.h> #include <thrust/device_malloc_allocator.h> #include <vector> #include <list> #include <limits> #include <utility> template <class Vector> void TestVectorZeroSize(void) { Vector v; ASSERT_EQUAL(v.size(), 0); ASSERT_EQUAL((v.begin() == v.end()), true); } DECLARE_VECTOR_UNITTEST(TestVectorZeroSize); void TestVectorBool(void) { thrust::host_vector<bool> h(3); thrust::device_vector<bool> d(3); h[0] = true; h[1] = false; h[2] = true; d[0] = true; d[1] = false; d[2] = true; ASSERT_EQUAL(h[0], true); ASSERT_EQUAL(h[1], false); ASSERT_EQUAL(h[2], true); ASSERT_EQUAL(d[0], true); ASSERT_EQUAL(d[1], false); ASSERT_EQUAL(d[2], true); } DECLARE_UNITTEST(TestVectorBool); template <class Vector> void TestVectorFrontBack(void) { Vector v(3); v[0] = 0; v[1] = 1; v[2] = 2; ASSERT_EQUAL(v.front(), 0); ASSERT_EQUAL(v.back(), 2); } DECLARE_VECTOR_UNITTEST(TestVectorFrontBack); template <class Vector> void TestVectorData(void) { Vector v(3); v[0] = 0; v[1] = 1; v[2] = 2; ASSERT_EQUAL(0, *v.data()); ASSERT_EQUAL(1, *(v.data() + 1)); ASSERT_EQUAL(2, *(v.data() + 2)); ASSERT_EQUAL(&v.front(), v.data()); ASSERT_EQUAL(&*v.begin(), v.data()); ASSERT_EQUAL(&v[0], v.data()); const Vector &c_v = v; ASSERT_EQUAL(0, *c_v.data()); ASSERT_EQUAL(1, *(c_v.data() + 1)); ASSERT_EQUAL(2, *(c_v.data() + 2)); ASSERT_EQUAL(&c_v.front(), c_v.data()); ASSERT_EQUAL(&*c_v.begin(), c_v.data()); ASSERT_EQUAL(&c_v[0], c_v.data()); } DECLARE_VECTOR_UNITTEST(TestVectorData); template <class Vector> void TestVectorElementAssignment(void) { Vector v(3); v[0] = 0; v[1] = 1; v[2] = 2; ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 2); v[0] = 10; v[1] = 11; v[2] = 12; ASSERT_EQUAL(v[0], 10); ASSERT_EQUAL(v[1], 11); ASSERT_EQUAL(v[2], 12); Vector w(3); w[0] = v[0]; w[1] = v[1]; w[2] = v[2]; ASSERT_EQUAL(v, w); } DECLARE_VECTOR_UNITTEST(TestVectorElementAssignment); template <class Vector> void TestVectorFromSTLVector(void) { typedef typename Vector::value_type T; std::vector<T> stl_vector(3); stl_vector[0] = 0; stl_vector[1] = 1; stl_vector[2] = 2; thrust::host_vector<T> v(stl_vector); ASSERT_EQUAL(v.size(), 3); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 2); v = stl_vector; ASSERT_EQUAL(v.size(), 3); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 2); } DECLARE_VECTOR_UNITTEST(TestVectorFromSTLVector); template <class Vector> void TestVectorFillAssign(void) { typedef typename Vector::value_type T; thrust::host_vector<T> v; v.assign(3, 13); ASSERT_EQUAL(v.size(), 3); ASSERT_EQUAL(v[0], 13); ASSERT_EQUAL(v[1], 13); ASSERT_EQUAL(v[2], 13); } DECLARE_VECTOR_UNITTEST(TestVectorFillAssign); template <class Vector> void TestVectorAssignFromSTLVector(void) { typedef typename Vector::value_type T; std::vector<T> 
stl_vector(3); stl_vector[0] = 0; stl_vector[1] = 1; stl_vector[2] = 2; thrust::host_vector<T> v; v.assign(stl_vector.begin(), stl_vector.end()); ASSERT_EQUAL(v.size(), 3); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 2); } DECLARE_VECTOR_UNITTEST(TestVectorAssignFromSTLVector); template <class Vector> void TestVectorFromBiDirectionalIterator(void) { typedef typename Vector::value_type T; std::list<T> stl_list; stl_list.push_back(0); stl_list.push_back(1); stl_list.push_back(2); thrust::host_vector<int> v(stl_list.begin(), stl_list.end()); ASSERT_EQUAL(v.size(), 3); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 2); } DECLARE_VECTOR_UNITTEST(TestVectorFromBiDirectionalIterator); template <class Vector> void TestVectorAssignFromBiDirectionalIterator(void) { typedef typename Vector::value_type T; std::list<T> stl_list; stl_list.push_back(0); stl_list.push_back(1); stl_list.push_back(2); Vector v; v.assign(stl_list.begin(), stl_list.end()); ASSERT_EQUAL(v.size(), 3); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 2); } DECLARE_VECTOR_UNITTEST(TestVectorAssignFromBiDirectionalIterator); template <class Vector> void TestVectorAssignFromHostVector(void) { typedef typename Vector::value_type T; thrust::host_vector<T> h(3); h[0] = 0; h[1] = 1; h[2] = 2; Vector v; v.assign(h.begin(), h.end()); ASSERT_EQUAL(v, h); } DECLARE_VECTOR_UNITTEST(TestVectorAssignFromHostVector); template <class Vector> void TestVectorToAndFromHostVector(void) { typedef typename Vector::value_type T; thrust::host_vector<T> h(3); h[0] = 0; h[1] = 1; h[2] = 2; Vector v(h); ASSERT_EQUAL(v, h); v[0] = 10; v[1] = 11; v[2] = 12; ASSERT_EQUAL(h[0], 0); ASSERT_EQUAL(v[0], 10); ASSERT_EQUAL(h[1], 1); ASSERT_EQUAL(v[1], 11); ASSERT_EQUAL(h[2], 2); ASSERT_EQUAL(v[2], 12); h = v; ASSERT_EQUAL(v, h); h[1] = 11; v = h; ASSERT_EQUAL(v, h); } DECLARE_VECTOR_UNITTEST(TestVectorToAndFromHostVector); template <class Vector> void TestVectorAssignFromDeviceVector(void) { typedef typename Vector::value_type T; thrust::device_vector<T> d(3); d[0] = 0; d[1] = 1; d[2] = 2; Vector v; v.assign(d.begin(), d.end()); ASSERT_EQUAL(v, d); } DECLARE_VECTOR_UNITTEST(TestVectorAssignFromDeviceVector); template <class Vector> void TestVectorToAndFromDeviceVector(void) { typedef typename Vector::value_type T; thrust::device_vector<T> h(3); h[0] = 0; h[1] = 1; h[2] = 2; Vector v(h); ASSERT_EQUAL(v, h); v[0] = 10; v[1] = 11; v[2] = 12; ASSERT_EQUAL(h[0], 0); ASSERT_EQUAL(v[0], 10); ASSERT_EQUAL(h[1], 1); ASSERT_EQUAL(v[1], 11); ASSERT_EQUAL(h[2], 2); ASSERT_EQUAL(v[2], 12); h = v; ASSERT_EQUAL(v, h); h[1] = 11; v = h; ASSERT_EQUAL(v, h); } DECLARE_VECTOR_UNITTEST(TestVectorToAndFromDeviceVector); template <class Vector> void TestVectorWithInitialValue(void) { typedef typename Vector::value_type T; const T init = 17; Vector v(3, init); ASSERT_EQUAL(v.size(), 3); ASSERT_EQUAL(v[0], init); ASSERT_EQUAL(v[1], init); ASSERT_EQUAL(v[2], init); } DECLARE_VECTOR_UNITTEST(TestVectorWithInitialValue); template <class Vector> void TestVectorSwap(void) { Vector v(3); v[0] = 0; v[1] = 1; v[2] = 2; Vector u(3); u[0] = 10; u[1] = 11; u[2] = 12; v.swap(u); ASSERT_EQUAL(v[0], 10); ASSERT_EQUAL(u[0], 0); ASSERT_EQUAL(v[1], 11); ASSERT_EQUAL(u[1], 1); ASSERT_EQUAL(v[2], 12); ASSERT_EQUAL(u[2], 2); } DECLARE_VECTOR_UNITTEST(TestVectorSwap); template <class Vector> void TestVectorErasePosition(void) { Vector v(5); v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4; v.erase(v.begin() + 2); ASSERT_EQUAL(v.size(), 4); 
ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 3); ASSERT_EQUAL(v[3], 4); v.erase(v.begin() + 0); ASSERT_EQUAL(v.size(), 3); ASSERT_EQUAL(v[0], 1); ASSERT_EQUAL(v[1], 3); ASSERT_EQUAL(v[2], 4); v.erase(v.begin() + 2); ASSERT_EQUAL(v.size(), 2); ASSERT_EQUAL(v[0], 1); ASSERT_EQUAL(v[1], 3); v.erase(v.begin() + 1); ASSERT_EQUAL(v.size(), 1); ASSERT_EQUAL(v[0], 1); v.erase(v.begin() + 0); ASSERT_EQUAL(v.size(), 0); } DECLARE_VECTOR_UNITTEST(TestVectorErasePosition); template <class Vector> void TestVectorEraseRange(void) { Vector v(6); v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4; v[5] = 5; v.erase(v.begin() + 1, v.begin() + 3); ASSERT_EQUAL(v.size(), 4); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 3); ASSERT_EQUAL(v[2], 4); ASSERT_EQUAL(v[3], 5); v.erase(v.begin() + 2, v.end()); ASSERT_EQUAL(v.size(), 2); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 3); v.erase(v.begin() + 0, v.begin() + 1); ASSERT_EQUAL(v.size(), 1); ASSERT_EQUAL(v[0], 3); v.erase(v.begin(), v.end()); ASSERT_EQUAL(v.size(), 0); } DECLARE_VECTOR_UNITTEST(TestVectorEraseRange); void TestVectorEquality(void) { thrust::host_vector<int> h_a(3); thrust::host_vector<int> h_b(3); thrust::host_vector<int> h_c(3); h_a[0] = 0; h_a[1] = 1; h_a[2] = 2; h_b[0] = 0; h_b[1] = 1; h_b[2] = 3; h_b[0] = 0; h_b[1] = 1; thrust::device_vector<int> d_a(3); thrust::device_vector<int> d_b(3); thrust::device_vector<int> d_c(3); d_a[0] = 0; d_a[1] = 1; d_a[2] = 2; d_b[0] = 0; d_b[1] = 1; d_b[2] = 3; d_b[0] = 0; d_b[1] = 1; std::vector<int> s_a(3); std::vector<int> s_b(3); std::vector<int> s_c(3); s_a[0] = 0; s_a[1] = 1; s_a[2] = 2; s_b[0] = 0; s_b[1] = 1; s_b[2] = 3; s_b[0] = 0; s_b[1] = 1; ASSERT_EQUAL((h_a == h_a), true); ASSERT_EQUAL((h_a == d_a), true); ASSERT_EQUAL((d_a == h_a), true); ASSERT_EQUAL((d_a == d_a), true); ASSERT_EQUAL((h_b == h_b), true); ASSERT_EQUAL((h_b == d_b), true); ASSERT_EQUAL((d_b == h_b), true); ASSERT_EQUAL((d_b == d_b), true); ASSERT_EQUAL((h_c == h_c), true); ASSERT_EQUAL((h_c == d_c), true); ASSERT_EQUAL((d_c == h_c), true); ASSERT_EQUAL((d_c == d_c), true); // test vector vs device_vector ASSERT_EQUAL((s_a == d_a), true); ASSERT_EQUAL((d_a == s_a), true); ASSERT_EQUAL((s_b == d_b), true); ASSERT_EQUAL((d_b == s_b), true); ASSERT_EQUAL((s_c == d_c), true); ASSERT_EQUAL((d_c == s_c), true); // test vector vs host_vector ASSERT_EQUAL((s_a == h_a), true); ASSERT_EQUAL((h_a == s_a), true); ASSERT_EQUAL((s_b == h_b), true); ASSERT_EQUAL((h_b == s_b), true); ASSERT_EQUAL((s_c == h_c), true); ASSERT_EQUAL((h_c == s_c), true); ASSERT_EQUAL((h_a == h_b), false); ASSERT_EQUAL((h_a == d_b), false); ASSERT_EQUAL((d_a == h_b), false); ASSERT_EQUAL((d_a == d_b), false); ASSERT_EQUAL((h_b == h_a), false); ASSERT_EQUAL((h_b == d_a), false); ASSERT_EQUAL((d_b == h_a), false); ASSERT_EQUAL((d_b == d_a), false); ASSERT_EQUAL((h_a == h_c), false); ASSERT_EQUAL((h_a == d_c), false); ASSERT_EQUAL((d_a == h_c), false); ASSERT_EQUAL((d_a == d_c), false); ASSERT_EQUAL((h_c == h_a), false); ASSERT_EQUAL((h_c == d_a), false); ASSERT_EQUAL((d_c == h_a), false); ASSERT_EQUAL((d_c == d_a), false); ASSERT_EQUAL((h_b == h_c), false); ASSERT_EQUAL((h_b == d_c), false); ASSERT_EQUAL((d_b == h_c), false); ASSERT_EQUAL((d_b == d_c), false); ASSERT_EQUAL((h_c == h_b), false); ASSERT_EQUAL((h_c == d_b), false); ASSERT_EQUAL((d_c == h_b), false); ASSERT_EQUAL((d_c == d_b), false); // test vector vs device_vector ASSERT_EQUAL((s_a == d_b), false); ASSERT_EQUAL((d_a == s_b), false); ASSERT_EQUAL((s_b == d_a), false); ASSERT_EQUAL((d_b == 
s_a), false); ASSERT_EQUAL((s_a == d_c), false); ASSERT_EQUAL((d_a == s_c), false); ASSERT_EQUAL((s_c == d_a), false); ASSERT_EQUAL((d_c == s_a), false); ASSERT_EQUAL((s_b == d_c), false); ASSERT_EQUAL((d_b == s_c), false); ASSERT_EQUAL((s_c == d_b), false); ASSERT_EQUAL((d_c == s_b), false); // test vector vs host_vector ASSERT_EQUAL((s_a == h_b), false); ASSERT_EQUAL((h_a == s_b), false); ASSERT_EQUAL((s_b == h_a), false); ASSERT_EQUAL((h_b == s_a), false); ASSERT_EQUAL((s_a == h_c), false); ASSERT_EQUAL((h_a == s_c), false); ASSERT_EQUAL((s_c == h_a), false); ASSERT_EQUAL((h_c == s_a), false); ASSERT_EQUAL((s_b == h_c), false); ASSERT_EQUAL((h_b == s_c), false); ASSERT_EQUAL((s_c == h_b), false); ASSERT_EQUAL((h_c == s_b), false); } DECLARE_UNITTEST(TestVectorEquality); void TestVectorInequality(void) { thrust::host_vector<int> h_a(3); thrust::host_vector<int> h_b(3); thrust::host_vector<int> h_c(3); h_a[0] = 0; h_a[1] = 1; h_a[2] = 2; h_b[0] = 0; h_b[1] = 1; h_b[2] = 3; h_b[0] = 0; h_b[1] = 1; thrust::device_vector<int> d_a(3); thrust::device_vector<int> d_b(3); thrust::device_vector<int> d_c(3); d_a[0] = 0; d_a[1] = 1; d_a[2] = 2; d_b[0] = 0; d_b[1] = 1; d_b[2] = 3; d_b[0] = 0; d_b[1] = 1; std::vector<int> s_a(3); std::vector<int> s_b(3); std::vector<int> s_c(3); s_a[0] = 0; s_a[1] = 1; s_a[2] = 2; s_b[0] = 0; s_b[1] = 1; s_b[2] = 3; s_b[0] = 0; s_b[1] = 1; ASSERT_EQUAL((h_a != h_a), false); ASSERT_EQUAL((h_a != d_a), false); ASSERT_EQUAL((d_a != h_a), false); ASSERT_EQUAL((d_a != d_a), false); ASSERT_EQUAL((h_b != h_b), false); ASSERT_EQUAL((h_b != d_b), false); ASSERT_EQUAL((d_b != h_b), false); ASSERT_EQUAL((d_b != d_b), false); ASSERT_EQUAL((h_c != h_c), false); ASSERT_EQUAL((h_c != d_c), false); ASSERT_EQUAL((d_c != h_c), false); ASSERT_EQUAL((d_c != d_c), false); // test vector vs device_vector ASSERT_EQUAL((s_a != d_a), false); ASSERT_EQUAL((d_a != s_a), false); ASSERT_EQUAL((s_b != d_b), false); ASSERT_EQUAL((d_b != s_b), false); ASSERT_EQUAL((s_c != d_c), false); ASSERT_EQUAL((d_c != s_c), false); // test vector vs host_vector ASSERT_EQUAL((s_a != h_a), false); ASSERT_EQUAL((h_a != s_a), false); ASSERT_EQUAL((s_b != h_b), false); ASSERT_EQUAL((h_b != s_b), false); ASSERT_EQUAL((s_c != h_c), false); ASSERT_EQUAL((h_c != s_c), false); ASSERT_EQUAL((h_a != h_b), true); ASSERT_EQUAL((h_a != d_b), true); ASSERT_EQUAL((d_a != h_b), true); ASSERT_EQUAL((d_a != d_b), true); ASSERT_EQUAL((h_b != h_a), true); ASSERT_EQUAL((h_b != d_a), true); ASSERT_EQUAL((d_b != h_a), true); ASSERT_EQUAL((d_b != d_a), true); ASSERT_EQUAL((h_a != h_c), true); ASSERT_EQUAL((h_a != d_c), true); ASSERT_EQUAL((d_a != h_c), true); ASSERT_EQUAL((d_a != d_c), true); ASSERT_EQUAL((h_c != h_a), true); ASSERT_EQUAL((h_c != d_a), true); ASSERT_EQUAL((d_c != h_a), true); ASSERT_EQUAL((d_c != d_a), true); ASSERT_EQUAL((h_b != h_c), true); ASSERT_EQUAL((h_b != d_c), true); ASSERT_EQUAL((d_b != h_c), true); ASSERT_EQUAL((d_b != d_c), true); ASSERT_EQUAL((h_c != h_b), true); ASSERT_EQUAL((h_c != d_b), true); ASSERT_EQUAL((d_c != h_b), true); ASSERT_EQUAL((d_c != d_b), true); // test vector vs device_vector ASSERT_EQUAL((s_a != d_b), true); ASSERT_EQUAL((d_a != s_b), true); ASSERT_EQUAL((s_b != d_a), true); ASSERT_EQUAL((d_b != s_a), true); ASSERT_EQUAL((s_a != d_c), true); ASSERT_EQUAL((d_a != s_c), true); ASSERT_EQUAL((s_c != d_a), true); ASSERT_EQUAL((d_c != s_a), true); ASSERT_EQUAL((s_b != d_c), true); ASSERT_EQUAL((d_b != s_c), true); ASSERT_EQUAL((s_c != d_b), true); ASSERT_EQUAL((d_c != s_b), true); // test 
vector vs host_vector ASSERT_EQUAL((s_a != h_b), true); ASSERT_EQUAL((h_a != s_b), true); ASSERT_EQUAL((s_b != h_a), true); ASSERT_EQUAL((h_b != s_a), true); ASSERT_EQUAL((s_a != h_c), true); ASSERT_EQUAL((h_a != s_c), true); ASSERT_EQUAL((s_c != h_a), true); ASSERT_EQUAL((h_c != s_a), true); ASSERT_EQUAL((s_b != h_c), true); ASSERT_EQUAL((h_b != s_c), true); ASSERT_EQUAL((s_c != h_b), true); ASSERT_EQUAL((h_c != s_b), true); } DECLARE_UNITTEST(TestVectorInequality); template <class Vector> void TestVectorResizing(void) { Vector v; v.resize(3); ASSERT_EQUAL(v.size(), 3); v[0] = 0; v[1] = 1; v[2] = 2; v.resize(5); ASSERT_EQUAL(v.size(), 5); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 2); v[3] = 3; v[4] = 4; v.resize(4); ASSERT_EQUAL(v.size(), 4); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 2); ASSERT_EQUAL(v[3], 3); v.resize(0); ASSERT_EQUAL(v.size(), 0); // TODO remove this WAR #if defined(__HIPCC__) && TORCH_HIP_VERSION==3000 // depending on sizeof(T), we will receive one // of two possible exceptions try { v.resize(std::numeric_limits<size_t>::max()); } catch(std::length_error e) {} catch(std::bad_alloc e) { // reset the CUDA error hipGetLastError(); } // end catch #endif // defined(__HIPCC__) && TORCH_HIP_VERSION==3000 ASSERT_EQUAL(v.size(), 0); } DECLARE_VECTOR_UNITTEST(TestVectorResizing); template <class Vector> void TestVectorReserving(void) { Vector v; v.reserve(3); ASSERT_GEQUAL(v.capacity(), 3); size_t old_capacity = v.capacity(); v.reserve(0); ASSERT_EQUAL(v.capacity(), old_capacity); // TODO remove this WAR #if defined(__HIPCC__) && TORCH_HIP_VERSION==3000 try { v.reserve(std::numeric_limits<size_t>::max()); } catch(std::length_error e) {} catch(std::bad_alloc e) {} #endif // defined(__HIPCC__) && TORCH_HIP_VERSION==3000 ASSERT_EQUAL(v.capacity(), old_capacity); } DECLARE_VECTOR_UNITTEST(TestVectorReserving) template <class Vector> void TestVectorShrinkToFit(void) { Vector v; v.reserve(200); ASSERT_GEQUAL(v.capacity(), 200); v.push_back(1); v.push_back(2); v.push_back(3); v.shrink_to_fit(); ASSERT_EQUAL(1, v[0]); ASSERT_EQUAL(2, v[1]); ASSERT_EQUAL(3, v[2]); ASSERT_EQUAL(3, v.size()); ASSERT_EQUAL(3, v.capacity()); } DECLARE_VECTOR_UNITTEST(TestVectorShrinkToFit) template <int N> struct LargeStruct { int data[N]; __host__ __device__ bool operator==(const LargeStruct & ls) const { for (int i = 0; i < N; i++) if (data[i] != ls.data[i]) return false; return true; } }; void TestVectorContainingLargeType(void) { // Thrust issue #5 // http://code.google.com/p/thrust/issues/detail?id=5 const static int N = 100; typedef LargeStruct<N> T; thrust::device_vector<T> dv1; thrust::host_vector<T> hv1; ASSERT_EQUAL_QUIET(dv1, hv1); thrust::device_vector<T> dv2(20); thrust::host_vector<T> hv2(20); ASSERT_EQUAL_QUIET(dv2, hv2); // initialize tofirst element to something nonzero T ls; for (int i = 0; i < N; i++) ls.data[i] = i; thrust::device_vector<T> dv3(20, ls); thrust::host_vector<T> hv3(20, ls); ASSERT_EQUAL_QUIET(dv3, hv3); // change first element ls.data[0] = -13; dv3[2] = ls; hv3[2] = ls; ASSERT_EQUAL_QUIET(dv3, hv3); } DECLARE_UNITTEST(TestVectorContainingLargeType); template <typename Vector> void TestVectorReversed(void) { Vector v(3); v[0] = 0; v[1] = 1; v[2] = 2; ASSERT_EQUAL(3, v.rend() - v.rbegin()); ASSERT_EQUAL(3, static_cast<const Vector&>(v).rend() - static_cast<const Vector&>(v).rbegin()); ASSERT_EQUAL(3, v.crend() - v.crbegin()); ASSERT_EQUAL(2, *v.rbegin()); ASSERT_EQUAL(2, *static_cast<const Vector&>(v).rbegin()); ASSERT_EQUAL(2, 
*v.crbegin()); ASSERT_EQUAL(1, *(v.rbegin() + 1)); ASSERT_EQUAL(0, *(v.rbegin() + 2)); ASSERT_EQUAL(0, *(v.rend() - 1)); ASSERT_EQUAL(1, *(v.rend() - 2)); } DECLARE_VECTOR_UNITTEST(TestVectorReversed); #if __cplusplus >= 201103L template <class Vector> void TestVectorMove(void) { //test move construction Vector v1(3); v1[0] = 0; v1[1] = 1; v1[2] = 2; const auto ptr1 = v1.data(); const auto size1 = v1.size(); Vector v2(std::move(v1)); const auto ptr2 = v2.data(); const auto size2 = v2.size(); // ensure v1 was left empty ASSERT_EQUAL(true, v1.empty()); // ensure v2 received the data from before ASSERT_EQUAL(v2[0], 0); ASSERT_EQUAL(v2[1], 1); ASSERT_EQUAL(v2[2], 2); ASSERT_EQUAL(size1, size2); // ensure v2 received the pointer from before ASSERT_EQUAL(ptr1, ptr2); //test move assignment Vector v3(3); v3[0] = 3; v3[1] = 4; v3[2] = 5; const auto ptr3 = v3.data(); const auto size3 = v3.size(); v2 = std::move(v3); const auto ptr4 = v2.data(); const auto size4 = v2.size(); // ensure v3 was left empty ASSERT_EQUAL(true, v3.empty()); // ensure v2 received the data from before ASSERT_EQUAL(v2[0], 3); ASSERT_EQUAL(v2[1], 4); ASSERT_EQUAL(v2[2], 5); ASSERT_EQUAL(size3, size4); // ensure v2 received the pointer from before ASSERT_EQUAL(ptr3, ptr4); } DECLARE_VECTOR_UNITTEST(TestVectorMove); #endif
77accb7ff656e046fb825bc7a1f4f70440e23ca6.cu
/* * Copyright 2008-2013 NVIDIA Corporation * Modifications Copyright© 2019 Advanced Micro Devices, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <unittest/unittest.h> #include <thrust/sequence.h> #include <thrust/device_malloc_allocator.h> #include <vector> #include <list> #include <limits> #include <utility> template <class Vector> void TestVectorZeroSize(void) { Vector v; ASSERT_EQUAL(v.size(), 0); ASSERT_EQUAL((v.begin() == v.end()), true); } DECLARE_VECTOR_UNITTEST(TestVectorZeroSize); void TestVectorBool(void) { thrust::host_vector<bool> h(3); thrust::device_vector<bool> d(3); h[0] = true; h[1] = false; h[2] = true; d[0] = true; d[1] = false; d[2] = true; ASSERT_EQUAL(h[0], true); ASSERT_EQUAL(h[1], false); ASSERT_EQUAL(h[2], true); ASSERT_EQUAL(d[0], true); ASSERT_EQUAL(d[1], false); ASSERT_EQUAL(d[2], true); } DECLARE_UNITTEST(TestVectorBool); template <class Vector> void TestVectorFrontBack(void) { Vector v(3); v[0] = 0; v[1] = 1; v[2] = 2; ASSERT_EQUAL(v.front(), 0); ASSERT_EQUAL(v.back(), 2); } DECLARE_VECTOR_UNITTEST(TestVectorFrontBack); template <class Vector> void TestVectorData(void) { Vector v(3); v[0] = 0; v[1] = 1; v[2] = 2; ASSERT_EQUAL(0, *v.data()); ASSERT_EQUAL(1, *(v.data() + 1)); ASSERT_EQUAL(2, *(v.data() + 2)); ASSERT_EQUAL(&v.front(), v.data()); ASSERT_EQUAL(&*v.begin(), v.data()); ASSERT_EQUAL(&v[0], v.data()); const Vector &c_v = v; ASSERT_EQUAL(0, *c_v.data()); ASSERT_EQUAL(1, *(c_v.data() + 1)); ASSERT_EQUAL(2, *(c_v.data() + 2)); ASSERT_EQUAL(&c_v.front(), c_v.data()); ASSERT_EQUAL(&*c_v.begin(), c_v.data()); ASSERT_EQUAL(&c_v[0], c_v.data()); } DECLARE_VECTOR_UNITTEST(TestVectorData); template <class Vector> void TestVectorElementAssignment(void) { Vector v(3); v[0] = 0; v[1] = 1; v[2] = 2; ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 2); v[0] = 10; v[1] = 11; v[2] = 12; ASSERT_EQUAL(v[0], 10); ASSERT_EQUAL(v[1], 11); ASSERT_EQUAL(v[2], 12); Vector w(3); w[0] = v[0]; w[1] = v[1]; w[2] = v[2]; ASSERT_EQUAL(v, w); } DECLARE_VECTOR_UNITTEST(TestVectorElementAssignment); template <class Vector> void TestVectorFromSTLVector(void) { typedef typename Vector::value_type T; std::vector<T> stl_vector(3); stl_vector[0] = 0; stl_vector[1] = 1; stl_vector[2] = 2; thrust::host_vector<T> v(stl_vector); ASSERT_EQUAL(v.size(), 3); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 2); v = stl_vector; ASSERT_EQUAL(v.size(), 3); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 2); } DECLARE_VECTOR_UNITTEST(TestVectorFromSTLVector); template <class Vector> void TestVectorFillAssign(void) { typedef typename Vector::value_type T; thrust::host_vector<T> v; v.assign(3, 13); ASSERT_EQUAL(v.size(), 3); ASSERT_EQUAL(v[0], 13); ASSERT_EQUAL(v[1], 13); ASSERT_EQUAL(v[2], 13); } DECLARE_VECTOR_UNITTEST(TestVectorFillAssign); template <class Vector> void TestVectorAssignFromSTLVector(void) { typedef typename Vector::value_type T; std::vector<T> stl_vector(3); stl_vector[0] = 0; stl_vector[1] = 1; stl_vector[2] = 
2; thrust::host_vector<T> v; v.assign(stl_vector.begin(), stl_vector.end()); ASSERT_EQUAL(v.size(), 3); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 2); } DECLARE_VECTOR_UNITTEST(TestVectorAssignFromSTLVector); template <class Vector> void TestVectorFromBiDirectionalIterator(void) { typedef typename Vector::value_type T; std::list<T> stl_list; stl_list.push_back(0); stl_list.push_back(1); stl_list.push_back(2); thrust::host_vector<int> v(stl_list.begin(), stl_list.end()); ASSERT_EQUAL(v.size(), 3); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 2); } DECLARE_VECTOR_UNITTEST(TestVectorFromBiDirectionalIterator); template <class Vector> void TestVectorAssignFromBiDirectionalIterator(void) { typedef typename Vector::value_type T; std::list<T> stl_list; stl_list.push_back(0); stl_list.push_back(1); stl_list.push_back(2); Vector v; v.assign(stl_list.begin(), stl_list.end()); ASSERT_EQUAL(v.size(), 3); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 2); } DECLARE_VECTOR_UNITTEST(TestVectorAssignFromBiDirectionalIterator); template <class Vector> void TestVectorAssignFromHostVector(void) { typedef typename Vector::value_type T; thrust::host_vector<T> h(3); h[0] = 0; h[1] = 1; h[2] = 2; Vector v; v.assign(h.begin(), h.end()); ASSERT_EQUAL(v, h); } DECLARE_VECTOR_UNITTEST(TestVectorAssignFromHostVector); template <class Vector> void TestVectorToAndFromHostVector(void) { typedef typename Vector::value_type T; thrust::host_vector<T> h(3); h[0] = 0; h[1] = 1; h[2] = 2; Vector v(h); ASSERT_EQUAL(v, h); v[0] = 10; v[1] = 11; v[2] = 12; ASSERT_EQUAL(h[0], 0); ASSERT_EQUAL(v[0], 10); ASSERT_EQUAL(h[1], 1); ASSERT_EQUAL(v[1], 11); ASSERT_EQUAL(h[2], 2); ASSERT_EQUAL(v[2], 12); h = v; ASSERT_EQUAL(v, h); h[1] = 11; v = h; ASSERT_EQUAL(v, h); } DECLARE_VECTOR_UNITTEST(TestVectorToAndFromHostVector); template <class Vector> void TestVectorAssignFromDeviceVector(void) { typedef typename Vector::value_type T; thrust::device_vector<T> d(3); d[0] = 0; d[1] = 1; d[2] = 2; Vector v; v.assign(d.begin(), d.end()); ASSERT_EQUAL(v, d); } DECLARE_VECTOR_UNITTEST(TestVectorAssignFromDeviceVector); template <class Vector> void TestVectorToAndFromDeviceVector(void) { typedef typename Vector::value_type T; thrust::device_vector<T> h(3); h[0] = 0; h[1] = 1; h[2] = 2; Vector v(h); ASSERT_EQUAL(v, h); v[0] = 10; v[1] = 11; v[2] = 12; ASSERT_EQUAL(h[0], 0); ASSERT_EQUAL(v[0], 10); ASSERT_EQUAL(h[1], 1); ASSERT_EQUAL(v[1], 11); ASSERT_EQUAL(h[2], 2); ASSERT_EQUAL(v[2], 12); h = v; ASSERT_EQUAL(v, h); h[1] = 11; v = h; ASSERT_EQUAL(v, h); } DECLARE_VECTOR_UNITTEST(TestVectorToAndFromDeviceVector); template <class Vector> void TestVectorWithInitialValue(void) { typedef typename Vector::value_type T; const T init = 17; Vector v(3, init); ASSERT_EQUAL(v.size(), 3); ASSERT_EQUAL(v[0], init); ASSERT_EQUAL(v[1], init); ASSERT_EQUAL(v[2], init); } DECLARE_VECTOR_UNITTEST(TestVectorWithInitialValue); template <class Vector> void TestVectorSwap(void) { Vector v(3); v[0] = 0; v[1] = 1; v[2] = 2; Vector u(3); u[0] = 10; u[1] = 11; u[2] = 12; v.swap(u); ASSERT_EQUAL(v[0], 10); ASSERT_EQUAL(u[0], 0); ASSERT_EQUAL(v[1], 11); ASSERT_EQUAL(u[1], 1); ASSERT_EQUAL(v[2], 12); ASSERT_EQUAL(u[2], 2); } DECLARE_VECTOR_UNITTEST(TestVectorSwap); template <class Vector> void TestVectorErasePosition(void) { Vector v(5); v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4; v.erase(v.begin() + 2); ASSERT_EQUAL(v.size(), 4); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 3); 
ASSERT_EQUAL(v[3], 4); v.erase(v.begin() + 0); ASSERT_EQUAL(v.size(), 3); ASSERT_EQUAL(v[0], 1); ASSERT_EQUAL(v[1], 3); ASSERT_EQUAL(v[2], 4); v.erase(v.begin() + 2); ASSERT_EQUAL(v.size(), 2); ASSERT_EQUAL(v[0], 1); ASSERT_EQUAL(v[1], 3); v.erase(v.begin() + 1); ASSERT_EQUAL(v.size(), 1); ASSERT_EQUAL(v[0], 1); v.erase(v.begin() + 0); ASSERT_EQUAL(v.size(), 0); } DECLARE_VECTOR_UNITTEST(TestVectorErasePosition); template <class Vector> void TestVectorEraseRange(void) { Vector v(6); v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4; v[5] = 5; v.erase(v.begin() + 1, v.begin() + 3); ASSERT_EQUAL(v.size(), 4); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 3); ASSERT_EQUAL(v[2], 4); ASSERT_EQUAL(v[3], 5); v.erase(v.begin() + 2, v.end()); ASSERT_EQUAL(v.size(), 2); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 3); v.erase(v.begin() + 0, v.begin() + 1); ASSERT_EQUAL(v.size(), 1); ASSERT_EQUAL(v[0], 3); v.erase(v.begin(), v.end()); ASSERT_EQUAL(v.size(), 0); } DECLARE_VECTOR_UNITTEST(TestVectorEraseRange); void TestVectorEquality(void) { thrust::host_vector<int> h_a(3); thrust::host_vector<int> h_b(3); thrust::host_vector<int> h_c(3); h_a[0] = 0; h_a[1] = 1; h_a[2] = 2; h_b[0] = 0; h_b[1] = 1; h_b[2] = 3; h_b[0] = 0; h_b[1] = 1; thrust::device_vector<int> d_a(3); thrust::device_vector<int> d_b(3); thrust::device_vector<int> d_c(3); d_a[0] = 0; d_a[1] = 1; d_a[2] = 2; d_b[0] = 0; d_b[1] = 1; d_b[2] = 3; d_b[0] = 0; d_b[1] = 1; std::vector<int> s_a(3); std::vector<int> s_b(3); std::vector<int> s_c(3); s_a[0] = 0; s_a[1] = 1; s_a[2] = 2; s_b[0] = 0; s_b[1] = 1; s_b[2] = 3; s_b[0] = 0; s_b[1] = 1; ASSERT_EQUAL((h_a == h_a), true); ASSERT_EQUAL((h_a == d_a), true); ASSERT_EQUAL((d_a == h_a), true); ASSERT_EQUAL((d_a == d_a), true); ASSERT_EQUAL((h_b == h_b), true); ASSERT_EQUAL((h_b == d_b), true); ASSERT_EQUAL((d_b == h_b), true); ASSERT_EQUAL((d_b == d_b), true); ASSERT_EQUAL((h_c == h_c), true); ASSERT_EQUAL((h_c == d_c), true); ASSERT_EQUAL((d_c == h_c), true); ASSERT_EQUAL((d_c == d_c), true); // test vector vs device_vector ASSERT_EQUAL((s_a == d_a), true); ASSERT_EQUAL((d_a == s_a), true); ASSERT_EQUAL((s_b == d_b), true); ASSERT_EQUAL((d_b == s_b), true); ASSERT_EQUAL((s_c == d_c), true); ASSERT_EQUAL((d_c == s_c), true); // test vector vs host_vector ASSERT_EQUAL((s_a == h_a), true); ASSERT_EQUAL((h_a == s_a), true); ASSERT_EQUAL((s_b == h_b), true); ASSERT_EQUAL((h_b == s_b), true); ASSERT_EQUAL((s_c == h_c), true); ASSERT_EQUAL((h_c == s_c), true); ASSERT_EQUAL((h_a == h_b), false); ASSERT_EQUAL((h_a == d_b), false); ASSERT_EQUAL((d_a == h_b), false); ASSERT_EQUAL((d_a == d_b), false); ASSERT_EQUAL((h_b == h_a), false); ASSERT_EQUAL((h_b == d_a), false); ASSERT_EQUAL((d_b == h_a), false); ASSERT_EQUAL((d_b == d_a), false); ASSERT_EQUAL((h_a == h_c), false); ASSERT_EQUAL((h_a == d_c), false); ASSERT_EQUAL((d_a == h_c), false); ASSERT_EQUAL((d_a == d_c), false); ASSERT_EQUAL((h_c == h_a), false); ASSERT_EQUAL((h_c == d_a), false); ASSERT_EQUAL((d_c == h_a), false); ASSERT_EQUAL((d_c == d_a), false); ASSERT_EQUAL((h_b == h_c), false); ASSERT_EQUAL((h_b == d_c), false); ASSERT_EQUAL((d_b == h_c), false); ASSERT_EQUAL((d_b == d_c), false); ASSERT_EQUAL((h_c == h_b), false); ASSERT_EQUAL((h_c == d_b), false); ASSERT_EQUAL((d_c == h_b), false); ASSERT_EQUAL((d_c == d_b), false); // test vector vs device_vector ASSERT_EQUAL((s_a == d_b), false); ASSERT_EQUAL((d_a == s_b), false); ASSERT_EQUAL((s_b == d_a), false); ASSERT_EQUAL((d_b == s_a), false); ASSERT_EQUAL((s_a == d_c), false); ASSERT_EQUAL((d_a == 
s_c), false); ASSERT_EQUAL((s_c == d_a), false); ASSERT_EQUAL((d_c == s_a), false); ASSERT_EQUAL((s_b == d_c), false); ASSERT_EQUAL((d_b == s_c), false); ASSERT_EQUAL((s_c == d_b), false); ASSERT_EQUAL((d_c == s_b), false); // test vector vs host_vector ASSERT_EQUAL((s_a == h_b), false); ASSERT_EQUAL((h_a == s_b), false); ASSERT_EQUAL((s_b == h_a), false); ASSERT_EQUAL((h_b == s_a), false); ASSERT_EQUAL((s_a == h_c), false); ASSERT_EQUAL((h_a == s_c), false); ASSERT_EQUAL((s_c == h_a), false); ASSERT_EQUAL((h_c == s_a), false); ASSERT_EQUAL((s_b == h_c), false); ASSERT_EQUAL((h_b == s_c), false); ASSERT_EQUAL((s_c == h_b), false); ASSERT_EQUAL((h_c == s_b), false); } DECLARE_UNITTEST(TestVectorEquality); void TestVectorInequality(void) { thrust::host_vector<int> h_a(3); thrust::host_vector<int> h_b(3); thrust::host_vector<int> h_c(3); h_a[0] = 0; h_a[1] = 1; h_a[2] = 2; h_b[0] = 0; h_b[1] = 1; h_b[2] = 3; h_b[0] = 0; h_b[1] = 1; thrust::device_vector<int> d_a(3); thrust::device_vector<int> d_b(3); thrust::device_vector<int> d_c(3); d_a[0] = 0; d_a[1] = 1; d_a[2] = 2; d_b[0] = 0; d_b[1] = 1; d_b[2] = 3; d_b[0] = 0; d_b[1] = 1; std::vector<int> s_a(3); std::vector<int> s_b(3); std::vector<int> s_c(3); s_a[0] = 0; s_a[1] = 1; s_a[2] = 2; s_b[0] = 0; s_b[1] = 1; s_b[2] = 3; s_b[0] = 0; s_b[1] = 1; ASSERT_EQUAL((h_a != h_a), false); ASSERT_EQUAL((h_a != d_a), false); ASSERT_EQUAL((d_a != h_a), false); ASSERT_EQUAL((d_a != d_a), false); ASSERT_EQUAL((h_b != h_b), false); ASSERT_EQUAL((h_b != d_b), false); ASSERT_EQUAL((d_b != h_b), false); ASSERT_EQUAL((d_b != d_b), false); ASSERT_EQUAL((h_c != h_c), false); ASSERT_EQUAL((h_c != d_c), false); ASSERT_EQUAL((d_c != h_c), false); ASSERT_EQUAL((d_c != d_c), false); // test vector vs device_vector ASSERT_EQUAL((s_a != d_a), false); ASSERT_EQUAL((d_a != s_a), false); ASSERT_EQUAL((s_b != d_b), false); ASSERT_EQUAL((d_b != s_b), false); ASSERT_EQUAL((s_c != d_c), false); ASSERT_EQUAL((d_c != s_c), false); // test vector vs host_vector ASSERT_EQUAL((s_a != h_a), false); ASSERT_EQUAL((h_a != s_a), false); ASSERT_EQUAL((s_b != h_b), false); ASSERT_EQUAL((h_b != s_b), false); ASSERT_EQUAL((s_c != h_c), false); ASSERT_EQUAL((h_c != s_c), false); ASSERT_EQUAL((h_a != h_b), true); ASSERT_EQUAL((h_a != d_b), true); ASSERT_EQUAL((d_a != h_b), true); ASSERT_EQUAL((d_a != d_b), true); ASSERT_EQUAL((h_b != h_a), true); ASSERT_EQUAL((h_b != d_a), true); ASSERT_EQUAL((d_b != h_a), true); ASSERT_EQUAL((d_b != d_a), true); ASSERT_EQUAL((h_a != h_c), true); ASSERT_EQUAL((h_a != d_c), true); ASSERT_EQUAL((d_a != h_c), true); ASSERT_EQUAL((d_a != d_c), true); ASSERT_EQUAL((h_c != h_a), true); ASSERT_EQUAL((h_c != d_a), true); ASSERT_EQUAL((d_c != h_a), true); ASSERT_EQUAL((d_c != d_a), true); ASSERT_EQUAL((h_b != h_c), true); ASSERT_EQUAL((h_b != d_c), true); ASSERT_EQUAL((d_b != h_c), true); ASSERT_EQUAL((d_b != d_c), true); ASSERT_EQUAL((h_c != h_b), true); ASSERT_EQUAL((h_c != d_b), true); ASSERT_EQUAL((d_c != h_b), true); ASSERT_EQUAL((d_c != d_b), true); // test vector vs device_vector ASSERT_EQUAL((s_a != d_b), true); ASSERT_EQUAL((d_a != s_b), true); ASSERT_EQUAL((s_b != d_a), true); ASSERT_EQUAL((d_b != s_a), true); ASSERT_EQUAL((s_a != d_c), true); ASSERT_EQUAL((d_a != s_c), true); ASSERT_EQUAL((s_c != d_a), true); ASSERT_EQUAL((d_c != s_a), true); ASSERT_EQUAL((s_b != d_c), true); ASSERT_EQUAL((d_b != s_c), true); ASSERT_EQUAL((s_c != d_b), true); ASSERT_EQUAL((d_c != s_b), true); // test vector vs host_vector ASSERT_EQUAL((s_a != h_b), true); ASSERT_EQUAL((h_a 
!= s_b), true); ASSERT_EQUAL((s_b != h_a), true); ASSERT_EQUAL((h_b != s_a), true); ASSERT_EQUAL((s_a != h_c), true); ASSERT_EQUAL((h_a != s_c), true); ASSERT_EQUAL((s_c != h_a), true); ASSERT_EQUAL((h_c != s_a), true); ASSERT_EQUAL((s_b != h_c), true); ASSERT_EQUAL((h_b != s_c), true); ASSERT_EQUAL((s_c != h_b), true); ASSERT_EQUAL((h_c != s_b), true); } DECLARE_UNITTEST(TestVectorInequality); template <class Vector> void TestVectorResizing(void) { Vector v; v.resize(3); ASSERT_EQUAL(v.size(), 3); v[0] = 0; v[1] = 1; v[2] = 2; v.resize(5); ASSERT_EQUAL(v.size(), 5); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 2); v[3] = 3; v[4] = 4; v.resize(4); ASSERT_EQUAL(v.size(), 4); ASSERT_EQUAL(v[0], 0); ASSERT_EQUAL(v[1], 1); ASSERT_EQUAL(v[2], 2); ASSERT_EQUAL(v[3], 3); v.resize(0); ASSERT_EQUAL(v.size(), 0); // TODO remove this WAR #if defined(__CUDACC__) && CUDA_VERSION==3000 // depending on sizeof(T), we will receive one // of two possible exceptions try { v.resize(std::numeric_limits<size_t>::max()); } catch(std::length_error e) {} catch(std::bad_alloc e) { // reset the CUDA error cudaGetLastError(); } // end catch #endif // defined(__CUDACC__) && CUDA_VERSION==3000 ASSERT_EQUAL(v.size(), 0); } DECLARE_VECTOR_UNITTEST(TestVectorResizing); template <class Vector> void TestVectorReserving(void) { Vector v; v.reserve(3); ASSERT_GEQUAL(v.capacity(), 3); size_t old_capacity = v.capacity(); v.reserve(0); ASSERT_EQUAL(v.capacity(), old_capacity); // TODO remove this WAR #if defined(__CUDACC__) && CUDA_VERSION==3000 try { v.reserve(std::numeric_limits<size_t>::max()); } catch(std::length_error e) {} catch(std::bad_alloc e) {} #endif // defined(__CUDACC__) && CUDA_VERSION==3000 ASSERT_EQUAL(v.capacity(), old_capacity); } DECLARE_VECTOR_UNITTEST(TestVectorReserving) template <class Vector> void TestVectorShrinkToFit(void) { Vector v; v.reserve(200); ASSERT_GEQUAL(v.capacity(), 200); v.push_back(1); v.push_back(2); v.push_back(3); v.shrink_to_fit(); ASSERT_EQUAL(1, v[0]); ASSERT_EQUAL(2, v[1]); ASSERT_EQUAL(3, v[2]); ASSERT_EQUAL(3, v.size()); ASSERT_EQUAL(3, v.capacity()); } DECLARE_VECTOR_UNITTEST(TestVectorShrinkToFit) template <int N> struct LargeStruct { int data[N]; __host__ __device__ bool operator==(const LargeStruct & ls) const { for (int i = 0; i < N; i++) if (data[i] != ls.data[i]) return false; return true; } }; void TestVectorContainingLargeType(void) { // Thrust issue #5 // http://code.google.com/p/thrust/issues/detail?id=5 const static int N = 100; typedef LargeStruct<N> T; thrust::device_vector<T> dv1; thrust::host_vector<T> hv1; ASSERT_EQUAL_QUIET(dv1, hv1); thrust::device_vector<T> dv2(20); thrust::host_vector<T> hv2(20); ASSERT_EQUAL_QUIET(dv2, hv2); // initialize tofirst element to something nonzero T ls; for (int i = 0; i < N; i++) ls.data[i] = i; thrust::device_vector<T> dv3(20, ls); thrust::host_vector<T> hv3(20, ls); ASSERT_EQUAL_QUIET(dv3, hv3); // change first element ls.data[0] = -13; dv3[2] = ls; hv3[2] = ls; ASSERT_EQUAL_QUIET(dv3, hv3); } DECLARE_UNITTEST(TestVectorContainingLargeType); template <typename Vector> void TestVectorReversed(void) { Vector v(3); v[0] = 0; v[1] = 1; v[2] = 2; ASSERT_EQUAL(3, v.rend() - v.rbegin()); ASSERT_EQUAL(3, static_cast<const Vector&>(v).rend() - static_cast<const Vector&>(v).rbegin()); ASSERT_EQUAL(3, v.crend() - v.crbegin()); ASSERT_EQUAL(2, *v.rbegin()); ASSERT_EQUAL(2, *static_cast<const Vector&>(v).rbegin()); ASSERT_EQUAL(2, *v.crbegin()); ASSERT_EQUAL(1, *(v.rbegin() + 1)); ASSERT_EQUAL(0, *(v.rbegin() + 2)); 
ASSERT_EQUAL(0, *(v.rend() - 1)); ASSERT_EQUAL(1, *(v.rend() - 2)); } DECLARE_VECTOR_UNITTEST(TestVectorReversed); #if __cplusplus >= 201103L template <class Vector> void TestVectorMove(void) { //test move construction Vector v1(3); v1[0] = 0; v1[1] = 1; v1[2] = 2; const auto ptr1 = v1.data(); const auto size1 = v1.size(); Vector v2(std::move(v1)); const auto ptr2 = v2.data(); const auto size2 = v2.size(); // ensure v1 was left empty ASSERT_EQUAL(true, v1.empty()); // ensure v2 received the data from before ASSERT_EQUAL(v2[0], 0); ASSERT_EQUAL(v2[1], 1); ASSERT_EQUAL(v2[2], 2); ASSERT_EQUAL(size1, size2); // ensure v2 received the pointer from before ASSERT_EQUAL(ptr1, ptr2); //test move assignment Vector v3(3); v3[0] = 3; v3[1] = 4; v3[2] = 5; const auto ptr3 = v3.data(); const auto size3 = v3.size(); v2 = std::move(v3); const auto ptr4 = v2.data(); const auto size4 = v2.size(); // ensure v3 was left empty ASSERT_EQUAL(true, v3.empty()); // ensure v2 received the data from before ASSERT_EQUAL(v2[0], 3); ASSERT_EQUAL(v2[1], 4); ASSERT_EQUAL(v2[2], 5); ASSERT_EQUAL(size3, size4); // ensure v2 received the pointer from before ASSERT_EQUAL(ptr3, ptr4); } DECLARE_VECTOR_UNITTEST(TestVectorMove); #endif
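/*
 * Illustrative usage sketch (not part of the test file above): a standalone
 * Thrust program exercising the same host_vector/device_vector interop that
 * the unit tests above cover. The program and variable names here are
 * hypothetical additions for illustration only.
 */
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <vector>
#include <list>
#include <cassert>

int main()
{
    // construct a device_vector from a bidirectional-iterator range
    std::list<int> stl_list = {0, 1, 2};
    thrust::device_vector<int> d(stl_list.begin(), stl_list.end());
    assert(d.size() == 3 && d[2] == 2);

    // round-trip device -> host -> device, as in TestVectorToAndFromDeviceVector
    thrust::host_vector<int> h = d;   // device-to-host copy
    h[1] = 11;
    d = h;                            // host-to-device copy
    assert(d == h);                   // mixed host/device comparison is supported

    // assign() replaces the contents from any iterator range
    std::vector<int> stl_vector = {7, 8, 9};
    d.assign(stl_vector.begin(), stl_vector.end());
    assert(d.size() == 3 && d[0] == 7);

    return 0;
}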
1d05a226d74a90a0a59faa7713c0eef63aca9e5b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <df/voxel/color.h>
#include <df/voxel/probability.h>
#include <df/voxel/compositeVoxel.h>
#include <df/voxel/voxelGrid.h>
#include <df/voxel/tsdf.h>

#include <df/util/cudaHelpers.h>

#include <iostream>

namespace df {

namespace internal {

template <typename VoxelT>
__global__ void fillVoxelGridKernel(Tensor<3,VoxelT,DeviceResident> grid, const VoxelT value) {

    const uint x = threadIdx.x + blockDim.x * blockIdx.x;
    const uint y = threadIdx.y + blockDim.y * blockIdx.y;
    const uint z = threadIdx.z + blockDim.z * blockIdx.z;

    if ((x < grid.dimensionSize(0)) && (y < grid.dimensionSize(1)) && (z < grid.dimensionSize(2))) {
        grid(x,y,z) = value;
    }

}

template <>
template <typename VoxelT>
void VoxelGridFiller<DeviceResident>::fill(Tensor<3,VoxelT,DeviceResident> & grid, const VoxelT & value) {

    dim3 block(16,16,4);
    dim3 threadGrid(intDivideAndCeil(grid.dimensionSize(0),block.x),
                    intDivideAndCeil(grid.dimensionSize(1),block.y),
                    intDivideAndCeil(grid.dimensionSize(2),block.z));

    // TODO
    hipLaunchKernelGGL(( fillVoxelGridKernel), dim3(threadGrid),dim3(block), 0, 0, grid,value);

}

template void VoxelGridFiller<DeviceResident>::fill(Tensor<3,CompositeVoxel<float,TsdfVoxel>,DeviceResident> &, const CompositeVoxel<float,TsdfVoxel> &);
template void VoxelGridFiller<DeviceResident>::fill(Tensor<3,CompositeVoxel<float,TsdfVoxel,ColorVoxel>,DeviceResident> &, const CompositeVoxel<float,TsdfVoxel,ColorVoxel> &);
template void VoxelGridFiller<DeviceResident>::fill(Tensor<3,CompositeVoxel<float,TsdfVoxel,ProbabilityVoxel>,DeviceResident> &, const CompositeVoxel<float,TsdfVoxel,ProbabilityVoxel> &);
template void VoxelGridFiller<DeviceResident>::fill(Tensor<3,Eigen::Matrix<int,4,1,Eigen::DontAlign>,DeviceResident> &, const Eigen::Matrix<int,4,1,Eigen::DontAlign> &);

} // namespace internal

} // namespace df
1d05a226d74a90a0a59faa7713c0eef63aca9e5b.cu
#include <df/voxel/color.h>
#include <df/voxel/probability.h>
#include <df/voxel/compositeVoxel.h>
#include <df/voxel/voxelGrid.h>
#include <df/voxel/tsdf.h>

#include <df/util/cudaHelpers.h>

#include <iostream>

namespace df {

namespace internal {

template <typename VoxelT>
__global__ void fillVoxelGridKernel(Tensor<3,VoxelT,DeviceResident> grid, const VoxelT value) {

    const uint x = threadIdx.x + blockDim.x * blockIdx.x;
    const uint y = threadIdx.y + blockDim.y * blockIdx.y;
    const uint z = threadIdx.z + blockDim.z * blockIdx.z;

    if ((x < grid.dimensionSize(0)) && (y < grid.dimensionSize(1)) && (z < grid.dimensionSize(2))) {
        grid(x,y,z) = value;
    }

}

template <>
template <typename VoxelT>
void VoxelGridFiller<DeviceResident>::fill(Tensor<3,VoxelT,DeviceResident> & grid, const VoxelT & value) {

    dim3 block(16,16,4);
    dim3 threadGrid(intDivideAndCeil(grid.dimensionSize(0),block.x),
                    intDivideAndCeil(grid.dimensionSize(1),block.y),
                    intDivideAndCeil(grid.dimensionSize(2),block.z));

    // TODO
    fillVoxelGridKernel<<<threadGrid,block>>>(grid,value);

}

template void VoxelGridFiller<DeviceResident>::fill(Tensor<3,CompositeVoxel<float,TsdfVoxel>,DeviceResident> &, const CompositeVoxel<float,TsdfVoxel> &);
template void VoxelGridFiller<DeviceResident>::fill(Tensor<3,CompositeVoxel<float,TsdfVoxel,ColorVoxel>,DeviceResident> &, const CompositeVoxel<float,TsdfVoxel,ColorVoxel> &);
template void VoxelGridFiller<DeviceResident>::fill(Tensor<3,CompositeVoxel<float,TsdfVoxel,ProbabilityVoxel>,DeviceResident> &, const CompositeVoxel<float,TsdfVoxel,ProbabilityVoxel> &);
template void VoxelGridFiller<DeviceResident>::fill(Tensor<3,Eigen::Matrix<int,4,1,Eigen::DontAlign>,DeviceResident> &, const Eigen::Matrix<int,4,1,Eigen::DontAlign> &);

} // namespace internal

} // namespace df
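/*
 * Minimal sketch of the only substantive difference between the .hip and .cu
 * files above: the kernel-launch syntax. The kernel, buffer, and function
 * names below are made up for illustration; the mapping itself is taken
 * directly from the pair above.
 */
#include <hip/hip_runtime.h>

__global__ void scaleKernel(float* data, float factor, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

void scale(float* d_data, float factor, int n, hipStream_t stream)
{
    const dim3 block(256);
    const dim3 grid((n + block.x - 1) / block.x);

    // CUDA form (as in the .cu file):
    //   scaleKernel<<<grid, block, 0, stream>>>(d_data, factor, n);
    //
    // HIP form emitted by hipify (as in the .hip file): grid, block, shared
    // memory bytes, and stream become the first parameters of
    // hipLaunchKernelGGL, followed by the kernel arguments.
    hipLaunchKernelGGL(scaleKernel, grid, block, 0, stream, d_data, factor, n);
}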
e47b3757035c5660edfdcd2830a219f05c8e1abb.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdlib> #include <cstdio> #include <iostream> #include <iomanip> #include <hip/hip_runtime.h> #include <gauge_field.h> #include "quda_matrix.h" #include "svd_quda.h" #include <hisq_links_quda.h> #ifndef FL_UNITARIZE_PI #define FL_UNITARIZE_PI 3.14159265358979323846 #endif #ifndef FL_UNITARIZE_PI23 #define FL_UNITARIZE_PI23 FL_UNITARIZE_PI*2.0/3.0 #endif __constant__ int INPUT_PADDING=0; __constant__ int OUTPUT_PADDING=0; __constant__ int DEV_MAX_ITER = 20; static int HOST_MAX_ITER = 20; __constant__ double DEV_FL_MAX_ERROR; __constant__ double DEV_FL_UNITARIZE_EPS; __constant__ bool DEV_FL_REUNIT_ALLOW_SVD; __constant__ bool DEV_FL_REUNIT_SVD_ONLY; __constant__ double DEV_FL_REUNIT_SVD_REL_ERROR; __constant__ double DEV_FL_REUNIT_SVD_ABS_ERROR; __constant__ bool DEV_FL_CHECK_UNITARIZATION; static double HOST_FL_MAX_ERROR; static double HOST_FL_UNITARIZE_EPS; static bool HOST_FL_REUNIT_ALLOW_SVD; static bool HOST_FL_REUNIT_SVD_ONLY; static double HOST_FL_REUNIT_SVD_REL_ERROR; static double HOST_FL_REUNIT_SVD_ABS_ERROR; static bool HOST_FL_CHECK_UNITARIZATION; namespace quda{ void setUnitarizeLinksPadding(int input_padding, int output_padding) { hipMemcpyToSymbol("INPUT_PADDING", &input_padding, sizeof(int)); hipMemcpyToSymbol("OUTPUT_PADDING", &output_padding, sizeof(int)); return; } template<class Cmplx> __device__ __host__ bool isUnitary(const Matrix<Cmplx,3>& matrix, double max_error) { const Matrix<Cmplx,3> identity = conj(matrix)*matrix; for(int i=0; i<3; ++i){ if( fabs(identity(i,i).x - 1.0) > max_error || fabs(identity(i,i).y) > max_error) return false; for(int j=i+1; j<3; ++j){ if( fabs(identity(i,j).x) > max_error || fabs(identity(i,j).y) > max_error || fabs(identity(j,i).x) > max_error || fabs(identity(j,i).y) > max_error ){ return false; } } } return true; } template<class Cmplx> __device__ __host__ bool isUnitarizedLinkConsistent(const Matrix<Cmplx,3>& initial_matrix, const Matrix<Cmplx,3>& unitary_matrix, double max_error) { Matrix<Cmplx,3> temporary; temporary = conj(initial_matrix)*unitary_matrix; temporary = temporary*temporary - conj(initial_matrix)*initial_matrix; for(int i=0; i<3; ++i){ for(int j=0; j<3; ++j){ if( fabs(temporary(i,j).x) > max_error || fabs(temporary(i,j).y) > max_error){ return false; } } } return true; } void setUnitarizeLinksConstants(double unitarize_eps, double max_error, bool allow_svd, bool svd_only, double svd_rel_error, double svd_abs_error, bool check_unitarization) { // not_set is only initialised once static bool not_set=true; if(not_set){ hipMemcpyToSymbol("DEV_FL_UNITARIZE_EPS", &unitarize_eps, sizeof(double)); hipMemcpyToSymbol("DEV_FL_REUNIT_ALLOW_SVD", &allow_svd, sizeof(bool)); hipMemcpyToSymbol("DEV_FL_REUNIT_SVD_ONLY", &svd_only, sizeof(bool)); hipMemcpyToSymbol("DEV_FL_REUNIT_SVD_REL_ERROR", &svd_rel_error, sizeof(double)); hipMemcpyToSymbol("DEV_FL_REUNIT_SVD_ABS_ERROR", &svd_abs_error, sizeof(double)); hipMemcpyToSymbol("DEV_FL_MAX_ERROR", &max_error, sizeof(double)); hipMemcpyToSymbol("DEV_FL_CHECK_UNITARIZATION", &check_unitarization, sizeof(bool)); HOST_FL_UNITARIZE_EPS = unitarize_eps; HOST_FL_REUNIT_ALLOW_SVD = allow_svd; HOST_FL_REUNIT_SVD_ONLY = svd_only; HOST_FL_REUNIT_SVD_REL_ERROR = svd_rel_error; HOST_FL_REUNIT_SVD_ABS_ERROR = svd_abs_error; HOST_FL_MAX_ERROR = max_error; HOST_FL_CHECK_UNITARIZATION = check_unitarization; not_set = false; } checkCudaError(); return; } template<class T> __device__ __host__ T getAbsMin(const T* const array, int size){ T 
min = fabs(array[0]); for(int i=1; i<size; ++i){ T abs_val = fabs(array[i]); if((abs_val) < min){ min = abs_val; } } return min; } template<class Real> __device__ __host__ inline bool checkAbsoluteError(Real a, Real b, Real epsilon) { if( fabs(a-b) < epsilon) return true; return false; } template<class Real> __device__ __host__ inline bool checkRelativeError(Real a, Real b, Real epsilon) { if( fabs((a-b)/b) < epsilon ) return true; return false; } // Compute the reciprocal square root of the matrix q // Also modify q if the eigenvalues are dangerously small. template<class Cmplx> __device__ __host__ bool reciprocalRoot(const Matrix<Cmplx,3>& q, Matrix<Cmplx,3>* res){ Matrix<Cmplx,3> qsq, tempq; typename RealTypeId<Cmplx>::Type c[3]; typename RealTypeId<Cmplx>::Type g[3]; qsq = q*q; tempq = qsq*q; c[0] = getTrace(q).x; c[1] = getTrace(qsq).x/2.0; c[2] = getTrace(tempq).x/3.0; g[0] = g[1] = g[2] = c[0]/3.; typename RealTypeId<Cmplx>::Type r,s,theta; s = c[1]/3. - c[0]*c[0]/18; #ifdef __CUDA_ARCH__ #define FL_UNITARIZE_EPS DEV_FL_UNITARIZE_EPS #else #define FL_UNITARIZE_EPS HOST_FL_UNITARIZE_EPS #endif #ifdef __CUDA_ARCH__ #define FL_REUNIT_SVD_REL_ERROR DEV_FL_REUNIT_SVD_REL_ERROR #define FL_REUNIT_SVD_ABS_ERROR DEV_FL_REUNIT_SVD_ABS_ERROR #else // cpu #define FL_REUNIT_SVD_REL_ERROR HOST_FL_REUNIT_SVD_REL_ERROR #define FL_REUNIT_SVD_ABS_ERROR HOST_FL_REUNIT_SVD_ABS_ERROR #endif typename RealTypeId<Cmplx>::Type cosTheta; if(fabs(s) >= FL_UNITARIZE_EPS){ const typename RealTypeId<Cmplx>::Type sqrt_s = sqrt(s); r = c[2]/2. - (c[0]/3.)*(c[1] - c[0]*c[0]/9.); cosTheta = r/(sqrt_s*sqrt_s*sqrt_s); if(fabs(cosTheta) >= 1.0){ if( r > 0 ){ theta = 0.0; }else{ theta = FL_UNITARIZE_PI; } }else{ theta = acos(cosTheta); } g[0] = c[0]/3 + 2*sqrt_s*cos( theta/3 ); g[1] = c[0]/3 + 2*sqrt_s*cos( theta/3 + FL_UNITARIZE_PI23 ); g[2] = c[0]/3 + 2*sqrt_s*cos( theta/3 + 2*FL_UNITARIZE_PI23 ); } // Check the eigenvalues, if the determinant does not match the product of the eigenvalues // return false. Then call SVD instead. typename RealTypeId<Cmplx>::Type det = getDeterminant(q).x; if( fabs(det) < FL_REUNIT_SVD_ABS_ERROR ){ return false; } if( checkRelativeError(g[0]*g[1]*g[2],det,FL_REUNIT_SVD_REL_ERROR) == false ) return false; // At this point we have finished with the c's // use these to store sqrt(g) for(int i=0; i<3; ++i) c[i] = sqrt(g[i]); // done with the g's, use these to store u, v, w g[0] = c[0]+c[1]+c[2]; g[1] = c[0]*c[1] + c[0]*c[2] + c[1]*c[2]; g[2] = c[0]*c[1]*c[2]; const typename RealTypeId<Cmplx>::Type & denominator = g[2]*(g[0]*g[1]-g[2]); c[0] = (g[0]*g[1]*g[1] - g[2]*(g[0]*g[0]+g[1]))/denominator; c[1] = (-g[0]*g[0]*g[0] - g[2] + 2.*g[0]*g[1])/denominator; c[2] = g[0]/denominator; tempq = c[1]*q + c[2]*qsq; // Add a real scalar tempq(0,0).x += c[0]; tempq(1,1).x += c[0]; tempq(2,2).x += c[0]; *res = tempq; return true; } template<class Cmplx> __host__ __device__ bool unitarizeLinkMILC(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result) { Matrix<Cmplx,3> u; #ifdef __CUDA_ARCH__ #define FL_REUNIT_SVD_ONLY DEV_FL_REUNIT_SVD_ONLY #define FL_REUNIT_ALLOW_SVD DEV_FL_REUNIT_ALLOW_SVD #else #define FL_REUNIT_SVD_ONLY HOST_FL_REUNIT_SVD_ONLY #define FL_REUNIT_ALLOW_SVD HOST_FL_REUNIT_ALLOW_SVD #endif if( !FL_REUNIT_SVD_ONLY ){ if( reciprocalRoot<Cmplx>(conj(in)*in,&u) ){ *result = in*u; return true; } } // If we've got this far, then the Caley-Hamilton unitarization // has failed. If SVD is not allowed, the unitarization has failed. 
if( !FL_REUNIT_ALLOW_SVD ) return false; Matrix<Cmplx,3> v; typename RealTypeId<Cmplx>::Type singular_values[3]; computeSVD<Cmplx>(in, u, v, singular_values); // should pass pointers to u, v I guess *result = u*conj(v); return true; } // unitarizeMILC template<class Cmplx> __host__ __device__ bool unitarizeLinkSVD(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result) { Matrix<Cmplx,3> u, v; typename RealTypeId<Cmplx>::Type singular_values[3]; computeSVD<Cmplx>(in, u, v, singular_values); // should pass pointers to u,v I guess *result = u*conj(v); #ifdef __CUDA_ARCH__ #define FL_MAX_ERROR DEV_FL_MAX_ERROR #else #define FL_MAX_ERROR HOST_FL_MAX_ERROR #endif if(isUnitary(*result,FL_MAX_ERROR)==false) { #if (!defined(__CUDA_ARCH__) || (__COMPUTE_CAPABILITY__>=200)) printf("ERROR: Link unitarity test failed\n"); printf("TOLERANCE: %g\n", FL_MAX_ERROR); #endif return false; } return true; } #undef FL_MAX_ERROR template<class Cmplx> __host__ __device__ bool unitarizeLinkNewton(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result) { Matrix<Cmplx,3> u, uinv; u = in; #ifdef __CUDA_ARCH__ #define MAX_ITER DEV_MAX_ITER #else #define MAX_ITER HOST_MAX_ITER #endif for(int i=0; i<MAX_ITER; ++i){ computeMatrixInverse(u, &uinv); u = 0.5*(u + conj(uinv)); } #undef MAX_ITER if(isUnitarizedLinkConsistent(in,u,0.0000001)==false) { #if (!defined(__CUDA_ARCH__) || (__COMPUTE_CAPABILITY__>=200)) printf("ERROR: Unitarized link is not consistent with incoming link\n"); #endif return false; } *result = u; return true; } template<class Cmplx> __global__ void getUnitarizedField(const Cmplx* inlink_even, const Cmplx* inlink_odd, Cmplx* outlink_even, Cmplx* outlink_odd, int* num_failures, const int threads) { int mem_idx = blockIdx.x*blockDim.x + threadIdx.x; if (mem_idx >= threads) return; const Cmplx* inlink; Cmplx* outlink; inlink = inlink_even; outlink = outlink_even; if(mem_idx >= Vh){ mem_idx = mem_idx - Vh; inlink = inlink_odd; outlink = outlink_odd; } // Unitarization is always done in double precision Matrix<double2,3> v, result; for(int dir=0; dir<4; ++dir){ loadLinkVariableFromArray(inlink, dir, mem_idx, Vh+INPUT_PADDING, &v); unitarizeLinkMILC(v, &result); #ifdef __CUDA_ARCH__ #define FL_MAX_ERROR DEV_FL_MAX_ERROR #define FL_CHECK_UNITARIZATION DEV_FL_CHECK_UNITARIZATION #else #define FL_MAX_ERROR HOST_FL_MAX_ERROR #define FL_CHECK_UNITARIZATION HOST_FL_CHECK_UNITARIZATION #endif if(FL_CHECK_UNITARIZATION){ if(isUnitary(result,FL_MAX_ERROR) == false) { #ifdef __CUDA_ARCH__ atomicAdd(num_failures, 1); #else (*num_failures)++; #endif } } writeLinkVariableToArray(result, dir, mem_idx, Vh+OUTPUT_PADDING, outlink); } return; } class UnitarizeLinksCuda : public Tunable { private: const cudaGaugeField &inField; cudaGaugeField &outField; int *fails; int sharedBytesPerThread() const { return 0; } int sharedBytesPerBlock(const TuneParam &) const { return 0; } // don't tune the grid dimension bool advanceGridDim(TuneParam &param) const { return false; } bool advanceBlockDim(TuneParam &param) const { bool rtn = Tunable::advanceBlockDim(param); const int threads = inField.Volume(); param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1); return rtn; } public: UnitarizeLinksCuda(const cudaGaugeField& inField, cudaGaugeField& outField, int* fails) : inField(inField), outField(outField), fails(fails) { ; } virtual ~UnitarizeLinksCuda() { ; } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, dslashTuning, verbosity); if(inField.Precision() == QUDA_SINGLE_PRECISION){ 
hipLaunchKernelGGL(( getUnitarizedField), dim3(tp.grid),dim3(tp.block), 0, 0, (float2*)inField.Even_p(), (float2*)inField.Odd_p(), (float2*)outField.Even_p(), (float2*)outField.Odd_p(), fails, inField.Volume()); }else if(inField.Precision() == QUDA_DOUBLE_PRECISION){ hipLaunchKernelGGL(( getUnitarizedField), dim3(tp.grid),dim3(tp.block), 0, 0, (double2*)inField.Even_p(), (double2*)inField.Odd_p(), (double2*)outField.Even_p(), (double2*)outField.Odd_p(), fails, inField.Volume()); } else { errorQuda("UnitarizeLinks not implemented for precision %d", inField.Precision()); } } void preTune() { ; } void postTune() { hipMemset(fails, 0, sizeof(int)); } // reset fails counter void initTuneParam(TuneParam &param) const { Tunable::initTuneParam(param); const int threads = inField.Volume(); param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1); } /** sets default values for when tuning is disabled */ void defaultTuneParam(TuneParam &param) const { Tunable::defaultTuneParam(param); const int threads = inField.Volume(); param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1); } long long flops() const { return 0; } // FIXME: add flops counter TuneKey tuneKey() const { std::stringstream vol, aux; vol << inField.X()[0] << "x"; vol << inField.X()[1] << "x"; vol << inField.X()[2] << "x"; vol << inField.X()[3] << "x"; aux << "threads=" << inField.Volume() << ",prec=" << inField.Precision(); aux << "stride=" << inField.Stride(); return TuneKey(vol.str(), typeid(*this).name(), aux.str()); } }; // UnitarizeLinksCuda void unitarizeLinksCuda(const QudaGaugeParam& param, cudaGaugeField& inField, cudaGaugeField* outField, int* fails) { UnitarizeLinksCuda unitarizeLinks(inField, *outField, fails); unitarizeLinks.apply(0); } void unitarizeLinksCPU(const QudaGaugeParam& param, cpuGaugeField& infield, cpuGaugeField* outfield) { int num_failures = 0; Matrix<double2,3> inlink, outlink; for(int i=0; i<infield.Volume(); ++i){ for(int dir=0; dir<4; ++dir){ if(param.cpu_prec == QUDA_SINGLE_PRECISION){ copyArrayToLink(&inlink, ((float*)(infield.Gauge_p()) + (i*4 + dir)*18)); // order of arguments? if( unitarizeLinkNewton<double2>(inlink, &outlink) == false ) num_failures++; copyLinkToArray(((float*)(outfield->Gauge_p()) + (i*4 + dir)*18), outlink); }else if(param.cpu_prec == QUDA_DOUBLE_PRECISION){ copyArrayToLink(&inlink, ((double*)(infield.Gauge_p()) + (i*4 + dir)*18)); // order of arguments? if( unitarizeLinkNewton<double2>(inlink, &outlink) == false ) num_failures++; copyLinkToArray(((double*)(outfield->Gauge_p()) + (i*4 + dir)*18), outlink); } // precision? } // dir } // loop over volume return; } // CPU function which checks that the gauge field is unitary bool isUnitary(const QudaGaugeParam& param, cpuGaugeField& field, double max_error) { Matrix<double2,3> link, identity; for(int i=0; i<field.Volume(); ++i){ for(int dir=0; dir<4; ++dir){ if(param.cpu_prec == QUDA_SINGLE_PRECISION){ copyArrayToLink(&link, ((float*)(field.Gauge_p()) + (i*4 + dir)*18)); // order of arguments? }else if(param.cpu_prec == QUDA_DOUBLE_PRECISION){ copyArrayToLink(&link, ((double*)(field.Gauge_p()) + (i*4 + dir)*18)); // order of arguments? }else{ errorQuda("Unsupported precision\n"); } if(isUnitary(link,max_error) == false){ printf("Unitarity failure\n"); printf("site index = %d,\t direction = %d\n", i, dir); printLink(link); identity = conj(link)*link; printLink(identity); return false; } } // dir } // i return true; } // is unitary } // namespace quda
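/*
 * Illustrative sketch (not QUDA code) of the pattern the file above relies on:
 * a tunable value mirrored into a __constant__ device variable and a static
 * host variable, selected inside __host__ __device__ code with __CUDA_ARCH__.
 * The names DEV_TOL, HOST_TOL, nearlyEqual, and setTolerance are hypothetical.
 */
#include <hip/hip_runtime.h>
#include <cmath>

__constant__ double DEV_TOL;
static double HOST_TOL;

__host__ __device__ inline bool nearlyEqual(double a, double b)
{
#ifdef __CUDA_ARCH__
    const double tol = DEV_TOL;   // device compilation path: constant memory
#else
    const double tol = HOST_TOL;  // host compilation path: static global
#endif
    return fabs(a - b) < tol;
}

void setTolerance(double tol)
{
    // Mirror the value on both sides, as setUnitarizeLinksConstants does above.
    hipMemcpyToSymbol(HIP_SYMBOL(DEV_TOL), &tol, sizeof(double));
    HOST_TOL = tol;
}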
e47b3757035c5660edfdcd2830a219f05c8e1abb.cu
#include <cstdlib> #include <cstdio> #include <iostream> #include <iomanip> #include <cuda.h> #include <gauge_field.h> #include "quda_matrix.h" #include "svd_quda.h" #include <hisq_links_quda.h> #ifndef FL_UNITARIZE_PI #define FL_UNITARIZE_PI 3.14159265358979323846 #endif #ifndef FL_UNITARIZE_PI23 #define FL_UNITARIZE_PI23 FL_UNITARIZE_PI*2.0/3.0 #endif __constant__ int INPUT_PADDING=0; __constant__ int OUTPUT_PADDING=0; __constant__ int DEV_MAX_ITER = 20; static int HOST_MAX_ITER = 20; __constant__ double DEV_FL_MAX_ERROR; __constant__ double DEV_FL_UNITARIZE_EPS; __constant__ bool DEV_FL_REUNIT_ALLOW_SVD; __constant__ bool DEV_FL_REUNIT_SVD_ONLY; __constant__ double DEV_FL_REUNIT_SVD_REL_ERROR; __constant__ double DEV_FL_REUNIT_SVD_ABS_ERROR; __constant__ bool DEV_FL_CHECK_UNITARIZATION; static double HOST_FL_MAX_ERROR; static double HOST_FL_UNITARIZE_EPS; static bool HOST_FL_REUNIT_ALLOW_SVD; static bool HOST_FL_REUNIT_SVD_ONLY; static double HOST_FL_REUNIT_SVD_REL_ERROR; static double HOST_FL_REUNIT_SVD_ABS_ERROR; static bool HOST_FL_CHECK_UNITARIZATION; namespace quda{ void setUnitarizeLinksPadding(int input_padding, int output_padding) { cudaMemcpyToSymbol("INPUT_PADDING", &input_padding, sizeof(int)); cudaMemcpyToSymbol("OUTPUT_PADDING", &output_padding, sizeof(int)); return; } template<class Cmplx> __device__ __host__ bool isUnitary(const Matrix<Cmplx,3>& matrix, double max_error) { const Matrix<Cmplx,3> identity = conj(matrix)*matrix; for(int i=0; i<3; ++i){ if( fabs(identity(i,i).x - 1.0) > max_error || fabs(identity(i,i).y) > max_error) return false; for(int j=i+1; j<3; ++j){ if( fabs(identity(i,j).x) > max_error || fabs(identity(i,j).y) > max_error || fabs(identity(j,i).x) > max_error || fabs(identity(j,i).y) > max_error ){ return false; } } } return true; } template<class Cmplx> __device__ __host__ bool isUnitarizedLinkConsistent(const Matrix<Cmplx,3>& initial_matrix, const Matrix<Cmplx,3>& unitary_matrix, double max_error) { Matrix<Cmplx,3> temporary; temporary = conj(initial_matrix)*unitary_matrix; temporary = temporary*temporary - conj(initial_matrix)*initial_matrix; for(int i=0; i<3; ++i){ for(int j=0; j<3; ++j){ if( fabs(temporary(i,j).x) > max_error || fabs(temporary(i,j).y) > max_error){ return false; } } } return true; } void setUnitarizeLinksConstants(double unitarize_eps, double max_error, bool allow_svd, bool svd_only, double svd_rel_error, double svd_abs_error, bool check_unitarization) { // not_set is only initialised once static bool not_set=true; if(not_set){ cudaMemcpyToSymbol("DEV_FL_UNITARIZE_EPS", &unitarize_eps, sizeof(double)); cudaMemcpyToSymbol("DEV_FL_REUNIT_ALLOW_SVD", &allow_svd, sizeof(bool)); cudaMemcpyToSymbol("DEV_FL_REUNIT_SVD_ONLY", &svd_only, sizeof(bool)); cudaMemcpyToSymbol("DEV_FL_REUNIT_SVD_REL_ERROR", &svd_rel_error, sizeof(double)); cudaMemcpyToSymbol("DEV_FL_REUNIT_SVD_ABS_ERROR", &svd_abs_error, sizeof(double)); cudaMemcpyToSymbol("DEV_FL_MAX_ERROR", &max_error, sizeof(double)); cudaMemcpyToSymbol("DEV_FL_CHECK_UNITARIZATION", &check_unitarization, sizeof(bool)); HOST_FL_UNITARIZE_EPS = unitarize_eps; HOST_FL_REUNIT_ALLOW_SVD = allow_svd; HOST_FL_REUNIT_SVD_ONLY = svd_only; HOST_FL_REUNIT_SVD_REL_ERROR = svd_rel_error; HOST_FL_REUNIT_SVD_ABS_ERROR = svd_abs_error; HOST_FL_MAX_ERROR = max_error; HOST_FL_CHECK_UNITARIZATION = check_unitarization; not_set = false; } checkCudaError(); return; } template<class T> __device__ __host__ T getAbsMin(const T* const array, int size){ T min = fabs(array[0]); for(int i=1; i<size; ++i){ T abs_val = 
fabs(array[i]); if((abs_val) < min){ min = abs_val; } } return min; } template<class Real> __device__ __host__ inline bool checkAbsoluteError(Real a, Real b, Real epsilon) { if( fabs(a-b) < epsilon) return true; return false; } template<class Real> __device__ __host__ inline bool checkRelativeError(Real a, Real b, Real epsilon) { if( fabs((a-b)/b) < epsilon ) return true; return false; } // Compute the reciprocal square root of the matrix q // Also modify q if the eigenvalues are dangerously small. template<class Cmplx> __device__ __host__ bool reciprocalRoot(const Matrix<Cmplx,3>& q, Matrix<Cmplx,3>* res){ Matrix<Cmplx,3> qsq, tempq; typename RealTypeId<Cmplx>::Type c[3]; typename RealTypeId<Cmplx>::Type g[3]; qsq = q*q; tempq = qsq*q; c[0] = getTrace(q).x; c[1] = getTrace(qsq).x/2.0; c[2] = getTrace(tempq).x/3.0; g[0] = g[1] = g[2] = c[0]/3.; typename RealTypeId<Cmplx>::Type r,s,theta; s = c[1]/3. - c[0]*c[0]/18; #ifdef __CUDA_ARCH__ #define FL_UNITARIZE_EPS DEV_FL_UNITARIZE_EPS #else #define FL_UNITARIZE_EPS HOST_FL_UNITARIZE_EPS #endif #ifdef __CUDA_ARCH__ #define FL_REUNIT_SVD_REL_ERROR DEV_FL_REUNIT_SVD_REL_ERROR #define FL_REUNIT_SVD_ABS_ERROR DEV_FL_REUNIT_SVD_ABS_ERROR #else // cpu #define FL_REUNIT_SVD_REL_ERROR HOST_FL_REUNIT_SVD_REL_ERROR #define FL_REUNIT_SVD_ABS_ERROR HOST_FL_REUNIT_SVD_ABS_ERROR #endif typename RealTypeId<Cmplx>::Type cosTheta; if(fabs(s) >= FL_UNITARIZE_EPS){ const typename RealTypeId<Cmplx>::Type sqrt_s = sqrt(s); r = c[2]/2. - (c[0]/3.)*(c[1] - c[0]*c[0]/9.); cosTheta = r/(sqrt_s*sqrt_s*sqrt_s); if(fabs(cosTheta) >= 1.0){ if( r > 0 ){ theta = 0.0; }else{ theta = FL_UNITARIZE_PI; } }else{ theta = acos(cosTheta); } g[0] = c[0]/3 + 2*sqrt_s*cos( theta/3 ); g[1] = c[0]/3 + 2*sqrt_s*cos( theta/3 + FL_UNITARIZE_PI23 ); g[2] = c[0]/3 + 2*sqrt_s*cos( theta/3 + 2*FL_UNITARIZE_PI23 ); } // Check the eigenvalues, if the determinant does not match the product of the eigenvalues // return false. Then call SVD instead. typename RealTypeId<Cmplx>::Type det = getDeterminant(q).x; if( fabs(det) < FL_REUNIT_SVD_ABS_ERROR ){ return false; } if( checkRelativeError(g[0]*g[1]*g[2],det,FL_REUNIT_SVD_REL_ERROR) == false ) return false; // At this point we have finished with the c's // use these to store sqrt(g) for(int i=0; i<3; ++i) c[i] = sqrt(g[i]); // done with the g's, use these to store u, v, w g[0] = c[0]+c[1]+c[2]; g[1] = c[0]*c[1] + c[0]*c[2] + c[1]*c[2]; g[2] = c[0]*c[1]*c[2]; const typename RealTypeId<Cmplx>::Type & denominator = g[2]*(g[0]*g[1]-g[2]); c[0] = (g[0]*g[1]*g[1] - g[2]*(g[0]*g[0]+g[1]))/denominator; c[1] = (-g[0]*g[0]*g[0] - g[2] + 2.*g[0]*g[1])/denominator; c[2] = g[0]/denominator; tempq = c[1]*q + c[2]*qsq; // Add a real scalar tempq(0,0).x += c[0]; tempq(1,1).x += c[0]; tempq(2,2).x += c[0]; *res = tempq; return true; } template<class Cmplx> __host__ __device__ bool unitarizeLinkMILC(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result) { Matrix<Cmplx,3> u; #ifdef __CUDA_ARCH__ #define FL_REUNIT_SVD_ONLY DEV_FL_REUNIT_SVD_ONLY #define FL_REUNIT_ALLOW_SVD DEV_FL_REUNIT_ALLOW_SVD #else #define FL_REUNIT_SVD_ONLY HOST_FL_REUNIT_SVD_ONLY #define FL_REUNIT_ALLOW_SVD HOST_FL_REUNIT_ALLOW_SVD #endif if( !FL_REUNIT_SVD_ONLY ){ if( reciprocalRoot<Cmplx>(conj(in)*in,&u) ){ *result = in*u; return true; } } // If we've got this far, then the Caley-Hamilton unitarization // has failed. If SVD is not allowed, the unitarization has failed. 
if( !FL_REUNIT_ALLOW_SVD ) return false; Matrix<Cmplx,3> v; typename RealTypeId<Cmplx>::Type singular_values[3]; computeSVD<Cmplx>(in, u, v, singular_values); // should pass pointers to u, v I guess *result = u*conj(v); return true; } // unitarizeMILC template<class Cmplx> __host__ __device__ bool unitarizeLinkSVD(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result) { Matrix<Cmplx,3> u, v; typename RealTypeId<Cmplx>::Type singular_values[3]; computeSVD<Cmplx>(in, u, v, singular_values); // should pass pointers to u,v I guess *result = u*conj(v); #ifdef __CUDA_ARCH__ #define FL_MAX_ERROR DEV_FL_MAX_ERROR #else #define FL_MAX_ERROR HOST_FL_MAX_ERROR #endif if(isUnitary(*result,FL_MAX_ERROR)==false) { #if (!defined(__CUDA_ARCH__) || (__COMPUTE_CAPABILITY__>=200)) printf("ERROR: Link unitarity test failed\n"); printf("TOLERANCE: %g\n", FL_MAX_ERROR); #endif return false; } return true; } #undef FL_MAX_ERROR template<class Cmplx> __host__ __device__ bool unitarizeLinkNewton(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result) { Matrix<Cmplx,3> u, uinv; u = in; #ifdef __CUDA_ARCH__ #define MAX_ITER DEV_MAX_ITER #else #define MAX_ITER HOST_MAX_ITER #endif for(int i=0; i<MAX_ITER; ++i){ computeMatrixInverse(u, &uinv); u = 0.5*(u + conj(uinv)); } #undef MAX_ITER if(isUnitarizedLinkConsistent(in,u,0.0000001)==false) { #if (!defined(__CUDA_ARCH__) || (__COMPUTE_CAPABILITY__>=200)) printf("ERROR: Unitarized link is not consistent with incoming link\n"); #endif return false; } *result = u; return true; } template<class Cmplx> __global__ void getUnitarizedField(const Cmplx* inlink_even, const Cmplx* inlink_odd, Cmplx* outlink_even, Cmplx* outlink_odd, int* num_failures, const int threads) { int mem_idx = blockIdx.x*blockDim.x + threadIdx.x; if (mem_idx >= threads) return; const Cmplx* inlink; Cmplx* outlink; inlink = inlink_even; outlink = outlink_even; if(mem_idx >= Vh){ mem_idx = mem_idx - Vh; inlink = inlink_odd; outlink = outlink_odd; } // Unitarization is always done in double precision Matrix<double2,3> v, result; for(int dir=0; dir<4; ++dir){ loadLinkVariableFromArray(inlink, dir, mem_idx, Vh+INPUT_PADDING, &v); unitarizeLinkMILC(v, &result); #ifdef __CUDA_ARCH__ #define FL_MAX_ERROR DEV_FL_MAX_ERROR #define FL_CHECK_UNITARIZATION DEV_FL_CHECK_UNITARIZATION #else #define FL_MAX_ERROR HOST_FL_MAX_ERROR #define FL_CHECK_UNITARIZATION HOST_FL_CHECK_UNITARIZATION #endif if(FL_CHECK_UNITARIZATION){ if(isUnitary(result,FL_MAX_ERROR) == false) { #ifdef __CUDA_ARCH__ atomicAdd(num_failures, 1); #else (*num_failures)++; #endif } } writeLinkVariableToArray(result, dir, mem_idx, Vh+OUTPUT_PADDING, outlink); } return; } class UnitarizeLinksCuda : public Tunable { private: const cudaGaugeField &inField; cudaGaugeField &outField; int *fails; int sharedBytesPerThread() const { return 0; } int sharedBytesPerBlock(const TuneParam &) const { return 0; } // don't tune the grid dimension bool advanceGridDim(TuneParam &param) const { return false; } bool advanceBlockDim(TuneParam &param) const { bool rtn = Tunable::advanceBlockDim(param); const int threads = inField.Volume(); param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1); return rtn; } public: UnitarizeLinksCuda(const cudaGaugeField& inField, cudaGaugeField& outField, int* fails) : inField(inField), outField(outField), fails(fails) { ; } virtual ~UnitarizeLinksCuda() { ; } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, dslashTuning, verbosity); if(inField.Precision() == QUDA_SINGLE_PRECISION){ 
getUnitarizedField<<<tp.grid,tp.block>>>((float2*)inField.Even_p(), (float2*)inField.Odd_p(), (float2*)outField.Even_p(), (float2*)outField.Odd_p(), fails, inField.Volume()); }else if(inField.Precision() == QUDA_DOUBLE_PRECISION){ getUnitarizedField<<<tp.grid,tp.block>>>((double2*)inField.Even_p(), (double2*)inField.Odd_p(), (double2*)outField.Even_p(), (double2*)outField.Odd_p(), fails, inField.Volume()); } else { errorQuda("UnitarizeLinks not implemented for precision %d", inField.Precision()); } } void preTune() { ; } void postTune() { cudaMemset(fails, 0, sizeof(int)); } // reset fails counter void initTuneParam(TuneParam &param) const { Tunable::initTuneParam(param); const int threads = inField.Volume(); param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1); } /** sets default values for when tuning is disabled */ void defaultTuneParam(TuneParam &param) const { Tunable::defaultTuneParam(param); const int threads = inField.Volume(); param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1); } long long flops() const { return 0; } // FIXME: add flops counter TuneKey tuneKey() const { std::stringstream vol, aux; vol << inField.X()[0] << "x"; vol << inField.X()[1] << "x"; vol << inField.X()[2] << "x"; vol << inField.X()[3] << "x"; aux << "threads=" << inField.Volume() << ",prec=" << inField.Precision(); aux << "stride=" << inField.Stride(); return TuneKey(vol.str(), typeid(*this).name(), aux.str()); } }; // UnitarizeLinksCuda void unitarizeLinksCuda(const QudaGaugeParam& param, cudaGaugeField& inField, cudaGaugeField* outField, int* fails) { UnitarizeLinksCuda unitarizeLinks(inField, *outField, fails); unitarizeLinks.apply(0); } void unitarizeLinksCPU(const QudaGaugeParam& param, cpuGaugeField& infield, cpuGaugeField* outfield) { int num_failures = 0; Matrix<double2,3> inlink, outlink; for(int i=0; i<infield.Volume(); ++i){ for(int dir=0; dir<4; ++dir){ if(param.cpu_prec == QUDA_SINGLE_PRECISION){ copyArrayToLink(&inlink, ((float*)(infield.Gauge_p()) + (i*4 + dir)*18)); // order of arguments? if( unitarizeLinkNewton<double2>(inlink, &outlink) == false ) num_failures++; copyLinkToArray(((float*)(outfield->Gauge_p()) + (i*4 + dir)*18), outlink); }else if(param.cpu_prec == QUDA_DOUBLE_PRECISION){ copyArrayToLink(&inlink, ((double*)(infield.Gauge_p()) + (i*4 + dir)*18)); // order of arguments? if( unitarizeLinkNewton<double2>(inlink, &outlink) == false ) num_failures++; copyLinkToArray(((double*)(outfield->Gauge_p()) + (i*4 + dir)*18), outlink); } // precision? } // dir } // loop over volume return; } // CPU function which checks that the gauge field is unitary bool isUnitary(const QudaGaugeParam& param, cpuGaugeField& field, double max_error) { Matrix<double2,3> link, identity; for(int i=0; i<field.Volume(); ++i){ for(int dir=0; dir<4; ++dir){ if(param.cpu_prec == QUDA_SINGLE_PRECISION){ copyArrayToLink(&link, ((float*)(field.Gauge_p()) + (i*4 + dir)*18)); // order of arguments? }else if(param.cpu_prec == QUDA_DOUBLE_PRECISION){ copyArrayToLink(&link, ((double*)(field.Gauge_p()) + (i*4 + dir)*18)); // order of arguments? }else{ errorQuda("Unsupported precision\n"); } if(isUnitary(link,max_error) == false){ printf("Unitarity failure\n"); printf("site index = %d,\t direction = %d\n", i, dir); printLink(link); identity = conj(link)*link; printLink(identity); return false; } } // dir } // i return true; } // is unitary } // namespace quda
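/*
 * Host-only sketch of the Newton iteration used by unitarizeLinkNewton above,
 * shown for the degenerate 1x1 case (a complex scalar), where the matrix
 * inverse and conjugate transpose are trivial. The update
 * u <- 0.5 * (u + conj(1/u)) preserves the phase of the input and drives the
 * modulus to 1, so it converges to z / |z|. Plain standard C++, not QUDA code.
 */
#include <complex>
#include <cstdio>

int main()
{
    const std::complex<double> z(3.0, -4.0);   // |z| = 5
    std::complex<double> u = z;

    for (int i = 0; i < 20; ++i) {             // MAX_ITER = 20, as in the file above
        u = 0.5 * (u + std::conj(1.0 / u));
    }

    // expected: z / |z| = (0.6, -0.8), with |u| == 1 to machine precision
    std::printf("u = (%f, %f), |u| = %f\n", u.real(), u.imag(), std::abs(u));
    return 0;
}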
c7cb2e769b62e6dd0d447068af18d154aff7dce5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_common.h" #include "orc_gpu.h" #include <io/utilities/block_utils.cuh> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { namespace orc { namespace gpu { /** * @brief Initializes statistics groups * * @param[out] groups Statistics groups * @param[in] cols Column descriptors * @param[in] num_columns Number of columns * @param[in] num_rowgroups Number of rowgroups * @param[in] row_index_stride Rowgroup size in rows */ constexpr unsigned int init_threads_per_group = 32; constexpr unsigned int init_groups_per_block = 4; constexpr unsigned int init_threads_per_block = init_threads_per_group * init_groups_per_block; __global__ void __launch_bounds__(init_threads_per_block) gpu_init_statistics_groups(statistics_group *groups, const stats_column_desc *cols, uint32_t num_columns, uint32_t num_rowgroups, uint32_t row_index_stride) { __shared__ __align__(4) statistics_group group_g[init_groups_per_block]; uint32_t col_id = blockIdx.y; uint32_t chunk_id = (blockIdx.x * init_groups_per_block) + threadIdx.y; uint32_t t = threadIdx.x; statistics_group *group = &group_g[threadIdx.y]; if (chunk_id < num_rowgroups and t == 0) { uint32_t num_rows = cols[col_id].leaf_column->size(); group->col = &cols[col_id]; group->start_row = chunk_id * row_index_stride; group->num_rows = min(num_rows - min(chunk_id * row_index_stride, num_rows), row_index_stride); groups[col_id * num_rowgroups + chunk_id] = *group; } } /** * @brief Get the buffer size and offsets of encoded statistics * * @param[in,out] groups Statistics merge groups * @param[in] statistics_count Number of statistics buffers */ constexpr unsigned int buffersize_reduction_dim = 32; constexpr unsigned int block_size = buffersize_reduction_dim * buffersize_reduction_dim; constexpr unsigned int pb_fld_hdrlen = 1; constexpr unsigned int pb_fld_hdrlen16 = 2; // > 127-byte length constexpr unsigned int pb_fld_hdrlen32 = 5; // > 16KB length constexpr unsigned int pb_fldlen_int64 = 10; constexpr unsigned int pb_fldlen_float64 = 8; constexpr unsigned int pb_fldlen_decimal = 40; // Assume decimal2string fits in 40 characters constexpr unsigned int pb_fldlen_bucket1 = 1 + pb_fldlen_int64; constexpr unsigned int pb_fldlen_common = 2 * pb_fld_hdrlen + pb_fldlen_int64; template <unsigned int block_size> __global__ void __launch_bounds__(block_size, 1) gpu_init_statistics_buffersize(statistics_merge_group *groups, const statistics_chunk *chunks, uint32_t statistics_count) { using block_scan = hipcub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>; __shared__ typename block_scan::TempStorage temp_storage; volatile uint32_t stats_size = 0; uint32_t t = threadIdx.x; __syncthreads(); for (uint32_t start = 0; start < statistics_count; start += block_size) { uint32_t stats_len = 0, stats_pos; uint32_t idx = start + t; if (idx < statistics_count) { const 
stats_column_desc *col = groups[idx].col; statistics_dtype dtype = col->stats_dtype; switch (dtype) { case dtype_bool: stats_len = pb_fldlen_common + pb_fld_hdrlen + pb_fldlen_bucket1; break; case dtype_int8: case dtype_int16: case dtype_int32: case dtype_date32: case dtype_int64: case dtype_timestamp64: stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_int64); break; case dtype_float32: case dtype_float64: stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_float64); break; case dtype_decimal64: case dtype_decimal128: stats_len = pb_fldlen_common + pb_fld_hdrlen16 + 3 * (pb_fld_hdrlen + pb_fldlen_decimal); break; case dtype_string: stats_len = pb_fldlen_common + pb_fld_hdrlen32 + 3 * (pb_fld_hdrlen + pb_fldlen_int64) + chunks[idx].min_value.str_val.length + chunks[idx].max_value.str_val.length; break; default: break; } } uint32_t tmp_stats_size; block_scan(temp_storage).ExclusiveSum(stats_len, stats_pos, tmp_stats_size); stats_pos += stats_size; stats_size += tmp_stats_size; if (idx < statistics_count) { groups[idx].start_chunk = stats_pos; groups[idx].num_chunks = stats_len; } __syncthreads(); } } struct stats_state_s { uint8_t *base; ///< Output buffer start uint8_t *end; ///< Output buffer end statistics_chunk chunk; statistics_merge_group group; stats_column_desc col; // ORC stats uint64_t numberOfValues; uint8_t hasNull; }; /* * Protobuf encoding - see * https://developers.google.com/protocol-buffers/docs/encoding */ // Protobuf varint encoding for unsigned int __device__ inline uint8_t *pb_encode_uint(uint8_t *p, uint64_t v) { while (v > 0x7f) { *p++ = ((uint32_t)v | 0x80); v >>= 7; } *p++ = v; return p; } // Protobuf field encoding for unsigned int __device__ inline uint8_t *pb_put_uint(uint8_t *p, uint32_t id, uint64_t v) { p[0] = id * 8 + PB_TYPE_VARINT; // NOTE: Assumes id < 16 return pb_encode_uint(p + 1, v); } // Protobuf field encoding for signed int __device__ inline uint8_t *pb_put_int(uint8_t *p, uint32_t id, int64_t v) { int64_t s = (v < 0); return pb_put_uint(p, id, (v ^ -s) * 2 + s); } // Protobuf field encoding for 'packed' unsigned int (single value) __device__ inline uint8_t *pb_put_packed_uint(uint8_t *p, uint32_t id, uint64_t v) { uint8_t *p2 = pb_encode_uint(p + 2, v); p[0] = id * 8 + PB_TYPE_FIXEDLEN; p[1] = static_cast<uint8_t>(p2 - (p + 2)); return p2; } // Protobuf field encoding for binary/string __device__ inline uint8_t *pb_put_binary(uint8_t *p, uint32_t id, const void *bytes, uint32_t len) { p[0] = id * 8 + PB_TYPE_FIXEDLEN; p = pb_encode_uint(p + 1, len); memcpy(p, bytes, len); return p + len; } // Protobuf field encoding for 64-bit raw encoding (double) __device__ inline uint8_t *pb_put_fixed64(uint8_t *p, uint32_t id, const void *raw64) { p[0] = id * 8 + PB_TYPE_FIXED64; memcpy(p + 1, raw64, 8); return p + 9; } /** * @brief Encode statistics in ORC protobuf format * * @param[in,out] groups Statistics merge groups * @param[in,out] chunks Statistics data * @param[in] statistics_count Number of statistics buffers * * ORC statistics format from https://orc.apache.org/specification/ORCv1/ * * message ColumnStatistics { * // the number of values * optional uint64 numberOfValues = 1; * // At most one of these has a value for any column * optional IntegerStatistics intStatistics = 2; * optional DoubleStatistics doubleStatistics = 3; * optional StringStatistics stringStatistics = 4; * optional BucketStatistics bucketStatistics = 5; * optional DecimalStatistics decimalStatistics = 6; * optional DateStatistics 
dateStatistics = 7; * optional BinaryStatistics binaryStatistics = 8; * optional TimestampStatistics timestampStatistics = 9; * optional bool hasNull = 10; * } */ constexpr unsigned int encode_threads_per_chunk = 32; constexpr unsigned int encode_chunks_per_block = 4; constexpr unsigned int encode_threads_per_block = encode_threads_per_chunk * encode_chunks_per_block; __global__ void __launch_bounds__(encode_threads_per_block) gpu_encode_statistics(uint8_t *blob_bfr, statistics_merge_group *groups, const statistics_chunk *chunks, uint32_t statistics_count) { __shared__ __align__(8) stats_state_s state_g[encode_chunks_per_block]; uint32_t t = threadIdx.x; uint32_t idx = blockIdx.x * encode_chunks_per_block + threadIdx.y; stats_state_s *const s = &state_g[threadIdx.y]; // Encode and update actual bfr size if (idx < statistics_count && t == 0) { s->chunk = chunks[idx]; s->group = groups[idx]; s->col = *(s->group.col); s->base = blob_bfr + s->group.start_chunk; s->end = blob_bfr + s->group.start_chunk + s->group.num_chunks; uint8_t *cur = pb_put_uint(s->base, 1, s->chunk.non_nulls); uint8_t *fld_start = cur; switch (s->col.stats_dtype) { case dtype_int8: case dtype_int16: case dtype_int32: case dtype_int64: // intStatistics = 2 // message IntegerStatistics { // optional sint64 minimum = 1; // optional sint64 maximum = 2; // optional sint64 sum = 3; // } if (s->chunk.has_minmax || s->chunk.has_sum) { *cur = 2 * 8 + PB_TYPE_FIXEDLEN; cur += 2; if (s->chunk.has_minmax) { cur = pb_put_int(cur, 1, s->chunk.min_value.i_val); cur = pb_put_int(cur, 2, s->chunk.max_value.i_val); } if (s->chunk.has_sum) { cur = pb_put_int(cur, 3, s->chunk.sum.i_val); } fld_start[1] = cur - (fld_start + 2); } break; case dtype_float32: case dtype_float64: // doubleStatistics = 3 // message DoubleStatistics { // optional double minimum = 1; // optional double maximum = 2; // optional double sum = 3; // } if (s->chunk.has_minmax) { *cur = 3 * 8 + PB_TYPE_FIXEDLEN; cur += 2; cur = pb_put_fixed64(cur, 1, &s->chunk.min_value.fp_val); cur = pb_put_fixed64(cur, 2, &s->chunk.max_value.fp_val); fld_start[1] = cur - (fld_start + 2); } break; case dtype_string: // stringStatistics = 4 // message StringStatistics { // optional string minimum = 1; // optional string maximum = 2; // optional sint64 sum = 3; // sum will store the total length of all strings // } if (s->chunk.has_minmax && s->chunk.has_sum) { uint32_t sz = (pb_put_uint(cur, 3, s->chunk.sum.i_val) - cur) + (pb_put_uint(cur, 1, s->chunk.min_value.str_val.length) - cur) + (pb_put_uint(cur, 2, s->chunk.max_value.str_val.length) - cur) + s->chunk.min_value.str_val.length + s->chunk.max_value.str_val.length; cur[0] = 4 * 8 + PB_TYPE_FIXEDLEN; cur = pb_encode_uint(cur + 1, sz); cur = pb_put_binary( cur, 1, s->chunk.min_value.str_val.ptr, s->chunk.min_value.str_val.length); cur = pb_put_binary( cur, 2, s->chunk.max_value.str_val.ptr, s->chunk.max_value.str_val.length); cur = pb_put_uint(cur, 3, s->chunk.sum.i_val); } break; case dtype_bool: // bucketStatistics = 5 // message BucketStatistics { // repeated uint64 count = 1 [packed=true]; // } if (s->chunk.has_sum) { // Sum is equal to the number of 'true' values cur[0] = 5 * 8 + PB_TYPE_FIXEDLEN; cur = pb_put_packed_uint(cur + 2, 1, s->chunk.sum.i_val); fld_start[1] = cur - (fld_start + 2); } break; case dtype_decimal64: case dtype_decimal128: // decimalStatistics = 6 // message DecimalStatistics { // optional string minimum = 1; // optional string maximum = 2; // optional string sum = 3; // } if (s->chunk.has_minmax) { // TODO: 
Decimal support (decimal min/max stored as strings) } break; case dtype_date32: // dateStatistics = 7 // message DateStatistics { // min,max values saved as days since epoch // optional sint32 minimum = 1; // optional sint32 maximum = 2; // } if (s->chunk.has_minmax) { cur[0] = 7 * 8 + PB_TYPE_FIXEDLEN; cur += 2; cur = pb_put_int(cur, 1, s->chunk.min_value.i_val); cur = pb_put_int(cur, 2, s->chunk.max_value.i_val); fld_start[1] = cur - (fld_start + 2); } break; case dtype_timestamp64: // timestampStatistics = 9 // message TimestampStatistics { // optional sint64 minimum = 1; // min,max values saved as milliseconds since epoch // optional sint64 maximum = 2; // optional sint64 minimumUtc = 3; // min,max values saved as milliseconds since UNIX epoch // optional sint64 maximumUtc = 4; // } if (s->chunk.has_minmax) { cur[0] = 9 * 8 + PB_TYPE_FIXEDLEN; cur += 2; cur = pb_put_int(cur, 3, s->chunk.min_value.i_val); // minimumUtc cur = pb_put_int(cur, 4, s->chunk.max_value.i_val); // maximumUtc fld_start[1] = cur - (fld_start + 2); } break; default: break; } groups[idx].num_chunks = static_cast<uint32_t>(cur - s->base); } } /** * @brief Launches kernels to initialize statistics collection * * @param[out] groups Statistics groups (rowgroup-level) * @param[in] cols Column descriptors * @param[in] num_columns Number of columns * @param[in] num_rowgroups Number of rowgroups * @param[in] row_index_stride Rowgroup size in rows * @param[in] stream CUDA stream to use, default `rmm::cuda_stream_default` */ void orc_init_statistics_groups(statistics_group *groups, const stats_column_desc *cols, uint32_t num_columns, uint32_t num_rowgroups, uint32_t row_index_stride, rmm::cuda_stream_view stream) { dim3 dim_grid((num_rowgroups + init_groups_per_block - 1) / init_groups_per_block, num_columns); dim3 dim_block(init_threads_per_group, init_groups_per_block); hipLaunchKernelGGL(( gpu_init_statistics_groups), dim3(dim_grid), dim3(dim_block), 0, stream.value(), groups, cols, num_columns, num_rowgroups, row_index_stride); } /** * @brief Launches kernels to return statistics buffer offsets and sizes * * @param[in,out] groups Statistics merge groups * @param[in] chunks Statistics chunks * @param[in] statistics_count Number of statistics buffers to encode * @param[in] stream CUDA stream to use, default `rmm::cuda_stream_default` */ void orc_init_statistics_buffersize(statistics_merge_group *groups, const statistics_chunk *chunks, uint32_t statistics_count, rmm::cuda_stream_view stream) { hipLaunchKernelGGL(( gpu_init_statistics_buffersize<block_size>) , dim3(1), dim3(block_size), 0, stream.value(), groups, chunks, statistics_count); } /** * @brief Launches kernel to encode statistics in ORC protobuf format * * @param[out] blob_bfr Output buffer for statistics blobs * @param[in,out] groups Statistics merge groups * @param[in,out] chunks Statistics data * @param[in] statistics_count Number of statistics buffers */ void orc_encode_statistics(uint8_t *blob_bfr, statistics_merge_group *groups, const statistics_chunk *chunks, uint32_t statistics_count, rmm::cuda_stream_view stream) { unsigned int num_blocks = (statistics_count + encode_chunks_per_block - 1) / encode_chunks_per_block; dim3 dim_block(encode_threads_per_chunk, encode_chunks_per_block); hipLaunchKernelGGL(( gpu_encode_statistics), dim3(num_blocks), dim3(dim_block), 0, stream.value(), blob_bfr, groups, chunks, statistics_count); } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
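/*
 * Host-side sketch (not part of cudf) of the protobuf varint and zigzag
 * encodings that pb_encode_uint and pb_put_int above implement on the device,
 * so the byte layout can be checked without a GPU. Function names are mine.
 */
#include <cstdint>
#include <cstdio>
#include <vector>

// Varint: 7 payload bits per byte, MSB set on every byte except the last.
static void encode_uint(std::vector<uint8_t>& out, uint64_t v)
{
    while (v > 0x7f) {
        out.push_back(static_cast<uint8_t>(v) | 0x80);
        v >>= 7;
    }
    out.push_back(static_cast<uint8_t>(v));
}

// Zigzag maps signed to unsigned so small negative values stay short:
// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ... (the same (v ^ -s) * 2 + s trick as above).
static void encode_sint(std::vector<uint8_t>& out, int64_t v)
{
    const int64_t s = (v < 0);
    encode_uint(out, static_cast<uint64_t>((v ^ -s) * 2 + s));
}

int main()
{
    std::vector<uint8_t> buf;
    encode_uint(buf, 300);   // canonical protobuf example: encodes as 0xac 0x02
    encode_sint(buf, -1);    // zigzag -> 1 -> single byte 0x01
    for (uint8_t b : buf) std::printf("%02x ", b);
    std::printf("\n");
    return 0;
}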
c7cb2e769b62e6dd0d447068af18d154aff7dce5.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_common.h" #include "orc_gpu.h" #include <io/utilities/block_utils.cuh> #include <rmm/cuda_stream_view.hpp> namespace cudf { namespace io { namespace orc { namespace gpu { /** * @brief Initializes statistics groups * * @param[out] groups Statistics groups * @param[in] cols Column descriptors * @param[in] num_columns Number of columns * @param[in] num_rowgroups Number of rowgroups * @param[in] row_index_stride Rowgroup size in rows */ constexpr unsigned int init_threads_per_group = 32; constexpr unsigned int init_groups_per_block = 4; constexpr unsigned int init_threads_per_block = init_threads_per_group * init_groups_per_block; __global__ void __launch_bounds__(init_threads_per_block) gpu_init_statistics_groups(statistics_group *groups, const stats_column_desc *cols, uint32_t num_columns, uint32_t num_rowgroups, uint32_t row_index_stride) { __shared__ __align__(4) statistics_group group_g[init_groups_per_block]; uint32_t col_id = blockIdx.y; uint32_t chunk_id = (blockIdx.x * init_groups_per_block) + threadIdx.y; uint32_t t = threadIdx.x; statistics_group *group = &group_g[threadIdx.y]; if (chunk_id < num_rowgroups and t == 0) { uint32_t num_rows = cols[col_id].leaf_column->size(); group->col = &cols[col_id]; group->start_row = chunk_id * row_index_stride; group->num_rows = min(num_rows - min(chunk_id * row_index_stride, num_rows), row_index_stride); groups[col_id * num_rowgroups + chunk_id] = *group; } } /** * @brief Get the buffer size and offsets of encoded statistics * * @param[in,out] groups Statistics merge groups * @param[in] statistics_count Number of statistics buffers */ constexpr unsigned int buffersize_reduction_dim = 32; constexpr unsigned int block_size = buffersize_reduction_dim * buffersize_reduction_dim; constexpr unsigned int pb_fld_hdrlen = 1; constexpr unsigned int pb_fld_hdrlen16 = 2; // > 127-byte length constexpr unsigned int pb_fld_hdrlen32 = 5; // > 16KB length constexpr unsigned int pb_fldlen_int64 = 10; constexpr unsigned int pb_fldlen_float64 = 8; constexpr unsigned int pb_fldlen_decimal = 40; // Assume decimal2string fits in 40 characters constexpr unsigned int pb_fldlen_bucket1 = 1 + pb_fldlen_int64; constexpr unsigned int pb_fldlen_common = 2 * pb_fld_hdrlen + pb_fldlen_int64; template <unsigned int block_size> __global__ void __launch_bounds__(block_size, 1) gpu_init_statistics_buffersize(statistics_merge_group *groups, const statistics_chunk *chunks, uint32_t statistics_count) { using block_scan = cub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>; __shared__ typename block_scan::TempStorage temp_storage; volatile uint32_t stats_size = 0; uint32_t t = threadIdx.x; __syncthreads(); for (uint32_t start = 0; start < statistics_count; start += block_size) { uint32_t stats_len = 0, stats_pos; uint32_t idx = start + t; if (idx < statistics_count) { const stats_column_desc *col = groups[idx].col; statistics_dtype dtype = col->stats_dtype; switch (dtype) { case 
dtype_bool: stats_len = pb_fldlen_common + pb_fld_hdrlen + pb_fldlen_bucket1; break; case dtype_int8: case dtype_int16: case dtype_int32: case dtype_date32: case dtype_int64: case dtype_timestamp64: stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_int64); break; case dtype_float32: case dtype_float64: stats_len = pb_fldlen_common + pb_fld_hdrlen + 3 * (pb_fld_hdrlen + pb_fldlen_float64); break; case dtype_decimal64: case dtype_decimal128: stats_len = pb_fldlen_common + pb_fld_hdrlen16 + 3 * (pb_fld_hdrlen + pb_fldlen_decimal); break; case dtype_string: stats_len = pb_fldlen_common + pb_fld_hdrlen32 + 3 * (pb_fld_hdrlen + pb_fldlen_int64) + chunks[idx].min_value.str_val.length + chunks[idx].max_value.str_val.length; break; default: break; } } uint32_t tmp_stats_size; block_scan(temp_storage).ExclusiveSum(stats_len, stats_pos, tmp_stats_size); stats_pos += stats_size; stats_size += tmp_stats_size; if (idx < statistics_count) { groups[idx].start_chunk = stats_pos; groups[idx].num_chunks = stats_len; } __syncthreads(); } } struct stats_state_s { uint8_t *base; ///< Output buffer start uint8_t *end; ///< Output buffer end statistics_chunk chunk; statistics_merge_group group; stats_column_desc col; // ORC stats uint64_t numberOfValues; uint8_t hasNull; }; /* * Protobuf encoding - see * https://developers.google.com/protocol-buffers/docs/encoding */ // Protobuf varint encoding for unsigned int __device__ inline uint8_t *pb_encode_uint(uint8_t *p, uint64_t v) { while (v > 0x7f) { *p++ = ((uint32_t)v | 0x80); v >>= 7; } *p++ = v; return p; } // Protobuf field encoding for unsigned int __device__ inline uint8_t *pb_put_uint(uint8_t *p, uint32_t id, uint64_t v) { p[0] = id * 8 + PB_TYPE_VARINT; // NOTE: Assumes id < 16 return pb_encode_uint(p + 1, v); } // Protobuf field encoding for signed int __device__ inline uint8_t *pb_put_int(uint8_t *p, uint32_t id, int64_t v) { int64_t s = (v < 0); return pb_put_uint(p, id, (v ^ -s) * 2 + s); } // Protobuf field encoding for 'packed' unsigned int (single value) __device__ inline uint8_t *pb_put_packed_uint(uint8_t *p, uint32_t id, uint64_t v) { uint8_t *p2 = pb_encode_uint(p + 2, v); p[0] = id * 8 + PB_TYPE_FIXEDLEN; p[1] = static_cast<uint8_t>(p2 - (p + 2)); return p2; } // Protobuf field encoding for binary/string __device__ inline uint8_t *pb_put_binary(uint8_t *p, uint32_t id, const void *bytes, uint32_t len) { p[0] = id * 8 + PB_TYPE_FIXEDLEN; p = pb_encode_uint(p + 1, len); memcpy(p, bytes, len); return p + len; } // Protobuf field encoding for 64-bit raw encoding (double) __device__ inline uint8_t *pb_put_fixed64(uint8_t *p, uint32_t id, const void *raw64) { p[0] = id * 8 + PB_TYPE_FIXED64; memcpy(p + 1, raw64, 8); return p + 9; } /** * @brief Encode statistics in ORC protobuf format * * @param[in,out] groups Statistics merge groups * @param[in,out] chunks Statistics data * @param[in] statistics_count Number of statistics buffers * * ORC statistics format from https://orc.apache.org/specification/ORCv1/ * * message ColumnStatistics { * // the number of values * optional uint64 numberOfValues = 1; * // At most one of these has a value for any column * optional IntegerStatistics intStatistics = 2; * optional DoubleStatistics doubleStatistics = 3; * optional StringStatistics stringStatistics = 4; * optional BucketStatistics bucketStatistics = 5; * optional DecimalStatistics decimalStatistics = 6; * optional DateStatistics dateStatistics = 7; * optional BinaryStatistics binaryStatistics = 8; * optional TimestampStatistics 
timestampStatistics = 9; * optional bool hasNull = 10; * } */ constexpr unsigned int encode_threads_per_chunk = 32; constexpr unsigned int encode_chunks_per_block = 4; constexpr unsigned int encode_threads_per_block = encode_threads_per_chunk * encode_chunks_per_block; __global__ void __launch_bounds__(encode_threads_per_block) gpu_encode_statistics(uint8_t *blob_bfr, statistics_merge_group *groups, const statistics_chunk *chunks, uint32_t statistics_count) { __shared__ __align__(8) stats_state_s state_g[encode_chunks_per_block]; uint32_t t = threadIdx.x; uint32_t idx = blockIdx.x * encode_chunks_per_block + threadIdx.y; stats_state_s *const s = &state_g[threadIdx.y]; // Encode and update actual bfr size if (idx < statistics_count && t == 0) { s->chunk = chunks[idx]; s->group = groups[idx]; s->col = *(s->group.col); s->base = blob_bfr + s->group.start_chunk; s->end = blob_bfr + s->group.start_chunk + s->group.num_chunks; uint8_t *cur = pb_put_uint(s->base, 1, s->chunk.non_nulls); uint8_t *fld_start = cur; switch (s->col.stats_dtype) { case dtype_int8: case dtype_int16: case dtype_int32: case dtype_int64: // intStatistics = 2 // message IntegerStatistics { // optional sint64 minimum = 1; // optional sint64 maximum = 2; // optional sint64 sum = 3; // } if (s->chunk.has_minmax || s->chunk.has_sum) { *cur = 2 * 8 + PB_TYPE_FIXEDLEN; cur += 2; if (s->chunk.has_minmax) { cur = pb_put_int(cur, 1, s->chunk.min_value.i_val); cur = pb_put_int(cur, 2, s->chunk.max_value.i_val); } if (s->chunk.has_sum) { cur = pb_put_int(cur, 3, s->chunk.sum.i_val); } fld_start[1] = cur - (fld_start + 2); } break; case dtype_float32: case dtype_float64: // doubleStatistics = 3 // message DoubleStatistics { // optional double minimum = 1; // optional double maximum = 2; // optional double sum = 3; // } if (s->chunk.has_minmax) { *cur = 3 * 8 + PB_TYPE_FIXEDLEN; cur += 2; cur = pb_put_fixed64(cur, 1, &s->chunk.min_value.fp_val); cur = pb_put_fixed64(cur, 2, &s->chunk.max_value.fp_val); fld_start[1] = cur - (fld_start + 2); } break; case dtype_string: // stringStatistics = 4 // message StringStatistics { // optional string minimum = 1; // optional string maximum = 2; // optional sint64 sum = 3; // sum will store the total length of all strings // } if (s->chunk.has_minmax && s->chunk.has_sum) { uint32_t sz = (pb_put_uint(cur, 3, s->chunk.sum.i_val) - cur) + (pb_put_uint(cur, 1, s->chunk.min_value.str_val.length) - cur) + (pb_put_uint(cur, 2, s->chunk.max_value.str_val.length) - cur) + s->chunk.min_value.str_val.length + s->chunk.max_value.str_val.length; cur[0] = 4 * 8 + PB_TYPE_FIXEDLEN; cur = pb_encode_uint(cur + 1, sz); cur = pb_put_binary( cur, 1, s->chunk.min_value.str_val.ptr, s->chunk.min_value.str_val.length); cur = pb_put_binary( cur, 2, s->chunk.max_value.str_val.ptr, s->chunk.max_value.str_val.length); cur = pb_put_uint(cur, 3, s->chunk.sum.i_val); } break; case dtype_bool: // bucketStatistics = 5 // message BucketStatistics { // repeated uint64 count = 1 [packed=true]; // } if (s->chunk.has_sum) { // Sum is equal to the number of 'true' values cur[0] = 5 * 8 + PB_TYPE_FIXEDLEN; cur = pb_put_packed_uint(cur + 2, 1, s->chunk.sum.i_val); fld_start[1] = cur - (fld_start + 2); } break; case dtype_decimal64: case dtype_decimal128: // decimalStatistics = 6 // message DecimalStatistics { // optional string minimum = 1; // optional string maximum = 2; // optional string sum = 3; // } if (s->chunk.has_minmax) { // TODO: Decimal support (decimal min/max stored as strings) } break; case dtype_date32: // dateStatistics = 
7 // message DateStatistics { // min,max values saved as days since epoch // optional sint32 minimum = 1; // optional sint32 maximum = 2; // } if (s->chunk.has_minmax) { cur[0] = 7 * 8 + PB_TYPE_FIXEDLEN; cur += 2; cur = pb_put_int(cur, 1, s->chunk.min_value.i_val); cur = pb_put_int(cur, 2, s->chunk.max_value.i_val); fld_start[1] = cur - (fld_start + 2); } break; case dtype_timestamp64: // timestampStatistics = 9 // message TimestampStatistics { // optional sint64 minimum = 1; // min,max values saved as milliseconds since epoch // optional sint64 maximum = 2; // optional sint64 minimumUtc = 3; // min,max values saved as milliseconds since UNIX epoch // optional sint64 maximumUtc = 4; // } if (s->chunk.has_minmax) { cur[0] = 9 * 8 + PB_TYPE_FIXEDLEN; cur += 2; cur = pb_put_int(cur, 3, s->chunk.min_value.i_val); // minimumUtc cur = pb_put_int(cur, 4, s->chunk.max_value.i_val); // maximumUtc fld_start[1] = cur - (fld_start + 2); } break; default: break; } groups[idx].num_chunks = static_cast<uint32_t>(cur - s->base); } } /** * @brief Launches kernels to initialize statistics collection * * @param[out] groups Statistics groups (rowgroup-level) * @param[in] cols Column descriptors * @param[in] num_columns Number of columns * @param[in] num_rowgroups Number of rowgroups * @param[in] row_index_stride Rowgroup size in rows * @param[in] stream CUDA stream to use, default `rmm::cuda_stream_default` */ void orc_init_statistics_groups(statistics_group *groups, const stats_column_desc *cols, uint32_t num_columns, uint32_t num_rowgroups, uint32_t row_index_stride, rmm::cuda_stream_view stream) { dim3 dim_grid((num_rowgroups + init_groups_per_block - 1) / init_groups_per_block, num_columns); dim3 dim_block(init_threads_per_group, init_groups_per_block); gpu_init_statistics_groups<<<dim_grid, dim_block, 0, stream.value()>>>( groups, cols, num_columns, num_rowgroups, row_index_stride); } /** * @brief Launches kernels to return statistics buffer offsets and sizes * * @param[in,out] groups Statistics merge groups * @param[in] chunks Statistics chunks * @param[in] statistics_count Number of statistics buffers to encode * @param[in] stream CUDA stream to use, default `rmm::cuda_stream_default` */ void orc_init_statistics_buffersize(statistics_merge_group *groups, const statistics_chunk *chunks, uint32_t statistics_count, rmm::cuda_stream_view stream) { gpu_init_statistics_buffersize<block_size> <<<1, block_size, 0, stream.value()>>>(groups, chunks, statistics_count); } /** * @brief Launches kernel to encode statistics in ORC protobuf format * * @param[out] blob_bfr Output buffer for statistics blobs * @param[in,out] groups Statistics merge groups * @param[in,out] chunks Statistics data * @param[in] statistics_count Number of statistics buffers */ void orc_encode_statistics(uint8_t *blob_bfr, statistics_merge_group *groups, const statistics_chunk *chunks, uint32_t statistics_count, rmm::cuda_stream_view stream) { unsigned int num_blocks = (statistics_count + encode_chunks_per_block - 1) / encode_chunks_per_block; dim3 dim_block(encode_threads_per_chunk, encode_chunks_per_block); gpu_encode_statistics<<<num_blocks, dim_block, 0, stream.value()>>>( blob_bfr, groups, chunks, statistics_count); } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
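A minimal host-side sketch (not part of the dataset entry above; names and values are illustrative) of the varint + zigzag scheme that pb_encode_uint and pb_put_int implement, handy for checking an encoded statistics blob by hand:

#include <cstdint>
#include <cstdio>

// Same loop as pb_encode_uint above: 7 payload bits per byte, high bit = continuation.
static uint8_t *varint(uint8_t *p, uint64_t v) {
    while (v > 0x7f) { *p++ = (uint8_t)(v | 0x80); v >>= 7; }
    *p++ = (uint8_t)v;
    return p;
}

int main() {
    uint8_t buf[16];
    // zigzag mapping used by pb_put_int: v >= 0 -> 2*v, v < 0 -> 2*|v| - 1,
    // written exactly as in the device code: (v ^ -s) * 2 + s with s = (v < 0).
    int64_t v = -3;
    int64_t s = (v < 0);
    uint8_t *end = varint(buf, (uint64_t)((v ^ -s) * 2 + s)); // -3 -> 5 -> single byte 0x05
    printf("%d byte(s), first = 0x%02x\n", (int)(end - buf), buf[0]);
    return 0;
}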
e7cffbb992ac027dbc908a3fc3ba9af33fe2498d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"Octree.test.cuh" #include"Octree.cuh" #include"../../../../../Namespaces/MeshReader/MeshReader.test.h" #include"../../../../../Namespaces/Tests/Tests.h" #include"../../../../../Namespaces/Windows/Windows.h" #include"../../../Components/Transform/Transform.h" #include"../../../Components/Shaders/DefaultShader/DefaultShader.cuh" #include<iomanip> #include<thread> #include<mutex> namespace OctreeTest { namespace Private { // ######################################## // ############# PIXEL COLOR: ############# // ######################################## //#define USE_NORMAL_COLOR __device__ __host__ inline static void colorPixel(const Octree<Renderable<BakedTriFace> > &octree, Color &pixel, const Transform &trans, int i, int j, int width, int height, int /*frame*/) { Octree<Renderable<BakedTriFace> >::RaycastHit hit; Vector3 dir((float)(j - width / 2), (float)(height / 2 - i), (float)(width / 2)); Ray r = trans.ray(dir.normalized()); if (octree.cast(r, hit)) { #ifdef USE_NORMAL_COLOR Vector3 normal = (hit.object.norm.massCenter(hit.object.vert.getMases(hit.hitPoint)).normalized() >> trans) / 2.0f + Vector3(0.5f, 0.5f, 0.5f); normal = Vector3(normal.x, normal.y, 1.0f - normal.z) * (64.0f / max(hit.hitDistance, 64.0f)); pixel(normal.x, normal.y, normal.z, 1.0f); #else DefaultShader shad; /* Photon ph(Ray(hit.hitPoint - Vector3(0, 0, 1000000000), Vector3::front()), ColorRGB(1, 1, 1)); /*/ Photon ph(trans.frontRay(), ColorRGB(1, 1, 1)); //*/ pixel = shad.cast(DefaultShader::ShaderHitInfo { &hit.object->object, ph, hit.hitPoint, r.origin }).observed.color; #endif } else { #ifdef USE_NORMAL_COLOR pixel((((i ^ j) + frame) % 256) / 256.0f, (((i & j) - frame) % 256) / 256.0f, (((i | j) + frame) % 256) / 256.0f); #else pixel(0, 0, 0); #endif } } // ######################################## // ####### DEVICE DATA DUMP KERNEL: ####### // ######################################## /* __global__ static void dumpDeviceOctree(const Octree<> *octree) { octree->dump(); } //*/ #define OCTREE_TEST_KERNELS_BLOCK_WIDTH 8 #define OCTREE_TEST_KERNELS_BLOCK_HEIGHT 16 // ######################################## // ### DEVICE RENDER KERNEL DIMENSIONS: ### // ######################################## __device__ __host__ inline static int numThreads() { return (OCTREE_TEST_KERNELS_BLOCK_WIDTH * OCTREE_TEST_KERNELS_BLOCK_HEIGHT); } __device__ __host__ inline static int numBlocksWidth(int width) { return ((width + OCTREE_TEST_KERNELS_BLOCK_WIDTH - 1) / OCTREE_TEST_KERNELS_BLOCK_WIDTH); } __device__ __host__ inline static int numBlocksHeight(int height) { return ((height + OCTREE_TEST_KERNELS_BLOCK_HEIGHT - 1) / OCTREE_TEST_KERNELS_BLOCK_HEIGHT); } __device__ __host__ inline static int numBlocks(int width, int height) { return (numBlocksWidth(width) * numBlocksHeight(height)); } // ######################################## // ######### DEVICE RENDER KERNEL: ######## // ######################################## __global__ static void color(const Octree<Renderable<BakedTriFace> > *octree, Color *image, const Transform trans, int width, int height, int frame) { /* // This should not compile: Octree<> oct = Octree<>(); Octree<> oct1 = oct; Octree<> oct2(oct1); //*/ register int blocksWidth = numBlocksWidth(width); register int lineId = (blockIdx.x / blocksWidth); register int columnId = (blockIdx.x - (lineId * blocksWidth)); register int threadLine = (threadIdx.x / OCTREE_TEST_KERNELS_BLOCK_WIDTH); register int threadColumn = (threadIdx.x - 
(threadLine * OCTREE_TEST_KERNELS_BLOCK_WIDTH)); register int x = columnId * OCTREE_TEST_KERNELS_BLOCK_WIDTH + threadColumn; register int y = lineId * OCTREE_TEST_KERNELS_BLOCK_HEIGHT + threadLine; if (x < width && y < height) colorPixel(*octree, image[y * width + x], trans, y, x, width, height, frame); } #undef OCTREE_TEST_KERNELS_BLOCK_WIDTH #undef OCTREE_TEST_KERNELS_BLOCK_HEIGHT // ######################################## // ####### MACRO CONFIGURATION DUMP: ###### // ######################################## inline static void dumpConfiguration() { std::cout << "########### DUMPING OCTREE COMPILE PARAMETERS ##########" << std::endl; std::cout << "OCTREE_POLYCOUNT_TO_SPLIT_NODE: " << OCTREE_POLYCOUNT_TO_SPLIT_NODE << std::endl; std::cout << "OCTREE_VOXEL_LOCAL_CAPACITY: " << OCTREE_VOXEL_LOCAL_CAPACITY << std::endl; std::cout << "OCTREE_MAX_DEPTH: " << OCTREE_MAX_DEPTH << std::endl; std::cout << std::endl; } // ######################################## // ######## OCTREE CONSTRUCTION: ########## // ######################################## inline static Octree<Renderable<BakedTriFace> > constructOctree(const Stacktor<PolyMesh> &meshes, Octree<Renderable<BakedTriFace> > *&devOctree) { Octree<Renderable<BakedTriFace> > octree; Vertex minVert = meshes[0].vertex(0); Vertex maxVert = meshes[0].vertex(0); for (int i = 0; i < meshes.size(); i++) for (int j = 0; j < meshes[i].vertextCount(); j++) { if (meshes[i].vertex(j).x < minVert.x) minVert.x = meshes[i].vertex(j).x; else if (meshes[i].vertex(j).x > maxVert.x) maxVert.x = meshes[i].vertex(j).x; if (meshes[i].vertex(j).y < minVert.y) minVert.y = meshes[i].vertex(j).y; else if (meshes[i].vertex(j).y > maxVert.y) maxVert.y = meshes[i].vertex(j).y; if (meshes[i].vertex(j).z < minVert.z) minVert.z = meshes[i].vertex(j).z; else if (meshes[i].vertex(j).z > maxVert.z) maxVert.z = meshes[i].vertex(j).z; } octree.reinit(AABB(minVert - EPSILON_VECTOR, maxVert + EPSILON_VECTOR)); octree.reset(); for (int i = 0; i < meshes.size(); i++) { BakedTriMesh mesh; meshes[i].bake(mesh); //* for (int j = 0; j < mesh.size(); j++) octree.push(Renderable<BakedTriFace>(mesh[j], 0)); std::cout << "PUSHED " << i << std::endl; /*/ octree.put(mesh); std::cout << "PUT " << i << std::endl; //*/ } //* std::cout << "BUILDING..." << std::endl; octree.build(); /*/ std::cout << "OPTIMIZING..." << std::endl; octree.reduceNodes(); //*/ std::cout << "UPLOADING..." 
<< std::endl; devOctree = octree.upload(); if (devOctree != NULL) std::cout << "OCTREE UPLOADED" << std::endl; else std::cout << "OCTREE UPLOAD FAILED" << std::endl; //Octree<Renderable<BakedTriFace> > tmpClone = octree; return octree; } class OctreeTestRenderContext { private: Octree<Renderable<BakedTriFace> > octree, *devOctree; char windowData[sizeof(Windows::Window)]; Matrix<Color> *image, *imageBack; Color *devColor, *devColorBack; int devColWidth, devColHeight; std::mutex colorLock; std::condition_variable colorLockWait; volatile bool frameReady; bool onDevice, spacePressed; #ifdef _WIN32 POINT cursor; #endif Vector3 euler; Transform trans; Vector3 pivot; float distance; bool mouseWasDown; hipStream_t stream; bool canRunOnCuda; struct CPUrenderThread { std::thread thread; std::condition_variable wait; std::condition_variable realease; volatile bool b; }; CPUrenderThread *cpuThreads; int cpuThreadCount; inline Windows::Window& window() { return (*((Windows::Window *)windowData)); } // ######################################## // ############ DEVICE SWITCH: ############ // ######################################## inline void switchDevice() { if ( #ifdef _WIN32 GetAsyncKeyState(VK_SPACE) & 0x8000 #else false #endif ) { if (!spacePressed) { std::cout << "Changing state..."; colorLock.lock(); onDevice = ((!onDevice) && canRunOnCuda); colorLock.unlock(); if (onDevice) std::cout << "USING DEVICE" << std::endl; else std::cout << "USING HOST" << std::endl; } spacePressed = true; } else spacePressed = false; } // ######################################## // ############### ROTATION: ############## // ######################################## inline void rotate() { if (window().inFocus()) { if ( #ifdef _WIN32 GetKeyState(VK_LBUTTON) & 0x100 #else false #endif ) { #ifdef _WIN32 POINT newCursor; GetCursorPos(&newCursor); if (mouseWasDown) { euler.y += (newCursor.x - cursor.x) / 4.0f; euler.x += (newCursor.y - cursor.y) / 4.0f; if (euler.x <= -80) euler.x = -80; else if (euler.x >= 80) euler.x = 80; trans.setEulerAngles(euler); } else mouseWasDown = true; cursor = newCursor; #endif } else mouseWasDown = false; } else mouseWasDown = false; trans.setPosition(pivot + trans.back() * distance); } // ######################################## // ######## DEVICE RENDER ROUTINE: ######## // ######################################## inline bool renderOnDevice(int width, int height, int frame) { if (devColWidth != width || devColHeight != height || devColor == NULL) { if (devColor != NULL) hipFree(devColor); if (hipMalloc(&devColor, sizeof(Color) * max(1, width * height)) != hipSuccess) { std::cout << "CUDA_MALLOC PROBLEM" << std::endl; return false; } colorLock.lock(); if (devColorBack != NULL) hipFree(devColorBack); if (hipMalloc(&devColorBack, sizeof(Color) * max(1, width * height)) != hipSuccess) { std::cout << "CUDA_MALLOC PROBLEM" << std::endl; return false; } colorLock.unlock(); devColWidth = width; devColHeight = height; } color << <numBlocks(devColWidth, devColHeight), numThreads(), 0, stream >> >(devOctree, devColor, trans, devColWidth, devColHeight, frame); bool success = (hipStreamSynchronize(stream) == hipSuccess); if (!success) { std::cout << "STREAM JOIN ERROR" << std::endl; return false; } if (colorLock.try_lock()) { Color *tmp = devColor; devColor = devColorBack; devColorBack = tmp; colorLock.unlock(); colorLockWait.notify_all(); } return true; } // ######################################## // ######### HOST RENDER ROUTINE: ######### // ######################################## struct 
CpuRenderThreadParams { const Octree<Renderable<BakedTriFace> > *octree; Matrix<Color> *image; Transform trans; int step; int startI; int frame; }; inline static void cpuRenderThread(CpuRenderThreadParams params) { /* const int width = image->width(); const int height = image->height(); const int chunkWidth = 32; const int chunkHeight = 16; const int horChunks = ((width + chunkWidth - 1) / chunkWidth); const int verChunks = ((height + chunkHeight - 1) / chunkHeight); const int totalChunks = (horChunks * verChunks); for (int chunkId = startI; chunkId < totalChunks; chunkId += step) { const int chunkY = (chunkId / horChunks); const int chunkX = (chunkId - (chunkY * horChunks)); const int endY = min(height, ((chunkY + 1) * chunkHeight)); const int endX = min(width, ((chunkX + 1) * chunkWidth)); const int startX = (chunkX * chunkWidth); for (int i = chunkY * chunkHeight; i < endY; i++) for (int j = startX; j < endX; j++) colorPixel(*octree, image->operator[](i)[j], trans, i, j, width, height, frame); } /*/ for (int i = params.startI; i < params.image->height(); i += params.step) for (int j = 0; j < params.image->width(); j++) colorPixel(*params.octree, params.image->operator[](i)[j], params.trans, i, j, params.image->width(), params.image->height(), params.frame); //*/ } inline void renderOnCPU(int width, int height, int frame) { if (width != image->width() || height != image->height()) { image->setDimensions(width, height); } int numThreads = min(max(std::thread::hardware_concurrency(), 1), 32); std::thread threads[32]; for (int i = 0; i < numThreads; i++) threads[i] = std::thread(cpuRenderThread, CpuRenderThreadParams { &octree, image, trans, numThreads, i, frame }); for (int i = 0; i < numThreads; i++) threads[i].join(); if (colorLock.try_lock()) { Matrix<Color> *tmp = image; image = imageBack; imageBack = tmp; colorLock.unlock(); colorLockWait.notify_all(); } } // ######################################## // ############# FRAME RENDER: ############ // ######################################## inline bool render(int frame) { int width, height; window().getDimensions(width, height); if (onDevice) return renderOnDevice(width, height, frame); else renderOnCPU(width, height, frame); return true; } // ######################################## // ############### FPS DUMP: ############## // ######################################## inline static void dumpFPS(const int frame, long &time) { if (frame % 128 == 0 && frame > 0) { long newTime = clock(); long deltaTime = (newTime - time); float avgDeltaTime = ((float)deltaTime) / 128.0f; std::cout << "CLOCK: " << avgDeltaTime << " (" << CLOCKS_PER_SEC / avgDeltaTime << " fps)" << std::endl; time = newTime; } } inline static void windowUpdateThread(OctreeTestRenderContext *context) { while (!context->window().dead()) { std::unique_lock<std::mutex> uniqueLock(context->colorLock); context->colorLockWait.wait(uniqueLock); if (context->onDevice) { if (context->devColorBack != NULL); context->window().updateFrameDevice(context->devColorBack, context->devColWidth, context->devColHeight); } else context->window().updateFrameHost(*context->imageBack); } } public: // ######################################## // ######## READING & PREPARATION: ######## // ######################################## inline OctreeTestRenderContext() { // ################################ // ############# INTRO: ########### std::cout << std::fixed << std::setprecision(2); dumpConfiguration(); // ################################ // ############# DATA: ############ Stacktor<PolyMesh> meshes; 
MeshReaderTest::readMeshes(meshes); octree = constructOctree(meshes, devOctree); // ############ WINDOW: ########### devColor = NULL; devColorBack = NULL; devColWidth = 0; devColHeight = 0; frameReady = false; image = new Matrix<Color>(); imageBack = new Matrix<Color>(); // ############ RENDER: ########### std::cout << "READY TO RENDER" << std::endl; if (hipStreamCreate(&stream) != hipSuccess) { std::cout << "STREAM ALLOCATION ERROR" << std::endl; canRunOnCuda = false; } else canRunOnCuda = true; onDevice = canRunOnCuda; spacePressed = false; // ########### ROTATION: ########## euler(0, 0, 0); mouseWasDown = true; // ######### CPU THREADS: ######### Octree<Vertex> octo; octo.put(Vertex(0, 0, 0)); octo.cast(Ray(Vector3(-32, -32, -32), Vector3(1, 1, 1))); // ######### TRANSFORM: ######### pivot(0, 0, 0); distance = 128.0f; trans.setEulerAngles(euler); trans.setPosition(pivot + trans.back() * distance); } // ######################################## // ################ CLEANUP: ############## // ######################################## inline ~OctreeTestRenderContext() { if (devOctree != NULL) { if (Octree<Renderable<BakedTriFace> >::dispose(devOctree)) std::cout << "DEVICE OCTREE DIPOSED SUCCESSFULY" << std::endl; else std::cout << "ERROR DISPOSING OF DEVICE OCTREE" << std::endl; hipFree(devOctree); } if (devColor != NULL) hipFree(devColor); if (devColorBack != NULL) hipFree(devColorBack); delete image; delete imageBack; if (canRunOnCuda) if (hipStreamDestroy(stream) != hipSuccess) std::cout << "FAILED TO DESTROY STREAM" << std::endl; } // ######################################## // ############# RENDER TEST: ############# // ######################################## inline void runTest() { new(&window()) Windows::Window(L"OCTREE TEST WINDOW"); std::thread refreshThread(windowUpdateThread, this); int frame = 0; long time = clock(); while (true) { if (window().dead()) break; switchDevice(); rotate(); if (!render(frame)) continue; dumpFPS(frame, time); frame++; } colorLockWait.notify_all(); refreshThread.join(); window().~Window(); } }; // ######################################## // ########## BASIC OCTREE TEST: ########## // ######################################## inline static void test() { OctreeTestRenderContext context; context.runTest(); } } /* Tests basic capabilities of Octree by rendering normals with backward ray tracing */ void test() { Tests::runTest(Private::test, "Testing Octree"); } void runtContinuousTest() { while (true) { std::string s; std::cout << "Enter anything to prevent running Octree test... "; std::getline(std::cin, s); if (s.length() > 0) break; OctreeTest::test(); } } }
e7cffbb992ac027dbc908a3fc3ba9af33fe2498d.cu
#include"Octree.test.cuh" #include"Octree.cuh" #include"../../../../../Namespaces/MeshReader/MeshReader.test.h" #include"../../../../../Namespaces/Tests/Tests.h" #include"../../../../../Namespaces/Windows/Windows.h" #include"../../../Components/Transform/Transform.h" #include"../../../Components/Shaders/DefaultShader/DefaultShader.cuh" #include<iomanip> #include<thread> #include<mutex> namespace OctreeTest { namespace Private { // ######################################## // ############# PIXEL COLOR: ############# // ######################################## //#define USE_NORMAL_COLOR __device__ __host__ inline static void colorPixel(const Octree<Renderable<BakedTriFace> > &octree, Color &pixel, const Transform &trans, int i, int j, int width, int height, int /*frame*/) { Octree<Renderable<BakedTriFace> >::RaycastHit hit; Vector3 dir((float)(j - width / 2), (float)(height / 2 - i), (float)(width / 2)); Ray r = trans.ray(dir.normalized()); if (octree.cast(r, hit)) { #ifdef USE_NORMAL_COLOR Vector3 normal = (hit.object.norm.massCenter(hit.object.vert.getMases(hit.hitPoint)).normalized() >> trans) / 2.0f + Vector3(0.5f, 0.5f, 0.5f); normal = Vector3(normal.x, normal.y, 1.0f - normal.z) * (64.0f / max(hit.hitDistance, 64.0f)); pixel(normal.x, normal.y, normal.z, 1.0f); #else DefaultShader shad; /* Photon ph(Ray(hit.hitPoint - Vector3(0, 0, 1000000000), Vector3::front()), ColorRGB(1, 1, 1)); /*/ Photon ph(trans.frontRay(), ColorRGB(1, 1, 1)); //*/ pixel = shad.cast(DefaultShader::ShaderHitInfo { &hit.object->object, ph, hit.hitPoint, r.origin }).observed.color; #endif } else { #ifdef USE_NORMAL_COLOR pixel((((i ^ j) + frame) % 256) / 256.0f, (((i & j) - frame) % 256) / 256.0f, (((i | j) + frame) % 256) / 256.0f); #else pixel(0, 0, 0); #endif } } // ######################################## // ####### DEVICE DATA DUMP KERNEL: ####### // ######################################## /* __global__ static void dumpDeviceOctree(const Octree<> *octree) { octree->dump(); } //*/ #define OCTREE_TEST_KERNELS_BLOCK_WIDTH 8 #define OCTREE_TEST_KERNELS_BLOCK_HEIGHT 16 // ######################################## // ### DEVICE RENDER KERNEL DIMENSIONS: ### // ######################################## __device__ __host__ inline static int numThreads() { return (OCTREE_TEST_KERNELS_BLOCK_WIDTH * OCTREE_TEST_KERNELS_BLOCK_HEIGHT); } __device__ __host__ inline static int numBlocksWidth(int width) { return ((width + OCTREE_TEST_KERNELS_BLOCK_WIDTH - 1) / OCTREE_TEST_KERNELS_BLOCK_WIDTH); } __device__ __host__ inline static int numBlocksHeight(int height) { return ((height + OCTREE_TEST_KERNELS_BLOCK_HEIGHT - 1) / OCTREE_TEST_KERNELS_BLOCK_HEIGHT); } __device__ __host__ inline static int numBlocks(int width, int height) { return (numBlocksWidth(width) * numBlocksHeight(height)); } // ######################################## // ######### DEVICE RENDER KERNEL: ######## // ######################################## __global__ static void color(const Octree<Renderable<BakedTriFace> > *octree, Color *image, const Transform trans, int width, int height, int frame) { /* // This should not compile: Octree<> oct = Octree<>(); Octree<> oct1 = oct; Octree<> oct2(oct1); //*/ register int blocksWidth = numBlocksWidth(width); register int lineId = (blockIdx.x / blocksWidth); register int columnId = (blockIdx.x - (lineId * blocksWidth)); register int threadLine = (threadIdx.x / OCTREE_TEST_KERNELS_BLOCK_WIDTH); register int threadColumn = (threadIdx.x - (threadLine * OCTREE_TEST_KERNELS_BLOCK_WIDTH)); register int x = columnId * 
OCTREE_TEST_KERNELS_BLOCK_WIDTH + threadColumn; register int y = lineId * OCTREE_TEST_KERNELS_BLOCK_HEIGHT + threadLine; if (x < width && y < height) colorPixel(*octree, image[y * width + x], trans, y, x, width, height, frame); } #undef OCTREE_TEST_KERNELS_BLOCK_WIDTH #undef OCTREE_TEST_KERNELS_BLOCK_HEIGHT // ######################################## // ####### MACRO CONFIGURATION DUMP: ###### // ######################################## inline static void dumpConfiguration() { std::cout << "########### DUMPING OCTREE COMPILE PARAMETERS ##########" << std::endl; std::cout << "OCTREE_POLYCOUNT_TO_SPLIT_NODE: " << OCTREE_POLYCOUNT_TO_SPLIT_NODE << std::endl; std::cout << "OCTREE_VOXEL_LOCAL_CAPACITY: " << OCTREE_VOXEL_LOCAL_CAPACITY << std::endl; std::cout << "OCTREE_MAX_DEPTH: " << OCTREE_MAX_DEPTH << std::endl; std::cout << std::endl; } // ######################################## // ######## OCTREE CONSTRUCTION: ########## // ######################################## inline static Octree<Renderable<BakedTriFace> > constructOctree(const Stacktor<PolyMesh> &meshes, Octree<Renderable<BakedTriFace> > *&devOctree) { Octree<Renderable<BakedTriFace> > octree; Vertex minVert = meshes[0].vertex(0); Vertex maxVert = meshes[0].vertex(0); for (int i = 0; i < meshes.size(); i++) for (int j = 0; j < meshes[i].vertextCount(); j++) { if (meshes[i].vertex(j).x < minVert.x) minVert.x = meshes[i].vertex(j).x; else if (meshes[i].vertex(j).x > maxVert.x) maxVert.x = meshes[i].vertex(j).x; if (meshes[i].vertex(j).y < minVert.y) minVert.y = meshes[i].vertex(j).y; else if (meshes[i].vertex(j).y > maxVert.y) maxVert.y = meshes[i].vertex(j).y; if (meshes[i].vertex(j).z < minVert.z) minVert.z = meshes[i].vertex(j).z; else if (meshes[i].vertex(j).z > maxVert.z) maxVert.z = meshes[i].vertex(j).z; } octree.reinit(AABB(minVert - EPSILON_VECTOR, maxVert + EPSILON_VECTOR)); octree.reset(); for (int i = 0; i < meshes.size(); i++) { BakedTriMesh mesh; meshes[i].bake(mesh); //* for (int j = 0; j < mesh.size(); j++) octree.push(Renderable<BakedTriFace>(mesh[j], 0)); std::cout << "PUSHED " << i << std::endl; /*/ octree.put(mesh); std::cout << "PUT " << i << std::endl; //*/ } //* std::cout << "BUILDING..." << std::endl; octree.build(); /*/ std::cout << "OPTIMIZING..." << std::endl; octree.reduceNodes(); //*/ std::cout << "UPLOADING..." 
<< std::endl; devOctree = octree.upload(); if (devOctree != NULL) std::cout << "OCTREE UPLOADED" << std::endl; else std::cout << "OCTREE UPLOAD FAILED" << std::endl; //Octree<Renderable<BakedTriFace> > tmpClone = octree; return octree; } class OctreeTestRenderContext { private: Octree<Renderable<BakedTriFace> > octree, *devOctree; char windowData[sizeof(Windows::Window)]; Matrix<Color> *image, *imageBack; Color *devColor, *devColorBack; int devColWidth, devColHeight; std::mutex colorLock; std::condition_variable colorLockWait; volatile bool frameReady; bool onDevice, spacePressed; #ifdef _WIN32 POINT cursor; #endif Vector3 euler; Transform trans; Vector3 pivot; float distance; bool mouseWasDown; cudaStream_t stream; bool canRunOnCuda; struct CPUrenderThread { std::thread thread; std::condition_variable wait; std::condition_variable realease; volatile bool b; }; CPUrenderThread *cpuThreads; int cpuThreadCount; inline Windows::Window& window() { return (*((Windows::Window *)windowData)); } // ######################################## // ############ DEVICE SWITCH: ############ // ######################################## inline void switchDevice() { if ( #ifdef _WIN32 GetAsyncKeyState(VK_SPACE) & 0x8000 #else false #endif ) { if (!spacePressed) { std::cout << "Changing state..."; colorLock.lock(); onDevice = ((!onDevice) && canRunOnCuda); colorLock.unlock(); if (onDevice) std::cout << "USING DEVICE" << std::endl; else std::cout << "USING HOST" << std::endl; } spacePressed = true; } else spacePressed = false; } // ######################################## // ############### ROTATION: ############## // ######################################## inline void rotate() { if (window().inFocus()) { if ( #ifdef _WIN32 GetKeyState(VK_LBUTTON) & 0x100 #else false #endif ) { #ifdef _WIN32 POINT newCursor; GetCursorPos(&newCursor); if (mouseWasDown) { euler.y += (newCursor.x - cursor.x) / 4.0f; euler.x += (newCursor.y - cursor.y) / 4.0f; if (euler.x <= -80) euler.x = -80; else if (euler.x >= 80) euler.x = 80; trans.setEulerAngles(euler); } else mouseWasDown = true; cursor = newCursor; #endif } else mouseWasDown = false; } else mouseWasDown = false; trans.setPosition(pivot + trans.back() * distance); } // ######################################## // ######## DEVICE RENDER ROUTINE: ######## // ######################################## inline bool renderOnDevice(int width, int height, int frame) { if (devColWidth != width || devColHeight != height || devColor == NULL) { if (devColor != NULL) cudaFree(devColor); if (cudaMalloc(&devColor, sizeof(Color) * max(1, width * height)) != cudaSuccess) { std::cout << "CUDA_MALLOC PROBLEM" << std::endl; return false; } colorLock.lock(); if (devColorBack != NULL) cudaFree(devColorBack); if (cudaMalloc(&devColorBack, sizeof(Color) * max(1, width * height)) != cudaSuccess) { std::cout << "CUDA_MALLOC PROBLEM" << std::endl; return false; } colorLock.unlock(); devColWidth = width; devColHeight = height; } color << <numBlocks(devColWidth, devColHeight), numThreads(), 0, stream >> >(devOctree, devColor, trans, devColWidth, devColHeight, frame); bool success = (cudaStreamSynchronize(stream) == cudaSuccess); if (!success) { std::cout << "STREAM JOIN ERROR" << std::endl; return false; } if (colorLock.try_lock()) { Color *tmp = devColor; devColor = devColorBack; devColorBack = tmp; colorLock.unlock(); colorLockWait.notify_all(); } return true; } // ######################################## // ######### HOST RENDER ROUTINE: ######### // ######################################## struct 
CpuRenderThreadParams { const Octree<Renderable<BakedTriFace> > *octree; Matrix<Color> *image; Transform trans; int step; int startI; int frame; }; inline static void cpuRenderThread(CpuRenderThreadParams params) { /* const int width = image->width(); const int height = image->height(); const int chunkWidth = 32; const int chunkHeight = 16; const int horChunks = ((width + chunkWidth - 1) / chunkWidth); const int verChunks = ((height + chunkHeight - 1) / chunkHeight); const int totalChunks = (horChunks * verChunks); for (int chunkId = startI; chunkId < totalChunks; chunkId += step) { const int chunkY = (chunkId / horChunks); const int chunkX = (chunkId - (chunkY * horChunks)); const int endY = min(height, ((chunkY + 1) * chunkHeight)); const int endX = min(width, ((chunkX + 1) * chunkWidth)); const int startX = (chunkX * chunkWidth); for (int i = chunkY * chunkHeight; i < endY; i++) for (int j = startX; j < endX; j++) colorPixel(*octree, image->operator[](i)[j], trans, i, j, width, height, frame); } /*/ for (int i = params.startI; i < params.image->height(); i += params.step) for (int j = 0; j < params.image->width(); j++) colorPixel(*params.octree, params.image->operator[](i)[j], params.trans, i, j, params.image->width(), params.image->height(), params.frame); //*/ } inline void renderOnCPU(int width, int height, int frame) { if (width != image->width() || height != image->height()) { image->setDimensions(width, height); } int numThreads = min(max(std::thread::hardware_concurrency(), 1), 32); std::thread threads[32]; for (int i = 0; i < numThreads; i++) threads[i] = std::thread(cpuRenderThread, CpuRenderThreadParams { &octree, image, trans, numThreads, i, frame }); for (int i = 0; i < numThreads; i++) threads[i].join(); if (colorLock.try_lock()) { Matrix<Color> *tmp = image; image = imageBack; imageBack = tmp; colorLock.unlock(); colorLockWait.notify_all(); } } // ######################################## // ############# FRAME RENDER: ############ // ######################################## inline bool render(int frame) { int width, height; window().getDimensions(width, height); if (onDevice) return renderOnDevice(width, height, frame); else renderOnCPU(width, height, frame); return true; } // ######################################## // ############### FPS DUMP: ############## // ######################################## inline static void dumpFPS(const int frame, long &time) { if (frame % 128 == 0 && frame > 0) { long newTime = clock(); long deltaTime = (newTime - time); float avgDeltaTime = ((float)deltaTime) / 128.0f; std::cout << "CLOCK: " << avgDeltaTime << " (" << CLOCKS_PER_SEC / avgDeltaTime << " fps)" << std::endl; time = newTime; } } inline static void windowUpdateThread(OctreeTestRenderContext *context) { while (!context->window().dead()) { std::unique_lock<std::mutex> uniqueLock(context->colorLock); context->colorLockWait.wait(uniqueLock); if (context->onDevice) { if (context->devColorBack != NULL); context->window().updateFrameDevice(context->devColorBack, context->devColWidth, context->devColHeight); } else context->window().updateFrameHost(*context->imageBack); } } public: // ######################################## // ######## READING & PREPARATION: ######## // ######################################## inline OctreeTestRenderContext() { // ################################ // ############# INTRO: ########### std::cout << std::fixed << std::setprecision(2); dumpConfiguration(); // ################################ // ############# DATA: ############ Stacktor<PolyMesh> meshes; 
MeshReaderTest::readMeshes(meshes); octree = constructOctree(meshes, devOctree); // ############ WINDOW: ########### devColor = NULL; devColorBack = NULL; devColWidth = 0; devColHeight = 0; frameReady = false; image = new Matrix<Color>(); imageBack = new Matrix<Color>(); // ############ RENDER: ########### std::cout << "READY TO RENDER" << std::endl; if (cudaStreamCreate(&stream) != cudaSuccess) { std::cout << "STREAM ALLOCATION ERROR" << std::endl; canRunOnCuda = false; } else canRunOnCuda = true; onDevice = canRunOnCuda; spacePressed = false; // ########### ROTATION: ########## euler(0, 0, 0); mouseWasDown = true; // ######### CPU THREADS: ######### Octree<Vertex> octo; octo.put(Vertex(0, 0, 0)); octo.cast(Ray(Vector3(-32, -32, -32), Vector3(1, 1, 1))); // ######### TRANSFORM: ######### pivot(0, 0, 0); distance = 128.0f; trans.setEulerAngles(euler); trans.setPosition(pivot + trans.back() * distance); } // ######################################## // ################ CLEANUP: ############## // ######################################## inline ~OctreeTestRenderContext() { if (devOctree != NULL) { if (Octree<Renderable<BakedTriFace> >::dispose(devOctree)) std::cout << "DEVICE OCTREE DIPOSED SUCCESSFULY" << std::endl; else std::cout << "ERROR DISPOSING OF DEVICE OCTREE" << std::endl; cudaFree(devOctree); } if (devColor != NULL) cudaFree(devColor); if (devColorBack != NULL) cudaFree(devColorBack); delete image; delete imageBack; if (canRunOnCuda) if (cudaStreamDestroy(stream) != cudaSuccess) std::cout << "FAILED TO DESTROY STREAM" << std::endl; } // ######################################## // ############# RENDER TEST: ############# // ######################################## inline void runTest() { new(&window()) Windows::Window(L"OCTREE TEST WINDOW"); std::thread refreshThread(windowUpdateThread, this); int frame = 0; long time = clock(); while (true) { if (window().dead()) break; switchDevice(); rotate(); if (!render(frame)) continue; dumpFPS(frame, time); frame++; } colorLockWait.notify_all(); refreshThread.join(); window().~Window(); } }; // ######################################## // ########## BASIC OCTREE TEST: ########## // ######################################## inline static void test() { OctreeTestRenderContext context; context.runTest(); } } /* Tests basic capabilities of Octree by rendering normals with backward ray tracing */ void test() { Tests::runTest(Private::test, "Testing Octree"); } void runtContinuousTest() { while (true) { std::string s; std::cout << "Enter anything to prevent running Octree test... "; std::getline(std::cin, s); if (s.length() > 0) break; OctreeTest::test(); } } }
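A standalone sketch (illustrative only, not taken from either file above) of the linear block-index decomposition the color kernel performs to map (blockIdx.x, threadIdx.x) onto a pixel coordinate:

#include <cstdio>

#define BW 8   // mirrors OCTREE_TEST_KERNELS_BLOCK_WIDTH
#define BH 16  // mirrors OCTREE_TEST_KERNELS_BLOCK_HEIGHT

int main() {
    int width = 100;
    int blocksWidth = (width + BW - 1) / BW;   // numBlocksWidth(width)
    int blockId = 27, threadId = 37;           // example linear ids
    int lineId = blockId / blocksWidth;        // which tile row
    int columnId = blockId - lineId * blocksWidth;
    int threadLine = threadId / BW;            // position inside the tile
    int threadColumn = threadId - threadLine * BW;
    int x = columnId * BW + threadColumn;
    int y = lineId * BH + threadLine;
    printf("pixel (%d, %d)\n", x, y);          // the kernel then bounds-checks x < width, y < height
    return 0;
}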
5ec9f3838ff8ff881d836c7fdb161f154c00eb34.hip
// !!! This is a file automatically generated by hipify!!!
#include "sigmoidlayer.h"
#include "hip/hip_runtime.h"
#include "math.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdexcept>

__global__ void SigmoidLayer_Forward_cu(SigmoidNode *node, double *in, double *out)
{
    int weightCount = node[blockIdx.x].weightCount;
    double val = 0;
    for (int i = 0; i < weightCount; i++) {
        val += node[blockIdx.x].weights[i] * in[blockIdx.x];
    }
    out[blockIdx.x] = 1.0 / (1.0 + exp(-val));
}

__global__ void SigmoidLayer_Backward_cu(SigmoidNode *node, double *forward, double *in, double *out, double learnRate)
{
    double error = forward[blockIdx.x] * (1 - forward[blockIdx.x]) * in[blockIdx.x];
    int weightCount = node[blockIdx.x].weightCount;
    double val = 0;
    for (int i = 0; i < weightCount; i++) {
        node[blockIdx.x].weights[i] += error * forward[i] * learnRate;
    }
    out[blockIdx.x] = 1.0 / (1.0 + exp(-val));
}

void SigmoidLayer_Forward(SigmoidNode *node, double *input, double *output, int nodeCount)
{
    hipLaunchKernelGGL(( SigmoidLayer_Forward_cu), dim3(nodeCount), dim3(1), 0, 0, node, input, output);
    if (hipGetLastError() != hipError_t::hipSuccess) {
        throw std::runtime_error("Sigmoid Forward CUDA method returned an error");
    }
    if (hipDeviceSynchronize() != hipError_t::hipSuccess) {
        throw std::runtime_error("Sigmoid Forward CUDA synchronize returned an error");
    }
}

void SigmoidLayer_Backward(SigmoidNode *node, double *forward, double *input, double *output, int nodeCount, double learnRate)
{
    hipLaunchKernelGGL(( SigmoidLayer_Backward_cu), dim3(nodeCount), dim3(1), 0, 0, node, forward, input, output, learnRate);
    if (hipGetLastError() != hipError_t::hipSuccess) {
        throw std::runtime_error("Sigmoid Backward CUDA method returned an error");
    }
    if (hipDeviceSynchronize() != hipError_t::hipSuccess) {
        throw std::runtime_error("Sigmoid Backward CUDA synchronize returned an error");
    }
}
5ec9f3838ff8ff881d836c7fdb161f154c00eb34.cu
#include "sigmoidlayer.h" #include "cuda_runtime.h" #include "math.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdexcept> __global__ void SigmoidLayer_Forward_cu(SigmoidNode *node, double *in, double *out) { int weightCount = node[blockIdx.x].weightCount; double val = 0; for (int i = 0; i < weightCount; i++) { val += node[blockIdx.x].weights[i] * in[blockIdx.x]; } out[blockIdx.x] = 1.0 / (1.0 + exp(-val)); } __global__ void SigmoidLayer_Backward_cu(SigmoidNode *node, double *forward, double *in, double *out, double learnRate) { double error = forward[blockIdx.x] * (1 - forward[blockIdx.x]) * in[blockIdx.x]; int weightCount = node[blockIdx.x].weightCount; double val = 0; for (int i = 0; i < weightCount; i++) { node[blockIdx.x].weights[i] += error * forward[i] * learnRate; } out[blockIdx.x] = 1.0 / (1.0 + exp(-val)); } void SigmoidLayer_Forward(SigmoidNode *node, double *input, double *output, int nodeCount) { SigmoidLayer_Forward_cu<<<nodeCount, 1>>>(node, input, output); if (cudaGetLastError() != cudaError::cudaSuccess) { throw std::runtime_error("Sigmoid Forward CUDA method returned an error"); } if (cudaDeviceSynchronize() != cudaError::cudaSuccess) { throw std::runtime_error("Sigmoid Forward CUDA syncronize returned an error"); } } void SigmoidLayer_Backward(SigmoidNode *node, double *forward, double *input, double *output, int nodeCount, double learnRate) { SigmoidLayer_Backward_cu<<<nodeCount, 1>>>(node, forward, input, output, learnRate); if (cudaGetLastError() != cudaError::cudaSuccess) { throw std::runtime_error("Sigmoid Forward CUDA method returned an error"); } if (cudaDeviceSynchronize() != cudaError::cudaSuccess) { throw std::runtime_error("Sigmoid Forward CUDA syncronize returned an error"); } }
5d5a8723339bf0fabc4849591e90b0dd27a61a01.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/zlascl2.cu, normal z -> d, Tue Aug 30 09:38:32 2016 @author Theo Mary */ #include "magma_internal.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right. __global__ void dlascl2_full(int m, int n, const double* D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; double mul = D[ind]; A += ind; if (ind < m) { for (int j=0; j < n; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void dlascl2_lower(int m, int n, const double* D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; int break_d = (ind < n) ? ind : n-1; double mul = D[ind]; A += ind; if (ind < m) { for (int j=0; j <= break_d; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void dlascl2_upper(int m, int n, const double *D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; double mul = D[ind]; A += ind; if (ind < m) { for (int j=n-1; j >= ind; j--) A[j*lda] *= mul; } } /***************************************************************************//** Purpose ------- DLASCL2 scales the M by N real matrix A by the real diagonal matrix dD. TYPE specifies that A may be full, upper triangular, lower triangular. Arguments --------- @param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaFull: full matrix. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in] dD DOUBLE PRECISION vector, dimension (M) The diagonal matrix containing the scalar factors. Stored as a vector. @param[in,out] dA DOUBLE PRECISION array, dimension (LDDA,N) The matrix to be scaled by dD. See TYPE for the storage type. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. 
@see magma_dlascl_diag @ingroup magma_lascl_diag *******************************************************************************/ extern "C" void magmablas_dlascl2_q( magma_type_t type, magma_int_t m, magma_int_t n, magmaDouble_const_ptr dD, magmaDouble_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull ) *info = -1; else if ( m < 0 ) *info = -2; else if ( n < 0 ) *info = -3; else if ( ldda < max(1,m) ) *info = -5; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 grid( magma_ceildiv( m, NB ) ); dim3 threads( NB ); if (type == MagmaLower) { hipLaunchKernelGGL(( dlascl2_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda); } else if (type == MagmaUpper) { hipLaunchKernelGGL(( dlascl2_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda); } else if (type == MagmaFull) { hipLaunchKernelGGL(( dlascl2_full) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda); } }
5d5a8723339bf0fabc4849591e90b0dd27a61a01.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/zlascl2.cu, normal z -> d, Tue Aug 30 09:38:32 2016 @author Theo Mary */ #include "magma_internal.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right. __global__ void dlascl2_full(int m, int n, const double* D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; double mul = D[ind]; A += ind; if (ind < m) { for (int j=0; j < n; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void dlascl2_lower(int m, int n, const double* D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; int break_d = (ind < n) ? ind : n-1; double mul = D[ind]; A += ind; if (ind < m) { for (int j=0; j <= break_d; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void dlascl2_upper(int m, int n, const double *D, double* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; double mul = D[ind]; A += ind; if (ind < m) { for (int j=n-1; j >= ind; j--) A[j*lda] *= mul; } } /***************************************************************************//** Purpose ------- DLASCL2 scales the M by N real matrix A by the real diagonal matrix dD. TYPE specifies that A may be full, upper triangular, lower triangular. Arguments --------- @param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaFull: full matrix. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in] dD DOUBLE PRECISION vector, dimension (M) The diagonal matrix containing the scalar factors. Stored as a vector. @param[in,out] dA DOUBLE PRECISION array, dimension (LDDA,N) The matrix to be scaled by dD. See TYPE for the storage type. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. @see magma_dlascl_diag @ingroup magma_lascl_diag *******************************************************************************/ extern "C" void magmablas_dlascl2_q( magma_type_t type, magma_int_t m, magma_int_t n, magmaDouble_const_ptr dD, magmaDouble_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull ) *info = -1; else if ( m < 0 ) *info = -2; else if ( n < 0 ) *info = -3; else if ( ldda < max(1,m) ) *info = -5; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 grid( magma_ceildiv( m, NB ) ); dim3 threads( NB ); if (type == MagmaLower) { dlascl2_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda); } else if (type == MagmaUpper) { dlascl2_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda); } else if (type == MagmaFull) { dlascl2_full <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda); } }
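A host reference sketch (not part of MAGMA; lascl2_full_ref is a hypothetical helper) of what dlascl2_full computes above: each row i of the column-major matrix A, stored with leading dimension lda, is scaled by the diagonal entry D[i]:

#include <cstdio>

static void lascl2_full_ref(int m, int n, const double *D, double *A, int lda) {
    for (int j = 0; j < n; ++j)
        for (int i = 0; i < m; ++i)
            A[i + j * lda] *= D[i];   // same update as A[j*lda] *= mul with A += ind in the kernel
}

int main() {
    double D[2] = {2.0, 3.0};
    double A[4] = {1.0, 1.0, 1.0, 1.0};   // 2x2 matrix of ones, lda = 2
    lascl2_full_ref(2, 2, D, A, 2);
    printf("%g %g %g %g\n", A[0], A[1], A[2], A[3]);  // prints 2 3 2 3
    return 0;
}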
58b19c0e4889f4557cb4cf5226f405cd362fa2c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // //user function __device__ void update_gpu( const float *qold, float *q, float *res, const float *adt, float *rms) { float del, adti; float rmsl = 0.0f; adti = 1.0f / (*adt); for (int n = 0; n < 4; n++) { del = adti * res[n]; q[n] = qold[n] - del; res[n] = 0.0f; rmsl += del * del; } *rms += rmsl; } // CUDA kernel function __global__ void op_cuda_update( const float *__restrict arg0, float *arg1, float *arg2, const float *__restrict arg3, float *arg4, int set_size ) { float arg4_l[1]; for ( int d=0; d<1; d++ ){ arg4_l[d]=ZERO_float; } //process set elements for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){ //user-supplied kernel call update_gpu(arg0+n*4, arg1+n*4, arg2+n*4, arg3+n*1, arg4_l); } //global reductions for ( int d=0; d<1; d++ ){ op_reduction<OP_INC>(&arg4[d+blockIdx.x*1],arg4_l[d]); } } //host stub function void op_par_loop_update(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4){ float*arg4h = (float *)arg4.data; int nargs = 5; op_arg args[5]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(4); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[4].name = name; OP_kernels[4].count += 1; if (OP_diags>2) { printf(" kernel routine w/o indirection: update"); } op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_4 int nthread = OP_BLOCK_SIZE_4; #else int nthread = OP_block_size; // int nthread = 128; #endif int nblocks = 200; //transfer global reduction data to GPU int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); reduct_size = MAX(reduct_size,sizeof(float)); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg4.data = OP_reduct_h + reduct_bytes; arg4.data_d = OP_reduct_d + reduct_bytes; for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ ((float *)arg4.data)[d+b*1] = ZERO_float; } } reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); mvReductArraysToDevice(reduct_bytes); int nshared = reduct_size*nthread; hipLaunchKernelGGL(( op_cuda_update), dim3(nblocks),dim3(nthread),nshared, 0, (float *) arg0.data_d, (float *) arg1.data_d, (float *) arg2.data_d, (float *) arg3.data_d, (float *) arg4.data_d, set->size ); //transfer global reduction data back to CPU mvReductArraysToHost(reduct_bytes); for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ arg4h[d] = arg4h[d] + ((float *)arg4.data)[d+b*1]; } } arg4.data = (char *)arg4h; op_mpi_reduce(&arg4,arg4h); } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(hipDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[4].time += wall_t2 - wall_t1; OP_kernels[4].transfer += (float)set->size * arg0.size; OP_kernels[4].transfer += (float)set->size * arg1.size * 2.0f; OP_kernels[4].transfer += (float)set->size * arg2.size * 2.0f; OP_kernels[4].transfer += (float)set->size * arg3.size; }
58b19c0e4889f4557cb4cf5226f405cd362fa2c3.cu
// // auto-generated by op2.py // //user function __device__ void update_gpu( const float *qold, float *q, float *res, const float *adt, float *rms) { float del, adti; float rmsl = 0.0f; adti = 1.0f / (*adt); for (int n = 0; n < 4; n++) { del = adti * res[n]; q[n] = qold[n] - del; res[n] = 0.0f; rmsl += del * del; } *rms += rmsl; } // CUDA kernel function __global__ void op_cuda_update( const float *__restrict arg0, float *arg1, float *arg2, const float *__restrict arg3, float *arg4, int set_size ) { float arg4_l[1]; for ( int d=0; d<1; d++ ){ arg4_l[d]=ZERO_float; } //process set elements for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){ //user-supplied kernel call update_gpu(arg0+n*4, arg1+n*4, arg2+n*4, arg3+n*1, arg4_l); } //global reductions for ( int d=0; d<1; d++ ){ op_reduction<OP_INC>(&arg4[d+blockIdx.x*1],arg4_l[d]); } } //host stub function void op_par_loop_update(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4){ float*arg4h = (float *)arg4.data; int nargs = 5; op_arg args[5]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(4); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[4].name = name; OP_kernels[4].count += 1; if (OP_diags>2) { printf(" kernel routine w/o indirection: update"); } op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_4 int nthread = OP_BLOCK_SIZE_4; #else int nthread = OP_block_size; // int nthread = 128; #endif int nblocks = 200; //transfer global reduction data to GPU int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); reduct_size = MAX(reduct_size,sizeof(float)); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg4.data = OP_reduct_h + reduct_bytes; arg4.data_d = OP_reduct_d + reduct_bytes; for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ ((float *)arg4.data)[d+b*1] = ZERO_float; } } reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); mvReductArraysToDevice(reduct_bytes); int nshared = reduct_size*nthread; op_cuda_update<<<nblocks,nthread,nshared>>>( (float *) arg0.data_d, (float *) arg1.data_d, (float *) arg2.data_d, (float *) arg3.data_d, (float *) arg4.data_d, set->size ); //transfer global reduction data back to CPU mvReductArraysToHost(reduct_bytes); for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ arg4h[d] = arg4h[d] + ((float *)arg4.data)[d+b*1]; } } arg4.data = (char *)arg4h; op_mpi_reduce(&arg4,arg4h); } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(cudaDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[4].time += wall_t2 - wall_t1; OP_kernels[4].transfer += (float)set->size * arg0.size; OP_kernels[4].transfer += (float)set->size * arg1.size * 2.0f; OP_kernels[4].transfer += (float)set->size * arg2.size * 2.0f; OP_kernels[4].transfer += (float)set->size * arg3.size; }
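A minimal sketch of the reduction pattern op_cuda_update relies on, written with hypothetical names rather than the OP2 API: a fixed-size grid walks the whole set with a grid-stride loop, each block leaves one partial sum per reduction variable, and the host adds the per-block partials (as the stub above does for arg4):

#include <cstdio>
#include <cuda_runtime.h>

// One partial sum per block; blockDim.x is assumed to be a power of two.
__global__ void partial_sums(const float *in, float *block_out, int n) {
    extern __shared__ float s[];
    float acc = 0.0f;
    // grid-stride loop, same shape as the set-element loop in op_cuda_update
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += blockDim.x * gridDim.x)
        acc += in[i];
    s[threadIdx.x] = acc;
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride) s[threadIdx.x] += s[threadIdx.x + stride];
        __syncthreads();
    }
    if (threadIdx.x == 0) block_out[blockIdx.x] = s[0];  // one slot per block, like arg4[blockIdx.x * 1]
}

// Usage mirrors the stub above (fixed block count, then mvReductArraysToHost + host b-loop):
//   partial_sums<<<nblocks, nthread, nthread * sizeof(float)>>>(d_in, d_partials, n);
//   cudaMemcpy(h_partials, d_partials, nblocks * sizeof(float), cudaMemcpyDeviceToHost);
//   float total = 0.0f; for (int b = 0; b < nblocks; ++b) total += h_partials[b];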
09da47e0d7eae9e8a8bc36b4de7de2ad43f321f2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Notes: // // 1) strategy: one thread per node in the 2D block; // after initialisation it marches in the k-direction // working with 3 planes of data at a time // // 2) each thread also loads in data for at most one halo node; // assumes the number of halo nodes is not more than the // number of interior nodes // // 3) corner halo nodes are included because they are needed // for more general applications with cross-derivatives // // 4) could try double-buffering in the future fairly easily // // definition to use efficient __mul24 intrinsic #define INDEX(i, j, j_off) (i + __mul24(j, j_off)) // device code __global__ void GPU_laplace3d(int NX, int NY, int NZ, int pitch, float* d_u1, float* d_u2) { int indg, indg_h, indg0; int i, j, k, ind, ind_h, halo, active; float u2, sixth = 1.0f / 6.0f; int NXM1 = NX - 1; int NYM1 = NY - 1; int NZM1 = NZ - 1; // // define local array offsets // #define IOFF 1 #define JOFF (BLOCK_X + 2) #define KOFF (BLOCK_X + 2) * (BLOCK_Y + 2) __shared__ float u1[3 * KOFF]; // // first set up indices for halos // k = threadIdx.x + threadIdx.y * BLOCK_X; halo = k < 2 * (BLOCK_X + BLOCK_Y + 2); if (halo) { if (threadIdx.y < 2) { // y-halos (coalesced) i = threadIdx.x; j = threadIdx.y * (BLOCK_Y + 1) - 1; } else { // x-halos (not coalesced) i = (k % 2) * (BLOCK_X + 1) - 1; j = k / 2 - BLOCK_X - 1; } ind_h = INDEX(i + 1, j + 1, JOFF) + KOFF; i = INDEX(i, blockIdx.x, BLOCK_X); // global indices j = INDEX(j, blockIdx.y, BLOCK_Y); indg_h = INDEX(i, j, pitch); halo = (i >= 0) && (i < NX) && (j >= 0) && (j < NY); } // // then set up indices for main block // i = threadIdx.x; j = threadIdx.y; ind = INDEX(i + 1, j + 1, JOFF) + KOFF; i = INDEX(i, blockIdx.x, BLOCK_X); // global indices j = INDEX(j, blockIdx.y, BLOCK_Y); indg = INDEX(i, j, pitch); active = (i < NX) && (j < NY); // // read initial plane of u1 array // if (active) u1[ind + KOFF] = d_u1[indg]; if (halo) u1[ind_h + KOFF] = d_u1[indg_h]; // // loop over k-planes // for (k = 0; k < NZ; k++) { // move two planes down and read in new plane k+1 if (active) { indg0 = indg; indg = INDEX(indg, NY, pitch); u1[ind - KOFF] = u1[ind]; u1[ind] = u1[ind + KOFF]; if (k < NZM1) u1[ind + KOFF] = d_u1[indg]; } if (halo) { indg_h = INDEX(indg_h, NY, pitch); u1[ind_h - KOFF] = u1[ind_h]; u1[ind_h] = u1[ind_h + KOFF]; if (k < NZM1) u1[ind_h + KOFF] = d_u1[indg_h]; } __syncthreads(); // // perform Jacobi iteration to set values in u2 // if (active) { if (i == 0 || i == NXM1 || j == 0 || j == NYM1 || k == 0 || k == NZM1) { u2 = u1[ind]; // Dirichlet b.c.'s } else { u2 = (u1[ind - IOFF] + u1[ind + IOFF] + u1[ind - JOFF] + u1[ind + JOFF] + u1[ind - KOFF] + u1[ind + KOFF]) * sixth; } d_u2[indg0] = u2; } __syncthreads(); } }
09da47e0d7eae9e8a8bc36b4de7de2ad43f321f2.cu
// // Notes: // // 1) strategy: one thread per node in the 2D block; // after initialisation it marches in the k-direction // working with 3 planes of data at a time // // 2) each thread also loads in data for at most one halo node; // assumes the number of halo nodes is not more than the // number of interior nodes // // 3) corner halo nodes are included because they are needed // for more general applications with cross-derivatives // // 4) could try double-buffering in the future fairly easily // // definition to use efficient __mul24 intrinsic #define INDEX(i, j, j_off) (i + __mul24(j, j_off)) // device code __global__ void GPU_laplace3d(int NX, int NY, int NZ, int pitch, float* d_u1, float* d_u2) { int indg, indg_h, indg0; int i, j, k, ind, ind_h, halo, active; float u2, sixth = 1.0f / 6.0f; int NXM1 = NX - 1; int NYM1 = NY - 1; int NZM1 = NZ - 1; // // define local array offsets // #define IOFF 1 #define JOFF (BLOCK_X + 2) #define KOFF (BLOCK_X + 2) * (BLOCK_Y + 2) __shared__ float u1[3 * KOFF]; // // first set up indices for halos // k = threadIdx.x + threadIdx.y * BLOCK_X; halo = k < 2 * (BLOCK_X + BLOCK_Y + 2); if (halo) { if (threadIdx.y < 2) { // y-halos (coalesced) i = threadIdx.x; j = threadIdx.y * (BLOCK_Y + 1) - 1; } else { // x-halos (not coalesced) i = (k % 2) * (BLOCK_X + 1) - 1; j = k / 2 - BLOCK_X - 1; } ind_h = INDEX(i + 1, j + 1, JOFF) + KOFF; i = INDEX(i, blockIdx.x, BLOCK_X); // global indices j = INDEX(j, blockIdx.y, BLOCK_Y); indg_h = INDEX(i, j, pitch); halo = (i >= 0) && (i < NX) && (j >= 0) && (j < NY); } // // then set up indices for main block // i = threadIdx.x; j = threadIdx.y; ind = INDEX(i + 1, j + 1, JOFF) + KOFF; i = INDEX(i, blockIdx.x, BLOCK_X); // global indices j = INDEX(j, blockIdx.y, BLOCK_Y); indg = INDEX(i, j, pitch); active = (i < NX) && (j < NY); // // read initial plane of u1 array // if (active) u1[ind + KOFF] = d_u1[indg]; if (halo) u1[ind_h + KOFF] = d_u1[indg_h]; // // loop over k-planes // for (k = 0; k < NZ; k++) { // move two planes down and read in new plane k+1 if (active) { indg0 = indg; indg = INDEX(indg, NY, pitch); u1[ind - KOFF] = u1[ind]; u1[ind] = u1[ind + KOFF]; if (k < NZM1) u1[ind + KOFF] = d_u1[indg]; } if (halo) { indg_h = INDEX(indg_h, NY, pitch); u1[ind_h - KOFF] = u1[ind_h]; u1[ind_h] = u1[ind_h + KOFF]; if (k < NZM1) u1[ind_h + KOFF] = d_u1[indg_h]; } __syncthreads(); // // perform Jacobi iteration to set values in u2 // if (active) { if (i == 0 || i == NXM1 || j == 0 || j == NYM1 || k == 0 || k == NZM1) { u2 = u1[ind]; // Dirichlet b.c.'s } else { u2 = (u1[ind - IOFF] + u1[ind + IOFF] + u1[ind - JOFF] + u1[ind + JOFF] + u1[ind - KOFF] + u1[ind + KOFF]) * sixth; } d_u2[indg0] = u2; } __syncthreads(); } }
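As the notes in this kernel say, there is one thread per (i,j) node of a 2D tile, and each tile marches itself through the k-planes, so the launch is purely two-dimensional. A host-side sketch follows; BLOCK_X, BLOCK_Y and the wrapper name are illustrative (in the real build they must match whatever the kernel was compiled with), and pitch is given in elements, i.e. simply NX for an unpadded array:

#define BLOCK_X 32
#define BLOCK_Y 8

void launch_laplace3d(int NX, int NY, int NZ, float *d_u1, float *d_u2) {
    dim3 threads(BLOCK_X, BLOCK_Y);                 // one thread per (i,j) node of the tile
    dim3 blocks((NX + BLOCK_X - 1) / BLOCK_X,       // enough 2D tiles to cover the x-y plane;
                (NY + BLOCK_Y - 1) / BLOCK_Y);      // the 'active' test in the kernel masks the overhang
    GPU_laplace3d<<<blocks, threads>>>(NX, NY, NZ, /*pitch=*/NX, d_u1, d_u2);
}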
329c121fdfd197e71b38f5a4d4a9b7acad2e6523.hip
// !!! This is a file automatically generated by hipify!!! // *************************************************************** // Copyright (c) 2023 Jittor. All Rights Reserved. // This file is subject to the terms and conditions defined in // file 'LICENSE.txt', which is part of this source code package. // *************************************************************** #include "misc/nan_checker.h" #include "misc/cuda_flags.h" #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include "helper_cuda.h" #include <cassert> namespace jittor { #define MAX_NAN_REPORT 10 inline __device__ void print_nan(float v, int64 i, int* cnt) { auto x = atomicAdd(cnt, 1); if (x<MAX_NAN_REPORT) { printf("detect a[%lld] = %f\n", i, v); cnt[x+1] = i; } } #ifdef HAS_CUDA __global__ void _check_nan_float16(__half* __restrict__ ptr, int64 num, int* cnt) { int64 i = threadIdx.x + blockIdx.x * (int64)blockDim.x; if (i<num) { #if JT_CHECK_NAN == 2 if (isnan(__half2float(ptr[i]))) #else if (isnan(__half2float(ptr[i])) || __hisinf(ptr[i]) // || abs(__half2float(ptr[i])) > 60000.f ) #endif print_nan(float(ptr[i]), i, cnt); } } __global__ void _check_nan_float32(float32* __restrict__ ptr, int64 num, int* cnt) { int64 i = threadIdx.x + blockIdx.x * (int64)blockDim.x; if (i<num) { #if JT_CHECK_NAN == 2 if (::isnan(ptr[i])) #else if (::isnan(ptr[i]) || ::isinf(ptr[i])) #endif print_nan(float(ptr[i]), i, cnt); } } __global__ void _check_nan_float64(float64* __restrict__ ptr, int64 num, int* cnt) { int64 i = threadIdx.x + blockIdx.x * (int64)blockDim.x; if (i<num) { #if JT_CHECK_NAN == 2 if (::isnan(ptr[i])) #else if (::isnan(ptr[i]) || ::isinf(ptr[i])) #endif print_nan(float(ptr[i]), i, cnt); } } int* check_nan_get_device_ptr() { static int* ptr = nullptr; if (ptr) return ptr; hipMalloc(&ptr, 4+4*MAX_NAN_REPORT); hipMemset(ptr, 0, 4+4*MAX_NAN_REPORT); return ptr; } vector<int> report_nan() { vector<int> buffer(MAX_NAN_REPORT+1); auto ptr = check_nan_get_device_ptr(); hipMemcpy(buffer.data(), ptr, 4+4*MAX_NAN_REPORT, hipMemcpyDeviceToHost); hipMemset(ptr, 0, 4); return buffer; } vector<int> check_nan_float64(float64* ptr, int64 num) { int block_num = ::max((int64)1, (num-1)/1024+1); int thread_num = ::min((int64)1024, num); hipLaunchKernelGGL(( _check_nan_float64), dim3(block_num), dim3(thread_num), 0, 0, ptr, num, check_nan_get_device_ptr()); return report_nan(); } vector<int> check_nan_float32(float32* ptr, int64 num) { int block_num = ::max((int64)1, (num-1)/1024+1); int thread_num = ::min((int64)1024, num); hipLaunchKernelGGL(( _check_nan_float32), dim3(block_num), dim3(thread_num), 0, 0, ptr, num, check_nan_get_device_ptr()); return report_nan(); } vector<int> check_nan_float16(__half* ptr, int64 num) { int block_num = ::max((int64)1, (num-1)/1024+1); int thread_num = ::min((int64)1024, num); hipLaunchKernelGGL(( _check_nan_float16), dim3(block_num), dim3(thread_num), 0, 0, ptr, num, check_nan_get_device_ptr()); return report_nan(); } #endif }
329c121fdfd197e71b38f5a4d4a9b7acad2e6523.cu
// *************************************************************** // Copyright (c) 2023 Jittor. All Rights Reserved. // This file is subject to the terms and conditions defined in // file 'LICENSE.txt', which is part of this source code package. // *************************************************************** #include "misc/nan_checker.h" #include "misc/cuda_flags.h" #include <cuda_runtime.h> #include <cuda_fp16.h> #include "helper_cuda.h" #include <cassert> namespace jittor { #define MAX_NAN_REPORT 10 inline __device__ void print_nan(float v, int64 i, int* cnt) { auto x = atomicAdd(cnt, 1); if (x<MAX_NAN_REPORT) { printf("detect a[%lld] = %f\n", i, v); cnt[x+1] = i; } } #ifdef HAS_CUDA __global__ void _check_nan_float16(__half* __restrict__ ptr, int64 num, int* cnt) { int64 i = threadIdx.x + blockIdx.x * (int64)blockDim.x; if (i<num) { #if JT_CHECK_NAN == 2 if (isnan(__half2float(ptr[i]))) #else if (isnan(__half2float(ptr[i])) || __hisinf(ptr[i]) // || abs(__half2float(ptr[i])) > 60000.f ) #endif print_nan(float(ptr[i]), i, cnt); } } __global__ void _check_nan_float32(float32* __restrict__ ptr, int64 num, int* cnt) { int64 i = threadIdx.x + blockIdx.x * (int64)blockDim.x; if (i<num) { #if JT_CHECK_NAN == 2 if (::isnan(ptr[i])) #else if (::isnan(ptr[i]) || ::isinf(ptr[i])) #endif print_nan(float(ptr[i]), i, cnt); } } __global__ void _check_nan_float64(float64* __restrict__ ptr, int64 num, int* cnt) { int64 i = threadIdx.x + blockIdx.x * (int64)blockDim.x; if (i<num) { #if JT_CHECK_NAN == 2 if (::isnan(ptr[i])) #else if (::isnan(ptr[i]) || ::isinf(ptr[i])) #endif print_nan(float(ptr[i]), i, cnt); } } int* check_nan_get_device_ptr() { static int* ptr = nullptr; if (ptr) return ptr; cudaMalloc(&ptr, 4+4*MAX_NAN_REPORT); cudaMemset(ptr, 0, 4+4*MAX_NAN_REPORT); return ptr; } vector<int> report_nan() { vector<int> buffer(MAX_NAN_REPORT+1); auto ptr = check_nan_get_device_ptr(); cudaMemcpy(buffer.data(), ptr, 4+4*MAX_NAN_REPORT, cudaMemcpyDeviceToHost); cudaMemset(ptr, 0, 4); return buffer; } vector<int> check_nan_float64(float64* ptr, int64 num) { int block_num = std::max((int64)1, (num-1)/1024+1); int thread_num = std::min((int64)1024, num); _check_nan_float64<<<block_num, thread_num>>>(ptr, num, check_nan_get_device_ptr()); return report_nan(); } vector<int> check_nan_float32(float32* ptr, int64 num) { int block_num = std::max((int64)1, (num-1)/1024+1); int thread_num = std::min((int64)1024, num); _check_nan_float32<<<block_num, thread_num>>>(ptr, num, check_nan_get_device_ptr()); return report_nan(); } vector<int> check_nan_float16(__half* ptr, int64 num) { int block_num = std::max((int64)1, (num-1)/1024+1); int thread_num = std::min((int64)1024, num); _check_nan_float16<<<block_num, thread_num>>>(ptr, num, check_nan_get_device_ptr()); return report_nan(); } #endif }
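The report buffer used by these checkers has a fixed layout: element 0 holds the running count of NaN/Inf hits and elements 1..MAX_NAN_REPORT hold the indices of the first few offenders, which report_nan() copies back before resetting the counter. A hypothetical caller, not part of Jittor, could consume it as below (float32 and int64 are Jittor typedefs from the headers above; the wrapper name and printing are illustrative and assume <vector> and <cstdio> are available):

// Sketch: scan a device float32 buffer of length n and print what was reported.
void check_buffer_for_nan(float32* d_data, int64 n) {
    std::vector<int> rep = jittor::check_nan_float32(d_data, n); // size MAX_NAN_REPORT + 1
    int hits = rep[0];
    if (hits > 0) {
        printf("found %d nan/inf value(s); first indices:", hits);
        for (int i = 1; i < (int)rep.size() && i <= hits; i++)
            printf(" %d", rep[i]);
        printf("\n");
    }
}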
cac6f6e667a0d4cb180455f87be903bc136e8ec0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "../include/cuda_utilities.hpp" __global__ void do_histo( int num_entries, const uint8_t * const d_source_array, uint8_t * d_histo ) { uint tid = blockIdx.x * blockDim.x + threadIdx.x; if( tid < num_entries ) { uint8_t value = d_source_array[tid]; uint8_t * bin_address = d_histo+value; atomicIncUint8( bin_address ); } } __host__ void test_uint8_histo( ) { int num_entries = 256*123; // Build array uint8_t h_source_data[ num_entries]; for( int i=0; i<num_entries; i++ ) { h_source_data[i] = i % 256; } // Do histo uint8_t * d_source_data; uint8_t * d_histo; hipError_t err = hipMalloc( &d_source_data, num_entries * sizeof( uint8_t ) ); check_cuda_error( "Couldn't allocate source data", err ); hipMemcpy( d_source_data, h_source_data, num_entries * sizeof( uint8_t ), hipMemcpyHostToDevice ); check_cuda_error( "Couldn't load source data", err ); hipMalloc( &d_histo, 256 ); check_cuda_error( "Couldn't allocate histo data", err ); hipMemset( d_histo, 0, 256 * sizeof( uint8_t) ); check_cuda_error( "Couldn't clear histo data", err ); dim3 block( 100 ); dim3 grid( divUp( num_entries, block.x)); hipLaunchKernelGGL(( do_histo), dim3(grid), dim3(block) , 0, 0, num_entries, d_source_data, d_histo ); hipDeviceSynchronize(); err = hipGetLastError( ); check_cuda_error( "Kernel failed", err ); err= hipFree( d_source_data ); check_cuda_error( "Couldn't free source data", err ); uint8_t * h_histo = (uint8_t *) new uint8_t[256]; err = hipMemcpy( (void *) h_histo, d_histo, 256 * sizeof( uint8_t), hipMemcpyDeviceToHost); check_cuda_error( "Couldn't copy to histo to host", err ); hipFree( d_histo ); check_cuda_error( "Couldn't free device histo data", err ); for( int i=0; i<256; i++ ) { std::cout << "bin " << i << ": " << (int)h_histo[i] << std::endl; } delete [] h_histo; } int main( int argc, char *argv[] ) { test_uint8_histo( ); }
cac6f6e667a0d4cb180455f87be903bc136e8ec0.cu
#include <iostream> #include "../include/cuda_utilities.hpp" __global__ void do_histo( int num_entries, const uint8_t * const d_source_array, uint8_t * d_histo ) { uint tid = blockIdx.x * blockDim.x + threadIdx.x; if( tid < num_entries ) { uint8_t value = d_source_array[tid]; uint8_t * bin_address = d_histo+value; atomicIncUint8( bin_address ); } } __host__ void test_uint8_histo( ) { int num_entries = 256*123; // Build array uint8_t h_source_data[ num_entries]; for( int i=0; i<num_entries; i++ ) { h_source_data[i] = i % 256; } // Do histo uint8_t * d_source_data; uint8_t * d_histo; cudaError_t err = cudaMalloc( &d_source_data, num_entries * sizeof( uint8_t ) ); check_cuda_error( "Couldn't allocate source data", err ); cudaMemcpy( d_source_data, h_source_data, num_entries * sizeof( uint8_t ), cudaMemcpyHostToDevice ); check_cuda_error( "Couldn't load source data", err ); cudaMalloc( &d_histo, 256 ); check_cuda_error( "Couldn't allocate histo data", err ); cudaMemset( d_histo, 0, 256 * sizeof( uint8_t) ); check_cuda_error( "Couldn't clear histo data", err ); dim3 block( 100 ); dim3 grid( divUp( num_entries, block.x)); do_histo<<< grid, block >>>( num_entries, d_source_data, d_histo ); cudaDeviceSynchronize(); err = cudaGetLastError( ); check_cuda_error( "Kernel failed", err ); err= cudaFree( d_source_data ); check_cuda_error( "Couldn't free source data", err ); uint8_t * h_histo = (uint8_t *) new uint8_t[256]; err = cudaMemcpy( (void *) h_histo, d_histo, 256 * sizeof( uint8_t), cudaMemcpyDeviceToHost); check_cuda_error( "Couldn't copy to histo to host", err ); cudaFree( d_histo ); check_cuda_error( "Couldn't free device histo data", err ); for( int i=0; i<256; i++ ) { std::cout << "bin " << i << ": " << (int)h_histo[i] << std::endl; } delete [] h_histo; } int main( int argc, char *argv[] ) { test_uint8_histo( ); }
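Both versions rely on atomicIncUint8 from ../include/cuda_utilities.hpp, which is not shown. CUDA exposes no native 8-bit atomics, so a helper like this is usually emulated with atomicCAS on the aligned 32-bit word containing the byte; the following is an assumption about what such a helper could look like, not the project's actual implementation:

__device__ void atomicIncUint8(uint8_t *addr) {
    // 32-bit word that contains this byte, and the byte's bit offset inside it
    unsigned int *base  = (unsigned int *)((size_t)addr & ~(size_t)3);
    unsigned int  shift = ((size_t)addr & 3) * 8;
    unsigned int  old   = *base, assumed;
    do {
        assumed = old;
        unsigned int byte    = ((assumed >> shift) + 1) & 0xffu;                // wraps at 256 like a uint8_t
        unsigned int desired = (assumed & ~(0xffu << shift)) | (byte << shift);
        old = atomicCAS(base, assumed, desired);                                // retry if the word changed underneath us
    } while (old != assumed);
}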
73b7a16ec20ecb63cdd39da50c6a324b516e7ae7.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <sparse.hpp> #include <kernel/sparse.hpp> #include <stdexcept> #include <string> #include <arith.hpp> #include <cast.hpp> #include <complex.hpp> #include <copy.hpp> #include <err_common.hpp> #include <lookup.hpp> #include <math.hpp> #include <platform.hpp> #include <where.hpp> namespace cuda { using namespace common; using namespace std; //hipsparseStatus_t hipsparseZcsr2csc(hipsparseHandle_t handle, // int m, int n, int nnz, // const hipDoubleComplex *csrSortedVal, // const int *csrSortedRowPtr, const int *csrSortedColInd, // hipDoubleComplex *cscSortedVal, // int *cscSortedRowInd, int *cscSortedColPtr, // hipsparseAction_t copyValues, // hipsparseIndexBase_t idxBase); template<typename T> struct csr2csc_func_def_t { typedef hipsparseStatus_t (*csr2csc_func_def)( hipsparseHandle_t, int, int, int, const T *, const int *, const int *, T *, int *, int *, hipsparseAction_t, hipsparseIndexBase_t); }; //hipsparseStatus_t hipsparseZdense2csr(hipsparseHandle_t handle, // int m, int n, // const hipsparseMatDescr_t descrA, // const hipDoubleComplex *A, int lda, // const int *nnzPerRow, // hipDoubleComplex *csrValA, // int *csrRowPtrA, int *csrColIndA) template<typename T> struct dense2csr_func_def_t { typedef hipsparseStatus_t (*dense2csr_func_def)( hipsparseHandle_t, int, int, const hipsparseMatDescr_t, const T *, int, const int *, T *, int *, int *); }; //hipsparseStatus_t hipsparseZdense2csc(hipsparseHandle_t handle, // int m, int n, // const hipsparseMatDescr_t descrA, // const hipDoubleComplex *A, int lda, // const int *nnzPerCol, // hipDoubleComplex *cscValA, // int *cscRowIndA, int *cscColPtrA) template<typename T> struct dense2csc_func_def_t { typedef hipsparseStatus_t (*dense2csc_func_def)( hipsparseHandle_t, int, int, const hipsparseMatDescr_t, const T *, int, const int *, T *, int *, int *); }; //hipsparseStatus_t hipsparseZcsr2dense(hipsparseHandle_t handle, // int m, int n, // const hipsparseMatDescr_t descrA, // const hipDoubleComplex *csrValA, // const int *csrRowPtrA, // const int *csrColIndA, // hipDoubleComplex *A, int lda) template<typename T> struct csr2dense_func_def_t { typedef hipsparseStatus_t (*csr2dense_func_def)( hipsparseHandle_t, int, int, const hipsparseMatDescr_t, const T *, const int *, const int *, T *, int); }; //hipsparseStatus_t hipsparseZcsc2dense(hipsparseHandle_t handle, // int m, int n, // const hipsparseMatDescr_t descrA, // const hipDoubleComplex *cscValA, // const int *cscRowIndA, // const int *cscColPtrA, // hipDoubleComplex *A, int lda) template<typename T> struct csc2dense_func_def_t { typedef hipsparseStatus_t (*csc2dense_func_def)( hipsparseHandle_t, int, int, const hipsparseMatDescr_t, const T *, const int *, const int *, T *, int); }; //hipsparseStatus_t hipsparseZnnz(hipsparseHandle_t handle, // hipsparseDirection_t dirA, // int m, int n, // const hipsparseMatDescr_t descrA, // const hipDoubleComplex *A, int lda, // int *nnzPerRowColumn, // int *nnzTotalDevHostPtr) template<typename T> struct nnz_func_def_t { typedef hipsparseStatus_t (*nnz_func_def)( hipsparseHandle_t, hipsparseDirection_t, int, int, const hipsparseMatDescr_t, const T *, int, int *, int *); }; 
//hipsparseStatus_t hipsparseZgthr(hipsparseHandle_t handle, // int nnz, // const hipDoubleComplex *y, // hipDoubleComplex *xVal, const int *xInd, // hipsparseIndexBase_t idxBase) template<typename T> struct gthr_func_def_t { typedef hipsparseStatus_t (*gthr_func_def)(hipsparseHandle_t, int, const T *, T*, const int *, hipsparseIndexBase_t); }; #define SPARSE_FUNC_DEF( FUNC ) \ template<typename T> \ typename FUNC##_func_def_t<T>::FUNC##_func_def \ FUNC##_func(); #define SPARSE_FUNC( FUNC, TYPE, PREFIX ) \ template<> typename FUNC##_func_def_t<TYPE>::FUNC##_func_def \ FUNC##_func<TYPE>() \ { return (FUNC##_func_def_t<TYPE>::FUNC##_func_def)&cusparse##PREFIX##FUNC; } SPARSE_FUNC_DEF(csr2csc) SPARSE_FUNC(csr2csc, float, S) SPARSE_FUNC(csr2csc, double, D) SPARSE_FUNC(csr2csc, cfloat, C) SPARSE_FUNC(csr2csc, cdouble,Z) SPARSE_FUNC_DEF(dense2csr) SPARSE_FUNC(dense2csr, float, S) SPARSE_FUNC(dense2csr, double, D) SPARSE_FUNC(dense2csr, cfloat, C) SPARSE_FUNC(dense2csr, cdouble,Z) SPARSE_FUNC_DEF(dense2csc) SPARSE_FUNC(dense2csc, float, S) SPARSE_FUNC(dense2csc, double, D) SPARSE_FUNC(dense2csc, cfloat, C) SPARSE_FUNC(dense2csc, cdouble,Z) SPARSE_FUNC_DEF(csr2dense) SPARSE_FUNC(csr2dense, float, S) SPARSE_FUNC(csr2dense, double, D) SPARSE_FUNC(csr2dense, cfloat, C) SPARSE_FUNC(csr2dense, cdouble,Z) SPARSE_FUNC_DEF(csc2dense) SPARSE_FUNC(csc2dense, float, S) SPARSE_FUNC(csc2dense, double, D) SPARSE_FUNC(csc2dense, cfloat, C) SPARSE_FUNC(csc2dense, cdouble,Z) SPARSE_FUNC_DEF(nnz) SPARSE_FUNC(nnz, float, S) SPARSE_FUNC(nnz, double, D) SPARSE_FUNC(nnz, cfloat, C) SPARSE_FUNC(nnz, cdouble,Z) SPARSE_FUNC_DEF(gthr) SPARSE_FUNC(gthr, float, S) SPARSE_FUNC(gthr, double, D) SPARSE_FUNC(gthr, cfloat, C) SPARSE_FUNC(gthr, cdouble,Z) #undef SPARSE_FUNC #undef SPARSE_FUNC_DEF // Partial template specialization of sparseConvertDenseToStorage for COO // However, template specialization is not allowed template<typename T> SparseArray<T> sparseConvertDenseToCOO(const Array<T> &in) { Array<uint> nonZeroIdx_ = where<T>(in); Array<int> nonZeroIdx = cast<int, uint>(nonZeroIdx_); dim_t nNZ = nonZeroIdx.elements(); Array<int> constDim = createValueArray<int>(dim4(nNZ), in.dims()[0]); Array<int> rowIdx = arithOp<int, af_mod_t>(nonZeroIdx, constDim, nonZeroIdx.dims()); Array<int> colIdx = arithOp<int, af_div_t>(nonZeroIdx, constDim, nonZeroIdx.dims()); Array<T> values = copyArray<T>(in); values.modDims(dim4(values.elements())); values = lookup<T, int>(values, nonZeroIdx, 0); return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx, AF_STORAGE_COO); } template<typename T, af_storage stype> SparseArray<T> sparseConvertDenseToStorage(const Array<T> &in) { const int M = in.dims()[0]; const int N = in.dims()[1]; // Create Sparse Matrix Descriptor hipsparseMatDescr_t descr = 0; CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr)); hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO); int d = -1; hipsparseDirection_t dir = HIPSPARSE_DIRECTION_ROW; if(stype == AF_STORAGE_CSR) { d = M; dir = HIPSPARSE_DIRECTION_ROW; } else { d = N; dir = HIPSPARSE_DIRECTION_COLUMN; } Array<int> nnzPerDir = createEmptyArray<int>(dim4(d)); int nNZ = -1; CUSPARSE_CHECK(nnz_func<T>()( sparseHandle(), dir, M, N, descr, in.get(), in.strides()[1], nnzPerDir.get(), &nNZ)); Array<int> rowIdx = createEmptyArray<int>(dim4()); Array<int> colIdx = createEmptyArray<int>(dim4()); if(stype == AF_STORAGE_CSR) { rowIdx = createEmptyArray<int>(dim4(M+1)); colIdx = 
createEmptyArray<int>(dim4(nNZ)); } else { rowIdx = createEmptyArray<int>(dim4(nNZ)); colIdx = createEmptyArray<int>(dim4(N+1)); } Array<T> values = createEmptyArray<T>(dim4(nNZ)); if(stype == AF_STORAGE_CSR) CUSPARSE_CHECK(dense2csr_func<T>()( sparseHandle(), M, N, descr, in.get(), in.strides()[1], nnzPerDir.get(), values.get(), rowIdx.get(), colIdx.get())); else CUSPARSE_CHECK(dense2csc_func<T>()( sparseHandle(), M, N, descr, in.get(), in.strides()[1], nnzPerDir.get(), values.get(), rowIdx.get(), colIdx.get())); // Destory Sparse Matrix Descriptor CUSPARSE_CHECK(hipsparseDestroyMatDescr(descr)); return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx, stype); } // Partial template specialization of sparseConvertStorageToDense for COO // However, template specialization is not allowed template<typename T> Array<T> sparseConvertCOOToDense(const SparseArray<T> &in) { Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0)); const Array<T> values = in.getValues(); const Array<int> rowIdx = in.getRowIdx(); const Array<int> colIdx = in.getColIdx(); kernel::coo2dense<T>(dense, values, rowIdx, colIdx); return dense; } template<typename T, af_storage stype> Array<T> sparseConvertStorageToDense(const SparseArray<T> &in) { // Create Sparse Matrix Descriptor hipsparseMatDescr_t descr = 0; CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr)); hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO); int M = in.dims()[0]; int N = in.dims()[1]; Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0)); int d_strides1 = dense.strides()[1]; if(stype == AF_STORAGE_CSR) CUSPARSE_CHECK(csr2dense_func<T>()( sparseHandle(), M, N, descr, in.getValues().get(), in.getRowIdx().get(), in.getColIdx().get(), dense.get(), d_strides1)); else CUSPARSE_CHECK(csc2dense_func<T>()( sparseHandle(), M, N, descr, in.getValues().get(), in.getRowIdx().get(), in.getColIdx().get(), dense.get(), d_strides1)); // Destory Sparse Matrix Descriptor CUSPARSE_CHECK(hipsparseDestroyMatDescr(descr)); return dense; } template<typename T, af_storage dest, af_storage src> SparseArray<T> sparseConvertStorageToStorage(const SparseArray<T> &in) { using std::shared_ptr; in.eval(); int nNZ = in.getNNZ(); SparseArray<T> converted = createEmptySparseArray<T>(in.dims(), nNZ, dest); if(src == AF_STORAGE_CSR && dest == AF_STORAGE_COO) { // Copy colIdx as is CUDA_CHECK(hipMemcpyAsync(converted.getColIdx().get(), in.getColIdx().get(), in.getColIdx().elements() * sizeof(int), hipMemcpyDeviceToDevice, cuda::getActiveStream())); // cusparse function to expand compressed row into coordinate CUSPARSE_CHECK(hipsparseXcsr2coo( sparseHandle(), in.getRowIdx().get(), nNZ, in.dims()[0], converted.getRowIdx().get(), HIPSPARSE_INDEX_BASE_ZERO)); // Call sort size_t pBufferSizeInBytes = 0; CUSPARSE_CHECK(hipsparseXcoosort_bufferSizeExt( sparseHandle(), in.dims()[0], in.dims()[1], nNZ, converted.getRowIdx().get(), converted.getColIdx().get(), &pBufferSizeInBytes)); shared_ptr<char> pBuffer(memAlloc<char>(pBufferSizeInBytes), memFree<char>); shared_ptr<int> P(memAlloc<int>(nNZ), memFree<int>); CUSPARSE_CHECK(hipsparseCreateIdentityPermutation(sparseHandle(), nNZ, P.get())); CUSPARSE_CHECK(hipsparseXcoosortByColumn( sparseHandle(), in.dims()[0], in.dims()[1], nNZ, converted.getRowIdx().get(), converted.getColIdx().get(), P.get(), (void*)pBuffer.get())); CUSPARSE_CHECK(gthr_func<T>()( sparseHandle(), nNZ, in.getValues().get(), converted.getValues().get(), P.get(), HIPSPARSE_INDEX_BASE_ZERO)); 
} else if (src == AF_STORAGE_COO && dest == AF_STORAGE_CSR) { // The cusparse csr sort function is not behaving correctly. // So the work around is to convert the COO into row major and then // convert it to CSR // Deep copy input into temporary COO Row Major SparseArray<T> cooT = createArrayDataSparseArray<T>(in.dims(), in.getValues(), in.getRowIdx(), in.getColIdx(), in.getStorage(), true); // Call sort to convert column major to row major { size_t pBufferSizeInBytes = 0; CUSPARSE_CHECK(hipsparseXcoosort_bufferSizeExt( sparseHandle(), cooT.dims()[0], cooT.dims()[1], nNZ, cooT.getRowIdx().get(), cooT.getColIdx().get(), &pBufferSizeInBytes)); shared_ptr<char> pBuffer(memAlloc<char>(pBufferSizeInBytes), memFree<char>); shared_ptr<int> P(memAlloc<int>(nNZ), memFree<int>); CUSPARSE_CHECK(hipsparseCreateIdentityPermutation(sparseHandle(), nNZ, P.get())); CUSPARSE_CHECK(hipsparseXcoosortByRow( sparseHandle(), cooT.dims()[0], cooT.dims()[1], nNZ, cooT.getRowIdx().get(), cooT.getColIdx().get(), P.get(), (void*)pBuffer.get())); CUSPARSE_CHECK(gthr_func<T>()( sparseHandle(), nNZ, in.getValues().get(), cooT.getValues().get(), P.get(), HIPSPARSE_INDEX_BASE_ZERO)); } // Copy values and colIdx as is CUDA_CHECK(hipMemcpyAsync(converted.getValues().get(), cooT.getValues().get(), cooT.getValues().elements() * sizeof(T), hipMemcpyDeviceToDevice, cuda::getActiveStream())); CUDA_CHECK(hipMemcpyAsync(converted.getColIdx().get(), cooT.getColIdx().get(), cooT.getColIdx().elements() * sizeof(int), hipMemcpyDeviceToDevice, cuda::getActiveStream())); // cusparse function to compress row from coordinate CUSPARSE_CHECK(hipsparseXcoo2csr( sparseHandle(), cooT.getRowIdx().get(), nNZ, cooT.dims()[0], converted.getRowIdx().get(), HIPSPARSE_INDEX_BASE_ZERO)); // No need to call CSRSORT } else { // Should never come here AF_ERROR("CUDA Backend invalid conversion combination", AF_ERR_NOT_SUPPORTED); } return converted; } #define INSTANTIATE_TO_STORAGE(T, S) \ template SparseArray<T> sparseConvertStorageToStorage<T, S, AF_STORAGE_CSR>(const SparseArray<T> &in); \ template SparseArray<T> sparseConvertStorageToStorage<T, S, AF_STORAGE_CSC>(const SparseArray<T> &in); \ template SparseArray<T> sparseConvertStorageToStorage<T, S, AF_STORAGE_COO>(const SparseArray<T> &in); \ #define INSTANTIATE_COO_SPECIAL(T) \ template<> SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_COO>(const Array<T> &in) \ { return sparseConvertDenseToCOO<T>(in); } \ template<> Array<T> sparseConvertStorageToDense<T, AF_STORAGE_COO>(const SparseArray<T> &in) \ { return sparseConvertCOOToDense<T>(in); } \ #define INSTANTIATE_SPARSE(T) \ template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSR>(const Array<T> &in); \ template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSC>(const Array<T> &in); \ \ template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSR>(const SparseArray<T> &in); \ template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSC>(const SparseArray<T> &in); \ \ INSTANTIATE_COO_SPECIAL(T) \ \ INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSR) \ INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSC) \ INSTANTIATE_TO_STORAGE(T, AF_STORAGE_COO) \ INSTANTIATE_SPARSE(float) INSTANTIATE_SPARSE(double) INSTANTIATE_SPARSE(cfloat) INSTANTIATE_SPARSE(cdouble) #undef INSTANTIATE_TO_STORAGE #undef INSTANTIATE_COO_SPECIAL #undef INSTANTIATE_SPARSE }
73b7a16ec20ecb63cdd39da50c6a324b516e7ae7.cu
/******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <sparse.hpp> #include <kernel/sparse.hpp> #include <stdexcept> #include <string> #include <arith.hpp> #include <cast.hpp> #include <complex.hpp> #include <copy.hpp> #include <err_common.hpp> #include <lookup.hpp> #include <math.hpp> #include <platform.hpp> #include <where.hpp> namespace cuda { using namespace common; using namespace std; //cusparseStatus_t cusparseZcsr2csc(cusparseHandle_t handle, // int m, int n, int nnz, // const cuDoubleComplex *csrSortedVal, // const int *csrSortedRowPtr, const int *csrSortedColInd, // cuDoubleComplex *cscSortedVal, // int *cscSortedRowInd, int *cscSortedColPtr, // cusparseAction_t copyValues, // cusparseIndexBase_t idxBase); template<typename T> struct csr2csc_func_def_t { typedef cusparseStatus_t (*csr2csc_func_def)( cusparseHandle_t, int, int, int, const T *, const int *, const int *, T *, int *, int *, cusparseAction_t, cusparseIndexBase_t); }; //cusparseStatus_t cusparseZdense2csr(cusparseHandle_t handle, // int m, int n, // const cusparseMatDescr_t descrA, // const cuDoubleComplex *A, int lda, // const int *nnzPerRow, // cuDoubleComplex *csrValA, // int *csrRowPtrA, int *csrColIndA) template<typename T> struct dense2csr_func_def_t { typedef cusparseStatus_t (*dense2csr_func_def)( cusparseHandle_t, int, int, const cusparseMatDescr_t, const T *, int, const int *, T *, int *, int *); }; //cusparseStatus_t cusparseZdense2csc(cusparseHandle_t handle, // int m, int n, // const cusparseMatDescr_t descrA, // const cuDoubleComplex *A, int lda, // const int *nnzPerCol, // cuDoubleComplex *cscValA, // int *cscRowIndA, int *cscColPtrA) template<typename T> struct dense2csc_func_def_t { typedef cusparseStatus_t (*dense2csc_func_def)( cusparseHandle_t, int, int, const cusparseMatDescr_t, const T *, int, const int *, T *, int *, int *); }; //cusparseStatus_t cusparseZcsr2dense(cusparseHandle_t handle, // int m, int n, // const cusparseMatDescr_t descrA, // const cuDoubleComplex *csrValA, // const int *csrRowPtrA, // const int *csrColIndA, // cuDoubleComplex *A, int lda) template<typename T> struct csr2dense_func_def_t { typedef cusparseStatus_t (*csr2dense_func_def)( cusparseHandle_t, int, int, const cusparseMatDescr_t, const T *, const int *, const int *, T *, int); }; //cusparseStatus_t cusparseZcsc2dense(cusparseHandle_t handle, // int m, int n, // const cusparseMatDescr_t descrA, // const cuDoubleComplex *cscValA, // const int *cscRowIndA, // const int *cscColPtrA, // cuDoubleComplex *A, int lda) template<typename T> struct csc2dense_func_def_t { typedef cusparseStatus_t (*csc2dense_func_def)( cusparseHandle_t, int, int, const cusparseMatDescr_t, const T *, const int *, const int *, T *, int); }; //cusparseStatus_t cusparseZnnz(cusparseHandle_t handle, // cusparseDirection_t dirA, // int m, int n, // const cusparseMatDescr_t descrA, // const cuDoubleComplex *A, int lda, // int *nnzPerRowColumn, // int *nnzTotalDevHostPtr) template<typename T> struct nnz_func_def_t { typedef cusparseStatus_t (*nnz_func_def)( cusparseHandle_t, cusparseDirection_t, int, int, const cusparseMatDescr_t, const T *, int, int *, int *); }; //cusparseStatus_t cusparseZgthr(cusparseHandle_t handle, // int nnz, // const cuDoubleComplex *y, // cuDoubleComplex 
*xVal, const int *xInd, // cusparseIndexBase_t idxBase) template<typename T> struct gthr_func_def_t { typedef cusparseStatus_t (*gthr_func_def)(cusparseHandle_t, int, const T *, T*, const int *, cusparseIndexBase_t); }; #define SPARSE_FUNC_DEF( FUNC ) \ template<typename T> \ typename FUNC##_func_def_t<T>::FUNC##_func_def \ FUNC##_func(); #define SPARSE_FUNC( FUNC, TYPE, PREFIX ) \ template<> typename FUNC##_func_def_t<TYPE>::FUNC##_func_def \ FUNC##_func<TYPE>() \ { return (FUNC##_func_def_t<TYPE>::FUNC##_func_def)&cusparse##PREFIX##FUNC; } SPARSE_FUNC_DEF(csr2csc) SPARSE_FUNC(csr2csc, float, S) SPARSE_FUNC(csr2csc, double, D) SPARSE_FUNC(csr2csc, cfloat, C) SPARSE_FUNC(csr2csc, cdouble,Z) SPARSE_FUNC_DEF(dense2csr) SPARSE_FUNC(dense2csr, float, S) SPARSE_FUNC(dense2csr, double, D) SPARSE_FUNC(dense2csr, cfloat, C) SPARSE_FUNC(dense2csr, cdouble,Z) SPARSE_FUNC_DEF(dense2csc) SPARSE_FUNC(dense2csc, float, S) SPARSE_FUNC(dense2csc, double, D) SPARSE_FUNC(dense2csc, cfloat, C) SPARSE_FUNC(dense2csc, cdouble,Z) SPARSE_FUNC_DEF(csr2dense) SPARSE_FUNC(csr2dense, float, S) SPARSE_FUNC(csr2dense, double, D) SPARSE_FUNC(csr2dense, cfloat, C) SPARSE_FUNC(csr2dense, cdouble,Z) SPARSE_FUNC_DEF(csc2dense) SPARSE_FUNC(csc2dense, float, S) SPARSE_FUNC(csc2dense, double, D) SPARSE_FUNC(csc2dense, cfloat, C) SPARSE_FUNC(csc2dense, cdouble,Z) SPARSE_FUNC_DEF(nnz) SPARSE_FUNC(nnz, float, S) SPARSE_FUNC(nnz, double, D) SPARSE_FUNC(nnz, cfloat, C) SPARSE_FUNC(nnz, cdouble,Z) SPARSE_FUNC_DEF(gthr) SPARSE_FUNC(gthr, float, S) SPARSE_FUNC(gthr, double, D) SPARSE_FUNC(gthr, cfloat, C) SPARSE_FUNC(gthr, cdouble,Z) #undef SPARSE_FUNC #undef SPARSE_FUNC_DEF // Partial template specialization of sparseConvertDenseToStorage for COO // However, template specialization is not allowed template<typename T> SparseArray<T> sparseConvertDenseToCOO(const Array<T> &in) { Array<uint> nonZeroIdx_ = where<T>(in); Array<int> nonZeroIdx = cast<int, uint>(nonZeroIdx_); dim_t nNZ = nonZeroIdx.elements(); Array<int> constDim = createValueArray<int>(dim4(nNZ), in.dims()[0]); Array<int> rowIdx = arithOp<int, af_mod_t>(nonZeroIdx, constDim, nonZeroIdx.dims()); Array<int> colIdx = arithOp<int, af_div_t>(nonZeroIdx, constDim, nonZeroIdx.dims()); Array<T> values = copyArray<T>(in); values.modDims(dim4(values.elements())); values = lookup<T, int>(values, nonZeroIdx, 0); return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx, AF_STORAGE_COO); } template<typename T, af_storage stype> SparseArray<T> sparseConvertDenseToStorage(const Array<T> &in) { const int M = in.dims()[0]; const int N = in.dims()[1]; // Create Sparse Matrix Descriptor cusparseMatDescr_t descr = 0; CUSPARSE_CHECK(cusparseCreateMatDescr(&descr)); cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO); int d = -1; cusparseDirection_t dir = CUSPARSE_DIRECTION_ROW; if(stype == AF_STORAGE_CSR) { d = M; dir = CUSPARSE_DIRECTION_ROW; } else { d = N; dir = CUSPARSE_DIRECTION_COLUMN; } Array<int> nnzPerDir = createEmptyArray<int>(dim4(d)); int nNZ = -1; CUSPARSE_CHECK(nnz_func<T>()( sparseHandle(), dir, M, N, descr, in.get(), in.strides()[1], nnzPerDir.get(), &nNZ)); Array<int> rowIdx = createEmptyArray<int>(dim4()); Array<int> colIdx = createEmptyArray<int>(dim4()); if(stype == AF_STORAGE_CSR) { rowIdx = createEmptyArray<int>(dim4(M+1)); colIdx = createEmptyArray<int>(dim4(nNZ)); } else { rowIdx = createEmptyArray<int>(dim4(nNZ)); colIdx = createEmptyArray<int>(dim4(N+1)); } Array<T> values = 
createEmptyArray<T>(dim4(nNZ)); if(stype == AF_STORAGE_CSR) CUSPARSE_CHECK(dense2csr_func<T>()( sparseHandle(), M, N, descr, in.get(), in.strides()[1], nnzPerDir.get(), values.get(), rowIdx.get(), colIdx.get())); else CUSPARSE_CHECK(dense2csc_func<T>()( sparseHandle(), M, N, descr, in.get(), in.strides()[1], nnzPerDir.get(), values.get(), rowIdx.get(), colIdx.get())); // Destory Sparse Matrix Descriptor CUSPARSE_CHECK(cusparseDestroyMatDescr(descr)); return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx, stype); } // Partial template specialization of sparseConvertStorageToDense for COO // However, template specialization is not allowed template<typename T> Array<T> sparseConvertCOOToDense(const SparseArray<T> &in) { Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0)); const Array<T> values = in.getValues(); const Array<int> rowIdx = in.getRowIdx(); const Array<int> colIdx = in.getColIdx(); kernel::coo2dense<T>(dense, values, rowIdx, colIdx); return dense; } template<typename T, af_storage stype> Array<T> sparseConvertStorageToDense(const SparseArray<T> &in) { // Create Sparse Matrix Descriptor cusparseMatDescr_t descr = 0; CUSPARSE_CHECK(cusparseCreateMatDescr(&descr)); cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO); int M = in.dims()[0]; int N = in.dims()[1]; Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0)); int d_strides1 = dense.strides()[1]; if(stype == AF_STORAGE_CSR) CUSPARSE_CHECK(csr2dense_func<T>()( sparseHandle(), M, N, descr, in.getValues().get(), in.getRowIdx().get(), in.getColIdx().get(), dense.get(), d_strides1)); else CUSPARSE_CHECK(csc2dense_func<T>()( sparseHandle(), M, N, descr, in.getValues().get(), in.getRowIdx().get(), in.getColIdx().get(), dense.get(), d_strides1)); // Destory Sparse Matrix Descriptor CUSPARSE_CHECK(cusparseDestroyMatDescr(descr)); return dense; } template<typename T, af_storage dest, af_storage src> SparseArray<T> sparseConvertStorageToStorage(const SparseArray<T> &in) { using std::shared_ptr; in.eval(); int nNZ = in.getNNZ(); SparseArray<T> converted = createEmptySparseArray<T>(in.dims(), nNZ, dest); if(src == AF_STORAGE_CSR && dest == AF_STORAGE_COO) { // Copy colIdx as is CUDA_CHECK(cudaMemcpyAsync(converted.getColIdx().get(), in.getColIdx().get(), in.getColIdx().elements() * sizeof(int), cudaMemcpyDeviceToDevice, cuda::getActiveStream())); // cusparse function to expand compressed row into coordinate CUSPARSE_CHECK(cusparseXcsr2coo( sparseHandle(), in.getRowIdx().get(), nNZ, in.dims()[0], converted.getRowIdx().get(), CUSPARSE_INDEX_BASE_ZERO)); // Call sort size_t pBufferSizeInBytes = 0; CUSPARSE_CHECK(cusparseXcoosort_bufferSizeExt( sparseHandle(), in.dims()[0], in.dims()[1], nNZ, converted.getRowIdx().get(), converted.getColIdx().get(), &pBufferSizeInBytes)); shared_ptr<char> pBuffer(memAlloc<char>(pBufferSizeInBytes), memFree<char>); shared_ptr<int> P(memAlloc<int>(nNZ), memFree<int>); CUSPARSE_CHECK(cusparseCreateIdentityPermutation(sparseHandle(), nNZ, P.get())); CUSPARSE_CHECK(cusparseXcoosortByColumn( sparseHandle(), in.dims()[0], in.dims()[1], nNZ, converted.getRowIdx().get(), converted.getColIdx().get(), P.get(), (void*)pBuffer.get())); CUSPARSE_CHECK(gthr_func<T>()( sparseHandle(), nNZ, in.getValues().get(), converted.getValues().get(), P.get(), CUSPARSE_INDEX_BASE_ZERO)); } else if (src == AF_STORAGE_COO && dest == AF_STORAGE_CSR) { // The cusparse csr sort function is not behaving correctly. 
// So the work around is to convert the COO into row major and then // convert it to CSR // Deep copy input into temporary COO Row Major SparseArray<T> cooT = createArrayDataSparseArray<T>(in.dims(), in.getValues(), in.getRowIdx(), in.getColIdx(), in.getStorage(), true); // Call sort to convert column major to row major { size_t pBufferSizeInBytes = 0; CUSPARSE_CHECK(cusparseXcoosort_bufferSizeExt( sparseHandle(), cooT.dims()[0], cooT.dims()[1], nNZ, cooT.getRowIdx().get(), cooT.getColIdx().get(), &pBufferSizeInBytes)); shared_ptr<char> pBuffer(memAlloc<char>(pBufferSizeInBytes), memFree<char>); shared_ptr<int> P(memAlloc<int>(nNZ), memFree<int>); CUSPARSE_CHECK(cusparseCreateIdentityPermutation(sparseHandle(), nNZ, P.get())); CUSPARSE_CHECK(cusparseXcoosortByRow( sparseHandle(), cooT.dims()[0], cooT.dims()[1], nNZ, cooT.getRowIdx().get(), cooT.getColIdx().get(), P.get(), (void*)pBuffer.get())); CUSPARSE_CHECK(gthr_func<T>()( sparseHandle(), nNZ, in.getValues().get(), cooT.getValues().get(), P.get(), CUSPARSE_INDEX_BASE_ZERO)); } // Copy values and colIdx as is CUDA_CHECK(cudaMemcpyAsync(converted.getValues().get(), cooT.getValues().get(), cooT.getValues().elements() * sizeof(T), cudaMemcpyDeviceToDevice, cuda::getActiveStream())); CUDA_CHECK(cudaMemcpyAsync(converted.getColIdx().get(), cooT.getColIdx().get(), cooT.getColIdx().elements() * sizeof(int), cudaMemcpyDeviceToDevice, cuda::getActiveStream())); // cusparse function to compress row from coordinate CUSPARSE_CHECK(cusparseXcoo2csr( sparseHandle(), cooT.getRowIdx().get(), nNZ, cooT.dims()[0], converted.getRowIdx().get(), CUSPARSE_INDEX_BASE_ZERO)); // No need to call CSRSORT } else { // Should never come here AF_ERROR("CUDA Backend invalid conversion combination", AF_ERR_NOT_SUPPORTED); } return converted; } #define INSTANTIATE_TO_STORAGE(T, S) \ template SparseArray<T> sparseConvertStorageToStorage<T, S, AF_STORAGE_CSR>(const SparseArray<T> &in); \ template SparseArray<T> sparseConvertStorageToStorage<T, S, AF_STORAGE_CSC>(const SparseArray<T> &in); \ template SparseArray<T> sparseConvertStorageToStorage<T, S, AF_STORAGE_COO>(const SparseArray<T> &in); \ #define INSTANTIATE_COO_SPECIAL(T) \ template<> SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_COO>(const Array<T> &in) \ { return sparseConvertDenseToCOO<T>(in); } \ template<> Array<T> sparseConvertStorageToDense<T, AF_STORAGE_COO>(const SparseArray<T> &in) \ { return sparseConvertCOOToDense<T>(in); } \ #define INSTANTIATE_SPARSE(T) \ template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSR>(const Array<T> &in); \ template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSC>(const Array<T> &in); \ \ template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSR>(const SparseArray<T> &in); \ template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSC>(const SparseArray<T> &in); \ \ INSTANTIATE_COO_SPECIAL(T) \ \ INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSR) \ INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSC) \ INSTANTIATE_TO_STORAGE(T, AF_STORAGE_COO) \ INSTANTIATE_SPARSE(float) INSTANTIATE_SPARSE(double) INSTANTIATE_SPARSE(cfloat) INSTANTIATE_SPARSE(cdouble) #undef INSTANTIATE_TO_STORAGE #undef INSTANTIATE_COO_SPECIAL #undef INSTANTIATE_SPARSE }
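For reference, a tiny worked example of the storage layouts these converters move between (zero-based indexing, as configured with CUSPARSE_INDEX_BASE_ZERO), for a matrix A = [[1, 0, 2], [0, 3, 0]] with M = 2 rows:

// CSR: values = [1, 2, 3], colIdx = [0, 2, 1], rowPtr = [0, 2, 3]   (rowPtr has M+1 entries,
//      which is why the CSR branch above allocates its row array with dim4(M+1))
// COO: values = [1, 2, 3], rowIdx = [0, 0, 1], colIdx = [0, 2, 1]   (one triplet per non-zero)
// cusparseXcsr2coo expands rowPtr into per-entry row indices; cusparseXcoo2csr compresses them back,
// which is what the CSR<->COO branches of sparseConvertStorageToStorage do above.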
366cd7f5c38f24d157344e09281e53f01c4c285c.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stddef.h>
#include <limits.h>
#include <stdalign.h>

// Kernel: thread 0 clears the device-side flag.
__global__ void f_kernel(int *d_flag){
    int tid = blockIdx.x*blockDim.x+threadIdx.x;
    (void)tid;
    if(threadIdx.x == 0) *d_flag = 0;
}

// Copies the host flag to the device, clears it in the kernel, and reads it back.
int f(void){
    int *d_flag;
    int flag = 1;
    hipMalloc((void**)&d_flag, sizeof(int));
    hipMemcpy(d_flag, &flag, sizeof(int), hipMemcpyHostToDevice); // copy from &flag, not from a null pointer
    hipLaunchKernelGGL(( f_kernel), dim3(1),dim3(1), 0, 0, d_flag);
    hipMemcpy(&flag, d_flag, sizeof(int), hipMemcpyDeviceToHost);
    hipFree(d_flag);
    return flag; // f() is declared int and its result is used below
}

int main(void) {
    f() + 0;
    printf("%d\n", f() + 0);
    //return 0;
}
// compiles successfully

366cd7f5c38f24d157344e09281e53f01c4c285c.cu
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stddef.h>
#include <limits.h>
#include <stdalign.h>

// Kernel: thread 0 clears the device-side flag.
__global__ void f_kernel(int *d_flag){
    int tid = blockIdx.x*blockDim.x+threadIdx.x;
    (void)tid;
    if(threadIdx.x == 0) *d_flag = 0;
}

// Copies the host flag to the device, clears it in the kernel, and reads it back.
int f(void){
    int *d_flag;
    int flag = 1;
    cudaMalloc((void**)&d_flag, sizeof(int));
    cudaMemcpy(d_flag, &flag, sizeof(int), cudaMemcpyHostToDevice); // copy from &flag, not from a null pointer
    f_kernel<<<1,1>>>(d_flag);
    cudaMemcpy(&flag, d_flag, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_flag);
    return flag; // f() is declared int and its result is used below
}

int main(void) {
    f() + 0;
    printf("%d\n", f() + 0);
    //return 0;
}
// compiles successfully
69b6bb9ef8344d8942ca344c77ffbcced8a2a84a.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <random> #include <cmath> #include <chrono> #include <mnist.hpp> #include <utils.hpp> constexpr std::size_t minibatch_size = 64; constexpr std::size_t test_size = 10000; constexpr std::size_t num_iterations = 1000; constexpr std::size_t input_size = mnist_loader::IMAGE_DIM * mnist_loader::IMAGE_DIM; constexpr std::size_t hidden_size = 100; constexpr std::size_t output_size = mnist_loader::CLASS_SIZE; constexpr std::size_t print_info_interval = 20; constexpr float learning_rate = 0.5f; void matmul(float* const C, const float* const A, const float* const B, const std::size_t M, const std::size_t N, const std::size_t K) { for (std::size_t m = 0; m < M; m++) { for (std::size_t n = 0; n < N; n++) { float sum = 0.0f; for (std::size_t k = 0; k < K; k++) { sum += A[m + k * M] * B[k + n * K]; } C[m + M * n] = sum; } } } void matmul_tn(float* const C, const float* const A, const float* const B, const std::size_t M, const std::size_t N, const std::size_t K) { for (std::size_t m = 0; m < M; m++) { for (std::size_t n = 0; n < N; n++) { float sum = 0.0f; for (std::size_t k = 0; k < K; k++) { sum += A[k + m * K] * B[k + n * K]; } C[m + M * n] = sum; } } } void gemm_nt(const float beta, float* const C, const float alpha, const float* const A, const float* const B, const std::size_t M, const std::size_t N, const std::size_t K) { for (std::size_t m = 0; m < M; m++) { for (std::size_t n = 0; n < N; n++) { float sum = 0.0f; for (std::size_t k = 0; k < K; k++) { sum += A[m + k * M] * B[n + k * N]; } C[m + M * n] = beta * C[m + M * n] + alpha * sum; } } } void elementwise_product(float* const C, const float* const A, const float* const B, const std::size_t size) { for (std::size_t i = 0; i < size; i++) { C[i] = A[i] * B[i]; } } void add_bias(float* const A, const float* const bias, const std::size_t layer_size, const std::size_t minibatch_size) { for (std::size_t mb = 0; mb < minibatch_size; mb++) { for (std::size_t ls = 0; ls < layer_size; ls++) { A[mb * layer_size + ls] += bias[ls]; } } } void ReLU(float* const acted, const float* const pre_act, const std::size_t size, const std::size_t minibatch_size) { for (std::size_t i = 0; i < size * minibatch_size; i++) { acted[i] = ::max(0.0f, pre_act[i]); } } void dReLU(float* const d_acted, const float* const pre_act, const std::size_t size, const std::size_t minibatch_size) { for (std::size_t i = 0; i < size * minibatch_size; i++) { if (pre_act[i] < 0.0f) { d_acted[i] = 0.0f; } else { d_acted[i] = 1.0f; } } } void softmax(float* const acted, const float* const pre_act, const std::size_t layer_size, const std::size_t minibatch_size) { for (std::size_t mb = 0; mb < minibatch_size; mb++) { float e_sum = 0.0f; for (std::size_t ls = 0; ls < layer_size; ls++) { const float e = ::exp(pre_act[mb * layer_size + ls] - pre_act[mb * layer_size + 0]); acted[mb * layer_size + ls] = e; e_sum += e; } for (std::size_t ls = 0; ls < layer_size; ls++) { acted[mb * layer_size + ls] /= e_sum; } } } float compute_accuracy(const float* const forwarded_data, const float* const correct_data, const std::size_t size, const std::size_t minibatch_size) { std::size_t num_correct = 0; for (std::size_t mb = 0; mb < minibatch_size; mb++) { std::size_t max_index = 0; for (std::size_t i = 1; i < size; i++) { if (forwarded_data[mb * size + i] > forwarded_data[mb * size + max_index]) { max_index = i; } } if (correct_data[mb * size + max_index] > 0.5f) { num_correct++; } } return (float)num_correct / minibatch_size; } 
float compute_loss(const float* const forwarded_data, const float* const correct_data, const std::size_t size, const std::size_t minibatch_size) { float loss = 0.0f; for (std::size_t mb = 0; mb < minibatch_size; mb++) { std::size_t correct_index = 0; for (std::size_t i = 0; i < size; i++) { if (correct_data[mb * size + i] > 0.5f) { correct_index = i; } } loss -= ::log(forwarded_data[mb * size + correct_index]); } return loss / minibatch_size; } void compute_last_error(float* const last_error, const float* const last_act, const float* const correct_data, const std::size_t output_size, const std::size_t minibatch_size) { for (std::size_t i = 0; i < output_size * minibatch_size; i++) { last_error[i] = last_act[i] - correct_data[i]; } } void update_weight(float* const W, const float* const error, const float* const acted, const std::size_t W_M, const std::size_t W_N, const std::size_t minibatch_size, const float learning_rate) { gemm_nt(1.0f, W, - learning_rate / minibatch_size, error, acted, W_M, W_N, minibatch_size); } void update_bias(float* const bias, const float* const error, const std::size_t W_M, const std::size_t minibatch_size, const float learning_rate) { for (std::size_t i = 0; i < W_M; i++) { float sum = 0.0f; for (std::size_t mb = 0; mb < minibatch_size; mb++) { sum += error[mb * W_M + i]; } bias[i] -= learning_rate / minibatch_size * sum; } } int main() { mnist_loader train_data, test_data; train_data.load("train-images-idx3-ubyte", "train-labels-idx1-ubyte"); test_data.load("t10k-images-idx3-ubyte", "t10k-labels-idx1-ubyte"); float* minibatch_image_data; float* minibatch_label_data; float* test_image_data; float* test_label_data; float* minibatch_hidden_data_pre; float* minibatch_hidden_data; float* minibatch_hidden_error; float* minibatch_output_data_pre; float* minibatch_output_data; float* minibatch_output_error; float* test_hidden_data_pre; float* test_hidden_data; float* test_hidden_error; float* test_output_data_pre; float* test_output_data; float* test_output_error; float* layer0_weight; float* layer0_bias; float* layer1_weight; float* layer1_bias; hipMallocManaged((void**)&minibatch_image_data, minibatch_size * mnist_loader::IMAGE_DIM * mnist_loader::IMAGE_DIM * sizeof(float)); hipMallocManaged((void**)&minibatch_label_data, minibatch_size * mnist_loader::CLASS_SIZE * sizeof(float)); hipMallocManaged((void**)&test_image_data, test_size * mnist_loader::IMAGE_DIM * mnist_loader::IMAGE_DIM * sizeof(float)); hipMallocManaged((void**)&test_label_data, test_size * mnist_loader::CLASS_SIZE * sizeof(float)); hipMallocManaged((void**)&minibatch_hidden_data_pre, minibatch_size * hidden_size * sizeof(float)); hipMallocManaged((void**)&minibatch_hidden_data, minibatch_size * hidden_size * sizeof(float)); hipMallocManaged((void**)&minibatch_hidden_error, minibatch_size * hidden_size * sizeof(float)); hipMallocManaged((void**)&minibatch_output_data_pre, minibatch_size * output_size * sizeof(float)); hipMallocManaged((void**)&minibatch_output_data, minibatch_size * output_size * sizeof(float)); hipMallocManaged((void**)&minibatch_output_error, minibatch_size * output_size * sizeof(float)); hipMallocManaged((void**)&test_hidden_data_pre, test_size * hidden_size * sizeof(float)); hipMallocManaged((void**)&test_hidden_data, test_size * hidden_size * sizeof(float)); hipMallocManaged((void**)&test_hidden_error, test_size * hidden_size * sizeof(float)); hipMallocManaged((void**)&test_output_data_pre, test_size * output_size * sizeof(float)); hipMallocManaged((void**)&test_output_data, 
test_size * output_size * sizeof(float)); hipMallocManaged((void**)&test_output_error, test_size * output_size * sizeof(float)); hipMallocManaged((void**)&layer0_weight, input_size * hidden_size * sizeof(float)); hipMallocManaged((void**)&layer0_bias, hidden_size * sizeof(float)); hipMallocManaged((void**)&layer1_weight, hidden_size * output_size * sizeof(float)); hipMallocManaged((void**)&layer1_bias, output_size * sizeof(float)); utils::random_init(layer0_weight, input_size * hidden_size); utils::random_init(layer0_bias, hidden_size); utils::random_init(layer1_weight, hidden_size * output_size); utils::random_init(layer1_bias, output_size); std::mt19937 mt(std::random_device{}()); std::uniform_int_distribution<std::size_t> image_dist(0, train_data.get_num_data()); // copy test data for (std::size_t i = 0; i < test_size; i++) { test_data.copy(i, test_image_data + i * mnist_loader::IMAGE_DIM * mnist_loader::IMAGE_DIM, test_label_data + i * mnist_loader::CLASS_SIZE); } const auto start_clock = std::chrono::system_clock::now(); // training loop for (std::size_t i = 0; i < num_iterations; i++) { // load minibatch for (std::size_t d = 0; d < minibatch_size; d++) { const std::size_t image_id = image_dist(mt); train_data.copy(image_id, minibatch_image_data + d * mnist_loader::IMAGE_DIM * mnist_loader::IMAGE_DIM, minibatch_label_data + d * mnist_loader::CLASS_SIZE); } // // Forward // matmul( minibatch_hidden_data_pre, layer0_weight, minibatch_image_data, hidden_size, minibatch_size, input_size); add_bias( minibatch_hidden_data_pre, layer0_bias, hidden_size, minibatch_size); ReLU( minibatch_hidden_data, minibatch_hidden_data_pre, hidden_size, minibatch_size); matmul( minibatch_output_data_pre, layer1_weight, minibatch_hidden_data, output_size, minibatch_size, hidden_size); add_bias( minibatch_output_data_pre, layer1_bias, output_size, minibatch_size); softmax( minibatch_output_data, minibatch_output_data_pre, output_size, minibatch_size ); // // Backword // compute_last_error(minibatch_output_error, minibatch_output_data, minibatch_label_data, output_size, minibatch_size); dReLU(minibatch_hidden_data_pre, minibatch_hidden_data_pre, hidden_size, minibatch_size); matmul_tn(minibatch_hidden_error, layer1_weight, minibatch_output_error, hidden_size, minibatch_size, output_size); elementwise_product(minibatch_hidden_error, minibatch_hidden_error, minibatch_hidden_data_pre, minibatch_size * hidden_size); update_weight(layer1_weight, minibatch_output_error, minibatch_hidden_data, output_size, hidden_size, minibatch_size, learning_rate); update_weight(layer0_weight, minibatch_hidden_error, minibatch_image_data, hidden_size, input_size, minibatch_size, learning_rate); update_bias(layer1_bias, minibatch_output_error, output_size, minibatch_size, learning_rate); update_bias(layer0_bias, minibatch_hidden_error, hidden_size, minibatch_size, learning_rate); if (i % print_info_interval == (print_info_interval - 1)) { const auto end_clock = std::chrono::system_clock::now(); const auto elapsed_time = std::chrono::duration_cast<std::chrono::microseconds>(end_clock - start_clock).count() / 1000lu; matmul( test_hidden_data_pre, layer0_weight, test_image_data, hidden_size, test_size, input_size); add_bias( test_hidden_data_pre, layer0_bias, hidden_size, test_size); ReLU( test_hidden_data, test_hidden_data_pre, hidden_size, test_size); matmul( test_output_data_pre, layer1_weight, test_hidden_data, output_size, test_size, hidden_size); add_bias( test_output_data_pre, layer1_bias, output_size, test_size); softmax( 
test_output_data, test_output_data_pre, output_size, test_size ); const auto train_acc = compute_accuracy(minibatch_output_data, minibatch_label_data, output_size, minibatch_size); const auto train_loss = compute_loss(minibatch_output_data, minibatch_label_data, output_size, minibatch_size); const auto test_acc = compute_accuracy(test_output_data, test_label_data, output_size, minibatch_size); const auto test_loss = compute_loss(test_output_data, test_label_data, output_size, minibatch_size); std::printf("[%6luiters : %6lums] train/acc = %.3f %%, train/loss = %e, test/acc = %.3f %%, test/loss = %e\n", i + 1, elapsed_time, train_acc * 100.0f, train_loss, test_acc * 100.0f, test_loss); } } hipFree(minibatch_image_data); hipFree(minibatch_label_data); hipFree(minibatch_hidden_data); hipFree(minibatch_output_data); hipFree(minibatch_hidden_data_pre); hipFree(minibatch_output_data_pre); hipFree(test_image_data); hipFree(test_label_data); hipFree(test_hidden_data); hipFree(test_output_data); hipFree(test_hidden_data_pre); hipFree(test_output_data_pre); hipFree(layer0_weight); hipFree(layer0_bias); hipFree(layer1_weight); hipFree(layer1_bias); }
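One numerical detail in this file (and its CUDA original below): softmax subtracts the first logit of each row, pre_act[mb * layer_size + 0], rather than the row maximum, so a large activation elsewhere in the row can still overflow the exponential. A max-stabilized variant with the same data layout, shown here only as a sketch (assumes <algorithm> and <cmath>), would be:

void softmax_stable(float* const acted, const float* const pre_act,
                    const std::size_t layer_size, const std::size_t minibatch_size) {
    for (std::size_t mb = 0; mb < minibatch_size; mb++) {
        float row_max = pre_act[mb * layer_size];
        for (std::size_t ls = 1; ls < layer_size; ls++)
            row_max = std::max(row_max, pre_act[mb * layer_size + ls]);   // stabilize with the true maximum
        float e_sum = 0.0f;
        for (std::size_t ls = 0; ls < layer_size; ls++) {
            const float e = std::exp(pre_act[mb * layer_size + ls] - row_max);
            acted[mb * layer_size + ls] = e;
            e_sum += e;
        }
        for (std::size_t ls = 0; ls < layer_size; ls++)
            acted[mb * layer_size + ls] /= e_sum;
    }
}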
69b6bb9ef8344d8942ca344c77ffbcced8a2a84a.cu
#include <iostream>
#include <random>
#include <cmath>
#include <chrono>
#include <cstdio>
#include <algorithm>
#include <mnist.hpp>
#include <utils.hpp>

constexpr std::size_t minibatch_size = 64;
constexpr std::size_t test_size = 10000;
constexpr std::size_t num_iterations = 1000;
constexpr std::size_t input_size = mnist_loader::IMAGE_DIM * mnist_loader::IMAGE_DIM;
constexpr std::size_t hidden_size = 100;
constexpr std::size_t output_size = mnist_loader::CLASS_SIZE;
constexpr std::size_t print_info_interval = 20;
constexpr float learning_rate = 0.5f;

// C (M x N) = A (M x K) * B (K x N); all matrices column-major
void matmul(float* const C, const float* const A, const float* const B, const std::size_t M, const std::size_t N, const std::size_t K) {
    for (std::size_t m = 0; m < M; m++) {
        for (std::size_t n = 0; n < N; n++) {
            float sum = 0.0f;
            for (std::size_t k = 0; k < K; k++) {
                sum += A[m + k * M] * B[k + n * K];
            }
            C[m + M * n] = sum;
        }
    }
}

// C (M x N) = A^T * B, where A is stored K x M and B is K x N; all column-major
void matmul_tn(float* const C, const float* const A, const float* const B, const std::size_t M, const std::size_t N, const std::size_t K) {
    for (std::size_t m = 0; m < M; m++) {
        for (std::size_t n = 0; n < N; n++) {
            float sum = 0.0f;
            for (std::size_t k = 0; k < K; k++) {
                sum += A[k + m * K] * B[k + n * K];
            }
            C[m + M * n] = sum;
        }
    }
}

// C = beta * C + alpha * A * B^T, where A is M x K and B is N x K; all column-major
void gemm_nt(const float beta, float* const C, const float alpha, const float* const A, const float* const B, const std::size_t M, const std::size_t N, const std::size_t K) {
    for (std::size_t m = 0; m < M; m++) {
        for (std::size_t n = 0; n < N; n++) {
            float sum = 0.0f;
            for (std::size_t k = 0; k < K; k++) {
                sum += A[m + k * M] * B[n + k * N];
            }
            C[m + M * n] = beta * C[m + M * n] + alpha * sum;
        }
    }
}

void elementwise_product(float* const C, const float* const A, const float* const B, const std::size_t size) {
    for (std::size_t i = 0; i < size; i++) {
        C[i] = A[i] * B[i];
    }
}

void add_bias(float* const A, const float* const bias, const std::size_t layer_size, const std::size_t minibatch_size) {
    for (std::size_t mb = 0; mb < minibatch_size; mb++) {
        for (std::size_t ls = 0; ls < layer_size; ls++) {
            A[mb * layer_size + ls] += bias[ls];
        }
    }
}

void ReLU(float* const acted, const float* const pre_act, const std::size_t size, const std::size_t minibatch_size) {
    for (std::size_t i = 0; i < size * minibatch_size; i++) {
        acted[i] = std::max(0.0f, pre_act[i]);
    }
}

void dReLU(float* const d_acted, const float* const pre_act, const std::size_t size, const std::size_t minibatch_size) {
    for (std::size_t i = 0; i < size * minibatch_size; i++) {
        if (pre_act[i] < 0.0f) {
            d_acted[i] = 0.0f;
        } else {
            d_acted[i] = 1.0f;
        }
    }
}

void softmax(float* const acted, const float* const pre_act, const std::size_t layer_size, const std::size_t minibatch_size) {
    for (std::size_t mb = 0; mb < minibatch_size; mb++) {
        float e_sum = 0.0f;
        for (std::size_t ls = 0; ls < layer_size; ls++) {
            const float e = std::exp(pre_act[mb * layer_size + ls] - pre_act[mb * layer_size + 0]);
            acted[mb * layer_size + ls] = e;
            e_sum += e;
        }
        for (std::size_t ls = 0; ls < layer_size; ls++) {
            acted[mb * layer_size + ls] /= e_sum;
        }
    }
}

float compute_accuracy(const float* const forwarded_data, const float* const correct_data, const std::size_t size, const std::size_t minibatch_size) {
    std::size_t num_correct = 0;
    for (std::size_t mb = 0; mb < minibatch_size; mb++) {
        std::size_t max_index = 0;
        for (std::size_t i = 1; i < size; i++) {
            if (forwarded_data[mb * size + i] > forwarded_data[mb * size + max_index]) {
                max_index = i;
            }
        }
        if (correct_data[mb * size + max_index] > 0.5f) {
            num_correct++;
        }
    }
    return (float)num_correct / minibatch_size;
}

float compute_loss(const float* const forwarded_data, const float* const correct_data, const std::size_t size, const std::size_t minibatch_size) {
    float loss = 0.0f;
    for (std::size_t mb = 0; mb < minibatch_size; mb++) {
        std::size_t correct_index = 0;
        for (std::size_t i = 0; i < size; i++) {
            if (correct_data[mb * size + i] > 0.5f) {
                correct_index = i;
            }
        }
        loss -= std::log(forwarded_data[mb * size + correct_index]);
    }
    return loss / minibatch_size;
}

void compute_last_error(float* const last_error, const float* const last_act, const float* const correct_data, const std::size_t output_size, const std::size_t minibatch_size) {
    for (std::size_t i = 0; i < output_size * minibatch_size; i++) {
        last_error[i] = last_act[i] - correct_data[i];
    }
}

void update_weight(float* const W, const float* const error, const float* const acted, const std::size_t W_M, const std::size_t W_N, const std::size_t minibatch_size, const float learning_rate) {
    gemm_nt(1.0f, W, - learning_rate / minibatch_size, error, acted, W_M, W_N, minibatch_size);
}

void update_bias(float* const bias, const float* const error, const std::size_t W_M, const std::size_t minibatch_size, const float learning_rate) {
    for (std::size_t i = 0; i < W_M; i++) {
        float sum = 0.0f;
        for (std::size_t mb = 0; mb < minibatch_size; mb++) {
            sum += error[mb * W_M + i];
        }
        bias[i] -= learning_rate / minibatch_size * sum;
    }
}

int main() {
    mnist_loader train_data, test_data;
    train_data.load("train-images-idx3-ubyte", "train-labels-idx1-ubyte");
    test_data.load("t10k-images-idx3-ubyte", "t10k-labels-idx1-ubyte");

    float* minibatch_image_data;
    float* minibatch_label_data;
    float* test_image_data;
    float* test_label_data;
    float* minibatch_hidden_data_pre;
    float* minibatch_hidden_data;
    float* minibatch_hidden_error;
    float* minibatch_output_data_pre;
    float* minibatch_output_data;
    float* minibatch_output_error;
    float* test_hidden_data_pre;
    float* test_hidden_data;
    float* test_hidden_error;
    float* test_output_data_pre;
    float* test_output_data;
    float* test_output_error;
    float* layer0_weight;
    float* layer0_bias;
    float* layer1_weight;
    float* layer1_bias;

    cudaMallocManaged((void**)&minibatch_image_data, minibatch_size * mnist_loader::IMAGE_DIM * mnist_loader::IMAGE_DIM * sizeof(float));
    cudaMallocManaged((void**)&minibatch_label_data, minibatch_size * mnist_loader::CLASS_SIZE * sizeof(float));
    cudaMallocManaged((void**)&test_image_data, test_size * mnist_loader::IMAGE_DIM * mnist_loader::IMAGE_DIM * sizeof(float));
    cudaMallocManaged((void**)&test_label_data, test_size * mnist_loader::CLASS_SIZE * sizeof(float));
    cudaMallocManaged((void**)&minibatch_hidden_data_pre, minibatch_size * hidden_size * sizeof(float));
    cudaMallocManaged((void**)&minibatch_hidden_data, minibatch_size * hidden_size * sizeof(float));
    cudaMallocManaged((void**)&minibatch_hidden_error, minibatch_size * hidden_size * sizeof(float));
    cudaMallocManaged((void**)&minibatch_output_data_pre, minibatch_size * output_size * sizeof(float));
    cudaMallocManaged((void**)&minibatch_output_data, minibatch_size * output_size * sizeof(float));
    cudaMallocManaged((void**)&minibatch_output_error, minibatch_size * output_size * sizeof(float));
    cudaMallocManaged((void**)&test_hidden_data_pre, test_size * hidden_size * sizeof(float));
    cudaMallocManaged((void**)&test_hidden_data, test_size * hidden_size * sizeof(float));
    cudaMallocManaged((void**)&test_hidden_error, test_size * hidden_size * sizeof(float));
    cudaMallocManaged((void**)&test_output_data_pre, test_size * output_size * sizeof(float));
    cudaMallocManaged((void**)&test_output_data, test_size * output_size * sizeof(float));
    cudaMallocManaged((void**)&test_output_error, test_size * output_size * sizeof(float));
    cudaMallocManaged((void**)&layer0_weight, input_size * hidden_size * sizeof(float));
    cudaMallocManaged((void**)&layer0_bias, hidden_size * sizeof(float));
    cudaMallocManaged((void**)&layer1_weight, hidden_size * output_size * sizeof(float));
    cudaMallocManaged((void**)&layer1_bias, output_size * sizeof(float));

    utils::random_init(layer0_weight, input_size * hidden_size);
    utils::random_init(layer0_bias, hidden_size);
    utils::random_init(layer1_weight, hidden_size * output_size);
    utils::random_init(layer1_bias, output_size);

    std::mt19937 mt(std::random_device{}());
    // the upper bound of uniform_int_distribution is inclusive, so subtract 1 to stay in range
    std::uniform_int_distribution<std::size_t> image_dist(0, train_data.get_num_data() - 1);

    // copy test data
    for (std::size_t i = 0; i < test_size; i++) {
        test_data.copy(i, test_image_data + i * mnist_loader::IMAGE_DIM * mnist_loader::IMAGE_DIM, test_label_data + i * mnist_loader::CLASS_SIZE);
    }

    const auto start_clock = std::chrono::system_clock::now();

    // training loop
    for (std::size_t i = 0; i < num_iterations; i++) {
        // load minibatch
        for (std::size_t d = 0; d < minibatch_size; d++) {
            const std::size_t image_id = image_dist(mt);
            train_data.copy(image_id, minibatch_image_data + d * mnist_loader::IMAGE_DIM * mnist_loader::IMAGE_DIM, minibatch_label_data + d * mnist_loader::CLASS_SIZE);
        }

        //
        // Forward
        //
        matmul(minibatch_hidden_data_pre, layer0_weight, minibatch_image_data, hidden_size, minibatch_size, input_size);
        add_bias(minibatch_hidden_data_pre, layer0_bias, hidden_size, minibatch_size);
        ReLU(minibatch_hidden_data, minibatch_hidden_data_pre, hidden_size, minibatch_size);
        matmul(minibatch_output_data_pre, layer1_weight, minibatch_hidden_data, output_size, minibatch_size, hidden_size);
        add_bias(minibatch_output_data_pre, layer1_bias, output_size, minibatch_size);
        softmax(minibatch_output_data, minibatch_output_data_pre, output_size, minibatch_size);

        //
        // Backward
        //
        compute_last_error(minibatch_output_error, minibatch_output_data, minibatch_label_data, output_size, minibatch_size);
        dReLU(minibatch_hidden_data_pre, minibatch_hidden_data_pre, hidden_size, minibatch_size);
        matmul_tn(minibatch_hidden_error, layer1_weight, minibatch_output_error, hidden_size, minibatch_size, output_size);
        elementwise_product(minibatch_hidden_error, minibatch_hidden_error, minibatch_hidden_data_pre, minibatch_size * hidden_size);

        update_weight(layer1_weight, minibatch_output_error, minibatch_hidden_data, output_size, hidden_size, minibatch_size, learning_rate);
        update_weight(layer0_weight, minibatch_hidden_error, minibatch_image_data, hidden_size, input_size, minibatch_size, learning_rate);
        update_bias(layer1_bias, minibatch_output_error, output_size, minibatch_size, learning_rate);
        update_bias(layer0_bias, minibatch_hidden_error, hidden_size, minibatch_size, learning_rate);

        if (i % print_info_interval == (print_info_interval - 1)) {
            const auto end_clock = std::chrono::system_clock::now();
            const auto elapsed_time = std::chrono::duration_cast<std::chrono::microseconds>(end_clock - start_clock).count() / 1000lu;

            matmul(test_hidden_data_pre, layer0_weight, test_image_data, hidden_size, test_size, input_size);
            add_bias(test_hidden_data_pre, layer0_bias, hidden_size, test_size);
            ReLU(test_hidden_data, test_hidden_data_pre, hidden_size, test_size);
            matmul(test_output_data_pre, layer1_weight, test_hidden_data, output_size, test_size, hidden_size);
            add_bias(test_output_data_pre, layer1_bias, output_size, test_size);
            softmax(test_output_data, test_output_data_pre, output_size, test_size);

            const auto train_acc = compute_accuracy(minibatch_output_data, minibatch_label_data, output_size, minibatch_size);
            const auto train_loss = compute_loss(minibatch_output_data, minibatch_label_data, output_size, minibatch_size);
            // evaluate test metrics over the full test set, not a single minibatch
            const auto test_acc = compute_accuracy(test_output_data, test_label_data, output_size, test_size);
            const auto test_loss = compute_loss(test_output_data, test_label_data, output_size, test_size);

            std::printf("[%6luiters : %6lums] train/acc = %.3f %%, train/loss = %e, test/acc = %.3f %%, test/loss = %e\n", i + 1, elapsed_time, train_acc * 100.0f, train_loss, test_acc * 100.0f, test_loss);
        }
    }

    cudaFree(minibatch_image_data);
    cudaFree(minibatch_label_data);
    cudaFree(minibatch_hidden_data);
    cudaFree(minibatch_output_data);
    cudaFree(minibatch_hidden_data_pre);
    cudaFree(minibatch_output_data_pre);
    cudaFree(minibatch_hidden_error);
    cudaFree(minibatch_output_error);
    cudaFree(test_image_data);
    cudaFree(test_label_data);
    cudaFree(test_hidden_data);
    cudaFree(test_output_data);
    cudaFree(test_hidden_data_pre);
    cudaFree(test_output_data_pre);
    cudaFree(test_hidden_error);
    cudaFree(test_output_error);
    cudaFree(layer0_weight);
    cudaFree(layer0_bias);
    cudaFree(layer1_weight);
    cudaFree(layer1_bias);
}
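The helpers in the file above treat every matrix as column-major with the leading dimension equal to its row count, so a (layer_size x minibatch_size) activation matrix stores sample mb contiguously at offset mb * layer_size. A minimal standalone sketch of that indexing convention, using made-up 2x3 and 3x2 matrices (not taken from the file above):

// Standalone sketch: the column-major indexing used by matmul(), on a tiny example.
#include <cstdio>
#include <cstddef>

int main() {
    constexpr std::size_t M = 2, N = 2, K = 3;
    // A is M x K, column-major: A(m, k) = A[m + k * M]; columns are (1,2), (3,4), (5,6)
    const float A[M * K] = {1, 2, 3, 4, 5, 6};
    // B is K x N, column-major: B(k, n) = B[k + n * K]; columns are (1,0,1), (0,1,1)
    const float B[K * N] = {1, 0, 1, 0, 1, 1};
    float C[M * N];
    for (std::size_t m = 0; m < M; m++)
        for (std::size_t n = 0; n < N; n++) {
            float sum = 0.0f;
            for (std::size_t k = 0; k < K; k++)
                sum += A[m + k * M] * B[k + n * K];
            C[m + M * n] = sum;
        }
    // Expected result: [[6, 8], [8, 10]], stored column-major as 6, 8, 8, 10
    std::printf("%g %g\n%g %g\n", C[0], C[2], C[1], C[3]);
}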
53338e6919def1ce16543955f7cdbe46920aec00.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <Temporary_9.h> // cuda error checking string prev_file = ""; int prev_line = 0; __constant__ float c_K[9]; __constant__ float c_modelPose[16]; __constant__ float c_cameraPose[16]; void cuda_check(string file, int line) { hipError_t e = hipGetLastError(); if (e != hipSuccess) { cout << endl << file << ", line " << line << ": " << hipGetErrorString(e) << " (" << e << ")" << endl; if (prev_line > 0) cout << "Previous CUDA call:" << endl << prev_file << ", line " << prev_line << endl; exit(1); } prev_file = file; prev_line = line; } Poseestimator::Poseestimator(vector<Mesh*> meshes, Matrix3f &K){ // initialize cuda hipDeviceSynchronize(); CUDA_CHECK; std::cout << "success" << std::endl; // register the VBOs in cuda for(uint i=0;i<meshes.size();i++){ ModelData *m = new ModelData; // pass a pointer to the ModelMatrix m->ModelMatrix = &meshes[i]->ModelMatrix; m->cuda_vbo_resource.resize(meshes[i]->m_Entries.size()); m->numberOfVertices.resize(meshes[i]->m_Entries.size()); // host m->vertices_out.resize(meshes[i]->m_Entries.size()); m->normals_out.resize(meshes[i]->m_Entries.size()); m->tangents_out.resize(meshes[i]->m_Entries.size()); m->gradTrans.resize(meshes[i]->m_Entries.size()); m->gradRot.resize(meshes[i]->m_Entries.size()); // device m->d_vertices_out.resize(meshes[i]->m_Entries.size()); m->d_normals_out.resize(meshes[i]->m_Entries.size()); m->d_tangents_out.resize(meshes[i]->m_Entries.size()); m->d_gradTrans.resize(meshes[i]->m_Entries.size()); m->d_gradRot.resize(meshes[i]->m_Entries.size()); for(uint j=0;j<meshes[i]->m_Entries.size();j++) { // Register the OpenGL buffer objects in cuda hipGraphicsGLRegisterBuffer(&m->cuda_vbo_resource[j], meshes[i]->m_Entries[j].VB, hipGraphicsMapFlagsReadOnly ); CUDA_CHECK; //hipGraphicsMapResources(1, &m->cuda_vbo_resource[j], 0); //CUDA_CHECK; // how many vertices m->numberOfVertices[j] = meshes[i]->m_Entries[j].NumVertices; // allocate memory on host m->vertices_out[j] = new float3[m->numberOfVertices[j]]; m->normals_out[j] = new float3[m->numberOfVertices[j]]; m->tangents_out[j] = new float3[m->numberOfVertices[j]]; m->gradTrans[j] = new float3[m->numberOfVertices[j]]; m->gradRot[j] = new float3[m->numberOfVertices[j]]; // allocate memory on gpu hipMalloc(&m->d_vertices_out[j], m->numberOfVertices[j] * sizeof(float3)); CUDA_CHECK; hipMalloc(&m->d_normals_out[j], m->numberOfVertices[j] * sizeof(float3)); CUDA_CHECK; hipMalloc(&m->d_tangents_out[j], m->numberOfVertices[j] * sizeof(float3)); CUDA_CHECK; hipMalloc(&m->d_gradTrans[j], m->numberOfVertices[j] * sizeof(float3)); CUDA_CHECK; hipMalloc(&m->d_gradRot[j], m->numberOfVertices[j] * sizeof(float3)); CUDA_CHECK; cout << "number of vertices: " << m->numberOfVertices[j] << endl; } modelData.push_back(m); } hipMalloc(&d_gradient, 6 * sizeof(float)); CUDA_CHECK; hipMalloc(&d_border, WIDTH * HEIGHT * sizeof(uchar)); CUDA_CHECK; hipMalloc(&d_img_out, WIDTH * HEIGHT * sizeof(uchar)); CUDA_CHECK; hipMalloc(&d_image, WIDTH * HEIGHT * sizeof(uchar)); CUDA_CHECK; res = new uchar[WIDTH * HEIGHT]; // copy camera matrices to gpu hipMemcpyToSymbol(c_K, &K(0, 0), 9 * sizeof(float)); } Poseestimator::~Poseestimator() { hipFree(d_border); CUDA_CHECK; hipFree(d_img_out); CUDA_CHECK; hipFree(d_image); CUDA_CHECK; hipFree(d_gradient); CUDA_CHECK; delete[] res; for(auto m:modelData) delete m; } __global__ void costFcn(Vertex *vertices, float3 *vertices_out, float3 *normals_out, float3 *tangents_out, uchar *border, uchar 
*image, float mu_in, float mu_out, float sigma_in, float sigma_out, uchar *img_out, int numberOfVertices, float3 *gradTrans, float3 *gradRot, float* grad) { // iteration over image is parallelized int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < numberOfVertices) { // set gradients to zero gradTrans[idx].x = 0; gradTrans[idx].y = 0; gradTrans[idx].z = 0; gradRot[idx].x = 0; gradRot[idx].y = 0; gradRot[idx].z = 0; float3 v = vertices[idx].m_pos; float3 n = vertices[idx].m_normal; // calculate position of vertex in camera coordinate system float3 pos, posModel; posModel.x = 0.0f; posModel.y = 0.0f; posModel.z = 0.0f; // modelPose // x posModel.x += c_modelPose[0 + 4 * 0] * v.x; posModel.x += c_modelPose[0 + 4 * 1] * v.y; posModel.x += c_modelPose[0 + 4 * 2] * v.z; posModel.x += c_modelPose[0 + 4 * 3]; // y posModel.y += c_modelPose[1 + 4 * 0] * v.x; posModel.y += c_modelPose[1 + 4 * 1] * v.y; posModel.y += c_modelPose[1 + 4 * 2] * v.z; posModel.y += c_modelPose[1 + 4 * 3]; // z posModel.z += c_modelPose[2 + 4 * 0] * v.x; posModel.z += c_modelPose[2 + 4 * 1] * v.y; posModel.z += c_modelPose[2 + 4 * 2] * v.z; posModel.z += c_modelPose[2 + 4 * 3]; // cameraPose pos.x = 0.0f; pos.y = 0.0f; pos.z = 0.0f; // x pos.x += c_cameraPose[0 + 4 * 0] * posModel.x; pos.x += c_cameraPose[0 + 4 * 1] * posModel.y; pos.x += c_cameraPose[0 + 4 * 2] * posModel.z; pos.x += c_cameraPose[0 + 4 * 3]; // y pos.y += c_cameraPose[1 + 4 * 0] * posModel.x; pos.y += c_cameraPose[1 + 4 * 1] * posModel.y; pos.y += c_cameraPose[1 + 4 * 2] * posModel.z; pos.y += c_cameraPose[1 + 4 * 3]; // z pos.z += c_cameraPose[2 + 4 * 0] * posModel.x; pos.z += c_cameraPose[2 + 4 * 1] * posModel.y; pos.z += c_cameraPose[2 + 4 * 2] * posModel.z; pos.z += c_cameraPose[2 + 4 * 3]; float posNorm = sqrtf(pos.x * pos.x + pos.y * pos.y + pos.z * pos.z); vertices_out[idx] = pos; // calculate orientation of normal in camera coordinate system float3 normal, normalModel; normalModel.x = 0.0f; normalModel.y = 0.0f; normalModel.z = 0.0f; // modelPose // x normalModel.x += c_modelPose[0 + 4 * 0] * n.x; normalModel.x += c_modelPose[0 + 4 * 1] * n.y; normalModel.x += c_modelPose[0 + 4 * 2] * n.z; // y normalModel.y += c_modelPose[1 + 4 * 0] * n.x; normalModel.y += c_modelPose[1 + 4 * 1] * n.y; normalModel.y += c_modelPose[1 + 4 * 2] * n.z; // z normalModel.z += c_modelPose[2 + 4 * 0] * n.x; normalModel.z += c_modelPose[2 + 4 * 1] * n.y; normalModel.z += c_modelPose[2 + 4 * 2] * n.z; // cameraPose normal.x = 0.0f; normal.y = 0.0f; normal.z = 0.0f; // x normal.x += c_cameraPose[0 + 4 * 0] * normalModel.x; normal.x += c_cameraPose[0 + 4 * 1] * normalModel.y; normal.x += c_cameraPose[0 + 4 * 2] * normalModel.z; // y normal.y += c_cameraPose[1 + 4 * 0] * normalModel.x; normal.y += c_cameraPose[1 + 4 * 1] * normalModel.y; normal.y += c_cameraPose[1 + 4 * 2] * normalModel.z; // z normal.z += c_cameraPose[2 + 4 * 0] * normalModel.x; normal.z += c_cameraPose[2 + 4 * 1] * normalModel.y; normal.z += c_cameraPose[2 + 4 * 2] * normalModel.z; normals_out[idx] = normal; // calculate dot product position and normal float dot = normal.x * pos.x / posNorm + normal.y * pos.y / posNorm + normal.z * pos.z / posNorm; // calculate gradient of silhuette float3 cross = {pos.y * normal.z - pos.z * normal.y, pos.z * normal.x - pos.x * normal.z, pos.x * normal.y - pos.y * normal.x}; float dCnorm = sqrtf(cross.x * cross.x + cross.y * cross.y + cross.z * cross.z); tangents_out[idx] = cross; // calculate pixel location with intrinsic matrix K float3 pixel; pixel.x = 0.0f; 
pixel.y = 0.0f; pixel.z = 0.0f; // x pixel.x += c_K[0 + 3 * 0] * pos.x; pixel.x += c_K[0 + 3 * 1] * pos.y; pixel.x += c_K[0 + 3 * 2] * pos.z; // y pixel.y += c_K[1 + 3 * 0] * pos.x; pixel.y += c_K[1 + 3 * 1] * pos.y; pixel.y += c_K[1 + 3 * 2] * pos.z; // z pixel.z += c_K[2 + 3 * 0] * pos.x; pixel.z += c_K[2 + 3 * 1] * pos.y; pixel.z += c_K[2 + 3 * 2] * pos.z; int2 pixelCoord; pixelCoord.x = (int) pixel.x / pixel.z; pixelCoord.y = (int) pixel.y / pixel.z; // if its a border pixel and the dot product small enough if (pixelCoord.x >= 0 && pixelCoord.x < WIDTH && pixelCoord.y >= 0 && pixelCoord.y < HEIGHT && fabsf(dot)< 0.005f && border[pixelCoord.y * WIDTH + (WIDTH-pixelCoord.x-1)] == 255) {// img_out[pixelCoord.y * WIDTH + (WIDTH-pixelCoord.x-1)] = 255; float Rc = (((float) image[pixelCoord.y * WIDTH + (WIDTH-pixelCoord.x-1)] - mu_out) * ((float) image[pixelCoord.y * WIDTH + (WIDTH-pixelCoord.x-1)] - mu_out)) ;// / sigma_out float R = (((float) image[pixelCoord.y * WIDTH + (WIDTH-pixelCoord.x-1)] - mu_in) * ((float) image[pixelCoord.y * WIDTH + (WIDTH-pixelCoord.x-1)] - mu_in)) ;// /sigma_in float statistics = (Rc - R) ;// * dCnorm logf(sigma_out / sigma_in) + gradTrans[idx].x = statistics * normal.x; gradTrans[idx].y = statistics * normal.y; gradTrans[idx].z = statistics * normal.z; float Om[9] = {0, posModel.z, -posModel.y, -posModel.z, 0, posModel.x, posModel.y, -posModel.x, 0}; float M[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; for (uint i = 0; i < 3; i++) for (uint j = 0; j < 3; j++) for (uint k = 0; k < 3; k++) M[i + 3 * j] += c_cameraPose[i + 4 * k] * Om[k + 3 * j]; statistics *= posNorm / (pos.z * pos.z * pos.z); gradRot[idx].x = statistics * (M[0 + 3 * 0] * normal.x + M[1 + 3 * 0] * normal.y + M[2 + 3 * 0] * normal.z); gradRot[idx].y = statistics * (M[0 + 3 * 1] * normal.x + M[1 + 3 * 1] * normal.y + M[2 + 3 * 1] * normal.z); gradRot[idx].z = statistics * (M[0 + 3 * 2] * normal.x + M[1 + 3 * 2] * normal.y + M[2 + 3 * 2] * normal.z); } else { tangents_out[idx].x = 0; tangents_out[idx].y = 0; tangents_out[idx].z = 0; } } } double Poseestimator::iterateOnce(const Mat &img_camera, Mat &img_artificial, VectorXd &pose, VectorXd &grad) { Mat img_camera_gray, img_camera_copy, img_artificial_gray, img_artificial_gray2; VectorXd initial_pose = pose; img_camera.copyTo(img_camera_copy); cvtColor(img_camera_copy, img_camera_gray, CV_BGR2GRAY); cvtColor(img_artificial, img_artificial_gray, CV_BGR2GRAY); // make a copy img_artificial_gray.copyTo(img_artificial_gray2); cv::Canny(img_artificial_gray, img_artificial_gray, 100, 200, 3); //imshow("Canny", img_artificial_gray); vector<vector<cv::Point> > contours; vector<cv::Vec4i> hierarchy; findContours(img_artificial_gray, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_TC89_L1, cv::Point(0, 0)); double min_contour_area = 40; for (auto it = contours.begin(); it != contours.end();) { if (contourArea(*it) < min_contour_area) it = contours.erase(it); else ++it; } if (contours.size() > 0) { Mat border = Mat::zeros(HEIGHT, WIDTH, CV_8UC1); double A_in = 0; for (int idx = 0; idx < contours.size(); idx++) { drawContours(border, contours, idx, 255, 10, 8, hierarchy, 0, cv::Point()); A_in += contourArea(contours[idx]); drawContours(img_camera_copy, contours, idx, cv::Scalar(0, 255, 0), 1, 8, hierarchy, 0, cv::Point()); } double A_out = WIDTH * HEIGHT - A_in; imshow("camera image", img_camera_copy); cv::waitKey(1); Mat R_mask = Mat::zeros(HEIGHT, WIDTH, CV_8UC1), Rc_mask, R = Mat::zeros(HEIGHT, WIDTH, CV_8UC1), Rc = Mat::zeros(HEIGHT, WIDTH, CV_8UC1); 
fillPoly(R_mask, contours, 255); bitwise_not(R_mask, Rc_mask); // this will mask out the respective part of the webcam image bitwise_and(img_camera_gray, R_mask, R); bitwise_and(img_camera_gray, Rc_mask, Rc); //imshow("R_", R); //imshow("Rc_", Rc); // convert camera image to float R.convertTo(R, CV_32FC1); Rc.convertTo(Rc, CV_32FC1); // calculate mean double mu_in = sum(R).val[0] / A_in; double mu_out = sum(Rc).val[0] / A_out; R = R - mu_in; Rc = Rc - mu_out; imshow("R", R/255.0f); imshow("Rc", Rc/255.0f); cv::waitKey(1); // copy only the respective areas Mat Rpow = Mat::zeros(HEIGHT, WIDTH, CV_32FC1), Rcpow = Mat::zeros(HEIGHT, WIDTH, CV_32FC1); R.copyTo(Rpow, R_mask); Rc.copyTo(Rcpow, Rc_mask); // calculate sigma pow(Rpow, 2.0, Rpow); pow(Rcpow, 2.0, Rcpow); double sigma_in = sum(Rpow).val[0] / A_in; double sigma_out = sum(Rcpow).val[0] / A_out; double energy = -sum(Rpow).val[0] - sum(Rcpow).val[0]; cost.push_back(energy); cout << "cost: " << energy << endl; Matrix3f rot = Matrix3f::Identity(); Matrix3f skew; Vector3f p(pose(3), pose(4), pose(5)); float angle = p.norm(); if (abs(angle) > 0.0000001) { p.normalize(); skew << 0, -p(2), p(1), p(2), 0, -p(0), -p(1), p(0), 0; rot = rot + sin(angle) * skew; rot = rot + (1.0 - cos(angle)) * skew * skew; } Matrix4f ViewMatrix = Matrix4f::Identity(); ViewMatrix.topLeftCorner(3, 3) = rot; ViewMatrix.topRightCorner(3, 1) << pose(0), pose(1), pose(2); Eigen::Matrix4f cameraPose = ViewMatrix; hipMemcpy(d_border, border.data, WIDTH * HEIGHT * sizeof(uchar), hipMemcpyHostToDevice); CUDA_CHECK; hipMemcpy(d_image, img_camera_gray.data, WIDTH * HEIGHT * sizeof(uchar), hipMemcpyHostToDevice); CUDA_CHECK; // set result image an the gradients to zero hipMemset(d_img_out, 0, WIDTH * HEIGHT * sizeof(uchar)); CUDA_CHECK; // set constants on gpu hipMemcpyToSymbol(c_cameraPose, &cameraPose(0, 0), 16 * sizeof(float)); grad << 0, 0, 0, 0, 0, 0; for(uint i=0;i<modelData.size();i++) { for (uint j = 0; j < modelData[i]->cuda_vbo_resource.size(); j++) { // set modelPose on gpu hipMemcpyToSymbol(c_modelPose, &(*modelData[i]->ModelMatrix)(0,0), 16 * sizeof(float)); dim3 block = dim3(1, 1, 1); dim3 grid = dim3(modelData[i]->numberOfVertices[j], 1, 1); //cout << "number of vertices: " << modelData.size() << endl; // map OpenGL buffer object for writing from CUDA Vertex *vertices; hipGraphicsMapResources(1, &modelData[i]->cuda_vbo_resource[j], 0); CUDA_CHECK; size_t num_bytes; hipGraphicsResourceGetMappedPointer((void **)&vertices, &num_bytes, modelData[i]->cuda_vbo_resource[j]); CUDA_CHECK; hipLaunchKernelGGL(( costFcn) , dim3(grid), dim3(block) , 0, 0, vertices, modelData[i]->d_vertices_out[j], modelData[i]->d_normals_out[j], modelData[i]->d_tangents_out[j], d_border, d_image, mu_in, mu_out, sigma_in, sigma_out, d_img_out, modelData[i]->numberOfVertices[j], modelData[i]->d_gradTrans[j], modelData[i]->d_gradRot[j], d_gradient); CUDA_CHECK; // unmap buffer object hipGraphicsUnmapResources(1, &modelData[i]->cuda_vbo_resource[j], 0); CUDA_CHECK; #ifdef VISUALIZE hipMemcpy( modelData[i]->vertices_out[j], modelData[i]->d_vertices_out[j], modelData[i]->numberOfVertices[j] * sizeof(float3), hipMemcpyDeviceToHost); CUDA_CHECK; hipMemcpy( modelData[i]->normals_out[j], modelData[i]->d_normals_out[j], modelData[i]->numberOfVertices[j] * sizeof(float3), hipMemcpyDeviceToHost); CUDA_CHECK; hipMemcpy( modelData[i]->tangents_out[j], modelData[i]->d_tangents_out[j], modelData[i]->numberOfVertices[j] * sizeof(float3), hipMemcpyDeviceToHost); CUDA_CHECK; #endif 
hipMemcpy(modelData[i]->gradTrans[j], modelData[i]->d_gradTrans[j], modelData[i]->numberOfVertices[j] * sizeof(float3), hipMemcpyDeviceToHost); CUDA_CHECK; hipMemcpy(modelData[i]->gradRot[j], modelData[i]->d_gradRot[j], modelData[i]->numberOfVertices[j] * sizeof(float3), hipMemcpyDeviceToHost); CUDA_CHECK; for (uint k = 0; k < modelData[i]->numberOfVertices[j]; k++) { grad(0) += modelData[i]->gradTrans[j][k].x; grad(1) += modelData[i]->gradTrans[j][k].y; grad(2) += modelData[i]->gradTrans[j][k].z; grad(3) += modelData[i]->gradRot[j][k].x; grad(4) += modelData[i]->gradRot[j][k].y; grad(5) += modelData[i]->gradRot[j][k].z; } } } // copy data from gpu to cpu hipMemcpy(res, d_img_out, WIDTH * HEIGHT * sizeof(uchar), hipMemcpyDeviceToHost); CUDA_CHECK; Mat img(HEIGHT, WIDTH, CV_8UC1, res); imshow("result", img); cv::waitKey(1); return energy; } else { cout << "cannot find any contour" << endl; return 0; } }
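iterateOnce builds the rotation block of the view matrix from the axis-angle entries pose(3..5) using the Rodrigues formula R = I + sin(theta) * K + (1 - cos(theta)) * K^2, where K is the skew-symmetric matrix of the normalized axis and theta is its norm. A minimal standalone Eigen sketch of the same construction; the pose values here are made up for illustration:

// Standalone sketch: axis-angle to rotation matrix, as done in iterateOnce().
#include <Eigen/Dense>
#include <cmath>
#include <iostream>

int main() {
    Eigen::Vector3f p(0.1f, -0.2f, 0.3f);   // made-up axis-angle vector (pose(3..5))
    const float angle = p.norm();
    Eigen::Matrix3f rot = Eigen::Matrix3f::Identity();
    if (std::abs(angle) > 1e-7f) {
        p.normalize();
        Eigen::Matrix3f skew;
        skew <<     0, -p(2),  p(1),
                 p(2),     0, -p(0),
                -p(1),  p(0),     0;
        rot += std::sin(angle) * skew + (1.0f - std::cos(angle)) * skew * skew;
    }
    std::cout << rot << std::endl;   // result should be orthonormal with det = 1
}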
53338e6919def1ce16543955f7cdbe46920aec00.cu
#include <Temporary_9.h> // cuda error checking string prev_file = ""; int prev_line = 0; __constant__ float c_K[9]; __constant__ float c_modelPose[16]; __constant__ float c_cameraPose[16]; void cuda_check(string file, int line) { cudaError_t e = cudaGetLastError(); if (e != cudaSuccess) { cout << endl << file << ", line " << line << ": " << cudaGetErrorString(e) << " (" << e << ")" << endl; if (prev_line > 0) cout << "Previous CUDA call:" << endl << prev_file << ", line " << prev_line << endl; exit(1); } prev_file = file; prev_line = line; } Poseestimator::Poseestimator(vector<Mesh*> meshes, Matrix3f &K){ // initialize cuda cudaDeviceSynchronize(); CUDA_CHECK; std::cout << "success" << std::endl; // register the VBOs in cuda for(uint i=0;i<meshes.size();i++){ ModelData *m = new ModelData; // pass a pointer to the ModelMatrix m->ModelMatrix = &meshes[i]->ModelMatrix; m->cuda_vbo_resource.resize(meshes[i]->m_Entries.size()); m->numberOfVertices.resize(meshes[i]->m_Entries.size()); // host m->vertices_out.resize(meshes[i]->m_Entries.size()); m->normals_out.resize(meshes[i]->m_Entries.size()); m->tangents_out.resize(meshes[i]->m_Entries.size()); m->gradTrans.resize(meshes[i]->m_Entries.size()); m->gradRot.resize(meshes[i]->m_Entries.size()); // device m->d_vertices_out.resize(meshes[i]->m_Entries.size()); m->d_normals_out.resize(meshes[i]->m_Entries.size()); m->d_tangents_out.resize(meshes[i]->m_Entries.size()); m->d_gradTrans.resize(meshes[i]->m_Entries.size()); m->d_gradRot.resize(meshes[i]->m_Entries.size()); for(uint j=0;j<meshes[i]->m_Entries.size();j++) { // Register the OpenGL buffer objects in cuda cudaGraphicsGLRegisterBuffer(&m->cuda_vbo_resource[j], meshes[i]->m_Entries[j].VB, cudaGraphicsMapFlagsReadOnly ); CUDA_CHECK; //cudaGraphicsMapResources(1, &m->cuda_vbo_resource[j], 0); //CUDA_CHECK; // how many vertices m->numberOfVertices[j] = meshes[i]->m_Entries[j].NumVertices; // allocate memory on host m->vertices_out[j] = new float3[m->numberOfVertices[j]]; m->normals_out[j] = new float3[m->numberOfVertices[j]]; m->tangents_out[j] = new float3[m->numberOfVertices[j]]; m->gradTrans[j] = new float3[m->numberOfVertices[j]]; m->gradRot[j] = new float3[m->numberOfVertices[j]]; // allocate memory on gpu cudaMalloc(&m->d_vertices_out[j], m->numberOfVertices[j] * sizeof(float3)); CUDA_CHECK; cudaMalloc(&m->d_normals_out[j], m->numberOfVertices[j] * sizeof(float3)); CUDA_CHECK; cudaMalloc(&m->d_tangents_out[j], m->numberOfVertices[j] * sizeof(float3)); CUDA_CHECK; cudaMalloc(&m->d_gradTrans[j], m->numberOfVertices[j] * sizeof(float3)); CUDA_CHECK; cudaMalloc(&m->d_gradRot[j], m->numberOfVertices[j] * sizeof(float3)); CUDA_CHECK; cout << "number of vertices: " << m->numberOfVertices[j] << endl; } modelData.push_back(m); } cudaMalloc(&d_gradient, 6 * sizeof(float)); CUDA_CHECK; cudaMalloc(&d_border, WIDTH * HEIGHT * sizeof(uchar)); CUDA_CHECK; cudaMalloc(&d_img_out, WIDTH * HEIGHT * sizeof(uchar)); CUDA_CHECK; cudaMalloc(&d_image, WIDTH * HEIGHT * sizeof(uchar)); CUDA_CHECK; res = new uchar[WIDTH * HEIGHT]; // copy camera matrices to gpu cudaMemcpyToSymbol(c_K, &K(0, 0), 9 * sizeof(float)); } Poseestimator::~Poseestimator() { cudaFree(d_border); CUDA_CHECK; cudaFree(d_img_out); CUDA_CHECK; cudaFree(d_image); CUDA_CHECK; cudaFree(d_gradient); CUDA_CHECK; delete[] res; for(auto m:modelData) delete m; } __global__ void costFcn(Vertex *vertices, float3 *vertices_out, float3 *normals_out, float3 *tangents_out, uchar *border, uchar *image, float mu_in, float mu_out, float sigma_in, float sigma_out, 
uchar *img_out, int numberOfVertices, float3 *gradTrans, float3 *gradRot, float* grad) { // iteration over image is parallelized int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < numberOfVertices) { // set gradients to zero gradTrans[idx].x = 0; gradTrans[idx].y = 0; gradTrans[idx].z = 0; gradRot[idx].x = 0; gradRot[idx].y = 0; gradRot[idx].z = 0; float3 v = vertices[idx].m_pos; float3 n = vertices[idx].m_normal; // calculate position of vertex in camera coordinate system float3 pos, posModel; posModel.x = 0.0f; posModel.y = 0.0f; posModel.z = 0.0f; // modelPose // x posModel.x += c_modelPose[0 + 4 * 0] * v.x; posModel.x += c_modelPose[0 + 4 * 1] * v.y; posModel.x += c_modelPose[0 + 4 * 2] * v.z; posModel.x += c_modelPose[0 + 4 * 3]; // y posModel.y += c_modelPose[1 + 4 * 0] * v.x; posModel.y += c_modelPose[1 + 4 * 1] * v.y; posModel.y += c_modelPose[1 + 4 * 2] * v.z; posModel.y += c_modelPose[1 + 4 * 3]; // z posModel.z += c_modelPose[2 + 4 * 0] * v.x; posModel.z += c_modelPose[2 + 4 * 1] * v.y; posModel.z += c_modelPose[2 + 4 * 2] * v.z; posModel.z += c_modelPose[2 + 4 * 3]; // cameraPose pos.x = 0.0f; pos.y = 0.0f; pos.z = 0.0f; // x pos.x += c_cameraPose[0 + 4 * 0] * posModel.x; pos.x += c_cameraPose[0 + 4 * 1] * posModel.y; pos.x += c_cameraPose[0 + 4 * 2] * posModel.z; pos.x += c_cameraPose[0 + 4 * 3]; // y pos.y += c_cameraPose[1 + 4 * 0] * posModel.x; pos.y += c_cameraPose[1 + 4 * 1] * posModel.y; pos.y += c_cameraPose[1 + 4 * 2] * posModel.z; pos.y += c_cameraPose[1 + 4 * 3]; // z pos.z += c_cameraPose[2 + 4 * 0] * posModel.x; pos.z += c_cameraPose[2 + 4 * 1] * posModel.y; pos.z += c_cameraPose[2 + 4 * 2] * posModel.z; pos.z += c_cameraPose[2 + 4 * 3]; float posNorm = sqrtf(pos.x * pos.x + pos.y * pos.y + pos.z * pos.z); vertices_out[idx] = pos; // calculate orientation of normal in camera coordinate system float3 normal, normalModel; normalModel.x = 0.0f; normalModel.y = 0.0f; normalModel.z = 0.0f; // modelPose // x normalModel.x += c_modelPose[0 + 4 * 0] * n.x; normalModel.x += c_modelPose[0 + 4 * 1] * n.y; normalModel.x += c_modelPose[0 + 4 * 2] * n.z; // y normalModel.y += c_modelPose[1 + 4 * 0] * n.x; normalModel.y += c_modelPose[1 + 4 * 1] * n.y; normalModel.y += c_modelPose[1 + 4 * 2] * n.z; // z normalModel.z += c_modelPose[2 + 4 * 0] * n.x; normalModel.z += c_modelPose[2 + 4 * 1] * n.y; normalModel.z += c_modelPose[2 + 4 * 2] * n.z; // cameraPose normal.x = 0.0f; normal.y = 0.0f; normal.z = 0.0f; // x normal.x += c_cameraPose[0 + 4 * 0] * normalModel.x; normal.x += c_cameraPose[0 + 4 * 1] * normalModel.y; normal.x += c_cameraPose[0 + 4 * 2] * normalModel.z; // y normal.y += c_cameraPose[1 + 4 * 0] * normalModel.x; normal.y += c_cameraPose[1 + 4 * 1] * normalModel.y; normal.y += c_cameraPose[1 + 4 * 2] * normalModel.z; // z normal.z += c_cameraPose[2 + 4 * 0] * normalModel.x; normal.z += c_cameraPose[2 + 4 * 1] * normalModel.y; normal.z += c_cameraPose[2 + 4 * 2] * normalModel.z; normals_out[idx] = normal; // calculate dot product position and normal float dot = normal.x * pos.x / posNorm + normal.y * pos.y / posNorm + normal.z * pos.z / posNorm; // calculate gradient of silhuette float3 cross = {pos.y * normal.z - pos.z * normal.y, pos.z * normal.x - pos.x * normal.z, pos.x * normal.y - pos.y * normal.x}; float dCnorm = sqrtf(cross.x * cross.x + cross.y * cross.y + cross.z * cross.z); tangents_out[idx] = cross; // calculate pixel location with intrinsic matrix K float3 pixel; pixel.x = 0.0f; pixel.y = 0.0f; pixel.z = 0.0f; // x pixel.x += c_K[0 + 3 * 0] * pos.x; 
pixel.x += c_K[0 + 3 * 1] * pos.y; pixel.x += c_K[0 + 3 * 2] * pos.z; // y pixel.y += c_K[1 + 3 * 0] * pos.x; pixel.y += c_K[1 + 3 * 1] * pos.y; pixel.y += c_K[1 + 3 * 2] * pos.z; // z pixel.z += c_K[2 + 3 * 0] * pos.x; pixel.z += c_K[2 + 3 * 1] * pos.y; pixel.z += c_K[2 + 3 * 2] * pos.z; int2 pixelCoord; pixelCoord.x = (int) pixel.x / pixel.z; pixelCoord.y = (int) pixel.y / pixel.z; // if its a border pixel and the dot product small enough if (pixelCoord.x >= 0 && pixelCoord.x < WIDTH && pixelCoord.y >= 0 && pixelCoord.y < HEIGHT && fabsf(dot)< 0.005f && border[pixelCoord.y * WIDTH + (WIDTH-pixelCoord.x-1)] == 255) {// img_out[pixelCoord.y * WIDTH + (WIDTH-pixelCoord.x-1)] = 255; float Rc = (((float) image[pixelCoord.y * WIDTH + (WIDTH-pixelCoord.x-1)] - mu_out) * ((float) image[pixelCoord.y * WIDTH + (WIDTH-pixelCoord.x-1)] - mu_out)) ;// / sigma_out float R = (((float) image[pixelCoord.y * WIDTH + (WIDTH-pixelCoord.x-1)] - mu_in) * ((float) image[pixelCoord.y * WIDTH + (WIDTH-pixelCoord.x-1)] - mu_in)) ;// /sigma_in float statistics = (Rc - R) ;// * dCnorm logf(sigma_out / sigma_in) + gradTrans[idx].x = statistics * normal.x; gradTrans[idx].y = statistics * normal.y; gradTrans[idx].z = statistics * normal.z; float Om[9] = {0, posModel.z, -posModel.y, -posModel.z, 0, posModel.x, posModel.y, -posModel.x, 0}; float M[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; for (uint i = 0; i < 3; i++) for (uint j = 0; j < 3; j++) for (uint k = 0; k < 3; k++) M[i + 3 * j] += c_cameraPose[i + 4 * k] * Om[k + 3 * j]; statistics *= posNorm / (pos.z * pos.z * pos.z); gradRot[idx].x = statistics * (M[0 + 3 * 0] * normal.x + M[1 + 3 * 0] * normal.y + M[2 + 3 * 0] * normal.z); gradRot[idx].y = statistics * (M[0 + 3 * 1] * normal.x + M[1 + 3 * 1] * normal.y + M[2 + 3 * 1] * normal.z); gradRot[idx].z = statistics * (M[0 + 3 * 2] * normal.x + M[1 + 3 * 2] * normal.y + M[2 + 3 * 2] * normal.z); } else { tangents_out[idx].x = 0; tangents_out[idx].y = 0; tangents_out[idx].z = 0; } } } double Poseestimator::iterateOnce(const Mat &img_camera, Mat &img_artificial, VectorXd &pose, VectorXd &grad) { Mat img_camera_gray, img_camera_copy, img_artificial_gray, img_artificial_gray2; VectorXd initial_pose = pose; img_camera.copyTo(img_camera_copy); cvtColor(img_camera_copy, img_camera_gray, CV_BGR2GRAY); cvtColor(img_artificial, img_artificial_gray, CV_BGR2GRAY); // make a copy img_artificial_gray.copyTo(img_artificial_gray2); cv::Canny(img_artificial_gray, img_artificial_gray, 100, 200, 3); //imshow("Canny", img_artificial_gray); vector<vector<cv::Point> > contours; vector<cv::Vec4i> hierarchy; findContours(img_artificial_gray, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_TC89_L1, cv::Point(0, 0)); double min_contour_area = 40; for (auto it = contours.begin(); it != contours.end();) { if (contourArea(*it) < min_contour_area) it = contours.erase(it); else ++it; } if (contours.size() > 0) { Mat border = Mat::zeros(HEIGHT, WIDTH, CV_8UC1); double A_in = 0; for (int idx = 0; idx < contours.size(); idx++) { drawContours(border, contours, idx, 255, 10, 8, hierarchy, 0, cv::Point()); A_in += contourArea(contours[idx]); drawContours(img_camera_copy, contours, idx, cv::Scalar(0, 255, 0), 1, 8, hierarchy, 0, cv::Point()); } double A_out = WIDTH * HEIGHT - A_in; imshow("camera image", img_camera_copy); cv::waitKey(1); Mat R_mask = Mat::zeros(HEIGHT, WIDTH, CV_8UC1), Rc_mask, R = Mat::zeros(HEIGHT, WIDTH, CV_8UC1), Rc = Mat::zeros(HEIGHT, WIDTH, CV_8UC1); fillPoly(R_mask, contours, 255); bitwise_not(R_mask, Rc_mask); // this will mask 
out the respective part of the webcam image bitwise_and(img_camera_gray, R_mask, R); bitwise_and(img_camera_gray, Rc_mask, Rc); //imshow("R_", R); //imshow("Rc_", Rc); // convert camera image to float R.convertTo(R, CV_32FC1); Rc.convertTo(Rc, CV_32FC1); // calculate mean double mu_in = sum(R).val[0] / A_in; double mu_out = sum(Rc).val[0] / A_out; R = R - mu_in; Rc = Rc - mu_out; imshow("R", R/255.0f); imshow("Rc", Rc/255.0f); cv::waitKey(1); // copy only the respective areas Mat Rpow = Mat::zeros(HEIGHT, WIDTH, CV_32FC1), Rcpow = Mat::zeros(HEIGHT, WIDTH, CV_32FC1); R.copyTo(Rpow, R_mask); Rc.copyTo(Rcpow, Rc_mask); // calculate sigma pow(Rpow, 2.0, Rpow); pow(Rcpow, 2.0, Rcpow); double sigma_in = sum(Rpow).val[0] / A_in; double sigma_out = sum(Rcpow).val[0] / A_out; double energy = -sum(Rpow).val[0] - sum(Rcpow).val[0]; cost.push_back(energy); cout << "cost: " << energy << endl; Matrix3f rot = Matrix3f::Identity(); Matrix3f skew; Vector3f p(pose(3), pose(4), pose(5)); float angle = p.norm(); if (abs(angle) > 0.0000001) { p.normalize(); skew << 0, -p(2), p(1), p(2), 0, -p(0), -p(1), p(0), 0; rot = rot + sin(angle) * skew; rot = rot + (1.0 - cos(angle)) * skew * skew; } Matrix4f ViewMatrix = Matrix4f::Identity(); ViewMatrix.topLeftCorner(3, 3) = rot; ViewMatrix.topRightCorner(3, 1) << pose(0), pose(1), pose(2); Eigen::Matrix4f cameraPose = ViewMatrix; cudaMemcpy(d_border, border.data, WIDTH * HEIGHT * sizeof(uchar), cudaMemcpyHostToDevice); CUDA_CHECK; cudaMemcpy(d_image, img_camera_gray.data, WIDTH * HEIGHT * sizeof(uchar), cudaMemcpyHostToDevice); CUDA_CHECK; // set result image an the gradients to zero cudaMemset(d_img_out, 0, WIDTH * HEIGHT * sizeof(uchar)); CUDA_CHECK; // set constants on gpu cudaMemcpyToSymbol(c_cameraPose, &cameraPose(0, 0), 16 * sizeof(float)); grad << 0, 0, 0, 0, 0, 0; for(uint i=0;i<modelData.size();i++) { for (uint j = 0; j < modelData[i]->cuda_vbo_resource.size(); j++) { // set modelPose on gpu cudaMemcpyToSymbol(c_modelPose, &(*modelData[i]->ModelMatrix)(0,0), 16 * sizeof(float)); dim3 block = dim3(1, 1, 1); dim3 grid = dim3(modelData[i]->numberOfVertices[j], 1, 1); //cout << "number of vertices: " << modelData.size() << endl; // map OpenGL buffer object for writing from CUDA Vertex *vertices; cudaGraphicsMapResources(1, &modelData[i]->cuda_vbo_resource[j], 0); CUDA_CHECK; size_t num_bytes; cudaGraphicsResourceGetMappedPointer((void **)&vertices, &num_bytes, modelData[i]->cuda_vbo_resource[j]); CUDA_CHECK; costFcn <<< grid, block >>> ( vertices, modelData[i]->d_vertices_out[j], modelData[i]->d_normals_out[j], modelData[i]->d_tangents_out[j], d_border, d_image, mu_in, mu_out, sigma_in, sigma_out, d_img_out, modelData[i]->numberOfVertices[j], modelData[i]->d_gradTrans[j], modelData[i]->d_gradRot[j], d_gradient); CUDA_CHECK; // unmap buffer object cudaGraphicsUnmapResources(1, &modelData[i]->cuda_vbo_resource[j], 0); CUDA_CHECK; #ifdef VISUALIZE cudaMemcpy( modelData[i]->vertices_out[j], modelData[i]->d_vertices_out[j], modelData[i]->numberOfVertices[j] * sizeof(float3), cudaMemcpyDeviceToHost); CUDA_CHECK; cudaMemcpy( modelData[i]->normals_out[j], modelData[i]->d_normals_out[j], modelData[i]->numberOfVertices[j] * sizeof(float3), cudaMemcpyDeviceToHost); CUDA_CHECK; cudaMemcpy( modelData[i]->tangents_out[j], modelData[i]->d_tangents_out[j], modelData[i]->numberOfVertices[j] * sizeof(float3), cudaMemcpyDeviceToHost); CUDA_CHECK; #endif cudaMemcpy(modelData[i]->gradTrans[j], modelData[i]->d_gradTrans[j], modelData[i]->numberOfVertices[j] * sizeof(float3), 
cudaMemcpyDeviceToHost); CUDA_CHECK; cudaMemcpy(modelData[i]->gradRot[j], modelData[i]->d_gradRot[j], modelData[i]->numberOfVertices[j] * sizeof(float3), cudaMemcpyDeviceToHost); CUDA_CHECK; for (uint k = 0; k < modelData[i]->numberOfVertices[j]; k++) { grad(0) += modelData[i]->gradTrans[j][k].x; grad(1) += modelData[i]->gradTrans[j][k].y; grad(2) += modelData[i]->gradTrans[j][k].z; grad(3) += modelData[i]->gradRot[j][k].x; grad(4) += modelData[i]->gradRot[j][k].y; grad(5) += modelData[i]->gradRot[j][k].z; } } } // copy data from gpu to cpu cudaMemcpy(res, d_img_out, WIDTH * HEIGHT * sizeof(uchar), cudaMemcpyDeviceToHost); CUDA_CHECK; Mat img(HEIGHT, WIDTH, CV_8UC1, res); imshow("result", img); cv::waitKey(1); return energy; } else { cout << "cannot find any contour" << endl; return 0; } }
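The per-mesh loop above follows the standard CUDA/OpenGL interop sequence: register the VBO once, then map it, fetch a device pointer, launch the kernel, and unmap so OpenGL can use the buffer again. A minimal sketch of that sequence; it assumes an OpenGL context is current and that vbo is an already-created GL buffer object (both assumptions, not taken from the file):

// Sketch only: 'vbo' is assumed to be an existing OpenGL buffer object.
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>

void process_vbo(unsigned int vbo) {
    cudaGraphicsResource* resource = nullptr;
    cudaGraphicsGLRegisterBuffer(&resource, vbo, cudaGraphicsMapFlagsReadOnly);

    cudaGraphicsMapResources(1, &resource, 0);
    void* vertices = nullptr;
    size_t numBytes = 0;
    cudaGraphicsResourceGetMappedPointer(&vertices, &numBytes, resource);

    // ... launch a kernel that reads the mapped vertex buffer here ...

    cudaGraphicsUnmapResources(1, &resource, 0);
    cudaGraphicsUnregisterResource(resource);
}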
c81687a538c2f91d4be3c5f0f06380620a511949.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include "opencv2/opencv_modules.hpp" #ifndef HAVE_OPENCV_CUDEV #error "opencv_cudev is required" #else #include "opencv2/core/cuda.hpp" #include "opencv2/cudev.hpp" using namespace cv; using namespace cv::cuda; GpuData::GpuData(const size_t _size) : data(nullptr), size(_size) { CV_CUDEV_SAFE_CALL(hipMalloc(&data, _size)); } GpuData::~GpuData() { CV_CUDEV_SAFE_CALL(hipFree(data)); } ///////////////////////////////////////////////////// /// create void GpuMatND::create(SizeArray _size, int _type) { { auto elements_nonzero = [](SizeArray& v) { return std::all_of(v.begin(), v.end(), [](unsigned u){ return u > 0; }); }; CV_Assert(!_size.empty()); CV_Assert(elements_nonzero(_size)); } _type &= Mat::TYPE_MASK; if (size == _size && type() == _type && !empty() && !external() && isContinuous() && !isSubmatrix()) return; release(); setFields(std::move(_size), _type); data_ = std::make_shared<GpuData>(totalMemSize()); data = data_->data; offset = 0; } ///////////////////////////////////////////////////// /// release void GpuMatND::release() { data = nullptr; data_.reset(); flags = dims = offset = 0; size.clear(); step.clear(); } ///////////////////////////////////////////////////// /// clone static bool next(uchar*& d, const uchar*& s, std::vector<int>& idx, const int dims, const GpuMatND& dst, const GpuMatND& src) { int inc = dims-3; while (true) { if (idx[inc] == src.size[inc] - 1) { if (inc == 0) { return false; } idx[inc] = 0; d -= (dst.size[inc] - 1) * dst.step[inc]; s -= (src.size[inc] - 1) * src.step[inc]; inc--; } else { idx[inc]++; d += dst.step[inc]; s += src.step[inc]; break; } } return true; } GpuMatND GpuMatND::clone() const { CV_DbgAssert(!empty()); GpuMatND ret(size, type()); if (isContinuous()) { CV_CUDEV_SAFE_CALL(hipMemcpy(ret.getDevicePtr(), getDevicePtr(), ret.totalMemSize(), hipMemcpyDeviceToDevice)); } else { // 1D arrays are always continuous if (dims == 2) { CV_CUDEV_SAFE_CALL( hipMemcpy2D(ret.getDevicePtr(), ret.step[0], getDevicePtr(), step[0], size[1]*step[1], size[0], hipMemcpyDeviceToDevice) ); } else { std::vector<int> idx(dims-2, 0); uchar* d = ret.getDevicePtr(); const uchar* s = getDevicePtr(); // iterate each 2D plane do { CV_CUDEV_SAFE_CALL( hipMemcpy2DAsync( d, ret.step[dims-2], s, step[dims-2], size[dims-1]*step[dims-1], size[dims-2], hipMemcpyDeviceToDevice) ); } while (next(d, s, idx, dims, ret, *this)); CV_CUDEV_SAFE_CALL(hipStreamSynchronize(0)); } } return ret; } GpuMatND GpuMatND::clone(Stream& stream) const { CV_DbgAssert(!empty()); GpuMatND ret(size, type()); hipStream_t _stream = StreamAccessor::getStream(stream); if (isContinuous()) { CV_CUDEV_SAFE_CALL(hipMemcpyAsync(ret.getDevicePtr(), getDevicePtr(), ret.totalMemSize(), hipMemcpyDeviceToDevice, _stream)); } else { // 1D arrays are always continuous if (dims == 2) { CV_CUDEV_SAFE_CALL( hipMemcpy2DAsync(ret.getDevicePtr(), ret.step[0], getDevicePtr(), step[0], size[1]*step[1], size[0], hipMemcpyDeviceToDevice, _stream) ); } else { std::vector<int> idx(dims-2, 0); uchar* d = ret.getDevicePtr(); const uchar* s = getDevicePtr(); // iterate each 2D plane do { CV_CUDEV_SAFE_CALL( hipMemcpy2DAsync( d, ret.step[dims-2], s, step[dims-2], size[dims-1]*step[dims-1], size[dims-2], hipMemcpyDeviceToDevice, _stream) ); } while (next(d, s, idx, dims, ret, *this)); } } 
return ret; } ///////////////////////////////////////////////////// /// upload void GpuMatND::upload(InputArray src) { Mat mat = src.getMat(); CV_DbgAssert(!mat.empty()); if (!mat.isContinuous()) mat = mat.clone(); SizeArray _size(mat.dims); std::copy_n(mat.size.p, mat.dims, _size.data()); create(std::move(_size), mat.type()); CV_CUDEV_SAFE_CALL(hipMemcpy(getDevicePtr(), mat.data, totalMemSize(), hipMemcpyHostToDevice)); } void GpuMatND::upload(InputArray src, Stream& stream) { Mat mat = src.getMat(); CV_DbgAssert(!mat.empty()); if (!mat.isContinuous()) mat = mat.clone(); SizeArray _size(mat.dims); std::copy_n(mat.size.p, mat.dims, _size.data()); create(std::move(_size), mat.type()); hipStream_t _stream = StreamAccessor::getStream(stream); CV_CUDEV_SAFE_CALL(hipMemcpyAsync(getDevicePtr(), mat.data, totalMemSize(), hipMemcpyHostToDevice, _stream)); } ///////////////////////////////////////////////////// /// download void GpuMatND::download(OutputArray dst) const { CV_DbgAssert(!empty()); dst.create(dims, size.data(), type()); Mat mat = dst.getMat(); GpuMatND gmat = *this; if (!gmat.isContinuous()) gmat = gmat.clone(); CV_CUDEV_SAFE_CALL(hipMemcpy(mat.data, gmat.getDevicePtr(), mat.total() * mat.elemSize(), hipMemcpyDeviceToHost)); } void GpuMatND::download(OutputArray dst, Stream& stream) const { CV_DbgAssert(!empty()); dst.create(dims, size.data(), type()); Mat mat = dst.getMat(); GpuMatND gmat = *this; if (!gmat.isContinuous()) gmat = gmat.clone(stream); hipStream_t _stream = StreamAccessor::getStream(stream); CV_CUDEV_SAFE_CALL(hipMemcpyAsync(mat.data, gmat.getDevicePtr(), mat.total() * mat.elemSize(), hipMemcpyDeviceToHost, _stream)); } #endif
c81687a538c2f91d4be3c5f0f06380620a511949.cu
// This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include "opencv2/opencv_modules.hpp" #ifndef HAVE_OPENCV_CUDEV #error "opencv_cudev is required" #else #include "opencv2/core/cuda.hpp" #include "opencv2/cudev.hpp" using namespace cv; using namespace cv::cuda; GpuData::GpuData(const size_t _size) : data(nullptr), size(_size) { CV_CUDEV_SAFE_CALL(cudaMalloc(&data, _size)); } GpuData::~GpuData() { CV_CUDEV_SAFE_CALL(cudaFree(data)); } ///////////////////////////////////////////////////// /// create void GpuMatND::create(SizeArray _size, int _type) { { auto elements_nonzero = [](SizeArray& v) { return std::all_of(v.begin(), v.end(), [](unsigned u){ return u > 0; }); }; CV_Assert(!_size.empty()); CV_Assert(elements_nonzero(_size)); } _type &= Mat::TYPE_MASK; if (size == _size && type() == _type && !empty() && !external() && isContinuous() && !isSubmatrix()) return; release(); setFields(std::move(_size), _type); data_ = std::make_shared<GpuData>(totalMemSize()); data = data_->data; offset = 0; } ///////////////////////////////////////////////////// /// release void GpuMatND::release() { data = nullptr; data_.reset(); flags = dims = offset = 0; size.clear(); step.clear(); } ///////////////////////////////////////////////////// /// clone static bool next(uchar*& d, const uchar*& s, std::vector<int>& idx, const int dims, const GpuMatND& dst, const GpuMatND& src) { int inc = dims-3; while (true) { if (idx[inc] == src.size[inc] - 1) { if (inc == 0) { return false; } idx[inc] = 0; d -= (dst.size[inc] - 1) * dst.step[inc]; s -= (src.size[inc] - 1) * src.step[inc]; inc--; } else { idx[inc]++; d += dst.step[inc]; s += src.step[inc]; break; } } return true; } GpuMatND GpuMatND::clone() const { CV_DbgAssert(!empty()); GpuMatND ret(size, type()); if (isContinuous()) { CV_CUDEV_SAFE_CALL(cudaMemcpy(ret.getDevicePtr(), getDevicePtr(), ret.totalMemSize(), cudaMemcpyDeviceToDevice)); } else { // 1D arrays are always continuous if (dims == 2) { CV_CUDEV_SAFE_CALL( cudaMemcpy2D(ret.getDevicePtr(), ret.step[0], getDevicePtr(), step[0], size[1]*step[1], size[0], cudaMemcpyDeviceToDevice) ); } else { std::vector<int> idx(dims-2, 0); uchar* d = ret.getDevicePtr(); const uchar* s = getDevicePtr(); // iterate each 2D plane do { CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync( d, ret.step[dims-2], s, step[dims-2], size[dims-1]*step[dims-1], size[dims-2], cudaMemcpyDeviceToDevice) ); } while (next(d, s, idx, dims, ret, *this)); CV_CUDEV_SAFE_CALL(cudaStreamSynchronize(0)); } } return ret; } GpuMatND GpuMatND::clone(Stream& stream) const { CV_DbgAssert(!empty()); GpuMatND ret(size, type()); cudaStream_t _stream = StreamAccessor::getStream(stream); if (isContinuous()) { CV_CUDEV_SAFE_CALL(cudaMemcpyAsync(ret.getDevicePtr(), getDevicePtr(), ret.totalMemSize(), cudaMemcpyDeviceToDevice, _stream)); } else { // 1D arrays are always continuous if (dims == 2) { CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(ret.getDevicePtr(), ret.step[0], getDevicePtr(), step[0], size[1]*step[1], size[0], cudaMemcpyDeviceToDevice, _stream) ); } else { std::vector<int> idx(dims-2, 0); uchar* d = ret.getDevicePtr(); const uchar* s = getDevicePtr(); // iterate each 2D plane do { CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync( d, ret.step[dims-2], s, step[dims-2], size[dims-1]*step[dims-1], size[dims-2], cudaMemcpyDeviceToDevice, _stream) ); } while (next(d, s, idx, dims, ret, *this)); } } return ret; } 
///////////////////////////////////////////////////// /// upload void GpuMatND::upload(InputArray src) { Mat mat = src.getMat(); CV_DbgAssert(!mat.empty()); if (!mat.isContinuous()) mat = mat.clone(); SizeArray _size(mat.dims); std::copy_n(mat.size.p, mat.dims, _size.data()); create(std::move(_size), mat.type()); CV_CUDEV_SAFE_CALL(cudaMemcpy(getDevicePtr(), mat.data, totalMemSize(), cudaMemcpyHostToDevice)); } void GpuMatND::upload(InputArray src, Stream& stream) { Mat mat = src.getMat(); CV_DbgAssert(!mat.empty()); if (!mat.isContinuous()) mat = mat.clone(); SizeArray _size(mat.dims); std::copy_n(mat.size.p, mat.dims, _size.data()); create(std::move(_size), mat.type()); cudaStream_t _stream = StreamAccessor::getStream(stream); CV_CUDEV_SAFE_CALL(cudaMemcpyAsync(getDevicePtr(), mat.data, totalMemSize(), cudaMemcpyHostToDevice, _stream)); } ///////////////////////////////////////////////////// /// download void GpuMatND::download(OutputArray dst) const { CV_DbgAssert(!empty()); dst.create(dims, size.data(), type()); Mat mat = dst.getMat(); GpuMatND gmat = *this; if (!gmat.isContinuous()) gmat = gmat.clone(); CV_CUDEV_SAFE_CALL(cudaMemcpy(mat.data, gmat.getDevicePtr(), mat.total() * mat.elemSize(), cudaMemcpyDeviceToHost)); } void GpuMatND::download(OutputArray dst, Stream& stream) const { CV_DbgAssert(!empty()); dst.create(dims, size.data(), type()); Mat mat = dst.getMat(); GpuMatND gmat = *this; if (!gmat.isContinuous()) gmat = gmat.clone(stream); cudaStream_t _stream = StreamAccessor::getStream(stream); CV_CUDEV_SAFE_CALL(cudaMemcpyAsync(mat.data, gmat.getDevicePtr(), mat.total() * mat.elemSize(), cudaMemcpyDeviceToHost, _stream)); } #endif
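The non-contiguous clone above copies one 2D plane at a time with cudaMemcpy2D/cudaMemcpy2DAsync, whose arguments are destination pitch, source pitch, row width in bytes, and row count. A minimal standalone sketch of a pitched device-to-device copy; the image size is made up:

// Standalone sketch: copying a 2D region between pitched device allocations.
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    const size_t width = 640, height = 480;            // made-up image size
    const size_t widthBytes = width * sizeof(float);   // row width in bytes
    float *src = nullptr, *dst = nullptr;
    size_t srcPitch = 0, dstPitch = 0;
    cudaMallocPitch((void**)&src, &srcPitch, widthBytes, height);
    cudaMallocPitch((void**)&dst, &dstPitch, widthBytes, height);

    // Copy 'height' rows of 'widthBytes' bytes each; the two pitches may differ.
    cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthBytes, height,
                 cudaMemcpyDeviceToDevice);

    std::printf("src pitch = %zu, dst pitch = %zu\n", srcPitch, dstPitch);
    cudaFree(src);
    cudaFree(dst);
}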
815c301dd7488d3e3c661f531ef56b8be23ef2d1.hip
// !!! This is a file automatically generated by hipify!!! // small change by Lixin to fit pytorch's API /* Matt Dean - 1422434 - mxd434 Goals implemented: - Block scan for arbitrary length small vectors - 'blockscan' function - Full scan for arbitrary length large vectors - 'scan' function This function decides whether to perform a small (one block) scan or a full (n-level) scan depending on the length of the input vector - BCAO for both scans Hardware: CPU - Intel Core i5-4670k @ 3.4GHz GPU - NVIDIA GeForce GTX 760 Timings: 10,000,000 Elements host : 20749 ms gpu : 7.860768 ms gpu bcao : 4.304064 ms For more results please see the comment at the bottom of this file Extra work: Due to the recursive nature of the full scan it can handle n > 3 levels */ #include <stdlib.h> #include <stdio.h> #include <time.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/device_functions.h" #include "prefix_sum.h" // scan.cuh void sequential_scan(int* output, int* input, int length); void blockscan(int *output, int *input, int length, bool bcao); void scan(int *output, int *input, int length, bool bcao); void scanLargeDeviceArray(int *output, int *input, int length, bool bcao); void scanSmallDeviceArray(int *d_out, int *d_in, int length, bool bcao); void scanLargeEvenDeviceArray(int *output, int *input, int length, bool bcao); // kernels.cuh __global__ void prescan_arbitrary(int *output, int *input, int n, int powerOfTwo); __global__ void prescan_arbitrary_unoptimized(int *output, int *input, int n, int powerOfTwo); __global__ void prescan_large(int *output, int *input, int n, int* sums); __global__ void prescan_large_unoptimized(int *output, int *input, int n, int *sums); __global__ void add(int *output, int length, int *n1); __global__ void add(int *output, int length, int *n1, int *n2); // utils.h void _checkCudaError(const char *message, hipError_t err, const char *caller); void printResult(const char* prefix, int result, long nanoseconds); void printResult(const char* prefix, int result, float milliseconds); bool isPowerOfTwo(int x); int nextPowerOfTwo(int x); long get_nanos(); /*///////////////////////////////////*/ /* New API */ /*///////////////////////////////////*/ void PrefixSumCUDA( const at::Tensor grid_cnt, int num_grids, at::Tensor grid_off) { scan( grid_off.contiguous().data_ptr<int>(), grid_cnt.contiguous().data_ptr<int>(), num_grids, true ); return; } void PrefixSumCPU( const at::Tensor grid_cnt, int num_grids, at::Tensor grid_off) { sequential_scan( grid_off.contiguous().data_ptr<int>(), grid_cnt.contiguous().data_ptr<int>(), num_grids ); return; } /*///////////////////////////////////*/ /* scan.cu */ /*///////////////////////////////////*/ #define checkCudaError(o, l) _checkCudaError(o, l, __func__) int THREADS_PER_BLOCK = 512; int ELEMENTS_PER_BLOCK = THREADS_PER_BLOCK * 2; void sequential_scan(int* output, int* input, int length) { output[0] = 0; // since this is a prescan, not a scan for (int j = 1; j < length; ++j) { output[j] = input[j - 1] + output[j - 1]; } return; } void blockscan(int *d_out, int *d_in, int length, bool bcao) { int powerOfTwo = nextPowerOfTwo(length); if (bcao) { hipLaunchKernelGGL(( prescan_arbitrary), dim3(1), dim3((length + 1) / 2), 2 * powerOfTwo * sizeof(int), 0, d_out, d_in, length, powerOfTwo); } else { hipLaunchKernelGGL(( prescan_arbitrary_unoptimized), dim3(1), dim3((length + 1) / 2), 2 * powerOfTwo * sizeof(int), 0, d_out, d_in, length, powerOfTwo); } return; } void scan(int *d_out, int *d_in, int length, bool bcao) { 
if (length > ELEMENTS_PER_BLOCK) { scanLargeDeviceArray(d_out, d_in, length, bcao); } else { scanSmallDeviceArray(d_out, d_in, length, bcao); } return; } void scanLargeDeviceArray(int *d_out, int *d_in, int length, bool bcao) { int remainder = length % (ELEMENTS_PER_BLOCK); if (remainder == 0) { scanLargeEvenDeviceArray(d_out, d_in, length, bcao); } else { // perform a large scan on a compatible multiple of elements int lengthMultiple = length - remainder; scanLargeEvenDeviceArray(d_out, d_in, lengthMultiple, bcao); // scan the remaining elements and add the (inclusive) last element of the large scan to this int *startOfOutputArray = &(d_out[lengthMultiple]); scanSmallDeviceArray(startOfOutputArray, &(d_in[lengthMultiple]), remainder, bcao); hipLaunchKernelGGL(( add), dim3(1), dim3(remainder), 0, 0, startOfOutputArray, remainder, &(d_in[lengthMultiple - 1]), &(d_out[lengthMultiple - 1])); } } void scanSmallDeviceArray(int *d_out, int *d_in, int length, bool bcao) { int powerOfTwo = nextPowerOfTwo(length); if (bcao) { hipLaunchKernelGGL(( prescan_arbitrary), dim3(1), dim3((length + 1) / 2), 2 * powerOfTwo * sizeof(int), 0, d_out, d_in, length, powerOfTwo); } else { hipLaunchKernelGGL(( prescan_arbitrary_unoptimized), dim3(1), dim3((length + 1) / 2), 2 * powerOfTwo * sizeof(int), 0, d_out, d_in, length, powerOfTwo); } } void scanLargeEvenDeviceArray(int *d_out, int *d_in, int length, bool bcao) { const int blocks = length / ELEMENTS_PER_BLOCK; const int sharedMemArraySize = ELEMENTS_PER_BLOCK * sizeof(int); int *d_sums, *d_incr; hipMalloc((void **)&d_sums, blocks * sizeof(int)); hipMalloc((void **)&d_incr, blocks * sizeof(int)); if (bcao) { hipLaunchKernelGGL(( prescan_large), dim3(blocks), dim3(THREADS_PER_BLOCK), 2 * sharedMemArraySize, 0, d_out, d_in, ELEMENTS_PER_BLOCK, d_sums); } else { hipLaunchKernelGGL(( prescan_large_unoptimized), dim3(blocks), dim3(THREADS_PER_BLOCK), 2 * sharedMemArraySize, 0, d_out, d_in, ELEMENTS_PER_BLOCK, d_sums); } const int sumsArrThreadsNeeded = (blocks + 1) / 2; if (sumsArrThreadsNeeded > THREADS_PER_BLOCK) { // perform a large scan on the sums arr scanLargeDeviceArray(d_incr, d_sums, blocks, bcao); } else { // only need one block to scan sums arr so can use small scan scanSmallDeviceArray(d_incr, d_sums, blocks, bcao); } hipLaunchKernelGGL(( add), dim3(blocks), dim3(ELEMENTS_PER_BLOCK), 0, 0, d_out, ELEMENTS_PER_BLOCK, d_incr); hipFree(d_sums); hipFree(d_incr); } /*///////////////////////////////////*/ /* kernels.cu */ /*///////////////////////////////////*/ #define SHARED_MEMORY_BANKS 32 #define LOG_MEM_BANKS 5 // There were two BCAO optimisations in the paper - this one is fastest #define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_MEM_BANKS) __global__ void prescan_arbitrary(int *output, int *input, int n, int powerOfTwo) { extern __shared__ int temp[];// allocated on invocation int threadID = threadIdx.x; int ai = threadID; int bi = threadID + (n / 2); int bankOffsetA = CONFLICT_FREE_OFFSET(ai); int bankOffsetB = CONFLICT_FREE_OFFSET(bi); if (threadID < n) { temp[ai + bankOffsetA] = input[ai]; temp[bi + bankOffsetB] = input[bi]; } else { temp[ai + bankOffsetA] = 0; temp[bi + bankOffsetB] = 0; } int offset = 1; for (int d = powerOfTwo >> 1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (threadID < d) { int ai = offset * (2 * threadID + 1) - 1; int bi = offset * (2 * threadID + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); temp[bi] += temp[ai]; } offset *= 2; } if (threadID == 0) { temp[powerOfTwo - 1 + 
CONFLICT_FREE_OFFSET(powerOfTwo - 1)] = 0; // clear the last element } for (int d = 1; d < powerOfTwo; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (threadID < d) { int ai = offset * (2 * threadID + 1) - 1; int bi = offset * (2 * threadID + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); if (threadID < n) { output[ai] = temp[ai + bankOffsetA]; output[bi] = temp[bi + bankOffsetB]; } } __global__ void prescan_arbitrary_unoptimized(int *output, int *input, int n, int powerOfTwo) { extern __shared__ int temp[];// allocated on invocation int threadID = threadIdx.x; if (threadID < n) { temp[2 * threadID] = input[2 * threadID]; // load input into shared memory temp[2 * threadID + 1] = input[2 * threadID + 1]; } else { temp[2 * threadID] = 0; temp[2 * threadID + 1] = 0; } int offset = 1; for (int d = powerOfTwo >> 1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (threadID < d) { int ai = offset * (2 * threadID + 1) - 1; int bi = offset * (2 * threadID + 2) - 1; temp[bi] += temp[ai]; } offset *= 2; } if (threadID == 0) { temp[powerOfTwo - 1] = 0; } // clear the last element for (int d = 1; d < powerOfTwo; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (threadID < d) { int ai = offset * (2 * threadID + 1) - 1; int bi = offset * (2 * threadID + 2) - 1; int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); if (threadID < n) { output[2 * threadID] = temp[2 * threadID]; // write results to device memory output[2 * threadID + 1] = temp[2 * threadID + 1]; } } __global__ void prescan_large(int *output, int *input, int n, int *sums) { extern __shared__ int temp[]; int blockID = blockIdx.x; int threadID = threadIdx.x; int blockOffset = blockID * n; int ai = threadID; int bi = threadID + (n / 2); int bankOffsetA = CONFLICT_FREE_OFFSET(ai); int bankOffsetB = CONFLICT_FREE_OFFSET(bi); temp[ai + bankOffsetA] = input[blockOffset + ai]; temp[bi + bankOffsetB] = input[blockOffset + bi]; int offset = 1; for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (threadID < d) { int ai = offset * (2 * threadID + 1) - 1; int bi = offset * (2 * threadID + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); temp[bi] += temp[ai]; } offset *= 2; } __syncthreads(); if (threadID == 0) { sums[blockID] = temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)]; temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0; } for (int d = 1; d < n; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (threadID < d) { int ai = offset * (2 * threadID + 1) - 1; int bi = offset * (2 * threadID + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); output[blockOffset + ai] = temp[ai + bankOffsetA]; output[blockOffset + bi] = temp[bi + bankOffsetB]; } __global__ void prescan_large_unoptimized(int *output, int *input, int n, int *sums) { int blockID = blockIdx.x; int threadID = threadIdx.x; int blockOffset = blockID * n; extern __shared__ int temp[]; temp[2 * threadID] = input[blockOffset + (2 * threadID)]; temp[2 * threadID + 1] = input[blockOffset + (2 * threadID) + 1]; int offset = 1; for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (threadID < d) { int ai = offset * (2 * threadID + 1) - 1; int bi = offset * (2 * threadID + 2) - 1; 
temp[bi] += temp[ai]; } offset *= 2; } __syncthreads(); if (threadID == 0) { sums[blockID] = temp[n - 1]; temp[n - 1] = 0; } for (int d = 1; d < n; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (threadID < d) { int ai = offset * (2 * threadID + 1) - 1; int bi = offset * (2 * threadID + 2) - 1; int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); output[blockOffset + (2 * threadID)] = temp[2 * threadID]; output[blockOffset + (2 * threadID) + 1] = temp[2 * threadID + 1]; } __global__ void add(int *output, int length, int *n) { int blockID = blockIdx.x; int threadID = threadIdx.x; int blockOffset = blockID * length; output[blockOffset + threadID] += n[blockID]; } __global__ void add(int *output, int length, int *n1, int *n2) { int blockID = blockIdx.x; int threadID = threadIdx.x; int blockOffset = blockID * length; output[blockOffset + threadID] += n1[blockID] + n2[blockID]; } /*///////////////////////////////////*/ /* utils.cpp */ /*///////////////////////////////////*/ void _checkCudaError(const char *message, hipError_t err, const char *caller) { if (err != hipSuccess) { fprintf(stderr, "Error in: %s\n", caller); fprintf(stderr, message); fprintf(stderr, ": %s\n", hipGetErrorString(err)); exit(0); } } void printResult(const char* prefix, int result, long nanoseconds) { printf(" "); printf(prefix); printf(" : %i in %ld ms \n", result, nanoseconds / 1000); } void printResult(const char* prefix, int result, float milliseconds) { printf(" "); printf(prefix); printf(" : %i in %f ms \n", result, milliseconds); } // from https://stackoverflow.com/a/3638454 bool isPowerOfTwo(int x) { return x && !(x & (x - 1)); } // from https://stackoverflow.com/a/12506181 int nextPowerOfTwo(int x) { int power = 1; while (power < x) { power *= 2; } return power; } // from https://stackoverflow.com/a/36095407 // Get the current time in nanoseconds long get_nanos() { struct timespec ts; timespec_get(&ts, TIME_UTC); return (long)ts.tv_sec * 1000000000L + ts.tv_nsec; }
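The scan implemented above is an exclusive (Blelloch) prefix sum: output element i is the sum of all inputs strictly before i, so the first output is always 0 and the last input never contributes. A minimal host-side illustration of that contract, mirroring sequential_scan above (sample values chosen arbitrarily; not part of the corpus file):

#include <cstdio>

int main() {
    const int N = 8;
    int in[N]  = {3, 1, 7, 0, 4, 1, 6, 3};   // arbitrary sample input
    int out[N];
    out[0] = 0;                               // exclusive scan: nothing precedes element 0
    for (int j = 1; j < N; ++j)
        out[j] = out[j - 1] + in[j - 1];      // same recurrence as sequential_scan()
    for (int j = 0; j < N; ++j)
        printf("%d ", out[j]);                // prints: 0 3 4 11 11 15 16 22
    printf("\n");
    return 0;
}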
815c301dd7488d3e3c661f531ef56b8be23ef2d1.cu
// small change by Lixin to fit pytorch's API /* Matt Dean - 1422434 - mxd434 Goals implemented: - Block scan for arbitrary length small vectors - 'blockscan' function - Full scan for arbitrary length large vectors - 'scan' function This function decides whether to perform a small (one block) scan or a full (n-level) scan depending on the length of the input vector - BCAO for both scans Hardware: CPU - Intel Core i5-4670k @ 3.4GHz GPU - NVIDIA GeForce GTX 760 Timings: 10,000,000 Elements host : 20749 ms gpu : 7.860768 ms gpu bcao : 4.304064 ms For more results please see the comment at the bottom of this file Extra work: Due to the recursive nature of the full scan it can handle n > 3 levels */ #include <stdlib.h> #include <stdio.h> #include <time.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "device_functions.h" #include "prefix_sum.h" // scan.cuh void sequential_scan(int* output, int* input, int length); void blockscan(int *output, int *input, int length, bool bcao); void scan(int *output, int *input, int length, bool bcao); void scanLargeDeviceArray(int *output, int *input, int length, bool bcao); void scanSmallDeviceArray(int *d_out, int *d_in, int length, bool bcao); void scanLargeEvenDeviceArray(int *output, int *input, int length, bool bcao); // kernels.cuh __global__ void prescan_arbitrary(int *output, int *input, int n, int powerOfTwo); __global__ void prescan_arbitrary_unoptimized(int *output, int *input, int n, int powerOfTwo); __global__ void prescan_large(int *output, int *input, int n, int* sums); __global__ void prescan_large_unoptimized(int *output, int *input, int n, int *sums); __global__ void add(int *output, int length, int *n1); __global__ void add(int *output, int length, int *n1, int *n2); // utils.h void _checkCudaError(const char *message, cudaError_t err, const char *caller); void printResult(const char* prefix, int result, long nanoseconds); void printResult(const char* prefix, int result, float milliseconds); bool isPowerOfTwo(int x); int nextPowerOfTwo(int x); long get_nanos(); /*///////////////////////////////////*/ /* New API */ /*///////////////////////////////////*/ void PrefixSumCUDA( const at::Tensor grid_cnt, int num_grids, at::Tensor grid_off) { scan( grid_off.contiguous().data_ptr<int>(), grid_cnt.contiguous().data_ptr<int>(), num_grids, true ); return; } void PrefixSumCPU( const at::Tensor grid_cnt, int num_grids, at::Tensor grid_off) { sequential_scan( grid_off.contiguous().data_ptr<int>(), grid_cnt.contiguous().data_ptr<int>(), num_grids ); return; } /*///////////////////////////////////*/ /* scan.cu */ /*///////////////////////////////////*/ #define checkCudaError(o, l) _checkCudaError(o, l, __func__) int THREADS_PER_BLOCK = 512; int ELEMENTS_PER_BLOCK = THREADS_PER_BLOCK * 2; void sequential_scan(int* output, int* input, int length) { output[0] = 0; // since this is a prescan, not a scan for (int j = 1; j < length; ++j) { output[j] = input[j - 1] + output[j - 1]; } return; } void blockscan(int *d_out, int *d_in, int length, bool bcao) { int powerOfTwo = nextPowerOfTwo(length); if (bcao) { prescan_arbitrary<<<1, (length + 1) / 2, 2 * powerOfTwo * sizeof(int)>>>(d_out, d_in, length, powerOfTwo); } else { prescan_arbitrary_unoptimized<<<1, (length + 1) / 2, 2 * powerOfTwo * sizeof(int)>>>(d_out, d_in, length, powerOfTwo); } return; } void scan(int *d_out, int *d_in, int length, bool bcao) { if (length > ELEMENTS_PER_BLOCK) { scanLargeDeviceArray(d_out, d_in, length, bcao); } else { scanSmallDeviceArray(d_out, d_in, 
length, bcao); } return; } void scanLargeDeviceArray(int *d_out, int *d_in, int length, bool bcao) { int remainder = length % (ELEMENTS_PER_BLOCK); if (remainder == 0) { scanLargeEvenDeviceArray(d_out, d_in, length, bcao); } else { // perform a large scan on a compatible multiple of elements int lengthMultiple = length - remainder; scanLargeEvenDeviceArray(d_out, d_in, lengthMultiple, bcao); // scan the remaining elements and add the (inclusive) last element of the large scan to this int *startOfOutputArray = &(d_out[lengthMultiple]); scanSmallDeviceArray(startOfOutputArray, &(d_in[lengthMultiple]), remainder, bcao); add<<<1, remainder>>>(startOfOutputArray, remainder, &(d_in[lengthMultiple - 1]), &(d_out[lengthMultiple - 1])); } } void scanSmallDeviceArray(int *d_out, int *d_in, int length, bool bcao) { int powerOfTwo = nextPowerOfTwo(length); if (bcao) { prescan_arbitrary<<<1, (length + 1) / 2, 2 * powerOfTwo * sizeof(int)>>>(d_out, d_in, length, powerOfTwo); } else { prescan_arbitrary_unoptimized<<<1, (length + 1) / 2, 2 * powerOfTwo * sizeof(int)>>>(d_out, d_in, length, powerOfTwo); } } void scanLargeEvenDeviceArray(int *d_out, int *d_in, int length, bool bcao) { const int blocks = length / ELEMENTS_PER_BLOCK; const int sharedMemArraySize = ELEMENTS_PER_BLOCK * sizeof(int); int *d_sums, *d_incr; cudaMalloc((void **)&d_sums, blocks * sizeof(int)); cudaMalloc((void **)&d_incr, blocks * sizeof(int)); if (bcao) { prescan_large<<<blocks, THREADS_PER_BLOCK, 2 * sharedMemArraySize>>>(d_out, d_in, ELEMENTS_PER_BLOCK, d_sums); } else { prescan_large_unoptimized<<<blocks, THREADS_PER_BLOCK, 2 * sharedMemArraySize>>>(d_out, d_in, ELEMENTS_PER_BLOCK, d_sums); } const int sumsArrThreadsNeeded = (blocks + 1) / 2; if (sumsArrThreadsNeeded > THREADS_PER_BLOCK) { // perform a large scan on the sums arr scanLargeDeviceArray(d_incr, d_sums, blocks, bcao); } else { // only need one block to scan sums arr so can use small scan scanSmallDeviceArray(d_incr, d_sums, blocks, bcao); } add<<<blocks, ELEMENTS_PER_BLOCK>>>(d_out, ELEMENTS_PER_BLOCK, d_incr); cudaFree(d_sums); cudaFree(d_incr); } /*///////////////////////////////////*/ /* kernels.cu */ /*///////////////////////////////////*/ #define SHARED_MEMORY_BANKS 32 #define LOG_MEM_BANKS 5 // There were two BCAO optimisations in the paper - this one is fastest #define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_MEM_BANKS) __global__ void prescan_arbitrary(int *output, int *input, int n, int powerOfTwo) { extern __shared__ int temp[];// allocated on invocation int threadID = threadIdx.x; int ai = threadID; int bi = threadID + (n / 2); int bankOffsetA = CONFLICT_FREE_OFFSET(ai); int bankOffsetB = CONFLICT_FREE_OFFSET(bi); if (threadID < n) { temp[ai + bankOffsetA] = input[ai]; temp[bi + bankOffsetB] = input[bi]; } else { temp[ai + bankOffsetA] = 0; temp[bi + bankOffsetB] = 0; } int offset = 1; for (int d = powerOfTwo >> 1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (threadID < d) { int ai = offset * (2 * threadID + 1) - 1; int bi = offset * (2 * threadID + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); temp[bi] += temp[ai]; } offset *= 2; } if (threadID == 0) { temp[powerOfTwo - 1 + CONFLICT_FREE_OFFSET(powerOfTwo - 1)] = 0; // clear the last element } for (int d = 1; d < powerOfTwo; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (threadID < d) { int ai = offset * (2 * threadID + 1) - 1; int bi = offset * (2 * threadID + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += 
CONFLICT_FREE_OFFSET(bi); int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); if (threadID < n) { output[ai] = temp[ai + bankOffsetA]; output[bi] = temp[bi + bankOffsetB]; } } __global__ void prescan_arbitrary_unoptimized(int *output, int *input, int n, int powerOfTwo) { extern __shared__ int temp[];// allocated on invocation int threadID = threadIdx.x; if (threadID < n) { temp[2 * threadID] = input[2 * threadID]; // load input into shared memory temp[2 * threadID + 1] = input[2 * threadID + 1]; } else { temp[2 * threadID] = 0; temp[2 * threadID + 1] = 0; } int offset = 1; for (int d = powerOfTwo >> 1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (threadID < d) { int ai = offset * (2 * threadID + 1) - 1; int bi = offset * (2 * threadID + 2) - 1; temp[bi] += temp[ai]; } offset *= 2; } if (threadID == 0) { temp[powerOfTwo - 1] = 0; } // clear the last element for (int d = 1; d < powerOfTwo; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (threadID < d) { int ai = offset * (2 * threadID + 1) - 1; int bi = offset * (2 * threadID + 2) - 1; int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); if (threadID < n) { output[2 * threadID] = temp[2 * threadID]; // write results to device memory output[2 * threadID + 1] = temp[2 * threadID + 1]; } } __global__ void prescan_large(int *output, int *input, int n, int *sums) { extern __shared__ int temp[]; int blockID = blockIdx.x; int threadID = threadIdx.x; int blockOffset = blockID * n; int ai = threadID; int bi = threadID + (n / 2); int bankOffsetA = CONFLICT_FREE_OFFSET(ai); int bankOffsetB = CONFLICT_FREE_OFFSET(bi); temp[ai + bankOffsetA] = input[blockOffset + ai]; temp[bi + bankOffsetB] = input[blockOffset + bi]; int offset = 1; for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (threadID < d) { int ai = offset * (2 * threadID + 1) - 1; int bi = offset * (2 * threadID + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); temp[bi] += temp[ai]; } offset *= 2; } __syncthreads(); if (threadID == 0) { sums[blockID] = temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)]; temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0; } for (int d = 1; d < n; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (threadID < d) { int ai = offset * (2 * threadID + 1) - 1; int bi = offset * (2 * threadID + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); output[blockOffset + ai] = temp[ai + bankOffsetA]; output[blockOffset + bi] = temp[bi + bankOffsetB]; } __global__ void prescan_large_unoptimized(int *output, int *input, int n, int *sums) { int blockID = blockIdx.x; int threadID = threadIdx.x; int blockOffset = blockID * n; extern __shared__ int temp[]; temp[2 * threadID] = input[blockOffset + (2 * threadID)]; temp[2 * threadID + 1] = input[blockOffset + (2 * threadID) + 1]; int offset = 1; for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (threadID < d) { int ai = offset * (2 * threadID + 1) - 1; int bi = offset * (2 * threadID + 2) - 1; temp[bi] += temp[ai]; } offset *= 2; } __syncthreads(); if (threadID == 0) { sums[blockID] = temp[n - 1]; temp[n - 1] = 0; } for (int d = 1; d < n; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (threadID < d) { int ai = offset * (2 * threadID + 1) - 1; int bi = offset * (2 * threadID + 2) - 1; 
int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); output[blockOffset + (2 * threadID)] = temp[2 * threadID]; output[blockOffset + (2 * threadID) + 1] = temp[2 * threadID + 1]; } __global__ void add(int *output, int length, int *n) { int blockID = blockIdx.x; int threadID = threadIdx.x; int blockOffset = blockID * length; output[blockOffset + threadID] += n[blockID]; } __global__ void add(int *output, int length, int *n1, int *n2) { int blockID = blockIdx.x; int threadID = threadIdx.x; int blockOffset = blockID * length; output[blockOffset + threadID] += n1[blockID] + n2[blockID]; } /*///////////////////////////////////*/ /* utils.cpp */ /*///////////////////////////////////*/ void _checkCudaError(const char *message, cudaError_t err, const char *caller) { if (err != cudaSuccess) { fprintf(stderr, "Error in: %s\n", caller); fprintf(stderr, message); fprintf(stderr, ": %s\n", cudaGetErrorString(err)); exit(0); } } void printResult(const char* prefix, int result, long nanoseconds) { printf(" "); printf(prefix); printf(" : %i in %ld ms \n", result, nanoseconds / 1000); } void printResult(const char* prefix, int result, float milliseconds) { printf(" "); printf(prefix); printf(" : %i in %f ms \n", result, milliseconds); } // from https://stackoverflow.com/a/3638454 bool isPowerOfTwo(int x) { return x && !(x & (x - 1)); } // from https://stackoverflow.com/a/12506181 int nextPowerOfTwo(int x) { int power = 1; while (power < x) { power *= 2; } return power; } // from https://stackoverflow.com/a/36095407 // Get the current time in nanoseconds long get_nanos() { struct timespec ts; timespec_get(&ts, TIME_UTC); return (long)ts.tv_sec * 1000000000L + ts.tv_nsec; }
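For context, a hypothetical host driver for the scan() entry point above (not part of the corpus; the array size and fill values are illustrative). Both pointers must be device arrays, bcao selects the bank-conflict-avoidance-optimised kernels, and because N here is a multiple of ELEMENTS_PER_BLOCK the even large-scan path is taken:

#include <cuda_runtime.h>
#include <cstdio>

void scan(int *d_out, int *d_in, int length, bool bcao);   // declared in the file above

int main() {
    const int N = 1 << 20;
    int *h_in = new int[N], *h_out = new int[N];
    for (int i = 0; i < N; ++i) h_in[i] = 1;                // all ones -> out[i] == i

    int *d_in, *d_out;
    cudaMalloc((void **)&d_in,  N * sizeof(int));
    cudaMalloc((void **)&d_out, N * sizeof(int));
    cudaMemcpy(d_in, h_in, N * sizeof(int), cudaMemcpyHostToDevice);

    scan(d_out, d_in, N, /*bcao=*/true);                    // exclusive prefix sum on the GPU

    cudaMemcpy(h_out, d_out, N * sizeof(int), cudaMemcpyDeviceToHost);
    printf("out[0]=%d out[%d]=%d\n", h_out[0], N - 1, h_out[N - 1]);   // 0 and N-1
    delete[] h_in; delete[] h_out;
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}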
c22a2bcd6d1a0cad3ba4f4c3e53ce5ac8cbc2871.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from magmablas/ztranspose_conj.cu, normal z -> c, Sun Nov 20 20:20:30 2016 @author Stan Tomov @author Mark Gates */ #include "magma_internal.h" #define PRECISION_c #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // nearly same code in ctranspose.cu // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) static __device__ void ctranspose_conj_device( int m, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *AT, int ldat) { __shared__ magmaFloatComplex sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = MAGMA_C_CONJ( A[j2*lda] ); } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void ctranspose_conj_kernel( int m, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *AT, int ldat) { ctranspose_conj_device(m, n, A, lda, AT, ldat); } __global__ void ctranspose_conj_kernel_batched( int m, int n, magmaFloatComplex **dA_array, int lda, magmaFloatComplex **dAT_array, int ldat) { int batchid = blockIdx.z; ctranspose_conj_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /***************************************************************************//** Purpose ------- ctranspose_conj_q copies and conjugate-transposes a matrix dA to matrix dAT. Same as ctranspose_conj, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA COMPLEX array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT COMPLEX array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_transpose *******************************************************************************/ extern "C" void magmablas_ctranspose_conj( magma_int_t m, magma_int_t n, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) ); hipLaunchKernelGGL(( ctranspose_conj_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dAT, lddat ); } /***************************************************************************//** Purpose ------- ctranspose_conj_batched_q copies and conjugate-transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as ctranspose_conj_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array COMPLEX* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array COMPLEX* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_transpose_batched *******************************************************************************/ extern "C" void magmablas_ctranspose_conj_batched( magma_int_t m, magma_int_t n, magmaFloatComplex **dA_array, magma_int_t ldda, magmaFloatComplex **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount ); hipLaunchKernelGGL(( ctranspose_conj_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA_array, ldda, dAT_array, lddat ); }
c22a2bcd6d1a0cad3ba4f4c3e53ce5ac8cbc2871.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from magmablas/ztranspose_conj.cu, normal z -> c, Sun Nov 20 20:20:30 2016 @author Stan Tomov @author Mark Gates */ #include "magma_internal.h" #define PRECISION_c #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // nearly same code in ctranspose.cu // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) static __device__ void ctranspose_conj_device( int m, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *AT, int ldat) { __shared__ magmaFloatComplex sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = MAGMA_C_CONJ( A[j2*lda] ); } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void ctranspose_conj_kernel( int m, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *AT, int ldat) { ctranspose_conj_device(m, n, A, lda, AT, ldat); } __global__ void ctranspose_conj_kernel_batched( int m, int n, magmaFloatComplex **dA_array, int lda, magmaFloatComplex **dAT_array, int ldat) { int batchid = blockIdx.z; ctranspose_conj_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /***************************************************************************//** Purpose ------- ctranspose_conj_q copies and conjugate-transposes a matrix dA to matrix dAT. Same as ctranspose_conj, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA COMPLEX array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT COMPLEX array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_transpose *******************************************************************************/ extern "C" void magmablas_ctranspose_conj( magma_int_t m, magma_int_t n, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) ); ctranspose_conj_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dAT, lddat ); } /***************************************************************************//** Purpose ------- ctranspose_conj_batched_q copies and conjugate-transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as ctranspose_conj_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array COMPLEX* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array COMPLEX* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_transpose_batched *******************************************************************************/ extern "C" void magmablas_ctranspose_conj_batched( magma_int_t m, magma_int_t n, magmaFloatComplex **dA_array, magma_int_t ldda, magmaFloatComplex **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount ); ctranspose_conj_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA_array, ldda, dAT_array, lddat ); }
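To make the kernels' contract concrete: with column-major storage and leading dimensions ldda >= m and lddat >= n, as documented in the Arguments sections above, the output satisfies dAT(j,i) = conj(dA(i,j)). A small host reference of that mapping, useful for checking the GPU result (illustrative only, not part of MAGMA):

#include <complex>

// Reference conjugate transpose: A is m-by-n, AT is n-by-m, both column-major.
void ctranspose_conj_ref(int m, int n,
                         const std::complex<float> *A, int ldda,
                         std::complex<float> *AT, int lddat) {
    for (int j = 0; j < n; ++j)
        for (int i = 0; i < m; ++i)
            AT[j + i * lddat] = std::conj(A[i + j * ldda]);   // AT(j,i) = conj(A(i,j))
}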
573d6e23ae54744cc2fba99de776941fe6688fce.hip
// !!! This is a file automatically generated by hipify!!! // Compile: nvcc -g -G -arch=sm_52 -std=c++11 assignment5-p2.cu -o assignment5-p2 #include <hip/hip_runtime.h> #include <iostream> #include <sys/time.h> #define THRESHOLD (0.000001) //Set tile dimension to be a power of 2 #define tileDim (1<<11) using std::cerr; using std::cout; using std::endl; __host__ void host_excl_prefix_sum(float* h_A, float* h_O, int N) { h_O[0] = 0; for (int i = 1; i < N; i++) { h_O[i] = h_O[i - 1] + h_A[i - 1]; } } __global__ void kernel_excl_prefix_sum(float* d_in, float* d_out, int N) { // TODO: Fill in int istart = (blockDim.x*blockIdx.x + threadIdx.x)*tileDim; int iend = min(N,(istart+tileDim)); for(int i=istart+1;i<iend;i++){ d_out[i] = d_out[i-1] + d_in[i-1]; } } __global__ void kernel_preprocess(float* d_in, float* d_out, int N) { // TODO: Fill in int i = (blockDim.x*blockIdx.x + threadIdx.x)*tileDim; if(i>0 && i<N){ for(int j=i-tileDim;j<i;j++){ d_out[i] = d_out[i] + d_in[j]; } } } __host__ void check_result(float* w_ref, float* w_opt, int N) { double maxdiff = 0.0, this_diff = 0.0; int numdiffs = 0; for (int i = 0; i < N; i++) { this_diff = w_ref[i] - w_opt[i]; if (fabs(this_diff) > THRESHOLD) { numdiffs++; if (this_diff > maxdiff) maxdiff = this_diff; } } if (numdiffs > 0) { cout << numdiffs << " Diffs found over threshold " << THRESHOLD << "; Max Diff = " << maxdiff << endl; } else { cout << "No differences found between base and test versions\n"; } } __host__ double rtclock() { // Seconds struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday(&Tp, &Tzp); if (stat != 0) { cout << "Error return from gettimeofday: " << stat << "\n"; } return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); } int main() { const int N = (1 << 24); size_t size = N * sizeof(float); float* h_in = (float*)malloc(size); std::fill_n(h_in, N, 1); float* h_excl_sum_out = (float*)malloc(size); std::fill_n(h_excl_sum_out, N, 0); double clkbegin = rtclock(); host_excl_prefix_sum(h_in, h_excl_sum_out, N); double clkend = rtclock(); double time = clkend - clkbegin; // seconds cout << "Serial time on CPU: " << time * 1000 << " msec" << endl; float* h_dev_result = (float*)malloc(size); std::fill_n(h_dev_result, N, 0); float* d_in; float* d_out; hipError_t status; hipEvent_t start, end; // TODO: Fill in status = hipMalloc(&d_in,size); status = hipMalloc(&d_out,size); if(status!=hipSuccess){ cout<<"Error in Cuda Malloc"<<endl; } hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start, 0); status = hipMemcpy(d_in, h_in, size, hipMemcpyHostToDevice); int numtiles = (int)ceil(((double)N)/((double)tileDim)); int threadPerBlock = min(numtiles,1<<10); int numBlock = (int) ceil(((double)numtiles)/((double)threadPerBlock)); dim3 GridD(numBlock,1,1); dim3 BlockD(threadPerBlock,1,1); hipLaunchKernelGGL(( kernel_preprocess), dim3(GridD),dim3(BlockD), 0, 0, d_in,d_out,N); status = hipMemcpy(h_dev_result, d_out, size, hipMemcpyDeviceToHost); for(int i=tileDim;i<N;i+=tileDim){ h_dev_result[i] = h_dev_result[i] + h_dev_result[i-tileDim]; } status = hipMemcpy(d_out, h_dev_result, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel_excl_prefix_sum), dim3(GridD),dim3(BlockD), 0, 0, d_in,d_out,N); status = hipMemcpy(h_dev_result, d_out, size, hipMemcpyDeviceToHost); hipEventRecord(end, 0); hipEventSynchronize(end); check_result(h_excl_sum_out, h_dev_result, N); float k_time; // ms hipEventElapsedTime(&k_time, start, end); hipEventDestroy(start); hipEventDestroy(end); cout << "Kernel time on GPU: " << k_time << " msec" << endl; // 
Free device memory hipFree(d_in); hipFree(d_out); free(h_in); free(h_excl_sum_out); free(h_dev_result); return EXIT_SUCCESS; }
573d6e23ae54744cc2fba99de776941fe6688fce.cu
// Compile: nvcc -g -G -arch=sm_52 -std=c++11 assignment5-p2.cu -o assignment5-p2 #include <cuda.h> #include <iostream> #include <sys/time.h> #define THRESHOLD (0.000001) //Set tile dimension to be a power of 2 #define tileDim (1<<11) using std::cerr; using std::cout; using std::endl; __host__ void host_excl_prefix_sum(float* h_A, float* h_O, int N) { h_O[0] = 0; for (int i = 1; i < N; i++) { h_O[i] = h_O[i - 1] + h_A[i - 1]; } } __global__ void kernel_excl_prefix_sum(float* d_in, float* d_out, int N) { // TODO: Fill in int istart = (blockDim.x*blockIdx.x + threadIdx.x)*tileDim; int iend = min(N,(istart+tileDim)); for(int i=istart+1;i<iend;i++){ d_out[i] = d_out[i-1] + d_in[i-1]; } } __global__ void kernel_preprocess(float* d_in, float* d_out, int N) { // TODO: Fill in int i = (blockDim.x*blockIdx.x + threadIdx.x)*tileDim; if(i>0 && i<N){ for(int j=i-tileDim;j<i;j++){ d_out[i] = d_out[i] + d_in[j]; } } } __host__ void check_result(float* w_ref, float* w_opt, int N) { double maxdiff = 0.0, this_diff = 0.0; int numdiffs = 0; for (int i = 0; i < N; i++) { this_diff = w_ref[i] - w_opt[i]; if (fabs(this_diff) > THRESHOLD) { numdiffs++; if (this_diff > maxdiff) maxdiff = this_diff; } } if (numdiffs > 0) { cout << numdiffs << " Diffs found over threshold " << THRESHOLD << "; Max Diff = " << maxdiff << endl; } else { cout << "No differences found between base and test versions\n"; } } __host__ double rtclock() { // Seconds struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday(&Tp, &Tzp); if (stat != 0) { cout << "Error return from gettimeofday: " << stat << "\n"; } return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); } int main() { const int N = (1 << 24); size_t size = N * sizeof(float); float* h_in = (float*)malloc(size); std::fill_n(h_in, N, 1); float* h_excl_sum_out = (float*)malloc(size); std::fill_n(h_excl_sum_out, N, 0); double clkbegin = rtclock(); host_excl_prefix_sum(h_in, h_excl_sum_out, N); double clkend = rtclock(); double time = clkend - clkbegin; // seconds cout << "Serial time on CPU: " << time * 1000 << " msec" << endl; float* h_dev_result = (float*)malloc(size); std::fill_n(h_dev_result, N, 0); float* d_in; float* d_out; cudaError_t status; cudaEvent_t start, end; // TODO: Fill in status = cudaMalloc(&d_in,size); status = cudaMalloc(&d_out,size); if(status!=cudaSuccess){ cout<<"Error in Cuda Malloc"<<endl; } cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start, 0); status = cudaMemcpy(d_in, h_in, size, cudaMemcpyHostToDevice); int numtiles = (int)ceil(((double)N)/((double)tileDim)); int threadPerBlock = min(numtiles,1<<10); int numBlock = (int) ceil(((double)numtiles)/((double)threadPerBlock)); dim3 GridD(numBlock,1,1); dim3 BlockD(threadPerBlock,1,1); kernel_preprocess<<<GridD,BlockD>>>(d_in,d_out,N); status = cudaMemcpy(h_dev_result, d_out, size, cudaMemcpyDeviceToHost); for(int i=tileDim;i<N;i+=tileDim){ h_dev_result[i] = h_dev_result[i] + h_dev_result[i-tileDim]; } status = cudaMemcpy(d_out, h_dev_result, size, cudaMemcpyHostToDevice); kernel_excl_prefix_sum<<<GridD,BlockD>>>(d_in,d_out,N); status = cudaMemcpy(h_dev_result, d_out, size, cudaMemcpyDeviceToHost); cudaEventRecord(end, 0); cudaEventSynchronize(end); check_result(h_excl_sum_out, h_dev_result, N); float k_time; // ms cudaEventElapsedTime(&k_time, start, end); cudaEventDestroy(start); cudaEventDestroy(end); cout << "Kernel time on GPU: " << k_time << " msec" << endl; // Free device memory cudaFree(d_in); cudaFree(d_out); free(h_in); free(h_excl_sum_out); free(h_dev_result); return EXIT_SUCCESS; 
}
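The two kernels above implement a tiled exclusive prefix sum in three phases: kernel_preprocess accumulates each tile's sum into d_out at the tile boundary, the host loop turns those tile sums into exclusive per-tile offsets, and kernel_excl_prefix_sum then scans inside each tile starting from its offset. Note that kernel_preprocess accumulates into d_out without the buffer ever being zeroed on the device, and d_out[0] (the base of tile 0) is never written before kernel_excl_prefix_sum reads it, so the code appears to rely on the freshly cudaMalloc'd buffer containing zeros, which is not guaranteed. A host-side sketch of the same decomposition, with the zero-initialisation made explicit (illustrative, not part of the assignment file):

#include <vector>

void tiled_excl_prefix_sum(const std::vector<float>& in, std::vector<float>& out, int tile) {
    const int n = (int)in.size();
    out.assign(n, 0.0f);                                   // explicit zero base
    // Phases 1+2: out[t] becomes the exclusive sum of all elements before tile t.
    for (int t = tile; t < n; t += tile) {
        float tileSum = 0.0f;
        for (int j = t - tile; j < t; ++j) tileSum += in[j];
        out[t] = out[t - tile] + tileSum;
    }
    // Phase 3: independent exclusive scan inside every tile, seeded by its offset.
    for (int start = 0; start < n; start += tile)
        for (int i = start + 1; i < n && i < start + tile; ++i)
            out[i] = out[i - 1] + in[i - 1];
}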
b58d67fba8dda31da4fdb5ebdccf1b2bb7194a39.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void norm_components(float* N, int npix, float* norm)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < npix) {
        norm[i] = fmaxf(1e-10, sqrtf(N[i] * N[i] + N[npix + i] * N[npix + i] + N[npix * 2 + i] * N[npix * 2 + i]));
    }
}
b58d67fba8dda31da4fdb5ebdccf1b2bb7194a39.cu
#include "includes.h"

__global__ void norm_components(float* N, int npix, float* norm)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < npix) {
        norm[i] = fmaxf(1e-10, sqrtf(N[i] * N[i] + N[npix + i] * N[npix + i] + N[npix * 2 + i] * N[npix * 2 + i]));
    }
}
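A hypothetical launch of the kernel above (not from the corpus). The indexing implies a planar layout: N holds npix x-components, then npix y-components, then npix z-components, and one clamped vector length per pixel is written to norm. The image size and block size below are assumptions for illustration:

#include <cuda_runtime.h>

__global__ void norm_components(float* N, int npix, float* norm);   // kernel defined above

int main() {
    const int npix = 512 * 512;                           // assumed image size
    float *d_N, *d_norm;
    cudaMalloc((void **)&d_N,    3 * npix * sizeof(float));   // planar x|y|z blocks
    cudaMalloc((void **)&d_norm, npix * sizeof(float));
    cudaMemset(d_N, 0, 3 * npix * sizeof(float));         // all-zero input -> every norm clamps to 1e-10

    const int threads = 256;                              // assumed block size
    const int blocks  = (npix + threads - 1) / threads;
    norm_components<<<blocks, threads>>>(d_N, npix, d_norm);
    cudaDeviceSynchronize();

    cudaFree(d_N);
    cudaFree(d_norm);
    return 0;
}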
4e3d961dc11276c264bbb3d6877cc5554683af69.hip
// !!! This is a file automatically generated by hipify!!! /* Particle swarm optimization by Ivan Vinogradov 2016 */ #include <iostream> #include <chrono> #include <cfloat> #include <cmath> // OpenGL #include <GL/glew.h> #include <GL/freeglut.h> // CUDA #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> // cuRAND #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> // Thrust #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include "particle.cuh" #define CSC(call) { \ hipError_t err = call; \ if (err != hipSuccess) { \ fprintf(stderr, "CUDA error in file '%s' in line %i: %s.\n", __FILE__, \ __LINE__, hipGetErrorString(err)); \ exit(1); \ } \ } \ while (0) #define DEBUG false #define GLOBAL_COEFF 0.000050 // coefficient of global solution #define LOCAL_COEFF 0.00000010 // coefficient of local solution #define RANDOM_COEFF 0.010 // coefficient of random motion #define DAMPING_COEFF 0.99250 // coefficient of damping force #define REPULSION_COEFF 0.10 // coefficient of repulsive force // CUDA grid dim3 blocks_2d(16, 16); dim3 threads_2d(32, 32); dim3 blocks_1d(16); dim3 threads_1d(1024); // Number of particles int numberOfParticles = 1000; // Window size int width = 1024, height = 640; // Particle size int particleSize = 1; // Cell size for uniform space partitioning float cellSize = 5.0; // Center, zoom float centerX = 0.0; float centerY = 0.0; float zoomX = 10000.0; float zoomY = zoomX * height / width; // Time float timeValue = 0.0; float timeStep = 0.001; // Automatic centering (press key Q to switch) bool autoCenter = true; // OpenGL buffer GLuint vbo; // CUDA resource for OpenGL output struct cudaGraphicsResource *res; Particle *devParticleArray; ParticleArea *devPartileAreaArray; hiprandState_t *devRandomState; // Window size, center, zoom, function min-max, time __constant__ int devWidth; __constant__ int devHeight; __constant__ int devParticleSize; __constant__ float devCenterX; __constant__ float devCenterY; __constant__ float devZoomX; __constant__ float devZoomY; __constant__ float devTimeValue; __constant__ float devTimeStep; // Parameters of uniform space partitioning __constant__ float devUniformSpaceMinX; __constant__ float devUniformSpaceMaxX; __constant__ float devUniformSpaceMinY; __constant__ float devUniformSpaceMaxY; __constant__ float devUniformSpaceCellSize; // Screen coordinates into real coordinates __device__ float2 indexToCoord(int2 index) { return make_float2( (2.0f * index.x / (float)(devWidth - 1) - 1.0f) * devZoomX + devCenterX, -(2.0f * index.y / (float)(devHeight - 1) - 1.0f) * devZoomY + devCenterY); } // Real coordinates into screen coordinates __device__ int2 coordToIndex(float2 coord) { return make_int2( 0.5f * (devWidth - 1) * (1.0f + (coord.x - devCenterX) / devZoomX), 0.5f * (devHeight - 1) * (1.0f - (coord.y - devCenterY) / devZoomY) ); } /* Schwefel Function */ __device__ float fun(float2 coord) { return -coord.x * sin(sqrt(fabs(coord.x))) - coord.y * sin(sqrt(fabs(coord.y))); } __device__ float fun(int2 index) { return fun(indexToCoord(index)); } __global__ void initRandomState(hiprandState_t *state, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsetx = blockDim.x * gridDim.x; for (int i = idx; i < n; i += offsetx) { hiprand_init(1337, i, 0, &state[i]); } } __global__ void kernelSwarmInit(Particle *particles, int n, hiprandState_t *state) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsetx = blockDim.x * gridDim.x; Particle p; for (int i 
= idx; i < n; i += offsetx) { p = particles[i]; // Position in the center of the screen p.coords = p.best_coords = indexToCoord(make_int2(devWidth / 2, devHeight / 2)); // Random starting angle and the speed float angle = 2.0 * 3.14 * hiprand_uniform(&state[i]); float speed = 100.0 * hiprand_uniform(&state[i]); p.speed = make_float2(cos(angle) * speed, sin(angle) * speed); p.value = p.best_value = FLT_MAX; particles[i] = p; } } __global__ void kernelSwarmUpdate(uchar4 *image, Particle *particles, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsetx = blockDim.x * gridDim.x; Particle p; for (int i = idx; i < n; i += offsetx) { p = particles[i]; p.value = fun(p.coords); if (p.value < p.best_value) { p.best_value = p.value; p.best_coords = p.coords; } particles[i] = p; } } __global__ void kernelNormalizedHeatMap(uchar4 *heatMap, float minValue, float maxValue) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; int offsetx = blockDim.x * gridDim.x; int offsety = blockDim.y * gridDim.y; int i, j; float f; for (i = idx; i < devWidth; i += offsetx) { for (j = idy; j < devHeight; j += offsety) { f = (fun(make_int2(i, j)) - minValue) / (maxValue - minValue); if (f < 0.0) f = 0.0; else if (f > 1.0) f = 1.0; heatMap[j * devWidth + i] = make_uchar4( (int)(f * 255), 0, (int)((1.0 - f) * 255), 255 ); } } } __global__ void kernelSwarmDraw(uchar4 *heatMap, Particle *particles, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsetx = blockDim.x * gridDim.x; int size = devParticleSize; for (int i = idx; i < n; i += offsetx) { int2 index = coordToIndex(particles[i].coords); for (int x = index.x - size; x <= index.x + size; x++) { for (int y = index.y - size; y <= index.y + size; y++) { if (x >= 0 && x < devWidth && y >= 0 && y < devHeight && (x - index.x) * (x - index.x) + (y - index.y) * (y - index.y) <= size * size ) { if (DEBUG && i == n / 2) { heatMap[y * devWidth + x] = make_uchar4(0, 255, 0, 255); continue; } heatMap[y * devWidth + x] = make_uchar4(255, 255, 255, 255); } } } } } // The assignment for each particle corresponding space partitioning cell __global__ void kernelSwarmAssociateWithCells(Particle *particles, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsetx = blockDim.x * gridDim.x; int sizeX = ceil((devUniformSpaceMaxX - devUniformSpaceMinX) / devUniformSpaceCellSize); int sizeY = ceil((devUniformSpaceMaxY - devUniformSpaceMinY) / devUniformSpaceCellSize); Particle p; int cellX, cellY; for (int i = idx; i < n; i += offsetx) { p = particles[i]; cellX = (p.coords.x - devUniformSpaceMinX) / devUniformSpaceCellSize; cellY = (p.coords.y - devUniformSpaceMinY) / devUniformSpaceCellSize; p.cellIndex = cellX * sizeX + cellY; particles[i] = p; } } // The total force of repulsion for the i-th particle (without the space partitioning) __device__ float2 calculateRepulsionAll(Particle *particles, int n, int i) { float2 repulsion, diff, coords_a, coords_b; float distance; // TODO float minDistance = FLT_MAX; repulsion.x = 0.0; repulsion.y = 0.0; coords_a = particles[i].coords; for (int j = 0; j < n; j++) { if (j == i) continue; coords_b = particles[j].coords; diff.x = coords_a.x - coords_b.x; diff.y = coords_a.y - coords_b.y; distance = sqrt(diff.x * diff.x + diff.y * diff.y); // TODO if (DEBUG && i == n / 2 && distance < minDistance) { minDistance = distance; } distance = pow(distance, 5); if (distance < 0.5) distance = 0.5; repulsion.x += diff.x / distance; repulsion.y += diff.y / distance; } // TODO if 
(DEBUG && i == n / 2) { printf("distance: %lf; interactions: %d\n", minDistance, n); } return repulsion; } // Binary search in a sorted array of particles by cell index of space partitioning __device__ int binarySearchLowerBound(Particle *particles, int size, int cellIndex) { int left = 0, right = size - 1, middle; while (left <= right) { middle = left + (right - left) / 2; if ((&particles[middle])->cellIndex < cellIndex) left = middle + 1; else right = middle - 1; } return left; } // Check that the particle was found in binarySearchLowerBound __device__ int isFound(Particle *particles, int size, int cellIndex, int index) { return index < size && particles[index].cellIndex == cellIndex; } // Search the first particle with specified cell partition index __device__ int findParticleByCell(Particle *particles, int size, int cellIndex) { int index = binarySearchLowerBound(particles, size, cellIndex); return isFound(particles, size, cellIndex, index) ? index : -1; } // The total force of repulsion for the i-th particle (with the space partitioning) __device__ float2 calculateRepulsionClosest(Particle *particles, int n, int i) { float2 diff, repulsion = make_float2(0.0, 0.0); float distance; // TODO float minDistance = FLT_MAX; // Counter of interacting particles int counter = 0; // Dimensions of the space partitioning int sizeX = ceil(abs(devUniformSpaceMaxX - devUniformSpaceMinX) / devUniformSpaceCellSize); int sizeY = ceil(abs(devUniformSpaceMaxY - devUniformSpaceMinY) / devUniformSpaceCellSize); if (sizeX < 1) sizeX = 1; if (sizeY < 1) sizeY = 1; Particle pa, pb; pa = particles[i]; // TODO: int cellX = pa.cellIndex / sizeX; int cellY = pa.cellIndex % sizeX; int radius = 1; for (int x = cellX - radius; x <= cellX + radius; x++) { for (int y = cellY - radius; y <= cellY + radius; y++) { int neighborCellIndex = x * sizeX + y; int neighborIndex = findParticleByCell(particles, n, neighborCellIndex); if (neighborIndex != -1) { for (int k = neighborIndex; k < n; k++) { if (k == i) continue; pb = particles[k]; if (pb.cellIndex != neighborCellIndex) break; diff.x = pa.coords.x - pb.coords.x; diff.y = pa.coords.y - pb.coords.y; distance = sqrt(diff.x * diff.x + diff.y * diff.y); // TODO if (DEBUG && i == n / 2 && distance < minDistance) { minDistance = distance; } distance = pow(distance, 5); if (distance < 0.5) distance = 0.5; repulsion.x += diff.x / distance; repulsion.y += diff.y / distance; counter++; } } } } // TODO if (DEBUG && i == n / 2 && counter > 0) { printf("distance: %lf; interactions: %d\n", minDistance, counter); } return repulsion; } __global__ void kernelSwarmMove(uchar4 *image, Particle *particles, int n, float2 global_minimum, hiprandState_t *state) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsetx = blockDim.x * gridDim.x; // The speed_coeff drops from 5 to 0.01 depending on the time float speed_coeff = 5.0 / (1.0 + 0.1 * pow(devTimeValue, 4)); speed_coeff = speed_coeff < 0.01 ? 
0.01 : speed_coeff; if (DEBUG && idx == 0) { printf("speed_coeff: %lf\n", speed_coeff); } float rnd_1, rnd_2, rnd_3, rnd_4; Particle p; float2 repulsion; for (int i = idx; i < n; i += offsetx) { rnd_1 = hiprand_uniform(&state[i]); rnd_2 = hiprand_uniform(&state[i]); rnd_3 = hiprand_uniform(&state[i]); rnd_4 = hiprand_uniform(&state[i]); p = particles[i]; p.speed.x = DAMPING_COEFF * p.speed.x + speed_coeff * ( rnd_1 * LOCAL_COEFF * (p.best_coords.x - p.coords.x) + rnd_2 * GLOBAL_COEFF * (global_minimum.x - p.coords.x) + RANDOM_COEFF * (rnd_3 - 0.5) ); p.speed.y = DAMPING_COEFF * p.speed.y + speed_coeff * ( rnd_1 * LOCAL_COEFF * (p.best_coords.y - p.coords.y) + rnd_2 * GLOBAL_COEFF * (global_minimum.y - p.coords.y) + RANDOM_COEFF * (rnd_4 - 0.5) ); repulsion = calculateRepulsionClosest(particles, n, i); p.speed.x += REPULSION_COEFF * repulsion.x; p.speed.y += REPULSION_COEFF * repulsion.y; p.coords.x += p.speed.x; p.coords.y += p.speed.y; particles[i] = p; } } void copySizeToGPU() { CSC(hipMemcpyToSymbol((const void *)&devWidth, &width, sizeof(int))); CSC(hipMemcpyToSymbol((const void *)&devHeight, &height, sizeof(int))); } void copyZoomToGPU() { CSC(hipMemcpyToSymbol((const void *)&devZoomX, &zoomX, sizeof(float))); CSC(hipMemcpyToSymbol((const void *)&devZoomY, &zoomY, sizeof(float))); } void copyParticleSizeToGPU() { CSC(hipMemcpyToSymbol((const void *)&devParticleSize, &particleSize, sizeof(int))); } void copyCenterToGPU() { CSC(hipMemcpyToSymbol((const void *)&devCenterX, &centerX, sizeof(float))); CSC(hipMemcpyToSymbol((const void *)&devCenterY, &centerY, sizeof(float))); } void copyTimeToGPU() { CSC(hipMemcpyToSymbol((const void *)&devTimeValue, &timeValue, sizeof(float))); CSC(hipMemcpyToSymbol((const void *)&devTimeStep, &timeStep, sizeof(float))); } void copyUniformSpaceToGPU(float minX, float maxX, float minY, float maxY, float cellSize) { CSC(hipMemcpyToSymbol((const void *)&devUniformSpaceMinX, &minX, sizeof(float))); CSC(hipMemcpyToSymbol((const void *)&devUniformSpaceMaxX, &maxX, sizeof(float))); CSC(hipMemcpyToSymbol((const void *)&devUniformSpaceMinY, &minY, sizeof(float))); CSC(hipMemcpyToSymbol((const void *)&devUniformSpaceMaxY, &maxY, sizeof(float))); CSC(hipMemcpyToSymbol((const void *)&devUniformSpaceCellSize, &cellSize, sizeof(float))); } void copyToGPU() { copySizeToGPU(); copyZoomToGPU(); copyParticleSizeToGPU(); copyCenterToGPU(); copyTimeToGPU(); } void update() { auto t_start = std::chrono::high_resolution_clock::now(); copyToGPU(); uchar4 *devHeatMap; size_t size; CSC(hipGraphicsMapResources(1, &res, 0)); CSC(hipGraphicsResourceGetMappedPointer((void **)&devHeatMap, &size, res)); // Update the function values and local minima for each particle hipLaunchKernelGGL(( kernelSwarmUpdate), dim3(blocks_1d), dim3(threads_1d), 0, 0, devHeatMap, devParticleArray, numberOfParticles); // The boundaries and the center of the swarm, minimum, maximum, the global minimum thrust::device_ptr<ParticleArea> startParticleAreaArray(devPartileAreaArray); thrust::device_ptr<ParticleArea> endParticleAreaArray = startParticleAreaArray + numberOfParticles; ParticleArea pa; pa.min_x = FLT_MAX ; pa.min_y = FLT_MAX ; pa.max_x = -FLT_MAX ; pa.max_y = -FLT_MAX ; pa.sum_x = 0.0; pa.sum_y = 0.0; pa.minValue = FLT_MAX ; pa.maxValue = -FLT_MAX ; pa.globalMinimum = FLT_MAX ; hipLaunchKernelGGL(( kernelInitParticleArea), dim3(blocks_1d), dim3(threads_1d), 0, 0, devParticleArray, devPartileAreaArray, numberOfParticles ); pa = thrust::reduce(startParticleAreaArray, endParticleAreaArray, pa, 
ParticleReductionFunctor()); // Align the window in the center of particle swarm if (autoCenter) { centerX = pa.sum_x / numberOfParticles; centerY = pa.sum_y / numberOfParticles; copyCenterToGPU(); } // Draw a heat map and particles hipLaunchKernelGGL(( kernelNormalizedHeatMap), dim3(blocks_2d), dim3(threads_2d), 0, 0, devHeatMap, pa.minValue, pa.maxValue); hipLaunchKernelGGL(( kernelSwarmDraw), dim3(blocks_1d), dim3(threads_1d), 0, 0, devHeatMap, devParticleArray, numberOfParticles); // Space partitioning copyUniformSpaceToGPU(pa.min_x, pa.max_x, pa.min_y, pa.max_y, cellSize); hipLaunchKernelGGL(( kernelSwarmAssociateWithCells), dim3(blocks_1d), dim3(threads_1d), 0, 0, devParticleArray, numberOfParticles); // Sort particles by cell index thrust::device_ptr<Particle> startParticleArray(devParticleArray); thrust::device_ptr<Particle> endParticleArray = startParticleArray + numberOfParticles; thrust::sort(startParticleArray, endParticleArray, ParticleSortByIndexComparator()); // Update particles position hipLaunchKernelGGL(( kernelSwarmMove), dim3(blocks_1d), dim3(threads_1d), 0, 0, devHeatMap, devParticleArray, numberOfParticles, pa.globalMinimumCoords, devRandomState ); timeValue += timeStep; CSC(hipDeviceSynchronize()); CSC(hipGraphicsUnmapResources(1, &res, 0)); auto t_end = std::chrono::high_resolution_clock::now(); double duration = std::chrono::duration<double, std::milli>(t_end-t_start).count(); if (DEBUG) { printf("%lf ms; center: %lf, %lf\n\n", duration, centerX, centerY); } glutPostRedisplay(); } void display() { glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(0.0, (GLdouble)width, 0.0, (GLdouble)height); glutInitWindowSize(width, height); glClearColor(0.0, 0.0, 0.0, 1.0); glClear(GL_COLOR_BUFFER_BIT); glDrawPixels(width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0); glutSwapBuffers(); } void reshapeFunc(int w, int h) { width = w; height = h; zoomY = zoomX * height / width; glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, width * height * sizeof(uchar4), NULL, GL_DYNAMIC_DRAW); CSC(hipGraphicsGLRegisterBuffer(&res, vbo, hipGraphicsMapFlagsWriteDiscard)); } void keyboardFunc(unsigned char key, int xmouse, int ymouse) { switch (key) { case 'w': centerY -= 0.1f * zoomY; break; case 'a': centerX -= 0.1f * zoomX; break; case 's': centerY += 0.1f * zoomY; break; case 'd': centerX += 0.1f * zoomX; break; case 'q': autoCenter = autoCenter ? false : true; break; case 45: particleSize -= 1; break; case 61: particleSize += 1; break; default: break; } if (particleSize < 0) particleSize = 0; else if (particleSize > 100) particleSize = 100; } void mouseWheelFunc(int wheel, int direction, int x, int y) { zoomX += direction < 0 ? 
0.1 * zoomX : -0.1 * zoomX; if (zoomX < 0.01) zoomX = 0.01; else if (zoomX > 1000000) zoomX = 1000000; zoomY = zoomX * height / width; } int main(int argc, char **argv) { std::cout << "Enter window width: "; std::cin >> width; std::cout << "Enter window height: ";\ std::cin >> height; std::cout << "Enter number of particles: "; std::cin >> numberOfParticles; std::cout << "Enter cell size: "; std::cin >> cellSize; copyToGPU(); hipMalloc((void **)&devRandomState, sizeof(hiprandState_t) * numberOfParticles); hipMalloc((void **)&devParticleArray, sizeof(Particle) * numberOfParticles); hipMalloc((void **)&devPartileAreaArray, sizeof(ParticleArea) * numberOfParticles); hipLaunchKernelGGL(( initRandomState), dim3(blocks_1d), dim3(threads_1d), 0, 0, devRandomState, numberOfParticles); hipLaunchKernelGGL(( kernelSwarmInit), dim3(blocks_1d), dim3(threads_1d), 0, 0, devParticleArray, numberOfParticles, devRandomState); glutInit(&argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); glutInitWindowSize(width, height); glutCreateWindow("Particle swarm optimization"); glutIdleFunc(update); glutDisplayFunc(display); glutReshapeFunc(reshapeFunc); glutKeyboardFunc(keyboardFunc); glutMouseWheelFunc(mouseWheelFunc); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(0.0, (GLdouble)width, 0.0, (GLdouble)height); glewInit(); glGenBuffers(1, &vbo); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, vbo); reshapeFunc(width, height); glutMainLoop(); CSC(hipGraphicsUnregisterResource(res)); glBindBuffer(1, vbo); glDeleteBuffers(1, &vbo); hipFree(devRandomState); hipFree(devParticleArray); hipFree(devRandomState); return 0; }
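Stripped of the CUDA plumbing, the per-particle update in kernelSwarmMove above combines damped inertia, attraction to the particle's own best and the swarm's global best, a random kick, and the cell-based repulsion term. A plain C++ restatement of exactly that update (coefficients copied from the #defines at the top of the file; r1..r4 are uniform draws in [0,1) and repulsion is the value returned by calculateRepulsionClosest):

struct Vec2 { float x, y; };

void pso_step(Vec2 &pos, Vec2 &vel,
              Vec2 localBest, Vec2 globalBest, Vec2 repulsion,
              float r1, float r2, float r3, float r4, float speedCoeff) {
    const float LOCAL = 0.00000010f, GLOBAL = 0.000050f, RANDOM = 0.010f;
    const float DAMPING = 0.99250f, REPULSION = 0.10f;
    vel.x = DAMPING * vel.x + speedCoeff * (r1 * LOCAL  * (localBest.x  - pos.x)
                                          + r2 * GLOBAL * (globalBest.x - pos.x)
                                          + RANDOM * (r3 - 0.5f))
          + REPULSION * repulsion.x;
    vel.y = DAMPING * vel.y + speedCoeff * (r1 * LOCAL  * (localBest.y  - pos.y)
                                          + r2 * GLOBAL * (globalBest.y - pos.y)
                                          + RANDOM * (r4 - 0.5f))
          + REPULSION * repulsion.y;
    pos.x += vel.x;                       // position integrates the new velocity
    pos.y += vel.y;
}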
4e3d961dc11276c264bbb3d6877cc5554683af69.cu
/* Particle swarm optimization by Ivan Vinogradov 2016 */ #include <iostream> #include <chrono> #include <cfloat> #include <cmath> // OpenGL #include <GL/glew.h> #include <GL/freeglut.h> // CUDA #include <cuda.h> #include <cuda_runtime.h> #include <cuda_gl_interop.h> // cuRAND #include <curand.h> #include <curand_kernel.h> // Thrust #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include "particle.cuh" #define CSC(call) { \ cudaError err = call; \ if (err != cudaSuccess) { \ fprintf(stderr, "CUDA error in file '%s' in line %i: %s.\n", __FILE__, \ __LINE__, cudaGetErrorString(err)); \ exit(1); \ } \ } \ while (0) #define DEBUG false #define GLOBAL_COEFF 0.000050 // coefficient of global solution #define LOCAL_COEFF 0.00000010 // coefficient of local solution #define RANDOM_COEFF 0.010 // coefficient of random motion #define DAMPING_COEFF 0.99250 // coefficient of damping force #define REPULSION_COEFF 0.10 // coefficient of repulsive force // CUDA grid dim3 blocks_2d(16, 16); dim3 threads_2d(32, 32); dim3 blocks_1d(16); dim3 threads_1d(1024); // Number of particles int numberOfParticles = 1000; // Window size int width = 1024, height = 640; // Particle size int particleSize = 1; // Cell size for uniform space partitioning float cellSize = 5.0; // Center, zoom float centerX = 0.0; float centerY = 0.0; float zoomX = 10000.0; float zoomY = zoomX * height / width; // Time float timeValue = 0.0; float timeStep = 0.001; // Automatic centering (press key Q to switch) bool autoCenter = true; // OpenGL buffer GLuint vbo; // CUDA resource for OpenGL output struct cudaGraphicsResource *res; Particle *devParticleArray; ParticleArea *devPartileAreaArray; curandState *devRandomState; // Window size, center, zoom, function min-max, time __constant__ int devWidth; __constant__ int devHeight; __constant__ int devParticleSize; __constant__ float devCenterX; __constant__ float devCenterY; __constant__ float devZoomX; __constant__ float devZoomY; __constant__ float devTimeValue; __constant__ float devTimeStep; // Parameters of uniform space partitioning __constant__ float devUniformSpaceMinX; __constant__ float devUniformSpaceMaxX; __constant__ float devUniformSpaceMinY; __constant__ float devUniformSpaceMaxY; __constant__ float devUniformSpaceCellSize; // Screen coordinates into real coordinates __device__ float2 indexToCoord(int2 index) { return make_float2( (2.0f * index.x / (float)(devWidth - 1) - 1.0f) * devZoomX + devCenterX, -(2.0f * index.y / (float)(devHeight - 1) - 1.0f) * devZoomY + devCenterY); } // Real coordinates into screen coordinates __device__ int2 coordToIndex(float2 coord) { return make_int2( 0.5f * (devWidth - 1) * (1.0f + (coord.x - devCenterX) / devZoomX), 0.5f * (devHeight - 1) * (1.0f - (coord.y - devCenterY) / devZoomY) ); } /* Schwefel Function */ __device__ float fun(float2 coord) { return -coord.x * sin(sqrt(fabs(coord.x))) - coord.y * sin(sqrt(fabs(coord.y))); } __device__ float fun(int2 index) { return fun(indexToCoord(index)); } __global__ void initRandomState(curandState *state, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsetx = blockDim.x * gridDim.x; for (int i = idx; i < n; i += offsetx) { curand_init(1337, i, 0, &state[i]); } } __global__ void kernelSwarmInit(Particle *particles, int n, curandState *state) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsetx = blockDim.x * gridDim.x; Particle p; for (int i = idx; i < n; i += offsetx) { p = particles[i]; // Position in the center of the screen p.coords = 
p.best_coords = indexToCoord(make_int2(devWidth / 2, devHeight / 2)); // Random starting angle and the speed float angle = 2.0 * 3.14 * curand_uniform(&state[i]); float speed = 100.0 * curand_uniform(&state[i]); p.speed = make_float2(cos(angle) * speed, sin(angle) * speed); p.value = p.best_value = FLT_MAX; particles[i] = p; } } __global__ void kernelSwarmUpdate(uchar4 *image, Particle *particles, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsetx = blockDim.x * gridDim.x; Particle p; for (int i = idx; i < n; i += offsetx) { p = particles[i]; p.value = fun(p.coords); if (p.value < p.best_value) { p.best_value = p.value; p.best_coords = p.coords; } particles[i] = p; } } __global__ void kernelNormalizedHeatMap(uchar4 *heatMap, float minValue, float maxValue) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; int offsetx = blockDim.x * gridDim.x; int offsety = blockDim.y * gridDim.y; int i, j; float f; for (i = idx; i < devWidth; i += offsetx) { for (j = idy; j < devHeight; j += offsety) { f = (fun(make_int2(i, j)) - minValue) / (maxValue - minValue); if (f < 0.0) f = 0.0; else if (f > 1.0) f = 1.0; heatMap[j * devWidth + i] = make_uchar4( (int)(f * 255), 0, (int)((1.0 - f) * 255), 255 ); } } } __global__ void kernelSwarmDraw(uchar4 *heatMap, Particle *particles, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsetx = blockDim.x * gridDim.x; int size = devParticleSize; for (int i = idx; i < n; i += offsetx) { int2 index = coordToIndex(particles[i].coords); for (int x = index.x - size; x <= index.x + size; x++) { for (int y = index.y - size; y <= index.y + size; y++) { if (x >= 0 && x < devWidth && y >= 0 && y < devHeight && (x - index.x) * (x - index.x) + (y - index.y) * (y - index.y) <= size * size ) { if (DEBUG && i == n / 2) { heatMap[y * devWidth + x] = make_uchar4(0, 255, 0, 255); continue; } heatMap[y * devWidth + x] = make_uchar4(255, 255, 255, 255); } } } } } // The assignment for each particle corresponding space partitioning cell __global__ void kernelSwarmAssociateWithCells(Particle *particles, int n) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsetx = blockDim.x * gridDim.x; int sizeX = ceil((devUniformSpaceMaxX - devUniformSpaceMinX) / devUniformSpaceCellSize); int sizeY = ceil((devUniformSpaceMaxY - devUniformSpaceMinY) / devUniformSpaceCellSize); Particle p; int cellX, cellY; for (int i = idx; i < n; i += offsetx) { p = particles[i]; cellX = (p.coords.x - devUniformSpaceMinX) / devUniformSpaceCellSize; cellY = (p.coords.y - devUniformSpaceMinY) / devUniformSpaceCellSize; p.cellIndex = cellX * sizeX + cellY; particles[i] = p; } } // The total force of repulsion for the i-th particle (without the space partitioning) __device__ float2 calculateRepulsionAll(Particle *particles, int n, int i) { float2 repulsion, diff, coords_a, coords_b; float distance; // TODO float minDistance = FLT_MAX; repulsion.x = 0.0; repulsion.y = 0.0; coords_a = particles[i].coords; for (int j = 0; j < n; j++) { if (j == i) continue; coords_b = particles[j].coords; diff.x = coords_a.x - coords_b.x; diff.y = coords_a.y - coords_b.y; distance = sqrt(diff.x * diff.x + diff.y * diff.y); // TODO if (DEBUG && i == n / 2 && distance < minDistance) { minDistance = distance; } distance = pow(distance, 5); if (distance < 0.5) distance = 0.5; repulsion.x += diff.x / distance; repulsion.y += diff.y / distance; } // TODO if (DEBUG && i == n / 2) { printf("distance: %lf; interactions: %d\n", minDistance, n); } return 
repulsion; } // Binary search in a sorted array of particles by cell index of space partitioning __device__ int binarySearchLowerBound(Particle *particles, int size, int cellIndex) { int left = 0, right = size - 1, middle; while (left <= right) { middle = left + (right - left) / 2; if ((&particles[middle])->cellIndex < cellIndex) left = middle + 1; else right = middle - 1; } return left; } // Check that the particle was found in binarySearchLowerBound __device__ int isFound(Particle *particles, int size, int cellIndex, int index) { return index < size && particles[index].cellIndex == cellIndex; } // Search the first particle with specified cell partition index __device__ int findParticleByCell(Particle *particles, int size, int cellIndex) { int index = binarySearchLowerBound(particles, size, cellIndex); return isFound(particles, size, cellIndex, index) ? index : -1; } // The total force of repulsion for the i-th particle (with the space partitioning) __device__ float2 calculateRepulsionClosest(Particle *particles, int n, int i) { float2 diff, repulsion = make_float2(0.0, 0.0); float distance; // TODO float minDistance = FLT_MAX; // Counter of interacting particles int counter = 0; // Dimensions of the space partitioning int sizeX = ceil(abs(devUniformSpaceMaxX - devUniformSpaceMinX) / devUniformSpaceCellSize); int sizeY = ceil(abs(devUniformSpaceMaxY - devUniformSpaceMinY) / devUniformSpaceCellSize); if (sizeX < 1) sizeX = 1; if (sizeY < 1) sizeY = 1; Particle pa, pb; pa = particles[i]; // TODO: division by zero int cellX = pa.cellIndex / sizeX; int cellY = pa.cellIndex % sizeX; int radius = 1; for (int x = cellX - radius; x <= cellX + radius; x++) { for (int y = cellY - radius; y <= cellY + radius; y++) { int neighborCellIndex = x * sizeX + y; int neighborIndex = findParticleByCell(particles, n, neighborCellIndex); if (neighborIndex != -1) { for (int k = neighborIndex; k < n; k++) { if (k == i) continue; pb = particles[k]; if (pb.cellIndex != neighborCellIndex) break; diff.x = pa.coords.x - pb.coords.x; diff.y = pa.coords.y - pb.coords.y; distance = sqrt(diff.x * diff.x + diff.y * diff.y); // TODO if (DEBUG && i == n / 2 && distance < minDistance) { minDistance = distance; } distance = pow(distance, 5); if (distance < 0.5) distance = 0.5; repulsion.x += diff.x / distance; repulsion.y += diff.y / distance; counter++; } } } } // TODO if (DEBUG && i == n / 2 && counter > 0) { printf("distance: %lf; interactions: %d\n", minDistance, counter); } return repulsion; } __global__ void kernelSwarmMove(uchar4 *image, Particle *particles, int n, float2 global_minimum, curandState *state) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int offsetx = blockDim.x * gridDim.x; // The speed_coeff drops from 5 to 0.01 depending on the time float speed_coeff = 5.0 / (1.0 + 0.1 * pow(devTimeValue, 4)); speed_coeff = speed_coeff < 0.01 ?
0.01 : speed_coeff; if (DEBUG && idx == 0) { printf("speed_coeff: %lf\n", speed_coeff); } float rnd_1, rnd_2, rnd_3, rnd_4; Particle p; float2 repulsion; for (int i = idx; i < n; i += offsetx) { rnd_1 = curand_uniform(&state[i]); rnd_2 = curand_uniform(&state[i]); rnd_3 = curand_uniform(&state[i]); rnd_4 = curand_uniform(&state[i]); p = particles[i]; p.speed.x = DAMPING_COEFF * p.speed.x + speed_coeff * ( rnd_1 * LOCAL_COEFF * (p.best_coords.x - p.coords.x) + rnd_2 * GLOBAL_COEFF * (global_minimum.x - p.coords.x) + RANDOM_COEFF * (rnd_3 - 0.5) ); p.speed.y = DAMPING_COEFF * p.speed.y + speed_coeff * ( rnd_1 * LOCAL_COEFF * (p.best_coords.y - p.coords.y) + rnd_2 * GLOBAL_COEFF * (global_minimum.y - p.coords.y) + RANDOM_COEFF * (rnd_4 - 0.5) ); repulsion = calculateRepulsionClosest(particles, n, i); p.speed.x += REPULSION_COEFF * repulsion.x; p.speed.y += REPULSION_COEFF * repulsion.y; p.coords.x += p.speed.x; p.coords.y += p.speed.y; particles[i] = p; } } void copySizeToGPU() { CSC(cudaMemcpyToSymbol((const void *)&devWidth, &width, sizeof(int))); CSC(cudaMemcpyToSymbol((const void *)&devHeight, &height, sizeof(int))); } void copyZoomToGPU() { CSC(cudaMemcpyToSymbol((const void *)&devZoomX, &zoomX, sizeof(float))); CSC(cudaMemcpyToSymbol((const void *)&devZoomY, &zoomY, sizeof(float))); } void copyParticleSizeToGPU() { CSC(cudaMemcpyToSymbol((const void *)&devParticleSize, &particleSize, sizeof(int))); } void copyCenterToGPU() { CSC(cudaMemcpyToSymbol((const void *)&devCenterX, &centerX, sizeof(float))); CSC(cudaMemcpyToSymbol((const void *)&devCenterY, &centerY, sizeof(float))); } void copyTimeToGPU() { CSC(cudaMemcpyToSymbol((const void *)&devTimeValue, &timeValue, sizeof(float))); CSC(cudaMemcpyToSymbol((const void *)&devTimeStep, &timeStep, sizeof(float))); } void copyUniformSpaceToGPU(float minX, float maxX, float minY, float maxY, float cellSize) { CSC(cudaMemcpyToSymbol((const void *)&devUniformSpaceMinX, &minX, sizeof(float))); CSC(cudaMemcpyToSymbol((const void *)&devUniformSpaceMaxX, &maxX, sizeof(float))); CSC(cudaMemcpyToSymbol((const void *)&devUniformSpaceMinY, &minY, sizeof(float))); CSC(cudaMemcpyToSymbol((const void *)&devUniformSpaceMaxY, &maxY, sizeof(float))); CSC(cudaMemcpyToSymbol((const void *)&devUniformSpaceCellSize, &cellSize, sizeof(float))); } void copyToGPU() { copySizeToGPU(); copyZoomToGPU(); copyParticleSizeToGPU(); copyCenterToGPU(); copyTimeToGPU(); } void update() { auto t_start = std::chrono::high_resolution_clock::now(); copyToGPU(); uchar4 *devHeatMap; size_t size; CSC(cudaGraphicsMapResources(1, &res, 0)); CSC(cudaGraphicsResourceGetMappedPointer((void **)&devHeatMap, &size, res)); // Update the function values and local minima for each particle kernelSwarmUpdate<<<blocks_1d, threads_1d>>>(devHeatMap, devParticleArray, numberOfParticles); // The boundaries and the center of the swarm, minimum, maximum, the global minimum thrust::device_ptr<ParticleArea> startParticleAreaArray(devPartileAreaArray); thrust::device_ptr<ParticleArea> endParticleAreaArray = startParticleAreaArray + numberOfParticles; ParticleArea pa; pa.min_x = FLT_MAX ; pa.min_y = FLT_MAX ; pa.max_x = -FLT_MAX ; pa.max_y = -FLT_MAX ; pa.sum_x = 0.0; pa.sum_y = 0.0; pa.minValue = FLT_MAX ; pa.maxValue = -FLT_MAX ; pa.globalMinimum = FLT_MAX ; kernelInitParticleArea<<<blocks_1d, threads_1d>>>( devParticleArray, devPartileAreaArray, numberOfParticles ); pa = thrust::reduce(startParticleAreaArray, endParticleAreaArray, pa, ParticleReductionFunctor()); // Align the window in the center of 
particle swarm if (autoCenter) { centerX = pa.sum_x / numberOfParticles; centerY = pa.sum_y / numberOfParticles; copyCenterToGPU(); } // Draw a heat map and particles kernelNormalizedHeatMap<<<blocks_2d, threads_2d>>>(devHeatMap, pa.minValue, pa.maxValue); kernelSwarmDraw<<<blocks_1d, threads_1d>>>(devHeatMap, devParticleArray, numberOfParticles); // Space partitioning copyUniformSpaceToGPU(pa.min_x, pa.max_x, pa.min_y, pa.max_y, cellSize); kernelSwarmAssociateWithCells<<<blocks_1d, threads_1d>>>(devParticleArray, numberOfParticles); // Sort particles by cell index thrust::device_ptr<Particle> startParticleArray(devParticleArray); thrust::device_ptr<Particle> endParticleArray = startParticleArray + numberOfParticles; thrust::sort(startParticleArray, endParticleArray, ParticleSortByIndexComparator()); // Update particles position kernelSwarmMove<<<blocks_1d, threads_1d>>>( devHeatMap, devParticleArray, numberOfParticles, pa.globalMinimumCoords, devRandomState ); timeValue += timeStep; CSC(cudaDeviceSynchronize()); CSC(cudaGraphicsUnmapResources(1, &res, 0)); auto t_end = std::chrono::high_resolution_clock::now(); double duration = std::chrono::duration<double, std::milli>(t_end-t_start).count(); if (DEBUG) { printf("%lf ms; center: %lf, %lf\n\n", duration, centerX, centerY); } glutPostRedisplay(); } void display() { glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(0.0, (GLdouble)width, 0.0, (GLdouble)height); glutInitWindowSize(width, height); glClearColor(0.0, 0.0, 0.0, 1.0); glClear(GL_COLOR_BUFFER_BIT); glDrawPixels(width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0); glutSwapBuffers(); } void reshapeFunc(int w, int h) { width = w; height = h; zoomY = zoomX * height / width; glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, width * height * sizeof(uchar4), NULL, GL_DYNAMIC_DRAW); CSC(cudaGraphicsGLRegisterBuffer(&res, vbo, cudaGraphicsMapFlagsWriteDiscard)); } void keyboardFunc(unsigned char key, int xmouse, int ymouse) { switch (key) { case 'w': centerY -= 0.1f * zoomY; break; case 'a': centerX -= 0.1f * zoomX; break; case 's': centerY += 0.1f * zoomY; break; case 'd': centerX += 0.1f * zoomX; break; case 'q': autoCenter = autoCenter ? false : true; break; case 45: particleSize -= 1; break; case 61: particleSize += 1; break; default: break; } if (particleSize < 0) particleSize = 0; else if (particleSize > 100) particleSize = 100; } void mouseWheelFunc(int wheel, int direction, int x, int y) { zoomX += direction < 0 ? 
0.1 * zoomX : -0.1 * zoomX; if (zoomX < 0.01) zoomX = 0.01; else if (zoomX > 1000000) zoomX = 1000000; zoomY = zoomX * height / width; } int main(int argc, char **argv) { std::cout << "Enter window width: "; std::cin >> width; std::cout << "Enter window height: "; std::cin >> height; std::cout << "Enter number of particles: "; std::cin >> numberOfParticles; std::cout << "Enter cell size: "; std::cin >> cellSize; copyToGPU(); cudaMalloc((void **)&devRandomState, sizeof(curandState) * numberOfParticles); cudaMalloc((void **)&devParticleArray, sizeof(Particle) * numberOfParticles); cudaMalloc((void **)&devPartileAreaArray, sizeof(ParticleArea) * numberOfParticles); initRandomState<<<blocks_1d, threads_1d>>>(devRandomState, numberOfParticles); kernelSwarmInit<<<blocks_1d, threads_1d>>>(devParticleArray, numberOfParticles, devRandomState); glutInit(&argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); glutInitWindowSize(width, height); glutCreateWindow("Particle swarm optimization"); glutIdleFunc(update); glutDisplayFunc(display); glutReshapeFunc(reshapeFunc); glutKeyboardFunc(keyboardFunc); glutMouseWheelFunc(mouseWheelFunc); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(0.0, (GLdouble)width, 0.0, (GLdouble)height); glewInit(); glGenBuffers(1, &vbo); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, vbo); reshapeFunc(width, height); glutMainLoop(); CSC(cudaGraphicsUnregisterResource(res)); glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0); glDeleteBuffers(1, &vbo); cudaFree(devRandomState); cudaFree(devParticleArray); cudaFree(devPartileAreaArray); return 0; }
31c934b872ec330cae2b98bd0ee0c8082e680fdb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void reduction_kernel_2(float *g_out, float *g_in, unsigned int size) { unsigned int idx_x = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ float s_data[]; s_data[threadIdx.x] = (idx_x < size) ? g_in[idx_x] : 0.f; __syncthreads(); // do reduction // sequential addressing for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (threadIdx.x < stride) s_data[threadIdx.x] += s_data[threadIdx.x + stride]; __syncthreads(); } if (threadIdx.x == 0) g_out[blockIdx.x] = s_data[0]; }
31c934b872ec330cae2b98bd0ee0c8082e680fdb.cu
#include "includes.h" __global__ void reduction_kernel_2(float *g_out, float *g_in, unsigned int size) { unsigned int idx_x = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ float s_data[]; s_data[threadIdx.x] = (idx_x < size) ? g_in[idx_x] : 0.f; __syncthreads(); // do reduction // sequential addressing for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (threadIdx.x < stride) s_data[threadIdx.x] += s_data[threadIdx.x + stride]; __syncthreads(); } if (threadIdx.x == 0) g_out[blockIdx.x] = s_data[0]; }
46767ed9bbdcde8e3cf6cb092b1a49657a0ce82e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "flexflow/ops/kernels/split_kernels.h" #include "flexflow/utils/cuda_helper.h" namespace FlexFlow { // declare Legion names using Legion::coord_t; namespace Kernels { namespace Split { void forward_kernel_wrapper(float **out_ptrs, float const *in_ptr, coord_t const *out_blk_sizes, coord_t in_blk_size, coord_t num_blks, int numOutputs) { hipStream_t stream; checkCUDA(get_legion_stream(&stream)); Internal::forward_kernel(out_ptrs, in_ptr, out_blk_sizes, in_blk_size, num_blks, numOutputs, stream); } void backward_kernel_wrapper(float *in_grad_ptr, float const **out_grad_ptr, coord_t const *out_blk_sizes, coord_t in_blk_size, coord_t num_blks, int numOutputs) { hipStream_t stream; checkCUDA(get_legion_stream(&stream)); Internal::backward_kernel(in_grad_ptr, out_grad_ptr, out_blk_sizes, in_blk_size, num_blks, numOutputs, stream); // checkCUDA(hipDeviceSynchronize()); } namespace Internal { void forward_kernel(float **out_ptrs, float const *in_ptr, coord_t const *out_blk_sizes, coord_t in_blk_size, coord_t num_blks, int numOutputs, hipStream_t stream) { for (int i = 0; i < numOutputs; i++) { hipLaunchKernelGGL(( copy_with_stride), dim3(GET_BLOCKS(out_blk_sizes[i] * num_blks)), dim3(CUDA_NUM_THREADS), 0, stream, out_ptrs[i], in_ptr, num_blks, out_blk_sizes[i], in_blk_size); in_ptr += out_blk_sizes[i]; } } void backward_kernel(float *in_grad_ptr, float const **out_grad_ptr, coord_t const *out_blk_sizes, coord_t in_blk_size, coord_t num_blks, int numOutputs, hipStream_t stream) { for (int i = 0; i < numOutputs; i++) { hipLaunchKernelGGL(( add_with_stride), dim3(GET_BLOCKS(out_blk_sizes[i] * num_blks)), dim3(CUDA_NUM_THREADS), 0, stream, in_grad_ptr, out_grad_ptr[i], num_blks, in_blk_size, out_blk_sizes[i]); in_grad_ptr += out_blk_sizes[i]; } } } // namespace Internal } // namespace Split } // namespace Kernels } // namespace FlexFlow
46767ed9bbdcde8e3cf6cb092b1a49657a0ce82e.cu
/* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "flexflow/ops/kernels/split_kernels.h" #include "flexflow/utils/cuda_helper.h" namespace FlexFlow { // declare Legion names using Legion::coord_t; namespace Kernels { namespace Split { void forward_kernel_wrapper(float **out_ptrs, float const *in_ptr, coord_t const *out_blk_sizes, coord_t in_blk_size, coord_t num_blks, int numOutputs) { cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); Internal::forward_kernel(out_ptrs, in_ptr, out_blk_sizes, in_blk_size, num_blks, numOutputs, stream); } void backward_kernel_wrapper(float *in_grad_ptr, float const **out_grad_ptr, coord_t const *out_blk_sizes, coord_t in_blk_size, coord_t num_blks, int numOutputs) { cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); Internal::backward_kernel(in_grad_ptr, out_grad_ptr, out_blk_sizes, in_blk_size, num_blks, numOutputs, stream); // checkCUDA(cudaDeviceSynchronize()); } namespace Internal { void forward_kernel(float **out_ptrs, float const *in_ptr, coord_t const *out_blk_sizes, coord_t in_blk_size, coord_t num_blks, int numOutputs, cudaStream_t stream) { for (int i = 0; i < numOutputs; i++) { copy_with_stride<<<GET_BLOCKS(out_blk_sizes[i] * num_blks), CUDA_NUM_THREADS, 0, stream>>>( out_ptrs[i], in_ptr, num_blks, out_blk_sizes[i], in_blk_size); in_ptr += out_blk_sizes[i]; } } void backward_kernel(float *in_grad_ptr, float const **out_grad_ptr, coord_t const *out_blk_sizes, coord_t in_blk_size, coord_t num_blks, int numOutputs, cudaStream_t stream) { for (int i = 0; i < numOutputs; i++) { add_with_stride<<<GET_BLOCKS(out_blk_sizes[i] * num_blks), CUDA_NUM_THREADS, 0, stream>>>( in_grad_ptr, out_grad_ptr[i], num_blks, in_blk_size, out_blk_sizes[i]); in_grad_ptr += out_blk_sizes[i]; } } } // namespace Internal } // namespace Split } // namespace Kernels } // namespace FlexFlow
9a84afb28ca3b8e4bd9ed75397faef89c5998723.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <string> #include <math.h> #include <stdlib.h> #include <string.h> #include <malloc.h> #include <vector> #include <iostream> #include <fstream> #include <functional> #include <algorithm> #include <ctime> #define ACCURACY 0.01 #define NUM_OF_GPU_THREADS 1024 #define BLOCK_SIZE 32 void checkError(hipError_t err, int line) { if (hipSuccess != err) { std::cerr << "Error " << hipGetErrorName(err) << " happenend: " << hipGetErrorString(err) << " at line " << line << std::endl; exit(-1); } } __global__ void sgemmKernel(float * cudaA, float * cudaB, float * cudaC, int m, int n, int k, float alpha, float beta) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; __shared__ float A[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float B[BLOCK_SIZE][BLOCK_SIZE]; int row = bx * BLOCK_SIZE + tx; int col = by * BLOCK_SIZE + ty; float sum = 0; for (int i = 0; i < gridDim.y; i++) { int a_col = i * BLOCK_SIZE + ty; int a_row = row; if (a_row < m && a_col < k) { A[tx][ty] = cudaA[a_row + a_col * m]; } int b_col = col; int b_row = i * BLOCK_SIZE + tx; if (b_row < k && b_col < n){ B[tx][ty] = cudaB[b_row * n + b_col]; } __syncthreads(); if (row < m && col < n) { int j_end = ((i + 1) * BLOCK_SIZE < k) ? BLOCK_SIZE : k - i * BLOCK_SIZE; for (int j = 0; j < j_end; j++) { sum += A[tx][j] * B[j][ty]; } } __syncthreads(); } if (row < m && col < n) { cudaC[row + col * m] = 0; cudaC[row + col * m] = cudaC[row + col * m] * beta + sum * alpha; } } bool readColMajorMatrixFile(const char *fn, int &nr_row, int &nr_col, std::vector<float>&v) { std::cerr << "Opening file:" << fn << std::endl; std::fstream f(fn, std::fstream::in); if (!f.good()) { return false; } // Read # of rows and cols f >> nr_row; f >> nr_col; float data; std::cerr << "Matrix dimension: " << nr_row << "x" << nr_col << std::endl; while (f.good()) { f >> data; v.push_back(data); } v.pop_back(); // remove the duplicated last element return true; } bool writeColMajorMatrixFile(const char *fn, int nr_row, int nr_col, std::vector<float>&v) { std::cerr << "Opening file:" << fn << " for write." 
<< std::endl; std::fstream f(fn, std::fstream::out); if (!f.good()) { return false; } // Read # of rows and cols f << nr_row << " " << nr_col << " "; std::cerr << "Matrix dimension: " << nr_row << "x" << nr_col << std::endl; for (int i = 0; i < v.size(); ++i) { f << v[i] << ' '; } f << "\n"; return true; } void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc) { if ((transa != 'N') && (transa != 'n')) { std::cerr << "unsupported value of 'transa' in regtileSgemm()" << std::endl; return; } if ((transb != 'T') && (transb != 't')) { std::cerr << "unsupported value of 'transb' in regtileSgemm()" << std::endl; return; } for (int mm = 0; mm < m; ++mm) { for (int nn = 0; nn < n; ++nn) { float c = 0.0f; for (int i = 0; i < k; ++i) { float a = A[mm + i * lda]; float b = B[nn + i * ldb]; c += a * b; } C[mm + nn * ldc] = C[mm + nn * ldc] * beta + alpha * c; } } } void basicSgemm_par(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc) { float *cudaA, *cudaB, *cudaC; int sizeA = m * k * sizeof(float), sizeB = k * n * sizeof(float), sizeC = m * n * sizeof(float); if ((transa != 'N') && (transa != 'n')) { std::cerr << "unsupported value of 'transa' in regtileSgemm()" << std::endl; return; } if ((transb != 'T') && (transb != 't')) { std::cerr << "unsupported value of 'transb' in regtileSgemm()" << std::endl; return; } checkError(hipMalloc(&cudaA, sizeA), __LINE__); checkError(hipMemcpy(cudaA, A, sizeA, hipMemcpyHostToDevice), __LINE__); checkError(hipMalloc(&cudaB, sizeB), __LINE__); checkError(hipMemcpy(cudaB, B, sizeB, hipMemcpyHostToDevice), __LINE__); checkError(hipMalloc(&cudaC, sizeC), __LINE__); dim3 dimGrid((m + BLOCK_SIZE - 1) / BLOCK_SIZE, (n + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); hipLaunchKernelGGL(( sgemmKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, cudaA, cudaB, cudaC, m, n, k, alpha, beta); checkError(hipMemcpy(C, cudaC, sizeC, hipMemcpyDeviceToHost), __LINE__); checkError(hipFree(cudaA), __LINE__); checkError(hipFree(cudaB), __LINE__); checkError(hipFree(cudaC), __LINE__); } int main(int argc, char *argv[]) { int matArow, matAcol; int matBrow, matBcol; std::vector<float> matA; std::vector<float> matBT; //int m, n, k; float timeSeq = 0.0f, timePar = 0.0f; hipEvent_t start = hipEvent_t(); checkError(hipEventCreate(&start), __LINE__); hipEvent_t stop = hipEvent_t(); checkError(hipEventCreate(&stop), __LINE__); if (argc != 4) { fprintf(stderr, "Expecting three input filenames\n"); exit(-1); } /* Read in data */ // load A readColMajorMatrixFile(argv[1], matArow, matAcol, matA); // load B^T readColMajorMatrixFile(argv[2], matBcol, matBrow, matBT); // allocate space for C and D std::vector<float> matC(matArow * matBcol); std::vector<float> matD(matArow * matBcol); //clock_t begin = clock(); hipEventRecord(start, 0); // Use standard sgemm interface basicSgemm('N', 'T', matArow, matBcol, matAcol, 1.0f, &matA.front(), matArow, &matBT.front(), matBcol, 0.0f, &matC.front(), matArow); clock_t end = clock(); //timeSeq = float(end - begin) / CLOCKS_PER_SEC; checkError(hipEventRecord(stop, 0), __LINE__); checkError(hipEventSynchronize(stop), __LINE__); checkError(hipEventElapsedTime(&timeSeq, start, stop), __LINE__); timeSeq /= 1000; hipEventRecord(start, 0); // Use parallel sgemm interface basicSgemm_par('N', 'T', matArow, matBcol, matAcol, 1.0f, &matA.front(), matArow, 
&matBT.front(), matBcol, 0.0f, &matD.front(), matArow); checkError(hipEventRecord(stop, 0), __LINE__); checkError(hipEventSynchronize(stop), __LINE__); checkError(hipEventElapsedTime(&timePar, start, stop), __LINE__); timePar /= 1000; checkError(hipEventDestroy(start), __LINE__); checkError(hipEventDestroy(stop), __LINE__); writeColMajorMatrixFile(argv[3], matArow, matBcol, matC); std::function<bool(double, double)> comparator = [](double left, double right) { // Lambda function to compare 2 doubles with ACCURACY return fabs(left - right) < ACCURACY; }; std::cerr << "********************DZ3Z1**********************" << std::endl; std::cerr << "Elapsed time - SEQ: " << timeSeq << "." << std::endl; std::cerr << "Elapsed time - PAR: " << timePar << "." << std::endl; std::cerr << (std::equal(matC.begin(), matC.end(), matD.begin(), comparator) ? "TEST PASSED" : "TEST FAILED") << std::endl; std::cerr << "***********************************************" << std::endl; return 0; }
9a84afb28ca3b8e4bd9ed75397faef89c5998723.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <string> #include <math.h> #include <stdlib.h> #include <string.h> #include <malloc.h> #include <vector> #include <iostream> #include <fstream> #include <functional> #include <algorithm> #include <ctime> #define ACCURACY 0.01 #define NUM_OF_GPU_THREADS 1024 #define BLOCK_SIZE 32 void checkError(cudaError_t err, int line) { if (cudaSuccess != err) { std::cerr << "Error " << cudaGetErrorName(err) << " happenend: " << cudaGetErrorString(err) << " at line " << line << std::endl; exit(-1); } } __global__ void sgemmKernel(float * cudaA, float * cudaB, float * cudaC, int m, int n, int k, float alpha, float beta) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; __shared__ float A[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float B[BLOCK_SIZE][BLOCK_SIZE]; int row = bx * BLOCK_SIZE + tx; int col = by * BLOCK_SIZE + ty; float sum = 0; for (int i = 0; i < gridDim.y; i++) { int a_col = i * BLOCK_SIZE + ty; int a_row = row; if (a_row < m && a_col < k) { A[tx][ty] = cudaA[a_row + a_col * m]; } int b_col = col; int b_row = i * BLOCK_SIZE + tx; if (b_row < k && b_col < n){ B[tx][ty] = cudaB[b_row * n + b_col]; } __syncthreads(); if (row < m && col < n) { int j_end = ((i + 1) * BLOCK_SIZE < k) ? BLOCK_SIZE : k - i * BLOCK_SIZE; for (int j = 0; j < j_end; j++) { sum += A[tx][j] * B[j][ty]; } } __syncthreads(); } if (row < m && col < n) { cudaC[row + col * m] = 0; cudaC[row + col * m] = cudaC[row + col * m] * beta + sum * alpha; } } bool readColMajorMatrixFile(const char *fn, int &nr_row, int &nr_col, std::vector<float>&v) { std::cerr << "Opening file:" << fn << std::endl; std::fstream f(fn, std::fstream::in); if (!f.good()) { return false; } // Read # of rows and cols f >> nr_row; f >> nr_col; float data; std::cerr << "Matrix dimension: " << nr_row << "x" << nr_col << std::endl; while (f.good()) { f >> data; v.push_back(data); } v.pop_back(); // remove the duplicated last element return true; } bool writeColMajorMatrixFile(const char *fn, int nr_row, int nr_col, std::vector<float>&v) { std::cerr << "Opening file:" << fn << " for write." 
<< std::endl; std::fstream f(fn, std::fstream::out); if (!f.good()) { return false; } // Read # of rows and cols f << nr_row << " " << nr_col << " "; std::cerr << "Matrix dimension: " << nr_row << "x" << nr_col << std::endl; for (int i = 0; i < v.size(); ++i) { f << v[i] << ' '; } f << "\n"; return true; } void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc) { if ((transa != 'N') && (transa != 'n')) { std::cerr << "unsupported value of 'transa' in regtileSgemm()" << std::endl; return; } if ((transb != 'T') && (transb != 't')) { std::cerr << "unsupported value of 'transb' in regtileSgemm()" << std::endl; return; } for (int mm = 0; mm < m; ++mm) { for (int nn = 0; nn < n; ++nn) { float c = 0.0f; for (int i = 0; i < k; ++i) { float a = A[mm + i * lda]; float b = B[nn + i * ldb]; c += a * b; } C[mm + nn * ldc] = C[mm + nn * ldc] * beta + alpha * c; } } } void basicSgemm_par(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc) { float *cudaA, *cudaB, *cudaC; int sizeA = m * k * sizeof(float), sizeB = k * n * sizeof(float), sizeC = m * n * sizeof(float); if ((transa != 'N') && (transa != 'n')) { std::cerr << "unsupported value of 'transa' in regtileSgemm()" << std::endl; return; } if ((transb != 'T') && (transb != 't')) { std::cerr << "unsupported value of 'transb' in regtileSgemm()" << std::endl; return; } checkError(cudaMalloc(&cudaA, sizeA), __LINE__); checkError(cudaMemcpy(cudaA, A, sizeA, cudaMemcpyHostToDevice), __LINE__); checkError(cudaMalloc(&cudaB, sizeB), __LINE__); checkError(cudaMemcpy(cudaB, B, sizeB, cudaMemcpyHostToDevice), __LINE__); checkError(cudaMalloc(&cudaC, sizeC), __LINE__); dim3 dimGrid((m + BLOCK_SIZE - 1) / BLOCK_SIZE, (n + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); sgemmKernel <<< dimGrid, dimBlock >>> (cudaA, cudaB, cudaC, m, n, k, alpha, beta); checkError(cudaMemcpy(C, cudaC, sizeC, cudaMemcpyDeviceToHost), __LINE__); checkError(cudaFree(cudaA), __LINE__); checkError(cudaFree(cudaB), __LINE__); checkError(cudaFree(cudaC), __LINE__); } int main(int argc, char *argv[]) { int matArow, matAcol; int matBrow, matBcol; std::vector<float> matA; std::vector<float> matBT; //int m, n, k; float timeSeq = 0.0f, timePar = 0.0f; cudaEvent_t start = cudaEvent_t(); checkError(cudaEventCreate(&start), __LINE__); cudaEvent_t stop = cudaEvent_t(); checkError(cudaEventCreate(&stop), __LINE__); if (argc != 4) { fprintf(stderr, "Expecting three input filenames\n"); exit(-1); } /* Read in data */ // load A readColMajorMatrixFile(argv[1], matArow, matAcol, matA); // load B^T readColMajorMatrixFile(argv[2], matBcol, matBrow, matBT); // allocate space for C and D std::vector<float> matC(matArow * matBcol); std::vector<float> matD(matArow * matBcol); //clock_t begin = clock(); cudaEventRecord(start, 0); // Use standard sgemm interface basicSgemm('N', 'T', matArow, matBcol, matAcol, 1.0f, &matA.front(), matArow, &matBT.front(), matBcol, 0.0f, &matC.front(), matArow); clock_t end = clock(); //timeSeq = float(end - begin) / CLOCKS_PER_SEC; checkError(cudaEventRecord(stop, 0), __LINE__); checkError(cudaEventSynchronize(stop), __LINE__); checkError(cudaEventElapsedTime(&timeSeq, start, stop), __LINE__); timeSeq /= 1000; cudaEventRecord(start, 0); // Use parallel sgemm interface basicSgemm_par('N', 'T', matArow, matBcol, matAcol, 1.0f, &matA.front(), matArow, &matBT.front(), matBcol, 
0.0f, &matD.front(), matArow); checkError(cudaEventRecord(stop, 0), __LINE__); checkError(cudaEventSynchronize(stop), __LINE__); checkError(cudaEventElapsedTime(&timePar, start, stop), __LINE__); timePar /= 1000; checkError(cudaEventDestroy(start), __LINE__); checkError(cudaEventDestroy(stop), __LINE__); writeColMajorMatrixFile(argv[3], matArow, matBcol, matC); std::function<bool(double, double)> comparator = [](double left, double right) { // Lambda function to compare 2 doubles with ACCURACY return fabs(left - right) < ACCURACY; }; std::cerr << "********************DZ3Z1**********************" << std::endl; std::cerr << "Elapsed time - SEQ: " << timeSeq << "." << std::endl; std::cerr << "Elapsed time - PAR: " << timePar << "." << std::endl; std::cerr << (std::equal(matC.begin(), matC.end(), matD.begin(), comparator) ? "TEST PASSED" : "TEST FAILED") << std::endl; std::cerr << "***********************************************" << std::endl; return 0; }
92d26a22a3af7013b8201bee35af0ca3a6afbceb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <vector> #define NUM 512 class Double { public: __host__ __device__ float operator()(float val) { return 2*val; } }; template<typename F> __global__ void gpu_kernel(float * buf, F func) { int idx = threadIdx.x; buf[idx] = func(buf[idx]); } template<typename F> void cpu_kernel(float * buf, F func) { for (int idx = 0; idx<NUM; ++idx) { buf[idx] = func(buf[idx]); } } void gpu_run(void) { std::cout << std::endl << "gpu" << std::endl; std::vector<float> h_buf(NUM, 1); float * d_buf; hipMalloc(&d_buf, NUM*sizeof(float)); hipMemcpy(d_buf, h_buf.data(), NUM*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( gpu_kernel), dim3(1), dim3(NUM), 0, 0, d_buf, Double()); hipMemcpy(h_buf.data(), d_buf, NUM*sizeof(float), hipMemcpyDeviceToHost); std::cout << " 0:" << h_buf[0] << std::endl; std::cout << "511:" << h_buf[511] << std::endl; hipFree(d_buf); } void cpu_run(void) { std::cout << std::endl << "cpu" << std::endl; std::vector<float> h_buf(NUM, 1); cpu_kernel(h_buf.data(), Double()); std::cout << " 0:" << h_buf[0] << std::endl; std::cout << "511:" << h_buf[511] << std::endl; } int main() { cpu_run(); gpu_run(); }
92d26a22a3af7013b8201bee35af0ca3a6afbceb.cu
#include <iostream> #include <vector> #define NUM 512 class Double { public: __host__ __device__ float operator()(float val) { return 2*val; } }; template<typename F> __global__ void gpu_kernel(float * buf, F func) { int idx = threadIdx.x; buf[idx] = func(buf[idx]); } template<typename F> void cpu_kernel(float * buf, F func) { for (int idx = 0; idx<NUM; ++idx) { buf[idx] = func(buf[idx]); } } void gpu_run(void) { std::cout << std::endl << "gpu" << std::endl; std::vector<float> h_buf(NUM, 1); float * d_buf; cudaMalloc(&d_buf, NUM*sizeof(float)); cudaMemcpy(d_buf, h_buf.data(), NUM*sizeof(float), cudaMemcpyHostToDevice); gpu_kernel<<<1, NUM>>>(d_buf, Double()); cudaMemcpy(h_buf.data(), d_buf, NUM*sizeof(float), cudaMemcpyDeviceToHost); std::cout << " 0:" << h_buf[0] << std::endl; std::cout << "511:" << h_buf[511] << std::endl; cudaFree(d_buf); } void cpu_run(void) { std::cout << std::endl << "cpu" << std::endl; std::vector<float> h_buf(NUM, 1); cpu_kernel(h_buf.data(), Double()); std::cout << " 0:" << h_buf[0] << std::endl; std::cout << "511:" << h_buf[511] << std::endl; } int main() { cpu_run(); gpu_run(); }
89754b4a43aa8b0fde9dc94b7f5b547c7c3131b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "global_defines.cuh" void LBM::streaming(){ /*Propagate fluid densities to their next neighbour nodes */ /*c c.......density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours. c*/ if(data_location==GPU) copy_data_from_device_to_host(); int x,y,z; int x_e/*east*/,x_w/*west*/; int y_n/*north*/,y_s/*south*/; int z_l/*left*/,z_r/*right*/; //todo rwta ton Dirk pws to kanei auto to vhma for (z = 0 ; z< lz ; ++z){ z_l = (z+1)%lz; //1 8esh meta to trexon x z_r = (z+lz-1) %( lz) ; for (y = 0; y< ly; ++y){ y_n = (y+1)%ly; //1 8esh meta to trexon y y_s = (y+ly-1) %( ly) ; for (x = 0; x< lx; ++x){ x_e = (x+1)%lx; //1 8esh meta to trexon z x_w = (x+lx-1) %( lx) ; //regular streaming process /* .........density propagation .........zero: just copy */ D3_hlp.Q0[index(z,y,x)]=D3.Q0[index(z,y,x)]; //node_hlp_grid[index4D(z,y,x,0)]=node_grid[index4D(z,y,x,0)]; //.........in the x,y and z directions D3_hlp.Q1[index(z ,y ,x_e)] = D3.Q1[index(z,y,x)]; D3_hlp.Q2[index(z ,y_n,x )] = D3.Q2[index(z,y,x)]; D3_hlp.Q3[index(z ,y ,x_w)] = D3.Q3[index(z,y,x)]; D3_hlp.Q4[index(z ,y_s,x )] = D3.Q4[index(z,y,x)]; D3_hlp.Q5[index(z_l,y ,x )] = D3.Q5[index(z,y,x)]; D3_hlp.Q6[index(z_r,y ,x )] = D3.Q6[index(z,y,x)]; //......... in the x,y diagonals D3_hlp.Q7[ index(z ,y_n,x_e)] = D3.Q7[ index(z,y,x)]; D3_hlp.Q8[ index(z ,y_n,x_w)] = D3.Q8[ index(z,y,x)]; D3_hlp.Q9[ index(z ,y_s,x_w)] = D3.Q9[ index(z,y,x)]; D3_hlp.Q10[index(z ,y_s,x_e)] = D3.Q10[index(z,y,x)]; //......... in the x,z diagonals D3_hlp.Q11[index(z_r,y ,x_e)] = D3.Q11[index(z,y,x)]; D3_hlp.Q12[index(z_r,y ,x_w)] = D3.Q12[index(z,y,x)]; D3_hlp.Q13[index(z_l,y ,x_w)] = D3.Q13[index(z,y,x)]; D3_hlp.Q14[index(z_l,y ,x_e)] = D3.Q14[index(z,y,x)]; //......... in the y,z diagonals D3_hlp.Q15[index(z_l,y_n,x )] = D3.Q15[index(z,y,x)]; D3_hlp.Q16[index(z_r,y_n,x )] = D3.Q16[index(z,y,x)]; D3_hlp.Q17[index(z_r,y_s,x )] = D3.Q17[index(z,y,x)]; D3_hlp.Q18[index(z_l,y_s,x )] = D3.Q18[index(z,y,x)]; }//z-loop }//y loop }//x loop for (z = 0 ; z< lz ; ++z){ //loop for x=0 and x=lx-1!!!! 
(first and last slice) for (y = 0; y< ly; ++y){ //toslice 0 antigrafetai sto 0 D3_hlp.Q0[index(z,y,0)]=D3.Q0[index(z,y,0)]; D3_hlp.Q1[index(z,y,0)]=D3.Q1[index(z,y,0)]; D3_hlp.Q2[index(z,y,0)]=D3.Q2[index(z,y,0)]; D3_hlp.Q3[index(z,y,0)]=D3.Q3[index(z,y,0)]; D3_hlp.Q4[index(z,y,0)]=D3.Q4[index(z,y,0)]; D3_hlp.Q5[index(z,y,0)]=D3.Q5[index(z,y,0)]; D3_hlp.Q6[index(z,y,0)]=D3.Q6[index(z,y,0)]; D3_hlp.Q7[index(z,y,0)]=D3.Q7[index(z,y,0)]; D3_hlp.Q8[index(z,y,0)]=D3.Q8[index(z,y,0)]; D3_hlp.Q9[index(z,y,0)]=D3.Q9[index(z,y,0)]; D3_hlp.Q10[index(z,y,0)]=D3.Q10[index(z,y,0)]; D3_hlp.Q11[index(z,y,0)]=D3.Q11[index(z,y,0)]; D3_hlp.Q12[index(z,y,0)]=D3.Q12[index(z,y,0)]; D3_hlp.Q13[index(z,y,0)]=D3.Q13[index(z,y,0)]; D3_hlp.Q14[index(z,y,0)]=D3.Q14[index(z,y,0)]; D3_hlp.Q15[index(z,y,0)]=D3.Q15[index(z,y,0)]; D3_hlp.Q16[index(z,y,0)]=D3.Q16[index(z,y,0)]; D3_hlp.Q17[index(z,y,0)]=D3.Q17[index(z,y,0)]; D3_hlp.Q18[index(z,y,0)]=D3.Q18[index(z,y,0)]; //at x= lx I set the incomming density as the one (equilibrum) calculated after the collision D3_hlp.Q3[index(z,y,lx-1)] = D3.Q3[index(z,y,lx-1)]; D3_hlp.Q8[index(z,y,lx-1)] = D3.Q8[index(z,y,lx-1)] ; D3_hlp.Q9[index(z,y,lx-1)] = D3.Q9[index(z,y,lx-1)] ; D3_hlp.Q12[index(z,y,lx-1)] = D3.Q12[index(z,y,lx-1)] ; D3_hlp.Q13[index(z,y,lx-1)] = D3.Q13[index(z,y,lx-1)] ; // gia x=lx-1: the densities 0, 2,4,5,6,15,16,17,18 // aplws 8a metadw8oune // "ka8eta" sto slice kai de 8a ginoun propagate se // alla slices (tuxainei na voleuei auto) // auto to kommati exei HDH ginei pio panw! // // // during streaming some of the indices at // (fixed) x=lx-1, y=ly-1, do not get updated. // it doesn't matter since the last slice on x is not useful. } } /* for (z = 0 ; z< lz ; ++z){ z_l = (z+1)%lz; //1 8esh meta to trexon x z_r = (z+lz-1) %( lz) ; //loop for x=0 and x=lx-1!!!! 
(first and last slice) for (y = 0; y< ly; ++y){ y_n = (y+1)%ly; //1 8esh meta to trexon x y_s = (y+ly-1) %( ly) ; //toslice 0 antigrafetai sto 0 D3_hlp.Q0[index(z,y,0)]=D3.Q0[index(z,y,0)]; D3_hlp.Q1[index(z,y,0)]=D3.Q1[index(z,y,0)]; D3_hlp.Q2[index(z,y,0)]=D3.Q2[index(z,y,0)]; D3_hlp.Q3[index(z,y,0)]=D3.Q3[index(z,y,0)]; D3_hlp.Q4[index(z,y,0)]=D3.Q4[index(z,y,0)]; D3_hlp.Q5[index(z,y,0)]=D3.Q5[index(z,y,0)]; D3_hlp.Q6[index(z,y,0)]=D3.Q6[index(z,y,0)]; D3_hlp.Q7[index(z,y,0)]=D3.Q7[index(z,y,0)]; D3_hlp.Q8[index(z,y,0)]=D3.Q8[index(z,y,0)]; D3_hlp.Q9[index(z,y,0)]=D3.Q9[index(z,y,0)]; D3_hlp.Q10[index(z,y,0)]=D3.Q10[index(z,y,0)]; D3_hlp.Q11[index(z,y,0)]=D3.Q11[index(z,y,0)]; D3_hlp.Q12[index(z,y,0)]=D3.Q12[index(z,y,0)]; D3_hlp.Q13[index(z,y,0)]=D3.Q13[index(z,y,0)]; D3_hlp.Q14[index(z,y,0)]=D3.Q14[index(z,y,0)]; D3_hlp.Q15[index(z,y,0)]=D3.Q15[index(z,y,0)]; D3_hlp.Q16[index(z,y,0)]=D3.Q16[index(z,y,0)]; D3_hlp.Q17[index(z,y,0)]=D3.Q17[index(z,y,0)]; D3_hlp.Q18[index(z,y,0)]=D3.Q18[index(z,y,0)]; //at x= lx I set the incomming density as the one (equilibrum) calculated after the collision D3_hlp.Q3[index(z,y,lx-1)] = D3.Q3[index(z,y,lx-1)]; D3_hlp.Q8[index(z,y,lx-1)] = D3.Q8[index(z,y,lx-1)] ; D3_hlp.Q9[index(z,y,lx-1)] = D3.Q9[index(z,y,lx-1)] ; D3_hlp.Q12[index(z,y,lx-1)] = D3.Q12[index(z,y,lx-1)] ; D3_hlp.Q13[index(z,y,lx-1)] = D3.Q13[index(z,y,lx-1)] ; //tis aristeres densities apo to slice 0 tis metaferei sto telos D3_hlp.Q3[ index(z ,y ,lx-1)] = D3.Q3[ index(z,y,0)]; D3_hlp.Q8[ index(z ,y_n,lx-1)] = D3.Q8[ index(z,y,0)]; D3_hlp.Q9[ index(z ,y_s,lx-1)] = D3.Q9[ index(z,y,0)]; D3_hlp.Q12[index(z_r,y ,lx-1)] = D3.Q12[index(z,y,0)]; D3_hlp.Q13[index(z_l,y ,lx-1)] = D3.Q13[index(z,y,0)]; //tis deksies densities tou teleutaiou slice tis metaferei sto slice 0 D3_hlp.Q1[ index(z ,y ,0)] = D3.Q1[ index(z,y,lx-1)]; D3_hlp.Q7[ index(z ,y_n,0)] = D3.Q7[ index(z,y,lx-1)]; D3_hlp.Q10[index(z ,y_s,0)] = D3.Q10[index(z,y,lx-1)]; D3_hlp.Q11[index(z_r,y ,0)] = D3.Q11[index(z,y,lx-1)]; D3_hlp.Q14[index(z_l,y ,0)] = D3.Q14[index(z,y,lx-1)]; // gia x=lx-1: the densities 0, 2,4,5,6,15,16,17,18 // aplws 8a metadw8oune // "ka8eta" sto slice kai de 8a ginoun propagate se // alla slices (tuxainei na voleuei auto) // auto to kommati exei HDH ginei pio panw! // // // during streaming some of the indices at // (fixed) x=lx-1, y=ly-1, do not get updated. // it doesn't matter since the last slice on x is not useful. } }*/ #ifdef DEBUG cout << " #LBM streaming OK!" << endl; #endif } void LBM::streaming_first_part(){ /*Propagate fluid densities to their next neighbour nodes */ /*c c.......density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours. 
c*/ int x,y,z; int x_e/*east*/,x_w/*west*/; int y_n/*north*/,y_s/*south*/; int z_l/*left*/,z_r/*right*/; //todo rwta ton Dirk pws to kanei auto to vhma for (z = 0 ; z< lz ; ++z){ z_l = (z+1)%lz; //1 8esh meta to trexon x z_r = (z+lz-1) %( lz) ; for (y = 0; y< ly; ++y){ y_n = (y+1)%ly; //1 8esh meta to trexon y y_s = (y+ly-1) %( ly) ; for (x = 0; x< lx; ++x){ x_e = (x+1)%lx; //1 8esh meta to trexon z x_w = (x+lx-1) %( lx) ; //regular streaming process /* .........density propagation .........zero: just copy */ D3_hlp.Q0[index(z,y,x)]=D3.Q0[index(z,y,x)]; // node_hlp_grid[index4D(z,y,x,0)]=node_grid[index4D(z,y,x,0)]; //.........in the x,y and z directions D3_hlp.Q1[index(z ,y ,x_e)] = D3.Q1[index(z,y,x)]; D3_hlp.Q2[index(z ,y_n,x )] = D3.Q2[index(z,y,x)]; D3_hlp.Q3[index(z ,y ,x_w)] = D3.Q3[index(z,y,x)]; D3_hlp.Q4[index(z ,y_s,x )] = D3.Q4[index(z,y,x)]; D3_hlp.Q5[index(z_l,y ,x )] = D3.Q5[index(z,y,x)]; D3_hlp.Q6[index(z_r,y ,x )] = D3.Q6[index(z,y,x)]; //......... in the x,y diagonals D3_hlp.Q7[ index(z ,y_n,x_e)] = D3.Q7[ index(z,y,x)]; D3_hlp.Q8[ index(z ,y_n,x_w)] = D3.Q8[ index(z,y,x)]; D3_hlp.Q9[ index(z ,y_s,x_w)] = D3.Q9[ index(z,y,x)]; D3_hlp.Q10[index(z ,y_s,x_e)] = D3.Q10[index(z,y,x)]; //......... in the x,z diagonals D3_hlp.Q11[index(z_r,y ,x_e)] = D3.Q11[index(z,y,x)]; D3_hlp.Q12[index(z_r,y ,x_w)] = D3.Q12[index(z,y,x)]; D3_hlp.Q13[index(z_l,y ,x_w)] = D3.Q13[index(z,y,x)]; D3_hlp.Q14[index(z_l,y ,x_e)] = D3.Q14[index(z,y,x)]; //......... in the y,z diagonals D3_hlp.Q15[index(z_l,y_n,x )] = D3.Q15[index(z,y,x)]; D3_hlp.Q16[index(z_r,y_n,x )] = D3.Q16[index(z,y,x)]; D3_hlp.Q17[index(z_r,y_s,x )] = D3.Q17[index(z,y,x)]; D3_hlp.Q18[index(z_l,y_s,x )] = D3.Q18[index(z,y,x)]; }//z-loop }//y loop }//x loop #ifdef DEBUG cout << " #LBM streaming OK!" << endl; #endif } void LBM::streaming_last_part(){ /*Propagate fluid densities to their next neighbour nodes */ /*c c.......density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours. c*/ int y,z; for (z = 0 ; z< lz ; ++z){ //loop for x=0 and x=lx-1!!!! 
(first and last slice) for (y = 0; y< ly; ++y){ //toslice 0 antigrafetai sto 0 D3_hlp.Q0[index(z,y,0)]=D3.Q0[index(z,y,0)]; D3_hlp.Q1[index(z,y,0)]=D3.Q1[index(z,y,0)]; D3_hlp.Q2[index(z,y,0)]=D3.Q2[index(z,y,0)]; D3_hlp.Q3[index(z,y,0)]=D3.Q3[index(z,y,0)]; D3_hlp.Q4[index(z,y,0)]=D3.Q4[index(z,y,0)]; D3_hlp.Q5[index(z,y,0)]=D3.Q5[index(z,y,0)]; D3_hlp.Q6[index(z,y,0)]=D3.Q6[index(z,y,0)]; D3_hlp.Q7[index(z,y,0)]=D3.Q7[index(z,y,0)]; D3_hlp.Q8[index(z,y,0)]=D3.Q8[index(z,y,0)]; D3_hlp.Q9[index(z,y,0)]=D3.Q9[index(z,y,0)]; D3_hlp.Q10[index(z,y,0)]=D3.Q10[index(z,y,0)]; D3_hlp.Q11[index(z,y,0)]=D3.Q11[index(z,y,0)]; D3_hlp.Q12[index(z,y,0)]=D3.Q12[index(z,y,0)]; D3_hlp.Q13[index(z,y,0)]=D3.Q13[index(z,y,0)]; D3_hlp.Q14[index(z,y,0)]=D3.Q14[index(z,y,0)]; D3_hlp.Q15[index(z,y,0)]=D3.Q15[index(z,y,0)]; D3_hlp.Q16[index(z,y,0)]=D3.Q16[index(z,y,0)]; D3_hlp.Q17[index(z,y,0)]=D3.Q17[index(z,y,0)]; D3_hlp.Q18[index(z,y,0)]=D3.Q18[index(z,y,0)]; //at x= lx I set the incomming density as the one (equilibrum) calculated after the collision D3_hlp.Q3[index(z,y,lx-1)] = D3.Q3[index(z,y,lx-1)]; D3_hlp.Q8[index(z,y,lx-1)] = D3.Q8[index(z,y,lx-1)] ; D3_hlp.Q9[index(z,y,lx-1)] = D3.Q9[index(z,y,lx-1)] ; D3_hlp.Q12[index(z,y,lx-1)] = D3.Q12[index(z,y,lx-1)] ; D3_hlp.Q13[index(z,y,lx-1)] = D3.Q13[index(z,y,lx-1)] ; // gia x=lx-1: the densities 0, 2,4,5,6,15,16,17,18 // aplws 8a metadw8oune // "ka8eta" sto slice kai de 8a ginoun propagate se // alla slices (tuxainei na voleuei auto) // auto to kommati exei HDH ginei pio panw! // // // during streaming some of the indices at // (fixed) x=lx-1, y=ly-1, do not get updated. // it doesn't matter since the last slice on x is not useful. } } } __global__ void streaming_kernel(int lx, int ly, int lz, FLOATING reynolds, FLOATING nu, FLOATING r_small, FLOATING t_0, FLOATING t_1, FLOATING t_2, FLOATING c_squ, lattice D3, lattice D3_hlp){ /*Propagate fluid densities to their next neighbour nodes */ /*c c.......density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours. c*/ int x,y,z; int x_e/*east*/,x_w/*west*/; int y_n/*north*/,y_s/*south*/; int z_l/*left*/,z_r/*right*/; int rest; int end_of_memory=lz*ly*(lx); int tid=blockIdx.x*blockDim.x+threadIdx.x; z=(int) (tid/(ly*lx)); rest=tid-z; y=(int)(rest/lx); x=rest-y; //if(tid<end_of_memory){ if( z<lz and y<ly and x<lx){ //todo rwta ton Dirk pws to kanei auto to vhma // for (z = 0 ; z< lz ; ++z){ z_l = (z+1)%lz; //1 8esh meta to trexon x z_r = (z+lz-1) %( lz) ; // for (y = 0; y< ly; ++y){ y_n = (y+1)%ly; //1 8esh meta to trexon y y_s = (y+ly-1) %( ly) ; // for (x = 0; x< lx; ++x){ x_e = (x+1)%lx; //1 8esh meta to trexon z x_w = (x+lx-1) %( lx) ; //regular streaming process /* .........density propagation .........zero: just copy */ D3_hlp.Q0[index(z,y,x)]=D3.Q0[index(z,y,x)]; //node_hlp_grid[index4D(z,y,x,0)]=node_grid[index4D(z,y,x,0)]; //.........in the x,y and z directions D3_hlp.Q1[index(z ,y ,x_e)] = D3.Q1[index(z,y,x)]; D3_hlp.Q2[index(z ,y_n,x )] = D3.Q2[index(z,y,x)]; D3_hlp.Q3[index(z ,y ,x_w)] = D3.Q3[index(z,y,x)]; D3_hlp.Q4[index(z ,y_s,x )] = D3.Q4[index(z,y,x)]; D3_hlp.Q5[index(z_l,y ,x )] = D3.Q5[index(z,y,x)]; D3_hlp.Q6[index(z_r,y ,x )] = D3.Q6[index(z,y,x)]; //......... in the x,y diagonals D3_hlp.Q7[ index(z ,y_n,x_e)] = D3.Q7[ index(z,y,x)]; D3_hlp.Q8[ index(z ,y_n,x_w)] = D3.Q8[ index(z,y,x)]; D3_hlp.Q9[ index(z ,y_s,x_w)] = D3.Q9[ index(z,y,x)]; D3_hlp.Q10[index(z ,y_s,x_e)] = D3.Q10[index(z,y,x)]; //......... 
in the x,z diagonals D3_hlp.Q11[index(z_r,y ,x_e)] = D3.Q11[index(z,y,x)]; D3_hlp.Q12[index(z_r,y ,x_w)] = D3.Q12[index(z,y,x)]; D3_hlp.Q13[index(z_l,y ,x_w)] = D3.Q13[index(z,y,x)]; D3_hlp.Q14[index(z_l,y ,x_e)] = D3.Q14[index(z,y,x)]; //......... in the y,z diagonals D3_hlp.Q15[index(z_l,y_n,x )] = D3.Q15[index(z,y,x)]; D3_hlp.Q16[index(z_r,y_n,x )] = D3.Q16[index(z,y,x)]; D3_hlp.Q17[index(z_r,y_s,x )] = D3.Q17[index(z,y,x)]; D3_hlp.Q18[index(z_l,y_s,x )] = D3.Q18[index(z,y,x)]; // }//z-loop // }//y loop // }//x loop } if(tid<end_of_memory and x==0 ){ // for (z = 0 ; z< lz ; ++z){ // // // //loop for x=0 and x=lx-1!!!! (first and last slice) // for (y = 0; y< ly; ++y){ //toslice 0 antigrafetai sto 0 D3_hlp.Q0[index(z,y,0)]=D3.Q0[index(z,y,0)]; D3_hlp.Q1[index(z,y,0)]=D3.Q1[index(z,y,0)]; D3_hlp.Q2[index(z,y,0)]=D3.Q2[index(z,y,0)]; D3_hlp.Q3[index(z,y,0)]=D3.Q3[index(z,y,0)]; D3_hlp.Q4[index(z,y,0)]=D3.Q4[index(z,y,0)]; D3_hlp.Q5[index(z,y,0)]=D3.Q5[index(z,y,0)]; D3_hlp.Q6[index(z,y,0)]=D3.Q6[index(z,y,0)]; D3_hlp.Q7[index(z,y,0)]=D3.Q7[index(z,y,0)]; D3_hlp.Q8[index(z,y,0)]=D3.Q8[index(z,y,0)]; D3_hlp.Q9[index(z,y,0)]=D3.Q9[index(z,y,0)]; D3_hlp.Q10[index(z,y,0)]=D3.Q10[index(z,y,0)]; D3_hlp.Q11[index(z,y,0)]=D3.Q11[index(z,y,0)]; D3_hlp.Q12[index(z,y,0)]=D3.Q12[index(z,y,0)]; D3_hlp.Q13[index(z,y,0)]=D3.Q13[index(z,y,0)]; D3_hlp.Q14[index(z,y,0)]=D3.Q14[index(z,y,0)]; D3_hlp.Q15[index(z,y,0)]=D3.Q15[index(z,y,0)]; D3_hlp.Q16[index(z,y,0)]=D3.Q16[index(z,y,0)]; D3_hlp.Q17[index(z,y,0)]=D3.Q17[index(z,y,0)]; D3_hlp.Q18[index(z,y,0)]=D3.Q18[index(z,y,0)]; // gia x=lx-1: the densities 0, 2,4,5,6,15,16,17,18 // aplws 8a metadw8oune // "ka8eta" sto slice kai de 8a ginoun propagate se // alla slices (tuxainei na voleuei auto) // auto to kommati exei HDH ginei pio panw! // // // during streaming some of the indices at // (fixed) x=lx-1, y=ly-1, do not get updated. // it doesn't matter since the last slice on x is not useful. // } // } } if(tid<end_of_memory and x==lx-1 ){ //at x= lx I set the incomming density as the one (equilibrum) calculated after the collision D3_hlp.Q3[index(z,y,lx-1)] = D3.Q3[index(z,y,lx-1)]; D3_hlp.Q8[index(z,y,lx-1)] = D3.Q8[index(z,y,lx-1)] ; D3_hlp.Q9[index(z,y,lx-1)] = D3.Q9[index(z,y,lx-1)] ; D3_hlp.Q12[index(z,y,lx-1)] = D3.Q12[index(z,y,lx-1)] ; D3_hlp.Q13[index(z,y,lx-1)] = D3.Q13[index(z,y,lx-1)] ; } #ifdef DEBUG cout << " #LBM streaming OK!" 
<< endl; #endif } __global__ void streaming_kernel_single_threaded_p1(int lx, int ly, int lz, FLOATING *hlp_Q0, FLOATING *hlp_Q1, FLOATING *hlp_Q2, FLOATING *hlp_Q3, FLOATING *hlp_Q4, FLOATING *hlp_Q5, FLOATING *hlp_Q6, FLOATING *hlp_Q7, FLOATING *hlp_Q8, FLOATING *hlp_Q9, FLOATING *hlp_Q10, FLOATING *hlp_Q11, FLOATING *hlp_Q12, FLOATING *hlp_Q13, FLOATING *hlp_Q14, FLOATING *hlp_Q15, FLOATING *hlp_Q16, FLOATING *hlp_Q17, FLOATING *hlp_Q18, FLOATING *Q0, FLOATING *Q1, FLOATING *Q2, FLOATING *Q3, FLOATING *Q4, FLOATING *Q5, FLOATING *Q6, FLOATING *Q7, FLOATING *Q8, FLOATING *Q9, FLOATING *Q10, FLOATING *Q11, FLOATING *Q12, FLOATING *Q13, FLOATING *Q14, FLOATING *Q15, FLOATING *Q16, FLOATING *Q17, FLOATING *Q18){ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; // int x,y,z; int x_e/*east*/,x_w/*west*/; int y_n/*north*/,y_s/*south*/; int z_l/*left*/,z_r/*right*/; // __shared__ FLOATING shared_buffer[64]; if(tid<end_of_memory){ z_l = (z+1)%lz; //1 8esh meta to trexon x z_r = (z+lz-1) %( lz) ; // for (y = 0; y< ly; ++y){ y_n = (y+1)%ly; //1 8esh meta to trexon y y_s = (y+ly-1) %( ly) ; // for (x = 0; x< lx; ++x){ x_e = (x+1)%lx; //1 8esh meta to trexon z x_w = (x+lx-1) %( lx) ; //regular streaming process // .........density propagation // .........zero: just copy hlp_Q0[index(z,y,x)]=Q0[index(z,y,x)]; // node_hlp_grid[index4D(z,y,x,0)]=node_grid[index4D(z,y,x,0)]; //.........in the x,y and z directions hlp_Q1[index(z ,y ,x_e)] = Q1[index(z,y,x)]; hlp_Q2[index(z ,y_n,x )] = Q2[index(z,y,x)]; hlp_Q3[index(z ,y ,x_w)] = Q3[index(z,y,x)]; hlp_Q4[index(z ,y_s,x )] = Q4[index(z,y,x)]; hlp_Q5[index(z_l,y ,x )] = Q5[index(z,y,x)]; hlp_Q6[index(z_r,y ,x )] = Q6[index(z,y,x)]; //......... in the x,y diagonals hlp_Q7[ index(z ,y_n,x_e)] = Q7[ index(z,y,x)]; hlp_Q8[ index(z ,y_n,x_w)] = Q8[ index(z,y,x)]; hlp_Q9[ index(z ,y_s,x_w)] = Q9[ index(z,y,x)]; hlp_Q10[index(z ,y_s,x_e)] = Q10[index(z,y,x)]; //......... in the x,z diagonals hlp_Q11[index(z_r,y ,x_e)] = Q11[index(z,y,x)]; hlp_Q12[index(z_r,y ,x_w)] = Q12[index(z,y,x)]; hlp_Q13[index(z_l,y ,x_w)] = Q13[index(z,y,x)]; hlp_Q14[index(z_l,y ,x_e)] = Q14[index(z,y,x)]; //......... 
in the y,z diagonals hlp_Q15[index(z_l,y_n,x )] = Q15[index(z,y,x)]; hlp_Q16[index(z_r,y_n,x )] = Q16[index(z,y,x)]; hlp_Q17[index(z_r,y_s,x )] = Q17[index(z,y,x)]; hlp_Q18[index(z_l,y_s,x )] = Q18[index(z,y,x)]; } } __global__ void streaming_kernel_single_threaded_p1_shared(int lx, int ly, int lz, FLOATING *hlp_Q0, FLOATING *hlp_Q1, FLOATING *hlp_Q2, FLOATING *hlp_Q3, FLOATING *hlp_Q4, FLOATING *hlp_Q5, FLOATING *hlp_Q6, FLOATING *hlp_Q7, FLOATING *hlp_Q8, FLOATING *hlp_Q9, FLOATING *hlp_Q10, FLOATING *hlp_Q11, FLOATING *hlp_Q12, FLOATING *hlp_Q13, FLOATING *hlp_Q14, FLOATING *hlp_Q15, FLOATING *hlp_Q16, FLOATING *hlp_Q17, FLOATING *hlp_Q18, FLOATING *Q0, FLOATING *Q1, FLOATING *Q2, FLOATING *Q3, FLOATING *Q4, FLOATING *Q5, FLOATING *Q6, FLOATING *Q7, FLOATING *Q8, FLOATING *Q9, FLOATING *Q10, FLOATING *Q11, FLOATING *Q12, FLOATING *Q13, FLOATING *Q14, FLOATING *Q15, FLOATING *Q16, FLOATING *Q17, FLOATING *Q18){ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; // int x,y,z; int x_e/*east*/,x_w/*west*/; int y_n/*north*/,y_s/*south*/; int z_l/*left*/,z_r/*right*/; extern __shared__ FLOATING shared_buffer[]; FLOATING *shared_Q0=shared_buffer; FLOATING *shared_Q1 = &shared_Q0[blockDim.x]; FLOATING *shared_Q2 = &shared_Q1[blockDim.x]; FLOATING *shared_Q3 = &shared_Q2[blockDim.x]; FLOATING *shared_Q4 = &shared_Q3[blockDim.x]; FLOATING *shared_Q5 = &shared_Q4[blockDim.x]; FLOATING *shared_Q6 = &shared_Q5[blockDim.x]; FLOATING *shared_Q7 = &shared_Q6[blockDim.x]; FLOATING *shared_Q8 = &shared_Q7[blockDim.x]; FLOATING *shared_Q9 = &shared_Q8[blockDim.x]; FLOATING *shared_Q10 = &shared_Q9[blockDim.x]; FLOATING *shared_Q11 = &shared_Q10[blockDim.x]; FLOATING *shared_Q12 = &shared_Q11[blockDim.x]; FLOATING *shared_Q13 = &shared_Q12[blockDim.x]; FLOATING *shared_Q14 = &shared_Q13[blockDim.x]; FLOATING *shared_Q15 = &shared_Q14[blockDim.x]; FLOATING *shared_Q16 = &shared_Q15[blockDim.x]; FLOATING *shared_Q17 = &shared_Q16[blockDim.x]; FLOATING *shared_Q18 = &shared_Q17[blockDim.x]; shared_Q0[threadIdx.x]=Q0[index(z,y,x)]; shared_Q1[threadIdx.x]=Q1[index(z,y,x)]; shared_Q2[threadIdx.x]=Q2[index(z,y,x)]; shared_Q3[threadIdx.x]=Q3[index(z,y,x)]; shared_Q4[threadIdx.x]=Q4[index(z,y,x)]; shared_Q5[threadIdx.x]=Q5[index(z,y,x)]; shared_Q6[threadIdx.x]=Q6[index(z,y,x)]; shared_Q7[threadIdx.x]=Q7[index(z,y,x)]; shared_Q8[threadIdx.x]=Q8[index(z,y,x)]; shared_Q9[threadIdx.x]=Q9[index(z,y,x)]; shared_Q10[threadIdx.x]=Q10[index(z,y,x)]; shared_Q11[threadIdx.x]=Q11[index(z,y,x)]; shared_Q12[threadIdx.x]=Q12[index(z,y,x)]; shared_Q13[threadIdx.x]=Q13[index(z,y,x)]; shared_Q14[threadIdx.x]=Q14[index(z,y,x)]; shared_Q15[threadIdx.x]=Q15[index(z,y,x)]; shared_Q16[threadIdx.x]=Q16[index(z,y,x)]; shared_Q17[threadIdx.x]=Q17[index(z,y,x)]; shared_Q18[threadIdx.x]=Q18[index(z,y,x)]; __syncthreads(); if(tid<end_of_memory){ z_l = (z+1)%lz; //1 8esh meta to trexon x z_r = (z+lz-1) %( lz) ; // for (y = 0; y< ly; ++y){ y_n = (y+1)%ly; //1 8esh meta to trexon y y_s = (y+ly-1) %( ly) ; // for (x = 0; x< lx; ++x){ x_e = (x+1)%lx; //1 8esh meta to trexon z x_w = (x+lx-1) %( lx) ; //regular streaming process // .........density propagation // .........zero: just copy hlp_Q0[index(z,y,x)]=shared_Q0[threadIdx.x]; // node_hlp_grid[index4D(z,y,x,0)]=node_grid[index4D(z,y,x,0)]; //.........in the x,y and z directions hlp_Q1[index(z ,y ,x_e)] =shared_Q1[threadIdx.x]; hlp_Q2[index(z ,y_n,x )] =shared_Q2[threadIdx.x]; hlp_Q3[index(z ,y 
,x_w)] =shared_Q3[threadIdx.x]; hlp_Q4[index(z ,y_s,x )] =shared_Q4[threadIdx.x]; hlp_Q5[index(z_l,y ,x )] =shared_Q5[threadIdx.x]; hlp_Q6[index(z_r,y ,x )] =shared_Q6[threadIdx.x]; //......... in the x,y diagonals hlp_Q7[ index(z ,y_n,x_e)] =shared_Q7[ threadIdx.x]; hlp_Q8[ index(z ,y_n,x_w)] =shared_Q8[ threadIdx.x]; hlp_Q9[ index(z ,y_s,x_w)] =shared_Q9[ threadIdx.x]; hlp_Q10[index(z ,y_s,x_e)] =shared_Q10[threadIdx.x]; //......... in the x,z diagonals hlp_Q11[index(z_r,y ,x_e)] =shared_Q11[threadIdx.x]; hlp_Q12[index(z_r,y ,x_w)] =shared_Q12[threadIdx.x]; hlp_Q13[index(z_l,y ,x_w)] =shared_Q13[threadIdx.x]; hlp_Q14[index(z_l,y ,x_e)] =shared_Q14[threadIdx.x]; //......... in the y,z diagonals hlp_Q15[index(z_l,y_n,x )] =shared_Q15[threadIdx.x]; hlp_Q16[index(z_r,y_n,x )] =shared_Q16[threadIdx.x]; hlp_Q17[index(z_r,y_s,x )] =shared_Q17[threadIdx.x]; hlp_Q18[index(z_l,y_s,x )] =shared_Q18[threadIdx.x]; } } __global__ void streaming_kernel_first_part_Q0(int lx, int ly, int lz, const FLOATING *Q0, FLOATING *hlp_Q0){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q0[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q0[index(z,y,x)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q1(int lx, int ly, int lz, const FLOATING *Q1, FLOATING *hlp_Q1){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int x_e/*east*/ = (x+1)%lx; //1 8esh meta to trexon x extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q1[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q1[index(z ,y ,x_e)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q2(int lx, int ly, int lz, const FLOATING *Q2, FLOATING *hlp_Q2){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int y_n/*north*/ = (y+1)%ly; //1 8esh meta to trexon y extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q2[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q2[index(z ,y_n,x )] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q3(int lx, int ly, int lz, const FLOATING *Q3, FLOATING *hlp_Q3){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int x_w/*west*/ = (x+lx-1) %( lx) ; extern __shared__ FLOATING 
shared_buffer[]; shared_buffer[threadIdx.x]=Q3[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q3[index(z ,y ,x_w)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q4(int lx, int ly, int lz, const FLOATING *Q4, FLOATING *hlp_Q4){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int y_s/*south*/ = (y+ly-1) %( ly) ; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q4[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q4[index(z ,y_s,x )] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q5(int lx, int ly, int lz, const FLOATING *Q5, FLOATING *hlp_Q5){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_l/*left*/ = (z+1)%lz; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q5[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q5[index(z_l,y ,x )] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q6(int lx, int ly, int lz, const FLOATING *Q6, FLOATING *hlp_Q6){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_r/*right*/ = (z+lz-1) %( lz) ; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q6[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q6[index(z_r,y ,x )] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q7(int lx, int ly, int lz, const FLOATING *Q7, FLOATING *hlp_Q7){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int y_n/*north*/ = (y+1)%ly; int x_e/*east*/ = (x+1)%lx; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q7[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q7[ index(z ,y_n,x_e)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q8(int lx, int ly, int lz, const FLOATING *Q8, FLOATING *hlp_Q8){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int y_n/*north*/ = (y+1)%ly; int x_w/*west*/ = (x+lx-1) %( lx) ; extern __shared__ FLOATING 
shared_buffer[]; shared_buffer[threadIdx.x]=Q8[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q8[ index(z ,y_n,x_w)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q9(int lx, int ly, int lz, const FLOATING *Q9, FLOATING *hlp_Q9){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int y_s/*south*/ = (y+ly-1) %( ly) ; int x_w/*west*/ = (x+lx-1) %( lx) ; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q9[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q9[ index(z ,y_s,x_w)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q10(int lx, int ly, int lz, const FLOATING *Q10, FLOATING *hlp_Q10){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int y_s/*south*/ = (y+ly-1) %( ly) ; int x_e/*east*/ = (x+1)%lx; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q10[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q10[index(z ,y_s,x_e)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q11(int lx, int ly, int lz, const FLOATING *Q11, FLOATING *hlp_Q11){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_r/*right*/ = (z+lz-1) %( lz) ; int x_e/*east*/ = (x+1)%lx; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q11[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q11[index(z_r,y ,x_e)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q12(int lx, int ly, int lz, const FLOATING *Q12, FLOATING *hlp_Q12){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_r/*right*/ = (z+lz-1) %( lz) ; int x_w/*west*/ = (x+lx-1) %( lx) ; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q12[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q12[index(z_r,y ,x_w)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q13(int lx, int ly, int lz, const FLOATING *Q13, FLOATING *hlp_Q13){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int 
y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_l/*left*/ = (z+1)%lz; int x_w/*west*/ = (x+lx-1) %( lx) ; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q13[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q13[index(z_l,y ,x_w)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q14(int lx, int ly, int lz, const FLOATING *Q14, FLOATING *hlp_Q14){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_l/*left*/ = (z+1)%lz; int x_e/*east*/ = (x+1)%lx; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q14[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q14[index(z_l,y ,x_e)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q15(int lx, int ly, int lz, const FLOATING *Q15, FLOATING *hlp_Q15){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_l/*left*/ = (z+1)%lz; int y_n/*north*/ = (y+1)%ly; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q15[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q15[index(z_l,y_n,x )] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q16(int lx, int ly, int lz, const FLOATING *Q16, FLOATING *hlp_Q16){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_r /*right*/ = (z+lz-1) %( lz) ; int y_n /*north*/= (y+1)%ly; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q16[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q16[index(z_r,y_n,x )] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q17(int lx, int ly, int lz, const FLOATING *Q17, FLOATING *hlp_Q17){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_r/*right*/ = (z+lz-1) %( lz) ; int y_s/*south*/ = (y+ly-1) %( ly) ; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q17[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q17[index(z_r,y_s,x )] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q18(int lx, int ly, int lz, const FLOATING *Q18, FLOATING *hlp_Q18){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next 
neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_l/*left*/ = (z+1)%lz; int y_s/*south*/ = (y+ly-1) %( ly) ; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q18[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q18[index(z_l,y_s,x )] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_single_threaded_p2_v2(int lx, int ly, int lz, FLOATING *hlp_Q0, FLOATING *hlp_Q1, FLOATING *hlp_Q2, FLOATING *hlp_Q3, FLOATING *hlp_Q4, FLOATING *hlp_Q5, FLOATING *hlp_Q6, FLOATING *hlp_Q7, FLOATING *hlp_Q8, FLOATING *hlp_Q9, FLOATING *hlp_Q10, FLOATING *hlp_Q11, FLOATING *hlp_Q12, FLOATING *hlp_Q13, FLOATING *hlp_Q14, FLOATING *hlp_Q15, FLOATING *hlp_Q16, FLOATING *hlp_Q17, FLOATING *hlp_Q18, FLOATING *Q0, FLOATING *Q1, FLOATING *Q2, FLOATING *Q3, FLOATING *Q4, FLOATING *Q5, FLOATING *Q6, FLOATING *Q7, FLOATING *Q8, FLOATING *Q9, FLOATING *Q10, FLOATING *Q11, FLOATING *Q12, FLOATING *Q13, FLOATING *Q14, FLOATING *Q15, FLOATING *Q16, FLOATING *Q17, FLOATING *Q18){ int tid=blockIdx.x*blockDim.x+threadIdx.x; int z=tid/ly; int y=tid-z*ly; if(tid<ly*lz){ // for (z = 0 ; z< lz ; ++z){ //loop for x=0 and x=lx-1!!!! (first and last slice) // for (y = 0; y< ly; ++y){ //toslice 0 antigrafetai sto 0 hlp_Q0[index(z,y,0)]=Q0[index(z,y,0)]; hlp_Q1[index(z,y,0)]=Q1[index(z,y,0)]; hlp_Q2[index(z,y,0)]=Q2[index(z,y,0)]; hlp_Q3[index(z,y,0)]=Q3[index(z,y,0)]; hlp_Q4[index(z,y,0)]=Q4[index(z,y,0)]; hlp_Q5[index(z,y,0)]=Q5[index(z,y,0)]; hlp_Q6[index(z,y,0)]=Q6[index(z,y,0)]; hlp_Q7[index(z,y,0)]=Q7[index(z,y,0)]; hlp_Q8[index(z,y,0)]=Q8[index(z,y,0)]; hlp_Q9[index(z,y,0)]=Q9[index(z,y,0)]; hlp_Q10[index(z,y,0)]=Q10[index(z,y,0)]; hlp_Q11[index(z,y,0)]=Q11[index(z,y,0)]; hlp_Q12[index(z,y,0)]=Q12[index(z,y,0)]; hlp_Q13[index(z,y,0)]=Q13[index(z,y,0)]; hlp_Q14[index(z,y,0)]=Q14[index(z,y,0)]; hlp_Q15[index(z,y,0)]=Q15[index(z,y,0)]; hlp_Q16[index(z,y,0)]=Q16[index(z,y,0)]; hlp_Q17[index(z,y,0)]=Q17[index(z,y,0)]; hlp_Q18[index(z,y,0)]=Q18[index(z,y,0)]; //at x= lx I set the incomming density as the one (equilibrum) calculated after the collision hlp_Q3[index(z,y,lx-1)] = Q3[index(z,y,lx-1)]; hlp_Q8[index(z,y,lx-1)] = Q8[index(z,y,lx-1)] ; hlp_Q9[index(z,y,lx-1)] = Q9[index(z,y,lx-1)] ; hlp_Q12[index(z,y,lx-1)] = Q12[index(z,y,lx-1)] ; hlp_Q13[index(z,y,lx-1)] = Q13[index(z,y,lx-1)] ; // gia x=lx-1: the densities 0, 2,4,5,6,15,16,17,18 // aplws 8a metadw8oune // "ka8eta" sto slice kai de 8a ginoun propagate se // alla slices (tuxainei na voleuei auto) // auto to kommati exei HDH ginei pio panw! // // // during streaming some of the indices at // (fixed) x=lx-1, y=ly-1, do not get updated. // it doesn't matter since the last slice on x is not useful. 
} } void LBM::cuda_streaming(){ if(data_location==CPU) copy_data_from_host_to_device(); dim3 threads_type2(threads_for_streaming_collision_and_relaxation,1,1); dim3 grid_type2(blocks_for_streaming_collision_and_relaxation,1,1); hipLaunchKernelGGL(( streaming_kernel_first_part_Q0), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q0, D3_hlp_d.Q0); hipLaunchKernelGGL(( streaming_kernel_first_part_Q1), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q1, D3_hlp_d.Q1); hipLaunchKernelGGL(( streaming_kernel_first_part_Q2), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q2, D3_hlp_d.Q2); hipLaunchKernelGGL(( streaming_kernel_first_part_Q3), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q3, D3_hlp_d.Q3); hipLaunchKernelGGL(( streaming_kernel_first_part_Q4), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q4, D3_hlp_d.Q4); hipLaunchKernelGGL(( streaming_kernel_first_part_Q5), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q5, D3_hlp_d.Q5); hipLaunchKernelGGL(( streaming_kernel_first_part_Q6), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q6, D3_hlp_d.Q6); hipLaunchKernelGGL(( streaming_kernel_first_part_Q7), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q7, D3_hlp_d.Q7); hipLaunchKernelGGL(( streaming_kernel_first_part_Q8), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q8, D3_hlp_d.Q8); hipLaunchKernelGGL(( streaming_kernel_first_part_Q9), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q9, D3_hlp_d.Q9); hipLaunchKernelGGL(( streaming_kernel_first_part_Q10), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q10, D3_hlp_d.Q10); hipLaunchKernelGGL(( streaming_kernel_first_part_Q11), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q11, D3_hlp_d.Q11); hipLaunchKernelGGL(( streaming_kernel_first_part_Q12), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q12, D3_hlp_d.Q12); hipLaunchKernelGGL(( streaming_kernel_first_part_Q13), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q13, D3_hlp_d.Q13); hipLaunchKernelGGL(( streaming_kernel_first_part_Q14), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q14, D3_hlp_d.Q14); hipLaunchKernelGGL(( streaming_kernel_first_part_Q15), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q15, D3_hlp_d.Q15); hipLaunchKernelGGL(( streaming_kernel_first_part_Q16), 
dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q16, D3_hlp_d.Q16); hipLaunchKernelGGL(( streaming_kernel_first_part_Q17), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q17, D3_hlp_d.Q17); hipLaunchKernelGGL(( streaming_kernel_first_part_Q18), dim3(grid_type2), dim3(threads_type2), size_of_allocated_shared_memory_for_streaming_collision_and_relaxation, 0, lx, ly, lz, D3_d.Q18, D3_hlp_d.Q18); hipDeviceSynchronize(); if(data_location==CPU) copy_data_from_host_to_device(); int n_of_threads=64; int n_of_blocks=(lz*ly)/n_of_threads; if ( (lattice_nodes%n_of_threads)!=0 ) ++n_of_blocks; dim3 threads_type3(n_of_threads,1,1); dim3 grid_type3(n_of_blocks,1,1); hipLaunchKernelGGL(( streaming_kernel_single_threaded_p2_v2), dim3(grid_type3), dim3(threads_type3), 0, 0, lx,ly,lz, D3_hlp_d.Q0, D3_hlp_d.Q1, D3_hlp_d.Q2, D3_hlp_d.Q3, D3_hlp_d.Q4, D3_hlp_d.Q5, D3_hlp_d.Q6, D3_hlp_d.Q7, D3_hlp_d.Q8, D3_hlp_d.Q9, D3_hlp_d.Q10, D3_hlp_d.Q11, D3_hlp_d.Q12, D3_hlp_d.Q13, D3_hlp_d.Q14, D3_hlp_d.Q15, D3_hlp_d.Q16, D3_hlp_d.Q17, D3_hlp_d.Q18, D3_d.Q0, D3_d.Q1, D3_d.Q2, D3_d.Q3, D3_d.Q4, D3_d.Q5, D3_d.Q6, D3_d.Q7, D3_d.Q8, D3_d.Q9, D3_d.Q10, D3_d.Q11, D3_d.Q12, D3_d.Q13, D3_d.Q14, D3_d.Q15, D3_d.Q16, D3_d.Q17, D3_d.Q18); hipDeviceSynchronize(); }
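The per-direction streaming kernels above stage exactly one FLOATING per thread in dynamic shared memory and assign one thread to one lattice node, so the launch values used in cuda_streaming() must satisfy threads*blocks >= lx*ly*lz with threads*sizeof(FLOATING) bytes of shared memory per block. The members threads_for_streaming_collision_and_relaxation, blocks_for_streaming_collision_and_relaxation and size_of_allocated_shared_memory_for_streaming_collision_and_relaxation are set elsewhere in the class; the helper below is only a sketch, under that assumption, of how a compatible configuration could be derived, and is not part of the original sources.

// Hypothetical helper (not in the original class): derive a launch
// configuration compatible with the one-node-per-thread streaming kernels.
// FLOATING is the scalar type used by the lattice arrays.
struct StreamingLaunchConfig {
    int threads;          // threads per block
    int blocks;           // blocks in the grid
    size_t shared_bytes;  // dynamic shared memory per block
};

static StreamingLaunchConfig make_streaming_launch_config(int lx, int ly, int lz,
                                                          int threads = 64) {
    StreamingLaunchConfig cfg;
    cfg.threads = threads;
    const int nodes = lx * ly * lz;                          // one thread per lattice node
    cfg.blocks = (nodes + threads - 1) / threads;            // round up to cover every node
    cfg.shared_bytes = (size_t)threads * sizeof(FLOATING);   // one staged value per thread
    return cfg;
}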
89754b4a43aa8b0fde9dc94b7f5b547c7c3131b3.cu
#include "global_defines.cuh" void LBM::streaming(){ /*Propagate fluid densities to their next neighbour nodes */ /*c c.......density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours. c*/ if(data_location==GPU) copy_data_from_device_to_host(); int x,y,z; int x_e/*east*/,x_w/*west*/; int y_n/*north*/,y_s/*south*/; int z_l/*left*/,z_r/*right*/; //todo rwta ton Dirk pws to kanei auto to vhma for (z = 0 ; z< lz ; ++z){ z_l = (z+1)%lz; //1 8esh meta to trexon x z_r = (z+lz-1) %( lz) ; for (y = 0; y< ly; ++y){ y_n = (y+1)%ly; //1 8esh meta to trexon y y_s = (y+ly-1) %( ly) ; for (x = 0; x< lx; ++x){ x_e = (x+1)%lx; //1 8esh meta to trexon z x_w = (x+lx-1) %( lx) ; //regular streaming process /* .........density propagation .........zero: just copy */ D3_hlp.Q0[index(z,y,x)]=D3.Q0[index(z,y,x)]; //node_hlp_grid[index4D(z,y,x,0)]=node_grid[index4D(z,y,x,0)]; //.........in the x,y and z directions D3_hlp.Q1[index(z ,y ,x_e)] = D3.Q1[index(z,y,x)]; D3_hlp.Q2[index(z ,y_n,x )] = D3.Q2[index(z,y,x)]; D3_hlp.Q3[index(z ,y ,x_w)] = D3.Q3[index(z,y,x)]; D3_hlp.Q4[index(z ,y_s,x )] = D3.Q4[index(z,y,x)]; D3_hlp.Q5[index(z_l,y ,x )] = D3.Q5[index(z,y,x)]; D3_hlp.Q6[index(z_r,y ,x )] = D3.Q6[index(z,y,x)]; //......... in the x,y diagonals D3_hlp.Q7[ index(z ,y_n,x_e)] = D3.Q7[ index(z,y,x)]; D3_hlp.Q8[ index(z ,y_n,x_w)] = D3.Q8[ index(z,y,x)]; D3_hlp.Q9[ index(z ,y_s,x_w)] = D3.Q9[ index(z,y,x)]; D3_hlp.Q10[index(z ,y_s,x_e)] = D3.Q10[index(z,y,x)]; //......... in the x,z diagonals D3_hlp.Q11[index(z_r,y ,x_e)] = D3.Q11[index(z,y,x)]; D3_hlp.Q12[index(z_r,y ,x_w)] = D3.Q12[index(z,y,x)]; D3_hlp.Q13[index(z_l,y ,x_w)] = D3.Q13[index(z,y,x)]; D3_hlp.Q14[index(z_l,y ,x_e)] = D3.Q14[index(z,y,x)]; //......... in the y,z diagonals D3_hlp.Q15[index(z_l,y_n,x )] = D3.Q15[index(z,y,x)]; D3_hlp.Q16[index(z_r,y_n,x )] = D3.Q16[index(z,y,x)]; D3_hlp.Q17[index(z_r,y_s,x )] = D3.Q17[index(z,y,x)]; D3_hlp.Q18[index(z_l,y_s,x )] = D3.Q18[index(z,y,x)]; }//z-loop }//y loop }//x loop for (z = 0 ; z< lz ; ++z){ //loop for x=0 and x=lx-1!!!! 
(first and last slice) for (y = 0; y< ly; ++y){ //toslice 0 antigrafetai sto 0 D3_hlp.Q0[index(z,y,0)]=D3.Q0[index(z,y,0)]; D3_hlp.Q1[index(z,y,0)]=D3.Q1[index(z,y,0)]; D3_hlp.Q2[index(z,y,0)]=D3.Q2[index(z,y,0)]; D3_hlp.Q3[index(z,y,0)]=D3.Q3[index(z,y,0)]; D3_hlp.Q4[index(z,y,0)]=D3.Q4[index(z,y,0)]; D3_hlp.Q5[index(z,y,0)]=D3.Q5[index(z,y,0)]; D3_hlp.Q6[index(z,y,0)]=D3.Q6[index(z,y,0)]; D3_hlp.Q7[index(z,y,0)]=D3.Q7[index(z,y,0)]; D3_hlp.Q8[index(z,y,0)]=D3.Q8[index(z,y,0)]; D3_hlp.Q9[index(z,y,0)]=D3.Q9[index(z,y,0)]; D3_hlp.Q10[index(z,y,0)]=D3.Q10[index(z,y,0)]; D3_hlp.Q11[index(z,y,0)]=D3.Q11[index(z,y,0)]; D3_hlp.Q12[index(z,y,0)]=D3.Q12[index(z,y,0)]; D3_hlp.Q13[index(z,y,0)]=D3.Q13[index(z,y,0)]; D3_hlp.Q14[index(z,y,0)]=D3.Q14[index(z,y,0)]; D3_hlp.Q15[index(z,y,0)]=D3.Q15[index(z,y,0)]; D3_hlp.Q16[index(z,y,0)]=D3.Q16[index(z,y,0)]; D3_hlp.Q17[index(z,y,0)]=D3.Q17[index(z,y,0)]; D3_hlp.Q18[index(z,y,0)]=D3.Q18[index(z,y,0)]; //at x= lx I set the incomming density as the one (equilibrum) calculated after the collision D3_hlp.Q3[index(z,y,lx-1)] = D3.Q3[index(z,y,lx-1)]; D3_hlp.Q8[index(z,y,lx-1)] = D3.Q8[index(z,y,lx-1)] ; D3_hlp.Q9[index(z,y,lx-1)] = D3.Q9[index(z,y,lx-1)] ; D3_hlp.Q12[index(z,y,lx-1)] = D3.Q12[index(z,y,lx-1)] ; D3_hlp.Q13[index(z,y,lx-1)] = D3.Q13[index(z,y,lx-1)] ; // gia x=lx-1: the densities 0, 2,4,5,6,15,16,17,18 // aplws 8a metadw8oune // "ka8eta" sto slice kai de 8a ginoun propagate se // alla slices (tuxainei na voleuei auto) // auto to kommati exei HDH ginei pio panw! // // // during streaming some of the indices at // (fixed) x=lx-1, y=ly-1, do not get updated. // it doesn't matter since the last slice on x is not useful. } } /* for (z = 0 ; z< lz ; ++z){ z_l = (z+1)%lz; //1 8esh meta to trexon x z_r = (z+lz-1) %( lz) ; //loop for x=0 and x=lx-1!!!! 
(first and last slice) for (y = 0; y< ly; ++y){ y_n = (y+1)%ly; //1 8esh meta to trexon x y_s = (y+ly-1) %( ly) ; //toslice 0 antigrafetai sto 0 D3_hlp.Q0[index(z,y,0)]=D3.Q0[index(z,y,0)]; D3_hlp.Q1[index(z,y,0)]=D3.Q1[index(z,y,0)]; D3_hlp.Q2[index(z,y,0)]=D3.Q2[index(z,y,0)]; D3_hlp.Q3[index(z,y,0)]=D3.Q3[index(z,y,0)]; D3_hlp.Q4[index(z,y,0)]=D3.Q4[index(z,y,0)]; D3_hlp.Q5[index(z,y,0)]=D3.Q5[index(z,y,0)]; D3_hlp.Q6[index(z,y,0)]=D3.Q6[index(z,y,0)]; D3_hlp.Q7[index(z,y,0)]=D3.Q7[index(z,y,0)]; D3_hlp.Q8[index(z,y,0)]=D3.Q8[index(z,y,0)]; D3_hlp.Q9[index(z,y,0)]=D3.Q9[index(z,y,0)]; D3_hlp.Q10[index(z,y,0)]=D3.Q10[index(z,y,0)]; D3_hlp.Q11[index(z,y,0)]=D3.Q11[index(z,y,0)]; D3_hlp.Q12[index(z,y,0)]=D3.Q12[index(z,y,0)]; D3_hlp.Q13[index(z,y,0)]=D3.Q13[index(z,y,0)]; D3_hlp.Q14[index(z,y,0)]=D3.Q14[index(z,y,0)]; D3_hlp.Q15[index(z,y,0)]=D3.Q15[index(z,y,0)]; D3_hlp.Q16[index(z,y,0)]=D3.Q16[index(z,y,0)]; D3_hlp.Q17[index(z,y,0)]=D3.Q17[index(z,y,0)]; D3_hlp.Q18[index(z,y,0)]=D3.Q18[index(z,y,0)]; //at x= lx I set the incomming density as the one (equilibrum) calculated after the collision D3_hlp.Q3[index(z,y,lx-1)] = D3.Q3[index(z,y,lx-1)]; D3_hlp.Q8[index(z,y,lx-1)] = D3.Q8[index(z,y,lx-1)] ; D3_hlp.Q9[index(z,y,lx-1)] = D3.Q9[index(z,y,lx-1)] ; D3_hlp.Q12[index(z,y,lx-1)] = D3.Q12[index(z,y,lx-1)] ; D3_hlp.Q13[index(z,y,lx-1)] = D3.Q13[index(z,y,lx-1)] ; //tis aristeres densities apo to slice 0 tis metaferei sto telos D3_hlp.Q3[ index(z ,y ,lx-1)] = D3.Q3[ index(z,y,0)]; D3_hlp.Q8[ index(z ,y_n,lx-1)] = D3.Q8[ index(z,y,0)]; D3_hlp.Q9[ index(z ,y_s,lx-1)] = D3.Q9[ index(z,y,0)]; D3_hlp.Q12[index(z_r,y ,lx-1)] = D3.Q12[index(z,y,0)]; D3_hlp.Q13[index(z_l,y ,lx-1)] = D3.Q13[index(z,y,0)]; //tis deksies densities tou teleutaiou slice tis metaferei sto slice 0 D3_hlp.Q1[ index(z ,y ,0)] = D3.Q1[ index(z,y,lx-1)]; D3_hlp.Q7[ index(z ,y_n,0)] = D3.Q7[ index(z,y,lx-1)]; D3_hlp.Q10[index(z ,y_s,0)] = D3.Q10[index(z,y,lx-1)]; D3_hlp.Q11[index(z_r,y ,0)] = D3.Q11[index(z,y,lx-1)]; D3_hlp.Q14[index(z_l,y ,0)] = D3.Q14[index(z,y,lx-1)]; // gia x=lx-1: the densities 0, 2,4,5,6,15,16,17,18 // aplws 8a metadw8oune // "ka8eta" sto slice kai de 8a ginoun propagate se // alla slices (tuxainei na voleuei auto) // auto to kommati exei HDH ginei pio panw! // // // during streaming some of the indices at // (fixed) x=lx-1, y=ly-1, do not get updated. // it doesn't matter since the last slice on x is not useful. } }*/ #ifdef DEBUG cout << " #LBM streaming OK!" << endl; #endif } void LBM::streaming_first_part(){ /*Propagate fluid densities to their next neighbour nodes */ /*c c.......density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours. 
c*/ int x,y,z; int x_e/*east*/,x_w/*west*/; int y_n/*north*/,y_s/*south*/; int z_l/*left*/,z_r/*right*/; //todo rwta ton Dirk pws to kanei auto to vhma for (z = 0 ; z< lz ; ++z){ z_l = (z+1)%lz; //1 8esh meta to trexon x z_r = (z+lz-1) %( lz) ; for (y = 0; y< ly; ++y){ y_n = (y+1)%ly; //1 8esh meta to trexon y y_s = (y+ly-1) %( ly) ; for (x = 0; x< lx; ++x){ x_e = (x+1)%lx; //1 8esh meta to trexon z x_w = (x+lx-1) %( lx) ; //regular streaming process /* .........density propagation .........zero: just copy */ D3_hlp.Q0[index(z,y,x)]=D3.Q0[index(z,y,x)]; // node_hlp_grid[index4D(z,y,x,0)]=node_grid[index4D(z,y,x,0)]; //.........in the x,y and z directions D3_hlp.Q1[index(z ,y ,x_e)] = D3.Q1[index(z,y,x)]; D3_hlp.Q2[index(z ,y_n,x )] = D3.Q2[index(z,y,x)]; D3_hlp.Q3[index(z ,y ,x_w)] = D3.Q3[index(z,y,x)]; D3_hlp.Q4[index(z ,y_s,x )] = D3.Q4[index(z,y,x)]; D3_hlp.Q5[index(z_l,y ,x )] = D3.Q5[index(z,y,x)]; D3_hlp.Q6[index(z_r,y ,x )] = D3.Q6[index(z,y,x)]; //......... in the x,y diagonals D3_hlp.Q7[ index(z ,y_n,x_e)] = D3.Q7[ index(z,y,x)]; D3_hlp.Q8[ index(z ,y_n,x_w)] = D3.Q8[ index(z,y,x)]; D3_hlp.Q9[ index(z ,y_s,x_w)] = D3.Q9[ index(z,y,x)]; D3_hlp.Q10[index(z ,y_s,x_e)] = D3.Q10[index(z,y,x)]; //......... in the x,z diagonals D3_hlp.Q11[index(z_r,y ,x_e)] = D3.Q11[index(z,y,x)]; D3_hlp.Q12[index(z_r,y ,x_w)] = D3.Q12[index(z,y,x)]; D3_hlp.Q13[index(z_l,y ,x_w)] = D3.Q13[index(z,y,x)]; D3_hlp.Q14[index(z_l,y ,x_e)] = D3.Q14[index(z,y,x)]; //......... in the y,z diagonals D3_hlp.Q15[index(z_l,y_n,x )] = D3.Q15[index(z,y,x)]; D3_hlp.Q16[index(z_r,y_n,x )] = D3.Q16[index(z,y,x)]; D3_hlp.Q17[index(z_r,y_s,x )] = D3.Q17[index(z,y,x)]; D3_hlp.Q18[index(z_l,y_s,x )] = D3.Q18[index(z,y,x)]; }//z-loop }//y loop }//x loop #ifdef DEBUG cout << " #LBM streaming OK!" << endl; #endif } void LBM::streaming_last_part(){ /*Propagate fluid densities to their next neighbour nodes */ /*c c.......density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours. c*/ int y,z; for (z = 0 ; z< lz ; ++z){ //loop for x=0 and x=lx-1!!!! 
(first and last slice) for (y = 0; y< ly; ++y){ //toslice 0 antigrafetai sto 0 D3_hlp.Q0[index(z,y,0)]=D3.Q0[index(z,y,0)]; D3_hlp.Q1[index(z,y,0)]=D3.Q1[index(z,y,0)]; D3_hlp.Q2[index(z,y,0)]=D3.Q2[index(z,y,0)]; D3_hlp.Q3[index(z,y,0)]=D3.Q3[index(z,y,0)]; D3_hlp.Q4[index(z,y,0)]=D3.Q4[index(z,y,0)]; D3_hlp.Q5[index(z,y,0)]=D3.Q5[index(z,y,0)]; D3_hlp.Q6[index(z,y,0)]=D3.Q6[index(z,y,0)]; D3_hlp.Q7[index(z,y,0)]=D3.Q7[index(z,y,0)]; D3_hlp.Q8[index(z,y,0)]=D3.Q8[index(z,y,0)]; D3_hlp.Q9[index(z,y,0)]=D3.Q9[index(z,y,0)]; D3_hlp.Q10[index(z,y,0)]=D3.Q10[index(z,y,0)]; D3_hlp.Q11[index(z,y,0)]=D3.Q11[index(z,y,0)]; D3_hlp.Q12[index(z,y,0)]=D3.Q12[index(z,y,0)]; D3_hlp.Q13[index(z,y,0)]=D3.Q13[index(z,y,0)]; D3_hlp.Q14[index(z,y,0)]=D3.Q14[index(z,y,0)]; D3_hlp.Q15[index(z,y,0)]=D3.Q15[index(z,y,0)]; D3_hlp.Q16[index(z,y,0)]=D3.Q16[index(z,y,0)]; D3_hlp.Q17[index(z,y,0)]=D3.Q17[index(z,y,0)]; D3_hlp.Q18[index(z,y,0)]=D3.Q18[index(z,y,0)]; //at x= lx I set the incomming density as the one (equilibrum) calculated after the collision D3_hlp.Q3[index(z,y,lx-1)] = D3.Q3[index(z,y,lx-1)]; D3_hlp.Q8[index(z,y,lx-1)] = D3.Q8[index(z,y,lx-1)] ; D3_hlp.Q9[index(z,y,lx-1)] = D3.Q9[index(z,y,lx-1)] ; D3_hlp.Q12[index(z,y,lx-1)] = D3.Q12[index(z,y,lx-1)] ; D3_hlp.Q13[index(z,y,lx-1)] = D3.Q13[index(z,y,lx-1)] ; // gia x=lx-1: the densities 0, 2,4,5,6,15,16,17,18 // aplws 8a metadw8oune // "ka8eta" sto slice kai de 8a ginoun propagate se // alla slices (tuxainei na voleuei auto) // auto to kommati exei HDH ginei pio panw! // // // during streaming some of the indices at // (fixed) x=lx-1, y=ly-1, do not get updated. // it doesn't matter since the last slice on x is not useful. } } } __global__ void streaming_kernel(int lx, int ly, int lz, FLOATING reynolds, FLOATING nu, FLOATING r_small, FLOATING t_0, FLOATING t_1, FLOATING t_2, FLOATING c_squ, lattice D3, lattice D3_hlp){ /*Propagate fluid densities to their next neighbour nodes */ /*c c.......density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours. c*/ int x,y,z; int x_e/*east*/,x_w/*west*/; int y_n/*north*/,y_s/*south*/; int z_l/*left*/,z_r/*right*/; int rest; int end_of_memory=lz*ly*(lx); int tid=blockIdx.x*blockDim.x+threadIdx.x; z=(int) (tid/(ly*lx)); rest=tid-z; y=(int)(rest/lx); x=rest-y; //if(tid<end_of_memory){ if( z<lz and y<ly and x<lx){ //todo rwta ton Dirk pws to kanei auto to vhma // for (z = 0 ; z< lz ; ++z){ z_l = (z+1)%lz; //1 8esh meta to trexon x z_r = (z+lz-1) %( lz) ; // for (y = 0; y< ly; ++y){ y_n = (y+1)%ly; //1 8esh meta to trexon y y_s = (y+ly-1) %( ly) ; // for (x = 0; x< lx; ++x){ x_e = (x+1)%lx; //1 8esh meta to trexon z x_w = (x+lx-1) %( lx) ; //regular streaming process /* .........density propagation .........zero: just copy */ D3_hlp.Q0[index(z,y,x)]=D3.Q0[index(z,y,x)]; //node_hlp_grid[index4D(z,y,x,0)]=node_grid[index4D(z,y,x,0)]; //.........in the x,y and z directions D3_hlp.Q1[index(z ,y ,x_e)] = D3.Q1[index(z,y,x)]; D3_hlp.Q2[index(z ,y_n,x )] = D3.Q2[index(z,y,x)]; D3_hlp.Q3[index(z ,y ,x_w)] = D3.Q3[index(z,y,x)]; D3_hlp.Q4[index(z ,y_s,x )] = D3.Q4[index(z,y,x)]; D3_hlp.Q5[index(z_l,y ,x )] = D3.Q5[index(z,y,x)]; D3_hlp.Q6[index(z_r,y ,x )] = D3.Q6[index(z,y,x)]; //......... in the x,y diagonals D3_hlp.Q7[ index(z ,y_n,x_e)] = D3.Q7[ index(z,y,x)]; D3_hlp.Q8[ index(z ,y_n,x_w)] = D3.Q8[ index(z,y,x)]; D3_hlp.Q9[ index(z ,y_s,x_w)] = D3.Q9[ index(z,y,x)]; D3_hlp.Q10[index(z ,y_s,x_e)] = D3.Q10[index(z,y,x)]; //......... 
in the x,z diagonals D3_hlp.Q11[index(z_r,y ,x_e)] = D3.Q11[index(z,y,x)]; D3_hlp.Q12[index(z_r,y ,x_w)] = D3.Q12[index(z,y,x)]; D3_hlp.Q13[index(z_l,y ,x_w)] = D3.Q13[index(z,y,x)]; D3_hlp.Q14[index(z_l,y ,x_e)] = D3.Q14[index(z,y,x)]; //......... in the y,z diagonals D3_hlp.Q15[index(z_l,y_n,x )] = D3.Q15[index(z,y,x)]; D3_hlp.Q16[index(z_r,y_n,x )] = D3.Q16[index(z,y,x)]; D3_hlp.Q17[index(z_r,y_s,x )] = D3.Q17[index(z,y,x)]; D3_hlp.Q18[index(z_l,y_s,x )] = D3.Q18[index(z,y,x)]; // }//z-loop // }//y loop // }//x loop } if(tid<end_of_memory and x==0 ){ // for (z = 0 ; z< lz ; ++z){ // // // //loop for x=0 and x=lx-1!!!! (first and last slice) // for (y = 0; y< ly; ++y){ //toslice 0 antigrafetai sto 0 D3_hlp.Q0[index(z,y,0)]=D3.Q0[index(z,y,0)]; D3_hlp.Q1[index(z,y,0)]=D3.Q1[index(z,y,0)]; D3_hlp.Q2[index(z,y,0)]=D3.Q2[index(z,y,0)]; D3_hlp.Q3[index(z,y,0)]=D3.Q3[index(z,y,0)]; D3_hlp.Q4[index(z,y,0)]=D3.Q4[index(z,y,0)]; D3_hlp.Q5[index(z,y,0)]=D3.Q5[index(z,y,0)]; D3_hlp.Q6[index(z,y,0)]=D3.Q6[index(z,y,0)]; D3_hlp.Q7[index(z,y,0)]=D3.Q7[index(z,y,0)]; D3_hlp.Q8[index(z,y,0)]=D3.Q8[index(z,y,0)]; D3_hlp.Q9[index(z,y,0)]=D3.Q9[index(z,y,0)]; D3_hlp.Q10[index(z,y,0)]=D3.Q10[index(z,y,0)]; D3_hlp.Q11[index(z,y,0)]=D3.Q11[index(z,y,0)]; D3_hlp.Q12[index(z,y,0)]=D3.Q12[index(z,y,0)]; D3_hlp.Q13[index(z,y,0)]=D3.Q13[index(z,y,0)]; D3_hlp.Q14[index(z,y,0)]=D3.Q14[index(z,y,0)]; D3_hlp.Q15[index(z,y,0)]=D3.Q15[index(z,y,0)]; D3_hlp.Q16[index(z,y,0)]=D3.Q16[index(z,y,0)]; D3_hlp.Q17[index(z,y,0)]=D3.Q17[index(z,y,0)]; D3_hlp.Q18[index(z,y,0)]=D3.Q18[index(z,y,0)]; // gia x=lx-1: the densities 0, 2,4,5,6,15,16,17,18 // aplws 8a metadw8oune // "ka8eta" sto slice kai de 8a ginoun propagate se // alla slices (tuxainei na voleuei auto) // auto to kommati exei HDH ginei pio panw! // // // during streaming some of the indices at // (fixed) x=lx-1, y=ly-1, do not get updated. // it doesn't matter since the last slice on x is not useful. // } // } } if(tid<end_of_memory and x==lx-1 ){ //at x= lx I set the incomming density as the one (equilibrum) calculated after the collision D3_hlp.Q3[index(z,y,lx-1)] = D3.Q3[index(z,y,lx-1)]; D3_hlp.Q8[index(z,y,lx-1)] = D3.Q8[index(z,y,lx-1)] ; D3_hlp.Q9[index(z,y,lx-1)] = D3.Q9[index(z,y,lx-1)] ; D3_hlp.Q12[index(z,y,lx-1)] = D3.Q12[index(z,y,lx-1)] ; D3_hlp.Q13[index(z,y,lx-1)] = D3.Q13[index(z,y,lx-1)] ; } #ifdef DEBUG cout << " #LBM streaming OK!" 
<< endl; #endif } __global__ void streaming_kernel_single_threaded_p1(int lx, int ly, int lz, FLOATING *hlp_Q0, FLOATING *hlp_Q1, FLOATING *hlp_Q2, FLOATING *hlp_Q3, FLOATING *hlp_Q4, FLOATING *hlp_Q5, FLOATING *hlp_Q6, FLOATING *hlp_Q7, FLOATING *hlp_Q8, FLOATING *hlp_Q9, FLOATING *hlp_Q10, FLOATING *hlp_Q11, FLOATING *hlp_Q12, FLOATING *hlp_Q13, FLOATING *hlp_Q14, FLOATING *hlp_Q15, FLOATING *hlp_Q16, FLOATING *hlp_Q17, FLOATING *hlp_Q18, FLOATING *Q0, FLOATING *Q1, FLOATING *Q2, FLOATING *Q3, FLOATING *Q4, FLOATING *Q5, FLOATING *Q6, FLOATING *Q7, FLOATING *Q8, FLOATING *Q9, FLOATING *Q10, FLOATING *Q11, FLOATING *Q12, FLOATING *Q13, FLOATING *Q14, FLOATING *Q15, FLOATING *Q16, FLOATING *Q17, FLOATING *Q18){ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; // int x,y,z; int x_e/*east*/,x_w/*west*/; int y_n/*north*/,y_s/*south*/; int z_l/*left*/,z_r/*right*/; // __shared__ FLOATING shared_buffer[64]; if(tid<end_of_memory){ z_l = (z+1)%lz; //1 8esh meta to trexon x z_r = (z+lz-1) %( lz) ; // for (y = 0; y< ly; ++y){ y_n = (y+1)%ly; //1 8esh meta to trexon y y_s = (y+ly-1) %( ly) ; // for (x = 0; x< lx; ++x){ x_e = (x+1)%lx; //1 8esh meta to trexon z x_w = (x+lx-1) %( lx) ; //regular streaming process // .........density propagation // .........zero: just copy hlp_Q0[index(z,y,x)]=Q0[index(z,y,x)]; // node_hlp_grid[index4D(z,y,x,0)]=node_grid[index4D(z,y,x,0)]; //.........in the x,y and z directions hlp_Q1[index(z ,y ,x_e)] = Q1[index(z,y,x)]; hlp_Q2[index(z ,y_n,x )] = Q2[index(z,y,x)]; hlp_Q3[index(z ,y ,x_w)] = Q3[index(z,y,x)]; hlp_Q4[index(z ,y_s,x )] = Q4[index(z,y,x)]; hlp_Q5[index(z_l,y ,x )] = Q5[index(z,y,x)]; hlp_Q6[index(z_r,y ,x )] = Q6[index(z,y,x)]; //......... in the x,y diagonals hlp_Q7[ index(z ,y_n,x_e)] = Q7[ index(z,y,x)]; hlp_Q8[ index(z ,y_n,x_w)] = Q8[ index(z,y,x)]; hlp_Q9[ index(z ,y_s,x_w)] = Q9[ index(z,y,x)]; hlp_Q10[index(z ,y_s,x_e)] = Q10[index(z,y,x)]; //......... in the x,z diagonals hlp_Q11[index(z_r,y ,x_e)] = Q11[index(z,y,x)]; hlp_Q12[index(z_r,y ,x_w)] = Q12[index(z,y,x)]; hlp_Q13[index(z_l,y ,x_w)] = Q13[index(z,y,x)]; hlp_Q14[index(z_l,y ,x_e)] = Q14[index(z,y,x)]; //......... 
in the y,z diagonals hlp_Q15[index(z_l,y_n,x )] = Q15[index(z,y,x)]; hlp_Q16[index(z_r,y_n,x )] = Q16[index(z,y,x)]; hlp_Q17[index(z_r,y_s,x )] = Q17[index(z,y,x)]; hlp_Q18[index(z_l,y_s,x )] = Q18[index(z,y,x)]; } } __global__ void streaming_kernel_single_threaded_p1_shared(int lx, int ly, int lz, FLOATING *hlp_Q0, FLOATING *hlp_Q1, FLOATING *hlp_Q2, FLOATING *hlp_Q3, FLOATING *hlp_Q4, FLOATING *hlp_Q5, FLOATING *hlp_Q6, FLOATING *hlp_Q7, FLOATING *hlp_Q8, FLOATING *hlp_Q9, FLOATING *hlp_Q10, FLOATING *hlp_Q11, FLOATING *hlp_Q12, FLOATING *hlp_Q13, FLOATING *hlp_Q14, FLOATING *hlp_Q15, FLOATING *hlp_Q16, FLOATING *hlp_Q17, FLOATING *hlp_Q18, FLOATING *Q0, FLOATING *Q1, FLOATING *Q2, FLOATING *Q3, FLOATING *Q4, FLOATING *Q5, FLOATING *Q6, FLOATING *Q7, FLOATING *Q8, FLOATING *Q9, FLOATING *Q10, FLOATING *Q11, FLOATING *Q12, FLOATING *Q13, FLOATING *Q14, FLOATING *Q15, FLOATING *Q16, FLOATING *Q17, FLOATING *Q18){ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; // int x,y,z; int x_e/*east*/,x_w/*west*/; int y_n/*north*/,y_s/*south*/; int z_l/*left*/,z_r/*right*/; extern __shared__ FLOATING shared_buffer[]; FLOATING *shared_Q0=shared_buffer; FLOATING *shared_Q1 = &shared_Q0[blockDim.x]; FLOATING *shared_Q2 = &shared_Q1[blockDim.x]; FLOATING *shared_Q3 = &shared_Q2[blockDim.x]; FLOATING *shared_Q4 = &shared_Q3[blockDim.x]; FLOATING *shared_Q5 = &shared_Q4[blockDim.x]; FLOATING *shared_Q6 = &shared_Q5[blockDim.x]; FLOATING *shared_Q7 = &shared_Q6[blockDim.x]; FLOATING *shared_Q8 = &shared_Q7[blockDim.x]; FLOATING *shared_Q9 = &shared_Q8[blockDim.x]; FLOATING *shared_Q10 = &shared_Q9[blockDim.x]; FLOATING *shared_Q11 = &shared_Q10[blockDim.x]; FLOATING *shared_Q12 = &shared_Q11[blockDim.x]; FLOATING *shared_Q13 = &shared_Q12[blockDim.x]; FLOATING *shared_Q14 = &shared_Q13[blockDim.x]; FLOATING *shared_Q15 = &shared_Q14[blockDim.x]; FLOATING *shared_Q16 = &shared_Q15[blockDim.x]; FLOATING *shared_Q17 = &shared_Q16[blockDim.x]; FLOATING *shared_Q18 = &shared_Q17[blockDim.x]; shared_Q0[threadIdx.x]=Q0[index(z,y,x)]; shared_Q1[threadIdx.x]=Q1[index(z,y,x)]; shared_Q2[threadIdx.x]=Q2[index(z,y,x)]; shared_Q3[threadIdx.x]=Q3[index(z,y,x)]; shared_Q4[threadIdx.x]=Q4[index(z,y,x)]; shared_Q5[threadIdx.x]=Q5[index(z,y,x)]; shared_Q6[threadIdx.x]=Q6[index(z,y,x)]; shared_Q7[threadIdx.x]=Q7[index(z,y,x)]; shared_Q8[threadIdx.x]=Q8[index(z,y,x)]; shared_Q9[threadIdx.x]=Q9[index(z,y,x)]; shared_Q10[threadIdx.x]=Q10[index(z,y,x)]; shared_Q11[threadIdx.x]=Q11[index(z,y,x)]; shared_Q12[threadIdx.x]=Q12[index(z,y,x)]; shared_Q13[threadIdx.x]=Q13[index(z,y,x)]; shared_Q14[threadIdx.x]=Q14[index(z,y,x)]; shared_Q15[threadIdx.x]=Q15[index(z,y,x)]; shared_Q16[threadIdx.x]=Q16[index(z,y,x)]; shared_Q17[threadIdx.x]=Q17[index(z,y,x)]; shared_Q18[threadIdx.x]=Q18[index(z,y,x)]; __syncthreads(); if(tid<end_of_memory){ z_l = (z+1)%lz; //1 8esh meta to trexon x z_r = (z+lz-1) %( lz) ; // for (y = 0; y< ly; ++y){ y_n = (y+1)%ly; //1 8esh meta to trexon y y_s = (y+ly-1) %( ly) ; // for (x = 0; x< lx; ++x){ x_e = (x+1)%lx; //1 8esh meta to trexon z x_w = (x+lx-1) %( lx) ; //regular streaming process // .........density propagation // .........zero: just copy hlp_Q0[index(z,y,x)]=shared_Q0[threadIdx.x]; // node_hlp_grid[index4D(z,y,x,0)]=node_grid[index4D(z,y,x,0)]; //.........in the x,y and z directions hlp_Q1[index(z ,y ,x_e)] =shared_Q1[threadIdx.x]; hlp_Q2[index(z ,y_n,x )] =shared_Q2[threadIdx.x]; hlp_Q3[index(z ,y 
,x_w)] =shared_Q3[threadIdx.x]; hlp_Q4[index(z ,y_s,x )] =shared_Q4[threadIdx.x]; hlp_Q5[index(z_l,y ,x )] =shared_Q5[threadIdx.x]; hlp_Q6[index(z_r,y ,x )] =shared_Q6[threadIdx.x]; //......... in the x,y diagonals hlp_Q7[ index(z ,y_n,x_e)] =shared_Q7[ threadIdx.x]; hlp_Q8[ index(z ,y_n,x_w)] =shared_Q8[ threadIdx.x]; hlp_Q9[ index(z ,y_s,x_w)] =shared_Q9[ threadIdx.x]; hlp_Q10[index(z ,y_s,x_e)] =shared_Q10[threadIdx.x]; //......... in the x,z diagonals hlp_Q11[index(z_r,y ,x_e)] =shared_Q11[threadIdx.x]; hlp_Q12[index(z_r,y ,x_w)] =shared_Q12[threadIdx.x]; hlp_Q13[index(z_l,y ,x_w)] =shared_Q13[threadIdx.x]; hlp_Q14[index(z_l,y ,x_e)] =shared_Q14[threadIdx.x]; //......... in the y,z diagonals hlp_Q15[index(z_l,y_n,x )] =shared_Q15[threadIdx.x]; hlp_Q16[index(z_r,y_n,x )] =shared_Q16[threadIdx.x]; hlp_Q17[index(z_r,y_s,x )] =shared_Q17[threadIdx.x]; hlp_Q18[index(z_l,y_s,x )] =shared_Q18[threadIdx.x]; } } __global__ void streaming_kernel_first_part_Q0(int lx, int ly, int lz, const FLOATING *Q0, FLOATING *hlp_Q0){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q0[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q0[index(z,y,x)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q1(int lx, int ly, int lz, const FLOATING *Q1, FLOATING *hlp_Q1){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int x_e/*east*/ = (x+1)%lx; //1 8esh meta to trexon x extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q1[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q1[index(z ,y ,x_e)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q2(int lx, int ly, int lz, const FLOATING *Q2, FLOATING *hlp_Q2){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int y_n/*north*/ = (y+1)%ly; //1 8esh meta to trexon y extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q2[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q2[index(z ,y_n,x )] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q3(int lx, int ly, int lz, const FLOATING *Q3, FLOATING *hlp_Q3){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int x_w/*west*/ = (x+lx-1) %( lx) ; extern __shared__ FLOATING 
shared_buffer[]; shared_buffer[threadIdx.x]=Q3[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q3[index(z ,y ,x_w)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q4(int lx, int ly, int lz, const FLOATING *Q4, FLOATING *hlp_Q4){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int y_s/*south*/ = (y+ly-1) %( ly) ; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q4[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q4[index(z ,y_s,x )] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q5(int lx, int ly, int lz, const FLOATING *Q5, FLOATING *hlp_Q5){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_l/*left*/ = (z+1)%lz; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q5[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q5[index(z_l,y ,x )] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q6(int lx, int ly, int lz, const FLOATING *Q6, FLOATING *hlp_Q6){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_r/*right*/ = (z+lz-1) %( lz) ; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q6[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q6[index(z_r,y ,x )] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q7(int lx, int ly, int lz, const FLOATING *Q7, FLOATING *hlp_Q7){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int y_n/*north*/ = (y+1)%ly; int x_e/*east*/ = (x+1)%lx; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q7[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q7[ index(z ,y_n,x_e)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q8(int lx, int ly, int lz, const FLOATING *Q8, FLOATING *hlp_Q8){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int y_n/*north*/ = (y+1)%ly; int x_w/*west*/ = (x+lx-1) %( lx) ; extern __shared__ FLOATING 
shared_buffer[]; shared_buffer[threadIdx.x]=Q8[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q8[ index(z ,y_n,x_w)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q9(int lx, int ly, int lz, const FLOATING *Q9, FLOATING *hlp_Q9){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int y_s/*south*/ = (y+ly-1) %( ly) ; int x_w/*west*/ = (x+lx-1) %( lx) ; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q9[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q9[ index(z ,y_s,x_w)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q10(int lx, int ly, int lz, const FLOATING *Q10, FLOATING *hlp_Q10){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int y_s/*south*/ = (y+ly-1) %( ly) ; int x_e/*east*/ = (x+1)%lx; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q10[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q10[index(z ,y_s,x_e)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q11(int lx, int ly, int lz, const FLOATING *Q11, FLOATING *hlp_Q11){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_r/*right*/ = (z+lz-1) %( lz) ; int x_e/*east*/ = (x+1)%lx; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q11[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q11[index(z_r,y ,x_e)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q12(int lx, int ly, int lz, const FLOATING *Q12, FLOATING *hlp_Q12){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_r/*right*/ = (z+lz-1) %( lz) ; int x_w/*west*/ = (x+lx-1) %( lx) ; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q12[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q12[index(z_r,y ,x_w)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q13(int lx, int ly, int lz, const FLOATING *Q13, FLOATING *hlp_Q13){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int 
y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_l/*left*/ = (z+1)%lz; int x_w/*west*/ = (x+lx-1) %( lx) ; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q13[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q13[index(z_l,y ,x_w)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q14(int lx, int ly, int lz, const FLOATING *Q14, FLOATING *hlp_Q14){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_l/*left*/ = (z+1)%lz; int x_e/*east*/ = (x+1)%lx; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q14[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q14[index(z_l,y ,x_e)] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q15(int lx, int ly, int lz, const FLOATING *Q15, FLOATING *hlp_Q15){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_l/*left*/ = (z+1)%lz; int y_n/*north*/ = (y+1)%ly; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q15[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q15[index(z_l,y_n,x )] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q16(int lx, int ly, int lz, const FLOATING *Q16, FLOATING *hlp_Q16){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_r /*right*/ = (z+lz-1) %( lz) ; int y_n /*north*/= (y+1)%ly; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q16[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q16[index(z_r,y_n,x )] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q17(int lx, int ly, int lz, const FLOATING *Q17, FLOATING *hlp_Q17){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_r/*right*/ = (z+lz-1) %( lz) ; int y_s/*south*/ = (y+ly-1) %( ly) ; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q17[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q17[index(z_r,y_s,x )] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_first_part_Q18(int lx, int ly, int lz, const FLOATING *Q18, FLOATING *hlp_Q18){ /*Propagate fluid densities to their next neighbour nodes */ /*c....density propagation: all fluid densities are propagated from c non-occupied nodes along the lattice connection lines c to their next 
neighbours.*/ int tid=blockIdx.x*blockDim.x+threadIdx.x; int end_of_memory=lz*ly*(lx); int z=(int) (tid/(ly*lx)); int y=(tid-z*(ly*lx))/lx; int x=tid-z*(ly*lx)-y*lx; int z_l/*left*/ = (z+1)%lz; int y_s/*south*/ = (y+ly-1) %( ly) ; extern __shared__ FLOATING shared_buffer[]; shared_buffer[threadIdx.x]=Q18[index(z,y,x)]; __syncthreads(); if( tid<end_of_memory) hlp_Q18[index(z_l,y_s,x )] = shared_buffer[threadIdx.x]; } __global__ void streaming_kernel_single_threaded_p2_v2(int lx, int ly, int lz, FLOATING *hlp_Q0, FLOATING *hlp_Q1, FLOATING *hlp_Q2, FLOATING *hlp_Q3, FLOATING *hlp_Q4, FLOATING *hlp_Q5, FLOATING *hlp_Q6, FLOATING *hlp_Q7, FLOATING *hlp_Q8, FLOATING *hlp_Q9, FLOATING *hlp_Q10, FLOATING *hlp_Q11, FLOATING *hlp_Q12, FLOATING *hlp_Q13, FLOATING *hlp_Q14, FLOATING *hlp_Q15, FLOATING *hlp_Q16, FLOATING *hlp_Q17, FLOATING *hlp_Q18, FLOATING *Q0, FLOATING *Q1, FLOATING *Q2, FLOATING *Q3, FLOATING *Q4, FLOATING *Q5, FLOATING *Q6, FLOATING *Q7, FLOATING *Q8, FLOATING *Q9, FLOATING *Q10, FLOATING *Q11, FLOATING *Q12, FLOATING *Q13, FLOATING *Q14, FLOATING *Q15, FLOATING *Q16, FLOATING *Q17, FLOATING *Q18){ int tid=blockIdx.x*blockDim.x+threadIdx.x; int z=tid/ly; int y=tid-z*ly; if(tid<ly*lz){ // for (z = 0 ; z< lz ; ++z){ //loop for x=0 and x=lx-1!!!! (first and last slice) // for (y = 0; y< ly; ++y){ //toslice 0 antigrafetai sto 0 hlp_Q0[index(z,y,0)]=Q0[index(z,y,0)]; hlp_Q1[index(z,y,0)]=Q1[index(z,y,0)]; hlp_Q2[index(z,y,0)]=Q2[index(z,y,0)]; hlp_Q3[index(z,y,0)]=Q3[index(z,y,0)]; hlp_Q4[index(z,y,0)]=Q4[index(z,y,0)]; hlp_Q5[index(z,y,0)]=Q5[index(z,y,0)]; hlp_Q6[index(z,y,0)]=Q6[index(z,y,0)]; hlp_Q7[index(z,y,0)]=Q7[index(z,y,0)]; hlp_Q8[index(z,y,0)]=Q8[index(z,y,0)]; hlp_Q9[index(z,y,0)]=Q9[index(z,y,0)]; hlp_Q10[index(z,y,0)]=Q10[index(z,y,0)]; hlp_Q11[index(z,y,0)]=Q11[index(z,y,0)]; hlp_Q12[index(z,y,0)]=Q12[index(z,y,0)]; hlp_Q13[index(z,y,0)]=Q13[index(z,y,0)]; hlp_Q14[index(z,y,0)]=Q14[index(z,y,0)]; hlp_Q15[index(z,y,0)]=Q15[index(z,y,0)]; hlp_Q16[index(z,y,0)]=Q16[index(z,y,0)]; hlp_Q17[index(z,y,0)]=Q17[index(z,y,0)]; hlp_Q18[index(z,y,0)]=Q18[index(z,y,0)]; //at x= lx I set the incomming density as the one (equilibrum) calculated after the collision hlp_Q3[index(z,y,lx-1)] = Q3[index(z,y,lx-1)]; hlp_Q8[index(z,y,lx-1)] = Q8[index(z,y,lx-1)] ; hlp_Q9[index(z,y,lx-1)] = Q9[index(z,y,lx-1)] ; hlp_Q12[index(z,y,lx-1)] = Q12[index(z,y,lx-1)] ; hlp_Q13[index(z,y,lx-1)] = Q13[index(z,y,lx-1)] ; // gia x=lx-1: the densities 0, 2,4,5,6,15,16,17,18 // aplws 8a metadw8oune // "ka8eta" sto slice kai de 8a ginoun propagate se // alla slices (tuxainei na voleuei auto) // auto to kommati exei HDH ginei pio panw! // // // during streaming some of the indices at // (fixed) x=lx-1, y=ly-1, do not get updated. // it doesn't matter since the last slice on x is not useful. 
} } void LBM::cuda_streaming(){ if(data_location==CPU) copy_data_from_host_to_device(); dim3 threads_type2(threads_for_streaming_collision_and_relaxation,1,1); dim3 grid_type2(blocks_for_streaming_collision_and_relaxation,1,1); streaming_kernel_first_part_Q0<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q0, D3_hlp_d.Q0); streaming_kernel_first_part_Q1<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q1, D3_hlp_d.Q1); streaming_kernel_first_part_Q2<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q2, D3_hlp_d.Q2); streaming_kernel_first_part_Q3<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q3, D3_hlp_d.Q3); streaming_kernel_first_part_Q4<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q4, D3_hlp_d.Q4); streaming_kernel_first_part_Q5<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q5, D3_hlp_d.Q5); streaming_kernel_first_part_Q6<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q6, D3_hlp_d.Q6); streaming_kernel_first_part_Q7<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q7, D3_hlp_d.Q7); streaming_kernel_first_part_Q8<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q8, D3_hlp_d.Q8); streaming_kernel_first_part_Q9<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q9, D3_hlp_d.Q9); streaming_kernel_first_part_Q10<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q10, D3_hlp_d.Q10); streaming_kernel_first_part_Q11<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q11, D3_hlp_d.Q11); streaming_kernel_first_part_Q12<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q12, D3_hlp_d.Q12); streaming_kernel_first_part_Q13<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q13, D3_hlp_d.Q13); streaming_kernel_first_part_Q14<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q14, D3_hlp_d.Q14); streaming_kernel_first_part_Q15<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q15, D3_hlp_d.Q15); streaming_kernel_first_part_Q16<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q16, D3_hlp_d.Q16); streaming_kernel_first_part_Q17<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q17, D3_hlp_d.Q17); streaming_kernel_first_part_Q18<<<grid_type2, threads_type2, size_of_allocated_shared_memory_for_streaming_collision_and_relaxation>>>( lx, ly, lz, D3_d.Q18, D3_hlp_d.Q18); cudaDeviceSynchronize(); if(data_location==CPU) 
    copy_data_from_host_to_device();

  int n_of_threads = 64;
  int n_of_blocks = (lz*ly)/n_of_threads;
  if (((lz*ly) % n_of_threads) != 0)   // round up so every (z,y) pair of the boundary slices gets a thread
    ++n_of_blocks;
  dim3 threads_type3(n_of_threads, 1, 1);
  dim3 grid_type3(n_of_blocks, 1, 1);

  streaming_kernel_single_threaded_p2_v2<<<grid_type3, threads_type3>>>(lx, ly, lz,
      D3_hlp_d.Q0,  D3_hlp_d.Q1,  D3_hlp_d.Q2,  D3_hlp_d.Q3,  D3_hlp_d.Q4,
      D3_hlp_d.Q5,  D3_hlp_d.Q6,  D3_hlp_d.Q7,  D3_hlp_d.Q8,  D3_hlp_d.Q9,
      D3_hlp_d.Q10, D3_hlp_d.Q11, D3_hlp_d.Q12, D3_hlp_d.Q13, D3_hlp_d.Q14,
      D3_hlp_d.Q15, D3_hlp_d.Q16, D3_hlp_d.Q17, D3_hlp_d.Q18,
      D3_d.Q0,  D3_d.Q1,  D3_d.Q2,  D3_d.Q3,  D3_d.Q4,
      D3_d.Q5,  D3_d.Q6,  D3_d.Q7,  D3_d.Q8,  D3_d.Q9,
      D3_d.Q10, D3_d.Q11, D3_d.Q12, D3_d.Q13, D3_d.Q14,
      D3_d.Q15, D3_d.Q16, D3_d.Q17, D3_d.Q18);
  cudaDeviceSynchronize();
}
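The streaming kernels in this file rely on an index(z,y,x) helper and a FLOATING type that are defined elsewhere in the project and are not visible here. The sketch below is consistent with how the kernels decompose tid (x fastest, then y, then z); both the macro form and the float typedef are assumptions, not the project's actual definitions.

// Assumed definitions, not taken from the original source. The layout follows the
// index arithmetic used above: z = tid/(ly*lx), y = (tid - z*ly*lx)/lx, x = remainder,
// i.e. a row-major (z, y, x) ordering with x as the fastest-varying coordinate.
typedef float FLOATING;                                  // could equally be double
#define index(z, y, x) ((((z) * ly) + (y)) * lx + (x))   // relies on lx, ly being in scope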
518d213a7956a0767133188331afd2bb60e6f88b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cmath>

#define n 10000
#define BLOCK 10

__global__ void Su(float *S_d, float *x)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    float q = 1.0;
    for (int j = 1; j <= *x; j++) {
        q = q*i;
    }
    S_d[i] = 1./q;
}

int main()
{
    float host_x = 2;              // CPU
    float *dev_S; float *dev_x;    // GPU
    float host_S[n];
    int size = sizeof(float);
    hipMalloc((void**)&dev_S, n*size);
    hipMalloc((void**)&dev_x, size);
    hipMemcpy(dev_S, &host_S, n*size, hipMemcpyHostToDevice);
    hipMemcpy(dev_x, &host_x, size, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( Su) , dim3(BLOCK), dim3(n / BLOCK) , 0, 0, dev_S, dev_x);
    hipDeviceSynchronize();
    hipMemcpy(&host_S, dev_S, n*size, hipMemcpyDeviceToHost);
    float p = 0.0;
    for (int i = 1; i < n; i++) {
        p = p + host_S[i];
    }
    printf("S = %f ", p);
    hipFree(dev_S);
    hipFree(dev_x);
    getchar();
}
518d213a7956a0767133188331afd2bb60e6f88b.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cmath>

#define n 10000
#define BLOCK 10

__global__ void Su(float *S_d, float *x)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    float q = 1.0;
    for (int j = 1; j <= *x; j++) {
        q = q*i;
    }
    S_d[i] = 1./q;
}

int main()
{
    float host_x = 2;              // CPU
    float *dev_S; float *dev_x;    // GPU
    float host_S[n];
    int size = sizeof(float);
    cudaMalloc((void**)&dev_S, n*size);
    cudaMalloc((void**)&dev_x, size);
    cudaMemcpy(dev_S, &host_S, n*size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_x, &host_x, size, cudaMemcpyHostToDevice);
    Su <<< BLOCK, n / BLOCK >>>(dev_S, dev_x);
    cudaDeviceSynchronize();
    cudaMemcpy(&host_S, dev_S, n*size, cudaMemcpyDeviceToHost);
    float p = 0.0;
    for (int i = 1; i < n; i++) {
        p = p + host_S[i];
    }
    printf("S = %f ", p);
    cudaFree(dev_S);
    cudaFree(dev_x);
    getchar();
}
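For reference, with host_x = 2 the Su kernel in the pair above fills S_d[i] = 1/i^2 and the host sums the terms for i = 1 .. n-1, so the printed value approaches the Basel sum pi^2/6 = 1.644934... (about 1.64483 for n = 10000). A CPU-only version of the same reduction, not part of either file, can serve as a sanity check:

/* Hypothetical host-only check, kept separate from the .hip/.cu pair above. */
#include <stdio.h>
#include <math.h>

int main(void)
{
    double p = 0.0;
    for (int i = 1; i < 10000; i++)              /* same index range as the GPU version */
        p += 1.0 / ((double)i * (double)i);
    double pi = acos(-1.0);
    printf("CPU reference S = %f (pi^2/6 = %f)\n", p, pi * pi / 6.0);
    return 0;
}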
46d9d1b47870d6756f8ec4b1960cdd85399d77c1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

#define SIZE 512

// Function that adds two vectors
__global__ void addVector(float* left, float* right, float* result)
{
    // Get the id of the current thread.
    int idx = blockIdx.x;
    // Compute the result.
    result[idx] = left[idx] + right[idx];
}

__host__ int main()
{
    // Allocate memory for the vectors
    float *vec1 = new float[SIZE], *vec2 = new float[SIZE], *vec3 = new float[SIZE];
    // Initialize the vector values
    for (int i = 0; i < SIZE; i++)
        vec1[i] = vec2[i] = i;

    // Pointers to GPU memory
    float *devVec1, *devVec2, *devVec3;
    // Allocate memory for the vectors on the GPU
    hipMalloc((void**)&devVec1, sizeof(float) * SIZE);
    hipMalloc((void**)&devVec2, sizeof(float) * SIZE);
    hipMalloc((void**)&devVec3, sizeof(float) * SIZE);
    // Copy the data into GPU memory
    hipMemcpy(devVec1, vec1, sizeof(float) * SIZE, hipMemcpyHostToDevice);
    hipMemcpy(devVec2, vec2, sizeof(float) * SIZE, hipMemcpyHostToDevice);

    // Grid size to use
    dim3 gridSize = dim3(SIZE, 1, 1);
    // Block size to use
    dim3 blockSize = dim3(1, 1, 1);

    // Launch the kernel
    addVector << <gridSize, blockSize >> >(devVec1, devVec2, devVec3);

    // Event handle
    hipEvent_t syncEvent;
    // Create the event
    hipEventCreate(&syncEvent);
    // Record the event
    hipEventRecord(syncEvent, 0);
    // Synchronize on the event
    hipEventSynchronize(syncEvent);

    // Only now fetch the result of the computation
    hipMemcpy(vec3, devVec3, sizeof(float) * SIZE, hipMemcpyDeviceToHost);

    // Results of the computation
    for (int i = 0; i < SIZE; i++)
        printf("Element #%i: %.1f\n", i, vec3[i]);

    // Release resources
    // Destroy the event
    hipEventDestroy(syncEvent);
    // Free GPU memory
    hipFree(devVec1);
    hipFree(devVec2);
    hipFree(devVec3);
    // Free host memory (each vector needs its own delete[])
    delete[] vec1;
    delete[] vec2;
    delete[] vec3;
}
46d9d1b47870d6756f8ec4b1960cdd85399d77c1.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

#define SIZE 512

// Function that adds two vectors
__global__ void addVector(float* left, float* right, float* result)
{
    // Get the id of the current thread.
    int idx = blockIdx.x;
    // Compute the result.
    result[idx] = left[idx] + right[idx];
}

__host__ int main()
{
    // Allocate memory for the vectors
    float *vec1 = new float[SIZE], *vec2 = new float[SIZE], *vec3 = new float[SIZE];
    // Initialize the vector values
    for (int i = 0; i < SIZE; i++)
        vec1[i] = vec2[i] = i;

    // Pointers to GPU memory
    float *devVec1, *devVec2, *devVec3;
    // Allocate memory for the vectors on the GPU
    cudaMalloc((void**)&devVec1, sizeof(float) * SIZE);
    cudaMalloc((void**)&devVec2, sizeof(float) * SIZE);
    cudaMalloc((void**)&devVec3, sizeof(float) * SIZE);
    // Copy the data into GPU memory
    cudaMemcpy(devVec1, vec1, sizeof(float) * SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(devVec2, vec2, sizeof(float) * SIZE, cudaMemcpyHostToDevice);

    // Grid size to use
    dim3 gridSize = dim3(SIZE, 1, 1);
    // Block size to use
    dim3 blockSize = dim3(1, 1, 1);

    // Launch the kernel
    addVector << <gridSize, blockSize >> >(devVec1, devVec2, devVec3);

    // Event handle
    cudaEvent_t syncEvent;
    // Create the event
    cudaEventCreate(&syncEvent);
    // Record the event
    cudaEventRecord(syncEvent, 0);
    // Synchronize on the event
    cudaEventSynchronize(syncEvent);

    // Only now fetch the result of the computation
    cudaMemcpy(vec3, devVec3, sizeof(float) * SIZE, cudaMemcpyDeviceToHost);

    // Results of the computation
    for (int i = 0; i < SIZE; i++)
        printf("Element #%i: %.1f\n", i, vec3[i]);

    // Release resources
    // Destroy the event
    cudaEventDestroy(syncEvent);
    // Free GPU memory
    cudaFree(devVec1);
    cudaFree(devVec2);
    cudaFree(devVec3);
    // Free host memory (each vector needs its own delete[])
    delete[] vec1;
    delete[] vec2;
    delete[] vec3;
}
852c1e62e9ce783d451e495b9b350d8b23166542.hip
// !!! This is a file automatically generated by hipify!!!
#include <kernels.h>
#include <Constants.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

__global__ void Vector_Addition(const int* dev_a, const int* dev_b, int* dev_c)
{
    // Get the id of thread within a block
    unsigned short tid = threadIdx.x;
    if (tid < THREADS_PER_BLOCK)   // check the boundary condition for the threads
        dev_c[tid] = dev_a[tid] + dev_b[tid];
}
852c1e62e9ce783d451e495b9b350d8b23166542.cu
#include <kernels.h>
#include <Constants.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

__global__ void Vector_Addition(const int* dev_a, const int* dev_b, int* dev_c)
{
    // Get the id of thread within a block
    unsigned short tid = threadIdx.x;
    if (tid < THREADS_PER_BLOCK)   // check the boundary condition for the threads
        dev_c[tid] = dev_a[tid] + dev_b[tid];
}
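The 852c1e62 pair ships only the kernel; THREADS_PER_BLOCK comes from Constants.h and the host code lives elsewhere in that project. The following is a hypothetical standalone driver, with a placeholder THREADS_PER_BLOCK value and a single-block launch that matches the kernel's tid guard; none of it is taken from the original project.

// Hypothetical standalone driver (not part of the original project). THREADS_PER_BLOCK
// normally comes from Constants.h; a placeholder value is defined here so the sketch
// compiles on its own, and the kernel body is repeated from the files above.
#include <cstdio>
#include <cuda_runtime.h>

#define THREADS_PER_BLOCK 256   // assumption: the real value lives in Constants.h

__global__ void Vector_Addition(const int* dev_a, const int* dev_b, int* dev_c)
{
    unsigned short tid = threadIdx.x;
    if (tid < THREADS_PER_BLOCK)
        dev_c[tid] = dev_a[tid] + dev_b[tid];
}

int main()
{
    int h_a[THREADS_PER_BLOCK], h_b[THREADS_PER_BLOCK], h_c[THREADS_PER_BLOCK];
    for (int i = 0; i < THREADS_PER_BLOCK; ++i) { h_a[i] = i; h_b[i] = 2 * i; }

    int *d_a, *d_b, *d_c;
    const size_t bytes = THREADS_PER_BLOCK * sizeof(int);
    cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes);
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);

    // One block of THREADS_PER_BLOCK threads, matching the tid < THREADS_PER_BLOCK guard.
    Vector_Addition<<<1, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);

    printf("c[10] = %d (expected 30)\n", h_c[10]);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}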
12c78b4707c3576a4ebd0930a1c0f7f847c7c516.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __device__ __host__ static inline uint8_t xnor_bit1(uint8_t a, uint8_t b) { return ~(a^b) & 0b1; } __device__ __host__ static inline unsigned char get_bit(unsigned char const*const src, size_t index) { size_t src_i = index / 8; int src_shift = index % 8; unsigned char val = (src[src_i] & (1 << src_shift)) > 0; //unsigned char val = (src[src_i] & (1 << (8 - src_shift))) > 0; return val; } __global__ void convolve_bin_gpu_kernel(float *input, float *weights, float *output, int in_w, int in_h, int in_c, int n, int size, int pad, int new_lda, float *mean_arr_gpu) { int index = blockIdx.x*blockDim.x + threadIdx.x; int fil; // filter index //for (fil = 0; fil < n; ++fil) int chan, y, x, f_y, f_x; // channel index //for (chan = 0; chan < in_c; ++chan) // input - y //for (y = 0; y < in_h; ++y) // input - x //for (x = 0; x < in_w; ++x) x = index % in_w; int index2 = index / in_w; y = index2 % in_h; fil = index2 / in_h; //if (fil < n) // (1-6 for one BLOCK) { //float mean_val = mean_arr_gpu[fil]; int const output_index = fil*in_w*in_h + y*in_w + x; int sum = 0; int good_val = 0; int min_index = blockIdx.x*blockDim.x; int min_fil = (min_index / in_w) / in_h; int max_index = (blockIdx.x+1)*blockDim.x - 1; int max_fil = (max_index / in_w) / in_h; __shared__ uint32_t weights_shared[3*3*1024*6/32 + 1]; // 7 KB (6 filters) - use (new_lda) for size calculation //const int weights_size = size*size*in_c/8; const int weights_size = size*size*in_c / 32 + 1; for (int tmp_fil = min_fil; tmp_fil <= max_fil; tmp_fil++) { for (int s = threadIdx.x; s < weights_size; s += blockDim.x) { //weights_shared[s + (tmp_fil - min_fil)*new_lda / 8] = ((uint8_t *)weights)[tmp_fil*new_lda / 8 + s]; weights_shared[s + (tmp_fil - min_fil)*new_lda/32] = ((uint32_t *)weights)[tmp_fil*new_lda / 32 + s]; } } __syncthreads(); for (chan = 0; chan < in_c; ++chan) { //int const weights_pre_index = fil*in_c*size*size + chan*size*size; //int const weights_pre_index = fil*new_lda + chan*size*size; int const input_pre_index = chan*in_w*in_h; __shared__ uint32_t input_shared[416*416/32 + 1]; // 21.2 KB bytes (for input size 832x832) const int input_shared_size = in_w*in_h / 32 + 1; const int add_input_index = input_pre_index % 32; __syncthreads(); // why??? 
but is required for (int s = threadIdx.x; s < input_shared_size; s += blockDim.x) { input_shared[s] = ((uint32_t *)input)[input_pre_index / 32 + s]; } __syncthreads(); /* __shared__ uint8_t input_shared[208 * 208 / 8 + 1]; // 5.4 KB bytes (for input size 416x416) const int input_shared_size = in_w*in_h / 8 + 1; const int add_input_index = input_pre_index % 8; __syncthreads(); for (int s = threadIdx.x; s < input_shared_size; s += blockDim.x) { ((uint8_t *)input_shared)[s] = ((uint8_t *)input)[input_pre_index / 8 + s]; } __syncthreads(); */ //int src_index = -1; //uint32_t input_byte; if (fil < n) // (1-6 for one BLOCK) { // filter - y for (f_y = 0; f_y < size; ++f_y) { int input_y = y + f_y - pad; // filter - x for (f_x = 0; f_x < size; ++f_x) { int input_x = x + f_x - pad; if (input_y < 0 || input_x < 0 || input_y >= in_h || input_x >= in_w) continue; //int input_index = input_pre_index + input_y*in_w + input_x; //int weights_index = weights_pre_index + f_y*size + f_x; //int weights_index = fil*in_c*size*size + chan*size*size + f_y*size + f_x; //int weights_index = fil*new_lda + chan*size*size + f_y*size + f_x; //uint8_t in_bit = get_bit((uint8_t *)input, input_index); //uint8_t w_bit = get_bit((uint8_t *)weights, weights_index); //int weights_index = fil*in_c*size*size + chan*size*size + f_y*size + f_x; int weights_shared_index = (fil - min_fil)*new_lda + chan*size*size + f_y*size + f_x; //uint8_t in_bit = get_bit((uint8_t *)weights_shared, weights_shared_index); uint8_t w_bit = get_bit((uint8_t *)weights_shared, weights_shared_index); //int input_index = input_pre_index + input_y*in_w + input_x; int input_shared_index = /*input_pre_index +*/ input_y*in_w + input_x + add_input_index; uint8_t in_bit = get_bit((uint8_t *)input_shared, input_shared_index); /* int new_src_index = input_shared_index / 32; int src_shift = input_shared_index % 32; //if (new_src_index != src_index) { src_index = new_src_index; input_byte = ((uint32_t *)input_shared)[src_index]; } uint8_t in_bit = (input_byte & (1 << src_shift)) >> src_shift; */ int res = xnor_bit1(in_bit, w_bit); sum += res; good_val++; //sum += input[input_index] *weights[weights_index]; } } } // l.output[filters][width][height] += // state.input[channels][width][height] * // l.weights[filters][channels][filter_width][filter_height]; //output[output_index] += sum; } sum = sum - (good_val - sum); //output[output_index] = sum * mean_arr_gpu[fil]; // atoimcAdd for inter-BLOCK sum atomicAdd(&output[output_index], sum * mean_arr_gpu[fil]); } }
12c78b4707c3576a4ebd0930a1c0f7f847c7c516.cu
#include "includes.h" __device__ __host__ static inline uint8_t xnor_bit1(uint8_t a, uint8_t b) { return ~(a^b) & 0b1; } __device__ __host__ static inline unsigned char get_bit(unsigned char const*const src, size_t index) { size_t src_i = index / 8; int src_shift = index % 8; unsigned char val = (src[src_i] & (1 << src_shift)) > 0; //unsigned char val = (src[src_i] & (1 << (8 - src_shift))) > 0; return val; } __global__ void convolve_bin_gpu_kernel(float *input, float *weights, float *output, int in_w, int in_h, int in_c, int n, int size, int pad, int new_lda, float *mean_arr_gpu) { int index = blockIdx.x*blockDim.x + threadIdx.x; int fil; // filter index //for (fil = 0; fil < n; ++fil) int chan, y, x, f_y, f_x; // channel index //for (chan = 0; chan < in_c; ++chan) // input - y //for (y = 0; y < in_h; ++y) // input - x //for (x = 0; x < in_w; ++x) x = index % in_w; int index2 = index / in_w; y = index2 % in_h; fil = index2 / in_h; //if (fil < n) // (1-6 for one BLOCK) { //float mean_val = mean_arr_gpu[fil]; int const output_index = fil*in_w*in_h + y*in_w + x; int sum = 0; int good_val = 0; int min_index = blockIdx.x*blockDim.x; int min_fil = (min_index / in_w) / in_h; int max_index = (blockIdx.x+1)*blockDim.x - 1; int max_fil = (max_index / in_w) / in_h; __shared__ uint32_t weights_shared[3*3*1024*6/32 + 1]; // 7 KB (6 filters) - use (new_lda) for size calculation //const int weights_size = size*size*in_c/8; const int weights_size = size*size*in_c / 32 + 1; for (int tmp_fil = min_fil; tmp_fil <= max_fil; tmp_fil++) { for (int s = threadIdx.x; s < weights_size; s += blockDim.x) { //weights_shared[s + (tmp_fil - min_fil)*new_lda / 8] = ((uint8_t *)weights)[tmp_fil*new_lda / 8 + s]; weights_shared[s + (tmp_fil - min_fil)*new_lda/32] = ((uint32_t *)weights)[tmp_fil*new_lda / 32 + s]; } } __syncthreads(); for (chan = 0; chan < in_c; ++chan) { //int const weights_pre_index = fil*in_c*size*size + chan*size*size; //int const weights_pre_index = fil*new_lda + chan*size*size; int const input_pre_index = chan*in_w*in_h; __shared__ uint32_t input_shared[416*416/32 + 1]; // 21.2 KB bytes (for input size 832x832) const int input_shared_size = in_w*in_h / 32 + 1; const int add_input_index = input_pre_index % 32; __syncthreads(); // why??? 
but is required for (int s = threadIdx.x; s < input_shared_size; s += blockDim.x) { input_shared[s] = ((uint32_t *)input)[input_pre_index / 32 + s]; } __syncthreads(); /* __shared__ uint8_t input_shared[208 * 208 / 8 + 1]; // 5.4 KB bytes (for input size 416x416) const int input_shared_size = in_w*in_h / 8 + 1; const int add_input_index = input_pre_index % 8; __syncthreads(); for (int s = threadIdx.x; s < input_shared_size; s += blockDim.x) { ((uint8_t *)input_shared)[s] = ((uint8_t *)input)[input_pre_index / 8 + s]; } __syncthreads(); */ //int src_index = -1; //uint32_t input_byte; if (fil < n) // (1-6 for one BLOCK) { // filter - y for (f_y = 0; f_y < size; ++f_y) { int input_y = y + f_y - pad; // filter - x for (f_x = 0; f_x < size; ++f_x) { int input_x = x + f_x - pad; if (input_y < 0 || input_x < 0 || input_y >= in_h || input_x >= in_w) continue; //int input_index = input_pre_index + input_y*in_w + input_x; //int weights_index = weights_pre_index + f_y*size + f_x; //int weights_index = fil*in_c*size*size + chan*size*size + f_y*size + f_x; //int weights_index = fil*new_lda + chan*size*size + f_y*size + f_x; //uint8_t in_bit = get_bit((uint8_t *)input, input_index); //uint8_t w_bit = get_bit((uint8_t *)weights, weights_index); //int weights_index = fil*in_c*size*size + chan*size*size + f_y*size + f_x; int weights_shared_index = (fil - min_fil)*new_lda + chan*size*size + f_y*size + f_x; //uint8_t in_bit = get_bit((uint8_t *)weights_shared, weights_shared_index); uint8_t w_bit = get_bit((uint8_t *)weights_shared, weights_shared_index); //int input_index = input_pre_index + input_y*in_w + input_x; int input_shared_index = /*input_pre_index +*/ input_y*in_w + input_x + add_input_index; uint8_t in_bit = get_bit((uint8_t *)input_shared, input_shared_index); /* int new_src_index = input_shared_index / 32; int src_shift = input_shared_index % 32; //if (new_src_index != src_index) { src_index = new_src_index; input_byte = ((uint32_t *)input_shared)[src_index]; } uint8_t in_bit = (input_byte & (1 << src_shift)) >> src_shift; */ int res = xnor_bit1(in_bit, w_bit); sum += res; good_val++; //sum += input[input_index] *weights[weights_index]; } } } // l.output[filters][width][height] += // state.input[channels][width][height] * // l.weights[filters][channels][filter_width][filter_height]; //output[output_index] += sum; } sum = sum - (good_val - sum); //output[output_index] = sum * mean_arr_gpu[fil]; // atoimcAdd for inter-BLOCK sum atomicAdd(&output[output_index], sum * mean_arr_gpu[fil]); } }
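In convolve_bin_gpu_kernel above, sum first counts XNOR matches over the good_val taps that fall inside the image, and the line sum = sum - (good_val - sum) then converts that count into the signed +/-1 dot product (matches minus mismatches) before it is scaled by mean_arr_gpu[fil]. A small host-side check of that identity, not part of the original file:

// Hypothetical host-side sanity check; xnor_bit1 is copied from the file above.
#include <cassert>
#include <cstdint>

static inline uint8_t xnor_bit1(uint8_t a, uint8_t b) { return ~(a ^ b) & 0b1; }

int main()
{
    const uint8_t in[8] = {1,0,1,1,0,0,1,0};
    const uint8_t w[8]  = {1,1,1,0,0,1,1,0};
    int matches = 0, signed_dot = 0;
    for (int i = 0; i < 8; ++i) {
        matches    += xnor_bit1(in[i], w[i]);
        signed_dot += (in[i] ? 1 : -1) * (w[i] ? 1 : -1);
    }
    // matches - (total - matches) == 2*matches - total == sum of +/-1 products
    assert(matches - (8 - matches) == signed_dot);
    return 0;
}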
e5c16505ccc767db2a9f4dcd314433fd9fb040d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * _reg_resampling_kernels.cu * * * Created by Marc Modat on 24/03/2009. * Copyright (c) 2009-2018, University College London * Copyright (c) 2018, NiftyReg Developers. * All rights reserved. * See the LICENSE.txt file in the nifty_reg root folder * */ #ifndef _REG_RESAMPLING_KERNELS_CU #define _REG_RESAMPLING_KERNELS_CU texture<float, 3, hipReadModeElementType> floatingTexture; texture<float4, 1, hipReadModeElementType> floatingMatrixTexture; texture<float4, 1, hipReadModeElementType> deformationFieldTexture; texture<int, 1, hipReadModeElementType> maskTexture; /* *************************************************************** */ __device__ __constant__ int3 c_FloatingDim; __device__ __constant__ int c_VoxelNumber; __device__ __constant__ float c_PaddingValue; __device__ __constant__ int c_ActiveVoxelNumber; /* *************************************************************** */ /* *************************************************************** */ __global__ void reg_resampleImage2D_kernel(float *resultArray) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_ActiveVoxelNumber){ //Get the real world deformation in the floating space const int tid2 = tex1Dfetch(maskTexture,tid); float4 realdeformation = tex1Dfetch(deformationFieldTexture,tid); //Get the voxel-based deformation in the floating space float2 voxeldeformation; float4 matrix = tex1Dfetch(floatingMatrixTexture,0); voxeldeformation.x = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.w; matrix = tex1Dfetch(floatingMatrixTexture,1); voxeldeformation.y = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.w; int3 floatingImageSize = c_FloatingDim; if( voxeldeformation.x>=0.0f && voxeldeformation.x<=floatingImageSize.x-1 && voxeldeformation.y>=0.0f && voxeldeformation.y<=floatingImageSize.y-1 ){ resultArray[tid2]=tex3D(floatingTexture, voxeldeformation.x+0.5f, voxeldeformation.y+0.5f, 0.5f); } else resultArray[tid2]=c_PaddingValue; } } /* *************************************************************** */ __global__ void reg_resampleImage3D_kernel(float *resultArray) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_ActiveVoxelNumber){ const int tid2 = tex1Dfetch(maskTexture,tid); //Get the real world deformation in the floating space float4 realdeformation = tex1Dfetch(deformationFieldTexture,tid); //Get the voxel-based deformation in the floating space float3 voxeldeformation; float4 matrix = tex1Dfetch(floatingMatrixTexture,0); voxeldeformation.x = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.z*realdeformation.z + matrix.w; matrix = tex1Dfetch(floatingMatrixTexture,1); voxeldeformation.y = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.z*realdeformation.z + matrix.w; matrix = tex1Dfetch(floatingMatrixTexture,2); voxeldeformation.z = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.z*realdeformation.z + matrix.w; int3 floatingImageSize = c_FloatingDim; if( voxeldeformation.x>=0.0f && voxeldeformation.x<=floatingImageSize.x-1 && voxeldeformation.y>=0.0f && voxeldeformation.y<=floatingImageSize.y-1 && voxeldeformation.z>=0.0f && voxeldeformation.z<=floatingImageSize.z-1 ){ resultArray[tid2]=tex3D(floatingTexture, voxeldeformation.x+0.5f, voxeldeformation.y+0.5f, voxeldeformation.z+0.5f); } else resultArray[tid2]=c_PaddingValue; } } /* *************************************************************** */ 
/* *************************************************************** */ __global__ void reg_getImageGradient2D_kernel(float4 *gradientArray) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_ActiveVoxelNumber){ //Get the real world deformation in the floating space float4 realdeformation = tex1Dfetch(deformationFieldTexture,tid); //Get the voxel-based deformation in the floating space float3 voxeldeformation; float4 matrix = tex1Dfetch(floatingMatrixTexture,0); voxeldeformation.x = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.w; matrix = tex1Dfetch(floatingMatrixTexture,1); voxeldeformation.y = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.w; int2 voxel; voxel.x = (int)(voxeldeformation.x); voxel.y = (int)(voxeldeformation.y); float xBasis[2]; float relative = fabsf(voxeldeformation.x - (float)voxel.x); xBasis[0]=1.0f-relative; xBasis[1]=relative; float yBasis[2]; relative = fabsf(voxeldeformation.y - (float)voxel.y); yBasis[0]=1.0f-relative; yBasis[1]=relative; float deriv[2]; deriv[0]=-1.0f; deriv[1]=1.0f; float4 gradientValue=make_float4(0.0f, 0.0f, 0.0f, 0.0f); float2 relativedeformation; for(short b=0; b<2; b++){ float2 tempValueX=make_float2(0.0f, 0.0f); relativedeformation.y=((float)voxel.y+(float)b+0.5f)/(float)c_FloatingDim.y; for(short a=0; a<2; a++){ relativedeformation.x=((float)voxel.x+(float)a+0.5f)/(float)c_FloatingDim.x; float intensity=c_PaddingValue; if(0.f<=relativedeformation.x && relativedeformation.x<=1.f && 0.f<=relativedeformation.y && relativedeformation.y<=1.f) intensity=tex3D(floatingTexture, relativedeformation.x, relativedeformation.y, 0.5f); tempValueX.x += intensity * deriv[a]; tempValueX.y += intensity * xBasis[a]; } gradientValue.x += tempValueX.x * yBasis[b]; gradientValue.y += tempValueX.y * deriv[b]; } gradientArray[tid]=gradientValue; } } /* *************************************************************** */ __global__ void reg_getImageGradient3D_kernel(float4 *gradientArray) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_ActiveVoxelNumber){ //Get the real world deformation in the floating space float4 realdeformation = tex1Dfetch(deformationFieldTexture,tid); //Get the voxel-based deformation in the floating space float3 voxeldeformation; float4 matrix = tex1Dfetch(floatingMatrixTexture,0); voxeldeformation.x = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.z*realdeformation.z + matrix.w; matrix = tex1Dfetch(floatingMatrixTexture,1); voxeldeformation.y = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.z*realdeformation.z + matrix.w; matrix = tex1Dfetch(floatingMatrixTexture,2); voxeldeformation.z = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.z*realdeformation.z + matrix.w; int3 voxel; voxel.x = (int)(voxeldeformation.x); voxel.y = (int)(voxeldeformation.y); voxel.z = (int)(voxeldeformation.z); float xBasis[2]; float relative = fabsf(voxeldeformation.x - (float)voxel.x); xBasis[0]=1.0f-relative; xBasis[1]=relative; float yBasis[2]; relative = fabsf(voxeldeformation.y - (float)voxel.y); yBasis[0]=1.0f-relative; yBasis[1]=relative; float zBasis[2]; relative = fabsf(voxeldeformation.z - (float)voxel.z); zBasis[0]=1.0f-relative; zBasis[1]=relative; float deriv[2]; deriv[0]=-1.0f; deriv[1]=1.0f; float4 gradientValue=make_float4(0.0f, 0.0f, 0.0f, 0.0f); float3 relativedeformation; for(short c=0; c<2; c++){ relativedeformation.z=((float)voxel.z+(float)c+0.5f)/(float)c_FloatingDim.z; float3 
tempValueY=make_float3(0.0f, 0.0f, 0.0f); for(short b=0; b<2; b++){ float2 tempValueX=make_float2(0.0f, 0.0f); relativedeformation.y=((float)voxel.y+(float)b+0.5f)/(float)c_FloatingDim.y; for(short a=0; a<2; a++){ relativedeformation.x=((float)voxel.x+(float)a+0.5f)/(float)c_FloatingDim.x; float intensity=c_PaddingValue; if(0.f<=relativedeformation.x && relativedeformation.x<=1.f && 0.f<=relativedeformation.y && relativedeformation.y<=1.f && 0.f<=relativedeformation.z && relativedeformation.z<=1.f) intensity=tex3D(floatingTexture, relativedeformation.x, relativedeformation.y, relativedeformation.z); tempValueX.x += intensity * deriv[a]; tempValueX.y += intensity * xBasis[a]; } tempValueY.x += tempValueX.x * yBasis[b]; tempValueY.y += tempValueX.y * deriv[b]; tempValueY.z += tempValueX.y * yBasis[b]; } gradientValue.x += tempValueY.x * zBasis[c]; gradientValue.y += tempValueY.y * zBasis[c]; gradientValue.z += tempValueY.z * deriv[c]; } gradientArray[tid]=gradientValue; } } /* *************************************************************** */ /* *************************************************************** */ #endif
e5c16505ccc767db2a9f4dcd314433fd9fb040d5.cu
/* * _reg_resampling_kernels.cu * * * Created by Marc Modat on 24/03/2009. * Copyright (c) 2009-2018, University College London * Copyright (c) 2018, NiftyReg Developers. * All rights reserved. * See the LICENSE.txt file in the nifty_reg root folder * */ #ifndef _REG_RESAMPLING_KERNELS_CU #define _REG_RESAMPLING_KERNELS_CU texture<float, 3, cudaReadModeElementType> floatingTexture; texture<float4, 1, cudaReadModeElementType> floatingMatrixTexture; texture<float4, 1, cudaReadModeElementType> deformationFieldTexture; texture<int, 1, cudaReadModeElementType> maskTexture; /* *************************************************************** */ __device__ __constant__ int3 c_FloatingDim; __device__ __constant__ int c_VoxelNumber; __device__ __constant__ float c_PaddingValue; __device__ __constant__ int c_ActiveVoxelNumber; /* *************************************************************** */ /* *************************************************************** */ __global__ void reg_resampleImage2D_kernel(float *resultArray) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_ActiveVoxelNumber){ //Get the real world deformation in the floating space const int tid2 = tex1Dfetch(maskTexture,tid); float4 realdeformation = tex1Dfetch(deformationFieldTexture,tid); //Get the voxel-based deformation in the floating space float2 voxeldeformation; float4 matrix = tex1Dfetch(floatingMatrixTexture,0); voxeldeformation.x = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.w; matrix = tex1Dfetch(floatingMatrixTexture,1); voxeldeformation.y = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.w; int3 floatingImageSize = c_FloatingDim; if( voxeldeformation.x>=0.0f && voxeldeformation.x<=floatingImageSize.x-1 && voxeldeformation.y>=0.0f && voxeldeformation.y<=floatingImageSize.y-1 ){ resultArray[tid2]=tex3D(floatingTexture, voxeldeformation.x+0.5f, voxeldeformation.y+0.5f, 0.5f); } else resultArray[tid2]=c_PaddingValue; } } /* *************************************************************** */ __global__ void reg_resampleImage3D_kernel(float *resultArray) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_ActiveVoxelNumber){ const int tid2 = tex1Dfetch(maskTexture,tid); //Get the real world deformation in the floating space float4 realdeformation = tex1Dfetch(deformationFieldTexture,tid); //Get the voxel-based deformation in the floating space float3 voxeldeformation; float4 matrix = tex1Dfetch(floatingMatrixTexture,0); voxeldeformation.x = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.z*realdeformation.z + matrix.w; matrix = tex1Dfetch(floatingMatrixTexture,1); voxeldeformation.y = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.z*realdeformation.z + matrix.w; matrix = tex1Dfetch(floatingMatrixTexture,2); voxeldeformation.z = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.z*realdeformation.z + matrix.w; int3 floatingImageSize = c_FloatingDim; if( voxeldeformation.x>=0.0f && voxeldeformation.x<=floatingImageSize.x-1 && voxeldeformation.y>=0.0f && voxeldeformation.y<=floatingImageSize.y-1 && voxeldeformation.z>=0.0f && voxeldeformation.z<=floatingImageSize.z-1 ){ resultArray[tid2]=tex3D(floatingTexture, voxeldeformation.x+0.5f, voxeldeformation.y+0.5f, voxeldeformation.z+0.5f); } else resultArray[tid2]=c_PaddingValue; } } /* *************************************************************** */ /* *************************************************************** */ __global__ 
void reg_getImageGradient2D_kernel(float4 *gradientArray) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_ActiveVoxelNumber){ //Get the real world deformation in the floating space float4 realdeformation = tex1Dfetch(deformationFieldTexture,tid); //Get the voxel-based deformation in the floating space float3 voxeldeformation; float4 matrix = tex1Dfetch(floatingMatrixTexture,0); voxeldeformation.x = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.w; matrix = tex1Dfetch(floatingMatrixTexture,1); voxeldeformation.y = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.w; int2 voxel; voxel.x = (int)(voxeldeformation.x); voxel.y = (int)(voxeldeformation.y); float xBasis[2]; float relative = fabsf(voxeldeformation.x - (float)voxel.x); xBasis[0]=1.0f-relative; xBasis[1]=relative; float yBasis[2]; relative = fabsf(voxeldeformation.y - (float)voxel.y); yBasis[0]=1.0f-relative; yBasis[1]=relative; float deriv[2]; deriv[0]=-1.0f; deriv[1]=1.0f; float4 gradientValue=make_float4(0.0f, 0.0f, 0.0f, 0.0f); float2 relativedeformation; for(short b=0; b<2; b++){ float2 tempValueX=make_float2(0.0f, 0.0f); relativedeformation.y=((float)voxel.y+(float)b+0.5f)/(float)c_FloatingDim.y; for(short a=0; a<2; a++){ relativedeformation.x=((float)voxel.x+(float)a+0.5f)/(float)c_FloatingDim.x; float intensity=c_PaddingValue; if(0.f<=relativedeformation.x && relativedeformation.x<=1.f && 0.f<=relativedeformation.y && relativedeformation.y<=1.f) intensity=tex3D(floatingTexture, relativedeformation.x, relativedeformation.y, 0.5f); tempValueX.x += intensity * deriv[a]; tempValueX.y += intensity * xBasis[a]; } gradientValue.x += tempValueX.x * yBasis[b]; gradientValue.y += tempValueX.y * deriv[b]; } gradientArray[tid]=gradientValue; } } /* *************************************************************** */ __global__ void reg_getImageGradient3D_kernel(float4 *gradientArray) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<c_ActiveVoxelNumber){ //Get the real world deformation in the floating space float4 realdeformation = tex1Dfetch(deformationFieldTexture,tid); //Get the voxel-based deformation in the floating space float3 voxeldeformation; float4 matrix = tex1Dfetch(floatingMatrixTexture,0); voxeldeformation.x = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.z*realdeformation.z + matrix.w; matrix = tex1Dfetch(floatingMatrixTexture,1); voxeldeformation.y = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.z*realdeformation.z + matrix.w; matrix = tex1Dfetch(floatingMatrixTexture,2); voxeldeformation.z = matrix.x*realdeformation.x + matrix.y*realdeformation.y + matrix.z*realdeformation.z + matrix.w; int3 voxel; voxel.x = (int)(voxeldeformation.x); voxel.y = (int)(voxeldeformation.y); voxel.z = (int)(voxeldeformation.z); float xBasis[2]; float relative = fabsf(voxeldeformation.x - (float)voxel.x); xBasis[0]=1.0f-relative; xBasis[1]=relative; float yBasis[2]; relative = fabsf(voxeldeformation.y - (float)voxel.y); yBasis[0]=1.0f-relative; yBasis[1]=relative; float zBasis[2]; relative = fabsf(voxeldeformation.z - (float)voxel.z); zBasis[0]=1.0f-relative; zBasis[1]=relative; float deriv[2]; deriv[0]=-1.0f; deriv[1]=1.0f; float4 gradientValue=make_float4(0.0f, 0.0f, 0.0f, 0.0f); float3 relativedeformation; for(short c=0; c<2; c++){ relativedeformation.z=((float)voxel.z+(float)c+0.5f)/(float)c_FloatingDim.z; float3 tempValueY=make_float3(0.0f, 0.0f, 0.0f); for(short b=0; b<2; b++){ float2 
tempValueX=make_float2(0.0f, 0.0f); relativedeformation.y=((float)voxel.y+(float)b+0.5f)/(float)c_FloatingDim.y; for(short a=0; a<2; a++){ relativedeformation.x=((float)voxel.x+(float)a+0.5f)/(float)c_FloatingDim.x; float intensity=c_PaddingValue; if(0.f<=relativedeformation.x && relativedeformation.x<=1.f && 0.f<=relativedeformation.y && relativedeformation.y<=1.f && 0.f<=relativedeformation.z && relativedeformation.z<=1.f) intensity=tex3D(floatingTexture, relativedeformation.x, relativedeformation.y, relativedeformation.z); tempValueX.x += intensity * deriv[a]; tempValueX.y += intensity * xBasis[a]; } tempValueY.x += tempValueX.x * yBasis[b]; tempValueY.y += tempValueX.y * deriv[b]; tempValueY.z += tempValueX.y * yBasis[b]; } gradientValue.x += tempValueY.x * zBasis[c]; gradientValue.y += tempValueY.y * zBasis[c]; gradientValue.z += tempValueY.z * deriv[c]; } gradientArray[tid]=gradientValue; } } /* *************************************************************** */ /* *************************************************************** */ #endif
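The resampling and gradient kernels in the e5c16505 pair read from texture references (floatingTexture, floatingMatrixTexture, deformationFieldTexture, maskTexture) and __constant__ symbols (c_FloatingDim, c_PaddingValue, c_ActiveVoxelNumber) that the host must populate before any launch; in the NiftyReg project that setup happens in other source files. The sketch below only illustrates which symbols need to be filled, using the legacy texture-reference API these kernels depend on; the buffer names, sizes and the helper function itself are assumptions, and error checking is omitted.

// Hypothetical host-side setup; only the symbol names come from the kernels above.
void bind_resampling_inputs(cudaArray *floatingArray,      // 3D array holding the floating image
                            float4 *d_floatingMatrix,      // rows 0-2 of the real->voxel matrix
                            float4 *d_deformationField,    // one float4 per active voxel
                            int *d_mask,
                            int activeVoxelNumber,
                            int3 floatingDim,
                            float paddingValue)
{
    cudaMemcpyToSymbol(c_FloatingDim, &floatingDim, sizeof(int3));
    cudaMemcpyToSymbol(c_PaddingValue, &paddingValue, sizeof(float));
    cudaMemcpyToSymbol(c_ActiveVoxelNumber, &activeVoxelNumber, sizeof(int));

    cudaBindTextureToArray(floatingTexture, floatingArray);
    cudaBindTexture(0, floatingMatrixTexture, d_floatingMatrix, 3 * sizeof(float4));
    cudaBindTexture(0, deformationFieldTexture, d_deformationField,
                    activeVoxelNumber * sizeof(float4));
    cudaBindTexture(0, maskTexture, d_mask, activeVoxelNumber * sizeof(int));
}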
002b8e1480c00947b004db5d49b36be2b66d4bb4.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdlib> #include <cstdio> #include <vector> #include <algorithm> #include <math.h> #include <omp.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> using namespace std; const int TILE_DIM = 32; const int BLOCK_ROWS = 8; #ifndef max #define max( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef min #define min( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif //const bool pano = false; __global__ void copy_mem(unsigned char *source, unsigned char *render) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) for (int channel = 0; channel < 3; channel ++ ) render[3*((y+j)*width + x) + channel] = source[3 * ((y+j)*width + x) + channel]; } __global__ void set_depth(unsigned int *depth) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) depth[(y+j)*width + x] = 65535; } __global__ void char_to_int(int * img2, unsigned char * img) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) img2[(y+j)*width + x] = img[3*((y+j)*width + x) + 0] * 256 * 256 + img[3*((y+j)*width + x) + 1] * 256 + img[3*((y+j)*width + x) + 2]; } __global__ void int_to_char(int * img2, unsigned char * img) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { img[3*((y+j)*width + x)] = img2[(y+j)*width + x] / (256*256); img[3*((y+j)*width + x)+1] = img2[(y+j)*width + x] / 256 % 256; img[3*((y+j)*width + x)+2] = img2[(y+j)*width + x] % 256; } } __global__ void fill(unsigned char * img) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { if ( img[3*((y+j)*width + x)] + img[3*((y+j)*width + x)+1] + img[3*((y+j)*width + x)+2] == 0) { img[3*((y+j)*width + x)] = img[3*((y+j)*width + x + 1)]; img[3*((y+j)*width + x)+1] = img[3*((y+j)*width + x + 1)+1]; img[3*((y+j)*width + x)+2] = img[3*((y+j)*width + x + 1)+2]; } } } __global__ void selection_sum_weights(float * selection_sum, float * selection, int n, int stride) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; int idx = 0; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { selection_sum[((y+j)*width + x)] = 0; for ( idx = 0; idx < n; idx ++) { atomicAdd(&(selection_sum[((y+j)*width + x)]), selection[idx * stride + ((y+j)*width + x)]); } } } __global__ void merge(unsigned char * img_all, unsigned char * img, float * selection, int n, int stride) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; int idx = 0; float sum = 0; float weight = 0; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { sum = 0; for (idx = 0; idx < n; idx ++) sum += selection[idx * stride + ((y+j)*width + x)]; for (idx = 0; idx < n; idx ++) selection[idx * stride + ((y+j)*width + x)] /= (sum + 1e-5); img[3*((y+j)*width + x)] = 0; img[3*((y+j)*width + x)+1] = 0; img[3*((y+j)*width + x)+2] = 0; for (idx = 0; idx < n; idx ++) { //weight = selection[idx * stride + ((y+j)*width + x)]; weight = 0.25; //weight 
= 0.5; img[3*((y+j)*width + x)] += (unsigned char) (img_all[idx * stride * 3 + 3*((y+j)*width + x)] * weight); img[3*((y+j)*width + x)+1] += (unsigned char) (img_all[idx * stride * 3 + 3*((y+j)*width + x) + 1] * weight); img[3*((y+j)*width + x)+2] += (unsigned char)(img_all[idx * stride * 3 + 3*((y+j)*width + x) + 2] * weight); } } } __global__ void merge_sum(unsigned char * img_all, unsigned char * img, float * selection, float * selection_sum, int n, int stride) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; int idx = 0; float weight = 0; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { img[3*((y+j)*width + x)] = 0; img[3*((y+j)*width + x)+1] = 0; img[3*((y+j)*width + x)+2] = 0; for (idx = 0; idx < n; idx ++) { weight = selection[idx * stride + ((y+j)*width + x)] / selection_sum[((y+j)*width + x)]; //weight = 0.25; //weight = 0.5; img[3*((y+j)*width + x)] += (unsigned char) (img_all[idx * stride * 3 + 3*((y+j)*width + x)] * weight); img[3*((y+j)*width + x)+1] += (unsigned char) (img_all[idx * stride * 3 + 3*((y+j)*width + x) + 1] * weight); img[3*((y+j)*width + x)+2] += (unsigned char)(img_all[idx * stride * 3 + 3*((y+j)*width + x) + 2] * weight); } } } __global__ void to3d_point(float *depth, float *points3d) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int w = gridDim.x * TILE_DIM; int h = w / 2; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; float depth_point = depth[ ih*w + iw ] * 128.0; float phi = ((float)(ih) + 0.5) / float(h) * M_PI; float theta = ((float)(iw) + 0.5) / float(w) * 2 * M_PI + M_PI; points3d[(ih * w + iw) * 4 + 0] = depth_point * sin(phi) * cos(theta); points3d[(ih * w + iw) * 4 + 1] = depth_point * sin(phi) * sin(theta); points3d[(ih * w + iw) * 4 + 2] = depth_point * cos(phi); points3d[(ih * w + iw) * 4 + 3] = 1; } } __global__ void transform(float *points3d_after, float *points3d, float * transformation_matrix) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int w = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; for (int ic = 0; ic < 3; ic ++) { points3d_after[(ih * w + iw) * 3 + ic] = points3d[(ih * w + iw) * 4 + 0] * transformation_matrix[4 * ic + 0] + points3d[(ih * w + iw) * 4 + 1] * transformation_matrix[4 * ic + 1] + points3d[(ih * w + iw) * 4 + 2] * transformation_matrix[4 * ic + 2] + points3d[(ih * w + iw) * 4 + 3] * transformation_matrix[4 * ic + 3]; } } } //#define FOV_SCALE 1.73205080757 //#define FOV_SCALE 1 __global__ void transform2d(float *points3d_after, float fov_scale) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int w = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; float x = points3d_after[(ih * w + iw) * 3 + 0]; float y = points3d_after[(ih * w + iw) * 3 + 1]; float z = points3d_after[(ih * w + iw) * 3 + 2]; points3d_after[(ih * w + iw) * 3 + 0] = x;//sqrt(x * x + y * y + z * z); //points3d_after[(ih * w + iw) * 3 + 1] = atan2(y, x); //points3d_after[(ih * w + iw) * 3 + 2] = atan2(sqrt(x * x + y * y), z); float x2 = fov_scale * x; if ((x2 > 0) && (y < x2 * 1.1) && (y > -x2 * 1.1) && (z < x2 * 1.1) && (z > -x2 * 1.1)) { points3d_after[(ih * w + iw) * 3 + 1] = y / (x2 + 1e-5); points3d_after[(ih * w + iw) * 3 + 2] = -z / (x2 + 1e-5); } else { points3d_after[(ih * w + iw) * 3 + 1] = -1; points3d_after[(ih 
* w + iw) * 3 + 2] = -1; } } } __global__ void render_depth(float *points3d_polar, unsigned int * depth_render) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int w = gridDim.x * TILE_DIM; int h = w /2; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; int tx = round((points3d_polar[(ih * w + iw) * 3 + 1] + M_PI)/(2*M_PI) * w - 0.5); int ty = round((points3d_polar[(ih * w + iw) * 3 + 2])/M_PI * h - 0.5); int this_depth = (int)(512 * points3d_polar[(ih * w + iw) * 3 + 0]); atomicMin(&depth_render[(ty * w + tx)] , this_depth); } } __global__ void get_average(unsigned char * img, int * nz, int * average, int scale) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; //int h = width /2; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; if (img[3*(ih*width + iw)] + img[3*(ih*width + iw)+1] + img[3*(ih*width + iw)+2] > 0) { //nz[ih/3 * width + iw/3] += 1; //average[3*(ih/3*width + iw/3)] += (int)img[3*(ih*width + iw)]; //average[3*(ih/3*width + iw/3)+1] += (int)img[3*(ih*width + iw)+1]; //average[3*(ih/3*width + iw/3)+2] += (int)img[3*(ih*width + iw)+2]; atomicAdd(&(nz[ih/scale * width + iw/scale]), 1); atomicAdd(&(average[3*(ih/scale*width + iw/scale)]), (int)img[3*(ih*width + iw)]); atomicAdd(&(average[3*(ih/scale*width + iw/scale)+1]), (int)img[3*(ih*width + iw)+1]); atomicAdd(&(average[3*(ih/scale*width + iw/scale)+2]), (int)img[3*(ih*width + iw)+2]); } } } __global__ void fill_with_average(unsigned char *img, int * nz, int * average, int scale) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; //int h = width /2; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; if ((img[3*(ih*width + iw)] + img[3*(ih*width + iw)+1] + img[3*(ih*width + iw)+2] == 0) && (nz[ih/scale * width + iw/scale] > 0)) { img[3*(ih*width + iw)] = (unsigned char)(average[3*(ih/scale*width + iw/scale)] / nz[ih/scale * width + iw/scale]); img[3*(ih*width + iw) + 1] = (unsigned char)(average[3*(ih/scale*width + iw/scale) + 1] / nz[ih/scale * width + iw/scale]); img[3*(ih*width + iw) + 2] = (unsigned char)(average[3*(ih/scale*width + iw/scale) + 2] / nz[ih/scale * width + iw/scale]); } } } __global__ void render_final(float *points3d_polar, float * selection, float * depth_render, int * img, int * render, int oh, int ow) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int w = gridDim.x * TILE_DIM; int h = w /2; int maxsize = oh * ow; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; int tx = round((points3d_polar[(ih * w + iw) * 3 + 1] + 1)/2 * ow - 0.5); int ty = round((points3d_polar[(ih * w + iw) * 3 + 2] + 1)/2 * oh - 0.5); float tx_offset = ((points3d_polar[(ih * w + iw) * 3 + 1] + 1)/2 * ow - 0.5); float ty_offset = ((points3d_polar[(ih * w + iw) * 3 + 2] + 1)/2 * oh - 0.5); float tx00 = 0; float ty00 = 0; float tx01 = ((points3d_polar[(ih * w + iw + 1) * 3 + 1] + 1)/2 * ow - 0.5) - tx_offset; float ty01 = ((points3d_polar[(ih * w + iw + 1) * 3 + 2] + 1)/2 * oh - 0.5) - ty_offset; float tx10 = ((points3d_polar[((ih + 1) * w + iw) * 3 + 1] + 1)/2 * ow - 0.5) - tx_offset; float ty10 = ((points3d_polar[((ih + 1) * w + iw) * 3 + 2] + 1)/2 * oh - 0.5) - ty_offset; float tx11 = ((points3d_polar[((ih+1) * w + iw + 1) * 3 + 1] + 1)/2 * ow - 0.5) - tx_offset; float ty11 = 
((points3d_polar[((ih+1) * w + iw + 1) * 3 + 2] + 1)/2 * oh - 0.5) - ty_offset; float t00 = 0 * (float)tx00 + (float)tx01 * -1.0/3 + (float)tx10 * 2.0/3 + (float)tx11 * 1.0/3; float t01 = 0 * (float)ty00 + (float)ty01 * -1.0/3 + (float)ty10 * 2.0/3 + (float)ty11 * 1.0/3; float t10 = 0 * (float)tx00 + (float)tx01 * 2.0/3 + (float)tx10 * -1.0/3 + (float)tx11 * 1.0/3; float t11 = 0 * (float)ty00 + (float)ty01 * 2.0/3 + (float)ty10 * -1.0/3 + (float)ty11 * 1.0/3; float det = t00 * t11 - t01 * t10 + 1e-10; //printf("%f %f %f %f %f\n", t00, t01, t10, t11, det); float it00, it01, it10, it11; it00 = t11/det; it01 = -t01/det; it10 = -t10/det; it11 = t00/det; //printf("inverse %f %f %f %f\n", it00, it01, it10, it11); int this_depth = (int)(12800/128 * points3d_polar[(ih * w + iw) * 3 + 0]); int delta00 = (int)(12800/128 * points3d_polar[(ih * w + iw) * 3 + 0]) - (int)(100 * depth_render[(ty * ow + tx)]); int delta01 = (int)(12800/128 * points3d_polar[(ih * w + iw + 1) * 3 + 0]) - (int)(100 * depth_render[(ty * ow + tx + 1)]); int delta10 = (int)(12800/128 * points3d_polar[((ih + 1) * w + iw) * 3 + 0]) - (int)(100 * depth_render[((ty+1) * ow + tx)]); int delta11 = (int)(12800/128 * points3d_polar[((ih+1) * w + iw + 1) * 3 + 0]) - (int)(100 * depth_render[((ty+1) * ow + tx + 1)]); int mindelta = min(min(delta00, delta01), min(delta10, delta11)); int maxdelta = max(max(delta00, delta01), max(delta10, delta11)); int depth00 = (int)(12800/128 * points3d_polar[(ih * w + iw) * 3 + 0]); int depth01 = (int)(12800/128 * points3d_polar[(ih * w + iw + 1) * 3 + 0]); int depth10 = (int)(12800/128 * points3d_polar[((ih+1) * w + iw) * 3 + 0]); int depth11 = (int)(12800/128 * points3d_polar[((ih+1) * w + iw+1) * 3 + 0]); int max_depth = max(max(depth00, depth10), max(depth01, depth11)); int min_depth = min(min(depth00, depth10), min(depth01, depth11)); int delta_depth = max_depth - min_depth; int txmin = floor(tx_offset + min(min(tx00, tx11), min(tx01, tx10))); int txmax = ceil(tx_offset + max(max(tx00, tx11), max(tx01, tx10))); int tymin = floor(ty_offset + min(min(ty00, ty11), min(ty01, ty10))); int tymax = ceil(ty_offset + max(max(ty00, ty11), max(ty01, ty10))); float newx, newy; int r,g,b; int itx, ity; //render[(ty * ow + tx)] = img[ih * w + iw]; //selection[(ty * ow + tx)] = 1.0; float tolerance = 0.1 * this_depth > 10? 0.1 * this_depth : 10; float tolerance2 = 0.05 * max_depth > 10? 
0.05 * max_depth: 10; float flank = 0.01; if ((delta_depth < tolerance2) && (y > 1 * h/8) && (y < (h*7)/8)) if (((mindelta > - tolerance) && (maxdelta < tolerance)) && (this_depth < 10000)) { if (((txmax - txmin) * (tymax - tymin) < 1600) && (txmax - txmin < 40) && (tymax - tymin < 40)) { for (itx = txmin; itx < txmax; itx ++) for (ity = tymin; ity < tymax; ity ++) { if (( 0 <= itx) && (itx < ow) && ( 0 <= ity) && (ity < oh)) { newx = (itx - tx_offset) * it00 + it10 * (ity - ty_offset); newy = (itx - tx_offset) * it01 + it11 * (ity - ty_offset); //printf("%f %f\n", newx, newy); if ((newx > -flank) && (newx < 1 + flank) && (newy > -flank) && (newy < 1 + flank)) { if (newx < 0) newx = 0; if (newy < 0) newy = 0; if (newx > 1) newx = 1; if (newy > 1) newy = 1; r = img[(ih * w + iw)] / (256*256) * (1-newx) * (1-newy) + img[(ih * w + iw + 1)] / (256*256) * (1-newx) * (newy) + img[((ih+1) * w + iw)] / (256*256) * (newx) * (1-newy) + img[((ih+1) * w + iw + 1)] / (256*256) * newx * newy; g = img[(ih * w + iw)] / 256 % 256 * (1-newx) * (1-newy) + img[(ih * w + iw + 1)] / 256 % 256 * (1-newx) * (newy) + img[((ih+1) * w + iw)] / 256 % 256 * (newx) * (1-newy) + img[((ih+1) * w + iw + 1)] / 256 % 256 * newx * newy; b = img[(ih * w + iw)] % 256 * (1-newx) * (1-newy) + img[(ih * w + iw + 1)] % 256 * (1-newx) * (newy) + img[((ih+1) * w + iw)] % 256 * (newx) * (1-newy) + img[((ih+1) * w + iw + 1)] % 256 * newx * newy ; if (r > 255) r = 255; if (g > 255) g = 255; if (b > 255) b = 255; if ((ity * ow + itx > 0) && (ity * ow + itx < maxsize)) { render[(ity * ow + itx)] = r * 256 * 256 + g * 256 + b; selection[(ity * ow + itx)] = 1.0 / abs(det); } } } } } } } } extern "C"{ void render(int n, int h,int w, int oh, int ow, unsigned char * img, float * depth,float * pose, unsigned char * render, float * depth_render, float fov){ //int ih, iw, i, ic; //printf("inside cuda code %d\n", depth); //printf("scale %d\n", s); const int nx = w; const int ny = h; const int onx = ow; const int ony = oh; const size_t input_mem_size = nx*ny; const size_t output_mem_size = onx * ony; dim3 dimGrid(nx/TILE_DIM, ny/TILE_DIM, 1); dim3 dimGrid_out(onx/TILE_DIM, ony/TILE_DIM, 1); dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1); unsigned char *d_img, *d_render, *d_render_all; float *d_depth, *d_pose; float *d_depth_render; float *d_3dpoint, *d_3dpoint_after; float * d_selection, * d_selection_sum; int * nz; int * average; int *d_render2, *d_img2; hipMalloc((void **)&d_img, input_mem_size * sizeof(unsigned char) * 3); hipMalloc((void **)&d_img2, input_mem_size * sizeof(int)); hipMalloc((void **)&d_render, output_mem_size * sizeof(unsigned char) * 3); hipMalloc((void **)&d_render_all, output_mem_size * sizeof(unsigned char) * 3 * n); hipMalloc((void **)&d_depth, input_mem_size * sizeof(float)); hipMalloc((void **)&d_depth_render, output_mem_size * sizeof(float)); hipMalloc((void **)&d_3dpoint, input_mem_size * sizeof(float) * 4); hipMalloc((void **)&d_3dpoint_after, input_mem_size * sizeof(float) * 4); hipMalloc((void **)&d_pose, sizeof(float) * 16); hipMalloc((void **)&d_selection, output_mem_size * sizeof(float) * n); hipMalloc((void **)&d_selection_sum, output_mem_size * sizeof(float)); hipMalloc((void **)&d_render2, output_mem_size * sizeof(int)); hipMalloc((void **)&nz, output_mem_size * sizeof(int)); hipMalloc((void **)&average, output_mem_size * sizeof(int) * 3); hipMemset(nz, 0, output_mem_size * sizeof(int)); hipMemset(average, 0, output_mem_size * sizeof(int) * 3); hipMemset(d_selection, 0, output_mem_size * sizeof(float) * n); 
hipMemset(d_selection_sum, 0, output_mem_size * sizeof(float)); hipMemcpy(d_depth_render, depth_render, output_mem_size * sizeof(float), hipMemcpyHostToDevice); hipMemset(d_render_all, 0, output_mem_size * sizeof(unsigned char) * 3 * n); int idx; for (idx = 0; idx < n; idx ++) { hipMemcpy(d_pose, &(pose[idx * 16]), sizeof(float) * 16, hipMemcpyHostToDevice); hipMemcpy(d_img, &(img[idx * input_mem_size * 3]), input_mem_size * sizeof(unsigned char) * 3, hipMemcpyHostToDevice); hipMemcpy(d_depth, &(depth[idx * input_mem_size]), input_mem_size * sizeof(float), hipMemcpyHostToDevice); hipMemset(d_render, 0, output_mem_size * sizeof(unsigned char) * 3); hipMemset(d_render2, 0, output_mem_size * sizeof(int)); hipMemset(d_img2, 0, input_mem_size * sizeof(int)); hipMemset(d_3dpoint, 0, input_mem_size * sizeof(float) * 4); hipMemset(d_3dpoint_after, 0, input_mem_size * sizeof(float) * 3); hipLaunchKernelGGL(( to3d_point), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_depth, d_3dpoint); hipLaunchKernelGGL(( transform), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_3dpoint_after, d_3dpoint, d_pose); float fov_scale = tan(fov/2); hipLaunchKernelGGL(( transform2d), dim3(dimGrid), dim3(dimBlock), 0, 0, d_3dpoint_after, fov_scale); hipLaunchKernelGGL(( char_to_int) , dim3(dimGrid), dim3(dimBlock) , 0, 0, d_img2, d_img); hipLaunchKernelGGL(( render_final) , dim3(dimGrid), dim3(dimBlock) , 0, 0, d_3dpoint_after, &(d_selection[idx * onx * ony]), d_depth_render, d_img2, d_render2, oh, ow); //int_to_char <<< dimGrid_out, dimBlock >>> (d_render2, d_render); hipLaunchKernelGGL(( int_to_char) , dim3(dimGrid_out), dim3(dimBlock) , 0, 0, d_render2, &(d_render_all[idx * output_mem_size * 3])); int fill_size[1] = {3}; for (int j = 0; j < 1; j++) { hipMemset(nz, 0, output_mem_size * sizeof(int)); hipMemset(average, 0, output_mem_size * sizeof(int) * 3); hipLaunchKernelGGL(( get_average) , dim3(dimGrid_out), dim3(dimBlock) , 0, 0, &(d_render_all[idx * output_mem_size * 3]), nz, average, fill_size[j]); hipLaunchKernelGGL(( fill_with_average) , dim3(dimGrid_out), dim3(dimBlock) , 0, 0, &(d_render_all[idx * output_mem_size * 3]), nz, average, fill_size[j]); } } hipLaunchKernelGGL(( selection_sum_weights) , dim3(dimGrid_out), dim3(dimBlock) , 0, 0, d_selection_sum, d_selection, n, output_mem_size); hipLaunchKernelGGL(( merge_sum) , dim3(dimGrid_out), dim3(dimBlock) , 0, 0, d_render_all, d_render, d_selection, d_selection_sum, n, output_mem_size); //merge <<< dimGrid_out, dimBlock >>> (d_render_all, d_render, d_selection, n, output_mem_size); hipMemcpy(render, d_render, output_mem_size * sizeof(unsigned char) * 3 , hipMemcpyDeviceToHost); hipFree(d_img); hipFree(d_depth); hipFree(d_render2); hipFree(d_img2); hipFree(d_render); hipFree(d_depth_render); hipFree(d_3dpoint); hipFree(d_3dpoint_after); hipFree(d_pose); hipFree(d_render_all); hipFree(d_selection); hipFree(nz); hipFree(average); hipFree(d_selection_sum); } }//extern "C"
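The 002b8e14 pair exposes a single extern "C" render() entry point. Its buffer layout can be read off the memcpys inside it: img packs n stacked h x w RGB panoramas, depth the matching n depth maps, pose holds n row-major 4x4 transforms (16 floats each), depth_render carries the oh x ow target-view depth used for the occlusion test, and the blended oh x ow RGB image is written into the render argument; fov is passed in radians (the code takes tan(fov/2)). A hypothetical caller, not part of the original project:

// Hypothetical usage sketch; render() itself is the extern "C" function defined above.
#include <vector>

extern "C" void render(int n, int h, int w, int oh, int ow,
                       unsigned char *img, float *depth, float *pose,
                       unsigned char *render, float *depth_render, float fov);

void blend_views(int n, int h, int w, int oh, int ow, float fov_radians,
                 std::vector<unsigned char> &img,        // n * h * w * 3 bytes
                 std::vector<float> &depth,              // n * h * w floats
                 std::vector<float> &pose,               // n * 16 floats (row-major 4x4)
                 std::vector<float> &depth_render,       // oh * ow floats, target-view depth
                 std::vector<unsigned char> &out)        // oh * ow * 3 bytes, receives the result
{
    render(n, h, w, oh, ow,
           img.data(), depth.data(), pose.data(),
           out.data(), depth_render.data(), fov_radians);
}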
002b8e1480c00947b004db5d49b36be2b66d4bb4.cu
#include <cstdlib> #include <cstdio> #include <vector> #include <algorithm> #include <math.h> #include <omp.h> #include <cuda.h> #include <cuda_runtime_api.h> using namespace std; const int TILE_DIM = 32; const int BLOCK_ROWS = 8; #ifndef max #define max( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef min #define min( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif //const bool pano = false; __global__ void copy_mem(unsigned char *source, unsigned char *render) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) for (int channel = 0; channel < 3; channel ++ ) render[3*((y+j)*width + x) + channel] = source[3 * ((y+j)*width + x) + channel]; } __global__ void set_depth(unsigned int *depth) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) depth[(y+j)*width + x] = 65535; } __global__ void char_to_int(int * img2, unsigned char * img) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) img2[(y+j)*width + x] = img[3*((y+j)*width + x) + 0] * 256 * 256 + img[3*((y+j)*width + x) + 1] * 256 + img[3*((y+j)*width + x) + 2]; } __global__ void int_to_char(int * img2, unsigned char * img) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { img[3*((y+j)*width + x)] = img2[(y+j)*width + x] / (256*256); img[3*((y+j)*width + x)+1] = img2[(y+j)*width + x] / 256 % 256; img[3*((y+j)*width + x)+2] = img2[(y+j)*width + x] % 256; } } __global__ void fill(unsigned char * img) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { if ( img[3*((y+j)*width + x)] + img[3*((y+j)*width + x)+1] + img[3*((y+j)*width + x)+2] == 0) { img[3*((y+j)*width + x)] = img[3*((y+j)*width + x + 1)]; img[3*((y+j)*width + x)+1] = img[3*((y+j)*width + x + 1)+1]; img[3*((y+j)*width + x)+2] = img[3*((y+j)*width + x + 1)+2]; } } } __global__ void selection_sum_weights(float * selection_sum, float * selection, int n, int stride) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; int idx = 0; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { selection_sum[((y+j)*width + x)] = 0; for ( idx = 0; idx < n; idx ++) { atomicAdd(&(selection_sum[((y+j)*width + x)]), selection[idx * stride + ((y+j)*width + x)]); } } } __global__ void merge(unsigned char * img_all, unsigned char * img, float * selection, int n, int stride) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; int idx = 0; float sum = 0; float weight = 0; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { sum = 0; for (idx = 0; idx < n; idx ++) sum += selection[idx * stride + ((y+j)*width + x)]; for (idx = 0; idx < n; idx ++) selection[idx * stride + ((y+j)*width + x)] /= (sum + 1e-5); img[3*((y+j)*width + x)] = 0; img[3*((y+j)*width + x)+1] = 0; img[3*((y+j)*width + x)+2] = 0; for (idx = 0; idx < n; idx ++) { //weight = selection[idx * stride + ((y+j)*width + x)]; weight = 0.25; //weight = 0.5; img[3*((y+j)*width + x)] += (unsigned char) (img_all[idx * stride 
* 3 + 3*((y+j)*width + x)] * weight); img[3*((y+j)*width + x)+1] += (unsigned char) (img_all[idx * stride * 3 + 3*((y+j)*width + x) + 1] * weight); img[3*((y+j)*width + x)+2] += (unsigned char)(img_all[idx * stride * 3 + 3*((y+j)*width + x) + 2] * weight); } } } __global__ void merge_sum(unsigned char * img_all, unsigned char * img, float * selection, float * selection_sum, int n, int stride) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; int idx = 0; float weight = 0; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { img[3*((y+j)*width + x)] = 0; img[3*((y+j)*width + x)+1] = 0; img[3*((y+j)*width + x)+2] = 0; for (idx = 0; idx < n; idx ++) { weight = selection[idx * stride + ((y+j)*width + x)] / selection_sum[((y+j)*width + x)]; //weight = 0.25; //weight = 0.5; img[3*((y+j)*width + x)] += (unsigned char) (img_all[idx * stride * 3 + 3*((y+j)*width + x)] * weight); img[3*((y+j)*width + x)+1] += (unsigned char) (img_all[idx * stride * 3 + 3*((y+j)*width + x) + 1] * weight); img[3*((y+j)*width + x)+2] += (unsigned char)(img_all[idx * stride * 3 + 3*((y+j)*width + x) + 2] * weight); } } } __global__ void to3d_point(float *depth, float *points3d) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int w = gridDim.x * TILE_DIM; int h = w / 2; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; float depth_point = depth[ ih*w + iw ] * 128.0; float phi = ((float)(ih) + 0.5) / float(h) * M_PI; float theta = ((float)(iw) + 0.5) / float(w) * 2 * M_PI + M_PI; points3d[(ih * w + iw) * 4 + 0] = depth_point * sin(phi) * cos(theta); points3d[(ih * w + iw) * 4 + 1] = depth_point * sin(phi) * sin(theta); points3d[(ih * w + iw) * 4 + 2] = depth_point * cos(phi); points3d[(ih * w + iw) * 4 + 3] = 1; } } __global__ void transform(float *points3d_after, float *points3d, float * transformation_matrix) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int w = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; for (int ic = 0; ic < 3; ic ++) { points3d_after[(ih * w + iw) * 3 + ic] = points3d[(ih * w + iw) * 4 + 0] * transformation_matrix[4 * ic + 0] + points3d[(ih * w + iw) * 4 + 1] * transformation_matrix[4 * ic + 1] + points3d[(ih * w + iw) * 4 + 2] * transformation_matrix[4 * ic + 2] + points3d[(ih * w + iw) * 4 + 3] * transformation_matrix[4 * ic + 3]; } } } //#define FOV_SCALE 1.73205080757 //#define FOV_SCALE 1 __global__ void transform2d(float *points3d_after, float fov_scale) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int w = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; float x = points3d_after[(ih * w + iw) * 3 + 0]; float y = points3d_after[(ih * w + iw) * 3 + 1]; float z = points3d_after[(ih * w + iw) * 3 + 2]; points3d_after[(ih * w + iw) * 3 + 0] = x;//sqrt(x * x + y * y + z * z); //points3d_after[(ih * w + iw) * 3 + 1] = atan2(y, x); //points3d_after[(ih * w + iw) * 3 + 2] = atan2(sqrt(x * x + y * y), z); float x2 = fov_scale * x; if ((x2 > 0) && (y < x2 * 1.1) && (y > -x2 * 1.1) && (z < x2 * 1.1) && (z > -x2 * 1.1)) { points3d_after[(ih * w + iw) * 3 + 1] = y / (x2 + 1e-5); points3d_after[(ih * w + iw) * 3 + 2] = -z / (x2 + 1e-5); } else { points3d_after[(ih * w + iw) * 3 + 1] = -1; points3d_after[(ih * w + iw) * 3 + 2] = -1; } } } __global__ void render_depth(float 
*points3d_polar, unsigned int * depth_render) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int w = gridDim.x * TILE_DIM; int h = w /2; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; int tx = round((points3d_polar[(ih * w + iw) * 3 + 1] + M_PI)/(2*M_PI) * w - 0.5); int ty = round((points3d_polar[(ih * w + iw) * 3 + 2])/M_PI * h - 0.5); int this_depth = (int)(512 * points3d_polar[(ih * w + iw) * 3 + 0]); atomicMin(&depth_render[(ty * w + tx)] , this_depth); } } __global__ void get_average(unsigned char * img, int * nz, int * average, int scale) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; //int h = width /2; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; if (img[3*(ih*width + iw)] + img[3*(ih*width + iw)+1] + img[3*(ih*width + iw)+2] > 0) { //nz[ih/3 * width + iw/3] += 1; //average[3*(ih/3*width + iw/3)] += (int)img[3*(ih*width + iw)]; //average[3*(ih/3*width + iw/3)+1] += (int)img[3*(ih*width + iw)+1]; //average[3*(ih/3*width + iw/3)+2] += (int)img[3*(ih*width + iw)+2]; atomicAdd(&(nz[ih/scale * width + iw/scale]), 1); atomicAdd(&(average[3*(ih/scale*width + iw/scale)]), (int)img[3*(ih*width + iw)]); atomicAdd(&(average[3*(ih/scale*width + iw/scale)+1]), (int)img[3*(ih*width + iw)+1]); atomicAdd(&(average[3*(ih/scale*width + iw/scale)+2]), (int)img[3*(ih*width + iw)+2]); } } } __global__ void fill_with_average(unsigned char *img, int * nz, int * average, int scale) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; //int h = width /2; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; if ((img[3*(ih*width + iw)] + img[3*(ih*width + iw)+1] + img[3*(ih*width + iw)+2] == 0) && (nz[ih/scale * width + iw/scale] > 0)) { img[3*(ih*width + iw)] = (unsigned char)(average[3*(ih/scale*width + iw/scale)] / nz[ih/scale * width + iw/scale]); img[3*(ih*width + iw) + 1] = (unsigned char)(average[3*(ih/scale*width + iw/scale) + 1] / nz[ih/scale * width + iw/scale]); img[3*(ih*width + iw) + 2] = (unsigned char)(average[3*(ih/scale*width + iw/scale) + 2] / nz[ih/scale * width + iw/scale]); } } } __global__ void render_final(float *points3d_polar, float * selection, float * depth_render, int * img, int * render, int oh, int ow) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int w = gridDim.x * TILE_DIM; int h = w /2; int maxsize = oh * ow; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) { int iw = x; int ih = y + j; int tx = round((points3d_polar[(ih * w + iw) * 3 + 1] + 1)/2 * ow - 0.5); int ty = round((points3d_polar[(ih * w + iw) * 3 + 2] + 1)/2 * oh - 0.5); float tx_offset = ((points3d_polar[(ih * w + iw) * 3 + 1] + 1)/2 * ow - 0.5); float ty_offset = ((points3d_polar[(ih * w + iw) * 3 + 2] + 1)/2 * oh - 0.5); float tx00 = 0; float ty00 = 0; float tx01 = ((points3d_polar[(ih * w + iw + 1) * 3 + 1] + 1)/2 * ow - 0.5) - tx_offset; float ty01 = ((points3d_polar[(ih * w + iw + 1) * 3 + 2] + 1)/2 * oh - 0.5) - ty_offset; float tx10 = ((points3d_polar[((ih + 1) * w + iw) * 3 + 1] + 1)/2 * ow - 0.5) - tx_offset; float ty10 = ((points3d_polar[((ih + 1) * w + iw) * 3 + 2] + 1)/2 * oh - 0.5) - ty_offset; float tx11 = ((points3d_polar[((ih+1) * w + iw + 1) * 3 + 1] + 1)/2 * ow - 0.5) - tx_offset; float ty11 = ((points3d_polar[((ih+1) * w + iw + 1) * 3 + 2] + 1)/2 * oh - 0.5) - ty_offset; float 
t00 = 0 * (float)tx00 + (float)tx01 * -1.0/3 + (float)tx10 * 2.0/3 + (float)tx11 * 1.0/3; float t01 = 0 * (float)ty00 + (float)ty01 * -1.0/3 + (float)ty10 * 2.0/3 + (float)ty11 * 1.0/3; float t10 = 0 * (float)tx00 + (float)tx01 * 2.0/3 + (float)tx10 * -1.0/3 + (float)tx11 * 1.0/3; float t11 = 0 * (float)ty00 + (float)ty01 * 2.0/3 + (float)ty10 * -1.0/3 + (float)ty11 * 1.0/3; float det = t00 * t11 - t01 * t10 + 1e-10; //printf("%f %f %f %f %f\n", t00, t01, t10, t11, det); float it00, it01, it10, it11; it00 = t11/det; it01 = -t01/det; it10 = -t10/det; it11 = t00/det; //printf("inverse %f %f %f %f\n", it00, it01, it10, it11); int this_depth = (int)(12800/128 * points3d_polar[(ih * w + iw) * 3 + 0]); int delta00 = (int)(12800/128 * points3d_polar[(ih * w + iw) * 3 + 0]) - (int)(100 * depth_render[(ty * ow + tx)]); int delta01 = (int)(12800/128 * points3d_polar[(ih * w + iw + 1) * 3 + 0]) - (int)(100 * depth_render[(ty * ow + tx + 1)]); int delta10 = (int)(12800/128 * points3d_polar[((ih + 1) * w + iw) * 3 + 0]) - (int)(100 * depth_render[((ty+1) * ow + tx)]); int delta11 = (int)(12800/128 * points3d_polar[((ih+1) * w + iw + 1) * 3 + 0]) - (int)(100 * depth_render[((ty+1) * ow + tx + 1)]); int mindelta = min(min(delta00, delta01), min(delta10, delta11)); int maxdelta = max(max(delta00, delta01), max(delta10, delta11)); int depth00 = (int)(12800/128 * points3d_polar[(ih * w + iw) * 3 + 0]); int depth01 = (int)(12800/128 * points3d_polar[(ih * w + iw + 1) * 3 + 0]); int depth10 = (int)(12800/128 * points3d_polar[((ih+1) * w + iw) * 3 + 0]); int depth11 = (int)(12800/128 * points3d_polar[((ih+1) * w + iw+1) * 3 + 0]); int max_depth = max(max(depth00, depth10), max(depth01, depth11)); int min_depth = min(min(depth00, depth10), min(depth01, depth11)); int delta_depth = max_depth - min_depth; int txmin = floor(tx_offset + min(min(tx00, tx11), min(tx01, tx10))); int txmax = ceil(tx_offset + max(max(tx00, tx11), max(tx01, tx10))); int tymin = floor(ty_offset + min(min(ty00, ty11), min(ty01, ty10))); int tymax = ceil(ty_offset + max(max(ty00, ty11), max(ty01, ty10))); float newx, newy; int r,g,b; int itx, ity; //render[(ty * ow + tx)] = img[ih * w + iw]; //selection[(ty * ow + tx)] = 1.0; float tolerance = 0.1 * this_depth > 10? 0.1 * this_depth : 10; float tolerance2 = 0.05 * max_depth > 10? 
0.05 * max_depth: 10; float flank = 0.01; if ((delta_depth < tolerance2) && (y > 1 * h/8) && (y < (h*7)/8)) if (((mindelta > - tolerance) && (maxdelta < tolerance)) && (this_depth < 10000)) { if (((txmax - txmin) * (tymax - tymin) < 1600) && (txmax - txmin < 40) && (tymax - tymin < 40)) { for (itx = txmin; itx < txmax; itx ++) for (ity = tymin; ity < tymax; ity ++) { if (( 0 <= itx) && (itx < ow) && ( 0 <= ity) && (ity < oh)) { newx = (itx - tx_offset) * it00 + it10 * (ity - ty_offset); newy = (itx - tx_offset) * it01 + it11 * (ity - ty_offset); //printf("%f %f\n", newx, newy); if ((newx > -flank) && (newx < 1 + flank) && (newy > -flank) && (newy < 1 + flank)) { if (newx < 0) newx = 0; if (newy < 0) newy = 0; if (newx > 1) newx = 1; if (newy > 1) newy = 1; r = img[(ih * w + iw)] / (256*256) * (1-newx) * (1-newy) + img[(ih * w + iw + 1)] / (256*256) * (1-newx) * (newy) + img[((ih+1) * w + iw)] / (256*256) * (newx) * (1-newy) + img[((ih+1) * w + iw + 1)] / (256*256) * newx * newy; g = img[(ih * w + iw)] / 256 % 256 * (1-newx) * (1-newy) + img[(ih * w + iw + 1)] / 256 % 256 * (1-newx) * (newy) + img[((ih+1) * w + iw)] / 256 % 256 * (newx) * (1-newy) + img[((ih+1) * w + iw + 1)] / 256 % 256 * newx * newy; b = img[(ih * w + iw)] % 256 * (1-newx) * (1-newy) + img[(ih * w + iw + 1)] % 256 * (1-newx) * (newy) + img[((ih+1) * w + iw)] % 256 * (newx) * (1-newy) + img[((ih+1) * w + iw + 1)] % 256 * newx * newy ; if (r > 255) r = 255; if (g > 255) g = 255; if (b > 255) b = 255; if ((ity * ow + itx > 0) && (ity * ow + itx < maxsize)) { render[(ity * ow + itx)] = r * 256 * 256 + g * 256 + b; selection[(ity * ow + itx)] = 1.0 / abs(det); } } } } } } } } extern "C"{ void render(int n, int h,int w, int oh, int ow, unsigned char * img, float * depth,float * pose, unsigned char * render, float * depth_render, float fov){ //int ih, iw, i, ic; //printf("inside cuda code %d\n", depth); //printf("scale %d\n", s); const int nx = w; const int ny = h; const int onx = ow; const int ony = oh; const size_t input_mem_size = nx*ny; const size_t output_mem_size = onx * ony; dim3 dimGrid(nx/TILE_DIM, ny/TILE_DIM, 1); dim3 dimGrid_out(onx/TILE_DIM, ony/TILE_DIM, 1); dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1); unsigned char *d_img, *d_render, *d_render_all; float *d_depth, *d_pose; float *d_depth_render; float *d_3dpoint, *d_3dpoint_after; float * d_selection, * d_selection_sum; int * nz; int * average; int *d_render2, *d_img2; cudaMalloc((void **)&d_img, input_mem_size * sizeof(unsigned char) * 3); cudaMalloc((void **)&d_img2, input_mem_size * sizeof(int)); cudaMalloc((void **)&d_render, output_mem_size * sizeof(unsigned char) * 3); cudaMalloc((void **)&d_render_all, output_mem_size * sizeof(unsigned char) * 3 * n); cudaMalloc((void **)&d_depth, input_mem_size * sizeof(float)); cudaMalloc((void **)&d_depth_render, output_mem_size * sizeof(float)); cudaMalloc((void **)&d_3dpoint, input_mem_size * sizeof(float) * 4); cudaMalloc((void **)&d_3dpoint_after, input_mem_size * sizeof(float) * 4); cudaMalloc((void **)&d_pose, sizeof(float) * 16); cudaMalloc((void **)&d_selection, output_mem_size * sizeof(float) * n); cudaMalloc((void **)&d_selection_sum, output_mem_size * sizeof(float)); cudaMalloc((void **)&d_render2, output_mem_size * sizeof(int)); cudaMalloc((void **)&nz, output_mem_size * sizeof(int)); cudaMalloc((void **)&average, output_mem_size * sizeof(int) * 3); cudaMemset(nz, 0, output_mem_size * sizeof(int)); cudaMemset(average, 0, output_mem_size * sizeof(int) * 3); cudaMemset(d_selection, 0, output_mem_size * sizeof(float) 
* n); cudaMemset(d_selection_sum, 0, output_mem_size * sizeof(float)); cudaMemcpy(d_depth_render, depth_render, output_mem_size * sizeof(float), cudaMemcpyHostToDevice); cudaMemset(d_render_all, 0, output_mem_size * sizeof(unsigned char) * 3 * n); int idx; for (idx = 0; idx < n; idx ++) { cudaMemcpy(d_pose, &(pose[idx * 16]), sizeof(float) * 16, cudaMemcpyHostToDevice); cudaMemcpy(d_img, &(img[idx * input_mem_size * 3]), input_mem_size * sizeof(unsigned char) * 3, cudaMemcpyHostToDevice); cudaMemcpy(d_depth, &(depth[idx * input_mem_size]), input_mem_size * sizeof(float), cudaMemcpyHostToDevice); cudaMemset(d_render, 0, output_mem_size * sizeof(unsigned char) * 3); cudaMemset(d_render2, 0, output_mem_size * sizeof(int)); cudaMemset(d_img2, 0, input_mem_size * sizeof(int)); cudaMemset(d_3dpoint, 0, input_mem_size * sizeof(float) * 4); cudaMemset(d_3dpoint_after, 0, input_mem_size * sizeof(float) * 3); to3d_point<<< dimGrid, dimBlock >>>(d_depth, d_3dpoint); transform<<< dimGrid, dimBlock >>>(d_3dpoint_after, d_3dpoint, d_pose); float fov_scale = tan(fov/2); transform2d<<<dimGrid, dimBlock>>>(d_3dpoint_after, fov_scale); char_to_int <<< dimGrid, dimBlock >>> (d_img2, d_img); render_final <<< dimGrid, dimBlock >>> (d_3dpoint_after, &(d_selection[idx * onx * ony]), d_depth_render, d_img2, d_render2, oh, ow); //int_to_char <<< dimGrid_out, dimBlock >>> (d_render2, d_render); int_to_char <<< dimGrid_out, dimBlock >>> (d_render2, &(d_render_all[idx * output_mem_size * 3])); int fill_size[1] = {3}; for (int j = 0; j < 1; j++) { cudaMemset(nz, 0, output_mem_size * sizeof(int)); cudaMemset(average, 0, output_mem_size * sizeof(int) * 3); get_average <<< dimGrid_out, dimBlock >>> (&(d_render_all[idx * output_mem_size * 3]), nz, average, fill_size[j]); fill_with_average <<< dimGrid_out, dimBlock >>> (&(d_render_all[idx * output_mem_size * 3]), nz, average, fill_size[j]); } } selection_sum_weights <<< dimGrid_out, dimBlock >>> (d_selection_sum, d_selection, n, output_mem_size); merge_sum <<< dimGrid_out, dimBlock >>> (d_render_all, d_render, d_selection, d_selection_sum, n, output_mem_size); //merge <<< dimGrid_out, dimBlock >>> (d_render_all, d_render, d_selection, n, output_mem_size); cudaMemcpy(render, d_render, output_mem_size * sizeof(unsigned char) * 3 , cudaMemcpyDeviceToHost); cudaFree(d_img); cudaFree(d_depth); cudaFree(d_render2); cudaFree(d_img2); cudaFree(d_render); cudaFree(d_depth_render); cudaFree(d_3dpoint); cudaFree(d_3dpoint_after); cudaFree(d_pose); cudaFree(d_render_all); cudaFree(d_selection); cudaFree(nz); cudaFree(average); cudaFree(d_selection_sum); } }//extern "C"
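Every kernel in the render file above uses the same indexing convention: a block is launched with TILE_DIM x BLOCK_ROWS threads but covers a TILE_DIM x TILE_DIM tile of the panorama, so each thread loops down its tile column in steps of BLOCK_ROWS, and the host code's dimGrid(nx/TILE_DIM, ny/TILE_DIM, 1) effectively assumes the image dimensions are multiples of TILE_DIM. Below is a minimal standalone sketch of just that pattern; the kernel name, buffer, and sizes are illustrative and not taken from the file.

// Minimal sketch of the TILE_DIM x BLOCK_ROWS tiling used by the kernels above.
// Names (scale_pixels, d_img, w, h) are illustrative and not from the file.
#include <cstdio>
#include <cuda_runtime.h>

const int TILE_DIM = 32;
const int BLOCK_ROWS = 8;

__global__ void scale_pixels(float *img, float factor) {
    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    int width = gridDim.x * TILE_DIM;              // assumes width % TILE_DIM == 0
    // Each thread covers TILE_DIM / BLOCK_ROWS rows of its 32x32 tile.
    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
        img[(y + j) * width + x] *= factor;
}

int main() {
    const int w = 128, h = 64;                     // both multiples of TILE_DIM, as render() assumes
    float *d_img;
    cudaMalloc((void **)&d_img, w * h * sizeof(float));
    cudaMemset(d_img, 0, w * h * sizeof(float));

    dim3 dimGrid(w / TILE_DIM, h / TILE_DIM, 1);   // same launch shape as in render()
    dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);
    scale_pixels<<<dimGrid, dimBlock>>>(d_img, 2.0f);
    cudaDeviceSynchronize();

    cudaFree(d_img);
    return 0;
}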
b4a498d5b39bf9f1a036213548e0e52d2a0f0cdc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/psroi_pool_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaximumNumBlocks = 4096; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaximumNumBlocks); } template <typename T> __global__ void GPUPSROIPoolForward( const int nthreads, const T* input_data, const T* input_rois, const float spatial_scale, const int input_channels, const int height, const int width, const int output_channels, const int pooled_height, const int pooled_width, const int* rois_batch_id_data, T* output_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { // The output is in order (n, c, ph, pw) int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % output_channels; int n = i / pooled_width / pooled_height / output_channels; // set roi_batch_id int roi_batch_id = rois_batch_id_data[n]; // [start, end) interval for spatial sampling const T* offset_input_rois = input_rois + n * 4; T roi_start_w = static_cast<T>(round(offset_input_rois[0])) * spatial_scale; T roi_start_h = static_cast<T>(round(offset_input_rois[1])) * spatial_scale; T roi_end_w = static_cast<T>(round(offset_input_rois[2]) + 1.) * spatial_scale; T roi_end_h = static_cast<T>(round(offset_input_rois[3]) + 1.) * spatial_scale; // Force too small ROIs to be 1x1 T roi_height = max(roi_end_h - roi_start_h, (T)0.1); // avoid 0 T roi_width = max(roi_end_w - roi_start_w, (T)0.1); // Compute w and h at input feature map T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int hstart = floor(bin_size_h * static_cast<T>(ph) + roi_start_h); int wstart = floor(bin_size_w * static_cast<T>(pw) + roi_start_w); int hend = ceil(bin_size_h * static_cast<T>(ph + 1) + roi_start_h); int wend = ceil(bin_size_w * static_cast<T>(pw + 1) + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int input_channel = (c * pooled_height + ph) * pooled_width + pw; const T* offset_input_data = input_data + (roi_batch_id * input_channels + input_channel) * height * width; T outsum = 0; for (int ih = hstart; ih < hend; ++ih) { for (int iw = wstart; iw < wend; ++iw) { int input_index = ih * width + iw; outsum += offset_input_data[input_index]; } } T bin_area = static_cast<T>((hend - hstart) * (wend - wstart)); output_data[i] = is_empty ? 
0. : outsum / bin_area; } } template <typename T> __global__ void GPUPSROIPoolBackward( const int nthreads, const T* input_rois, const T* output_grad_data, const float spatial_scale, const int input_channels, const int height, const int width, const int output_channels, const int pooled_height, const int pooled_width, const int* rois_batch_id_data, T* input_grad_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { // The output is in order (n, c, ph, pw) int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % output_channels; int n = i / pooled_width / pooled_height / output_channels; // set roi_batch_id int roi_batch_id = rois_batch_id_data[n]; int input_channel = (c * pooled_height + ph) * pooled_width + pw; int input_offset = (roi_batch_id * input_channels + input_channel) * height * width; T* offset_input_grad_data = input_grad_data + input_offset; // [start, end) interval for spatial sampling const T* offset_input_rois = input_rois + n * 4; T roi_start_w = static_cast<T>(round(offset_input_rois[0])) * spatial_scale; T roi_start_h = static_cast<T>(round(offset_input_rois[1])) * spatial_scale; T roi_end_w = static_cast<T>(round(offset_input_rois[2]) + 1.) * spatial_scale; T roi_end_h = static_cast<T>(round(offset_input_rois[3]) + 1.) * spatial_scale; // Force too small ROIs to be 1x1 T roi_height = max(roi_end_h - roi_start_h, (T)0.1); // avoid 0 T roi_width = max(roi_end_w - roi_start_w, (T)0.1); // Compute w and h at input feature map T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int hstart = floor(bin_size_h * static_cast<T>(ph) + roi_start_h); int wstart = floor(bin_size_w * static_cast<T>(pw) + roi_start_w); int hend = ceil(bin_size_h * static_cast<T>(ph + 1) + roi_start_h); int wend = ceil(bin_size_w * static_cast<T>(pw + 1) + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Accumulate diff_val into input data T bin_area = static_cast<T>((hend - hstart) * (wend - wstart)); T diff_val = is_empty ? 0. 
: output_grad_data[i] / bin_area; for (int ih = hstart; ih < hend; ++ih) { for (int iw = wstart; iw < wend; ++iw) { int input_index = ih * width + iw; platform::CudaAtomicAdd(offset_input_grad_data + input_index, diff_val); } } } } template <typename Place, typename T> class GPUPSROIPoolOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* out = ctx.Output<Tensor>("Out"); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto output_channels = ctx.Attr<int>("output_channels"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto in_dims = in->dims(); int batch_size = in_dims[0]; int input_channels = in_dims[1]; int height = in_dims[2]; int width = in_dims[3]; PADDLE_ENFORCE_EQ( input_channels, output_channels * pooled_height * pooled_width, platform::errors::InvalidArgument( "The channels %d of input X should equal the product of " "output_channels %d x pooled_height %d x pooled_width %d.", input_channels, output_channels, pooled_height, pooled_width)); int rois_num = rois->dims()[0]; if (rois_num == 0) return; int rois_batch_size; framework::Tensor rois_batch_id_list; rois_batch_id_list.Resize({rois_num}); int* rois_batch_id_data = rois_batch_id_list.mutable_data<int>(platform::CPUPlace()); if (ctx.HasInput("RoisNum")) { auto* rois_num_t = ctx.Input<Tensor>("RoisNum"); rois_batch_size = rois_num_t->numel(); auto* rois_num_data = rois_num_t->data<int>(); PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, platform::errors::InvalidArgument( "The batch size of input(ROIs) and input(X) must be " "the same but received batch size of input(ROIs) and " "input(X) is %d and %d respectively.", rois_batch_size, batch_size)); std::vector<int> rois_num_list(rois_batch_size); memory::Copy(platform::CPUPlace(), rois_num_list.data(), BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace()), rois_num_data, sizeof(int) * rois_batch_size, 0); int rois_num_count = 0; for (int i = 0; i < rois_batch_size; ++i) { rois_num_count += rois_num_list[i]; } PADDLE_ENFORCE_EQ( rois_num_count, rois_num, platform::errors::InvalidArgument( "the rois_num from input and RoisNum must be the same")); int start = 0; for (int n = 0; n < rois_batch_size; ++n) { for (int i = start; i < start + rois_num_list[n]; ++i) { rois_batch_id_data[i] = n; } start += rois_num_list[n]; } } else { auto rois_lod = rois->lod().back(); rois_batch_size = rois_lod.size() - 1; PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, platform::errors::InvalidArgument( "The batch size of input(ROIs) and input(X) must be " "the same but received batch size of input(ROIs) and " "input(X) is %d and %d respectively.", rois_batch_size, batch_size)); int rois_num_with_lod = rois_lod[rois_batch_size]; PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod, platform::errors::InvalidArgument( "The number of rois from input(ROIs) and its LOD " "must be the same. 
Received rois %d of input(ROIs) " "but the number of rois %d from its LOD is %d", rois_num, rois_num_with_lod)); // set rois batch id for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { rois_batch_id_data[i] = n; } } } framework::Tensor rois_batch_id_list_gpu; framework::TensorCopy(rois_batch_id_list, ctx.GetPlace(), ctx.device_context(), &rois_batch_id_list_gpu); int output_size = out->numel(); int blocks = NumBlocks(output_size); int threads = kNumCUDAThreads; // call cuda kernel function hipLaunchKernelGGL(( GPUPSROIPoolForward< T>), dim3(blocks), dim3(threads), 0, ctx.cuda_device_context().stream(), output_size, in->data<T>(), rois->data<T>(), spatial_scale, input_channels, height, width, output_channels, pooled_height, pooled_width, rois_batch_id_list_gpu.data<int>(), out->mutable_data<T>(ctx.GetPlace())); } }; template <typename Place, typename T> class GPUPSROIPoolGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto output_channels = ctx.Attr<int>("output_channels"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); int rois_num = rois->dims()[0]; int input_channels = in->dims()[1]; int height = in->dims()[2]; int width = in->dims()[3]; if (input_grad) { // set roi batch id framework::Tensor rois_batch_id_list; rois_batch_id_list.Resize({rois_num}); int* rois_batch_id_data = rois_batch_id_list.mutable_data<int>(platform::CPUPlace()); int rois_batch_size; if (ctx.HasInput("RoisNum")) { auto* rois_num_t = ctx.Input<Tensor>("RoisNum"); rois_batch_size = rois_num_t->numel(); std::vector<int> rois_num_list(rois_batch_size); memory::Copy(platform::CPUPlace(), rois_num_list.data(), BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace()), rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0); int start = 0; for (int n = 0; n < rois_batch_size; ++n) { for (int i = start; i < start + rois_num_list[n]; ++i) { rois_batch_id_data[i] = n; } start += rois_num_list[n]; } } else { auto rois_lod = rois->lod().back(); rois_batch_size = rois_lod.size() - 1; for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { rois_batch_id_data[i] = n; } } } framework::Tensor rois_batch_id_list_gpu; framework::TensorCopy(rois_batch_id_list, ctx.GetPlace(), ctx.device_context(), &rois_batch_id_list_gpu); input_grad->mutable_data<T>(ctx.GetPlace()); math::SetConstant<Place, T> set_zero; set_zero(ctx.cuda_device_context(), input_grad, static_cast<T>(0)); int output_grad_size = output_grad->numel(); int blocks = NumBlocks(output_grad_size); int threads = kNumCUDAThreads; if (output_grad_size > 0) { hipLaunchKernelGGL(( GPUPSROIPoolBackward< T>), dim3(blocks), dim3(threads), 0, ctx.cuda_device_context().stream(), output_grad_size, rois->data<T>(), output_grad->data<T>(), spatial_scale, input_channels, height, width, output_channels, pooled_height, pooled_width, rois_batch_id_list_gpu.data<int>(), input_grad->mutable_data<T>(ctx.GetPlace())); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( psroi_pool, ops::GPUPSROIPoolOpKernel<paddle::platform::CUDADeviceContext, 
float>, ops::GPUPSROIPoolOpKernel<paddle::platform::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( psroi_pool_grad, ops::GPUPSROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUPSROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, double>);
b4a498d5b39bf9f1a036213548e0e52d2a0f0cdc.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/psroi_pool_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaximumNumBlocks = 4096; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaximumNumBlocks); } template <typename T> __global__ void GPUPSROIPoolForward( const int nthreads, const T* input_data, const T* input_rois, const float spatial_scale, const int input_channels, const int height, const int width, const int output_channels, const int pooled_height, const int pooled_width, const int* rois_batch_id_data, T* output_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { // The output is in order (n, c, ph, pw) int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % output_channels; int n = i / pooled_width / pooled_height / output_channels; // set roi_batch_id int roi_batch_id = rois_batch_id_data[n]; // [start, end) interval for spatial sampling const T* offset_input_rois = input_rois + n * 4; T roi_start_w = static_cast<T>(round(offset_input_rois[0])) * spatial_scale; T roi_start_h = static_cast<T>(round(offset_input_rois[1])) * spatial_scale; T roi_end_w = static_cast<T>(round(offset_input_rois[2]) + 1.) * spatial_scale; T roi_end_h = static_cast<T>(round(offset_input_rois[3]) + 1.) * spatial_scale; // Force too small ROIs to be 1x1 T roi_height = max(roi_end_h - roi_start_h, (T)0.1); // avoid 0 T roi_width = max(roi_end_w - roi_start_w, (T)0.1); // Compute w and h at input feature map T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int hstart = floor(bin_size_h * static_cast<T>(ph) + roi_start_h); int wstart = floor(bin_size_w * static_cast<T>(pw) + roi_start_w); int hend = ceil(bin_size_h * static_cast<T>(ph + 1) + roi_start_h); int wend = ceil(bin_size_w * static_cast<T>(pw + 1) + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int input_channel = (c * pooled_height + ph) * pooled_width + pw; const T* offset_input_data = input_data + (roi_batch_id * input_channels + input_channel) * height * width; T outsum = 0; for (int ih = hstart; ih < hend; ++ih) { for (int iw = wstart; iw < wend; ++iw) { int input_index = ih * width + iw; outsum += offset_input_data[input_index]; } } T bin_area = static_cast<T>((hend - hstart) * (wend - wstart)); output_data[i] = is_empty ? 0. 
: outsum / bin_area; } } template <typename T> __global__ void GPUPSROIPoolBackward( const int nthreads, const T* input_rois, const T* output_grad_data, const float spatial_scale, const int input_channels, const int height, const int width, const int output_channels, const int pooled_height, const int pooled_width, const int* rois_batch_id_data, T* input_grad_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { // The output is in order (n, c, ph, pw) int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % output_channels; int n = i / pooled_width / pooled_height / output_channels; // set roi_batch_id int roi_batch_id = rois_batch_id_data[n]; int input_channel = (c * pooled_height + ph) * pooled_width + pw; int input_offset = (roi_batch_id * input_channels + input_channel) * height * width; T* offset_input_grad_data = input_grad_data + input_offset; // [start, end) interval for spatial sampling const T* offset_input_rois = input_rois + n * 4; T roi_start_w = static_cast<T>(round(offset_input_rois[0])) * spatial_scale; T roi_start_h = static_cast<T>(round(offset_input_rois[1])) * spatial_scale; T roi_end_w = static_cast<T>(round(offset_input_rois[2]) + 1.) * spatial_scale; T roi_end_h = static_cast<T>(round(offset_input_rois[3]) + 1.) * spatial_scale; // Force too small ROIs to be 1x1 T roi_height = max(roi_end_h - roi_start_h, (T)0.1); // avoid 0 T roi_width = max(roi_end_w - roi_start_w, (T)0.1); // Compute w and h at input feature map T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int hstart = floor(bin_size_h * static_cast<T>(ph) + roi_start_h); int wstart = floor(bin_size_w * static_cast<T>(pw) + roi_start_w); int hend = ceil(bin_size_h * static_cast<T>(ph + 1) + roi_start_h); int wend = ceil(bin_size_w * static_cast<T>(pw + 1) + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Accumulate diff_val into input data T bin_area = static_cast<T>((hend - hstart) * (wend - wstart)); T diff_val = is_empty ? 0. 
: output_grad_data[i] / bin_area; for (int ih = hstart; ih < hend; ++ih) { for (int iw = wstart; iw < wend; ++iw) { int input_index = ih * width + iw; platform::CudaAtomicAdd(offset_input_grad_data + input_index, diff_val); } } } } template <typename Place, typename T> class GPUPSROIPoolOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* out = ctx.Output<Tensor>("Out"); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto output_channels = ctx.Attr<int>("output_channels"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto in_dims = in->dims(); int batch_size = in_dims[0]; int input_channels = in_dims[1]; int height = in_dims[2]; int width = in_dims[3]; PADDLE_ENFORCE_EQ( input_channels, output_channels * pooled_height * pooled_width, platform::errors::InvalidArgument( "The channels %d of input X should equal the product of " "output_channels %d x pooled_height %d x pooled_width %d.", input_channels, output_channels, pooled_height, pooled_width)); int rois_num = rois->dims()[0]; if (rois_num == 0) return; int rois_batch_size; framework::Tensor rois_batch_id_list; rois_batch_id_list.Resize({rois_num}); int* rois_batch_id_data = rois_batch_id_list.mutable_data<int>(platform::CPUPlace()); if (ctx.HasInput("RoisNum")) { auto* rois_num_t = ctx.Input<Tensor>("RoisNum"); rois_batch_size = rois_num_t->numel(); auto* rois_num_data = rois_num_t->data<int>(); PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, platform::errors::InvalidArgument( "The batch size of input(ROIs) and input(X) must be " "the same but received batch size of input(ROIs) and " "input(X) is %d and %d respectively.", rois_batch_size, batch_size)); std::vector<int> rois_num_list(rois_batch_size); memory::Copy(platform::CPUPlace(), rois_num_list.data(), BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace()), rois_num_data, sizeof(int) * rois_batch_size, 0); int rois_num_count = 0; for (int i = 0; i < rois_batch_size; ++i) { rois_num_count += rois_num_list[i]; } PADDLE_ENFORCE_EQ( rois_num_count, rois_num, platform::errors::InvalidArgument( "the rois_num from input and RoisNum must be the same")); int start = 0; for (int n = 0; n < rois_batch_size; ++n) { for (int i = start; i < start + rois_num_list[n]; ++i) { rois_batch_id_data[i] = n; } start += rois_num_list[n]; } } else { auto rois_lod = rois->lod().back(); rois_batch_size = rois_lod.size() - 1; PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, platform::errors::InvalidArgument( "The batch size of input(ROIs) and input(X) must be " "the same but received batch size of input(ROIs) and " "input(X) is %d and %d respectively.", rois_batch_size, batch_size)); int rois_num_with_lod = rois_lod[rois_batch_size]; PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod, platform::errors::InvalidArgument( "The number of rois from input(ROIs) and its LOD " "must be the same. 
Received rois %d of input(ROIs) " "but the number of rois %d from its LOD is %d", rois_num, rois_num_with_lod)); // set rois batch id for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { rois_batch_id_data[i] = n; } } } framework::Tensor rois_batch_id_list_gpu; framework::TensorCopy(rois_batch_id_list, ctx.GetPlace(), ctx.device_context(), &rois_batch_id_list_gpu); int output_size = out->numel(); int blocks = NumBlocks(output_size); int threads = kNumCUDAThreads; // call cuda kernel function GPUPSROIPoolForward< T><<<blocks, threads, 0, ctx.cuda_device_context().stream()>>>( output_size, in->data<T>(), rois->data<T>(), spatial_scale, input_channels, height, width, output_channels, pooled_height, pooled_width, rois_batch_id_list_gpu.data<int>(), out->mutable_data<T>(ctx.GetPlace())); } }; template <typename Place, typename T> class GPUPSROIPoolGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto output_channels = ctx.Attr<int>("output_channels"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); int rois_num = rois->dims()[0]; int input_channels = in->dims()[1]; int height = in->dims()[2]; int width = in->dims()[3]; if (input_grad) { // set roi batch id framework::Tensor rois_batch_id_list; rois_batch_id_list.Resize({rois_num}); int* rois_batch_id_data = rois_batch_id_list.mutable_data<int>(platform::CPUPlace()); int rois_batch_size; if (ctx.HasInput("RoisNum")) { auto* rois_num_t = ctx.Input<Tensor>("RoisNum"); rois_batch_size = rois_num_t->numel(); std::vector<int> rois_num_list(rois_batch_size); memory::Copy(platform::CPUPlace(), rois_num_list.data(), BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace()), rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0); int start = 0; for (int n = 0; n < rois_batch_size; ++n) { for (int i = start; i < start + rois_num_list[n]; ++i) { rois_batch_id_data[i] = n; } start += rois_num_list[n]; } } else { auto rois_lod = rois->lod().back(); rois_batch_size = rois_lod.size() - 1; for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { rois_batch_id_data[i] = n; } } } framework::Tensor rois_batch_id_list_gpu; framework::TensorCopy(rois_batch_id_list, ctx.GetPlace(), ctx.device_context(), &rois_batch_id_list_gpu); input_grad->mutable_data<T>(ctx.GetPlace()); math::SetConstant<Place, T> set_zero; set_zero(ctx.cuda_device_context(), input_grad, static_cast<T>(0)); int output_grad_size = output_grad->numel(); int blocks = NumBlocks(output_grad_size); int threads = kNumCUDAThreads; if (output_grad_size > 0) { GPUPSROIPoolBackward< T><<<blocks, threads, 0, ctx.cuda_device_context().stream()>>>( output_grad_size, rois->data<T>(), output_grad->data<T>(), spatial_scale, input_channels, height, width, output_channels, pooled_height, pooled_width, rois_batch_id_list_gpu.data<int>(), input_grad->mutable_data<T>(ctx.GetPlace())); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( psroi_pool, ops::GPUPSROIPoolOpKernel<paddle::platform::CUDADeviceContext, float>, 
ops::GPUPSROIPoolOpKernel<paddle::platform::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( psroi_pool_grad, ops::GPUPSROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUPSROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, double>);
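Both PSROI kernels above size their launch with NumBlocks(), which caps the grid at kNumMaximumNumBlocks, and then rely on a grid-stride loop so every output element is still processed when the cap is hit. The following is a minimal standalone sketch of that idiom with placeholder kernel and buffer names; it is not Paddle code.

// Minimal sketch of the capped-grid + grid-stride idiom used by the PSROI kernels above.
// square_all, d_in, d_out are placeholder names.
#include <algorithm>
#include <cuda_runtime.h>

static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaximumNumBlocks = 4096;

static inline int NumBlocks(const int N) {
    return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaximumNumBlocks);
}

__global__ void square_all(const float *in, float *out, int n) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    // Grid-stride loop: still covers all n elements when the block count was capped.
    for (int i = index; i < n; i += stride)
        out[i] = in[i] * in[i];
}

int main() {
    const int n = 1 << 22;                         // large enough that the 4096-block cap applies
    float *d_in, *d_out;
    cudaMalloc((void **)&d_in, n * sizeof(float));
    cudaMalloc((void **)&d_out, n * sizeof(float));
    cudaMemset(d_in, 0, n * sizeof(float));
    square_all<<<NumBlocks(n), kNumCUDAThreads>>>(d_in, d_out, n);
    cudaDeviceSynchronize();
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}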
36bc72df7533d870f58f95e52beaf407bcc9e14f.hip
// !!! This is a file automatically generated by hipify!!!
#ifndef KERNEL_CU
#define KERNEL_CU

#include <math.h>
#include <hip/hip_runtime.h>

#include <Kernel.h>
#include <RandomState.h>
#include <RNG.h>

// Declare the kernel function
__global__ void __kernel_fillArray(float* A, const int arraySize);
__global__ void __kernel_fillArrayWithRandomNumbers(float* A, const int arraySize, RandomState* states);

// function which invokes the kernel
void fillArray(float* A, const int arraySize) {
    // Number of blocks per grid and the number of threads per block
    int threadsPerBlock, blocksPerGrid;
    threadsPerBlock = 512;
    blocksPerGrid = ceil(double(arraySize)/double(threadsPerBlock));

    // invoke the kernel
    hipLaunchKernelGGL(( __kernel_fillArray) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, A, arraySize);
}

void fillArrayWithRandomNumbers(float* A, const int arraySize, RandomState* states) {
    // Number of blocks per grid and the number of threads per block
    int threadsPerBlock, blocksPerGrid;
    threadsPerBlock = 512;
    blocksPerGrid = ceil(double(arraySize)/double(threadsPerBlock));

    // invoke the kernel
    hipLaunchKernelGGL(( __kernel_fillArrayWithRandomNumbers) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, A, arraySize, states);
}

// Kernels
__global__ void __kernel_fillArray(float* A, const int arraySize) {
    int threadId = blockDim.x * blockIdx.x + threadIdx.x;
    if (threadId < arraySize)
        A[threadId] = 1.0;
}

__global__ void __kernel_fillArrayWithRandomNumbers(float* A, const int arraySize, RandomState* states) {
    int threadId = blockDim.x * blockIdx.x + threadIdx.x;
    RNG rng(states[threadId]);
    if (threadId < arraySize)
        A[threadId] = rng.floatNumber();
    states[threadId] = rng.getState();
}

#endif // KERNEL_CU
36bc72df7533d870f58f95e52beaf407bcc9e14f.cu
#ifndef KERNEL_CU
#define KERNEL_CU

#include <math.h>
#include <cuda_runtime.h>

#include <Kernel.h>
#include <RandomState.h>
#include <RNG.h>

// Declare the kernel function
__global__ void __kernel_fillArray(float* A, const int arraySize);
__global__ void __kernel_fillArrayWithRandomNumbers(float* A, const int arraySize, RandomState* states);

// function which invokes the kernel
void fillArray(float* A, const int arraySize) {
    // Number of blocks per grid and the number of threads per block
    int threadsPerBlock, blocksPerGrid;
    threadsPerBlock = 512;
    blocksPerGrid = ceil(double(arraySize)/double(threadsPerBlock));

    // invoke the kernel
    __kernel_fillArray <<< blocksPerGrid, threadsPerBlock >>>(A, arraySize);
}

void fillArrayWithRandomNumbers(float* A, const int arraySize, RandomState* states) {
    // Number of blocks per grid and the number of threads per block
    int threadsPerBlock, blocksPerGrid;
    threadsPerBlock = 512;
    blocksPerGrid = ceil(double(arraySize)/double(threadsPerBlock));

    // invoke the kernel
    __kernel_fillArrayWithRandomNumbers <<< blocksPerGrid, threadsPerBlock >>>(A, arraySize, states);
}

// Kernels
__global__ void __kernel_fillArray(float* A, const int arraySize) {
    int threadId = blockDim.x * blockIdx.x + threadIdx.x;
    if (threadId < arraySize)
        A[threadId] = 1.0;
}

__global__ void __kernel_fillArrayWithRandomNumbers(float* A, const int arraySize, RandomState* states) {
    int threadId = blockDim.x * blockIdx.x + threadIdx.x;
    RNG rng(states[threadId]);
    if (threadId < arraySize)
        A[threadId] = rng.floatNumber();
    states[threadId] = rng.getState();
}

#endif // KERNEL_CU
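The 36bc72df... pair above is the smallest in this collection and isolates the one transformation hipify performs on it: the runtime header switch and the rewrite of the triple-chevron kernel launch into hipLaunchKernelGGL, whose two extra arguments are the dynamic shared-memory size and the stream that the chevron form leaves implicit. Below is a minimal self-contained HIP sketch of that mapping, with an illustrative kernel not taken from the files above.

// Minimal HIP sketch of the launch rewrite shown in the pair above.
// fill_ones and d_a are illustrative names, not taken from those files.
#include <cstdio>
#include <hip/hip_runtime.h>

__global__ void fill_ones(float *a, const int n) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        a[i] = 1.0f;
}

int main() {
    const int n = 1024;
    float *d_a;
    hipMalloc((void **)&d_a, n * sizeof(float));

    dim3 blocks((n + 511) / 512);
    dim3 threads(512);

    // CUDA form:  fill_ones<<<blocks, threads>>>(d_a, n);
    // HIP form emitted by hipify; the "0, 0" are the dynamic shared-memory
    // size and the stream, which the chevron syntax leaves implicit.
    hipLaunchKernelGGL(fill_ones, blocks, threads, 0, 0, d_a, n);

    float first;
    hipMemcpy(&first, d_a, sizeof(float), hipMemcpyDeviceToHost);
    printf("first element: %f\n", first);

    hipFree(d_a);
    return 0;
}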
d4db21dfb49ddbe9b98773b7812b92ff6f532018.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cstdio> #include <cstdlib> #include <cmath> #include <algorithm> using namespace std; #define check_err(cu_err) { cu_err_handler(cu_err, __FILE__, __LINE__); } inline void cu_err_handler(hipError_t err, const char* file, int line) { if (err != hipSuccess) { fprintf(stderr, "GPU error: '%s' at %s:%d\n", hipGetErrorString(err), file, line); exit(1); } } //////// CPU dynamic programming implementation //////// double cpu_dynprog_binomial_american_put( double stock_price, double strike_price, double expire, double volat, int num_steps, double risk_free_rate) { using namespace std; double dt = expire / num_steps; double up_factor = exp(volat * sqrt(dt)); double down_factor = 1 / up_factor; double R = exp(risk_free_rate * dt); double up_prob = (R - down_factor) / (up_factor - down_factor); double *tree = new double[num_steps + 1]; // Initialize end of tree at expire time for (int branch = 0; branch <= num_steps; ++branch) { // Option value when exercising the option double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - num_steps); tree[branch] = max(exercise, .0); //printf("triangle(x, %d) = max(0 %f)\n", branch, exercise); } for (int step = num_steps - 1; step >= 0; --step) { for (int branch = 0; branch <= step; ++branch) { double binomial = 1 / R * (up_prob * tree[branch + 1] + (1 - up_prob) * tree[branch]); double exercise = strike_price - stock_price * pow(up_factor, (double) 2 * branch - step); tree[branch] = max(binomial, exercise); } } double price = tree[0]; delete[] tree; return price; } //////// GPU Reduce tree //////// __global__ void tree_reduction( double* tree, double stock_price, double strike_price, int num_steps, double R, double up_factor, double up_prob) { for (int step = num_steps - 1; step >= 0; --step) { int branch = threadIdx.x; if (branch <= step) { double binomial = 1 / R * (up_prob * tree[branch + 1] + (1 - up_prob) * tree[branch]); double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - step); tree[branch] = max(binomial, exercise); } __syncthreads(); } } double gpu1_binomial_american_put( double stock_price, double strike_price, double expire, double volat, int num_steps, double risk_free_rate) { double dt = expire / num_steps; double up_factor = exp(volat * sqrt(dt)); double down_factor = 1 / up_factor; double R = exp((risk_free_rate) * dt); double up_prob = (R - down_factor) / (up_factor - down_factor); double *host_tree = new double[num_steps + 1]; double *dev_tree; // Initialize end of host_tree at expire time for (int step = 0; step <= num_steps; ++step) { // Option value when exercising the option double exercise = strike_price - stock_price * pow(up_factor, 2 * step - num_steps); host_tree[step] = max(exercise, .0); } check_err(hipMalloc((void** ) &dev_tree, (num_steps + 1) * sizeof(double))); check_err(hipMemcpy(dev_tree, host_tree, (num_steps + 1) * sizeof(double), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( tree_reduction), dim3(1), dim3(num_steps + 1), 0, 0, dev_tree, stock_price, strike_price, num_steps, R, up_factor, up_prob); double price; check_err(hipMemcpy(&price, &dev_tree[0], sizeof(double), hipMemcpyDeviceToHost)); hipFree(dev_tree); delete[] host_tree; return price; } //////// Build tree on GPU directly //////// __global__ void tree_builder( double* tree, double stock_price, double strike_price, int num_steps, double R, double up_factor, double up_prob) { // Initialize end of host_tree at 
expire time int branch = threadIdx.x; // Option value when exercising the option double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - num_steps); tree[branch] = max(exercise, .0); for (int step = num_steps - 1; step >= 0; --step) { __syncthreads(); int branch = threadIdx.x; if (branch <= step) { double binomial = 1 / R * (up_prob * tree[branch + 1] + (1 - up_prob) * tree[branch]); double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - step); tree[branch] = max(binomial, exercise); } } } double gpu2_binomial_american_put( double stock_price, double strike_price, double expire, double volat, int num_steps, double risk_free_rate) { double dt = expire / num_steps; double up_factor = exp(volat * sqrt(dt)); double down_factor = 1 / up_factor; double R = exp((risk_free_rate) * dt); double up_prob = (R - down_factor) / (up_factor - down_factor); double* dev_tree; double price; check_err(hipMalloc((void** ) &dev_tree, (num_steps + 1) * sizeof(double))); hipLaunchKernelGGL(( tree_builder), dim3(1), dim3(num_steps + 1), 0, 0, dev_tree, stock_price, strike_price, num_steps, R, up_factor, up_prob); check_err(hipMemcpy(&price, &dev_tree[0], sizeof(double), hipMemcpyDeviceToHost)); hipFree(dev_tree); return price; } //////// Build tree on GPU with shared memory //////// __global__ void tree_builder_shared( double* dev_price, double stock_price, double strike_price, int num_steps, double R, double up_factor, double up_prob) { __shared__ double tree[1024]; // Initialize end of host_tree at expire time int branch = threadIdx.x; if (branch <= num_steps) { // Option value when exercising the option double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - num_steps); tree[branch] = max(exercise, .0); } for (int step = num_steps - 1; step >= 0; --step) { __syncthreads(); int branch = threadIdx.x; if (branch <= step) { double binomial = 1 / R * (up_prob * tree[branch + 1] + (1 - up_prob) * tree[branch]); double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - step); tree[branch] = max(binomial, exercise); } } *dev_price = tree[0]; } double gpu3_binomial_american_put( double stock_price, double strike_price, double expire, double volat, int num_steps, double risk_free_rate) { double dt = expire / num_steps; double up_factor = exp(volat * sqrt(dt)); double down_factor = 1 / up_factor; double R = exp((risk_free_rate) * dt); double up_prob = (R - down_factor) / (up_factor - down_factor); double price; double* dev_price; check_err(hipMalloc((void** ) &dev_price, 1 * sizeof(double))); hipLaunchKernelGGL(( tree_builder_shared), dim3(1), dim3(num_steps + 1), 0, 0, dev_price, stock_price, strike_price, num_steps, R, up_factor, up_prob); check_err(hipMemcpy(&price, dev_price, sizeof(double), hipMemcpyDeviceToHost)); hipFree(dev_price); return price; } //////// Build tree on GPU with texture memory //////// /* texture<double> tex_tree1; texture<double> tex_tree2; __global__ void tree_builder_texture( double* tree1, double* tree2, double stock_price, double strike_price, int num_steps, double R, double up_factor, double up_prob) { // Initialize end of host_tree at expire time int branch = blockIdx.x; if (branch <= num_steps) { // Option value when exercising the option double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - num_steps); tree1[branch] = max(exercise, .0); } for (int step = num_steps - 1; step >= 0; --step) { int branch = blockIdx.x; if (branch <= step) { double binomial = 1 / R * (up_prob * tree1[branch + 1] + (1 
- up_prob) * tree1[branch]); double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - step); tree1[branch] = max(binomial, exercise); } __syncthreads(); } } double gpu4_binomial_american_put( double stock_price, double strike_price, double expire, double volat, int num_steps, double risk_free_rate) { double dt = expire / num_steps; double up_factor = exp(volat * sqrt(dt)); double down_factor = 1 / up_factor; double R = exp((risk_free_rate) * dt); double up_prob = (R - down_factor) / (up_factor - down_factor); double* dev_tree1; double* dev_tree2; double price; size_t tree_size = (num_steps + 1) * sizeof(double); check_err(hipMalloc((void** ) &dev_tree1, tree_size)); check_err(hipMalloc((void** ) &dev_tree2, tree_size)); check_err(hipBindTexture(NULL, tex_tree1, dev_tree1, tree_size)); check_err(hipBindTexture(NULL, tex_tree2, dev_tree2, tree_size)); tree_builder<<<num_steps, 1>>>(dev_tree, stock_price, strike_price, num_steps, R, up_factor, up_prob); check_err(hipMemcpy(&price, &dev_tree[0], sizeof(double), hipMemcpyDeviceToHost)); hipFree(dev_tree); check_err(hipMalloc((void** ) &dev_price, 1 * sizeof(double))); tree_builder_shared<<<1, num_steps>>>(dev_price, stock_price, strike_price, num_steps, R, up_factor, up_prob); check_err(hipMemcpy(&price, dev_price, sizeof(double), hipMemcpyDeviceToHost)); hipUnbindTexture(tex_tree1); hipUnbindTexture(tex_tree2); hipFree(dev_tree1); hipFree(dev_tree2); return price; } */ #if 1 //////// Build tree on with triangles and parallelograms //////// enum BrickPos { CEIL_EDGE, INNER, FLOOR_EDGE, FINAL }; const int NUM_STEPS = 2; /** * Aggregate one half brick, starting from the leaf nodes in the complete tree * * Needs to be launched with 'NUM_STEPS' threads */ template<BrickPos Pos> __global__ void tree_builder_triangle( double stock_price, double strike_price, double R, double up_factor, double up_prob, int root_pos, double* out_climbing_edge, double* out_sinking_edge) { __shared__ double tree[NUM_STEPS]; // Initialize end of host_tree at expire time int branch = threadIdx.x; // Option value when exercising the option double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - NUM_STEPS + root_pos); // DEBUG printf("e: %d <> %f <> %f ^ %d = %f\n", branch, exercise, up_factor, 2 * branch - NUM_STEPS + root_pos, pow(up_factor, 2 * branch - NUM_STEPS + root_pos)); tree[branch] = max(exercise, .0); //printf("triangle(x, %d) = max(%f, 0)\n", branch, exercise); for (int step = NUM_STEPS - 2; step >= 0; --step) { __syncthreads(); int branch = threadIdx.x; if (branch <= step) { double binomial = 1 / R * (up_prob * tree[branch + 1] + (1 - up_prob) * tree[branch]); double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - step + root_pos); tree[branch] = max(binomial, exercise); //printf("triangle(%d, %d) = max(%f, %f)\n", step, branch, binomial, exercise); if (Pos != FLOOR_EDGE) { out_sinking_edge[step] = tree[0]; } } } if (Pos != CEIL_EDGE) { out_climbing_edge[threadIdx.x] = tree[threadIdx.x]; } } template<BrickPos Pos> __global__ void tree_builder_brick( double stock_price, double strike_price, double R, double up_factor, double up_prob, int root_pos, double* in_upper_edge, double* in_lower_edge, double* out_climbing_edge, double* out_sinking_edge) { // Use shared memory for speedup __shared__ double tree[NUM_STEPS + 1]; tree[threadIdx.x] = in_lower_edge[threadIdx.x]; // Create first triangle for (int step = NUM_STEPS - 1; step >= 0; --step) { // Take one value from the upper edge, each step backward if 
(threadIdx.x == NUM_STEPS - 1) { // This is the only thread that read tree[NUM_STEPS], so only wait for that thread tree[NUM_STEPS] = in_upper_edge[NUM_STEPS - step - 1]; } __syncthreads(); // Fill all leafs, before writing over with the next step's leafs int branch = threadIdx.x; if (branch >= step) { double binomial = 1 / R * (up_prob * tree[branch + 1] + (1 - up_prob) * tree[branch]); double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - step - (NUM_STEPS - 1) + root_pos); tree[branch] = max(binomial, exercise); //printf("brick(%d, %d) = max(%f, %f)\n", step, branch, binomial, exercise); } } // Create second triangle for (int step = NUM_STEPS - 2; step >= 0; --step) { __syncthreads(); int branch = threadIdx.x; if (branch <= step) { double binomial = 1 / R * (up_prob * tree[branch + 1] + (1 - up_prob) * tree[branch]); double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - step + root_pos); tree[branch] = max(binomial, exercise); if (Pos != FLOOR_EDGE && Pos != FINAL) { out_sinking_edge[step] = tree[0]; } } } if (Pos == FINAL) { *out_climbing_edge = tree[0]; } else if (Pos != CEIL_EDGE) { out_climbing_edge[threadIdx.x] = tree[threadIdx.x]; } } double gpu4_binomial_american_put( double stock_price, double strike_price, double expire, double volat, int num_steps, double risk_free_rate) { double dt = expire / num_steps; double up_factor = exp(volat * sqrt(dt)); double down_factor = 1 / up_factor; double R = exp((risk_free_rate) * dt); double up_prob = (R - down_factor) / (up_factor - down_factor); double price; /* double* host_edge1 = new double[1024]; double* host_edge2 = new double[1024];*/ double* dev_edge1; double* dev_edge2; double* dev_price; check_err(hipMalloc((void** ) &dev_edge1, NUM_STEPS * sizeof(double))); check_err(hipMalloc((void** ) &dev_edge2, NUM_STEPS * sizeof(double))); check_err(hipMalloc((void** ) &dev_price, 1 * sizeof(double))); hipLaunchKernelGGL(( tree_builder_triangle<CEIL_EDGE>) , dim3(1), dim3(NUM_STEPS), 0, 0, stock_price, strike_price, R, up_factor, up_prob, NUM_STEPS+1, NULL, dev_edge1); hipLaunchKernelGGL(( tree_builder_triangle<FLOOR_EDGE>) , dim3(1), dim3(NUM_STEPS), 0, 0, stock_price, strike_price, R, up_factor, up_prob, -NUM_STEPS+1, dev_edge2, NULL); check_err(hipStreamSynchronize(0)); hipLaunchKernelGGL(( tree_builder_brick<FINAL>) , dim3(1), dim3(NUM_STEPS), 0, 0, stock_price, strike_price, R, up_factor, up_prob, 0, dev_edge1, dev_edge2, dev_price, NULL); check_err(hipMemcpyAsync(&price, dev_price, 1 * sizeof(double), hipMemcpyDefault)); /* check_err(hipMemcpyAsync(host_edge1, dev_edge1, 1024 * sizeof(double), hipMemcpyDefault)); check_err(hipMemcpyAsync(host_edge2, dev_edge2, 1024 * sizeof(double), hipMemcpyDefault)); check_err(hipStreamSynchronize(0)); for (int i = 0; i < 1024; ++i) { printf("%f, ", host_edge1[i]); } printf("\n--------\n"); for (int i = 0; i < 1024; ++i) { printf("%f, ", host_edge2[i]); } check_err(hipStreamSynchronize(0)); hipFree(dev_edge1); hipFree(dev_edge2);*/ hipFree(dev_price); return price; } #endif void gpu_benchmark(const char* name, double (*to_invoke)(int), const int reruns) { hipEvent_t start, end; check_err(hipEventCreate(&start)); check_err(hipEventCreate(&end)); //printf("%-32s, %3.8f", name, to_invoke(100)); // Warm up for (int i = 0; i < reruns / 10; ++i) { to_invoke(100); } for (int var = 0; var <= 1000; var += 10) { check_err(hipEventRecord(start, 0)); for (int i = 0; i < reruns; ++i) { to_invoke(var); } check_err(hipEventRecord(end, 0)); 
check_err(hipEventSynchronize(end)); float duration; check_err(hipEventElapsedTime(&duration, start, end)); duration = duration / reruns * pow(10, 3); printf(", %6d", int(duration + 0.5)); // Microseconds } printf("\n"); check_err(hipEventDestroy(start)); check_err(hipEventDestroy(end)); } void cpu_benchmark(const char* name, double (*to_invoke)(int), int reruns) { clock_t start; clock_t end; //printf("%-32s, %3.8f", name, to_invoke(100)); // Warm up for (int i = 0; i < reruns / 10; ++i) { to_invoke(100); } for (int var = 0; var <= 1000; var += 10) { // Start test start = clock(); for (int i = 0; i < reruns; ++i) { to_invoke(var); } end = clock(); float duration = (double(end) - double(start)) / CLOCKS_PER_SEC / reruns * pow(10, 6); printf(", %6d", int(duration + 0.5)); // Microseconds } printf("\n"); } double cpu(int indep_var) { return cpu_dynprog_binomial_american_put(20, 25, .5, 1, indep_var, 0.06); } double gpu1(int indep_var) { return gpu1_binomial_american_put(20, 25, .5, 1, indep_var, 0.06); } double gpu2(int indep_var) { return gpu2_binomial_american_put(20, 25, .5, 1, indep_var, 0.06); } double gpu3(int indep_var) { return gpu3_binomial_american_put(20, 25, .5, 1, indep_var, 0.06); } double gpu4(int indep_var) { return gpu4_binomial_american_put(20, 25, .5, 1, indep_var, 0.06); } int main() { printf("Compiled: %s %s\n", __DATE__, __TIME__); printf("cpu: %f\n", cpu(2048-1)); printf("gpu: %f\n", gpu4(2048-1)); return; const int reruns = 100; //const size_t num_step_tests = 11; //int step_tests[num_step_tests] = { 1, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000 }; /* printf("\"%-30s\", \"%-8s\"", "Implementation", "100 step"); for (int i = 0; i < num_step_tests; ++i) { printf(", %6d", step_tests[i]); } printf("\n");*/ cpu_benchmark("CPU dynprog", cpu, reruns); gpu_benchmark("GPU tree reduction", gpu1, reruns); gpu_benchmark("GPU tree build and reduction", gpu2, reruns); gpu_benchmark("GPU shared memory", gpu3, reruns); return 0; }
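For reference while reading the GPU variants above, the value every tree_* kernel recomputes is the standard Cox-Ross-Rubinstein backward induction for an American put. Restated from the code (this is a reading of the code, not text from the file), with S = stock_price, K = strike_price, \sigma = volat, r = risk_free_rate, N = num_steps, b the branch index, and s the step:

u = e^{\sigma\sqrt{\Delta t}}, \qquad d = \frac{1}{u}, \qquad R = e^{r\,\Delta t}, \qquad p = \frac{R - d}{u - d}

V_b^{(N)} = \max\bigl(K - S\,u^{\,2b - N},\ 0\bigr), \qquad
V_b^{(s)} = \max\Bigl(K - S\,u^{\,2b - s},\ \tfrac{1}{R}\bigl(p\,V_{b+1}^{(s+1)} + (1 - p)\,V_b^{(s+1)}\bigr)\Bigr)

The returned price is V_0^{(0)}, i.e. tree[0] in the code.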
d4db21dfb49ddbe9b98773b7812b92ff6f532018.cu
#include <iostream> #include <cstdio> #include <cstdlib> #include <cmath> #include <algorithm> using namespace std; #define check_err(cu_err) { cu_err_handler(cu_err, __FILE__, __LINE__); } inline void cu_err_handler(cudaError_t err, const char* file, int line) { if (err != cudaSuccess) { fprintf(stderr, "GPU error: '%s' at %s:%d\n", cudaGetErrorString(err), file, line); exit(1); } } //////// CPU dynamic programming implementation //////// double cpu_dynprog_binomial_american_put( double stock_price, double strike_price, double expire, double volat, int num_steps, double risk_free_rate) { using namespace std; double dt = expire / num_steps; double up_factor = exp(volat * sqrt(dt)); double down_factor = 1 / up_factor; double R = exp(risk_free_rate * dt); double up_prob = (R - down_factor) / (up_factor - down_factor); double *tree = new double[num_steps + 1]; // Initialize end of tree at expire time for (int branch = 0; branch <= num_steps; ++branch) { // Option value when exercising the option double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - num_steps); tree[branch] = max(exercise, .0); //printf("triangle(x, %d) = max(0 %f)\n", branch, exercise); } for (int step = num_steps - 1; step >= 0; --step) { for (int branch = 0; branch <= step; ++branch) { double binomial = 1 / R * (up_prob * tree[branch + 1] + (1 - up_prob) * tree[branch]); double exercise = strike_price - stock_price * pow(up_factor, (double) 2 * branch - step); tree[branch] = max(binomial, exercise); } } double price = tree[0]; delete[] tree; return price; } //////// GPU Reduce tree //////// __global__ void tree_reduction( double* tree, double stock_price, double strike_price, int num_steps, double R, double up_factor, double up_prob) { for (int step = num_steps - 1; step >= 0; --step) { int branch = threadIdx.x; if (branch <= step) { double binomial = 1 / R * (up_prob * tree[branch + 1] + (1 - up_prob) * tree[branch]); double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - step); tree[branch] = max(binomial, exercise); } __syncthreads(); } } double gpu1_binomial_american_put( double stock_price, double strike_price, double expire, double volat, int num_steps, double risk_free_rate) { double dt = expire / num_steps; double up_factor = exp(volat * sqrt(dt)); double down_factor = 1 / up_factor; double R = exp((risk_free_rate) * dt); double up_prob = (R - down_factor) / (up_factor - down_factor); double *host_tree = new double[num_steps + 1]; double *dev_tree; // Initialize end of host_tree at expire time for (int step = 0; step <= num_steps; ++step) { // Option value when exercising the option double exercise = strike_price - stock_price * pow(up_factor, 2 * step - num_steps); host_tree[step] = max(exercise, .0); } check_err(cudaMalloc((void** ) &dev_tree, (num_steps + 1) * sizeof(double))); check_err(cudaMemcpy(dev_tree, host_tree, (num_steps + 1) * sizeof(double), cudaMemcpyHostToDevice)); tree_reduction<<<1, num_steps + 1>>>(dev_tree, stock_price, strike_price, num_steps, R, up_factor, up_prob); double price; check_err(cudaMemcpy(&price, &dev_tree[0], sizeof(double), cudaMemcpyDeviceToHost)); cudaFree(dev_tree); delete[] host_tree; return price; } //////// Build tree on GPU directly //////// __global__ void tree_builder( double* tree, double stock_price, double strike_price, int num_steps, double R, double up_factor, double up_prob) { // Initialize end of host_tree at expire time int branch = threadIdx.x; // Option value when exercising the option double exercise = strike_price - 
stock_price * pow(up_factor, 2 * branch - num_steps); tree[branch] = max(exercise, .0); for (int step = num_steps - 1; step >= 0; --step) { __syncthreads(); int branch = threadIdx.x; if (branch <= step) { double binomial = 1 / R * (up_prob * tree[branch + 1] + (1 - up_prob) * tree[branch]); double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - step); tree[branch] = max(binomial, exercise); } } } double gpu2_binomial_american_put( double stock_price, double strike_price, double expire, double volat, int num_steps, double risk_free_rate) { double dt = expire / num_steps; double up_factor = exp(volat * sqrt(dt)); double down_factor = 1 / up_factor; double R = exp((risk_free_rate) * dt); double up_prob = (R - down_factor) / (up_factor - down_factor); double* dev_tree; double price; check_err(cudaMalloc((void** ) &dev_tree, (num_steps + 1) * sizeof(double))); tree_builder<<<1, num_steps + 1>>>(dev_tree, stock_price, strike_price, num_steps, R, up_factor, up_prob); check_err(cudaMemcpy(&price, &dev_tree[0], sizeof(double), cudaMemcpyDeviceToHost)); cudaFree(dev_tree); return price; } //////// Build tree on GPU with shared memory //////// __global__ void tree_builder_shared( double* dev_price, double stock_price, double strike_price, int num_steps, double R, double up_factor, double up_prob) { __shared__ double tree[1024]; // Initialize end of host_tree at expire time int branch = threadIdx.x; if (branch <= num_steps) { // Option value when exercising the option double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - num_steps); tree[branch] = max(exercise, .0); } for (int step = num_steps - 1; step >= 0; --step) { __syncthreads(); int branch = threadIdx.x; if (branch <= step) { double binomial = 1 / R * (up_prob * tree[branch + 1] + (1 - up_prob) * tree[branch]); double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - step); tree[branch] = max(binomial, exercise); } } *dev_price = tree[0]; } double gpu3_binomial_american_put( double stock_price, double strike_price, double expire, double volat, int num_steps, double risk_free_rate) { double dt = expire / num_steps; double up_factor = exp(volat * sqrt(dt)); double down_factor = 1 / up_factor; double R = exp((risk_free_rate) * dt); double up_prob = (R - down_factor) / (up_factor - down_factor); double price; double* dev_price; check_err(cudaMalloc((void** ) &dev_price, 1 * sizeof(double))); tree_builder_shared<<<1, num_steps + 1>>>(dev_price, stock_price, strike_price, num_steps, R, up_factor, up_prob); check_err(cudaMemcpy(&price, dev_price, sizeof(double), cudaMemcpyDeviceToHost)); cudaFree(dev_price); return price; } //////// Build tree on GPU with texture memory //////// /* texture<double> tex_tree1; texture<double> tex_tree2; __global__ void tree_builder_texture( double* tree1, double* tree2, double stock_price, double strike_price, int num_steps, double R, double up_factor, double up_prob) { // Initialize end of host_tree at expire time int branch = blockIdx.x; if (branch <= num_steps) { // Option value when exercising the option double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - num_steps); tree1[branch] = max(exercise, .0); } for (int step = num_steps - 1; step >= 0; --step) { int branch = blockIdx.x; if (branch <= step) { double binomial = 1 / R * (up_prob * tree1[branch + 1] + (1 - up_prob) * tree1[branch]); double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - step); tree1[branch] = max(binomial, exercise); } __syncthreads(); } } 
double gpu4_binomial_american_put( double stock_price, double strike_price, double expire, double volat, int num_steps, double risk_free_rate) { double dt = expire / num_steps; double up_factor = exp(volat * sqrt(dt)); double down_factor = 1 / up_factor; double R = exp((risk_free_rate) * dt); double up_prob = (R - down_factor) / (up_factor - down_factor); double* dev_tree1; double* dev_tree2; double price; size_t tree_size = (num_steps + 1) * sizeof(double); check_err(cudaMalloc((void** ) &dev_tree1, tree_size)); check_err(cudaMalloc((void** ) &dev_tree2, tree_size)); check_err(cudaBindTexture(NULL, tex_tree1, dev_tree1, tree_size)); check_err(cudaBindTexture(NULL, tex_tree2, dev_tree2, tree_size)); tree_builder<<<num_steps, 1>>>(dev_tree, stock_price, strike_price, num_steps, R, up_factor, up_prob); check_err(cudaMemcpy(&price, &dev_tree[0], sizeof(double), cudaMemcpyDeviceToHost)); cudaFree(dev_tree); check_err(cudaMalloc((void** ) &dev_price, 1 * sizeof(double))); tree_builder_shared<<<1, num_steps>>>(dev_price, stock_price, strike_price, num_steps, R, up_factor, up_prob); check_err(cudaMemcpy(&price, dev_price, sizeof(double), cudaMemcpyDeviceToHost)); cudaUnbindTexture(tex_tree1); cudaUnbindTexture(tex_tree2); cudaFree(dev_tree1); cudaFree(dev_tree2); return price; } */ #if 1 //////// Build tree on with triangles and parallelograms //////// enum BrickPos { CEIL_EDGE, INNER, FLOOR_EDGE, FINAL }; const int NUM_STEPS = 2; /** * Aggregate one half brick, starting from the leaf nodes in the complete tree * * Needs to be launched with 'NUM_STEPS' threads */ template<BrickPos Pos> __global__ void tree_builder_triangle( double stock_price, double strike_price, double R, double up_factor, double up_prob, int root_pos, double* out_climbing_edge, double* out_sinking_edge) { __shared__ double tree[NUM_STEPS]; // Initialize end of host_tree at expire time int branch = threadIdx.x; // Option value when exercising the option double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - NUM_STEPS + root_pos); // DEBUG printf("e: %d <> %f <> %f ^ %d = %f\n", branch, exercise, up_factor, 2 * branch - NUM_STEPS + root_pos, pow(up_factor, 2 * branch - NUM_STEPS + root_pos)); tree[branch] = max(exercise, .0); //printf("triangle(x, %d) = max(%f, 0)\n", branch, exercise); for (int step = NUM_STEPS - 2; step >= 0; --step) { __syncthreads(); int branch = threadIdx.x; if (branch <= step) { double binomial = 1 / R * (up_prob * tree[branch + 1] + (1 - up_prob) * tree[branch]); double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - step + root_pos); tree[branch] = max(binomial, exercise); //printf("triangle(%d, %d) = max(%f, %f)\n", step, branch, binomial, exercise); if (Pos != FLOOR_EDGE) { out_sinking_edge[step] = tree[0]; } } } if (Pos != CEIL_EDGE) { out_climbing_edge[threadIdx.x] = tree[threadIdx.x]; } } template<BrickPos Pos> __global__ void tree_builder_brick( double stock_price, double strike_price, double R, double up_factor, double up_prob, int root_pos, double* in_upper_edge, double* in_lower_edge, double* out_climbing_edge, double* out_sinking_edge) { // Use shared memory for speedup __shared__ double tree[NUM_STEPS + 1]; tree[threadIdx.x] = in_lower_edge[threadIdx.x]; // Create first triangle for (int step = NUM_STEPS - 1; step >= 0; --step) { // Take one value from the upper edge, each step backward if (threadIdx.x == NUM_STEPS - 1) { // This is the only thread that read tree[NUM_STEPS], so only wait for that thread tree[NUM_STEPS] = in_upper_edge[NUM_STEPS - step - 
1]; } __syncthreads(); // Fill all leafs, before writing over with the next step's leafs int branch = threadIdx.x; if (branch >= step) { double binomial = 1 / R * (up_prob * tree[branch + 1] + (1 - up_prob) * tree[branch]); double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - step - (NUM_STEPS - 1) + root_pos); tree[branch] = max(binomial, exercise); //printf("brick(%d, %d) = max(%f, %f)\n", step, branch, binomial, exercise); } } // Create second triangle for (int step = NUM_STEPS - 2; step >= 0; --step) { __syncthreads(); int branch = threadIdx.x; if (branch <= step) { double binomial = 1 / R * (up_prob * tree[branch + 1] + (1 - up_prob) * tree[branch]); double exercise = strike_price - stock_price * pow(up_factor, 2 * branch - step + root_pos); tree[branch] = max(binomial, exercise); if (Pos != FLOOR_EDGE && Pos != FINAL) { out_sinking_edge[step] = tree[0]; } } } if (Pos == FINAL) { *out_climbing_edge = tree[0]; } else if (Pos != CEIL_EDGE) { out_climbing_edge[threadIdx.x] = tree[threadIdx.x]; } } double gpu4_binomial_american_put( double stock_price, double strike_price, double expire, double volat, int num_steps, double risk_free_rate) { double dt = expire / num_steps; double up_factor = exp(volat * sqrt(dt)); double down_factor = 1 / up_factor; double R = exp((risk_free_rate) * dt); double up_prob = (R - down_factor) / (up_factor - down_factor); double price; /* double* host_edge1 = new double[1024]; double* host_edge2 = new double[1024];*/ double* dev_edge1; double* dev_edge2; double* dev_price; check_err(cudaMalloc((void** ) &dev_edge1, NUM_STEPS * sizeof(double))); check_err(cudaMalloc((void** ) &dev_edge2, NUM_STEPS * sizeof(double))); check_err(cudaMalloc((void** ) &dev_price, 1 * sizeof(double))); tree_builder_triangle<CEIL_EDGE> <<<1, NUM_STEPS>>>(stock_price, strike_price, R, up_factor, up_prob, NUM_STEPS+1, NULL, dev_edge1); tree_builder_triangle<FLOOR_EDGE> <<<1, NUM_STEPS>>>(stock_price, strike_price, R, up_factor, up_prob, -NUM_STEPS+1, dev_edge2, NULL); check_err(cudaStreamSynchronize(0)); tree_builder_brick<FINAL> <<<1, NUM_STEPS>>>(stock_price, strike_price, R, up_factor, up_prob, 0, dev_edge1, dev_edge2, dev_price, NULL); check_err(cudaMemcpyAsync(&price, dev_price, 1 * sizeof(double), cudaMemcpyDefault)); /* check_err(cudaMemcpyAsync(host_edge1, dev_edge1, 1024 * sizeof(double), cudaMemcpyDefault)); check_err(cudaMemcpyAsync(host_edge2, dev_edge2, 1024 * sizeof(double), cudaMemcpyDefault)); check_err(cudaStreamSynchronize(0)); for (int i = 0; i < 1024; ++i) { printf("%f, ", host_edge1[i]); } printf("\n--------\n"); for (int i = 0; i < 1024; ++i) { printf("%f, ", host_edge2[i]); } check_err(cudaStreamSynchronize(0)); cudaFree(dev_edge1); cudaFree(dev_edge2);*/ cudaFree(dev_price); return price; } #endif void gpu_benchmark(const char* name, double (*to_invoke)(int), const int reruns) { cudaEvent_t start, end; check_err(cudaEventCreate(&start)); check_err(cudaEventCreate(&end)); //printf("%-32s, %3.8f", name, to_invoke(100)); // Warm up for (int i = 0; i < reruns / 10; ++i) { to_invoke(100); } for (int var = 0; var <= 1000; var += 10) { check_err(cudaEventRecord(start, 0)); for (int i = 0; i < reruns; ++i) { to_invoke(var); } check_err(cudaEventRecord(end, 0)); check_err(cudaEventSynchronize(end)); float duration; check_err(cudaEventElapsedTime(&duration, start, end)); duration = duration / reruns * pow(10, 3); printf(", %6d", int(duration + 0.5)); // Microseconds } printf("\n"); check_err(cudaEventDestroy(start)); check_err(cudaEventDestroy(end)); 
} void cpu_benchmark(const char* name, double (*to_invoke)(int), int reruns) { clock_t start; clock_t end; //printf("%-32s, %3.8f", name, to_invoke(100)); // Warm up for (int i = 0; i < reruns / 10; ++i) { to_invoke(100); } for (int var = 0; var <= 1000; var += 10) { // Start test start = clock(); for (int i = 0; i < reruns; ++i) { to_invoke(var); } end = clock(); float duration = (double(end) - double(start)) / CLOCKS_PER_SEC / reruns * pow(10, 6); printf(", %6d", int(duration + 0.5)); // Microseconds } printf("\n"); } double cpu(int indep_var) { return cpu_dynprog_binomial_american_put(20, 25, .5, 1, indep_var, 0.06); } double gpu1(int indep_var) { return gpu1_binomial_american_put(20, 25, .5, 1, indep_var, 0.06); } double gpu2(int indep_var) { return gpu2_binomial_american_put(20, 25, .5, 1, indep_var, 0.06); } double gpu3(int indep_var) { return gpu3_binomial_american_put(20, 25, .5, 1, indep_var, 0.06); } double gpu4(int indep_var) { return gpu4_binomial_american_put(20, 25, .5, 1, indep_var, 0.06); } int main() { printf("Compiled: %s %s\n", __DATE__, __TIME__); printf("cpu: %f\n", cpu(2048-1)); printf("gpu: %f\n", gpu4(2048-1)); return 0; const int reruns = 100; //const size_t num_step_tests = 11; //int step_tests[num_step_tests] = { 1, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000 }; /* printf("\"%-30s\", \"%-8s\"", "Implementation", "100 step"); for (int i = 0; i < num_step_tests; ++i) { printf(", %6d", step_tests[i]); } printf("\n");*/ cpu_benchmark("CPU dynprog", cpu, reruns); gpu_benchmark("GPU tree reduction", gpu1, reruns); gpu_benchmark("GPU tree build and reduction", gpu2, reruns); gpu_benchmark("GPU shared memory", gpu3, reruns); return 0; }
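All five *_binomial_american_put implementations in this file pair derive the same per-step quantities before touching the tree. As a quick reference, here is that setup factored into one host-side helper; BinomialParams, make_params, and the printed sanity check are invented for illustration and are not part of the original program.

#include <cmath>
#include <cstdio>

struct BinomialParams {
    double dt;    // time per step: expire / num_steps
    double up;    // up factor: exp(volat * sqrt(dt))
    double down;  // down factor: 1 / up
    double R;     // per-step risk-free growth: exp(rate * dt)
    double p_up;  // risk-neutral up probability: (R - down) / (up - down)
};

static BinomialParams make_params(double expire, double volat, int num_steps, double rate) {
    BinomialParams p;
    p.dt   = expire / num_steps;
    p.up   = std::exp(volat * std::sqrt(p.dt));
    p.down = 1.0 / p.up;
    p.R    = std::exp(rate * p.dt);
    p.p_up = (p.R - p.down) / (p.up - p.down);
    return p;
}

int main() {
    // Same market inputs as the cpu()/gpu*() wrappers above: expire 0.5, volatility 1, rate 0.06.
    BinomialParams p = make_params(0.5, 1.0, 1000, 0.06);
    printf("dt=%g up=%g down=%g R=%g p_up=%g\n", p.dt, p.up, p.down, p.R, p.p_up);
    return 0;
}

Backward induction then assigns each node max(strike_price - stock_price * up^(2*branch - step), (p_up * V_up + (1 - p_up) * V_down) / R), which is the exercise/binomial comparison every kernel above repeats.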
4c1ea6878ac7e8eae93fbf3898e22fd63f7e8fa1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <THH/THHNumerics.cuh> namespace at { namespace cuda { #define THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM 100 #define THRESH_NUMBER_BINS_FOR_GLOBAL_MEM 1000 #define FOR_KERNEL_LOOP(i, lim) \ for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x; i < lim; \ i += gridDim.x * blockDim.x) /* Memory types used for the 3 histogram implementations. See `CUDA_tensor_histogram` below. */ enum class CUDAHistogramMemoryType { SHARED, MULTI_BLOCK, GLOBAL }; namespace { template<typename input_t, typename IndexType> __device__ static IndexType getBin(input_t bVal, input_t minvalue, input_t maxvalue, int64_t nbins) { IndexType bin = (int)((bVal - minvalue) * nbins / (maxvalue - minvalue)); // (only applicable for histc) // while each bin is inclusive at the lower end and exclusive at the higher, i.e. [start, end) // the last bin is inclusive at both, i.e. [start, end], in order to include maxvalue if exists // therefore when bin == nbins, adjust bin to the last bin if (bin == nbins) bin -= 1; return bin; } } /* Kernel for computing the histogram of the input. */ template < typename output_t, typename input_t, typename IndexType, int ADims, int PDims, int BDims, CUDAHistogramMemoryType MemoryType = CUDAHistogramMemoryType::MULTI_BLOCK, typename Op> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(512) #endif __global__ void kernelHistogram1D( detail::TensorInfo<output_t, IndexType> a, /* output */ detail::TensorInfo<output_t, IndexType> p, /* partial output */ detail::TensorInfo<input_t, IndexType> b, /* input */ int64_t nbins, input_t minvalue, input_t maxvalue, IndexType totalElements, Op getOp) { extern __shared__ unsigned char my_smem[]; output_t* smem = nullptr; if (MemoryType == CUDAHistogramMemoryType::SHARED) { ////////////////////////// Shared memory ////////////////////////// // atomically add to block specific shared memory // then atomically add to the global output tensor smem = reinterpret_cast<output_t*>(my_smem); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { smem[i] = 0; } __syncthreads(); FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `smem` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); atomicAdd(&smem[bin], getOp(linearIndex)); } } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. 
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); atomicAdd(&a.data[aOffset], smem[i]); } } else if (MemoryType == CUDAHistogramMemoryType::MULTI_BLOCK) { ////////////////////////// Multi Block memory ////////////////////////// // atomically add to block specific global tensor // then atomically add to the global output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `p` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); const IndexType pIdx = p.strides[0] * blockIdx.x + bin; const IndexType pOffset = detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p); atomicAdd(&p.data[pOffset], getOp(linearIndex)); } } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. const IndexType pIdx = p.strides[0] * blockIdx.x; const IndexType pOffset = detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); atomicAdd(&a.data[aOffset], p.data[pOffset + i]); } } else { ////////////////////////// Global memory ////////////////////////// // atomically add to the output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `a` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(bin, a); atomicAdd(&a.data[aOffset], getOp(linearIndex)); } } } } #define HANDLE_CASE(MEMORY_TYPE, WEIGHTS_OP, SHARED_MEM) \ hipLaunchKernelGGL(( kernelHistogram1D<output_t, input_t, IndexType, 1, 2, -1, MEMORY_TYPE>) \ , dim3(grid), \ block, \ SHARED_MEM, \ getCurrentHIPStreamMasqueradingAsCUDA(), \ aInfo, pInfo, bInfo, nbins, minvalue, maxvalue, totalElements, WEIGHTS_OP); \ AT_ASSERTM(hipGetLastError() == hipSuccess, "kernelHistogram1D failed"); #define HANDLE_SWITCH_CASE(mType, getOp) \ switch (mType) { \ case CUDAHistogramMemoryType::SHARED: \ HANDLE_CASE(CUDAHistogramMemoryType::SHARED, getOp, sharedMem); \ break; \ case CUDAHistogramMemoryType::MULTI_BLOCK: \ HANDLE_CASE(CUDAHistogramMemoryType::MULTI_BLOCK, getOp, 0); \ break; \ default: \ HANDLE_CASE(CUDAHistogramMemoryType::GLOBAL, getOp, 0); \ } inline int64_t getFreeGlobalMemory() { // no need to use `hipSetDevice` size_t free_mem, total_mem; hipMemGetInfo(&free_mem, &total_mem); AT_ASSERTM( hipGetLastError() == hipSuccess, "CUDA_tensor_histogram failed to get free global memory"); return static_cast<int64_t>(free_mem); } /* Calculate the frequency of the input values. `a` contains the final output or the histogram. Input `b` is assumed to be 1-D non-negative int array. `c` optionally contains the weight vector. 
See `help torch.bincount` for details on the math. 3 implementations based of input size and memory usage: case: #bins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM and enough shared mem SHARED: Each block atomically adds to it's own **shared** hist copy, then atomically updates the global tensor. case: #bins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM and enough global mem MULTI_BLOCK: Each block atomically adds to it's own **global** hist copy, then atomically updates the global tensor. case: THRESH_NUMBER_BINS_FOR_GLOBAL_MEM <= #bins GLOBAL: all threads atomically update to a single **global** hist copy. */ template <typename output_t, typename input_t, bool HasWeights> bool CUDA_tensor_histogram( at::Tensor a, /* output */ at::Tensor b, /* input */ at::Tensor c, /* weights(optional) */ int64_t nbins, input_t minvalue, input_t maxvalue, TensorArgType aType = TensorArgType::ReadWrite, TensorArgType bType = TensorArgType::ReadOnly, TensorArgType cType = TensorArgType::ReadOnly) { checkBackend("CUDA_tensor_histogram", {a, b}, Backend::CUDA); if (HasWeights) { checkBackend("CUDA_tensor_histogram", {c}, Backend::CUDA); } auto totalElements = b.numel(); const dim3 block = getApplyBlock(); dim3 grid; int64_t curDevice = current_device(); if (curDevice == -1 || !getApplyGrid(totalElements, grid, curDevice)) { return false; } CUDAHistogramMemoryType memType = CUDAHistogramMemoryType::GLOBAL; auto maxSharedMem = getCurrentDeviceProperties()->sharedMemPerBlock; auto sharedMem = nbins * sizeof(output_t) + 8; // 8 guard bytes auto maxGlobalMem = getFreeGlobalMemory(); auto multiBlockMem = nbins * grid.x * sizeof(output_t) + 8; // 8 guard bytes // determine memory type to use in the kernel if (nbins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM && sharedMem < maxSharedMem) { memType = CUDAHistogramMemoryType::SHARED; } else if ( nbins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM && multiBlockMem < (maxGlobalMem / 2)) { // check against half of free mem to be extra safe // due to cached allocator, we may anyway have slightly more free mem memType = CUDAHistogramMemoryType::MULTI_BLOCK; } // alloc memory for MULTI_BLOCK using IndexType = int64_t; auto aInfo = detail::getTensorInfo<output_t, IndexType>(a); auto bInfo = detail::getTensorInfo<input_t, IndexType>(b); detail::TensorInfo<output_t, IndexType> pInfo(nullptr, 0, {}, {}); Tensor partial_output; if (memType == CUDAHistogramMemoryType::MULTI_BLOCK) { partial_output = native::zeros({grid.x, nbins}, a.options()); pInfo = detail::getTensorInfo<output_t, IndexType>(partial_output); } if (HasWeights) { auto cInfo = detail::getTensorInfo<output_t, IndexType>(c); const auto getWeightsOp = [cInfo] __device__(IndexType cIndex) { const IndexType cOffset = detail::IndexToOffset<output_t, IndexType, 1>::get(cIndex, cInfo); return cInfo.data[cOffset]; }; HANDLE_SWITCH_CASE(memType, getWeightsOp) } else { static const auto getDummyOp = [] __device__(IndexType) { return 1L; }; HANDLE_SWITCH_CASE(memType, getDummyOp) } return true; } #undef HANDLE_CASE #undef HANDLE_SWITCH_CASE #undef FOR_KERNEL_LOOP #undef THRESH_NUMBER_BINS_FOR_GLOBAL_MEM #undef THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM } // namespace cuda namespace { ///////////////// bincount ///////////////// template <typename input_t, typename weights_t> Tensor _bincount_cuda_template( const Tensor& self, const Tensor& weights, int64_t minlength) { if (minlength < 0) { AT_ERROR("minlength should be >= 0"); } if (self.dim() == 1 && self.numel() == 0) { return native::zeros({minlength}, device(kCUDA).dtype(kLong)); } if (self.dim() != 1 
|| (!std::is_same<input_t, uint8_t>::value && *self.min().cpu().data_ptr<input_t>() < 0)) { AT_ERROR("bincount only supports 1-d non-negative integral inputs."); } bool has_weights = weights.defined(); if (has_weights && weights.size(0) != self.size(0)) { AT_ERROR("input and weights should have the same length"); } const int64_t nbins = ::max(*self.max().cpu().data_ptr<input_t>() + (int64_t)1, minlength); const input_t minvalue = 0; const input_t maxvalue = nbins; // alloc output counter on GPU Tensor output; if (has_weights) { output = native::zeros({nbins}, weights.options()); auto ret = cuda::CUDA_tensor_histogram<weights_t, input_t, true>( output, self, weights, nbins, minvalue, maxvalue); } else { output = native::zeros({nbins}, device(DeviceType::CUDA).dtype(kLong)); auto ret = cuda::CUDA_tensor_histogram<int64_t, input_t, false>( output, self, weights, nbins, minvalue, maxvalue); } return output; } ///////////////// histc ///////////////// template <typename input_t> Tensor _histc_cuda_template( const Tensor& self, int64_t nbins, input_t min, input_t max) { if (nbins <= 0) { AT_ERROR("bins must be > 0"); } Tensor output = native::zeros({nbins}, device(DeviceType::CUDA).dtype(self.scalar_type())); input_t minvalue = min; input_t maxvalue = max; if (min == max) { minvalue = *self.min().cpu().data_ptr<input_t>(); maxvalue = *self.max().cpu().data_ptr<input_t>(); } if (minvalue == maxvalue) { minvalue = minvalue - 1; maxvalue = maxvalue + 1; } #ifndef __HIP_PLATFORM_HCC__ TORCH_CHECK( !(THCNumerics<input_t>::isinf(minvalue) || THCNumerics<input_t>::isinf(maxvalue) || THCNumerics<input_t>::isnan(minvalue) || THCNumerics<input_t>::isnan(maxvalue)), "range of [", minvalue, ", ", maxvalue, "] is not finite"); #else TORCH_CHECK( !(std::isinf(minvalue) || std::isinf(maxvalue) || std::isnan(minvalue) || std::isnan(maxvalue)), "range of [", minvalue, ", ", maxvalue, "] is not finite"); #endif TORCH_CHECK(minvalue < maxvalue, "max must be larger than min"); auto ret = cuda::CUDA_tensor_histogram<input_t, input_t, false>( output, self, Tensor(), nbins, minvalue, maxvalue); return output; } } // namespace namespace native { Tensor _bincount_cuda( const Tensor& self, const Tensor& weights, int64_t minlength) { return AT_DISPATCH_INTEGRAL_TYPES(self.scalar_type(), "bincount_cuda", [&] { const auto scalar = weights.scalar_type(); if (scalar == ScalarType::Undefined || scalar == ScalarType::Float) return _bincount_cuda_template<scalar_t, float>(self, weights, minlength); return _bincount_cuda_template<scalar_t, double>( self, weights.to(kDouble), minlength); }); } Tensor _histc_cuda( const Tensor& self, int64_t nbins, Scalar min, Scalar max) { if (self.scalar_type() == ScalarType::Half) { AT_ERROR("HalfTensor is not supported"); } return AT_DISPATCH_ALL_TYPES(self.scalar_type(), "histc", [&] { return _histc_cuda_template<scalar_t>(self, nbins, min.to<scalar_t>(), max.to<scalar_t>()); }); } Tensor& _histc_out_cuda(Tensor& result, const Tensor& self, int64_t bins, Scalar min, Scalar max) { auto ret = _histc_cuda(self, bins, min, max); result.resize_as_(ret); result.copy_(ret); return result; } } // namespace native } // namespace at
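getBin in this file maps a value to one of nbins equal-width bins and, as its comment notes for histc, folds a value equal to maxvalue back into the last bin so that bin is closed on both ends. A small host-side sketch of that rule (function and variable names are invented):

#include <cstdio>

// Mirrors the getBin arithmetic: linear bin index, with maxvalue clamped into the last bin.
static long long bin_of(double v, double minvalue, double maxvalue, long long nbins) {
    long long bin = (long long)((v - minvalue) * nbins / (maxvalue - minvalue));
    if (bin == nbins) bin -= 1;  // keep v == maxvalue inside bin nbins-1
    return bin;
}

int main() {
    const long long nbins = 4;
    const double lo = 0.0, hi = 8.0;
    const double samples[] = {0.0, 1.9, 2.0, 7.9, 8.0};  // 8.0 lands in bin 3, not a phantom bin 4
    for (double v : samples)
        printf("value %.1f -> bin %lld\n", v, bin_of(v, lo, hi, nbins));
    return 0;
}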
4c1ea6878ac7e8eae93fbf3898e22fd63f7e8fa1.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <THC/THCNumerics.cuh> namespace at { namespace cuda { #define THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM 100 #define THRESH_NUMBER_BINS_FOR_GLOBAL_MEM 1000 #define FOR_KERNEL_LOOP(i, lim) \ for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x; i < lim; \ i += gridDim.x * blockDim.x) /* Memory types used for the 3 histogram implementations. See `CUDA_tensor_histogram` below. */ enum class CUDAHistogramMemoryType { SHARED, MULTI_BLOCK, GLOBAL }; namespace { template<typename input_t, typename IndexType> __device__ static IndexType getBin(input_t bVal, input_t minvalue, input_t maxvalue, int64_t nbins) { IndexType bin = (int)((bVal - minvalue) * nbins / (maxvalue - minvalue)); // (only applicable for histc) // while each bin is inclusive at the lower end and exclusive at the higher, i.e. [start, end) // the last bin is inclusive at both, i.e. [start, end], in order to include maxvalue if exists // therefore when bin == nbins, adjust bin to the last bin if (bin == nbins) bin -= 1; return bin; } } /* Kernel for computing the histogram of the input. */ template < typename output_t, typename input_t, typename IndexType, int ADims, int PDims, int BDims, CUDAHistogramMemoryType MemoryType = CUDAHistogramMemoryType::MULTI_BLOCK, typename Op> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(512) #endif __global__ void kernelHistogram1D( detail::TensorInfo<output_t, IndexType> a, /* output */ detail::TensorInfo<output_t, IndexType> p, /* partial output */ detail::TensorInfo<input_t, IndexType> b, /* input */ int64_t nbins, input_t minvalue, input_t maxvalue, IndexType totalElements, Op getOp) { extern __shared__ unsigned char my_smem[]; output_t* smem = nullptr; if (MemoryType == CUDAHistogramMemoryType::SHARED) { ////////////////////////// Shared memory ////////////////////////// // atomically add to block specific shared memory // then atomically add to the global output tensor smem = reinterpret_cast<output_t*>(my_smem); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { smem[i] = 0; } __syncthreads(); FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `smem` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); atomicAdd(&smem[bin], getOp(linearIndex)); } } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. 
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); atomicAdd(&a.data[aOffset], smem[i]); } } else if (MemoryType == CUDAHistogramMemoryType::MULTI_BLOCK) { ////////////////////////// Multi Block memory ////////////////////////// // atomically add to block specific global tensor // then atomically add to the global output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `p` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); const IndexType pIdx = p.strides[0] * blockIdx.x + bin; const IndexType pOffset = detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p); atomicAdd(&p.data[pOffset], getOp(linearIndex)); } } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. const IndexType pIdx = p.strides[0] * blockIdx.x; const IndexType pOffset = detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); atomicAdd(&a.data[aOffset], p.data[pOffset + i]); } } else { ////////////////////////// Global memory ////////////////////////// // atomically add to the output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `a` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(bin, a); atomicAdd(&a.data[aOffset], getOp(linearIndex)); } } } } #define HANDLE_CASE(MEMORY_TYPE, WEIGHTS_OP, SHARED_MEM) \ kernelHistogram1D<output_t, input_t, IndexType, 1, 2, -1, MEMORY_TYPE> \ <<<grid, \ block, \ SHARED_MEM, \ getCurrentCUDAStream()>>>( \ aInfo, pInfo, bInfo, nbins, minvalue, maxvalue, totalElements, WEIGHTS_OP); \ AT_ASSERTM(cudaGetLastError() == cudaSuccess, "kernelHistogram1D failed"); #define HANDLE_SWITCH_CASE(mType, getOp) \ switch (mType) { \ case CUDAHistogramMemoryType::SHARED: \ HANDLE_CASE(CUDAHistogramMemoryType::SHARED, getOp, sharedMem); \ break; \ case CUDAHistogramMemoryType::MULTI_BLOCK: \ HANDLE_CASE(CUDAHistogramMemoryType::MULTI_BLOCK, getOp, 0); \ break; \ default: \ HANDLE_CASE(CUDAHistogramMemoryType::GLOBAL, getOp, 0); \ } inline int64_t getFreeGlobalMemory() { // no need to use `cudaSetDevice` size_t free_mem, total_mem; cudaMemGetInfo(&free_mem, &total_mem); AT_ASSERTM( cudaGetLastError() == cudaSuccess, "CUDA_tensor_histogram failed to get free global memory"); return static_cast<int64_t>(free_mem); } /* Calculate the frequency of the input values. `a` contains the final output or the histogram. Input `b` is assumed to be 1-D non-negative int array. `c` optionally contains the weight vector. See `help torch.bincount` for details on the math. 
3 implementations based of input size and memory usage: case: #bins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM and enough shared mem SHARED: Each block atomically adds to it's own **shared** hist copy, then atomically updates the global tensor. case: #bins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM and enough global mem MULTI_BLOCK: Each block atomically adds to it's own **global** hist copy, then atomically updates the global tensor. case: THRESH_NUMBER_BINS_FOR_GLOBAL_MEM <= #bins GLOBAL: all threads atomically update to a single **global** hist copy. */ template <typename output_t, typename input_t, bool HasWeights> bool CUDA_tensor_histogram( at::Tensor a, /* output */ at::Tensor b, /* input */ at::Tensor c, /* weights(optional) */ int64_t nbins, input_t minvalue, input_t maxvalue, TensorArgType aType = TensorArgType::ReadWrite, TensorArgType bType = TensorArgType::ReadOnly, TensorArgType cType = TensorArgType::ReadOnly) { checkBackend("CUDA_tensor_histogram", {a, b}, Backend::CUDA); if (HasWeights) { checkBackend("CUDA_tensor_histogram", {c}, Backend::CUDA); } auto totalElements = b.numel(); const dim3 block = getApplyBlock(); dim3 grid; int64_t curDevice = current_device(); if (curDevice == -1 || !getApplyGrid(totalElements, grid, curDevice)) { return false; } CUDAHistogramMemoryType memType = CUDAHistogramMemoryType::GLOBAL; auto maxSharedMem = getCurrentDeviceProperties()->sharedMemPerBlock; auto sharedMem = nbins * sizeof(output_t) + 8; // 8 guard bytes auto maxGlobalMem = getFreeGlobalMemory(); auto multiBlockMem = nbins * grid.x * sizeof(output_t) + 8; // 8 guard bytes // determine memory type to use in the kernel if (nbins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM && sharedMem < maxSharedMem) { memType = CUDAHistogramMemoryType::SHARED; } else if ( nbins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM && multiBlockMem < (maxGlobalMem / 2)) { // check against half of free mem to be extra safe // due to cached allocator, we may anyway have slightly more free mem memType = CUDAHistogramMemoryType::MULTI_BLOCK; } // alloc memory for MULTI_BLOCK using IndexType = int64_t; auto aInfo = detail::getTensorInfo<output_t, IndexType>(a); auto bInfo = detail::getTensorInfo<input_t, IndexType>(b); detail::TensorInfo<output_t, IndexType> pInfo(nullptr, 0, {}, {}); Tensor partial_output; if (memType == CUDAHistogramMemoryType::MULTI_BLOCK) { partial_output = native::zeros({grid.x, nbins}, a.options()); pInfo = detail::getTensorInfo<output_t, IndexType>(partial_output); } if (HasWeights) { auto cInfo = detail::getTensorInfo<output_t, IndexType>(c); const auto getWeightsOp = [cInfo] __device__(IndexType cIndex) { const IndexType cOffset = detail::IndexToOffset<output_t, IndexType, 1>::get(cIndex, cInfo); return cInfo.data[cOffset]; }; HANDLE_SWITCH_CASE(memType, getWeightsOp) } else { static const auto getDummyOp = [] __device__(IndexType) { return 1L; }; HANDLE_SWITCH_CASE(memType, getDummyOp) } return true; } #undef HANDLE_CASE #undef HANDLE_SWITCH_CASE #undef FOR_KERNEL_LOOP #undef THRESH_NUMBER_BINS_FOR_GLOBAL_MEM #undef THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM } // namespace cuda namespace { ///////////////// bincount ///////////////// template <typename input_t, typename weights_t> Tensor _bincount_cuda_template( const Tensor& self, const Tensor& weights, int64_t minlength) { if (minlength < 0) { AT_ERROR("minlength should be >= 0"); } if (self.dim() == 1 && self.numel() == 0) { return native::zeros({minlength}, device(kCUDA).dtype(kLong)); } if (self.dim() != 1 || (!std::is_same<input_t, uint8_t>::value && 
*self.min().cpu().data_ptr<input_t>() < 0)) { AT_ERROR("bincount only supports 1-d non-negative integral inputs."); } bool has_weights = weights.defined(); if (has_weights && weights.size(0) != self.size(0)) { AT_ERROR("input and weights should have the same length"); } const int64_t nbins = std::max(*self.max().cpu().data_ptr<input_t>() + (int64_t)1, minlength); const input_t minvalue = 0; const input_t maxvalue = nbins; // alloc output counter on GPU Tensor output; if (has_weights) { output = native::zeros({nbins}, weights.options()); auto ret = cuda::CUDA_tensor_histogram<weights_t, input_t, true>( output, self, weights, nbins, minvalue, maxvalue); } else { output = native::zeros({nbins}, device(DeviceType::CUDA).dtype(kLong)); auto ret = cuda::CUDA_tensor_histogram<int64_t, input_t, false>( output, self, weights, nbins, minvalue, maxvalue); } return output; } ///////////////// histc ///////////////// template <typename input_t> Tensor _histc_cuda_template( const Tensor& self, int64_t nbins, input_t min, input_t max) { if (nbins <= 0) { AT_ERROR("bins must be > 0"); } Tensor output = native::zeros({nbins}, device(DeviceType::CUDA).dtype(self.scalar_type())); input_t minvalue = min; input_t maxvalue = max; if (min == max) { minvalue = *self.min().cpu().data_ptr<input_t>(); maxvalue = *self.max().cpu().data_ptr<input_t>(); } if (minvalue == maxvalue) { minvalue = minvalue - 1; maxvalue = maxvalue + 1; } #ifndef __HIP_PLATFORM_HCC__ TORCH_CHECK( !(THCNumerics<input_t>::isinf(minvalue) || THCNumerics<input_t>::isinf(maxvalue) || THCNumerics<input_t>::isnan(minvalue) || THCNumerics<input_t>::isnan(maxvalue)), "range of [", minvalue, ", ", maxvalue, "] is not finite"); #else TORCH_CHECK( !(std::isinf(minvalue) || std::isinf(maxvalue) || std::isnan(minvalue) || std::isnan(maxvalue)), "range of [", minvalue, ", ", maxvalue, "] is not finite"); #endif TORCH_CHECK(minvalue < maxvalue, "max must be larger than min"); auto ret = cuda::CUDA_tensor_histogram<input_t, input_t, false>( output, self, Tensor(), nbins, minvalue, maxvalue); return output; } } // namespace namespace native { Tensor _bincount_cuda( const Tensor& self, const Tensor& weights, int64_t minlength) { return AT_DISPATCH_INTEGRAL_TYPES(self.scalar_type(), "bincount_cuda", [&] { const auto scalar = weights.scalar_type(); if (scalar == ScalarType::Undefined || scalar == ScalarType::Float) return _bincount_cuda_template<scalar_t, float>(self, weights, minlength); return _bincount_cuda_template<scalar_t, double>( self, weights.to(kDouble), minlength); }); } Tensor _histc_cuda( const Tensor& self, int64_t nbins, Scalar min, Scalar max) { if (self.scalar_type() == ScalarType::Half) { AT_ERROR("HalfTensor is not supported"); } return AT_DISPATCH_ALL_TYPES(self.scalar_type(), "histc", [&] { return _histc_cuda_template<scalar_t>(self, nbins, min.to<scalar_t>(), max.to<scalar_t>()); }); } Tensor& _histc_out_cuda(Tensor& result, const Tensor& self, int64_t bins, Scalar min, Scalar max) { auto ret = _histc_cuda(self, bins, min, max); result.resize_as_(ret); result.copy_(ret); return result; } } // namespace native } // namespace at
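The comment block above CUDA_tensor_histogram lists three strategies; in the SHARED case each block accumulates into a private shared-memory histogram and then merges it into the global tensor with one atomic per bin. The following stripped-down, self-contained kernel illustrates only that pattern; the name shared_hist, the int-valued input, and the launch sizes are invented, and this is not the ATen kernel itself.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void shared_hist(const int* data, int n, unsigned int* global_hist, int nbins) {
    extern __shared__ unsigned int local[];   // one private histogram per block
    for (int b = threadIdx.x; b < nbins; b += blockDim.x) local[b] = 0;
    __syncthreads();
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) {
        int bin = data[i];
        if (bin >= 0 && bin < nbins) atomicAdd(&local[bin], 1u);  // fast shared-memory atomics
    }
    __syncthreads();
    for (int b = threadIdx.x; b < nbins; b += blockDim.x)
        atomicAdd(&global_hist[b], local[b]);  // one global atomic per bin per block
}

int main() {
    const int n = 1 << 16, nbins = 64;
    int* h_data = new int[n];
    for (int i = 0; i < n; ++i) h_data[i] = i % nbins;
    int* d_data = nullptr; unsigned int* d_hist = nullptr;
    cudaMalloc(&d_data, n * sizeof(int));
    cudaMalloc(&d_hist, nbins * sizeof(unsigned int));
    cudaMemset(d_hist, 0, nbins * sizeof(unsigned int));
    cudaMemcpy(d_data, h_data, n * sizeof(int), cudaMemcpyHostToDevice);
    shared_hist<<<64, 256, nbins * sizeof(unsigned int)>>>(d_data, n, d_hist, nbins);
    unsigned int h_hist[nbins];
    cudaMemcpy(h_hist, d_hist, nbins * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    printf("bin 0 holds %u values (expected %d)\n", h_hist[0], n / nbins);
    cudaFree(d_data); cudaFree(d_hist); delete[] h_data;
    return 0;
}

Most of the atomic traffic then stays in shared memory, which is why the code above prefers SHARED whenever the nbins-sized histogram fits under sharedMemPerBlock.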
1075ee97f782063170f40b5831ae0c07c587a486.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/extrema.h> #include <thrust/sort.h> //#include <locale.h> //#include <map> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include "NVStrings.h" #include "NVStringsImpl.h" #include "custring_view.cuh" #include "custring.cuh" #include "unicode/unicode_flags.h" #include "unicode/charcases.h" // void printCudaError( hipError_t err, const char* prefix ) { if( err == hipSuccess ) return; fprintf(stderr,"%s: %s(%d):%s\n",prefix,hipGetErrorName(err),(int)err,hipGetErrorString(err)); //hipError_t err2 = hipGetLastError(); // clears the error too //if( err != err2 ) // fprintf(stderr," %s:(%d):%s\n",hipGetErrorName(err2),(int)err2,hipGetErrorString(err2)); } // char32_t* to_char32( const char* ca ) { unsigned int size = (unsigned int)strlen(ca); unsigned int count = custring_view::chars_in_string(ca,size); char32_t* rtn = new char32_t[count+1]; char32_t* optr = rtn; const char* iptr = ca; for( int i=0; i < size; ++i ) { Char oc = 0; unsigned int cw = custring_view::char_to_Char(iptr,oc); iptr += cw; i += cw - 1; *optr++ = oc; } rtn[count] = 0; return rtn; } // static unsigned char* d_unicode_flags = nullptr; unsigned char* get_unicode_flags() { if( !d_unicode_flags ) { // leave this out of RMM since it is never freed hipMalloc(&d_unicode_flags,65536); hipMemcpy(d_unicode_flags,unicode_flags,65536,hipMemcpyHostToDevice); } return d_unicode_flags; } static unsigned short* d_charcases = nullptr; unsigned short* get_charcases() { if( !d_charcases ) { // leave this out of RMM since it is never freed hipMalloc(&d_charcases,65536*sizeof(unsigned short)); hipMemcpy(d_charcases,charcases,65536*sizeof(unsigned short),hipMemcpyHostToDevice); } return d_charcases; } // NVStringsImpl::NVStringsImpl(unsigned int count) : bufferSize(0), memoryBuffer(nullptr), bIpcHandle(false), stream_id(0) { pList = new rmm::device_vector<custring_view*>(count,nullptr); } NVStringsImpl::~NVStringsImpl() { if( memoryBuffer && !bIpcHandle ) RMM_FREE(memoryBuffer,0); if( bIpcHandle ) hipIpcCloseMemHandle(memoryBuffer); memoryBuffer = nullptr; delete pList; pList = nullptr; bufferSize = 0; } char* NVStringsImpl::createMemoryFor( size_t* d_lengths ) { unsigned int count = (unsigned int)pList->size(); auto execpol = rmm::exec_policy(stream_id); size_t outsize = thrust::reduce(execpol->on(stream_id), d_lengths, d_lengths+count); if( outsize==0 ) return 0; // all sizes are zero RMM_ALLOC(&memoryBuffer,outsize,0); bufferSize = outsize; return memoryBuffer; } // int NVStrings_init_from_strings(NVStringsImpl* pImpl, const char** strs, unsigned int count ) { hipError_t err = hipSuccess; auto execpol = rmm::exec_policy(0); // first compute the size of each 
string size_t nbytes = 0; thrust::host_vector<size_t> hoffsets(count+1,0); //hoffsets[0] = 0; --already set by this ----^ thrust::host_vector<size_t> hlengths(count,0); for( unsigned int idx=0; idx < count; ++idx ) { const char* str = strs[idx]; size_t len = ( str ? (strlen(str)+1) : 0 ); size_t nsz = len; // include null-terminator if( len > 0 ) // len=0 is null, len=1 is empty string { hlengths[idx] = len; // just the string length int nchars = custring_view::chars_in_string(str,(int)len-1); nsz = custring_view::alloc_size((int)len-1,nchars); } nsz = ALIGN_SIZE(nsz); nbytes += nsz; hoffsets[idx+1] = nbytes; } // check if they are all null if( nbytes==0 ) return (int)err; // Host serialization unsigned int cheat = 0;//sizeof(custring_view); char* h_flatstrs = (char*)malloc(nbytes); for( unsigned int idx = 0; idx < count; ++idx ) memcpy(h_flatstrs + hoffsets[idx] + cheat, strs[idx], hlengths[idx]); // copy to device memory char* d_flatstrs = nullptr; rmmError_t rerr = RMM_ALLOC(&d_flatstrs,nbytes,0); if( rerr == RMM_SUCCESS ) err = hipMemcpy(d_flatstrs, h_flatstrs, nbytes, hipMemcpyHostToDevice); free(h_flatstrs); // no longer needed if( err != hipSuccess ) { fprintf(stderr,"nvs-sts: alloc/copy %'lu bytes\n",nbytes); printCudaError(err); return (int)err; } // copy offsets and lengths to device memory rmm::device_vector<size_t> offsets(hoffsets); rmm::device_vector<size_t> lengths(hlengths); size_t* d_offsets = offsets.data().get(); size_t* d_lengths = lengths.data().get(); // initialize custring objects in device memory custring_view_array d_strings = pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_flatstrs, d_offsets, d_lengths, cheat, d_strings] __device__(unsigned int idx){ size_t len = d_lengths[idx]; if( len < 1 ) return; // null string size_t offset = d_offsets[idx]; char* ptr = d_flatstrs + offset; char* str = ptr + cheat; d_strings[idx] = custring_view::create_from(ptr,str,(int)len-1); }); // //err = hipDeviceSynchronize(); //if( err!=hipSuccess ) //{ // fprintf(stderr,"nvs-sts: sync=%d copy %'u strings\n",(int)err,count); // printCudaError(err); //} pImpl->setMemoryBuffer(d_flatstrs,nbytes); return (int)err; } // build strings from array of device pointers and sizes int NVStrings_init_from_indexes( NVStringsImpl* pImpl, std::pair<const char*,size_t>* indexes, unsigned int count, bool bdevmem, NVStrings::sorttype stype ) { hipError_t err = hipSuccess; auto execpol = rmm::exec_policy(0); thrust::pair<const char*,size_t>* d_indexes = (thrust::pair<const char*,size_t>*)indexes; if( !bdevmem ) { RMM_ALLOC(&d_indexes,sizeof(std::pair<const char*,size_t>)*count,0); err = hipMemcpy(d_indexes,indexes,sizeof(std::pair<const char*,size_t>)*count,hipMemcpyHostToDevice); } else { // Lets check what we got from the caller by reading all the memory once. // This is wasteful but I cannot keep people from passing bad data: // https://github.com/rapidsai/custrings/issues/191 // This check cannot be done inline below because libraries like thrust may terminate the process // when illegal pointers are passed in. Here we do a pre-check, handle the error and return it. // Do not put any other thrust calls before this line in this method. 
try { thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_indexes] __device__ (unsigned int idx) { const char* str = d_indexes[idx].first; size_t bytes = d_indexes[idx].second; if( str ) custring_view::chars_in_string(str,(unsigned int)bytes); }); err = hipDeviceSynchronize(); // do not remove this } catch( thrust::system_error& exc ) { err = (hipError_t)exc.code().value(); //printf("exception: %d: %s\n", (int)err, e.what()); } } if( err != hipSuccess ) { printCudaError(err,"nvs-idx: checking parms"); if( !bdevmem ) RMM_FREE(d_indexes,0); return (int)err; } // sort the list - helps reduce divergence if( stype ) { thrust::sort(execpol->on(0), d_indexes, d_indexes + count, [stype] __device__( thrust::pair<const char*,size_t>& lhs, thrust::pair<const char*,size_t>& rhs ) { if( lhs.first==0 || rhs.first==0 ) return rhs.first!=0; // null < non-null int diff = 0; if( stype & NVStrings::length ) diff = (unsigned int)(lhs.second - rhs.second); if( diff==0 && (stype & NVStrings::name) ) diff = custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second); return (diff < 0); }); } // first get the size we need to store these strings rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_indexes, d_sizes] __device__ (unsigned int idx) { const char* str = d_indexes[idx].first; size_t bytes = d_indexes[idx].second; if( str ) d_sizes[idx] = ALIGN_SIZE(custring_view::alloc_size((char*)str,(int)bytes)); }); // allocate device memory size_t nbytes = thrust::reduce(execpol->on(0),sizes.begin(),sizes.end()); //printf("nvs-idx: %'lu bytes\n",nbytes); if( nbytes==0 ) return 0; // done, all the strings were null char* d_flatdstrs = nullptr; rmmError_t rerr = RMM_ALLOC(&d_flatdstrs,nbytes,0); if( rerr != RMM_SUCCESS ) { fprintf(stderr,"nvs-idx: RMM_ALLOC(%p,%lu)=%d\n", d_flatdstrs,nbytes,(int)rerr); //printCudaError(err); if( !bdevmem ) RMM_FREE(d_indexes,0); return (int)err; } // build offsets array rmm::device_vector<size_t> offsets(count,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // now build the strings vector custring_view_array d_strings = pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_indexes, d_flatdstrs, d_offsets, d_sizes, d_strings] __device__(unsigned int idx){ // add string to internal vector array const char* str = d_indexes[idx].first; size_t bytes = d_indexes[idx].second; size_t offset = d_offsets[idx]; char* ptr = d_flatdstrs + offset; custring_view* dstr = 0; if( str ) dstr = custring_view::create_from(ptr,(char*)str,(int)bytes); d_strings[idx] = dstr; d_sizes[idx] = bytes; }); // pImpl->setMemoryBuffer(d_flatdstrs,nbytes); if( !bdevmem ) RMM_FREE(d_indexes,0); return (int)err; } // build strings from array of device pointers and sizes int NVStrings_init_from_offsets( NVStringsImpl* pImpl, const char* strs, int count, const int* offsets, const unsigned char* bitmask, int nulls ) { if( count==nulls ) return 0; // if all are nulls then we are done setlocale(LC_NUMERIC, ""); hipError_t err = hipSuccess; auto execpol = rmm::exec_policy(0); // first compute the size of each string size_t nbytes = 0; thrust::host_vector<size_t> hoffsets(count+1,0); thrust::host_vector<size_t> hlengths(count,0); for( unsigned int idx=0; idx < count; ++idx ) { int offset = 
offsets[idx]; int len = offsets[idx+1] - offset; const char* str = strs + offset; int nchars = custring_view::chars_in_string(str,len); int bytes = custring_view::alloc_size(len,nchars); if( bitmask && ((bitmask[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec bytes = 0; hlengths[idx] = len; nbytes += ALIGN_SIZE(bytes); hoffsets[idx+1] = nbytes; } if( nbytes==0 ) return 0; // should not happen // serialize host memory into a new buffer unsigned int cheat = 0;//sizeof(custring_view); char* h_flatstrs = (char*)malloc(nbytes); for( unsigned int idx = 0; idx < count; ++idx ) memcpy(h_flatstrs + hoffsets[idx] + cheat, strs + offsets[idx], hlengths[idx]); // copy whole thing to device memory char* d_flatstrs = nullptr; rmmError_t rerr = RMM_ALLOC(&d_flatstrs,nbytes,0); if( rerr == RMM_SUCCESS ) err = hipMemcpy(d_flatstrs, h_flatstrs, nbytes, hipMemcpyHostToDevice); free(h_flatstrs); // no longer needed if( err != hipSuccess ) { fprintf(stderr,"nvs-ofs: alloc/copy %'lu bytes\n",nbytes); printCudaError(err); return (int)err; } // copy offsets and lengths to device memory rmm::device_vector<size_t> doffsets(hoffsets); rmm::device_vector<size_t> dlengths(hlengths); size_t* d_offsets = doffsets.data().get(); size_t* d_lengths = dlengths.data().get(); // initialize custring objects in device memory custring_view_array d_strings = pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_flatstrs, d_offsets, d_lengths, cheat, d_strings] __device__(unsigned int idx){ size_t len = d_lengths[idx]; size_t offset = d_offsets[idx]; size_t size = d_offsets[idx+1] - offset; if( size < 1 ) return; // null string char* ptr = d_flatstrs + offset; char* str = ptr + cheat; d_strings[idx] = custring_view::create_from(ptr,str,len); }); // pImpl->setMemoryBuffer(d_flatstrs,nbytes); return (int)err;; } int NVStrings_copy_strings( NVStringsImpl* pImpl, std::vector<NVStrings*>& strslist ) { auto execpol = rmm::exec_policy(0); auto pList = pImpl->pList; unsigned int count = (unsigned int)pList->size(); size_t nbytes = 0; for( auto itr=strslist.begin(); itr!=strslist.end(); itr++ ) nbytes += (*itr)->memsize(); custring_view_array d_results = pList->data().get(); char* d_buffer = nullptr; RMM_ALLOC(&d_buffer,nbytes,0); size_t offset = 0; size_t memoffset = 0; for( auto itr=strslist.begin(); itr!=strslist.end(); itr++ ) { NVStrings* strs = *itr; unsigned int size = strs->size(); size_t memsize = strs->memsize(); if( size==0 ) continue; rmm::device_vector<custring_view*> strings(size,nullptr); custring_view** d_strings = strings.data().get(); strs->create_custring_index(d_strings); if( memsize ) { // checking pointer values to find the first non-null one custring_view** first = thrust::min_element(execpol->on(0),d_strings,d_strings+size, [] __device__ (custring_view* lhs, custring_view* rhs) { return (lhs && rhs) ? 
(lhs < rhs) : rhs==0; }); char* baseaddr = nullptr; hipError_t err = hipMemcpy(&baseaddr,first,sizeof(custring_view*),hipMemcpyDeviceToHost); if( err!=hipSuccess ) fprintf(stderr, "copy-strings: hipMemcpy(%p,%p,%d)=%d\n",&baseaddr,first,(int)sizeof(custring_view*),(int)err); // copy string memory char* buffer = d_buffer + memoffset; err = hipMemcpy((void*)buffer,(void*)baseaddr,memsize,hipMemcpyDeviceToDevice); if( err!=hipSuccess ) fprintf(stderr, "copy-strings: hipMemcpy(%p,%p,%ld)=%d\n",buffer,baseaddr,memsize,(int)err); // adjust pointers custring_view_array results = d_results + offset; thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), size, [buffer, baseaddr, d_strings, results] __device__(unsigned int idx){ char* dstr = (char*)d_strings[idx]; if( !dstr ) return; size_t diff = dstr - baseaddr; char* newaddr = buffer + diff; results[idx] = (custring_view*)newaddr; }); } offset += size; memoffset += memsize; } // pImpl->setMemoryBuffer(d_buffer,nbytes); return count; } int NVStrings_fixup_pointers( NVStringsImpl* pImpl, char* baseaddr ) { auto execpol = rmm::exec_policy(0); auto pList = pImpl->pList; unsigned int count = (unsigned int)pList->size(); custring_view_array d_strings = pImpl->getStringsPtr(); //---- the following can be used to find the base-address of the original memory ---- //---- instead of passing it across the ipc boundary; leaving it here for now ---- //custring_view** first = thrust::min_element(execpol->on(0),d_strings,d_strings+count, // [] __device__ (custring_view* lhs, custring_view* rhs) { // return (lhs && rhs) ? (lhs < rhs) : rhs==0; // }); //hipError_t err = hipMemcpy(&baseaddr,first,sizeof(custring_view*),hipMemcpyDeviceToHost); //if( err!=hipSuccess ) // fprintf(stderr, "fixup: hipMemcpy(%p,%p,%d)=%d\n",&baseaddr,first,(int)sizeof(custring_view*),(int)err); // char* buffer = pImpl->getMemoryPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [buffer, baseaddr, d_strings] __device__(unsigned int idx){ char* dstr = (char*)d_strings[idx]; if( !dstr ) return; size_t diff = dstr - baseaddr; char* newaddr = buffer + diff; d_strings[idx] = (custring_view*)newaddr; }); //hipError_t err = hipDeviceSynchronize(); //if( err!=hipSuccess ) // printCudaError(err,"nvs-fixup"); return count; }
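NVStrings_copy_strings and NVStrings_fixup_pointers above both rely on one invariant: copying the flat character buffer preserves each custring_view's offset from the buffer base, so a stale device pointer can be rebased as new_base + (old_ptr - old_base). A tiny host-only sketch of that arithmetic, with an invented buffer layout:

#include <cstdio>
#include <cstring>

int main() {
    char old_buf[32] = "first\0second";        // two strings packed into one flat buffer
    char new_buf[32];
    memcpy(new_buf, old_buf, sizeof(old_buf)); // relocate the whole buffer, as the device copy does

    const char* old_ptr = old_buf + 6;         // stale pointer to "second" inside the old buffer
    size_t diff = old_ptr - old_buf;           // the offset survives the copy unchanged
    const char* new_ptr = new_buf + diff;      // rebased pointer into the new buffer

    printf("old: %s, rebased: %s\n", old_ptr, new_ptr);
    return 0;
}

This is the same diff/newaddr computation performed inside the thrust::for_each_n lambdas above, just applied on the host to ordinary arrays.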
1075ee97f782063170f40b5831ae0c07c587a486.cu
/* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/extrema.h> #include <thrust/sort.h> //#include <locale.h> //#include <map> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include "NVStrings.h" #include "NVStringsImpl.h" #include "custring_view.cuh" #include "custring.cuh" #include "unicode/unicode_flags.h" #include "unicode/charcases.h" // void printCudaError( cudaError_t err, const char* prefix ) { if( err == cudaSuccess ) return; fprintf(stderr,"%s: %s(%d):%s\n",prefix,cudaGetErrorName(err),(int)err,cudaGetErrorString(err)); //cudaError_t err2 = cudaGetLastError(); // clears the error too //if( err != err2 ) // fprintf(stderr," %s:(%d):%s\n",cudaGetErrorName(err2),(int)err2,cudaGetErrorString(err2)); } // char32_t* to_char32( const char* ca ) { unsigned int size = (unsigned int)strlen(ca); unsigned int count = custring_view::chars_in_string(ca,size); char32_t* rtn = new char32_t[count+1]; char32_t* optr = rtn; const char* iptr = ca; for( int i=0; i < size; ++i ) { Char oc = 0; unsigned int cw = custring_view::char_to_Char(iptr,oc); iptr += cw; i += cw - 1; *optr++ = oc; } rtn[count] = 0; return rtn; } // static unsigned char* d_unicode_flags = nullptr; unsigned char* get_unicode_flags() { if( !d_unicode_flags ) { // leave this out of RMM since it is never freed cudaMalloc(&d_unicode_flags,65536); cudaMemcpy(d_unicode_flags,unicode_flags,65536,cudaMemcpyHostToDevice); } return d_unicode_flags; } static unsigned short* d_charcases = nullptr; unsigned short* get_charcases() { if( !d_charcases ) { // leave this out of RMM since it is never freed cudaMalloc(&d_charcases,65536*sizeof(unsigned short)); cudaMemcpy(d_charcases,charcases,65536*sizeof(unsigned short),cudaMemcpyHostToDevice); } return d_charcases; } // NVStringsImpl::NVStringsImpl(unsigned int count) : bufferSize(0), memoryBuffer(nullptr), bIpcHandle(false), stream_id(0) { pList = new rmm::device_vector<custring_view*>(count,nullptr); } NVStringsImpl::~NVStringsImpl() { if( memoryBuffer && !bIpcHandle ) RMM_FREE(memoryBuffer,0); if( bIpcHandle ) cudaIpcCloseMemHandle(memoryBuffer); memoryBuffer = nullptr; delete pList; pList = nullptr; bufferSize = 0; } char* NVStringsImpl::createMemoryFor( size_t* d_lengths ) { unsigned int count = (unsigned int)pList->size(); auto execpol = rmm::exec_policy(stream_id); size_t outsize = thrust::reduce(execpol->on(stream_id), d_lengths, d_lengths+count); if( outsize==0 ) return 0; // all sizes are zero RMM_ALLOC(&memoryBuffer,outsize,0); bufferSize = outsize; return memoryBuffer; } // int NVStrings_init_from_strings(NVStringsImpl* pImpl, const char** strs, unsigned int count ) { cudaError_t err = cudaSuccess; auto execpol = rmm::exec_policy(0); // first compute the size of each string size_t nbytes = 0; 
thrust::host_vector<size_t> hoffsets(count+1,0); //hoffsets[0] = 0; --already set by this ----^ thrust::host_vector<size_t> hlengths(count,0); for( unsigned int idx=0; idx < count; ++idx ) { const char* str = strs[idx]; size_t len = ( str ? (strlen(str)+1) : 0 ); size_t nsz = len; // include null-terminator if( len > 0 ) // len=0 is null, len=1 is empty string { hlengths[idx] = len; // just the string length int nchars = custring_view::chars_in_string(str,(int)len-1); nsz = custring_view::alloc_size((int)len-1,nchars); } nsz = ALIGN_SIZE(nsz); nbytes += nsz; hoffsets[idx+1] = nbytes; } // check if they are all null if( nbytes==0 ) return (int)err; // Host serialization unsigned int cheat = 0;//sizeof(custring_view); char* h_flatstrs = (char*)malloc(nbytes); for( unsigned int idx = 0; idx < count; ++idx ) memcpy(h_flatstrs + hoffsets[idx] + cheat, strs[idx], hlengths[idx]); // copy to device memory char* d_flatstrs = nullptr; rmmError_t rerr = RMM_ALLOC(&d_flatstrs,nbytes,0); if( rerr == RMM_SUCCESS ) err = cudaMemcpy(d_flatstrs, h_flatstrs, nbytes, cudaMemcpyHostToDevice); free(h_flatstrs); // no longer needed if( err != cudaSuccess ) { fprintf(stderr,"nvs-sts: alloc/copy %'lu bytes\n",nbytes); printCudaError(err); return (int)err; } // copy offsets and lengths to device memory rmm::device_vector<size_t> offsets(hoffsets); rmm::device_vector<size_t> lengths(hlengths); size_t* d_offsets = offsets.data().get(); size_t* d_lengths = lengths.data().get(); // initialize custring objects in device memory custring_view_array d_strings = pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_flatstrs, d_offsets, d_lengths, cheat, d_strings] __device__(unsigned int idx){ size_t len = d_lengths[idx]; if( len < 1 ) return; // null string size_t offset = d_offsets[idx]; char* ptr = d_flatstrs + offset; char* str = ptr + cheat; d_strings[idx] = custring_view::create_from(ptr,str,(int)len-1); }); // //err = cudaDeviceSynchronize(); //if( err!=cudaSuccess ) //{ // fprintf(stderr,"nvs-sts: sync=%d copy %'u strings\n",(int)err,count); // printCudaError(err); //} pImpl->setMemoryBuffer(d_flatstrs,nbytes); return (int)err; } // build strings from array of device pointers and sizes int NVStrings_init_from_indexes( NVStringsImpl* pImpl, std::pair<const char*,size_t>* indexes, unsigned int count, bool bdevmem, NVStrings::sorttype stype ) { cudaError_t err = cudaSuccess; auto execpol = rmm::exec_policy(0); thrust::pair<const char*,size_t>* d_indexes = (thrust::pair<const char*,size_t>*)indexes; if( !bdevmem ) { RMM_ALLOC(&d_indexes,sizeof(std::pair<const char*,size_t>)*count,0); err = cudaMemcpy(d_indexes,indexes,sizeof(std::pair<const char*,size_t>)*count,cudaMemcpyHostToDevice); } else { // Lets check what we got from the caller by reading all the memory once. // This is wasteful but I cannot keep people from passing bad data: // https://github.com/rapidsai/custrings/issues/191 // This check cannot be done inline below because libraries like thrust may terminate the process // when illegal pointers are passed in. Here we do a pre-check, handle the error and return it. // Do not put any other thrust calls before this line in this method. 
try { thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_indexes] __device__ (unsigned int idx) { const char* str = d_indexes[idx].first; size_t bytes = d_indexes[idx].second; if( str ) custring_view::chars_in_string(str,(unsigned int)bytes); }); err = cudaDeviceSynchronize(); // do not remove this } catch( thrust::system_error& exc ) { err = (cudaError_t)exc.code().value(); //printf("exception: %d: %s\n", (int)err, e.what()); } } if( err != cudaSuccess ) { printCudaError(err,"nvs-idx: checking parms"); if( !bdevmem ) RMM_FREE(d_indexes,0); return (int)err; } // sort the list - helps reduce divergence if( stype ) { thrust::sort(execpol->on(0), d_indexes, d_indexes + count, [stype] __device__( thrust::pair<const char*,size_t>& lhs, thrust::pair<const char*,size_t>& rhs ) { if( lhs.first==0 || rhs.first==0 ) return rhs.first!=0; // null < non-null int diff = 0; if( stype & NVStrings::length ) diff = (unsigned int)(lhs.second - rhs.second); if( diff==0 && (stype & NVStrings::name) ) diff = custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second); return (diff < 0); }); } // first get the size we need to store these strings rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_indexes, d_sizes] __device__ (unsigned int idx) { const char* str = d_indexes[idx].first; size_t bytes = d_indexes[idx].second; if( str ) d_sizes[idx] = ALIGN_SIZE(custring_view::alloc_size((char*)str,(int)bytes)); }); // allocate device memory size_t nbytes = thrust::reduce(execpol->on(0),sizes.begin(),sizes.end()); //printf("nvs-idx: %'lu bytes\n",nbytes); if( nbytes==0 ) return 0; // done, all the strings were null char* d_flatdstrs = nullptr; rmmError_t rerr = RMM_ALLOC(&d_flatdstrs,nbytes,0); if( rerr != RMM_SUCCESS ) { fprintf(stderr,"nvs-idx: RMM_ALLOC(%p,%lu)=%d\n", d_flatdstrs,nbytes,(int)rerr); //printCudaError(err); if( !bdevmem ) RMM_FREE(d_indexes,0); return (int)err; } // build offsets array rmm::device_vector<size_t> offsets(count,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // now build the strings vector custring_view_array d_strings = pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_indexes, d_flatdstrs, d_offsets, d_sizes, d_strings] __device__(unsigned int idx){ // add string to internal vector array const char* str = d_indexes[idx].first; size_t bytes = d_indexes[idx].second; size_t offset = d_offsets[idx]; char* ptr = d_flatdstrs + offset; custring_view* dstr = 0; if( str ) dstr = custring_view::create_from(ptr,(char*)str,(int)bytes); d_strings[idx] = dstr; d_sizes[idx] = bytes; }); // pImpl->setMemoryBuffer(d_flatdstrs,nbytes); if( !bdevmem ) RMM_FREE(d_indexes,0); return (int)err; } // build strings from array of device pointers and sizes int NVStrings_init_from_offsets( NVStringsImpl* pImpl, const char* strs, int count, const int* offsets, const unsigned char* bitmask, int nulls ) { if( count==nulls ) return 0; // if all are nulls then we are done setlocale(LC_NUMERIC, ""); cudaError_t err = cudaSuccess; auto execpol = rmm::exec_policy(0); // first compute the size of each string size_t nbytes = 0; thrust::host_vector<size_t> hoffsets(count+1,0); thrust::host_vector<size_t> hlengths(count,0); for( unsigned int idx=0; idx < count; ++idx ) { int offset = 
offsets[idx]; int len = offsets[idx+1] - offset; const char* str = strs + offset; int nchars = custring_view::chars_in_string(str,len); int bytes = custring_view::alloc_size(len,nchars); if( bitmask && ((bitmask[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec bytes = 0; hlengths[idx] = len; nbytes += ALIGN_SIZE(bytes); hoffsets[idx+1] = nbytes; } if( nbytes==0 ) return 0; // should not happen // serialize host memory into a new buffer unsigned int cheat = 0;//sizeof(custring_view); char* h_flatstrs = (char*)malloc(nbytes); for( unsigned int idx = 0; idx < count; ++idx ) memcpy(h_flatstrs + hoffsets[idx] + cheat, strs + offsets[idx], hlengths[idx]); // copy whole thing to device memory char* d_flatstrs = nullptr; rmmError_t rerr = RMM_ALLOC(&d_flatstrs,nbytes,0); if( rerr == RMM_SUCCESS ) err = cudaMemcpy(d_flatstrs, h_flatstrs, nbytes, cudaMemcpyHostToDevice); free(h_flatstrs); // no longer needed if( err != cudaSuccess ) { fprintf(stderr,"nvs-ofs: alloc/copy %'lu bytes\n",nbytes); printCudaError(err); return (int)err; } // copy offsets and lengths to device memory rmm::device_vector<size_t> doffsets(hoffsets); rmm::device_vector<size_t> dlengths(hlengths); size_t* d_offsets = doffsets.data().get(); size_t* d_lengths = dlengths.data().get(); // initialize custring objects in device memory custring_view_array d_strings = pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_flatstrs, d_offsets, d_lengths, cheat, d_strings] __device__(unsigned int idx){ size_t len = d_lengths[idx]; size_t offset = d_offsets[idx]; size_t size = d_offsets[idx+1] - offset; if( size < 1 ) return; // null string char* ptr = d_flatstrs + offset; char* str = ptr + cheat; d_strings[idx] = custring_view::create_from(ptr,str,len); }); // pImpl->setMemoryBuffer(d_flatstrs,nbytes); return (int)err;; } int NVStrings_copy_strings( NVStringsImpl* pImpl, std::vector<NVStrings*>& strslist ) { auto execpol = rmm::exec_policy(0); auto pList = pImpl->pList; unsigned int count = (unsigned int)pList->size(); size_t nbytes = 0; for( auto itr=strslist.begin(); itr!=strslist.end(); itr++ ) nbytes += (*itr)->memsize(); custring_view_array d_results = pList->data().get(); char* d_buffer = nullptr; RMM_ALLOC(&d_buffer,nbytes,0); size_t offset = 0; size_t memoffset = 0; for( auto itr=strslist.begin(); itr!=strslist.end(); itr++ ) { NVStrings* strs = *itr; unsigned int size = strs->size(); size_t memsize = strs->memsize(); if( size==0 ) continue; rmm::device_vector<custring_view*> strings(size,nullptr); custring_view** d_strings = strings.data().get(); strs->create_custring_index(d_strings); if( memsize ) { // checking pointer values to find the first non-null one custring_view** first = thrust::min_element(execpol->on(0),d_strings,d_strings+size, [] __device__ (custring_view* lhs, custring_view* rhs) { return (lhs && rhs) ? 
(lhs < rhs) : rhs==0; }); char* baseaddr = nullptr; cudaError_t err = cudaMemcpy(&baseaddr,first,sizeof(custring_view*),cudaMemcpyDeviceToHost); if( err!=cudaSuccess ) fprintf(stderr, "copy-strings: cudaMemcpy(%p,%p,%d)=%d\n",&baseaddr,first,(int)sizeof(custring_view*),(int)err); // copy string memory char* buffer = d_buffer + memoffset; err = cudaMemcpy((void*)buffer,(void*)baseaddr,memsize,cudaMemcpyDeviceToDevice); if( err!=cudaSuccess ) fprintf(stderr, "copy-strings: cudaMemcpy(%p,%p,%ld)=%d\n",buffer,baseaddr,memsize,(int)err); // adjust pointers custring_view_array results = d_results + offset; thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), size, [buffer, baseaddr, d_strings, results] __device__(unsigned int idx){ char* dstr = (char*)d_strings[idx]; if( !dstr ) return; size_t diff = dstr - baseaddr; char* newaddr = buffer + diff; results[idx] = (custring_view*)newaddr; }); } offset += size; memoffset += memsize; } // pImpl->setMemoryBuffer(d_buffer,nbytes); return count; } int NVStrings_fixup_pointers( NVStringsImpl* pImpl, char* baseaddr ) { auto execpol = rmm::exec_policy(0); auto pList = pImpl->pList; unsigned int count = (unsigned int)pList->size(); custring_view_array d_strings = pImpl->getStringsPtr(); //---- the following can be used to find the base-address of the original memory ---- //---- instead of passing it across the ipc boundary; leaving it here for now ---- //custring_view** first = thrust::min_element(execpol->on(0),d_strings,d_strings+count, // [] __device__ (custring_view* lhs, custring_view* rhs) { // return (lhs && rhs) ? (lhs < rhs) : rhs==0; // }); //cudaError_t err = cudaMemcpy(&baseaddr,first,sizeof(custring_view*),cudaMemcpyDeviceToHost); //if( err!=cudaSuccess ) // fprintf(stderr, "fixup: cudaMemcpy(%p,%p,%d)=%d\n",&baseaddr,first,(int)sizeof(custring_view*),(int)err); // char* buffer = pImpl->getMemoryPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [buffer, baseaddr, d_strings] __device__(unsigned int idx){ char* dstr = (char*)d_strings[idx]; if( !dstr ) return; size_t diff = dstr - baseaddr; char* newaddr = buffer + diff; d_strings[idx] = (custring_view*)newaddr; }); //cudaError_t err = cudaDeviceSynchronize(); //if( err!=cudaSuccess ) // printCudaError(err,"nvs-fixup"); return count; }
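NVStrings_copy_strings and NVStrings_fixup_pointers above both rely on the same idiom: every custring_view* points somewhere inside one contiguous allocation, so relocating that allocation only requires adding a constant byte delta to each non-null pointer. A minimal sketch of that rebasing step, with illustrative names (rebase_pointers, d_ptrs) that are not part of the library; compiling a device lambda inside a host function needs nvcc's --extended-lambda flag:

#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>

// d_ptrs: device array of pointers that all point inside the old allocation.
// old_base/new_base: start addresses of the old and new allocations.
void rebase_pointers(char** d_ptrs, unsigned int count,
                     const char* old_base, char* new_base)
{
    thrust::for_each_n(thrust::device,
        thrust::make_counting_iterator<unsigned int>(0), count,
        [=] __device__ (unsigned int idx) {
            char* p = d_ptrs[idx];
            if (!p) return;                           // null strings stay null
            d_ptrs[idx] = new_base + (p - old_base);  // preserve the byte offset
        });
}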
ac9a4e776afcd5cae7a01e6510e7305dc42aecc9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <string> #include <xtensor/xarray.hpp> #include <xtensor/xio.hpp> #include <xtensor/xview.hpp> #include <xtensor/xnpy.hpp> #include <xtensor/xsort.hpp> #define BLOCK_HEIGHT 64 #define BLOCK_WIDTH 98 // GLOBAL VARIABLES uint LAYER_WIDTH = 512; uint MODEL_SEED = 52233264; // GPC_ID to get thread ID values struct GPC_ID { uint t_idx, t_idy, t_idz; uint cta_idx, cta_idy, cta_idz; uint warp_id, sm_id, grid_id; }; // https://stackoverflow.com/questions/612328/difference-between-struct-and-typedef-struct-in-c typedef struct GPC_ID gpc_id; // https://forums.developer.nvidia.com/t/any-way-to-know-on-which-sm-a-thread-is-running/19974/15 // https://www.codeproject.com/Articles/15971/Using-Inline-Assembly-in-C-C __device__ gpc_id get_gpcid(void) { gpc_id my_id; asm("mov.u32 %0, %tid.x;" : "=r"(my_id.t_idx) ); asm("mov.u32 %0, %tid.y;" : "=r"(my_id.t_idy) ); asm("mov.u32 %0, %tid.z;" : "=r"(my_id.t_idz) ); asm("mov.u32 %0, %warpid;" : "=r"(my_id.warp_id) ); asm("mov.u32 %0, %smid;" : "=r"(my_id.sm_id) ); asm("mov.u32 %0, %gridid;" : "=r"(my_id.grid_id) ); asm("mov.u32 %0, %ctaid.x;" : "=r"(my_id.cta_idx) ); asm("mov.u32 %0, %ctaid.y;" : "=r"(my_id.cta_idy) ); asm("mov.u32 %0, %ctaid.z;" : "=r"(my_id.cta_idz) ); return my_id; } __global__ void GET_ID(gpc_id *myid) { int tid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; // printf("mytid: %d", tid); myid[tid] = get_gpcid(); } // // Matrix-vector multiplication using CUDA // // Using shared memory and avoiding banking conflicts template<typename T> __global__ void MatMulKernel(T *out, T *in, T *a, const int matrixHeight, const int matrixWidth, gpc_id* myid) { // get variables for loop // copy section of b into shared mem // go through the threads vertically and sum them into a variable // atomic add these variables to the corresponding c index // looping is happening horizontally on the matrix // BLOCK_WIDTH is again horizontal // BLOCK_HEIGHT is going vertical // n_cols / BLOCK_WIDTH blocks horizontally // n_rows / BLOCK_HEIGHT block vertically // get variables for loop // variable for loop length: blockEltHeight __shared__ int blockElt; __shared__ int blockxInd; __shared__ int blockyInd; if (threadIdx.x == 0) // only the first thread of the entire block initializes the shared variables blockElt, blockxInd, blockyInd. { if ((blockIdx.x + 1) * BLOCK_WIDTH <= matrixWidth) blockElt = BLOCK_WIDTH; // NOT the rightmost block so width of block = BLOCK_WIDTH else blockElt = matrixWidth % BLOCK_WIDTH; // rightmost block so width of block = matrixWidth % BLOCK_WIDTH blockxInd = blockIdx.x * BLOCK_WIDTH; // top left thread x-index of the block blockyInd = blockIdx.y * BLOCK_HEIGHT; // top left thread y-index of the block } __syncthreads(); //all threads have value of blockElt, blockxInd, blockyInd // copy section of b into shared mem // https://stackoverflow.com/questions/24419822/efficiently-initializing-shared-memory-array-in-cuda/24419969#24419969 // use threads to write into independent locations of b[] from in [] __shared__ T b[BLOCK_WIDTH]; __shared__ T in_sub[BLOCK_HEIGHT][BLOCK_WIDTH + 31]; int threads_per_block = BLOCK_HEIGHT; int lidx = threadIdx.x; while (lidx < BLOCK_WIDTH) { b[lidx] = in[lidx + blockIdx.x * BLOCK_WIDTH]; lidx += threads_per_block; } __syncthreads(); for (int i=0; i<blockElt; i++) //each thread loads one sub-row of matrix a[]. 
{ in_sub[threadIdx.x][i] = a[(blockyInd + threadIdx.x) * matrixWidth + blockxInd + i]; } __syncthreads(); // summing variable T cSum = (T) 0.0; int threadyInd = blockyInd + threadIdx.x; // // make sure we are inside the matrix verticallly if (threadyInd < matrixHeight) { // each thread computes one element of a block segment of the output vector for (int i=0; i<blockElt; i++) { // cSum += in_sub[threadIdx.x][i] * b[i]; } // atomic add these variables to the corresponding c index atomicAdd(out + threadyInd, cSum); } // int tid = threadIdx.x + // blockIdx.x * blockDim.x + // blockIdx.y * gridDim.x * blockDim.x; int tid = threadIdx.x + blockIdx.x * BLOCK_WIDTH + blockIdx.y * BLOCK_HEIGHT * matrixWidth ; myid[tid] = get_gpcid(); __syncthreads(); } template <class _Tp> xt::xarray<_Tp> matvec_banking (xt::xarray<_Tp> matrix_A, xt::xarray<_Tp> vector_B) { unsigned int n_rows = matrix_A.shape()[0]; unsigned int n_cols = matrix_A.shape()[1]; unsigned int size_A = n_rows * n_cols; unsigned int size_B = n_cols; assert (vector_B.shape()[0] == size_B && "matrix A and vector B shape mismatch."); assert (vector_B.shape()[1] == 1 && "vector B no. of columns != 1"); unsigned int size_C = n_rows; // Block Grid for MatMulKernel<<< >>> int blockCols = (int) ceil(n_cols / (double) BLOCK_WIDTH); int blockRows = (int) ceil(n_rows / (double) BLOCK_HEIGHT); dim3 dimBlock(BLOCK_HEIGHT); // BLOCK_HEIGHT directly corresponds to no. of threads per block i.e., one thread per row of the block. dim3 dimGrid(blockCols, blockRows); std::cout << "Gridblock size (Row x Col): (" << blockRows << ","<< blockCols << ")\t"; std::cout << "BLOCK size (Hgt x Wdth): (" << BLOCK_HEIGHT << ","<< BLOCK_WIDTH << ")\t"; unsigned int no_of_threads = blockCols*blockRows*BLOCK_HEIGHT; //no. of blocks * threads in each block // host copies of A,B,C _Tp *A = new _Tp[size_A]; _Tp *B = new _Tp[size_B]; _Tp *C = new _Tp[size_C]; gpc_id *myid = new gpc_id[no_of_threads]; // Allocate Unified Memory accessible from CPU or GPU hipMallocManaged(&A, size_A*sizeof(_Tp)); hipMallocManaged(&B, size_B*sizeof(_Tp)); hipMallocManaged(&C, size_C*sizeof(_Tp)); hipMallocManaged(&myid, no_of_threads*sizeof(gpc_id)); // Fill the matrix values from xtensor to C++ array for (int i = 0; i < size_A; i++) A[i] = matrix_A.flat(i); for (int i = 0; i < size_B; i++) B[i] = vector_B.flat(i); // shared Memory size int sharedMem = 3 * sizeof (int) + BLOCK_WIDTH * sizeof(_Tp) + BLOCK_HEIGHT * (BLOCK_WIDTH + 31) * sizeof(_Tp); // 31 is for padding s.t. 
(96+31) mod 32 = 1 // 3 * sizeof (int) -> to store blockElt, blockxInd, blockyInd; // initialize vector C to zero hipMemset(C, 0, n_rows*sizeof(_Tp)); // time the matvel multiplication operation // https://developer.nvidia.com/blog/how-implement-performance-metrics-cuda-cc/ hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // GET_ID<<<dimGrid, dimBlock, sharedMem>>>(myid); hipLaunchKernelGGL(( MatMulKernel<float>), dim3(dimGrid), dim3(dimBlock), sharedMem, 0, C, B, A, n_rows, n_cols, myid); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); // for (int i = 0; i < no_of_threads; i++) // { // myid[i].t_idx = i; // myid[i].cta_idy = i*2; // } float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); std::cout << "Execution Time: " << milliseconds << " ms" << std::endl; // Convert product vector to xtensor xt::xarray<double>::shape_type C_shape = {size_C, 1}; xt::xarray<_Tp> vec_C = xt::adapt(C, size_C, xt::no_ownership(), C_shape); // Log the output of myid // https://stackoverflow.com/questions/25918057/how-to-set-a-fixed-width-with-cout size_t headerWidths[5] = {std::string("T_IDX ").size(), std::string("WRP_ID ").size(), std::string("SM_ID ").size(), std::string("GRID_ID ").size(), std::string("CTA_IDX ").size() }; // Redirecting output to a file // https://stackoverflow.com/questions/10150468/how-to-redirect-cin-and-cout-to-files // https://stackoverflow.com/questions/29464578/append-std-output-of-a-function-to-a-file const std::string cuda_log_file = "../cuda_logfiles/cuda_log-w" + std::to_string(LAYER_WIDTH) + "x" + std::to_string(LAYER_WIDTH) + "-" + std::to_string(MODEL_SEED) + ".txt"; std::ofstream out(cuda_log_file, std::fstream::app); std::streambuf *coutbuf = std::cout.rdbuf(); //save old buf std::cout.rdbuf(out.rdbuf()); //redirect std::cout to out.txt! 
// std::cout << "T_IDX T_IDY T_IDZ WRP_ID SM_ID GRID_ID CTA_IDX CTA_IDY CTA_IDZ"<< std::endl; std::cout << "T_IDX WRP_ID SM_ID GRID_ID CTA_IDX CTA_IDY"<< std::endl; for (int i = 0; i < no_of_threads; i++){ std::cout << std::left << std::setw(headerWidths[0]) << myid[i].t_idx; // std::cout << std::left << std::setw(headerWidths[0]) << myid[i].t_idy; // std::cout << std::left << std::setw(headerWidths[0]) << myid[i].t_idz; std::cout << std::left << std::setw(headerWidths[1]) << myid[i].warp_id; std::cout << std::left << std::setw(headerWidths[2]) << myid[i].sm_id; std::cout << std::left << std::setw(headerWidths[3]) << myid[i].grid_id; std::cout << std::left << std::setw(headerWidths[4]) << myid[i].cta_idx; std::cout << std::left << std::setw(headerWidths[4]) << myid[i].cta_idy; // std::cout << std::left << std::setw(headerWidths[4]) << myid[i].cta_idz; std::cout << std::endl; } std::cout << "***************************" << std::endl; std::cout.rdbuf(coutbuf); //reset to standard output again hipFree(A); hipFree(B); hipFree(C); hipFree(myid); return vec_C; } int main() { // load weights from npy files const std::string dense_weights_folder = "../weights/mnist_dense-w" + std::to_string(LAYER_WIDTH) + "x" + std::to_string(LAYER_WIDTH) + "-" + std::to_string(MODEL_SEED); const std::string dense_weights_file = dense_weights_folder + "/mnist_dense-w" + std::to_string(LAYER_WIDTH) + "x" + std::to_string(LAYER_WIDTH) + "-" + std::to_string(MODEL_SEED) + "_dense_weights.npy"; // std::cout << "******************************" << std::endl; // std::cout << "Weights: " << dense_weights_file << std::endl; xt::xarray<float> dense_weights = xt::load_npy<float>(dense_weights_file); xt::xarray<float> tr_dense_weights = xt::transpose(dense_weights); // load input vector from npy file uint image_no = 69999; const std::string input_vector_file = "../data/vector_" + std::to_string(image_no) + ".npy"; // std::cout << "Input: " << input_vector_file << std::endl; xt::xarray<float> input_vector = xt::load_npy<float>(input_vector_file); // std::cout << "******************************" << std::endl; // std::cout << "Transposed Weight Matrix Shape: "<< xt::adapt(tr_dense_weights.shape()) << std::endl; // std::cout << "Input Vector Shape: "<< xt::adapt(input_vector.shape()) << std::endl; // std::cout << "******************************" << std::endl; // for (int i = 0; i < 10; ++i) // { // matvec_banking(tr_dense_weights, input_vector); // } // std::cout << "******************************" << std::endl; // Display Output auto matvecproduct = matvec_banking(tr_dense_weights, input_vector); // std::cout << "Matrix-Vector Product Shape: " << xt::adapt(matvecproduct.shape()) << std::endl; // std::cout << "Matrix-Vector Product" << std::endl; // std::cout << matvecproduct << std::endl; // std::cout << "******************************" << std::endl; return 0; }
ac9a4e776afcd5cae7a01e6510e7305dc42aecc9.cu
#include <iostream> #include <string> #include <xtensor/xarray.hpp> #include <xtensor/xio.hpp> #include <xtensor/xview.hpp> #include <xtensor/xnpy.hpp> #include <xtensor/xsort.hpp> #define BLOCK_HEIGHT 64 #define BLOCK_WIDTH 98 // GLOBAL VARIABLES uint LAYER_WIDTH = 512; uint MODEL_SEED = 52233264; // GPC_ID to get thread ID values struct GPC_ID { uint t_idx, t_idy, t_idz; uint cta_idx, cta_idy, cta_idz; uint warp_id, sm_id, grid_id; }; // https://stackoverflow.com/questions/612328/difference-between-struct-and-typedef-struct-in-c typedef struct GPC_ID gpc_id; // https://forums.developer.nvidia.com/t/any-way-to-know-on-which-sm-a-thread-is-running/19974/15 // https://www.codeproject.com/Articles/15971/Using-Inline-Assembly-in-C-C __device__ gpc_id get_gpcid(void) { gpc_id my_id; asm("mov.u32 %0, %tid.x;" : "=r"(my_id.t_idx) ); asm("mov.u32 %0, %tid.y;" : "=r"(my_id.t_idy) ); asm("mov.u32 %0, %tid.z;" : "=r"(my_id.t_idz) ); asm("mov.u32 %0, %warpid;" : "=r"(my_id.warp_id) ); asm("mov.u32 %0, %smid;" : "=r"(my_id.sm_id) ); asm("mov.u32 %0, %gridid;" : "=r"(my_id.grid_id) ); asm("mov.u32 %0, %ctaid.x;" : "=r"(my_id.cta_idx) ); asm("mov.u32 %0, %ctaid.y;" : "=r"(my_id.cta_idy) ); asm("mov.u32 %0, %ctaid.z;" : "=r"(my_id.cta_idz) ); return my_id; } __global__ void GET_ID(gpc_id *myid) { int tid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x; // printf("mytid: %d", tid); myid[tid] = get_gpcid(); } // // Matrix-vector multiplication using CUDA // // Using shared memory and avoiding banking conflicts template<typename T> __global__ void MatMulKernel(T *out, T *in, T *a, const int matrixHeight, const int matrixWidth, gpc_id* myid) { // get variables for loop // copy section of b into shared mem // go through the threads vertically and sum them into a variable // atomic add these variables to the corresponding c index // looping is happening horizontally on the matrix // BLOCK_WIDTH is again horizontal // BLOCK_HEIGHT is going vertical // n_cols / BLOCK_WIDTH blocks horizontally // n_rows / BLOCK_HEIGHT block vertically // get variables for loop // variable for loop length: blockEltHeight __shared__ int blockElt; __shared__ int blockxInd; __shared__ int blockyInd; if (threadIdx.x == 0) // only the first thread of the entire block initializes the shared variables blockElt, blockxInd, blockyInd. { if ((blockIdx.x + 1) * BLOCK_WIDTH <= matrixWidth) blockElt = BLOCK_WIDTH; // NOT the rightmost block so width of block = BLOCK_WIDTH else blockElt = matrixWidth % BLOCK_WIDTH; // rightmost block so width of block = matrixWidth % BLOCK_WIDTH blockxInd = blockIdx.x * BLOCK_WIDTH; // top left thread x-index of the block blockyInd = blockIdx.y * BLOCK_HEIGHT; // top left thread y-index of the block } __syncthreads(); //all threads have value of blockElt, blockxInd, blockyInd // copy section of b into shared mem // https://stackoverflow.com/questions/24419822/efficiently-initializing-shared-memory-array-in-cuda/24419969#24419969 // use threads to write into independent locations of b[] from in [] __shared__ T b[BLOCK_WIDTH]; __shared__ T in_sub[BLOCK_HEIGHT][BLOCK_WIDTH + 31]; int threads_per_block = BLOCK_HEIGHT; int lidx = threadIdx.x; while (lidx < BLOCK_WIDTH) { b[lidx] = in[lidx + blockIdx.x * BLOCK_WIDTH]; lidx += threads_per_block; } __syncthreads(); for (int i=0; i<blockElt; i++) //each thread loads one sub-row of matrix a[]. 
{ in_sub[threadIdx.x][i] = a[(blockyInd + threadIdx.x) * matrixWidth + blockxInd + i]; } __syncthreads(); // summing variable T cSum = (T) 0.0; int threadyInd = blockyInd + threadIdx.x; // // make sure we are inside the matrix verticallly if (threadyInd < matrixHeight) { // each thread computes one element of a block segment of the output vector for (int i=0; i<blockElt; i++) { // cSum += in_sub[threadIdx.x][i] * b[i]; } // atomic add these variables to the corresponding c index atomicAdd(out + threadyInd, cSum); } // int tid = threadIdx.x + // blockIdx.x * blockDim.x + // blockIdx.y * gridDim.x * blockDim.x; int tid = threadIdx.x + blockIdx.x * BLOCK_WIDTH + blockIdx.y * BLOCK_HEIGHT * matrixWidth ; myid[tid] = get_gpcid(); __syncthreads(); } template <class _Tp> xt::xarray<_Tp> matvec_banking (xt::xarray<_Tp> matrix_A, xt::xarray<_Tp> vector_B) { unsigned int n_rows = matrix_A.shape()[0]; unsigned int n_cols = matrix_A.shape()[1]; unsigned int size_A = n_rows * n_cols; unsigned int size_B = n_cols; assert (vector_B.shape()[0] == size_B && "matrix A and vector B shape mismatch."); assert (vector_B.shape()[1] == 1 && "vector B no. of columns != 1"); unsigned int size_C = n_rows; // Block Grid for MatMulKernel<<< >>> int blockCols = (int) ceil(n_cols / (double) BLOCK_WIDTH); int blockRows = (int) ceil(n_rows / (double) BLOCK_HEIGHT); dim3 dimBlock(BLOCK_HEIGHT); // BLOCK_HEIGHT directly corresponds to no. of threads per block i.e., one thread per row of the block. dim3 dimGrid(blockCols, blockRows); std::cout << "Gridblock size (Row x Col): (" << blockRows << ","<< blockCols << ")\t"; std::cout << "BLOCK size (Hgt x Wdth): (" << BLOCK_HEIGHT << ","<< BLOCK_WIDTH << ")\t"; unsigned int no_of_threads = blockCols*blockRows*BLOCK_HEIGHT; //no. of blocks * threads in each block // host copies of A,B,C _Tp *A = new _Tp[size_A]; _Tp *B = new _Tp[size_B]; _Tp *C = new _Tp[size_C]; gpc_id *myid = new gpc_id[no_of_threads]; // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&A, size_A*sizeof(_Tp)); cudaMallocManaged(&B, size_B*sizeof(_Tp)); cudaMallocManaged(&C, size_C*sizeof(_Tp)); cudaMallocManaged(&myid, no_of_threads*sizeof(gpc_id)); // Fill the matrix values from xtensor to C++ array for (int i = 0; i < size_A; i++) A[i] = matrix_A.flat(i); for (int i = 0; i < size_B; i++) B[i] = vector_B.flat(i); // shared Memory size int sharedMem = 3 * sizeof (int) + BLOCK_WIDTH * sizeof(_Tp) + BLOCK_HEIGHT * (BLOCK_WIDTH + 31) * sizeof(_Tp); // 31 is for padding s.t. 
(96+31) mod 32 = 1 // 3 * sizeof (int) -> to store blockElt, blockxInd, blockyInd; // initialize vector C to zero cudaMemset(C, 0, n_rows*sizeof(_Tp)); // time the matvel multiplication operation // https://developer.nvidia.com/blog/how-implement-performance-metrics-cuda-cc/ cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // GET_ID<<<dimGrid, dimBlock, sharedMem>>>(myid); MatMulKernel<float><<<dimGrid, dimBlock, sharedMem>>>(C, B, A, n_rows, n_cols, myid); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); // for (int i = 0; i < no_of_threads; i++) // { // myid[i].t_idx = i; // myid[i].cta_idy = i*2; // } float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); std::cout << "Execution Time: " << milliseconds << " ms" << std::endl; // Convert product vector to xtensor xt::xarray<double>::shape_type C_shape = {size_C, 1}; xt::xarray<_Tp> vec_C = xt::adapt(C, size_C, xt::no_ownership(), C_shape); // Log the output of myid // https://stackoverflow.com/questions/25918057/how-to-set-a-fixed-width-with-cout size_t headerWidths[5] = {std::string("T_IDX ").size(), std::string("WRP_ID ").size(), std::string("SM_ID ").size(), std::string("GRID_ID ").size(), std::string("CTA_IDX ").size() }; // Redirecting output to a file // https://stackoverflow.com/questions/10150468/how-to-redirect-cin-and-cout-to-files // https://stackoverflow.com/questions/29464578/append-std-output-of-a-function-to-a-file const std::string cuda_log_file = "../cuda_logfiles/cuda_log-w" + std::to_string(LAYER_WIDTH) + "x" + std::to_string(LAYER_WIDTH) + "-" + std::to_string(MODEL_SEED) + ".txt"; std::ofstream out(cuda_log_file, std::fstream::app); std::streambuf *coutbuf = std::cout.rdbuf(); //save old buf std::cout.rdbuf(out.rdbuf()); //redirect std::cout to out.txt! 
// std::cout << "T_IDX T_IDY T_IDZ WRP_ID SM_ID GRID_ID CTA_IDX CTA_IDY CTA_IDZ"<< std::endl; std::cout << "T_IDX WRP_ID SM_ID GRID_ID CTA_IDX CTA_IDY"<< std::endl; for (int i = 0; i < no_of_threads; i++){ std::cout << std::left << std::setw(headerWidths[0]) << myid[i].t_idx; // std::cout << std::left << std::setw(headerWidths[0]) << myid[i].t_idy; // std::cout << std::left << std::setw(headerWidths[0]) << myid[i].t_idz; std::cout << std::left << std::setw(headerWidths[1]) << myid[i].warp_id; std::cout << std::left << std::setw(headerWidths[2]) << myid[i].sm_id; std::cout << std::left << std::setw(headerWidths[3]) << myid[i].grid_id; std::cout << std::left << std::setw(headerWidths[4]) << myid[i].cta_idx; std::cout << std::left << std::setw(headerWidths[4]) << myid[i].cta_idy; // std::cout << std::left << std::setw(headerWidths[4]) << myid[i].cta_idz; std::cout << std::endl; } std::cout << "***************************" << std::endl; std::cout.rdbuf(coutbuf); //reset to standard output again cudaFree(A); cudaFree(B); cudaFree(C); cudaFree(myid); return vec_C; } int main() { // load weights from npy files const std::string dense_weights_folder = "../weights/mnist_dense-w" + std::to_string(LAYER_WIDTH) + "x" + std::to_string(LAYER_WIDTH) + "-" + std::to_string(MODEL_SEED); const std::string dense_weights_file = dense_weights_folder + "/mnist_dense-w" + std::to_string(LAYER_WIDTH) + "x" + std::to_string(LAYER_WIDTH) + "-" + std::to_string(MODEL_SEED) + "_dense_weights.npy"; // std::cout << "******************************" << std::endl; // std::cout << "Weights: " << dense_weights_file << std::endl; xt::xarray<float> dense_weights = xt::load_npy<float>(dense_weights_file); xt::xarray<float> tr_dense_weights = xt::transpose(dense_weights); // load input vector from npy file uint image_no = 69999; const std::string input_vector_file = "../data/vector_" + std::to_string(image_no) + ".npy"; // std::cout << "Input: " << input_vector_file << std::endl; xt::xarray<float> input_vector = xt::load_npy<float>(input_vector_file); // std::cout << "******************************" << std::endl; // std::cout << "Transposed Weight Matrix Shape: "<< xt::adapt(tr_dense_weights.shape()) << std::endl; // std::cout << "Input Vector Shape: "<< xt::adapt(input_vector.shape()) << std::endl; // std::cout << "******************************" << std::endl; // for (int i = 0; i < 10; ++i) // { // matvec_banking(tr_dense_weights, input_vector); // } // std::cout << "******************************" << std::endl; // Display Output auto matvecproduct = matvec_banking(tr_dense_weights, input_vector); // std::cout << "Matrix-Vector Product Shape: " << xt::adapt(matvecproduct.shape()) << std::endl; // std::cout << "Matrix-Vector Product" << std::endl; // std::cout << matvecproduct << std::endl; // std::cout << "******************************" << std::endl; return 0; }
809bfc7c51bc60bf5bd26f3d1f9e698a33d60722.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <float.h> #include <ATen/ATen.h> #include <THH/THHAtomics.cuh> #include "limits.cuh" using namespace at; // fix for pytorch<=0.4.1 #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 inline int GET_BLOCKS(const int N) { int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; int max_block_num = 65000; return min(optimal_block_num, max_block_num); } //type-safe sign template <typename scalar_t> __device__ scalar_t sgn(scalar_t val) { return (scalar_t(0) < val) - (val < scalar_t(0)); } // Overflow and Underflow clamp template <typename scalar_t> __device__ scalar_t clamp(const scalar_t n, const scalar_t lower, const scalar_t upper) { const scalar_t tmp = abs(n); const scalar_t result = max(lower, min(tmp, upper)); return result * sgn(n); } template <typename scalar_t> __global__ void SoftPool1dForward(const int nthreads, const scalar_t *bottom_input, const int batches, const int channels, const int dim, const int kernel_d, const int stride_d, scalar_t *output_data){ int pooled_dim = dim/stride_d; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pd = index % pooled_dim; int c = (index / pooled_dim) % channels; int n = index / pooled_dim / channels; const int offset = (n * channels + c) * dim; const scalar_t *offset_bottom_input = bottom_input + offset; scalar_t mask_sum = 0.; output_data[index] = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); const scalar_t zero = 0.; for(int id=0; id<kernel_d; id++){ const int d_offset = pd*stride_d + id - kernel_d/2; if(d_offset >= dim || d_offset < 0)continue; const int offset = d_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int id=0; id<kernel_d; id++){ const int d_offset = pd*stride_d + id - kernel_d/2; if(d_offset >= dim || d_offset < 0)continue; const int offset = d_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) +FLT_MIN <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset] <= FLT_MAX scalar_t weighted_inp = offset_bottom_input[offset] * mask; weighted_inp = clamp(weighted_inp, zero, upper); // Overflow check (F.) 
0 <= sum[(e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset]] <= FLT_MAX output_data[index] += weighted_inp; output_data[index] = clamp(output_data[index], zero, upper); } } } template <typename scalar_t> __global__ void SoftPool2dForward(const int nthreads, const scalar_t *bottom_input, const int batches, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, scalar_t *output_data){ int pooled_height = height/stride_h; int pooled_width = width/stride_w; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const int offset = (n * channels + c) * height * width; const scalar_t *offset_bottom_input = bottom_input + offset; scalar_t mask_sum = 0.; output_data[index] = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); const scalar_t zero = 0.; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = ph*stride_h + iy - kernel_h/2; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = pw*stride_w + ix - kernel_w/2; if(x_offset >= width || x_offset < 0)continue; const int offset = y_offset*width + x_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int iy=0; iy<kernel_h; iy++){ const int y_offset = ph*stride_h + iy - kernel_h/2; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = pw*stride_w + ix - kernel_w/2; if(x_offset >= width || x_offset < 0)continue; const int offset = y_offset*width + x_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset] <= FLT_MAX scalar_t weighted_inp = offset_bottom_input[offset] * mask; weighted_inp = clamp(weighted_inp, zero, upper); // Overflow check (F.) 
0 <= sum[(e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset]] <= FLT_MAX output_data[index] += weighted_inp; output_data[index] = clamp(output_data[index], zero, upper); } } } } template <typename scalar_t> __global__ void SoftPool3dForward(const int nthreads, const scalar_t *bottom_input, const int batches, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, scalar_t *output_data){ int pooled_depth = depth/stride_d; int pooled_height = height/stride_h; int pooled_width = width/stride_w; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int pd = (index / pooled_width / pooled_height) % pooled_depth; int c = (index / pooled_width / pooled_height / pooled_depth) % channels; int n = index / pooled_width / pooled_height / pooled_depth / channels; const int offset = (n * channels + c) * depth * height * width; const scalar_t *offset_bottom_input = bottom_input + offset; scalar_t mask_sum = 0.; output_data[index] = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); const scalar_t zero = 0.; for(int id=0; id<kernel_d; id++){ const int d_offset = pd*stride_d + id - kernel_d/2; if(d_offset >= depth || d_offset < 0)continue; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = ph*stride_h + iy - kernel_h/2; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = pw*stride_w + ix - kernel_w/2; if(x_offset >= width || x_offset < 0)continue; const int offset = d_offset*height + y_offset*width + x_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } } } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int id=0; id<kernel_d; id++){ const int d_offset = pd*stride_d + id - kernel_d/2; if(d_offset >= depth || d_offset < 0)continue; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = ph*stride_h + iy - kernel_h/2; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = pw*stride_w + ix - kernel_w/2; if(x_offset >= width || x_offset < 0)continue; const int offset = d_offset*height + y_offset*width + x_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) +FLT_MIN <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset] <= FLT_MAX scalar_t weighted_inp = offset_bottom_input[offset] * mask; weighted_inp = clamp(weighted_inp, zero, upper); // Overflow check (F.) 
0 <= sum[(e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset]] <= FLT_MAX output_data[index] += weighted_inp; output_data[index] = clamp(output_data[index], zero, upper); } } } } } int SoftPool1dForwardLauncher(const at::Tensor input, const int batches, const int channels, const int dim, const int kernel_d, const int stride_d, at::Tensor output){ const int output_size = batches * dim/stride_d * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool1dLauncherForward", ([&] { const scalar_t *bottom_input = input.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); hipLaunchKernelGGL(( SoftPool1dForward<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, bottom_input, batches, channels, dim, kernel_d, stride_d, output_data); }) ); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; } int SoftPool2dForwardLauncher(const at::Tensor input, const int batches, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, at::Tensor output){ const int output_size = batches * height/stride_h * width/stride_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool2dLauncherForward", ([&] { const scalar_t *bottom_input = input.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); hipLaunchKernelGGL(( SoftPool2dForward<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, bottom_input, batches, channels, height, width, kernel_h, kernel_w, stride_h, stride_w, output_data); }) ); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; } int SoftPool3dForwardLauncher(const at::Tensor input, const int batches, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, at::Tensor output){ const int output_size = batches * depth/stride_d * height/stride_h * width/stride_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool3dLauncherForward", ([&] { const scalar_t *bottom_input = input.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); hipLaunchKernelGGL(( SoftPool3dForward<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, bottom_input, batches, channels, depth, height, width, kernel_d, kernel_h, kernel_w, stride_d, stride_h, stride_w, output_data); }) ); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; } template <typename scalar_t> __global__ void SoftPool1dBackward(const int nthreads, const scalar_t *diff_output, const scalar_t *data_input, const int batches, const int channels, const int dim, const int kernel_d, const int stride_d, scalar_t *diff_input){ int pooled_dim = dim/stride_d; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pd = index % pooled_dim; int c = (index / pooled_dim) % channels; int n = index / pooled_dim / channels; const int offset0 = (n * channels + c) * dim; const scalar_t *offset_data_input = data_input + offset0; const scalar_t diff_output_index = diff_output[index]; scalar_t *offset_diff_input = diff_input + offset0; const int 
base_d = pd*stride_d - kernel_d/2; scalar_t mask_sum = 0.; const scalar_t zero = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); for(int id=0; id<kernel_d; id++){ const int d_offset = base_d + id; if(d_offset >= dim || d_offset < 0)continue; const int offset = d_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int id=0; id<kernel_d; id++){ const int d_offset = base_d + id; if(d_offset >= dim || d_offset < 0)continue; const int offset = d_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * grad <= FLT_MAX scalar_t weighted_grad = diff_output_index * mask; weighted_grad = clamp(weighted_grad, zero, upper); atomicAdd(offset_diff_input+offset, weighted_grad); } } } template <typename scalar_t> __global__ void SoftPool2dBackward(const int nthreads, const scalar_t *diff_output, const scalar_t *data_input, const int batches, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, scalar_t *diff_input){ int pooled_height = height/stride_h; int pooled_width = width/stride_w; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const int offset0 = (n * channels + c) * height * width; const scalar_t *offset_data_input = data_input + offset0; const scalar_t diff_output_index = diff_output[index]; scalar_t *offset_diff_input = diff_input + offset0; const int base_y = ph*stride_h - kernel_h/2; const int base_x = pw*stride_w - kernel_w/2; scalar_t mask_sum = 0.; scalar_t zero = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); for(int iy=0; iy<kernel_h; iy++){ const int y_offset = base_y + iy; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = base_x + ix; if(x_offset >= width || x_offset < 0)continue; const int offset = y_offset*width + x_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int iy=0; iy<kernel_h; iy++){ const int y_offset = base_y + iy; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = base_x + ix; if(x_offset >= width || x_offset < 0)continue; const int offset = y_offset*width + x_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 
0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * grad <= FLT_MAX scalar_t weighted_grad = diff_output_index * mask; weighted_grad = clamp(weighted_grad, zero, upper); atomicAdd(offset_diff_input+offset, weighted_grad); } } } } template <typename scalar_t> __global__ void SoftPool3dBackward(const int nthreads, const scalar_t *diff_output, const scalar_t *data_input, const int batches, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w , const int stride_d, const int stride_h, const int stride_w, scalar_t *diff_input){ int pooled_depth = depth/stride_d; int pooled_height = width/stride_h; int pooled_width = width/stride_w; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int pd = (index / pooled_width / pooled_height) % pooled_depth; int c = (index / pooled_width / pooled_height / pooled_depth) % channels; int n = index / pooled_width / pooled_height / pooled_depth / channels; const int offset0 = (n * channels + c) * depth * height * width; const scalar_t *offset_data_input = data_input + offset0; const scalar_t diff_output_index = diff_output[index]; scalar_t *offset_diff_input = diff_input + offset0; const int base_d = pd*stride_d - kernel_d/2; const int base_y = ph*stride_h - kernel_h/2; const int base_x = pw*stride_w - kernel_w/2; scalar_t mask_sum = 0.; scalar_t zero = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); for(int id=0; id<kernel_d; id++){ const int d_offset = base_d + id; if(d_offset >= depth || d_offset < 0)continue; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = base_y + iy; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = base_x + ix; if(x_offset >= width || x_offset < 0)continue; const int offset = d_offset*height + y_offset*width + x_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } } } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int id=0; id<kernel_d; id++){ const int d_offset = base_d + id; if(d_offset >= depth || d_offset < 0)continue; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = base_y + iy; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = base_x + ix; if(x_offset >= width || x_offset < 0)continue; const int offset = d_offset*height + y_offset*width + x_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 
0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * grad <= FLT_MAX scalar_t weighted_grad = diff_output_index * mask; weighted_grad = clamp(weighted_grad, zero, upper); atomicAdd(offset_diff_input+offset, weighted_grad); } } } } } int SoftPool1dBackwardLauncher(const at::Tensor output_grad, const at::Tensor input, const int batches, const int channels, const int dim, const int kernel_d, const int stride_d, at::Tensor input_grad){ const int output_size = batches * dim/stride_d * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool1dLauncherBackward", ([&] { scalar_t *diff_input = input_grad.data_ptr<scalar_t>(); const scalar_t *diff_output = output_grad.data_ptr<scalar_t>(); const scalar_t *data_input = input.data_ptr<scalar_t>(); hipLaunchKernelGGL(( SoftPool1dBackward<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, diff_output, data_input, batches, channels, dim, kernel_d, stride_d, diff_input); } ) ); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; } int SoftPool2dBackwardLauncher(const at::Tensor output_grad, const at::Tensor input, const int batches, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, at::Tensor input_grad){ const int output_size = batches * height/stride_h * width/stride_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool2dLauncherBackward", ([&] { scalar_t *diff_input = input_grad.data_ptr<scalar_t>(); const scalar_t *diff_output = output_grad.data_ptr<scalar_t>(); const scalar_t *data_input = input.data_ptr<scalar_t>(); hipLaunchKernelGGL(( SoftPool2dBackward<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, diff_output, data_input, batches, channels, height, width, kernel_h, kernel_w, stride_h, stride_w, diff_input); } ) ); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; } int SoftPool3dBackwardLauncher(const at::Tensor output_grad, const at::Tensor input, const int batches, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, at::Tensor input_grad){ const int output_size = batches * depth/stride_d * height/stride_h * width/stride_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool3dLauncherBackward", ([&] { scalar_t *diff_input = input_grad.data_ptr<scalar_t>(); const scalar_t *diff_output = output_grad.data_ptr<scalar_t>(); const scalar_t *data_input = input.data_ptr<scalar_t>(); hipLaunchKernelGGL(( SoftPool3dBackward<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, diff_output, data_input, batches, channels, depth, height, width, kernel_d, kernel_h, kernel_w, stride_d, stride_h, stride_w, diff_input); } ) ); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; }
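All of the SoftPool kernels above compute exp-weighted averages over a window while clamping each intermediate value into a finite range (the (A)-(F) checks in their comments). A minimal host-side sketch of that weighting scheme for a single 1-D window, using float where the kernels are templated over scalar_t:

#include <algorithm>
#include <cfloat>
#include <cmath>

// Host mirror of the device-side clamp(): limit the magnitude, keep the sign.
static float clamp_mag(float v, float lo, float hi)
{
    const float m = std::max(lo, std::min(std::fabs(v), hi));
    return v < 0.f ? -m : m;
}

// SoftPool of one window of n values, applying the same overflow/underflow
// guards (A)-(F) that the kernels apply element by element.
float softpool_window(const float* x, int n)
{
    float mask_sum = 0.f;
    for (int i = 0; i < n; ++i)
        mask_sum += clamp_mag(std::exp(x[i]), 0.f, FLT_MAX);       // (A)
    mask_sum = clamp_mag(mask_sum, FLT_MIN, FLT_MAX);              // (B)

    float out = 0.f;
    for (int i = 0; i < n; ++i) {
        float w = clamp_mag(std::exp(x[i]), 0.f, FLT_MAX);         // (C)
        w = clamp_mag(w / mask_sum, 0.f, FLT_MAX);                 // (D)
        const float weighted = clamp_mag(x[i] * w, 0.f, FLT_MAX);  // (E)
        out = clamp_mag(out + weighted, 0.f, FLT_MAX);             // (F)
    }
    return out;
}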
809bfc7c51bc60bf5bd26f3d1f9e698a33d60722.cu
#include <float.h> #include <ATen/ATen.h> #include <THC/THCAtomics.cuh> #include "limits.cuh" using namespace at; // fix for pytorch<=0.4.1 #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 inline int GET_BLOCKS(const int N) { int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; int max_block_num = 65000; return min(optimal_block_num, max_block_num); } //type-safe sign template <typename scalar_t> __device__ scalar_t sgn(scalar_t val) { return (scalar_t(0) < val) - (val < scalar_t(0)); } // Overflow and Underflow clamp template <typename scalar_t> __device__ scalar_t clamp(const scalar_t n, const scalar_t lower, const scalar_t upper) { const scalar_t tmp = abs(n); const scalar_t result = max(lower, min(tmp, upper)); return result * sgn(n); } template <typename scalar_t> __global__ void SoftPool1dForward(const int nthreads, const scalar_t *bottom_input, const int batches, const int channels, const int dim, const int kernel_d, const int stride_d, scalar_t *output_data){ int pooled_dim = dim/stride_d; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pd = index % pooled_dim; int c = (index / pooled_dim) % channels; int n = index / pooled_dim / channels; const int offset = (n * channels + c) * dim; const scalar_t *offset_bottom_input = bottom_input + offset; scalar_t mask_sum = 0.; output_data[index] = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); const scalar_t zero = 0.; for(int id=0; id<kernel_d; id++){ const int d_offset = pd*stride_d + id - kernel_d/2; if(d_offset >= dim || d_offset < 0)continue; const int offset = d_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int id=0; id<kernel_d; id++){ const int d_offset = pd*stride_d + id - kernel_d/2; if(d_offset >= dim || d_offset < 0)continue; const int offset = d_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) +FLT_MIN <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset] <= FLT_MAX scalar_t weighted_inp = offset_bottom_input[offset] * mask; weighted_inp = clamp(weighted_inp, zero, upper); // Overflow check (F.) 
0 <= sum[(e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset]] <= FLT_MAX output_data[index] += weighted_inp; output_data[index] = clamp(output_data[index], zero, upper); } } } template <typename scalar_t> __global__ void SoftPool2dForward(const int nthreads, const scalar_t *bottom_input, const int batches, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, scalar_t *output_data){ int pooled_height = height/stride_h; int pooled_width = width/stride_w; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const int offset = (n * channels + c) * height * width; const scalar_t *offset_bottom_input = bottom_input + offset; scalar_t mask_sum = 0.; output_data[index] = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); const scalar_t zero = 0.; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = ph*stride_h + iy - kernel_h/2; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = pw*stride_w + ix - kernel_w/2; if(x_offset >= width || x_offset < 0)continue; const int offset = y_offset*width + x_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int iy=0; iy<kernel_h; iy++){ const int y_offset = ph*stride_h + iy - kernel_h/2; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = pw*stride_w + ix - kernel_w/2; if(x_offset >= width || x_offset < 0)continue; const int offset = y_offset*width + x_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset] <= FLT_MAX scalar_t weighted_inp = offset_bottom_input[offset] * mask; weighted_inp = clamp(weighted_inp, zero, upper); // Overflow check (F.) 
0 <= sum[(e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset]] <= FLT_MAX output_data[index] += weighted_inp; output_data[index] = clamp(output_data[index], zero, upper); } } } } template <typename scalar_t> __global__ void SoftPool3dForward(const int nthreads, const scalar_t *bottom_input, const int batches, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, scalar_t *output_data){ int pooled_depth = depth/stride_d; int pooled_height = height/stride_h; int pooled_width = width/stride_w; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int pd = (index / pooled_width / pooled_height) % pooled_depth; int c = (index / pooled_width / pooled_height / pooled_depth) % channels; int n = index / pooled_width / pooled_height / pooled_depth / channels; const int offset = (n * channels + c) * depth * height * width; const scalar_t *offset_bottom_input = bottom_input + offset; scalar_t mask_sum = 0.; output_data[index] = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); const scalar_t zero = 0.; for(int id=0; id<kernel_d; id++){ const int d_offset = pd*stride_d + id - kernel_d/2; if(d_offset >= depth || d_offset < 0)continue; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = ph*stride_h + iy - kernel_h/2; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = pw*stride_w + ix - kernel_w/2; if(x_offset >= width || x_offset < 0)continue; const int offset = d_offset*height + y_offset*width + x_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } } } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int id=0; id<kernel_d; id++){ const int d_offset = pd*stride_d + id - kernel_d/2; if(d_offset >= depth || d_offset < 0)continue; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = ph*stride_h + iy - kernel_h/2; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = pw*stride_w + ix - kernel_w/2; if(x_offset >= width || x_offset < 0)continue; const int offset = d_offset*height + y_offset*width + x_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) +FLT_MIN <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset] <= FLT_MAX scalar_t weighted_inp = offset_bottom_input[offset] * mask; weighted_inp = clamp(weighted_inp, zero, upper); // Overflow check (F.) 
0 <= sum[(e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset]] <= FLT_MAX output_data[index] += weighted_inp; output_data[index] = clamp(output_data[index], zero, upper); } } } } } int SoftPool1dForwardLauncher(const at::Tensor input, const int batches, const int channels, const int dim, const int kernel_d, const int stride_d, at::Tensor output){ const int output_size = batches * dim/stride_d * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool1dLauncherForward", ([&] { const scalar_t *bottom_input = input.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); SoftPool1dForward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, bottom_input, batches, channels, dim, kernel_d, stride_d, output_data); }) ); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; } int SoftPool2dForwardLauncher(const at::Tensor input, const int batches, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, at::Tensor output){ const int output_size = batches * height/stride_h * width/stride_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool2dLauncherForward", ([&] { const scalar_t *bottom_input = input.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); SoftPool2dForward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, bottom_input, batches, channels, height, width, kernel_h, kernel_w, stride_h, stride_w, output_data); }) ); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; } int SoftPool3dForwardLauncher(const at::Tensor input, const int batches, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, at::Tensor output){ const int output_size = batches * depth/stride_d * height/stride_h * width/stride_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool3dLauncherForward", ([&] { const scalar_t *bottom_input = input.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); SoftPool3dForward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, bottom_input, batches, channels, depth, height, width, kernel_d, kernel_h, kernel_w, stride_d, stride_h, stride_w, output_data); }) ); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; } template <typename scalar_t> __global__ void SoftPool1dBackward(const int nthreads, const scalar_t *diff_output, const scalar_t *data_input, const int batches, const int channels, const int dim, const int kernel_d, const int stride_d, scalar_t *diff_input){ int pooled_dim = dim/stride_d; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pd = index % pooled_dim; int c = (index / pooled_dim) % channels; int n = index / pooled_dim / channels; const int offset0 = (n * channels + c) * dim; const scalar_t *offset_data_input = data_input + offset0; const scalar_t diff_output_index = diff_output[index]; scalar_t *offset_diff_input = diff_input + offset0; const int base_d = pd*stride_d - kernel_d/2; scalar_t mask_sum = 0.; const scalar_t zero = 0.; const 
scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); for(int id=0; id<kernel_d; id++){ const int d_offset = base_d + id; if(d_offset >= dim || d_offset < 0)continue; const int offset = d_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int id=0; id<kernel_d; id++){ const int d_offset = base_d + id; if(d_offset >= dim || d_offset < 0)continue; const int offset = d_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * grad <= FLT_MAX scalar_t weighted_grad = diff_output_index * mask; weighted_grad = clamp(weighted_grad, zero, upper); atomicAdd(offset_diff_input+offset, weighted_grad); } } } template <typename scalar_t> __global__ void SoftPool2dBackward(const int nthreads, const scalar_t *diff_output, const scalar_t *data_input, const int batches, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, scalar_t *diff_input){ int pooled_height = height/stride_h; int pooled_width = width/stride_w; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const int offset0 = (n * channels + c) * height * width; const scalar_t *offset_data_input = data_input + offset0; const scalar_t diff_output_index = diff_output[index]; scalar_t *offset_diff_input = diff_input + offset0; const int base_y = ph*stride_h - kernel_h/2; const int base_x = pw*stride_w - kernel_w/2; scalar_t mask_sum = 0.; scalar_t zero = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); for(int iy=0; iy<kernel_h; iy++){ const int y_offset = base_y + iy; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = base_x + ix; if(x_offset >= width || x_offset < 0)continue; const int offset = y_offset*width + x_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int iy=0; iy<kernel_h; iy++){ const int y_offset = base_y + iy; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = base_x + ix; if(x_offset >= width || x_offset < 0)continue; const int offset = y_offset*width + x_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 
// 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * grad <= FLT_MAX
        scalar_t weighted_grad = diff_output_index * mask;
        weighted_grad = clamp(weighted_grad, zero, upper);
        atomicAdd(offset_diff_input+offset, weighted_grad);
      }
    }
  }
}

template <typename scalar_t>
__global__ void SoftPool3dBackward(const int nthreads,
    const scalar_t *diff_output, const scalar_t *data_input,
    const int batches, const int channels, const int depth,
    const int height, const int width, const int kernel_d,
    const int kernel_h, const int kernel_w, const int stride_d,
    const int stride_h, const int stride_w, scalar_t *diff_input){
    int pooled_depth = depth/stride_d;
    int pooled_height = height/stride_h;
    int pooled_width = width/stride_w;
    CUDA_1D_KERNEL_LOOP(index, nthreads) {
      int pw = index % pooled_width;
      int ph = (index / pooled_width) % pooled_height;
      int pd = (index / pooled_width / pooled_height) % pooled_depth;
      int c = (index / pooled_width / pooled_height / pooled_depth) % channels;
      int n = index / pooled_width / pooled_height / pooled_depth / channels;
      const int offset0 = (n * channels + c) * depth * height * width;
      const scalar_t *offset_data_input = data_input + offset0;
      const scalar_t diff_output_index = diff_output[index];
      scalar_t *offset_diff_input = diff_input + offset0;
      const int base_d = pd*stride_d - kernel_d/2;
      const int base_y = ph*stride_h - kernel_h/2;
      const int base_x = pw*stride_w - kernel_w/2;
      scalar_t mask_sum = 0.;
      scalar_t zero = 0.;
      const scalar_t upper = n_limits<scalar_t>::max();
      const scalar_t lower = n_limits<scalar_t>::min();
      for(int id=0; id<kernel_d; id++){
        const int d_offset = base_d + id;
        if(d_offset >= depth || d_offset < 0)continue;
        for(int iy=0; iy<kernel_h; iy++){
          const int y_offset = base_y + iy;
          if(y_offset >= height || y_offset < 0)continue;
          for(int ix=0; ix<kernel_w; ix++){
            const int x_offset = base_x + ix;
            if(x_offset >= width || x_offset < 0)continue;
            const int offset = d_offset*height + y_offset*width + x_offset;
            // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX
            scalar_t mask = exp(offset_data_input[offset]);
            mask = clamp(mask, zero, upper);
            mask_sum += mask;
          }
        }
      }
      // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX
      mask_sum = clamp(mask_sum, lower, upper);
      for(int id=0; id<kernel_d; id++){
        const int d_offset = base_d + id;
        if(d_offset >= depth || d_offset < 0)continue;
        for(int iy=0; iy<kernel_h; iy++){
          const int y_offset = base_y + iy;
          if(y_offset >= height || y_offset < 0)continue;
          for(int ix=0; ix<kernel_w; ix++){
            const int x_offset = base_x + ix;
            if(x_offset >= width || x_offset < 0)continue;
            const int offset = d_offset*height + y_offset*width + x_offset;
            // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX
            scalar_t mask = exp(offset_data_input[offset]);
            mask = clamp(mask, zero, upper);
            // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1
            mask /= mask_sum;
            mask = clamp(mask, zero, upper);
            // Underflow check (E.)
0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * grad <= FLT_MAX scalar_t weighted_grad = diff_output_index * mask; weighted_grad = clamp(weighted_grad, zero, upper); atomicAdd(offset_diff_input+offset, weighted_grad); } } } } } int SoftPool1dBackwardLauncher(const at::Tensor output_grad, const at::Tensor input, const int batches, const int channels, const int dim, const int kernel_d, const int stride_d, at::Tensor input_grad){ const int output_size = batches * dim/stride_d * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool1dLauncherBackward", ([&] { scalar_t *diff_input = input_grad.data_ptr<scalar_t>(); const scalar_t *diff_output = output_grad.data_ptr<scalar_t>(); const scalar_t *data_input = input.data_ptr<scalar_t>(); SoftPool1dBackward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, diff_output, data_input, batches, channels, dim, kernel_d, stride_d, diff_input); } ) ); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; } int SoftPool2dBackwardLauncher(const at::Tensor output_grad, const at::Tensor input, const int batches, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, at::Tensor input_grad){ const int output_size = batches * height/stride_h * width/stride_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool2dLauncherBackward", ([&] { scalar_t *diff_input = input_grad.data_ptr<scalar_t>(); const scalar_t *diff_output = output_grad.data_ptr<scalar_t>(); const scalar_t *data_input = input.data_ptr<scalar_t>(); SoftPool2dBackward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, diff_output, data_input, batches, channels, height, width, kernel_h, kernel_w, stride_h, stride_w, diff_input); } ) ); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; } int SoftPool3dBackwardLauncher(const at::Tensor output_grad, const at::Tensor input, const int batches, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, at::Tensor input_grad){ const int output_size = batches * depth/stride_d * height/stride_h * width/stride_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool3dLauncherBackward", ([&] { scalar_t *diff_input = input_grad.data_ptr<scalar_t>(); const scalar_t *diff_output = output_grad.data_ptr<scalar_t>(); const scalar_t *data_input = input.data_ptr<scalar_t>(); SoftPool3dBackward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, diff_output, data_input, batches, channels, depth, height, width, kernel_d, kernel_h, kernel_w, stride_d, stride_h, stride_w, diff_input); } ) ); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; }
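Taken together, the SoftPool kernels in this pair compute an exponentially weighted average over each pooling window R. In the notation of the (A.)-(F.) comments, with x_i the inputs covered by the window, the forward kernels accumulate

out = \frac{\sum_{i \in R} e^{x_i} x_i}{\sum_{j \in R} e^{x_j}}

and the backward kernels distribute the incoming gradient with the same normalized weights, as the code itself does (the weights are treated as constants in the backward pass):

\frac{\partial E}{\partial x_i} \mathrel{+}= \frac{e^{x_i}}{\sum_{j \in R} e^{x_j}} \cdot \frac{\partial E}{\partial \text{out}}

with every intermediate factor clamped into [0, FLT_MAX] (or [FLT_MIN, FLT_MAX] for the window sum) exactly as the comments state.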
ef1731d4f335006778d8dd2d92a814a34b209032.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.m on 19-Oct-2012 16:21:04 // // user function __device__ #include "dotR.h" // CUDA kernel function __global__ void op_cuda_dotR( double *arg0, double *arg1, int offset_s, int set_size ) { double arg1_l[1]; for (int d=0; d<1; d++) arg1_l[d]=ZERO_double; // process set elements for (int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x) { // user-supplied kernel call dotR( arg0+n, arg1_l ); } // global reductions for(int d=0; d<1; d++) op_reduction<OP_INC>(&arg1[d+blockIdx.x*1],arg1_l[d]); } // host stub function void op_par_loop_dotR(char const *name, op_set set, op_arg arg0, op_arg arg1 ){ double *arg1h = (double *)arg1.data; int nargs = 2; op_arg args[2]; args[0] = arg0; args[1] = arg1; if (OP_diags>2) { printf(" kernel routine w/o indirection: dotR\n"); } op_mpi_halo_exchanges(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0; op_timing_realloc(6); OP_kernels[6].name = name; OP_kernels[6].count += 1; if (set->size >0) { op_timers_core(&cpu_t1, &wall_t1); // set CUDA execution parameters #ifdef OP_BLOCK_SIZE_6 int nthread = OP_BLOCK_SIZE_6; #else // int nthread = OP_block_size; int nthread = 128; #endif int nblocks = 200; // transfer global reduction data to GPU int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); reduct_size = MAX(reduct_size,sizeof(double)); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg1.data = OP_reduct_h + reduct_bytes; arg1.data_d = OP_reduct_d + reduct_bytes; for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) ((double *)arg1.data)[d+b*1] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); mvReductArraysToDevice(reduct_bytes); // work out shared memory requirements per element int nshared = 0; // execute plan int offset_s = nshared*OP_WARPSIZE; nshared = MAX(nshared*nthread,reduct_size*nthread); hipLaunchKernelGGL(( op_cuda_dotR), dim3(nblocks),dim3(nthread),nshared, 0, (double *) arg0.data_d, (double *) arg1.data_d, offset_s, set->size ); cutilSafeCall(hipDeviceSynchronize()); cutilCheckMsg("op_cuda_dotR execution failed\n"); // transfer global reduction data back to CPU mvReductArraysToHost(reduct_bytes); for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) arg1h[d] = arg1h[d] + ((double *)arg1.data)[d+b*1]; arg1.data = (char *)arg1h; op_mpi_reduce(&arg1,arg1h); } op_mpi_set_dirtybit(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[6].time += wall_t2 - wall_t1; OP_kernels[6].transfer += (float)set->size * arg0.size; }
ef1731d4f335006778d8dd2d92a814a34b209032.cu
// // auto-generated by op2.m on 19-Oct-2012 16:21:04 // // user function __device__ #include "dotR.h" // CUDA kernel function __global__ void op_cuda_dotR( double *arg0, double *arg1, int offset_s, int set_size ) { double arg1_l[1]; for (int d=0; d<1; d++) arg1_l[d]=ZERO_double; // process set elements for (int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x) { // user-supplied kernel call dotR( arg0+n, arg1_l ); } // global reductions for(int d=0; d<1; d++) op_reduction<OP_INC>(&arg1[d+blockIdx.x*1],arg1_l[d]); } // host stub function void op_par_loop_dotR(char const *name, op_set set, op_arg arg0, op_arg arg1 ){ double *arg1h = (double *)arg1.data; int nargs = 2; op_arg args[2]; args[0] = arg0; args[1] = arg1; if (OP_diags>2) { printf(" kernel routine w/o indirection: dotR\n"); } op_mpi_halo_exchanges(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0; op_timing_realloc(6); OP_kernels[6].name = name; OP_kernels[6].count += 1; if (set->size >0) { op_timers_core(&cpu_t1, &wall_t1); // set CUDA execution parameters #ifdef OP_BLOCK_SIZE_6 int nthread = OP_BLOCK_SIZE_6; #else // int nthread = OP_block_size; int nthread = 128; #endif int nblocks = 200; // transfer global reduction data to GPU int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); reduct_size = MAX(reduct_size,sizeof(double)); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg1.data = OP_reduct_h + reduct_bytes; arg1.data_d = OP_reduct_d + reduct_bytes; for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) ((double *)arg1.data)[d+b*1] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); mvReductArraysToDevice(reduct_bytes); // work out shared memory requirements per element int nshared = 0; // execute plan int offset_s = nshared*OP_WARPSIZE; nshared = MAX(nshared*nthread,reduct_size*nthread); op_cuda_dotR<<<nblocks,nthread,nshared>>>( (double *) arg0.data_d, (double *) arg1.data_d, offset_s, set->size ); cutilSafeCall(cudaDeviceSynchronize()); cutilCheckMsg("op_cuda_dotR execution failed\n"); // transfer global reduction data back to CPU mvReductArraysToHost(reduct_bytes); for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) arg1h[d] = arg1h[d] + ((double *)arg1.data)[d+b*1]; arg1.data = (char *)arg1h; op_mpi_reduce(&arg1,arg1h); } op_mpi_set_dirtybit(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[6].time += wall_t2 - wall_t1; OP_kernels[6].transfer += (float)set->size * arg0.size; }
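The dotR loop above relies on OP2's op_reduction and reduction-array plumbing, which are defined elsewhere. As a point of comparison, the same pattern of per-block partial sums followed by a host-side add can be written as a self-contained kernel. The sketch below assumes dotR accumulates a[i]*a[i] (dotR.h is not shown here) and uses an ordinary shared-memory tree reduction instead of op_reduction:

#include <cstdio>
#include <cuda_runtime.h>

// Each block reduces its grid-stride partial sum in shared memory and writes
// one value per block; the host then adds the per-block partials, mirroring
// the "for (int b=0; b<maxblocks; b++)" loop in op_par_loop_dotR above.
__global__ void dot_partial(const double *a, double *block_sums, int n) {
  extern __shared__ double sdata[];
  double local = 0.0;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x)
    local += a[i] * a[i];                 // stand-in for dotR(a + i, &local) (assumption)
  sdata[threadIdx.x] = local;
  __syncthreads();
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {  // blockDim.x must be a power of two
    if (threadIdx.x < s) sdata[threadIdx.x] += sdata[threadIdx.x + s];
    __syncthreads();
  }
  if (threadIdx.x == 0) block_sums[blockIdx.x] = sdata[0];
}

int main() {
  const int n = 1000, nblocks = 200, nthread = 128;
  double *a_h = new double[n];
  for (int i = 0; i < n; ++i) a_h[i] = 1.0;
  double *a_d, *partial_d;
  cudaMalloc((void**)&a_d, n * sizeof(double));
  cudaMalloc((void**)&partial_d, nblocks * sizeof(double));
  cudaMemcpy(a_d, a_h, n * sizeof(double), cudaMemcpyHostToDevice);
  dot_partial<<<nblocks, nthread, nthread * sizeof(double)>>>(a_d, partial_d, n);
  double partial_h[nblocks], result = 0.0;
  cudaMemcpy(partial_h, partial_d, nblocks * sizeof(double), cudaMemcpyDeviceToHost);
  for (int b = 0; b < nblocks; ++b) result += partial_h[b];  // host-side final add
  printf("r . r = %f\n", result);                            // 1000.0 for this input
  cudaFree(a_d); cudaFree(partial_d); delete[] a_h;
  return 0;
}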
0fd5a6161c10443a66a719429aac71d4ecd8b29b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void addKernel( int* c , const int* a , const int* b ) { int i = threadIdx.x; c[ i ] = a[ i ] + b[ i ]; } int main( ) { const int arraySize = 5; const int a[ arraySize ] = { 1, 2, 3, 4, 5 }; const int b[ arraySize ] = { 10, 20, 30, 40, 50 }; int c[ arraySize ] = { 0 }; int* dev_a = 0; int* dev_b = 0; int* dev_c = 0; hipError_t cudaStatus; // Alocar espao na memria do device cudaStatus = hipMalloc( ( void** ) &dev_c , arraySize * sizeof( int ) ); if ( cudaStatus != hipSuccess ) { fprintf( stderr , "hipMalloc failed!" ); goto Error; } cudaStatus = hipMalloc( ( void** ) &dev_a , arraySize * sizeof( int ) ); if ( cudaStatus != hipSuccess ) { fprintf( stderr , "hipMalloc failed!" ); goto Error; } cudaStatus = hipMalloc( ( void** ) &dev_b , arraySize * sizeof( int ) ); if ( cudaStatus != hipSuccess ) { fprintf( stderr , "hipMalloc failed!" ); goto Error; } // Copia os vetores do host para a device cudaStatus = hipMemcpy( dev_a , a , arraySize * sizeof( int ) , hipMemcpyHostToDevice ); if ( cudaStatus != hipSuccess ) { fprintf( stderr , "hipMemcpy failed!" ); goto Error; } cudaStatus = hipMemcpy( dev_b , b , arraySize * sizeof( int ) , hipMemcpyHostToDevice ); if ( cudaStatus != hipSuccess ) { fprintf( stderr , "hipMemcpy failed!" ); goto Error; } // Executar o kernel addKernel << <1 , arraySize >> > ( dev_c , dev_a , dev_b ); // Verificar se o kernel foi executado corretamente cudaStatus = hipGetLastError( ); if ( cudaStatus != hipSuccess ) { fprintf( stderr , "addKernel launch failed: %s\n" , hipGetErrorString( cudaStatus ) ); goto Error; } // Espera o kernel terminar e retorna quaisquer erros encontrados durante a execuo cudaStatus = hipDeviceSynchronize( ); if ( cudaStatus != hipSuccess ) { fprintf( stderr , "hipDeviceSynchronize returned error code %d after launching addKernel!\n" , cudaStatus ); goto Error; } // Copia o resultado do device para a memria do host. cudaStatus = hipMemcpy( c , dev_c , arraySize * sizeof( int ) , hipMemcpyDeviceToHost ); if ( cudaStatus != hipSuccess ) { fprintf( stderr , "hipMemcpy failed!" ); goto Error; } printf( "{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n" , c[ 0 ] , c[ 1 ] , c[ 2 ] , c[ 3 ] , c[ 4 ] ); // Limpa a memria Error: hipFree( dev_c ); hipFree( dev_a ); hipFree( dev_b ); cudaStatus = hipDeviceReset( ); if ( cudaStatus != hipSuccess ) { fprintf( stderr , "hipDeviceReset failed!" ); return 1; } return 0; }
0fd5a6161c10443a66a719429aac71d4ecd8b29b.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void addKernel( int* c , const int* a , const int* b ) { int i = threadIdx.x; c[ i ] = a[ i ] + b[ i ]; } int main( ) { const int arraySize = 5; const int a[ arraySize ] = { 1, 2, 3, 4, 5 }; const int b[ arraySize ] = { 10, 20, 30, 40, 50 }; int c[ arraySize ] = { 0 }; int* dev_a = 0; int* dev_b = 0; int* dev_c = 0; cudaError_t cudaStatus; // Alocar espaço na memória do device cudaStatus = cudaMalloc( ( void** ) &dev_c , arraySize * sizeof( int ) ); if ( cudaStatus != cudaSuccess ) { fprintf( stderr , "cudaMalloc failed!" ); goto Error; } cudaStatus = cudaMalloc( ( void** ) &dev_a , arraySize * sizeof( int ) ); if ( cudaStatus != cudaSuccess ) { fprintf( stderr , "cudaMalloc failed!" ); goto Error; } cudaStatus = cudaMalloc( ( void** ) &dev_b , arraySize * sizeof( int ) ); if ( cudaStatus != cudaSuccess ) { fprintf( stderr , "cudaMalloc failed!" ); goto Error; } // Copia os vetores do host para a device cudaStatus = cudaMemcpy( dev_a , a , arraySize * sizeof( int ) , cudaMemcpyHostToDevice ); if ( cudaStatus != cudaSuccess ) { fprintf( stderr , "cudaMemcpy failed!" ); goto Error; } cudaStatus = cudaMemcpy( dev_b , b , arraySize * sizeof( int ) , cudaMemcpyHostToDevice ); if ( cudaStatus != cudaSuccess ) { fprintf( stderr , "cudaMemcpy failed!" ); goto Error; } // Executar o kernel addKernel << <1 , arraySize >> > ( dev_c , dev_a , dev_b ); // Verificar se o kernel foi executado corretamente cudaStatus = cudaGetLastError( ); if ( cudaStatus != cudaSuccess ) { fprintf( stderr , "addKernel launch failed: %s\n" , cudaGetErrorString( cudaStatus ) ); goto Error; } // Espera o kernel terminar e retorna quaisquer erros encontrados durante a execução cudaStatus = cudaDeviceSynchronize( ); if ( cudaStatus != cudaSuccess ) { fprintf( stderr , "cudaDeviceSynchronize returned error code %d after launching addKernel!\n" , cudaStatus ); goto Error; } // Copia o resultado do device para a memória do host. cudaStatus = cudaMemcpy( c , dev_c , arraySize * sizeof( int ) , cudaMemcpyDeviceToHost ); if ( cudaStatus != cudaSuccess ) { fprintf( stderr , "cudaMemcpy failed!" ); goto Error; } printf( "{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n" , c[ 0 ] , c[ 1 ] , c[ 2 ] , c[ 3 ] , c[ 4 ] ); // Limpa a memória Error: cudaFree( dev_c ); cudaFree( dev_a ); cudaFree( dev_b ); cudaStatus = cudaDeviceReset( ); if ( cudaStatus != cudaSuccess ) { fprintf( stderr , "cudaDeviceReset failed!" ); return 1; } return 0; }
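The vector-add example above checks cudaStatus by hand after every runtime call. Such checks are commonly folded into a macro; the sketch below is illustrative only (the name CUDA_CHECK is not part of the file, and it aborts instead of jumping to the Error label, which is an assumption about acceptable error handling):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Evaluates a CUDA runtime call and aborts with context on failure.
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "%s:%d: %s failed: %s\n", __FILE__, __LINE__,   \
                    #call, cudaGetErrorString(err_));                       \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

// Usage, replacing the explicit if blocks above:
//   CUDA_CHECK(cudaMalloc((void**)&dev_c, arraySize * sizeof(int)));
//   CUDA_CHECK(cudaMemcpy(dev_a, a, arraySize * sizeof(int), cudaMemcpyHostToDevice));
//   CUDA_CHECK(cudaDeviceSynchronize());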
d671726e1fdcca3227abc6f590173e798622073a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "kernel.h" #include "math.h" #include <stdio.h> __global__ void sobelCUDA(unsigned char* inputImage, unsigned char* outputImage, int imageWidth, int imageHeight) { int pixelX = blockIdx.x * blockDim.x + threadIdx.x; int pixelY = blockIdx.y * blockDim.y + threadIdx.y; int offset = pixelX + pixelY * imageWidth; char hKernel[9] = { -1, 0, 1, -2, 0, 2, -1, 0, 1 }; char vKernel[9] = { -1, -2, -1, 0, 0, 0, 1, 2, 1 }; if (((pixelX > 0) && (pixelX < imageWidth-1)) && ((pixelY > 0) && (pixelY < imageHeight-1))) { // Magnitudes int magXR = 0, magXG = 0, magXB = 0; int magYR = 0, magYG = 0, magYB = 0; // Horizontal and Vertical convolution at point (x,y) for (int h = 0; h < 3; h++) { for (int v = 0; v < 3; v++) { // Current pixel in 3x3 convolution window int xn = pixelX + (h - 1); int yn = pixelY + (v - 1); int inputPixel = (xn + yn * imageWidth) * 4; // Horizontal Convolution int hKernelValue = hKernel[h*3 + v]; magXR += inputImage[inputPixel] * hKernelValue; magXG += inputImage[inputPixel+1] * hKernelValue; magXB += inputImage[inputPixel+2] * hKernelValue; // Vertical Convolution int vKernelValue = vKernel[h*3 + v]; magYR += inputImage[inputPixel] * vKernelValue; magYG += inputImage[inputPixel+1] * vKernelValue; magYB += inputImage[inputPixel+2] * vKernelValue; } } // Compute final pixel value // We clip the value to 255 in case we go over the 8-bit range // Instead of using norm, we apprximate using sum of abs, this // has been show to be as effective in this application int finalR = min(abs(magXR) + abs(magYR), 255); int finalG = min(abs(magXG) + abs(magYG), 255); int finalB = min(abs(magXB) + abs(magYB), 255); outputImage[offset * 4] = finalR; outputImage[offset * 4 + 1] = finalG; outputImage[offset * 4 + 2] = finalB; outputImage[offset * 4 + 3] = 255; } } void __declspec(dllexport) __cdecl sobelFilterCUDA(unsigned char* inputImageHost, unsigned char* outputImageHost, int imageWidth, int imageHeight) { unsigned char* inputImageDevice; unsigned char* outputImageDevice; int imageSize = imageWidth * imageHeight * 4 * sizeof(unsigned char); hipMalloc((void**)&inputImageDevice, imageSize); hipMalloc((void**)&outputImageDevice, imageSize); hipMemcpy(inputImageDevice, inputImageHost, imageSize, hipMemcpyHostToDevice); dim3 blockDims(16, 16); dim3 gridDims((unsigned int)ceil(((double)imageWidth / blockDims.x)), (unsigned int)ceil(((double)imageHeight / blockDims.y))); hipLaunchKernelGGL(( sobelCUDA), dim3(gridDims), dim3(blockDims), 0, 0, inputImageDevice, outputImageDevice, imageWidth, imageHeight); hipMemcpy(outputImageHost, outputImageDevice, imageSize, hipMemcpyDeviceToHost); hipFree(inputImageDevice); hipFree(outputImageDevice); }
d671726e1fdcca3227abc6f590173e798622073a.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "kernel.h" #include "math.h" #include <stdio.h> __global__ void sobelCUDA(unsigned char* inputImage, unsigned char* outputImage, int imageWidth, int imageHeight) { int pixelX = blockIdx.x * blockDim.x + threadIdx.x; int pixelY = blockIdx.y * blockDim.y + threadIdx.y; int offset = pixelX + pixelY * imageWidth; char hKernel[9] = { -1, 0, 1, -2, 0, 2, -1, 0, 1 }; char vKernel[9] = { -1, -2, -1, 0, 0, 0, 1, 2, 1 }; if (((pixelX > 0) && (pixelX < imageWidth-1)) && ((pixelY > 0) && (pixelY < imageHeight-1))) { // Magnitudes int magXR = 0, magXG = 0, magXB = 0; int magYR = 0, magYG = 0, magYB = 0; // Horizontal and Vertical convolution at point (x,y) for (int h = 0; h < 3; h++) { for (int v = 0; v < 3; v++) { // Current pixel in 3x3 convolution window int xn = pixelX + (h - 1); int yn = pixelY + (v - 1); int inputPixel = (xn + yn * imageWidth) * 4; // Horizontal Convolution int hKernelValue = hKernel[h*3 + v]; magXR += inputImage[inputPixel] * hKernelValue; magXG += inputImage[inputPixel+1] * hKernelValue; magXB += inputImage[inputPixel+2] * hKernelValue; // Vertical Convolution int vKernelValue = vKernel[h*3 + v]; magYR += inputImage[inputPixel] * vKernelValue; magYG += inputImage[inputPixel+1] * vKernelValue; magYB += inputImage[inputPixel+2] * vKernelValue; } } // Compute final pixel value // We clip the value to 255 in case we go over the 8-bit range // Instead of using norm, we apprximate using sum of abs, this // has been show to be as effective in this application int finalR = min(abs(magXR) + abs(magYR), 255); int finalG = min(abs(magXG) + abs(magYG), 255); int finalB = min(abs(magXB) + abs(magYB), 255); outputImage[offset * 4] = finalR; outputImage[offset * 4 + 1] = finalG; outputImage[offset * 4 + 2] = finalB; outputImage[offset * 4 + 3] = 255; } } void __declspec(dllexport) __cdecl sobelFilterCUDA(unsigned char* inputImageHost, unsigned char* outputImageHost, int imageWidth, int imageHeight) { unsigned char* inputImageDevice; unsigned char* outputImageDevice; int imageSize = imageWidth * imageHeight * 4 * sizeof(unsigned char); cudaMalloc((void**)&inputImageDevice, imageSize); cudaMalloc((void**)&outputImageDevice, imageSize); cudaMemcpy(inputImageDevice, inputImageHost, imageSize, cudaMemcpyHostToDevice); dim3 blockDims(16, 16); dim3 gridDims((unsigned int)ceil(((double)imageWidth / blockDims.x)), (unsigned int)ceil(((double)imageHeight / blockDims.y))); sobelCUDA<<<gridDims, blockDims>>>(inputImageDevice, outputImageDevice, imageWidth, imageHeight); cudaMemcpy(outputImageHost, outputImageDevice, imageSize, cudaMemcpyDeviceToHost); cudaFree(inputImageDevice); cudaFree(outputImageDevice); }
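sobelFilterCUDA expects tightly packed 8-bit RGBA pixels on the host (the kernel indexes the image with a stride of 4 bytes per pixel), and it never writes the one-pixel border, since the bounds test excludes x = 0, x = width-1, y = 0 and y = height-1. A minimal hypothetical caller, with the image size and contents made up for illustration and the DLL export/calling-convention decorations omitted:

#include <cstdlib>
#include <cstring>

// Declaration of the entry point exported by the file above.
void sobelFilterCUDA(unsigned char* inputImageHost, unsigned char* outputImageHost,
                     int imageWidth, int imageHeight);

int main() {
    const int width = 640, height = 480;             // made-up dimensions
    const size_t bytes = (size_t)width * height * 4; // RGBA, 1 byte per channel
    unsigned char* input  = (unsigned char*)std::malloc(bytes);
    unsigned char* output = (unsigned char*)std::calloc(bytes, 1); // border pixels stay 0
    std::memset(input, 128, bytes);                  // placeholder image data
    sobelFilterCUDA(input, output, width, height);   // Sobel magnitude written to output
    std::free(input);
    std::free(output);
    return 0;
}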
e440be3509c531ac5bc1867ff3c9a9c9e8e79c79.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdlib> #include <iostream> #include <hpc/cuda/check.h> #include <hpc/cuda/properties.h> #include <hpc/cuda/scal.hpp> #include <hpc/cuda/axpy.hpp> #define N 257 std::size_t ceildiv(std::size_t x, std::size_t y) { /* note that we expect x > 0 && y > 0; not safe against overflows but we expect y to be small */ return (x + y - 1) / y; } int main() { using namespace hpc::cuda; double a[N]; double b[N]; for (std::size_t i = 0; i < N; ++i) { a[i] = i; b[i] = i*i; } /* transfer vectors to GPU memory */ double* cuda_a; CHECK_CUDA(hipMalloc, (void**)&cuda_a, N * sizeof(double)); CHECK_CUDA(hipMemcpy, cuda_a, a, N * sizeof(double), hipMemcpyHostToDevice); double* cuda_b; CHECK_CUDA(hipMalloc, (void**)&cuda_b, N * sizeof(double)); CHECK_CUDA(hipMemcpy, cuda_b, b, N * sizeof(double), hipMemcpyHostToDevice); /* execute kernel function on GPU */ std::size_t warp_size = hpc::cuda::get_warp_size(); /* typically 32 */ std::size_t nof_warps = ceildiv(N, warp_size); std::size_t warps_per_block = hpc::cuda::get_max_threads_per_block() / warp_size / 4; /* typically 8*/ std::size_t nof_blocks = ceildiv(nof_warps, warps_per_block); std::size_t threads_per_block; if (nof_blocks == 1) { threads_per_block = N; } else { threads_per_block = warps_per_block * warp_size; } //axpy<<<nof_blocks, threads_per_block>>>(N, 2.0, cuda_a, cuda_b); hipLaunchKernelGGL(( scal), dim3(nof_blocks), dim3(threads_per_block), 0, 0, N,2, cuda_a); /* transfer result vector from GPU to host memory */ CHECK_CUDA(hipMemcpy, a, cuda_a, N * sizeof(double), hipMemcpyDeviceToHost); /* free space allocated at GPU memory */ CHECK_CUDA(hipFree, cuda_a); /* print result */ for (std::size_t i = 0; i < N; ++i) { std::cout << " " << a[i]; if (i % 10 == 0) std::cout << std::endl; } std::cout << std::endl; }
e440be3509c531ac5bc1867ff3c9a9c9e8e79c79.cu
#include <cstdlib> #include <iostream> #include <hpc/cuda/check.h> #include <hpc/cuda/properties.h> #include <hpc/cuda/scal.hpp> #include <hpc/cuda/axpy.hpp> #define N 257 std::size_t ceildiv(std::size_t x, std::size_t y) { /* note that we expect x > 0 && y > 0; not safe against overflows but we expect y to be small */ return (x + y - 1) / y; } int main() { using namespace hpc::cuda; double a[N]; double b[N]; for (std::size_t i = 0; i < N; ++i) { a[i] = i; b[i] = i*i; } /* transfer vectors to GPU memory */ double* cuda_a; CHECK_CUDA(cudaMalloc, (void**)&cuda_a, N * sizeof(double)); CHECK_CUDA(cudaMemcpy, cuda_a, a, N * sizeof(double), cudaMemcpyHostToDevice); double* cuda_b; CHECK_CUDA(cudaMalloc, (void**)&cuda_b, N * sizeof(double)); CHECK_CUDA(cudaMemcpy, cuda_b, b, N * sizeof(double), cudaMemcpyHostToDevice); /* execute kernel function on GPU */ std::size_t warp_size = hpc::cuda::get_warp_size(); /* typically 32 */ std::size_t nof_warps = ceildiv(N, warp_size); std::size_t warps_per_block = hpc::cuda::get_max_threads_per_block() / warp_size / 4; /* typically 8*/ std::size_t nof_blocks = ceildiv(nof_warps, warps_per_block); std::size_t threads_per_block; if (nof_blocks == 1) { threads_per_block = N; } else { threads_per_block = warps_per_block * warp_size; } //axpy<<<nof_blocks, threads_per_block>>>(N, 2.0, cuda_a, cuda_b); scal<<<nof_blocks, threads_per_block>>>(N,2, cuda_a); /* transfer result vector from GPU to host memory */ CHECK_CUDA(cudaMemcpy, a, cuda_a, N * sizeof(double), cudaMemcpyDeviceToHost); /* free space allocated at GPU memory */ CHECK_CUDA(cudaFree, cuda_a); /* print result */ for (std::size_t i = 0; i < N; ++i) { std::cout << " " << a[i]; if (i % 10 == 0) std::cout << std::endl; } std::cout << std::endl; }
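With the typical values the comments mention (warp_size = 32, get_max_threads_per_block() = 1024, hence warps_per_block = 8), the launch geometry for N = 257 works out as follows; on a device with different limits the figures change accordingly:

nof_warps         = ceildiv(257, 32) = 9
warps_per_block   = 1024 / 32 / 4    = 8
nof_blocks        = ceildiv(9, 8)    = 2   (so the nof_blocks == 1 branch is not taken)
threads_per_block = 8 * 32           = 256

giving 2 * 256 = 512 threads for 257 elements, with the surplus threads presumably bounds-checked inside scal.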
ac15b6d0ad8fbba1fd7c9d32846d2fd0274301a8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <assert.h> #include <layer_kernels.cuh> /* * E = -log(y_t) * probs: (numOut, numCases) * labels: (1, numCases) * maxProbs: (1, numCases) * labelLogProbs: (1, numCases) (*out) * correctProbs: (1, numCases) (*out) * * target: (1, numCases) */ __global__ void kLogregCost(float* probs, float* labels, float* maxProbs, float* labelLogProbs, float* correctProbs, const int numCases, const int numOut) { const int tx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x; if (tx < numCases) { const int label = int(labels[tx]); const float maxp = maxProbs[tx]; const float labelp = probs[label * numCases + tx]; labelLogProbs[tx] = __logf(labelp); /* * Compute the probability of guessing the correct case if you take the most-probable label. * * This is done like this: * * - If the most probable label is not equal to the true label, then the probability is zero. * - Otherwise, the probability is 1 / (number of labels whose probability is equal to the maximum). * * This is certainly overkill -- in practice, it's just about impossible for two labels to get assigned * maximum probability. But it's a safety measure to prevent over-estimating your accuracy. * Though it could never happen in reality. Well it could. But it wouldn't. Cool? 
*/ if (labelp != maxp) { correctProbs[tx] = 0; } else { int numMax = 0; for (int i = 0; i < numOut; i++) { numMax += probs[i * numCases + tx] == maxp; } correctProbs[tx] = 1.0f / float(numMax); } } } /* * E = -log(y_t) * y_l: (numOut, numCases) * labels: (1, numCases) * * dE_dy_l: (numOut, numCases) */ template <bool add> __global__ void kLogregCostGrad(float* y_l, float* labels, float* dE_dy_l, const int numCases, const int numOut, const float gradCoeff) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { const int label = int(labels[tx]); float v = gradCoeff * (label == ty); v = __fdividef(v, (y_l[tidx] + 0.000001)); if (add) { dE_dy_l[tidx] += v; } else { dE_dy_l[tidx] = v; } } } /* * dE_dy_l: (numOut, numCases) * y_l: (numOut, numCases) * * dE_dx_l: (numOut, numCases) */ template <bool add> __global__ void kSoftmaxGrad(float* dE_dy_l, float* y_l, float* dE_dx_l, const int numCases, const int numOut) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { float v = 0; for (int j = 0; j < numOut; j++) { v += dE_dy_l[j * numCases + tx] * ((j == ty) - y_l[j * numCases + tx]); } v *= y_l[tidx]; if (add) { dE_dx_l[tidx] += v; } else { dE_dx_l[tidx] = v; } } } /* * E = -log(y_t) * y_l: (numOut, numCases) * labels: (1, numCases) * * dE_dx_l: (numOut, numCases) */ template <bool add> __global__ void kLogregSoftmaxGrad(float* y_l, float* labels, float* dE_dx_l, const int numCases, const int numOut, const float gradCoeff) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { const int label = int(labels[tx]); float v = gradCoeff * ((label == ty) - y_l[tidx]); if (add) { dE_dx_l[tidx] += v; } else { dE_dx_l[tidx] = v; } } } template <int B_X, bool add> __global__ void kEltwiseMaxGrad(float* actGrad, float* input, float* output, float* target, const int numElements) { for (int i = B_X * blockIdx.x + threadIdx.x; i < numElements; i += B_X * gridDim.x) { if (add) { target[i] += actGrad[i] * (output[i] == input[i]); } else { target[i] = actGrad[i] * (output[i] == input[i]); } } } void computeEltwiseMaxGrad(NVMatrix& actGrad, NVMatrix& input, NVMatrix& output, NVMatrix& target, bool add) { assert(actGrad.isContiguous()); assert(output.isContiguous()); assert(input.isContiguous()); assert(actGrad.isSameDims(input)); assert(actGrad.isSameDims(output)); dim3 blocks(DIVUP(actGrad.getNumElements(), 128)); dim3 threads(128); if (add) { assert(actGrad.isSameDims(target)); hipFuncSetCacheConfig(kEltwiseMaxGrad<128, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kEltwiseMaxGrad<128, true>), dim3(blocks), dim3(threads), 0, 0, actGrad.getDevData(), input.getDevData(), output.getDevData(), target.getDevData(), actGrad.getNumElements()); } else { target.resize(actGrad); hipFuncSetCacheConfig(kEltwiseMaxGrad<128, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kEltwiseMaxGrad<128, false>), dim3(blocks), dim3(threads), 0, 0, actGrad.getDevData(), input.getDevData(), output.getDevData(), target.getDevData(), actGrad.getNumElements()); } cutilCheckMsg("computeEltwiseMaxGrad: Kernel execution failed"); } /* * E = -log(y_t) * probs: (numOut, numCases) * labels: (1, 
numCases) * maxProbs: (1, numCases) * labelLogProbs: (1, numCases) (*out) * correctProbs: (1, numCases) (*out) * * target: (1, numCases) */ void computeLogregCost(NVMatrix& labels, NVMatrix& probs, NVMatrix& labelLogProbs_out, NVMatrix& correctProbs_out) { int numCases = probs.getNumCols(); int numOut = probs.getNumRows(); assert(labels.getNumElements() == numCases); assert(!labels.isTrans()); assert(!probs.isTrans()); assert(labels.isContiguous()); assert(probs.isContiguous()); NVMatrix& maxProbs = probs.max(0); labelLogProbs_out.resize(1, numCases); correctProbs_out.resize(1, numCases); dim3 threads(LOGREG_ERR_THREADS_X, 1); dim3 blocks(DIVUP(numCases, LOGREG_ERR_THREADS_X), 1); hipFuncSetCacheConfig(kLogregCost, hipFuncCachePreferL1); hipLaunchKernelGGL(( kLogregCost), dim3(blocks), dim3(threads), 0, 0, probs.getDevData(), labels.getDevData(), maxProbs.getDevData(), labelLogProbs_out.getDevData(), correctProbs_out.getDevData(), numCases, numOut); cutilCheckMsg("computeLogregCost: Kernel execution failed"); // hipDeviceSynchronize(); delete &maxProbs; } void computeLogregGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) { int numCases = probs.getLeadingDim(); int numOut = probs.getFollowingDim(); assert(labels.getNumElements() == numCases); assert(probs.isContiguous()); assert(target.isContiguous()); assert(labels.isContiguous()); assert(!labels.isTrans()); assert(!probs.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); if (!add) { target.resize(probs); hipLaunchKernelGGL(( kLogregCostGrad<false>), dim3(blocks), dim3(threads), 0, 0, probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } else { hipLaunchKernelGGL(( kLogregCostGrad<true>), dim3(blocks), dim3(threads), 0, 0, probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } cutilCheckMsg("computeLogregGrad: Kernel execution failed"); } void computeSoftmaxGrad(NVMatrix& acts, NVMatrix& actsGrad, NVMatrix& target, bool add) { int numCases = acts.getLeadingDim(); int numOut = acts.getFollowingDim(); assert(acts.isSameDims(actsGrad)); assert(acts.isContiguous()); assert(actsGrad.isContiguous()); assert(target.isContiguous()); assert(acts.isTrans()); assert(actsGrad.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); if (!add) { target.resize(acts); hipLaunchKernelGGL(( kSoftmaxGrad<false>), dim3(blocks), dim3(threads), 0, 0, actsGrad.getDevData(), acts.getDevData(), target.getDevData(), numCases, numOut); } else { hipLaunchKernelGGL(( kSoftmaxGrad<true>), dim3(blocks), dim3(threads), 0, 0, actsGrad.getDevData(), acts.getDevData(), target.getDevData(), numCases, numOut); } cutilCheckMsg("computeSoftmaxGrad: Kernel execution failed"); } void computeLogregSoftmaxGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) { int numCases = probs.getLeadingDim(); int numOut = probs.getFollowingDim(); assert(labels.getNumElements() == numCases); assert(probs.isContiguous()); assert(target.isContiguous()); assert(labels.isContiguous()); assert(probs.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); if (!add) { target.resize(probs); hipLaunchKernelGGL(( kLogregSoftmaxGrad<false>), dim3(blocks), 
dim3(threads), 0, 0, probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } else { hipLaunchKernelGGL(( kLogregSoftmaxGrad<true>), dim3(blocks), dim3(threads), 0, 0, probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } cutilCheckMsg("computeLogregSoftmaxGrad: Kernel execution failed"); }
ac15b6d0ad8fbba1fd7c9d32846d2fd0274301a8.cu
/* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <assert.h> #include <layer_kernels.cuh> /* * E = -log(y_t) * probs: (numOut, numCases) * labels: (1, numCases) * maxProbs: (1, numCases) * labelLogProbs: (1, numCases) (*out) * correctProbs: (1, numCases) (*out) * * target: (1, numCases) */ __global__ void kLogregCost(float* probs, float* labels, float* maxProbs, float* labelLogProbs, float* correctProbs, const int numCases, const int numOut) { const int tx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x; if (tx < numCases) { const int label = int(labels[tx]); const float maxp = maxProbs[tx]; const float labelp = probs[label * numCases + tx]; labelLogProbs[tx] = __logf(labelp); /* * Compute the probability of guessing the correct case if you take the most-probable label. * * This is done like this: * * - If the most probable label is not equal to the true label, then the probability is zero. * - Otherwise, the probability is 1 / (number of labels whose probability is equal to the maximum). * * This is certainly overkill -- in practice, it's just about impossible for two labels to get assigned * maximum probability. But it's a safety measure to prevent over-estimating your accuracy. * Though it could never happen in reality. Well it could. But it wouldn't. Cool? 
*/ if (labelp != maxp) { correctProbs[tx] = 0; } else { int numMax = 0; for (int i = 0; i < numOut; i++) { numMax += probs[i * numCases + tx] == maxp; } correctProbs[tx] = 1.0f / float(numMax); } } } /* * E = -log(y_t) * y_l: (numOut, numCases) * labels: (1, numCases) * * dE_dy_l: (numOut, numCases) */ template <bool add> __global__ void kLogregCostGrad(float* y_l, float* labels, float* dE_dy_l, const int numCases, const int numOut, const float gradCoeff) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { const int label = int(labels[tx]); float v = gradCoeff * (label == ty); v = __fdividef(v, (y_l[tidx] + 0.000001)); if (add) { dE_dy_l[tidx] += v; } else { dE_dy_l[tidx] = v; } } } /* * dE_dy_l: (numOut, numCases) * y_l: (numOut, numCases) * * dE_dx_l: (numOut, numCases) */ template <bool add> __global__ void kSoftmaxGrad(float* dE_dy_l, float* y_l, float* dE_dx_l, const int numCases, const int numOut) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { float v = 0; for (int j = 0; j < numOut; j++) { v += dE_dy_l[j * numCases + tx] * ((j == ty) - y_l[j * numCases + tx]); } v *= y_l[tidx]; if (add) { dE_dx_l[tidx] += v; } else { dE_dx_l[tidx] = v; } } } /* * E = -log(y_t) * y_l: (numOut, numCases) * labels: (1, numCases) * * dE_dx_l: (numOut, numCases) */ template <bool add> __global__ void kLogregSoftmaxGrad(float* y_l, float* labels, float* dE_dx_l, const int numCases, const int numOut, const float gradCoeff) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { const int label = int(labels[tx]); float v = gradCoeff * ((label == ty) - y_l[tidx]); if (add) { dE_dx_l[tidx] += v; } else { dE_dx_l[tidx] = v; } } } template <int B_X, bool add> __global__ void kEltwiseMaxGrad(float* actGrad, float* input, float* output, float* target, const int numElements) { for (int i = B_X * blockIdx.x + threadIdx.x; i < numElements; i += B_X * gridDim.x) { if (add) { target[i] += actGrad[i] * (output[i] == input[i]); } else { target[i] = actGrad[i] * (output[i] == input[i]); } } } void computeEltwiseMaxGrad(NVMatrix& actGrad, NVMatrix& input, NVMatrix& output, NVMatrix& target, bool add) { assert(actGrad.isContiguous()); assert(output.isContiguous()); assert(input.isContiguous()); assert(actGrad.isSameDims(input)); assert(actGrad.isSameDims(output)); dim3 blocks(DIVUP(actGrad.getNumElements(), 128)); dim3 threads(128); if (add) { assert(actGrad.isSameDims(target)); cudaFuncSetCacheConfig(kEltwiseMaxGrad<128, true>, cudaFuncCachePreferL1); kEltwiseMaxGrad<128, true><<<blocks, threads>>>(actGrad.getDevData(), input.getDevData(), output.getDevData(), target.getDevData(), actGrad.getNumElements()); } else { target.resize(actGrad); cudaFuncSetCacheConfig(kEltwiseMaxGrad<128, false>, cudaFuncCachePreferL1); kEltwiseMaxGrad<128, false><<<blocks, threads>>>(actGrad.getDevData(), input.getDevData(), output.getDevData(), target.getDevData(), actGrad.getNumElements()); } cutilCheckMsg("computeEltwiseMaxGrad: Kernel execution failed"); } /* * E = -log(y_t) * probs: (numOut, numCases) * labels: (1, numCases) * maxProbs: (1, numCases) * labelLogProbs: (1, numCases) (*out) 
* correctProbs: (1, numCases) (*out) * * target: (1, numCases) */ void computeLogregCost(NVMatrix& labels, NVMatrix& probs, NVMatrix& labelLogProbs_out, NVMatrix& correctProbs_out) { int numCases = probs.getNumCols(); int numOut = probs.getNumRows(); assert(labels.getNumElements() == numCases); assert(!labels.isTrans()); assert(!probs.isTrans()); assert(labels.isContiguous()); assert(probs.isContiguous()); NVMatrix& maxProbs = probs.max(0); labelLogProbs_out.resize(1, numCases); correctProbs_out.resize(1, numCases); dim3 threads(LOGREG_ERR_THREADS_X, 1); dim3 blocks(DIVUP(numCases, LOGREG_ERR_THREADS_X), 1); cudaFuncSetCacheConfig(kLogregCost, cudaFuncCachePreferL1); kLogregCost<<<blocks, threads>>>(probs.getDevData(), labels.getDevData(), maxProbs.getDevData(), labelLogProbs_out.getDevData(), correctProbs_out.getDevData(), numCases, numOut); cutilCheckMsg("computeLogregCost: Kernel execution failed"); // cudaThreadSynchronize(); delete &maxProbs; } void computeLogregGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) { int numCases = probs.getLeadingDim(); int numOut = probs.getFollowingDim(); assert(labels.getNumElements() == numCases); assert(probs.isContiguous()); assert(target.isContiguous()); assert(labels.isContiguous()); assert(!labels.isTrans()); assert(!probs.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); if (!add) { target.resize(probs); kLogregCostGrad<false><<<blocks, threads>>>(probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } else { kLogregCostGrad<true><<<blocks, threads>>>(probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } cutilCheckMsg("computeLogregGrad: Kernel execution failed"); } void computeSoftmaxGrad(NVMatrix& acts, NVMatrix& actsGrad, NVMatrix& target, bool add) { int numCases = acts.getLeadingDim(); int numOut = acts.getFollowingDim(); assert(acts.isSameDims(actsGrad)); assert(acts.isContiguous()); assert(actsGrad.isContiguous()); assert(target.isContiguous()); assert(acts.isTrans()); assert(actsGrad.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); if (!add) { target.resize(acts); kSoftmaxGrad<false><<<blocks, threads>>>(actsGrad.getDevData(), acts.getDevData(), target.getDevData(), numCases, numOut); } else { kSoftmaxGrad<true><<<blocks, threads>>>(actsGrad.getDevData(), acts.getDevData(), target.getDevData(), numCases, numOut); } cutilCheckMsg("computeSoftmaxGrad: Kernel execution failed"); } void computeLogregSoftmaxGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) { int numCases = probs.getLeadingDim(); int numOut = probs.getFollowingDim(); assert(labels.getNumElements() == numCases); assert(probs.isContiguous()); assert(target.isContiguous()); assert(labels.isContiguous()); assert(probs.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); if (!add) { target.resize(probs); kLogregSoftmaxGrad<false><<<blocks, threads>>>(probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } else { kLogregSoftmaxGrad<true><<<blocks, threads>>>(probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } cutilCheckMsg("computeLogregSoftmaxGrad: 
Kernel execution failed"); }
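/*
 * Illustrative addition (not from the original source): a minimal host-side
 * reference for the fused logreg+softmax gradient that kLogregSoftmaxGrad
 * above computes per element, dE_dx[j,c] = coeff * ((labels[c] == j) - y[j,c]).
 * The function and parameter names are hypothetical; it assumes the same
 * layout as the kernels (numOut rows by numCases columns, case index fastest)
 * and is intended only for checking device results on small inputs.
 */
static void logregSoftmaxGradHostRef(const float* y, const float* labels,
                                     float* dE_dx, int numCases, int numOut,
                                     float coeff, int add) {
    for (int j = 0; j < numOut; j++) {          /* output (class) index */
        for (int c = 0; c < numCases; c++) {    /* case (column) index  */
            const int idx = j * numCases + c;
            const float indicator = ((int)labels[c] == j) ? 1.0f : 0.0f;
            const float v = coeff * (indicator - y[idx]);
            if (add) dE_dx[idx] += v; else dE_dx[idx] = v;
        }
    }
}
/*
 * Usage sketch (hypothetical values), 3 classes x 2 cases, columns sum to 1:
 *   float y[6]      = {0.7f, 0.1f,   0.2f, 0.6f,   0.1f, 0.3f};
 *   float labels[2] = {0.0f, 2.0f};
 *   float grad[6];
 *   logregSoftmaxGradHostRef(y, labels, grad, 2, 3, 1.0f, 0);
 *   // grad[0] = 1 - 0.7 = 0.3 (true class of case 0), grad[2] = -0.2, ...
 */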
8e3307143f1f89355de0c7afc927ccbb097c3311.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************************************************** * * File: cudaSaTabsearch.cu * Author: Alex Stivala * Created: January 2010 * * $Id: cudaSaTabsearch.cu 4753 2013-11-20 03:16:27Z astivala $ * * CUDA host code for simulated annealing tableau matching (discrete). * This is a CUDA implemenation of the FORTRAN subroutine TSAMTD. * Since the GPU has limited memory (and specifically, very limited * per block shared memory), we split the database into 'small' and * 'large' structures. The small ones can run on the GPU in shared memory, * the large ones cannot so we either have to not use shared memory * (OK, but a bit slower) or run them on the host. * When runnign on the host, we can simultaneously run the GPU and * host in separate threads. For multiple GPU cards, CUDA also requires * that there is a separate host thread for each GPU, so this program * is multithreaded: each thread is either for a separate GPU or for * running the same kernel (but compiled for host) on the host CPU. * * Usage: cudaSaTabsearch [-c] [-q dbfile] [-r restarts] < inputfile * * -c : run on host CPU not GPU card * * -q : query list mode: instead of reading query data on stdin * just as in the original Fortran version tlocsd, a list * of query sids to be read from the database is read on stdin (one per * line), * and db filenaame is specified on command * line. In this mode options are assumed as LORDER=T, LTYPE=T, * LSOLN=N. The output is still to stdout, but each query following * immediately from the previous (can parse using the header comment * niformation lines as separators. * * -r restarts: number of restarts (iterations of cooling schedule). * Should be a multiple of blocksize. Defaults to 128. * * The 'database' to search is an ASCII file of tableaux * (Omega matrices) in format described in rdtabd.f. * * The results are printed to stdout as * * name rawscore norm2score z-score p-value * * * Both the name of the database file to read, and the actual * query tableau are read from stdin. * The first line is the name * of the database file. * The second line is for options. There are currently 3 logical * options, for SSE type constraint (only allow SSEs of same type ot * match) and ordering constraint (disallow out of sequence order * matches). The third is to output not just the scores but also solution * vector values. * They are single character logical values (T or F). * First is type, second is order, third is solution output, * separated by one space. * * The subsequent lines are a single tableau in the same format as * each tableau entry in the database i.e.: * * The first line of an entry is the identifier and * order of tableau (i.e. dimension of square array), then * each subsequent row is a row of the tableau, lower triangle * only (since it is symmetric). * The diagonal entries are meaningless (self-angle) in tableaux, * and are included instead to specify the SSE type, with * the following codes: * * e beta strand * xa alpha helix * xi pi helix * xg 3_10 helix * * Width of identifier is 8 chars, blank padded on right, * width of order is 4 digits, blank padded on left. * There is a single space between identifier and order. * Each entry in tableau is two characters, with a space betwen * each on a line, and one line * per row of matrix. * * Following the tableau is the distance matrix. * Each row is a row of the distance matrix, lower triangle * only (since it is symmetric). 
* The diagonal entries are meaningless (self-distance) * and are included instead to specify the SSE type, with * the following codes: * * 0.000 beta strand * 1.000 alpha helix * 2.000 pi helix * 3.000 3_10 helix * * Each entry in matrix is in Angstroms format * F6.3 with a space between each on a line, and one line * per row of matrix. * * * E.g.: * * /local/charikar/astivala/tableauxdb/astral/tableauxdistmatrixdb.ascii * T T F * D1UBIA_ 8 * e * OT e * LE RT xa * PD OS RD xg * RT LE RT LS e * LE RD LE LS OT e * RT LS LS RD PE OS xg * PE RT LE RD OT PE RT e * 0.000 * 4.501 0.000 * 1.662 10.386 1.000 * 16.932 17.644 9.779 3.000 * 10.588 13.738 11.815 10.527 0.000 * 15.025 18.692 17.143 15.341 6.466 0.000 * 15.298 17.276 16.276 20.075 13.264 11.610 3.000 * 7.549 11.072 12.248 12.446 4.583 9.903 15.689 0.000 * * * *****************************************************************************/ #define CUDASATABSEARCH_MAIN 1 #include <stdlib.h> #include <stdio.h> #include <getopt.h> #include <time.h> #include <string.h> #include <multithreading.h> #include <helper_cuda.h> #include <helper_timer.h> #include <hiprand/hiprand_kernel.h> #include "parsetableaux.h" #include "cudaSaTabsearch_kernel.h" #include "cudaGetDeviceConstantAddresses.h" #include "gumbelstats.h" /***************************************************************************** * * Type definitions * *****************************************************************************/ /* dbIndex_t is for the query list mode, an array of these gives for each query the index in the appropriate ('small' or 'large' according to the large flag) tableaux and distmatrix db arrays */ typedef struct dbIndex_s { bool large; /* true if query is 'large' (>MAXDIM_GPU) structure */ int index; /* index in tableaux and distmatrix db list, or 'large' tableaux and distmatrix db list if large is true */ } dbIndex_t; /* searchParams_t is a struct for parameter to tableau search functions dcelared as CUT_THREADROUTINE to be callable as threads */ typedef struct searchParams_s { int ltype; int lorder; int lsoln; /* type,order,soln flags */ int maxstart; /* number of restarts */ int maxdim; /*dimension of tableaux, distmatrices here */ int num_queries; /* number of queries; 0 if not query list mode */ int single_query_qid; /* if >=0, do only the one at this index */ dbIndex_t *query_dbindex_list; /* if num_queries>0, the query db index */ char qtab[MAXDIM*MAXDIM]; /* if num_queries==0, the query tableau */ float qdmat[MAXDIM*MAXDIM]; /* the query distmatrix*/ char qid[LABELSIZE+1]; /* the query identifier*/ int qn; /* the query order */ char *qssetypes; /* the query SSE types*/ int dbsize; /* number of entries in the db */ char *tableaux; /* the tableaux database */ float *distmatrices; /* the distance matrices database */ int *orders; /* orders of entries in db */ char *names; /* names of entries in db */ } searchParams_t; /***************************************************************************** * * Globals * *****************************************************************************/ static char dbfile[MAX_LINE_LEN]; /* database file name */ static bool use_gpu = true; /* use the GPU */ static bool use_shared_memory = true; /* use GPU shared mem for db structs */ static char *tableaux, *large_tableaux; /* small and large tableaux */ static float *distmatrices, *large_distmatrices; /* same for dist.matrices*/ static int *orders, *large_orders; /* and for orders */ static char *names, *large_names; /* and names */ static bool querydbmode = false; /* use 
list of query ids in db */ static char *queryid_list = NULL; /* this is the list of query ids */ static dbIndex_t *query_dbindex_list = NULL; /* and their indices in db */ static int maxstart = DEFAULT_MAXSTART; /* number of restarts */ /* * init_rng() * * Initialize CURAND pseudrandom number generator * See CUDA Toolkit 4.1 CURAND Guide (p.21) * * Parameters: * state - CURAND state for random number generation * */ __global__ void init_rng(hiprandState_t *state) { int tid=blockIdx.x*blockDim.x+threadIdx.x; /* give each therad same seed, different sequence number, no offset */ hiprand_init(1234, tid, 0, &state[tid]); } /////////////////////////////////////////////////////////////////////////////// // Common host and device function /////////////////////////////////////////////////////////////////////////////// //ceil(a / b) extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); } //floor(a / b) extern "C" int iDivDown(int a, int b){ return a / b; } //Align a to nearest higher multiple of b extern "C" int iAlignUp(int a, int b){ return ((a % b) != 0) ? (a - a % b + b) : a; } //Align a to nearest lower multiple of b extern "C" int iAlignDown(int a, int b){ return a - a % b; } /* * tabsearch_host_thread - run the tableau search kernel on host CPU * * Started as a thread by cutStartThread in main * * Parameters: * params - paramter block for thread. See comments on searchParams_t defn. * * Return value: None. * */ static CUT_THREADPROC tabsearch_host_thread(searchParams_t *params) { /* extern declartions of host version of gpu constant memory */ extern int c_qn_host; // query structure size extern char c_qtab_host[MAXDIM*MAXDIM]; // query tableau extern float c_qdmat_host[MAXDIM*MAXDIM]; // query distance matrix extern char c_qssetypes_host[MAXDIM]; // main diagonal of c_qn StopWatchInterface *hTimer = NULL; double runtime; int *ssemaps; int i,j; char qid[LABELSIZE+1]; int *scores; double norm2score,zscore,pvalue; int query_count = (params->num_queries == 0 || params->single_query_qid >= 0 ? 1 : params->num_queries); hipExtent tableaux_extent = {params->maxdim, params->maxdim, params->dbsize}; hipPitchedPtr tableaux_pp = {params->tableaux, params->maxdim, params->maxdim, params->dbsize}; hipExtent distmatrices_extent = {params->maxdim*sizeof(float), params->maxdim, params->maxdim}; hipPitchedPtr distmatrices_pp = {params->distmatrices, params->maxdim*sizeof(float), params->maxdim, params->maxdim}; /* allocate space for output */ if (!(scores = (int *)malloc(params->dbsize*sizeof(int)))) { fprintf(stderr, "malloc scores failed\n"); return; } if (!(ssemaps = (int *)malloc(params->dbsize*MAXDIM*sizeof(int)))) { fprintf(stderr, "malloc ssemaps failed\n"); return; } for (int qi = 0; qi < query_count; qi++) { if (params->query_dbindex_list) { dbIndex_t *dbindex_entry = params->single_query_qid >= 0 ? &params->query_dbindex_list[params->single_query_qid] : &params->query_dbindex_list[qi]; int qdbi = dbindex_entry->index; if (dbindex_entry->large) /* query in 'large' struct db */ { strncpy(qid, large_names+qdbi*(LABELSIZE+1), LABELSIZE); c_qn_host = large_orders[qdbi]; memcpy(c_qtab_host, large_tableaux+qdbi*MAXDIM*MAXDIM, MAXDIM*MAXDIM*sizeof(char)); memcpy(c_qdmat_host, large_distmatrices+qdbi*MAXDIM*MAXDIM, MAXDIM*MAXDIM*sizeof(float)); /* NB the query in constant memory is MAXDIM not MAXDIM_GPU since constant memory larger than shared memory. 
*/ // set the qssetypes vector as main diagonal of the query tableau for (i = 0; i < c_qn_host; i++) c_qssetypes_host[i] = (large_tableaux+qdbi*MAXDIM*MAXDIM)[INDEX2D(i,i,MAXDIM,MAXDIM)]; } else /* query in 'small' struct db */ { strncpy(qid, names+qdbi*(LABELSIZE+1), LABELSIZE); c_qn_host = orders[qdbi]; /* NB the query in constant memory is MAXDIM not MAXDIM_GPU since constant memory larger than shared memory. This means we need to reformat the matrices into the larger size if they are in the smaller class */ for (i = 0; i < orders[qdbi]; i++) { for (j = i + 1; j < orders[qdbi]; j++) { char tabcode = (tableaux+qdbi*MAXDIM_GPU*MAXDIM_GPU)[INDEX2D(i,j,MAXDIM_GPU,MAXDIM_GPU)]; c_qtab_host[INDEX2D(i,j,MAXDIM,MAXDIM)] = tabcode; c_qtab_host[INDEX2D(j,i,MAXDIM,MAXDIM)] = tabcode; float dist = (distmatrices+qdbi*MAXDIM_GPU*MAXDIM_GPU)[INDEX2D(i,j,MAXDIM_GPU,MAXDIM_GPU)]; c_qdmat_host[INDEX2D(i,j,MAXDIM,MAXDIM)] = dist; c_qdmat_host[INDEX2D(j,i,MAXDIM,MAXDIM)] = dist; } } // set the qssetypes vector as main diagonal of the query tableau for (i = 0; i < c_qn_host; i++) c_qssetypes_host[i] = (tableaux+qdbi*MAXDIM_GPU*MAXDIM_GPU)[INDEX2D(i,i,MAXDIM_GPU,MAXDIM_GPU)]; } } else { strncpy(qid, params->qid, LABELSIZE); c_qn_host = params->qn; memcpy(c_qtab_host, params->qtab, sizeof(c_qtab_host)); memcpy(c_qdmat_host, params->qdmat, sizeof(c_qdmat_host)); memcpy(c_qssetypes_host, params->qssetypes, sizeof(c_qssetypes_host)); } printf("# cudaSaTabsearch LTYPE = %c LORDER = %c LSOLN = %c\n", params->ltype ? 'T' : 'F' , params->lorder ? 'T' : 'F' , params->lsoln ? 'T' : 'F'); printf("# QUERY ID = %-8s\n", qid); printf("# DBFILE = %-80s\n", dbfile); fprintf(stderr, "Executing simulated annealing tableaux match kernel on host for query %s...\n", qid); sdkCreateTimer(&hTimer) ; sdkResetTimer(&hTimer) ; sdkStartTimer(&hTimer) ; int state = 0; /*unused*/ sa_tabsearch_host(params->dbsize, params->lorder, params->lsoln, params->maxstart, tableaux_pp, tableaux_extent, params->orders, distmatrices_pp, distmatrices_extent, scores, ssemaps, &state); sdkStopTimer(&hTimer); runtime = sdkGetTimerValue(&hTimer); fprintf(stderr, "host execution time %f ms\n", runtime); fprintf(stderr, "%f million iterations/sec\n", (params->dbsize * (params->maxstart * MAXITER) / (runtime/1000)) / 1.0e6); for (i = 0; i < params->dbsize; i++) { /* printf("%-8s %d\n", params->names+i*(LABELSIZE+1), scores[i]); */ norm2score = norm2(scores[i], params->qn, params->orders[i]); zscore = z_gumbel(norm2score, gumbel_a, gumbel_b); pvalue = pv_gumbel(zscore); printf("%-8s %d %g %g %g\n", params->names+i*(LABELSIZE+1), scores[i], norm2score, zscore, pvalue); if (params->lsoln) for (int k = 0; k < c_qn_host; k++) if (ssemaps[i*MAXDIM + k] >= 0) printf("%3d %3d\n", k+1, ssemaps[i*MAXDIM + k]+1); } } free(scores); if (params->lsoln) free(ssemaps); } /* * copyQueryToConstantMemory() - copy the query data to device constant memory * * * Parameters: * qi - the query index of the query to copy. 
* Otherwise (query_dbinex_list is NULL), these used: * qn -query order * qtab - query tableau (in/out: may be set here) * qdmat - query distance matrix (in/out: may be set here) * qssetypes - query SSE types vector (in/out: may be set here) * qid - query id (in/out: may be set here) * c_qn_addr - address of c_qn device constant (q_qn or c_qn_noshared) * c_qtab_addr - address of c_qtab device constant * c_qdmat_addr - address of c_qdmat device constant * c_qssetypes_addr - address c_qssetypes device constant * * * Uses the global variables query_dbindex_list, tableaux, etc. * * Return value: None. * */ static void copyQueryToConstantMemory(int qi, int qn, char *qtab, float *qdmat, char *qssetypes, char *qid, int *c_qn_addr, char *c_qtab_addr, float *c_qdmat_addr, char *c_qssetypes_addr) { StopWatchInterface *hTimer = NULL; sdkCreateTimer(&hTimer) ; sdkResetTimer(&hTimer) ; sdkStartTimer(&hTimer) ; if (query_dbindex_list) { int qdbi = query_dbindex_list[qi].index; if (query_dbindex_list[qi].large) { strncpy(qid, large_names+qdbi*(LABELSIZE+1), LABELSIZE); // set the qssetypes vector as main diagonal of the query tableau for (int i = 0; i < large_orders[qdbi]; i++) qssetypes[i] = (large_tableaux+qdbi*MAXDIM*MAXDIM)[INDEX2D(i,i,MAXDIM,MAXDIM)]; /* copy query structure to constant memory on device */ checkCudaErrors( hipMemcpy(c_qn_addr, &large_orders[qdbi], sizeof(int), hipMemcpyHostToDevice) ); /* NB the query in constant memory is MAXDIM not MAXDIM_GPU since constant memory larger than shared memory. */ checkCudaErrors( hipMemcpy(c_qtab_addr, large_tableaux+qdbi*MAXDIM*MAXDIM, MAXDIM*MAXDIM*sizeof(char), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(c_qdmat_addr, large_distmatrices+qdbi*MAXDIM*MAXDIM, MAXDIM*MAXDIM*sizeof(float), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(c_qssetypes_addr, qssetypes, MAXDIM*sizeof(char), hipMemcpyHostToDevice) ); } else /* query is in the 'small' structure dbase */ { strncpy(qid, names+qdbi*(LABELSIZE+1), LABELSIZE); // set the qssetypes vector as main diagonal of the query tableau for (int i = 0; i < orders[qdbi]; i++) qssetypes[i] = (tableaux+qdbi*MAXDIM_GPU*MAXDIM_GPU)[INDEX2D(i,i,MAXDIM_GPU,MAXDIM_GPU)]; /* copy query structure to constant memory on device */ checkCudaErrors( hipMemcpy(c_qn_addr, &orders[qdbi], sizeof(int), hipMemcpyHostToDevice) ); /* NB the query in constant memory is MAXDIM not MAXDIM_GPU since constant memory larger than shared memory. 
This means we need to reformat the matrices into the larger size if they are in the smaller class */ for (int i = 0; i < orders[qdbi]; i++) { for (int j = i + 1; j < orders[qdbi]; j++) { char tabcode = (tableaux+qdbi*MAXDIM_GPU*MAXDIM_GPU)[INDEX2D(i,j,MAXDIM_GPU,MAXDIM_GPU)]; qtab[INDEX2D(i,j,MAXDIM,MAXDIM)] = tabcode; qtab[INDEX2D(j,i,MAXDIM,MAXDIM)] = tabcode; float dist = (distmatrices+qdbi*MAXDIM_GPU*MAXDIM_GPU)[INDEX2D(i,j,MAXDIM_GPU,MAXDIM_GPU)]; qdmat[INDEX2D(i,j,MAXDIM,MAXDIM)] = dist; qdmat[INDEX2D(j,i,MAXDIM,MAXDIM)] = dist; } } checkCudaErrors( hipMemcpy(c_qtab_addr, qtab, MAXDIM*MAXDIM*sizeof(char), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(c_qdmat_addr, qdmat, MAXDIM*MAXDIM*sizeof(float), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(c_qssetypes_addr, qssetypes, MAXDIM*sizeof(char), hipMemcpyHostToDevice) ); } } else // single query mode - copy to constant memory { fprintf(stderr, "XXX c_qn_addr = %p , qn = %d\n", c_qn_addr, qn); checkCudaErrors( hipMemcpy(c_qn_addr, &qn, sizeof(qn), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(c_qtab_addr, qtab, MAXDIM*MAXDIM*sizeof(char), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(c_qdmat_addr, qdmat, MAXDIM*MAXDIM*sizeof(float), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(c_qssetypes_addr, qssetypes, MAXDIM*sizeof(char), hipMemcpyHostToDevice) ); } sdkStopTimer(&hTimer) ; float qtime = sdkGetTimerValue(&hTimer); fprintf(stderr, "Copying query to constant memory took %f ms\n", qtime); } static void usage(const char *progname) { fprintf(stderr, "Usage: %s [-c] [-q dbfile]\n", progname); fprintf(stderr, " -c : run on host CPU not GPU card\n"); fprintf(stderr, " -q dbfile : database is read from dbfile, list of query\n" " ids is read from stdin\n"); fprintf(stderr, " -r restarts : number of restarts. 
Default %d\n", DEFAULT_MAXSTART); exit(1); } int main(int argc, char *argv[]) { CUTThread threadID[MAX_THREADS]; int num_threads = 0; int exit_status = 0; char buf[MAX_LINE_LEN]; char qtab[MAXDIM*MAXDIM]; float qdmat[MAXDIM*MAXDIM]; int qn; char qid[LABELSIZE+1]; int ltype=0,lorder=0,lsoln=0; char cltype,clorder,clsoln; FILE *dbfp; StopWatchInterface *hTimer = NULL; int total_dbsize, large_dbsize, gpu_dbsize; double dbtime,runtime; hipPitchedPtr d_tableaux; hipPitchedPtr d_distmatrices; int *d_orders; int *scores = NULL; int *ssemaps = NULL; int *d_scores; int *d_ssemaps; hipError_t cuda_errcode; int i,j; char qssetypes[MAXDIM]; int c; char *queryptr = NULL; int num_queries = 0; int large_query_count = 0; double norm2score, zscore, pvalue; while ((c = getopt(argc, argv, "cq:r:")) != -1) { switch (c) { case 'c': use_gpu = false; break; case 'q': querydbmode = true; strncpy(dbfile, optarg, sizeof(dbfile)-1); break; case 'r': maxstart = atoi(optarg); break; default: usage(argv[0]); break; } } if (querydbmode) { cltype = 'T'; ltype = 1; clorder = 'T'; lorder = 1; clsoln = 'F'; lsoln = 0; if (!(queryid_list = (char *)malloc(LABELSIZE+1))) { fprintf(stderr, "malloc queryid_list failed\n"); exit(1); } queryptr = queryid_list; while (!feof(stdin)) { if (num_queries > 0) { if ((!(queryid_list = (char *)realloc(queryid_list, (num_queries+1)*(LABELSIZE+1))))) { fprintf(stderr, "realloc queryid_list failed\n"); exit(1); } } if (!fgets(buf, MAX_LINE_LEN, stdin)) break; strncpy(queryptr, buf, LABELSIZE); queryptr[LABELSIZE-1] = '\0'; if (queryptr[strlen(queryptr)-1] == '\n') queryptr[strlen(queryptr)-1] = '\0'; queryptr += (LABELSIZE+1); num_queries++; } } else { if (fscanf(stdin, "%s\n", dbfile) != 1) { fprintf(stderr, "ERROR reading dbfilename from stdin\n"); exit(1); } if (fscanf(stdin, "%c %c %c\n", &cltype, &clorder, &clsoln) != 3) { fprintf(stderr, "ERROR reading options from stdin\n"); exit(1); } if (cltype == 'T') ltype = 1; if (clorder == 'T') lorder = 1; if (clsoln == 'T') lsoln = 1; if (fscanf(stdin, "%8s %d\n", qid, &qn) != 2) { fprintf(stderr, "ERROR parsing query tableau header from stdin\n"); exit(1); } if (parse_tableau(stdin, MAXDIM, qn, qtab) < 0) { fprintf(stderr, "ERROR parsing query tableau from stdin\n"); exit(1); } if (parse_distmatrix(stdin, MAXDIM, qn, qdmat, 0) < 0) { fprintf(stderr, "ERROR parsing query distance matrix from stdin\n"); exit(1); } } if (!ltype) { fprintf(stderr, "WARNING: LTYPE is always set to T\n"); ltype = 1; cltype = 'T'; } if (!(dbfp = fopen(dbfile, "r"))) { fprintf(stderr, "ERROR opening db file %s\n", dbfile); exit(1); } fprintf(stderr, "Loading database...\n"); sdkCreateTimer(&hTimer) ; sdkResetTimer(&hTimer) ; sdkStartTimer(&hTimer) ; total_dbsize = read_database(dbfp, &tableaux, &distmatrices, &large_tableaux, &large_distmatrices, &orders, &names, &large_orders, &large_names, &large_dbsize); if (total_dbsize < 0) { fprintf(stderr, "ERROR loading database\n"); exit(1); } gpu_dbsize = total_dbsize - large_dbsize; sdkStopTimer(&hTimer) ; dbtime = sdkGetTimerValue(&hTimer); fprintf(stderr, "Loaded %d db entries (%d order > %d) in %f ms\n", total_dbsize, large_dbsize, MAXDIM_GPU, dbtime); if (querydbmode) { /* Convert the list of query sids to list of indices in db for later rapid lookup. TODO: we should build a hash table rather than this highly inefficient linear search for each query id, but it's only done once and db not that big... 
*/ fprintf(stderr, "Building query index list...\n"); sdkResetTimer(&hTimer) ; sdkStartTimer(&hTimer) ; if (!(query_dbindex_list = (dbIndex_t *)malloc(num_queries*sizeof(dbIndex_t)))) { fprintf(stderr, "malloc query_dbindex_list failed\n"); exit(1); } for (i = 0; i < num_queries; i++) { /* fprintf(stderr, "zzz %s\n", queryid_list+i*(LABELSIZE+1)); */ bool found = false; for (j = 0; j < gpu_dbsize; j++) /* search 'small' structure dbase */ { if (!strcasecmp(queryid_list+i*(LABELSIZE+1),names+j*(LABELSIZE+1))) { query_dbindex_list[i].large = false; query_dbindex_list[i].index = j; found = true; break; } } if (!found) { for (j = 0; j < large_dbsize; j++) /* search 'large' structure dbase*/ { if (!strcasecmp(queryid_list + i*(LABELSIZE+1), large_names + j*(LABELSIZE+1))) { query_dbindex_list[i].large = true; query_dbindex_list[i].index = j; large_query_count++; found = true; break; } } } if (!found) { fprintf(stderr, "ERROR: query %s not found\n", queryid_list+i*(LABELSIZE+1)); exit(1); } } sdkStopTimer(&hTimer); fprintf(stderr, "Built query index (%d queries (%d large)) in %f ms\n", num_queries, large_query_count, sdkGetTimerValue(&hTimer)); } else { num_queries = 0; query_dbindex_list = NULL; // set the qssetypes vector as main diagonal of the query tableau for (i = 0; i < qn; i++) qssetypes[i] = qtab[INDEX2D(i,i,MAXDIM,MAXDIM)]; } /* TODO allow multiple GPUs (need one thread for each) */ if (use_gpu) { /* int devnum = cutGetMaxGflopsDeviceId(); fprintf(stderr, "using max gflops device %d: ", devnum); */ /* If there is a compute capability 2 device ("Fermi" architecture) (or higher) then use that, and do NOT use shared memory as it is faster to just rely on the new "NVIDIA Parallel DataCache (TM)" -- just use global memory for all (small and large) structures */ int devnum, deviceCount, gflops,max_gflops=0, sel_devnum; hipGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "There is no device supporting CUDA.\n"); exit(1); } fprintf(stderr, "found %d CUDA devices\n", deviceCount); for (devnum = 0; devnum < deviceCount; devnum++) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, devnum); if (deviceProp.major >= 2) { fprintf(stderr, "found Fermi architecture (compute capability %d.%d) device %d: %s\n", deviceProp.major, deviceProp.minor, devnum, deviceProp.name); sel_devnum = devnum; use_shared_memory = true; break; } else { gflops = deviceProp.multiProcessorCount * deviceProp.clockRate; fprintf(stderr, "device %d: %s\n", devnum, deviceProp.name); if (gflops > max_gflops) { max_gflops = gflops; sel_devnum = devnum; use_shared_memory = true; } } } fprintf(stderr, "using device %d: ", sel_devnum); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, sel_devnum); fprintf(stderr, "%s\n", deviceProp.name); hipSetDevice( sel_devnum ); } fprintf(stderr, "maxstart = %d\n", maxstart); srand48(1234); if (use_gpu) { /* setup execution configuration parameters */ /* TODO optimize for different architectures (automatically) */ const int blocks = 128; const int NUM_THREADS = 128; dim3 dimGrid(blocks); // blocks dim3 dimBlock(NUM_THREADS); // threads per block fprintf(stderr, "Execution configuration: Grid = (%d,%d,%d) Block = (%d,%d,%d)\n", dimGrid.x,dimGrid.y,dimGrid.z, dimBlock.x,dimBlock.y,dimBlock.z); fprintf(stderr, "using shared memory for small db structs: %s\n", use_shared_memory ? 
"YES" : "NO"); /* first do the 'small' db structures on GPU (with shared memory) */ fprintf(stderr, "Copying database to device...\n"); sdkResetTimer(&hTimer) ; sdkStartTimer(&hTimer) ; hiprandState_t *devStates; /* allocate space on device for random number generator state */ int rc; if ((rc = hipMalloc((void **)&devStates, blocks*NUM_THREADS*sizeof(hiprandState_t))) != hipSuccess) { fprintf(stderr, "hipMalloc devStates failed %d\n", rc); exit(1); } /* initialize device random number generator */ sdkStartTimer(&hTimer) ; hipLaunchKernelGGL(( init_rng), dim3(dimGrid), dim3(dimBlock), 0, 0, devStates); if ((rc = hipGetLastError()) != hipSuccess) { fprintf(stderr, "init_rng kernel error %d\n", rc); } hipDeviceSynchronize(); if ((rc = hipGetLastError()) != hipSuccess) { fprintf(stderr, "init_rng sync error %d\n", rc); } sdkStopTimer(&hTimer) ; fprintf(stderr, "Initialized device RNG with %d states (%d KB) in %f ms\n", blocks*NUM_THREADS, blocks*NUM_THREADS*sizeof(hiprandState_t)/1024, sdkGetTimerValue(&hTimer)); hipExtent tableaux_extent = make_hipExtent(MAXDIM_GPU, MAXDIM_GPU, gpu_dbsize); checkCudaErrors( hipMalloc3D(&d_tableaux, tableaux_extent) ); fprintf(stderr, "d_tableaux.pitch == %u xsize == %u ysize == %u\n", d_tableaux.pitch, d_tableaux.xsize, d_tableaux.ysize); hipExtent distmatrices_extent = make_hipExtent(MAXDIM_GPU*sizeof(float), MAXDIM_GPU, gpu_dbsize); checkCudaErrors( hipMalloc3D(&d_distmatrices, distmatrices_extent) ); fprintf(stderr, "d_distmatrices.pitch == %u xsize == %u ysize == %u\n", d_distmatrices.pitch, d_distmatrices.xsize, d_distmatrices.ysize); checkCudaErrors( hipMalloc((void **)&d_orders, gpu_dbsize*sizeof(int)) ); hipMemcpy3DParms copyParams = { 0 }; // srcPtr is tricky: need to give pitch of row, #elements in row, // then height, omitting 3rd dimension (doesn't seem to be documented) // (I found this info on 28/1/2010 at // http://sites.google.com/site/cudaiap2009/cookbook-1). // Note pitch of row on host is just MAXDIM_GPU, we don't need padding here copyParams.srcPtr = make_hipPitchedPtr((void*)tableaux, MAXDIM_GPU, MAXDIM_GPU, MAXDIM_GPU); fprintf(stderr, "srcPtr.pitch == %u\n", copyParams.srcPtr.pitch); copyParams.dstPtr = d_tableaux; copyParams.extent = tableaux_extent; copyParams.kind = hipMemcpyHostToDevice; checkCudaErrors( hipMemcpy3D(&copyParams) ); hipMemcpy3DParms copyParams2 = { 0 }; copyParams2.srcPtr = make_hipPitchedPtr((void*)distmatrices, MAXDIM_GPU*sizeof(float), MAXDIM_GPU, MAXDIM_GPU); fprintf(stderr, "distmatrices srcPtr.pitch == %u\n", copyParams2.srcPtr.pitch); copyParams2.dstPtr = d_distmatrices; copyParams2.extent = distmatrices_extent; copyParams2.kind = hipMemcpyHostToDevice; checkCudaErrors( hipMemcpy3D(&copyParams2) ); checkCudaErrors( hipMemcpy(d_orders, orders, gpu_dbsize*sizeof(int), hipMemcpyHostToDevice) ); sdkStopTimer(&hTimer) ; dbtime = sdkGetTimerValue(&hTimer); fprintf(stderr, "Copied %d entries to GPU in %f ms\n", gpu_dbsize, dbtime); /* allocate space for output */ checkCudaErrors( hipMalloc((void **)&d_scores, gpu_dbsize*sizeof(int))); if (!(scores = (int *)malloc(gpu_dbsize*sizeof(int)))) { fprintf(stderr, "malloc scores failed\n"); goto bye; } if (lsoln) { checkCudaErrors( hipMalloc((void **)&d_ssemaps, gpu_dbsize*MAXDIM*sizeof(int))); if (!(ssemaps = (int *)malloc(gpu_dbsize*MAXDIM*sizeof(int)))) { fprintf(stderr, "malloc ssemaps failed\n"); goto bye; } } const_addr_t const_addr; int query_count = (num_queries == 0 ? 
1 : num_queries); for (int qi = 0; qi < query_count; qi++) { if (use_shared_memory) { get_device_constant_addresses(&const_addr); copyQueryToConstantMemory(qi, qn, qtab, qdmat, qssetypes, qid, const_addr.c_qn_addr, const_addr.c_qtab_addr, const_addr.c_qdmat_addr, const_addr.c_qssetypes_addr); // checkCudaErrors( hipMemcpy(const_addr.c_qn_addr, &qn, sizeof(qn), hipMemcpyHostToDevice) ); fprintf(stderr,"qn=%d\n",qn); //XXX } else { get_device_constant_addresses_noshared_small(&const_addr); copyQueryToConstantMemory(qi, qn, qtab, qdmat, qssetypes, qid, const_addr.c_qn_noshared_small_addr, const_addr.c_qtab_noshared_small_addr, const_addr.c_qdmat_noshared_small_addr, const_addr.c_qssetypes_noshared_small_addr); } printf("# cudaSaTabsearch LTYPE = %c LORDER = %c LSOLN = %c\n", cltype, clorder, clsoln); printf("# QUERY ID = %-8s\n", qid); printf("# DBFILE = %-80s\n", dbfile); /* launch thread to do large db structs on host */ searchParams_t host_params; host_params.ltype = ltype; host_params.lorder = lorder; host_params.lsoln = lsoln; host_params.maxstart = maxstart; host_params.num_queries = num_queries; host_params.query_dbindex_list = query_dbindex_list; host_params.single_query_qid = qi; memcpy(host_params.qtab, qtab, sizeof(qtab)); memcpy(host_params.qdmat, qdmat, sizeof(qdmat)); memcpy(host_params.qid, qid, sizeof(qid)); host_params.qn = qn; host_params.qssetypes = qssetypes; host_params.maxdim = MAXDIM; host_params.dbsize = large_dbsize; host_params.tableaux = large_tableaux; host_params.distmatrices = large_distmatrices; host_params.orders = large_orders; host_params.names = large_names; //XXX threadID[num_threads++] = cutStartThread((CUT_THREADROUTINE)tabsearch_host_thread, &host_params); fprintf(stderr, "Executing simulated annealing tableaux match kernel (%sshared memory) on GPU for qid %s...\n", use_shared_memory ? 
" " : "no ", qid); checkCudaErrors( hipDeviceSynchronize() ); sdkResetTimer(&hTimer) ; sdkStartTimer(&hTimer) ; if (use_shared_memory) { int xxx_qn=-1; checkCudaErrors( hipMemcpy(&xxx_qn, const_addr.c_qn_addr, sizeof(qn), hipMemcpyDeviceToHost) ); fprintf(stderr,"xxx_qn=%d\n",xxx_qn); //XXX hipLaunchKernelGGL(( sa_tabsearch_gpu), dim3(dimGrid),dim3(dimBlock), 0, 0, gpu_dbsize, lorder, lsoln, maxstart, d_tableaux, tableaux_extent, d_orders, d_distmatrices, distmatrices_extent, d_scores, d_ssemaps, devStates); } else { hipLaunchKernelGGL(( sa_tabsearch_gpu_noshared_small), dim3(dimGrid),dim3(dimBlock), 0, 0, gpu_dbsize, lorder, lsoln, maxstart, d_tableaux, tableaux_extent, d_orders, d_distmatrices, distmatrices_extent, d_scores, d_ssemaps, devStates); } cuda_errcode = hipGetLastError(); if (cuda_errcode != hipSuccess) { fprintf(stderr, "kernel launch failed: %s\n", hipGetErrorString(cuda_errcode)); exit_status = 1; goto bye; } checkCudaErrors( hipDeviceSynchronize() ); sdkStopTimer(&hTimer) ; runtime = sdkGetTimerValue(&hTimer); fprintf(stderr, "GPU execution time %f ms\n", runtime); fprintf(stderr, "%f million iterations/sec\n", ((float)gpu_dbsize * ((float)maxstart * (float)MAXITER) / (runtime/1000)) / 1.0e6); /* Get results from device */ checkCudaErrors( hipMemcpy(scores, d_scores, gpu_dbsize*sizeof(int), hipMemcpyDeviceToHost) ); if (lsoln) checkCudaErrors( hipMemcpy(ssemaps, d_ssemaps, gpu_dbsize*MAXDIM*sizeof(int), hipMemcpyDeviceToHost) ); /* Wait for host thread */ //XXX cutWaitForThreads(threadID, num_threads); //XXX --num_threads; /* TODO we could reduce wasted time waiting by running all host (large db) queries in the one thread instead of matching up with GPU query in this loop (actuall, more like the other way around usually, the GPU ends up idle while host is still runnign since the latter is so much slower even though it has very few db entries unlike GPU) */ for (i = 0; i < gpu_dbsize; i++) { /* printf("%-8s %d\n", names+i*(LABELSIZE+1), scores[i]); */ norm2score = norm2(scores[i], qn, orders[i]); zscore = z_gumbel(norm2score, gumbel_a, gumbel_b); pvalue = pv_gumbel(zscore); printf("%-8s %d %g %g %g\n", names+i*(LABELSIZE+1), scores[i], norm2score, zscore, pvalue); if (lsoln) for (int k = 0; k < qn; k++) if (ssemaps[i*MAXDIM + k] >= 0) printf("%3d %3d\n", k+1, ssemaps[i*MAXDIM + k]+1); } } checkCudaErrors( hipFree(d_tableaux.ptr) ); checkCudaErrors( hipFree(d_distmatrices.ptr) ); checkCudaErrors( hipFree(d_orders) ); checkCudaErrors( hipFree(d_scores) ); free(scores); scores = NULL; if (lsoln) { checkCudaErrors( hipFree(d_ssemaps) ); free(ssemaps); ssemaps = NULL; } /* now do the 'large' db structures on GPU (not using shared memory) */ if (large_dbsize > 0) { fprintf(stderr, "Copying large structure database to device...\n"); sdkResetTimer(&hTimer) ; sdkStartTimer(&hTimer) ; tableaux_extent = make_hipExtent(MAXDIM, MAXDIM, large_dbsize); checkCudaErrors( hipMalloc3D(&d_tableaux, tableaux_extent) ); fprintf(stderr, "d_tableaux.pitch == %u xsize == %u ysize == %u\n", d_tableaux.pitch, d_tableaux.xsize, d_tableaux.ysize); distmatrices_extent = make_hipExtent(MAXDIM*sizeof(float), MAXDIM, large_dbsize); checkCudaErrors( hipMalloc3D(&d_distmatrices, distmatrices_extent) ); fprintf(stderr, "d_distmatrices.pitch == %u xsize == %u ysize == %u\n", d_distmatrices.pitch, d_distmatrices.xsize, d_distmatrices.ysize); checkCudaErrors( hipMalloc((void **)&d_orders, large_dbsize*sizeof(int)) ); hipMemcpy3DParms copyParamsl = { 0 }; // srcPtr is tricky: need to give pitch of row, #elements 
in row, // then height, omitting 3rd dimension (doesn't seem to be documented) // (I found this info on 28/1/2010 at // http://sites.google.com/site/cudaiap2009/cookbook-1). // Note pitch of row on host is just MAXDIM_GPU, we don't need padding here copyParamsl.srcPtr = make_hipPitchedPtr((void*)large_tableaux, MAXDIM, MAXDIM, MAXDIM); fprintf(stderr, "srcPtr.pitch == %u\n", copyParamsl.srcPtr.pitch); copyParamsl.dstPtr = d_tableaux; copyParamsl.extent = tableaux_extent; copyParamsl.kind = hipMemcpyHostToDevice; checkCudaErrors( hipMemcpy3D(&copyParamsl) ); hipMemcpy3DParms copyParams2l = { 0 }; copyParams2l.srcPtr = make_hipPitchedPtr((void*)large_distmatrices, MAXDIM*sizeof(float), MAXDIM, MAXDIM); fprintf(stderr, "distmatrices srcPtr.pitch == %u\n", copyParams2l.srcPtr.pitch); copyParams2l.dstPtr = d_distmatrices; copyParams2l.extent = distmatrices_extent; copyParams2l.kind = hipMemcpyHostToDevice; checkCudaErrors( hipMemcpy3D(&copyParams2l) ); checkCudaErrors( hipMemcpy(d_orders, large_orders, large_dbsize*sizeof(int), hipMemcpyHostToDevice) ); sdkStopTimer(&hTimer) ; dbtime = sdkGetTimerValue(&hTimer); fprintf(stderr, "Copied %d large entries to GPU in %f ms\n", large_dbsize, dbtime); /* allocate space for output */ checkCudaErrors( hipMalloc((void **)&d_scores, large_dbsize*sizeof(int))); if (!(scores = (int *)malloc(large_dbsize*sizeof(int)))) { fprintf(stderr, "malloc scores failed\n"); goto bye; } if (lsoln) { checkCudaErrors( hipMalloc((void **)&d_ssemaps, large_dbsize*MAXDIM*sizeof(int))); if (!(ssemaps = (int *)malloc(large_dbsize*MAXDIM*sizeof(int)))) { fprintf(stderr, "malloc ssemaps failed\n"); goto bye; } } for (int qi = 0; qi < query_count; qi++) { get_device_constant_addresses_noshared(&const_addr); copyQueryToConstantMemory(qi, qn, qtab, qdmat, qssetypes, qid, const_addr.c_qn_noshared_addr, const_addr.c_qtab_noshared_addr, const_addr.c_qdmat_noshared_addr, const_addr.c_qssetypes_noshared_addr); printf("# cudaSaTabsearch LTYPE = %c LORDER = %c LSOLN = %c\n", cltype, clorder, clsoln); printf("# QUERY ID = %-8s\n", qid); printf("# DBFILE = %-80s\n", dbfile); fprintf(stderr, "Executing simulated annealing tableaux match kernel (no shared memory) on GPU for qid %s...\n",qid); checkCudaErrors( hipDeviceSynchronize() ); sdkResetTimer(&hTimer) ; sdkStartTimer(&hTimer) ; int xxx_qn_noshared=-1; checkCudaErrors( hipMemcpy(&xxx_qn_noshared, const_addr.c_qn_noshared_addr, sizeof(qn), hipMemcpyDeviceToHost) ); fprintf(stderr,"xxx_qn_noshared=%d\n",xxx_qn_noshared); //XXX hipLaunchKernelGGL(( sa_tabsearch_gpu_noshared), dim3(dimGrid),dim3(dimBlock), 0, 0, large_dbsize, lorder, lsoln, maxstart, d_tableaux, tableaux_extent, d_orders, d_distmatrices, distmatrices_extent, d_scores, d_ssemaps, devStates); cuda_errcode = hipGetLastError(); if (cuda_errcode != hipSuccess) { fprintf(stderr, "kernel launch failed: %s\n", hipGetErrorString(cuda_errcode)); exit_status = 1; goto bye; } checkCudaErrors( hipDeviceSynchronize() ); sdkStopTimer(&hTimer) ; runtime = sdkGetTimerValue(&hTimer); fprintf(stderr, "GPU (no shared memory) execution time %f ms\n", runtime); fprintf(stderr, "%f million iterations/sec\n", ((float)large_dbsize * ((float)maxstart * (float)MAXITER) / (runtime/1000)) / 1.0e6); /* Get results from device */ checkCudaErrors( hipMemcpy(scores, d_scores, large_dbsize*sizeof(int), hipMemcpyDeviceToHost) ); if (lsoln) checkCudaErrors( hipMemcpy(ssemaps, d_ssemaps, large_dbsize * MAXDIM * sizeof(int), hipMemcpyDeviceToHost) ); for (i = 0; i < large_dbsize; i++) { /* printf("%-8s %d\n", 
large_names+i*(LABELSIZE+1), scores[i]); */ norm2score = norm2(scores[i], qn, large_orders[i]); zscore = z_gumbel(norm2score, gumbel_a, gumbel_b); pvalue = pv_gumbel(zscore); printf("%-8s %d %g %g %g\n", large_names+i*(LABELSIZE+1), scores[i], norm2score, zscore, pvalue); if (lsoln) for (int k = 0; k < qn; k++) if (ssemaps[i*MAXDIM + k] >= 0) printf("%3d %3d\n", k+1, ssemaps[i*MAXDIM + k]+1); } } } } else { /* running on host CPU */ searchParams_t host_params; host_params.ltype = ltype; host_params.lorder = lorder; host_params.lsoln = lsoln; host_params.maxstart = maxstart; host_params.num_queries = num_queries; host_params.single_query_qid = -1; host_params.query_dbindex_list = query_dbindex_list; memcpy(host_params.qtab, qtab, sizeof(qtab)); memcpy(host_params.qdmat, qdmat, sizeof(qdmat)); memcpy(host_params.qid, qid, sizeof(qid)); host_params.qn = qn; host_params.qssetypes = qssetypes; host_params.maxdim = MAXDIM_GPU; host_params.dbsize = gpu_dbsize; /* first do small structure db */ host_params.tableaux = tableaux; host_params.distmatrices = distmatrices; host_params.orders = orders; host_params.names = names; tabsearch_host_thread(&host_params); /* then large structure db */ if (large_dbsize > 0) { host_params.maxdim = MAXDIM; host_params.dbsize = large_dbsize; host_params.tableaux = large_tableaux; host_params.distmatrices = large_distmatrices; host_params.orders = large_orders; host_params.names = large_names; tabsearch_host_thread(&host_params); } } bye: /* cleanup and exit */ free(tableaux); free(distmatrices); free(orders); free(names); free(scores); free(large_tableaux); free(large_distmatrices); free(large_names); free(large_orders); if (lsoln) free(ssemaps); sdkDeleteTimer( &hTimer); if (use_gpu) { if (large_dbsize > 0) { checkCudaErrors( hipFree(d_tableaux.ptr) ); checkCudaErrors( hipFree(d_distmatrices.ptr) ); checkCudaErrors( hipFree(d_orders) ); checkCudaErrors( hipFree(d_scores) ); if (lsoln) checkCudaErrors( hipFree(d_ssemaps) ); } hipDeviceReset(); } exit(exit_status); }
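/*
 * Illustrative sketch (not part of the original program): the 3D pitched-copy
 * pattern used in main() above to move a batch of fixed-size matrices to the
 * device, reduced to a self-contained helper. The host buffer is assumed to be
 * tightly packed (row pitch == width in bytes), exactly as for the tableaux
 * arrays above; the helper name and parameters are hypothetical.
 */
static hipError_t copyMatrixBatchToDevice(const char *host, int dim, int count,
                                          hipPitchedPtr *d_out,
                                          hipExtent *extent_out)
{
    /* width is in bytes; for char matrices that is just dim */
    hipExtent extent = make_hipExtent(dim, dim, count);
    hipError_t err = hipMalloc3D(d_out, extent);
    if (err != hipSuccess)
        return err;
    hipMemcpy3DParms p = { 0 };
    /* srcPtr takes the host row pitch, row width (elements) and slice height;
       the slice count comes from the extent, not from srcPtr */
    p.srcPtr = make_hipPitchedPtr((void *)host, dim, dim, dim);
    p.dstPtr = *d_out;
    p.extent = extent;
    p.kind   = hipMemcpyHostToDevice;
    err = hipMemcpy3D(&p);
    if (err == hipSuccess && extent_out)
        *extent_out = extent;
    return err;
}
/*
 * Usage sketch, mirroring the 'small' database copy in main():
 *   copyMatrixBatchToDevice(tableaux, MAXDIM_GPU, gpu_dbsize,
 *                           &d_tableaux, &tableaux_extent);
 */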
8e3307143f1f89355de0c7afc927ccbb097c3311.cu
/***************************************************************************** * * File: cudaSaTabsearch.cu * Author: Alex Stivala * Created: January 2010 * * $Id: cudaSaTabsearch.cu 4753 2013-11-20 03:16:27Z astivala $ * * CUDA host code for simulated annealing tableau matching (discrete). * This is a CUDA implemenation of the FORTRAN subroutine TSAMTD. * Since the GPU has limited memory (and specifically, very limited * per block shared memory), we split the database into 'small' and * 'large' structures. The small ones can run on the GPU in shared memory, * the large ones cannot so we either have to not use shared memory * (OK, but a bit slower) or run them on the host. * When runnign on the host, we can simultaneously run the GPU and * host in separate threads. For multiple GPU cards, CUDA also requires * that there is a separate host thread for each GPU, so this program * is multithreaded: each thread is either for a separate GPU or for * running the same kernel (but compiled for host) on the host CPU. * * Usage: cudaSaTabsearch [-c] [-q dbfile] [-r restarts] < inputfile * * -c : run on host CPU not GPU card * * -q : query list mode: instead of reading query data on stdin * just as in the original Fortran version tlocsd, a list * of query sids to be read from the database is read on stdin (one per * line), * and db filenaame is specified on command * line. In this mode options are assumed as LORDER=T, LTYPE=T, * LSOLN=N. The output is still to stdout, but each query following * immediately from the previous (can parse using the header comment * niformation lines as separators. * * -r restarts: number of restarts (iterations of cooling schedule). * Should be a multiple of blocksize. Defaults to 128. * * The 'database' to search is an ASCII file of tableaux * (Omega matrices) in format described in rdtabd.f. * * The results are printed to stdout as * * name rawscore norm2score z-score p-value * * * Both the name of the database file to read, and the actual * query tableau are read from stdin. * The first line is the name * of the database file. * The second line is for options. There are currently 3 logical * options, for SSE type constraint (only allow SSEs of same type ot * match) and ordering constraint (disallow out of sequence order * matches). The third is to output not just the scores but also solution * vector values. * They are single character logical values (T or F). * First is type, second is order, third is solution output, * separated by one space. * * The subsequent lines are a single tableau in the same format as * each tableau entry in the database i.e.: * * The first line of an entry is the identifier and * order of tableau (i.e. dimension of square array), then * each subsequent row is a row of the tableau, lower triangle * only (since it is symmetric). * The diagonal entries are meaningless (self-angle) in tableaux, * and are included instead to specify the SSE type, with * the following codes: * * e beta strand * xa alpha helix * xi pi helix * xg 3_10 helix * * Width of identifier is 8 chars, blank padded on right, * width of order is 4 digits, blank padded on left. * There is a single space between identifier and order. * Each entry in tableau is two characters, with a space betwen * each on a line, and one line * per row of matrix. * * Following the tableau is the distance matrix. * Each row is a row of the distance matrix, lower triangle * only (since it is symmetric). 
* The diagonal entries are meaningless (self-distance) * and are included instead to specify the SSE type, with * the following codes: * * 0.000 beta strand * 1.000 alpha helix * 2.000 pi helix * 3.000 3_10 helix * * Each entry in matrix is in Angstroms format * F6.3 with a space between each on a line, and one line * per row of matrix. * * * E.g.: * * /local/charikar/astivala/tableauxdb/astral/tableauxdistmatrixdb.ascii * T T F * D1UBIA_ 8 * e * OT e * LE RT xa * PD OS RD xg * RT LE RT LS e * LE RD LE LS OT e * RT LS LS RD PE OS xg * PE RT LE RD OT PE RT e * 0.000 * 4.501 0.000 * 1.662 10.386 1.000 * 16.932 17.644 9.779 3.000 * 10.588 13.738 11.815 10.527 0.000 * 15.025 18.692 17.143 15.341 6.466 0.000 * 15.298 17.276 16.276 20.075 13.264 11.610 3.000 * 7.549 11.072 12.248 12.446 4.583 9.903 15.689 0.000 * * * *****************************************************************************/ #define CUDASATABSEARCH_MAIN 1 #include <stdlib.h> #include <stdio.h> #include <getopt.h> #include <time.h> #include <string.h> #include <multithreading.h> #include <helper_cuda.h> #include <helper_timer.h> #include <curand_kernel.h> #include "parsetableaux.h" #include "cudaSaTabsearch_kernel.h" #include "cudaGetDeviceConstantAddresses.h" #include "gumbelstats.h" /***************************************************************************** * * Type definitions * *****************************************************************************/ /* dbIndex_t is for the query list mode, an array of these gives for each query the index in the appropriate ('small' or 'large' according to the large flag) tableaux and distmatrix db arrays */ typedef struct dbIndex_s { bool large; /* true if query is 'large' (>MAXDIM_GPU) structure */ int index; /* index in tableaux and distmatrix db list, or 'large' tableaux and distmatrix db list if large is true */ } dbIndex_t; /* searchParams_t is a struct for parameter to tableau search functions dcelared as CUT_THREADROUTINE to be callable as threads */ typedef struct searchParams_s { int ltype; int lorder; int lsoln; /* type,order,soln flags */ int maxstart; /* number of restarts */ int maxdim; /*dimension of tableaux, distmatrices here */ int num_queries; /* number of queries; 0 if not query list mode */ int single_query_qid; /* if >=0, do only the one at this index */ dbIndex_t *query_dbindex_list; /* if num_queries>0, the query db index */ char qtab[MAXDIM*MAXDIM]; /* if num_queries==0, the query tableau */ float qdmat[MAXDIM*MAXDIM]; /* the query distmatrix*/ char qid[LABELSIZE+1]; /* the query identifier*/ int qn; /* the query order */ char *qssetypes; /* the query SSE types*/ int dbsize; /* number of entries in the db */ char *tableaux; /* the tableaux database */ float *distmatrices; /* the distance matrices database */ int *orders; /* orders of entries in db */ char *names; /* names of entries in db */ } searchParams_t; /***************************************************************************** * * Globals * *****************************************************************************/ static char dbfile[MAX_LINE_LEN]; /* database file name */ static bool use_gpu = true; /* use the GPU */ static bool use_shared_memory = true; /* use GPU shared mem for db structs */ static char *tableaux, *large_tableaux; /* small and large tableaux */ static float *distmatrices, *large_distmatrices; /* same for dist.matrices*/ static int *orders, *large_orders; /* and for orders */ static char *names, *large_names; /* and names */ static bool querydbmode = false; /* use list of 
query ids in db */ static char *queryid_list = NULL; /* this is the list of query ids */ static dbIndex_t *query_dbindex_list = NULL; /* and their indices in db */ static int maxstart = DEFAULT_MAXSTART; /* number of restarts */ /* * init_rng() * * Initialize CURAND pseudrandom number generator * See CUDA Toolkit 4.1 CURAND Guide (p.21) * * Parameters: * state - CURAND state for random number generation * */ __global__ void init_rng(curandState *state) { int tid=blockIdx.x*blockDim.x+threadIdx.x; /* give each therad same seed, different sequence number, no offset */ curand_init(1234, tid, 0, &state[tid]); } /////////////////////////////////////////////////////////////////////////////// // Common host and device function /////////////////////////////////////////////////////////////////////////////// //ceil(a / b) extern "C" int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); } //floor(a / b) extern "C" int iDivDown(int a, int b){ return a / b; } //Align a to nearest higher multiple of b extern "C" int iAlignUp(int a, int b){ return ((a % b) != 0) ? (a - a % b + b) : a; } //Align a to nearest lower multiple of b extern "C" int iAlignDown(int a, int b){ return a - a % b; } /* * tabsearch_host_thread - run the tableau search kernel on host CPU * * Started as a thread by cutStartThread in main * * Parameters: * params - paramter block for thread. See comments on searchParams_t defn. * * Return value: None. * */ static CUT_THREADPROC tabsearch_host_thread(searchParams_t *params) { /* extern declartions of host version of gpu constant memory */ extern int c_qn_host; // query structure size extern char c_qtab_host[MAXDIM*MAXDIM]; // query tableau extern float c_qdmat_host[MAXDIM*MAXDIM]; // query distance matrix extern char c_qssetypes_host[MAXDIM]; // main diagonal of c_qn StopWatchInterface *hTimer = NULL; double runtime; int *ssemaps; int i,j; char qid[LABELSIZE+1]; int *scores; double norm2score,zscore,pvalue; int query_count = (params->num_queries == 0 || params->single_query_qid >= 0 ? 1 : params->num_queries); cudaExtent tableaux_extent = {params->maxdim, params->maxdim, params->dbsize}; cudaPitchedPtr tableaux_pp = {params->tableaux, params->maxdim, params->maxdim, params->dbsize}; cudaExtent distmatrices_extent = {params->maxdim*sizeof(float), params->maxdim, params->maxdim}; cudaPitchedPtr distmatrices_pp = {params->distmatrices, params->maxdim*sizeof(float), params->maxdim, params->maxdim}; /* allocate space for output */ if (!(scores = (int *)malloc(params->dbsize*sizeof(int)))) { fprintf(stderr, "malloc scores failed\n"); return; } if (!(ssemaps = (int *)malloc(params->dbsize*MAXDIM*sizeof(int)))) { fprintf(stderr, "malloc ssemaps failed\n"); return; } for (int qi = 0; qi < query_count; qi++) { if (params->query_dbindex_list) { dbIndex_t *dbindex_entry = params->single_query_qid >= 0 ? &params->query_dbindex_list[params->single_query_qid] : &params->query_dbindex_list[qi]; int qdbi = dbindex_entry->index; if (dbindex_entry->large) /* query in 'large' struct db */ { strncpy(qid, large_names+qdbi*(LABELSIZE+1), LABELSIZE); c_qn_host = large_orders[qdbi]; memcpy(c_qtab_host, large_tableaux+qdbi*MAXDIM*MAXDIM, MAXDIM*MAXDIM*sizeof(char)); memcpy(c_qdmat_host, large_distmatrices+qdbi*MAXDIM*MAXDIM, MAXDIM*MAXDIM*sizeof(float)); /* NB the query in constant memory is MAXDIM not MAXDIM_GPU since constant memory larger than shared memory. 
*/ // set the qssetypes vector as main diagonal of the query tableau for (i = 0; i < c_qn_host; i++) c_qssetypes_host[i] = (large_tableaux+qdbi*MAXDIM*MAXDIM)[INDEX2D(i,i,MAXDIM,MAXDIM)]; } else /* query in 'small' struct db */ { strncpy(qid, names+qdbi*(LABELSIZE+1), LABELSIZE); c_qn_host = orders[qdbi]; /* NB the query in constant memory is MAXDIM not MAXDIM_GPU since constant memory larger than shared memory. This means we need to reformat the matrices into the larger size if they are in the smaller class */ for (i = 0; i < orders[qdbi]; i++) { for (j = i + 1; j < orders[qdbi]; j++) { char tabcode = (tableaux+qdbi*MAXDIM_GPU*MAXDIM_GPU)[INDEX2D(i,j,MAXDIM_GPU,MAXDIM_GPU)]; c_qtab_host[INDEX2D(i,j,MAXDIM,MAXDIM)] = tabcode; c_qtab_host[INDEX2D(j,i,MAXDIM,MAXDIM)] = tabcode; float dist = (distmatrices+qdbi*MAXDIM_GPU*MAXDIM_GPU)[INDEX2D(i,j,MAXDIM_GPU,MAXDIM_GPU)]; c_qdmat_host[INDEX2D(i,j,MAXDIM,MAXDIM)] = dist; c_qdmat_host[INDEX2D(j,i,MAXDIM,MAXDIM)] = dist; } } // set the qssetypes vector as main diagonal of the query tableau for (i = 0; i < c_qn_host; i++) c_qssetypes_host[i] = (tableaux+qdbi*MAXDIM_GPU*MAXDIM_GPU)[INDEX2D(i,i,MAXDIM_GPU,MAXDIM_GPU)]; } } else { strncpy(qid, params->qid, LABELSIZE); c_qn_host = params->qn; memcpy(c_qtab_host, params->qtab, sizeof(c_qtab_host)); memcpy(c_qdmat_host, params->qdmat, sizeof(c_qdmat_host)); memcpy(c_qssetypes_host, params->qssetypes, sizeof(c_qssetypes_host)); } printf("# cudaSaTabsearch LTYPE = %c LORDER = %c LSOLN = %c\n", params->ltype ? 'T' : 'F' , params->lorder ? 'T' : 'F' , params->lsoln ? 'T' : 'F'); printf("# QUERY ID = %-8s\n", qid); printf("# DBFILE = %-80s\n", dbfile); fprintf(stderr, "Executing simulated annealing tableaux match kernel on host for query %s...\n", qid); sdkCreateTimer(&hTimer) ; sdkResetTimer(&hTimer) ; sdkStartTimer(&hTimer) ; int state = 0; /*unused*/ sa_tabsearch_host(params->dbsize, params->lorder, params->lsoln, params->maxstart, tableaux_pp, tableaux_extent, params->orders, distmatrices_pp, distmatrices_extent, scores, ssemaps, &state); sdkStopTimer(&hTimer); runtime = sdkGetTimerValue(&hTimer); fprintf(stderr, "host execution time %f ms\n", runtime); fprintf(stderr, "%f million iterations/sec\n", (params->dbsize * (params->maxstart * MAXITER) / (runtime/1000)) / 1.0e6); for (i = 0; i < params->dbsize; i++) { /* printf("%-8s %d\n", params->names+i*(LABELSIZE+1), scores[i]); */ norm2score = norm2(scores[i], params->qn, params->orders[i]); zscore = z_gumbel(norm2score, gumbel_a, gumbel_b); pvalue = pv_gumbel(zscore); printf("%-8s %d %g %g %g\n", params->names+i*(LABELSIZE+1), scores[i], norm2score, zscore, pvalue); if (params->lsoln) for (int k = 0; k < c_qn_host; k++) if (ssemaps[i*MAXDIM + k] >= 0) printf("%3d %3d\n", k+1, ssemaps[i*MAXDIM + k]+1); } } free(scores); if (params->lsoln) free(ssemaps); } /* * copyQueryToConstantMemory() - copy the query data to device constant memory * * * Parameters: * qi - the query index of the query to copy. 
* Otherwise (query_dbinex_list is NULL), these used: * qn -query order * qtab - query tableau (in/out: may be set here) * qdmat - query distance matrix (in/out: may be set here) * qssetypes - query SSE types vector (in/out: may be set here) * qid - query id (in/out: may be set here) * c_qn_addr - address of c_qn device constant (q_qn or c_qn_noshared) * c_qtab_addr - address of c_qtab device constant * c_qdmat_addr - address of c_qdmat device constant * c_qssetypes_addr - address c_qssetypes device constant * * * Uses the global variables query_dbindex_list, tableaux, etc. * * Return value: None. * */ static void copyQueryToConstantMemory(int qi, int qn, char *qtab, float *qdmat, char *qssetypes, char *qid, int *c_qn_addr, char *c_qtab_addr, float *c_qdmat_addr, char *c_qssetypes_addr) { StopWatchInterface *hTimer = NULL; sdkCreateTimer(&hTimer) ; sdkResetTimer(&hTimer) ; sdkStartTimer(&hTimer) ; if (query_dbindex_list) { int qdbi = query_dbindex_list[qi].index; if (query_dbindex_list[qi].large) { strncpy(qid, large_names+qdbi*(LABELSIZE+1), LABELSIZE); // set the qssetypes vector as main diagonal of the query tableau for (int i = 0; i < large_orders[qdbi]; i++) qssetypes[i] = (large_tableaux+qdbi*MAXDIM*MAXDIM)[INDEX2D(i,i,MAXDIM,MAXDIM)]; /* copy query structure to constant memory on device */ checkCudaErrors( cudaMemcpy(c_qn_addr, &large_orders[qdbi], sizeof(int), cudaMemcpyHostToDevice) ); /* NB the query in constant memory is MAXDIM not MAXDIM_GPU since constant memory larger than shared memory. */ checkCudaErrors( cudaMemcpy(c_qtab_addr, large_tableaux+qdbi*MAXDIM*MAXDIM, MAXDIM*MAXDIM*sizeof(char), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(c_qdmat_addr, large_distmatrices+qdbi*MAXDIM*MAXDIM, MAXDIM*MAXDIM*sizeof(float), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(c_qssetypes_addr, qssetypes, MAXDIM*sizeof(char), cudaMemcpyHostToDevice) ); } else /* query is in the 'small' structure dbase */ { strncpy(qid, names+qdbi*(LABELSIZE+1), LABELSIZE); // set the qssetypes vector as main diagonal of the query tableau for (int i = 0; i < orders[qdbi]; i++) qssetypes[i] = (tableaux+qdbi*MAXDIM_GPU*MAXDIM_GPU)[INDEX2D(i,i,MAXDIM_GPU,MAXDIM_GPU)]; /* copy query structure to constant memory on device */ checkCudaErrors( cudaMemcpy(c_qn_addr, &orders[qdbi], sizeof(int), cudaMemcpyHostToDevice) ); /* NB the query in constant memory is MAXDIM not MAXDIM_GPU since constant memory larger than shared memory. 
This means we need to reformat the matrices into the larger size if they are in the smaller class */ for (int i = 0; i < orders[qdbi]; i++) { for (int j = i + 1; j < orders[qdbi]; j++) { char tabcode = (tableaux+qdbi*MAXDIM_GPU*MAXDIM_GPU)[INDEX2D(i,j,MAXDIM_GPU,MAXDIM_GPU)]; qtab[INDEX2D(i,j,MAXDIM,MAXDIM)] = tabcode; qtab[INDEX2D(j,i,MAXDIM,MAXDIM)] = tabcode; float dist = (distmatrices+qdbi*MAXDIM_GPU*MAXDIM_GPU)[INDEX2D(i,j,MAXDIM_GPU,MAXDIM_GPU)]; qdmat[INDEX2D(i,j,MAXDIM,MAXDIM)] = dist; qdmat[INDEX2D(j,i,MAXDIM,MAXDIM)] = dist; } } checkCudaErrors( cudaMemcpy(c_qtab_addr, qtab, MAXDIM*MAXDIM*sizeof(char), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(c_qdmat_addr, qdmat, MAXDIM*MAXDIM*sizeof(float), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(c_qssetypes_addr, qssetypes, MAXDIM*sizeof(char), cudaMemcpyHostToDevice) ); } } else // single query mode - copy to constant memory { fprintf(stderr, "XXX c_qn_addr = %p , qn = %d\n", c_qn_addr, qn); checkCudaErrors( cudaMemcpy(c_qn_addr, &qn, sizeof(qn), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(c_qtab_addr, qtab, MAXDIM*MAXDIM*sizeof(char), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(c_qdmat_addr, qdmat, MAXDIM*MAXDIM*sizeof(float), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(c_qssetypes_addr, qssetypes, MAXDIM*sizeof(char), cudaMemcpyHostToDevice) ); } sdkStopTimer(&hTimer) ; float qtime = sdkGetTimerValue(&hTimer); fprintf(stderr, "Copying query to constant memory took %f ms\n", qtime); } static void usage(const char *progname) { fprintf(stderr, "Usage: %s [-c] [-q dbfile]\n", progname); fprintf(stderr, " -c : run on host CPU not GPU card\n"); fprintf(stderr, " -q dbfile : database is read from dbfile, list of query\n" " ids is read from stdin\n"); fprintf(stderr, " -r restarts : number of restarts. 
Default %d\n", DEFAULT_MAXSTART); exit(1); } int main(int argc, char *argv[]) { CUTThread threadID[MAX_THREADS]; int num_threads = 0; int exit_status = 0; char buf[MAX_LINE_LEN]; char qtab[MAXDIM*MAXDIM]; float qdmat[MAXDIM*MAXDIM]; int qn; char qid[LABELSIZE+1]; int ltype=0,lorder=0,lsoln=0; char cltype,clorder,clsoln; FILE *dbfp; StopWatchInterface *hTimer = NULL; int total_dbsize, large_dbsize, gpu_dbsize; double dbtime,runtime; cudaPitchedPtr d_tableaux; cudaPitchedPtr d_distmatrices; int *d_orders; int *scores = NULL; int *ssemaps = NULL; int *d_scores; int *d_ssemaps; cudaError_t cuda_errcode; int i,j; char qssetypes[MAXDIM]; int c; char *queryptr = NULL; int num_queries = 0; int large_query_count = 0; double norm2score, zscore, pvalue; while ((c = getopt(argc, argv, "cq:r:")) != -1) { switch (c) { case 'c': use_gpu = false; break; case 'q': querydbmode = true; strncpy(dbfile, optarg, sizeof(dbfile)-1); break; case 'r': maxstart = atoi(optarg); break; default: usage(argv[0]); break; } } if (querydbmode) { cltype = 'T'; ltype = 1; clorder = 'T'; lorder = 1; clsoln = 'F'; lsoln = 0; if (!(queryid_list = (char *)malloc(LABELSIZE+1))) { fprintf(stderr, "malloc queryid_list failed\n"); exit(1); } queryptr = queryid_list; while (!feof(stdin)) { if (num_queries > 0) { if ((!(queryid_list = (char *)realloc(queryid_list, (num_queries+1)*(LABELSIZE+1))))) { fprintf(stderr, "realloc queryid_list failed\n"); exit(1); } } if (!fgets(buf, MAX_LINE_LEN, stdin)) break; strncpy(queryptr, buf, LABELSIZE); queryptr[LABELSIZE-1] = '\0'; if (queryptr[strlen(queryptr)-1] == '\n') queryptr[strlen(queryptr)-1] = '\0'; queryptr += (LABELSIZE+1); num_queries++; } } else { if (fscanf(stdin, "%s\n", dbfile) != 1) { fprintf(stderr, "ERROR reading dbfilename from stdin\n"); exit(1); } if (fscanf(stdin, "%c %c %c\n", &cltype, &clorder, &clsoln) != 3) { fprintf(stderr, "ERROR reading options from stdin\n"); exit(1); } if (cltype == 'T') ltype = 1; if (clorder == 'T') lorder = 1; if (clsoln == 'T') lsoln = 1; if (fscanf(stdin, "%8s %d\n", qid, &qn) != 2) { fprintf(stderr, "ERROR parsing query tableau header from stdin\n"); exit(1); } if (parse_tableau(stdin, MAXDIM, qn, qtab) < 0) { fprintf(stderr, "ERROR parsing query tableau from stdin\n"); exit(1); } if (parse_distmatrix(stdin, MAXDIM, qn, qdmat, 0) < 0) { fprintf(stderr, "ERROR parsing query distance matrix from stdin\n"); exit(1); } } if (!ltype) { fprintf(stderr, "WARNING: LTYPE is always set to T\n"); ltype = 1; cltype = 'T'; } if (!(dbfp = fopen(dbfile, "r"))) { fprintf(stderr, "ERROR opening db file %s\n", dbfile); exit(1); } fprintf(stderr, "Loading database...\n"); sdkCreateTimer(&hTimer) ; sdkResetTimer(&hTimer) ; sdkStartTimer(&hTimer) ; total_dbsize = read_database(dbfp, &tableaux, &distmatrices, &large_tableaux, &large_distmatrices, &orders, &names, &large_orders, &large_names, &large_dbsize); if (total_dbsize < 0) { fprintf(stderr, "ERROR loading database\n"); exit(1); } gpu_dbsize = total_dbsize - large_dbsize; sdkStopTimer(&hTimer) ; dbtime = sdkGetTimerValue(&hTimer); fprintf(stderr, "Loaded %d db entries (%d order > %d) in %f ms\n", total_dbsize, large_dbsize, MAXDIM_GPU, dbtime); if (querydbmode) { /* Convert the list of query sids to list of indices in db for later rapid lookup. TODO: we should build a hash table rather than this highly inefficient linear search for each query id, but it's only done once and db not that big... 
*/ fprintf(stderr, "Building query index list...\n"); sdkResetTimer(&hTimer) ; sdkStartTimer(&hTimer) ; if (!(query_dbindex_list = (dbIndex_t *)malloc(num_queries*sizeof(dbIndex_t)))) { fprintf(stderr, "malloc query_dbindex_list failed\n"); exit(1); } for (i = 0; i < num_queries; i++) { /* fprintf(stderr, "zzz %s\n", queryid_list+i*(LABELSIZE+1)); */ bool found = false; for (j = 0; j < gpu_dbsize; j++) /* search 'small' structure dbase */ { if (!strcasecmp(queryid_list+i*(LABELSIZE+1),names+j*(LABELSIZE+1))) { query_dbindex_list[i].large = false; query_dbindex_list[i].index = j; found = true; break; } } if (!found) { for (j = 0; j < large_dbsize; j++) /* search 'large' structure dbase*/ { if (!strcasecmp(queryid_list + i*(LABELSIZE+1), large_names + j*(LABELSIZE+1))) { query_dbindex_list[i].large = true; query_dbindex_list[i].index = j; large_query_count++; found = true; break; } } } if (!found) { fprintf(stderr, "ERROR: query %s not found\n", queryid_list+i*(LABELSIZE+1)); exit(1); } } sdkStopTimer(&hTimer); fprintf(stderr, "Built query index (%d queries (%d large)) in %f ms\n", num_queries, large_query_count, sdkGetTimerValue(&hTimer)); } else { num_queries = 0; query_dbindex_list = NULL; // set the qssetypes vector as main diagonal of the query tableau for (i = 0; i < qn; i++) qssetypes[i] = qtab[INDEX2D(i,i,MAXDIM,MAXDIM)]; } /* TODO allow multiple GPUs (need one thread for each) */ if (use_gpu) { /* int devnum = cutGetMaxGflopsDeviceId(); fprintf(stderr, "using max gflops device %d: ", devnum); */ /* If there is a compute capability 2 device ("Fermi" architecture) (or higher) then use that, and do NOT use shared memory as it is faster to just rely on the new "NVIDIA Parallel DataCache (TM)" -- just use global memory for all (small and large) structures */ int devnum, deviceCount, gflops,max_gflops=0, sel_devnum; cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "There is no device supporting CUDA.\n"); exit(1); } fprintf(stderr, "found %d CUDA devices\n", deviceCount); for (devnum = 0; devnum < deviceCount; devnum++) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, devnum); if (deviceProp.major >= 2) { fprintf(stderr, "found Fermi architecture (compute capability %d.%d) device %d: %s\n", deviceProp.major, deviceProp.minor, devnum, deviceProp.name); sel_devnum = devnum; use_shared_memory = true; break; } else { gflops = deviceProp.multiProcessorCount * deviceProp.clockRate; fprintf(stderr, "device %d: %s\n", devnum, deviceProp.name); if (gflops > max_gflops) { max_gflops = gflops; sel_devnum = devnum; use_shared_memory = true; } } } fprintf(stderr, "using device %d: ", sel_devnum); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, sel_devnum); fprintf(stderr, "%s\n", deviceProp.name); cudaSetDevice( sel_devnum ); } fprintf(stderr, "maxstart = %d\n", maxstart); srand48(1234); if (use_gpu) { /* setup execution configuration parameters */ /* TODO optimize for different architectures (automatically) */ const int blocks = 128; const int NUM_THREADS = 128; dim3 dimGrid(blocks); // blocks dim3 dimBlock(NUM_THREADS); // threads per block fprintf(stderr, "Execution configuration: Grid = (%d,%d,%d) Block = (%d,%d,%d)\n", dimGrid.x,dimGrid.y,dimGrid.z, dimBlock.x,dimBlock.y,dimBlock.z); fprintf(stderr, "using shared memory for small db structs: %s\n", use_shared_memory ? 
"YES" : "NO"); /* first do the 'small' db structures on GPU (with shared memory) */ fprintf(stderr, "Copying database to device...\n"); sdkResetTimer(&hTimer) ; sdkStartTimer(&hTimer) ; curandState *devStates; /* allocate space on device for random number generator state */ int rc; if ((rc = cudaMalloc((void **)&devStates, blocks*NUM_THREADS*sizeof(curandState))) != cudaSuccess) { fprintf(stderr, "cudaMalloc devStates failed %d\n", rc); exit(1); } /* initialize device random number generator */ sdkStartTimer(&hTimer) ; init_rng<<<dimGrid, dimBlock>>>(devStates); if ((rc = cudaGetLastError()) != cudaSuccess) { fprintf(stderr, "init_rng kernel error %d\n", rc); } cudaDeviceSynchronize(); if ((rc = cudaGetLastError()) != cudaSuccess) { fprintf(stderr, "init_rng sync error %d\n", rc); } sdkStopTimer(&hTimer) ; fprintf(stderr, "Initialized device RNG with %d states (%d KB) in %f ms\n", blocks*NUM_THREADS, blocks*NUM_THREADS*sizeof(curandState)/1024, sdkGetTimerValue(&hTimer)); cudaExtent tableaux_extent = make_cudaExtent(MAXDIM_GPU, MAXDIM_GPU, gpu_dbsize); checkCudaErrors( cudaMalloc3D(&d_tableaux, tableaux_extent) ); fprintf(stderr, "d_tableaux.pitch == %u xsize == %u ysize == %u\n", d_tableaux.pitch, d_tableaux.xsize, d_tableaux.ysize); cudaExtent distmatrices_extent = make_cudaExtent(MAXDIM_GPU*sizeof(float), MAXDIM_GPU, gpu_dbsize); checkCudaErrors( cudaMalloc3D(&d_distmatrices, distmatrices_extent) ); fprintf(stderr, "d_distmatrices.pitch == %u xsize == %u ysize == %u\n", d_distmatrices.pitch, d_distmatrices.xsize, d_distmatrices.ysize); checkCudaErrors( cudaMalloc((void **)&d_orders, gpu_dbsize*sizeof(int)) ); cudaMemcpy3DParms copyParams = { 0 }; // srcPtr is tricky: need to give pitch of row, #elements in row, // then height, omitting 3rd dimension (doesn't seem to be documented) // (I found this info on 28/1/2010 at // http://sites.google.com/site/cudaiap2009/cookbook-1). // Note pitch of row on host is just MAXDIM_GPU, we don't need padding here copyParams.srcPtr = make_cudaPitchedPtr((void*)tableaux, MAXDIM_GPU, MAXDIM_GPU, MAXDIM_GPU); fprintf(stderr, "srcPtr.pitch == %u\n", copyParams.srcPtr.pitch); copyParams.dstPtr = d_tableaux; copyParams.extent = tableaux_extent; copyParams.kind = cudaMemcpyHostToDevice; checkCudaErrors( cudaMemcpy3D(&copyParams) ); cudaMemcpy3DParms copyParams2 = { 0 }; copyParams2.srcPtr = make_cudaPitchedPtr((void*)distmatrices, MAXDIM_GPU*sizeof(float), MAXDIM_GPU, MAXDIM_GPU); fprintf(stderr, "distmatrices srcPtr.pitch == %u\n", copyParams2.srcPtr.pitch); copyParams2.dstPtr = d_distmatrices; copyParams2.extent = distmatrices_extent; copyParams2.kind = cudaMemcpyHostToDevice; checkCudaErrors( cudaMemcpy3D(&copyParams2) ); checkCudaErrors( cudaMemcpy(d_orders, orders, gpu_dbsize*sizeof(int), cudaMemcpyHostToDevice) ); sdkStopTimer(&hTimer) ; dbtime = sdkGetTimerValue(&hTimer); fprintf(stderr, "Copied %d entries to GPU in %f ms\n", gpu_dbsize, dbtime); /* allocate space for output */ checkCudaErrors( cudaMalloc((void **)&d_scores, gpu_dbsize*sizeof(int))); if (!(scores = (int *)malloc(gpu_dbsize*sizeof(int)))) { fprintf(stderr, "malloc scores failed\n"); goto bye; } if (lsoln) { checkCudaErrors( cudaMalloc((void **)&d_ssemaps, gpu_dbsize*MAXDIM*sizeof(int))); if (!(ssemaps = (int *)malloc(gpu_dbsize*MAXDIM*sizeof(int)))) { fprintf(stderr, "malloc ssemaps failed\n"); goto bye; } } const_addr_t const_addr; int query_count = (num_queries == 0 ? 
1 : num_queries); for (int qi = 0; qi < query_count; qi++) { if (use_shared_memory) { get_device_constant_addresses(&const_addr); copyQueryToConstantMemory(qi, qn, qtab, qdmat, qssetypes, qid, const_addr.c_qn_addr, const_addr.c_qtab_addr, const_addr.c_qdmat_addr, const_addr.c_qssetypes_addr); // checkCudaErrors( cudaMemcpy(const_addr.c_qn_addr, &qn, sizeof(qn), cudaMemcpyHostToDevice) ); fprintf(stderr,"qn=%d\n",qn); //XXX } else { get_device_constant_addresses_noshared_small(&const_addr); copyQueryToConstantMemory(qi, qn, qtab, qdmat, qssetypes, qid, const_addr.c_qn_noshared_small_addr, const_addr.c_qtab_noshared_small_addr, const_addr.c_qdmat_noshared_small_addr, const_addr.c_qssetypes_noshared_small_addr); } printf("# cudaSaTabsearch LTYPE = %c LORDER = %c LSOLN = %c\n", cltype, clorder, clsoln); printf("# QUERY ID = %-8s\n", qid); printf("# DBFILE = %-80s\n", dbfile); /* launch thread to do large db structs on host */ searchParams_t host_params; host_params.ltype = ltype; host_params.lorder = lorder; host_params.lsoln = lsoln; host_params.maxstart = maxstart; host_params.num_queries = num_queries; host_params.query_dbindex_list = query_dbindex_list; host_params.single_query_qid = qi; memcpy(host_params.qtab, qtab, sizeof(qtab)); memcpy(host_params.qdmat, qdmat, sizeof(qdmat)); memcpy(host_params.qid, qid, sizeof(qid)); host_params.qn = qn; host_params.qssetypes = qssetypes; host_params.maxdim = MAXDIM; host_params.dbsize = large_dbsize; host_params.tableaux = large_tableaux; host_params.distmatrices = large_distmatrices; host_params.orders = large_orders; host_params.names = large_names; //XXX threadID[num_threads++] = cutStartThread((CUT_THREADROUTINE)tabsearch_host_thread, &host_params); fprintf(stderr, "Executing simulated annealing tableaux match kernel (%sshared memory) on GPU for qid %s...\n", use_shared_memory ? 
" " : "no ", qid); checkCudaErrors( cudaDeviceSynchronize() ); sdkResetTimer(&hTimer) ; sdkStartTimer(&hTimer) ; if (use_shared_memory) { int xxx_qn=-1; checkCudaErrors( cudaMemcpy(&xxx_qn, const_addr.c_qn_addr, sizeof(qn), cudaMemcpyDeviceToHost) ); fprintf(stderr,"xxx_qn=%d\n",xxx_qn); //XXX sa_tabsearch_gpu<<<dimGrid,dimBlock>>>(gpu_dbsize, lorder, lsoln, maxstart, d_tableaux, tableaux_extent, d_orders, d_distmatrices, distmatrices_extent, d_scores, d_ssemaps, devStates); } else { sa_tabsearch_gpu_noshared_small<<<dimGrid,dimBlock>>>(gpu_dbsize, lorder, lsoln, maxstart, d_tableaux, tableaux_extent, d_orders, d_distmatrices, distmatrices_extent, d_scores, d_ssemaps, devStates); } cuda_errcode = cudaGetLastError(); if (cuda_errcode != cudaSuccess) { fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(cuda_errcode)); exit_status = 1; goto bye; } checkCudaErrors( cudaDeviceSynchronize() ); sdkStopTimer(&hTimer) ; runtime = sdkGetTimerValue(&hTimer); fprintf(stderr, "GPU execution time %f ms\n", runtime); fprintf(stderr, "%f million iterations/sec\n", ((float)gpu_dbsize * ((float)maxstart * (float)MAXITER) / (runtime/1000)) / 1.0e6); /* Get results from device */ checkCudaErrors( cudaMemcpy(scores, d_scores, gpu_dbsize*sizeof(int), cudaMemcpyDeviceToHost) ); if (lsoln) checkCudaErrors( cudaMemcpy(ssemaps, d_ssemaps, gpu_dbsize*MAXDIM*sizeof(int), cudaMemcpyDeviceToHost) ); /* Wait for host thread */ //XXX cutWaitForThreads(threadID, num_threads); //XXX --num_threads; /* TODO we could reduce wasted time waiting by running all host (large db) queries in the one thread instead of matching up with GPU query in this loop (actuall, more like the other way around usually, the GPU ends up idle while host is still runnign since the latter is so much slower even though it has very few db entries unlike GPU) */ for (i = 0; i < gpu_dbsize; i++) { /* printf("%-8s %d\n", names+i*(LABELSIZE+1), scores[i]); */ norm2score = norm2(scores[i], qn, orders[i]); zscore = z_gumbel(norm2score, gumbel_a, gumbel_b); pvalue = pv_gumbel(zscore); printf("%-8s %d %g %g %g\n", names+i*(LABELSIZE+1), scores[i], norm2score, zscore, pvalue); if (lsoln) for (int k = 0; k < qn; k++) if (ssemaps[i*MAXDIM + k] >= 0) printf("%3d %3d\n", k+1, ssemaps[i*MAXDIM + k]+1); } } checkCudaErrors( cudaFree(d_tableaux.ptr) ); checkCudaErrors( cudaFree(d_distmatrices.ptr) ); checkCudaErrors( cudaFree(d_orders) ); checkCudaErrors( cudaFree(d_scores) ); free(scores); scores = NULL; if (lsoln) { checkCudaErrors( cudaFree(d_ssemaps) ); free(ssemaps); ssemaps = NULL; } /* now do the 'large' db structures on GPU (not using shared memory) */ if (large_dbsize > 0) { fprintf(stderr, "Copying large structure database to device...\n"); sdkResetTimer(&hTimer) ; sdkStartTimer(&hTimer) ; tableaux_extent = make_cudaExtent(MAXDIM, MAXDIM, large_dbsize); checkCudaErrors( cudaMalloc3D(&d_tableaux, tableaux_extent) ); fprintf(stderr, "d_tableaux.pitch == %u xsize == %u ysize == %u\n", d_tableaux.pitch, d_tableaux.xsize, d_tableaux.ysize); distmatrices_extent = make_cudaExtent(MAXDIM*sizeof(float), MAXDIM, large_dbsize); checkCudaErrors( cudaMalloc3D(&d_distmatrices, distmatrices_extent) ); fprintf(stderr, "d_distmatrices.pitch == %u xsize == %u ysize == %u\n", d_distmatrices.pitch, d_distmatrices.xsize, d_distmatrices.ysize); checkCudaErrors( cudaMalloc((void **)&d_orders, large_dbsize*sizeof(int)) ); cudaMemcpy3DParms copyParamsl = { 0 }; // srcPtr is tricky: need to give pitch of row, #elements in row, // then height, omitting 3rd dimension 
(doesn't seem to be documented) // (I found this info on 28/1/2010 at // http://sites.google.com/site/cudaiap2009/cookbook-1). // Note pitch of row on host is just MAXDIM_GPU, we don't need padding here copyParamsl.srcPtr = make_cudaPitchedPtr((void*)large_tableaux, MAXDIM, MAXDIM, MAXDIM); fprintf(stderr, "srcPtr.pitch == %u\n", copyParamsl.srcPtr.pitch); copyParamsl.dstPtr = d_tableaux; copyParamsl.extent = tableaux_extent; copyParamsl.kind = cudaMemcpyHostToDevice; checkCudaErrors( cudaMemcpy3D(&copyParamsl) ); cudaMemcpy3DParms copyParams2l = { 0 }; copyParams2l.srcPtr = make_cudaPitchedPtr((void*)large_distmatrices, MAXDIM*sizeof(float), MAXDIM, MAXDIM); fprintf(stderr, "distmatrices srcPtr.pitch == %u\n", copyParams2l.srcPtr.pitch); copyParams2l.dstPtr = d_distmatrices; copyParams2l.extent = distmatrices_extent; copyParams2l.kind = cudaMemcpyHostToDevice; checkCudaErrors( cudaMemcpy3D(&copyParams2l) ); checkCudaErrors( cudaMemcpy(d_orders, large_orders, large_dbsize*sizeof(int), cudaMemcpyHostToDevice) ); sdkStopTimer(&hTimer) ; dbtime = sdkGetTimerValue(&hTimer); fprintf(stderr, "Copied %d large entries to GPU in %f ms\n", large_dbsize, dbtime); /* allocate space for output */ checkCudaErrors( cudaMalloc((void **)&d_scores, large_dbsize*sizeof(int))); if (!(scores = (int *)malloc(large_dbsize*sizeof(int)))) { fprintf(stderr, "malloc scores failed\n"); goto bye; } if (lsoln) { checkCudaErrors( cudaMalloc((void **)&d_ssemaps, large_dbsize*MAXDIM*sizeof(int))); if (!(ssemaps = (int *)malloc(large_dbsize*MAXDIM*sizeof(int)))) { fprintf(stderr, "malloc ssemaps failed\n"); goto bye; } } for (int qi = 0; qi < query_count; qi++) { get_device_constant_addresses_noshared(&const_addr); copyQueryToConstantMemory(qi, qn, qtab, qdmat, qssetypes, qid, const_addr.c_qn_noshared_addr, const_addr.c_qtab_noshared_addr, const_addr.c_qdmat_noshared_addr, const_addr.c_qssetypes_noshared_addr); printf("# cudaSaTabsearch LTYPE = %c LORDER = %c LSOLN = %c\n", cltype, clorder, clsoln); printf("# QUERY ID = %-8s\n", qid); printf("# DBFILE = %-80s\n", dbfile); fprintf(stderr, "Executing simulated annealing tableaux match kernel (no shared memory) on GPU for qid %s...\n",qid); checkCudaErrors( cudaDeviceSynchronize() ); sdkResetTimer(&hTimer) ; sdkStartTimer(&hTimer) ; int xxx_qn_noshared=-1; checkCudaErrors( cudaMemcpy(&xxx_qn_noshared, const_addr.c_qn_noshared_addr, sizeof(qn), cudaMemcpyDeviceToHost) ); fprintf(stderr,"xxx_qn_noshared=%d\n",xxx_qn_noshared); //XXX sa_tabsearch_gpu_noshared<<<dimGrid,dimBlock>>>(large_dbsize, lorder, lsoln, maxstart, d_tableaux, tableaux_extent, d_orders, d_distmatrices, distmatrices_extent, d_scores, d_ssemaps, devStates); cuda_errcode = cudaGetLastError(); if (cuda_errcode != cudaSuccess) { fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(cuda_errcode)); exit_status = 1; goto bye; } checkCudaErrors( cudaDeviceSynchronize() ); sdkStopTimer(&hTimer) ; runtime = sdkGetTimerValue(&hTimer); fprintf(stderr, "GPU (no shared memory) execution time %f ms\n", runtime); fprintf(stderr, "%f million iterations/sec\n", ((float)large_dbsize * ((float)maxstart * (float)MAXITER) / (runtime/1000)) / 1.0e6); /* Get results from device */ checkCudaErrors( cudaMemcpy(scores, d_scores, large_dbsize*sizeof(int), cudaMemcpyDeviceToHost) ); if (lsoln) checkCudaErrors( cudaMemcpy(ssemaps, d_ssemaps, large_dbsize * MAXDIM * sizeof(int), cudaMemcpyDeviceToHost) ); for (i = 0; i < large_dbsize; i++) { /* printf("%-8s %d\n", large_names+i*(LABELSIZE+1), scores[i]); */ norm2score = 
norm2(scores[i], qn, large_orders[i]); zscore = z_gumbel(norm2score, gumbel_a, gumbel_b); pvalue = pv_gumbel(zscore); printf("%-8s %d %g %g %g\n", large_names+i*(LABELSIZE+1), scores[i], norm2score, zscore, pvalue); if (lsoln) for (int k = 0; k < qn; k++) if (ssemaps[i*MAXDIM + k] >= 0) printf("%3d %3d\n", k+1, ssemaps[i*MAXDIM + k]+1); } } } } else { /* running on host CPU */ searchParams_t host_params; host_params.ltype = ltype; host_params.lorder = lorder; host_params.lsoln = lsoln; host_params.maxstart = maxstart; host_params.num_queries = num_queries; host_params.single_query_qid = -1; host_params.query_dbindex_list = query_dbindex_list; memcpy(host_params.qtab, qtab, sizeof(qtab)); memcpy(host_params.qdmat, qdmat, sizeof(qdmat)); memcpy(host_params.qid, qid, sizeof(qid)); host_params.qn = qn; host_params.qssetypes = qssetypes; host_params.maxdim = MAXDIM_GPU; host_params.dbsize = gpu_dbsize; /* first do small structure db */ host_params.tableaux = tableaux; host_params.distmatrices = distmatrices; host_params.orders = orders; host_params.names = names; tabsearch_host_thread(&host_params); /* then large structure db */ if (large_dbsize > 0) { host_params.maxdim = MAXDIM; host_params.dbsize = large_dbsize; host_params.tableaux = large_tableaux; host_params.distmatrices = large_distmatrices; host_params.orders = large_orders; host_params.names = large_names; tabsearch_host_thread(&host_params); } } bye: /* cleanup and exit */ free(tableaux); free(distmatrices); free(orders); free(names); free(scores); free(large_tableaux); free(large_distmatrices); free(large_names); free(large_orders); if (lsoln) free(ssemaps); sdkDeleteTimer( &hTimer); if (use_gpu) { if (large_dbsize > 0) { checkCudaErrors( cudaFree(d_tableaux.ptr) ); checkCudaErrors( cudaFree(d_distmatrices.ptr) ); checkCudaErrors( cudaFree(d_orders) ); checkCudaErrors( cudaFree(d_scores) ); if (lsoln) checkCudaErrors( cudaFree(d_ssemaps) ); } cudaThreadExit(); } exit(exit_status); }
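/*
 * The cudaMemcpy3D() calls above are the trickiest part of the database upload:
 * the source pitched pointer must describe the densely packed host layout while
 * the destination uses the padded pitch returned by cudaMalloc3D(). The sketch
 * below is not part of the original program (NSTRUCT, DIM and h_matrices are
 * hypothetical names); it shows the same pattern in isolation and is kept inside
 * #if 0 so it does not affect compilation.
 */
#if 0
const int NSTRUCT = 1000, DIM = 100;
float *h_matrices = (float *)malloc((size_t)NSTRUCT * DIM * DIM * sizeof(float));

cudaPitchedPtr d_matrices;
cudaExtent extent = make_cudaExtent(DIM * sizeof(float), DIM, NSTRUCT); /* width is in bytes */
checkCudaErrors( cudaMalloc3D(&d_matrices, extent) );

cudaMemcpy3DParms p = { 0 };
/* host rows are densely packed, so the host pitch is just the row width in bytes */
p.srcPtr = make_cudaPitchedPtr((void *)h_matrices, DIM * sizeof(float), DIM, DIM);
p.dstPtr = d_matrices;   /* device rows may be padded out to d_matrices.pitch */
p.extent = extent;
p.kind   = cudaMemcpyHostToDevice;
checkCudaErrors( cudaMemcpy3D(&p) );

/* inside a kernel, row j of matrix k then starts at
   (float *)((char *)d_matrices.ptr + ((size_t)k * DIM + j) * d_matrices.pitch)  */
#endif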
16758a0c423c2bfa01ea45d2f1aaa917133ab1d9.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * This program uses the device CURAND API to calculate what
 * proportion of pseudo-random ints have low bit set.
 */
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
/* include MTGP host helper functions */
#include <hiprand/hiprand_mtgp32_host.h>
/* include MTGP pre-computed parameter sets */
#include <rocrand/rocrand_mtgp32_11213.h>

#define CUDA_CALL(x) do { if((x) != hipSuccess) { \
    printf("Error at %s:%d\n",__FILE__,__LINE__); \
    return EXIT_FAILURE;}} while(0)

#define CURAND_CALL(x) do { if((x) != HIPRAND_STATUS_SUCCESS) { \
    printf("Error at %s:%d\n",__FILE__,__LINE__); \
    return EXIT_FAILURE;}} while(0)

__global__ void generate_kernel(hiprandStateMtgp32_t *state, int n, int *result)
{
    int id = threadIdx.x + blockIdx.x * 256;
    int count = 0;
    unsigned int x;

    /* Generate pseudo-random unsigned ints */
    for(int i = 0; i < n; i++) {
        x = hiprand(&state[blockIdx.x]);
        /* Check if low bit set */
        if(x & 1) {
            count++;
        }
    }

    /* Store results */
    result[id] += count;
}

int main(int argc, char *argv[])
{
    int i;
    long long total;
    hiprandStateMtgp32_t *devMTGPStates;
    mtgp32_kernel_params_t *devKernelParams;
    int *devResults, *hostResults;
    int sampleCount = 10000;

    /* Allow over-ride of sample count */
    if (argc == 2) {
        sscanf(argv[1],"%d",&sampleCount);
    }

    /* Allocate space for results on host */
    hostResults = (int *)calloc(64 * 256, sizeof(int));

    /* Allocate space for results on device */
    CUDA_CALL(hipMalloc((void **)&devResults, 64 * 256 * sizeof(int)));

    /* Set results to 0 */
    CUDA_CALL(hipMemset(devResults, 0, 64 * 256 * sizeof(int)));

    /* Allocate space for prng states on device */
    CUDA_CALL(hipMalloc((void **)&devMTGPStates, 64 * sizeof(hiprandStateMtgp32_t)));

    /* Setup MTGP prng states */

    /* Allocate space for MTGP kernel parameters */
    CUDA_CALL(hipMalloc((void**)&devKernelParams, sizeof(mtgp32_kernel_params_t)));

    /* Reformat from predefined parameter sets to kernel format, */
    /* and copy kernel parameters to device memory */
    CURAND_CALL(hiprandMakeMTGP32Constants(mtgp32dc_params_fast_11213, devKernelParams));

    /* Initialize one state per thread block */
    CURAND_CALL(hiprandMakeMTGP32KernelState(devMTGPStates,
                mtgp32dc_params_fast_11213, devKernelParams, 64, 1234));

    /* State setup is complete */

    /* Generate and use pseudo-random */
    for(i = 0; i < 10; i++) {
        hipLaunchKernelGGL(( generate_kernel), dim3(64), dim3(256), 0, 0, devMTGPStates, sampleCount, devResults);
    }

    /* Copy device memory to host */
    CUDA_CALL(hipMemcpy(hostResults, devResults, 64 * 256 * sizeof(int),
                        hipMemcpyDeviceToHost));

    /* Show result */
    total = 0;
    for(i = 0; i < 64 * 256; i++) {
        total += hostResults[i];
    }
    printf("Fraction with low bit set was %10.13g\n",
           (double)total / (64.0f * 256.0f * sampleCount * 10.0f));

    /* Cleanup */
    CUDA_CALL(hipFree(devMTGPStates));
    CUDA_CALL(hipFree(devResults));
    free(hostResults);
    printf("^^^^ kernel_mtgp_example PASSED\n");
    return EXIT_SUCCESS;
}
16758a0c423c2bfa01ea45d2f1aaa917133ab1d9.cu
/*
 * This program uses the device CURAND API to calculate what
 * proportion of pseudo-random ints have low bit set.
 */
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
/* include MTGP host helper functions */
#include <curand_mtgp32_host.h>
/* include MTGP pre-computed parameter sets */
#include <curand_mtgp32dc_p_11213.h>

#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
    printf("Error at %s:%d\n",__FILE__,__LINE__); \
    return EXIT_FAILURE;}} while(0)

#define CURAND_CALL(x) do { if((x) != CURAND_STATUS_SUCCESS) { \
    printf("Error at %s:%d\n",__FILE__,__LINE__); \
    return EXIT_FAILURE;}} while(0)

__global__ void generate_kernel(curandStateMtgp32 *state, int n, int *result)
{
    int id = threadIdx.x + blockIdx.x * 256;
    int count = 0;
    unsigned int x;

    /* Generate pseudo-random unsigned ints */
    for(int i = 0; i < n; i++) {
        x = curand(&state[blockIdx.x]);
        /* Check if low bit set */
        if(x & 1) {
            count++;
        }
    }

    /* Store results */
    result[id] += count;
}

int main(int argc, char *argv[])
{
    int i;
    long long total;
    curandStateMtgp32 *devMTGPStates;
    mtgp32_kernel_params *devKernelParams;
    int *devResults, *hostResults;
    int sampleCount = 10000;

    /* Allow over-ride of sample count */
    if (argc == 2) {
        sscanf(argv[1],"%d",&sampleCount);
    }

    /* Allocate space for results on host */
    hostResults = (int *)calloc(64 * 256, sizeof(int));

    /* Allocate space for results on device */
    CUDA_CALL(cudaMalloc((void **)&devResults, 64 * 256 * sizeof(int)));

    /* Set results to 0 */
    CUDA_CALL(cudaMemset(devResults, 0, 64 * 256 * sizeof(int)));

    /* Allocate space for prng states on device */
    CUDA_CALL(cudaMalloc((void **)&devMTGPStates, 64 * sizeof(curandStateMtgp32)));

    /* Setup MTGP prng states */

    /* Allocate space for MTGP kernel parameters */
    CUDA_CALL(cudaMalloc((void**)&devKernelParams, sizeof(mtgp32_kernel_params)));

    /* Reformat from predefined parameter sets to kernel format, */
    /* and copy kernel parameters to device memory */
    CURAND_CALL(curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, devKernelParams));

    /* Initialize one state per thread block */
    CURAND_CALL(curandMakeMTGP32KernelState(devMTGPStates,
                mtgp32dc_params_fast_11213, devKernelParams, 64, 1234));

    /* State setup is complete */

    /* Generate and use pseudo-random */
    for(i = 0; i < 10; i++) {
        generate_kernel<<<64, 256>>>(devMTGPStates, sampleCount, devResults);
    }

    /* Copy device memory to host */
    CUDA_CALL(cudaMemcpy(hostResults, devResults, 64 * 256 * sizeof(int),
                         cudaMemcpyDeviceToHost));

    /* Show result */
    total = 0;
    for(i = 0; i < 64 * 256; i++) {
        total += hostResults[i];
    }
    printf("Fraction with low bit set was %10.13g\n",
           (double)total / (64.0f * 256.0f * sampleCount * 10.0f));

    /* Cleanup */
    CUDA_CALL(cudaFree(devMTGPStates));
    CUDA_CALL(cudaFree(devResults));
    free(hostResults);
    printf("^^^^ kernel_mtgp_example PASSED\n");
    return EXIT_SUCCESS;
}
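/*
 * A hedged aside, not part of the CURAND sample above: each MTGP32 state drives
 * one block of at most 256 threads, and the predefined mtgp32dc_params_fast_11213
 * set provides up to 200 states, so the 64-block / 256-thread configuration used
 * here stays within this generator's limits. Since the low bit of a uniform
 * 32-bit draw is set with probability 0.5, the printed fraction should lie within
 * a few binomial standard errors of 0.5; the sketch below (assuming the variables
 * from main() and <math.h>) shows one way to check that, kept in #if 0.
 */
#if 0
double n_samples = 64.0 * 256.0 * (double)sampleCount * 10.0;  /* ints examined in total */
double fraction  = (double)total / n_samples;
double std_err   = sqrt(0.25 / n_samples);                     /* binomial std. error at p = 0.5 */
if (fabs(fraction - 0.5) > 4.0 * std_err) {
    fprintf(stderr, "WARNING: low-bit fraction %g is more than 4 sigma from 0.5\n", fraction);
}
#endif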
7872866b039d0d80a1d7a690308241b24e113af3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/stack_kernel.h" #include "paddle/fluid/memory/memory.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/fast_divmod.h" namespace phi { template <typename IndexT> struct DivmodWarpper { public: void SetDivden(IndexT dividen) { divmoder = phi::funcs::FastDivMod(dividen); } __device__ inline phi::funcs::FastDivMod::DivModT div_mod(IndexT val) { return divmoder.Divmod(val); } private: phi::funcs::FastDivMod divmoder; }; template <> struct DivmodWarpper<int64_t> { public: using DivModT = phi::AlignedVector<int64_t, 2>; void SetDivden(int64_t dividen) { dividen_ = dividen; } __device__ inline DivModT div_mod(int64_t val) { DivModT data; data[0] = val / dividen_; data[1] = val - data[0] * dividen_; return data; } private: int64_t dividen_; }; constexpr int kWarpperSize = 64; template <typename T, typename IndexT> struct PointerArray : public DivmodWarpper<IndexT> { public: const T* data[kWarpperSize]; PointerArray(const std::vector<const DenseTensor*>& x, int num, int64_t dividen) { this->SetDivden(dividen); for (auto i = 0; i < num; ++i) { data[i] = x[i]->data<T>(); } } }; template <typename Context, typename T, typename IndexT> struct PointerToPointer : public DivmodWarpper<IndexT> { public: T** data; PointerToPointer(const Context& ctx, const std::vector<const DenseTensor*>& x, int num, int64_t dividen) { this->SetDivden(dividen); auto byte_len = num * sizeof(T*); std::vector<const T*> x_datas(num); for (int i = 0; i < num; ++i) { x_datas[i] = x[i]->data<T>(); } auto tmp_x_data = paddle::memory::Alloc( ctx.GetPlace(), byte_len, phi::Stream(reinterpret_cast<phi::StreamId>(ctx.stream()))); paddle::memory::Copy(ctx.GetPlace(), tmp_x_data->ptr(), phi::CPUPlace(), reinterpret_cast<void*>(x_datas.data()), x_datas.size() * sizeof(T*), ctx.stream()); data = reinterpret_cast<T**>(tmp_x_data->ptr()); } }; template <typename T, typename IndexT, typename WarpT> __global__ void StackCUDAKernel(WarpT input_warpper, IndexT split_size, IndexT rows, IndexT cols, T* __restrict__ output) { IndexT grid_x = static_cast<IndexT>(blockIdx.x) * blockDim.x + threadIdx.x; IndexT grid_x_stride = static_cast<IndexT>(blockDim.x) * gridDim.x; IndexT grid_y_stride = static_cast<IndexT>(blockDim.y) * gridDim.y; for (; grid_x < cols; grid_x += grid_x_stride) { IndexT grid_y = static_cast<IndexT>(blockIdx.y) * blockDim.y + threadIdx.y; auto divmod_rslt = input_warpper.div_mod(grid_x); const T* input_ptr = input_warpper.data[divmod_rslt[0]]; #pragma unroll for (; grid_y < rows; grid_y += grid_y_stride) { output[grid_y * cols + grid_x] = input_ptr[grid_y * split_size + divmod_rslt[1]]; } } } template <typename T, typename Context> void StackKernel(const Context& dev_ctx, 
const std::vector<const DenseTensor*>& x, int axis, DenseTensor* out) { if (axis < 0) axis += (x[0]->dims().size() + 1); int n = static_cast<int>(x.size()); T* y_data = dev_ctx.template Alloc<T>(out); // Split x dim from axis to matrix int64_t x_row = 1, x_col = 1; for (int i = 0; i < axis; ++i) { x_row *= x[0]->dims()[i]; } x_col = x[0]->numel() / x_row; int64_t out_col = x_col * n; auto config = phi::backends::gpu::GetGpuLaunchConfig2D(dev_ctx, out_col, x_row); #define IMPL_STACK_CUDA_KERNEL(index_t, input_warpper) \ hipLaunchKernelGGL(( StackCUDAKernel<T, index_t, decltype(input_warpper)>) \ , dim3(config.block_per_grid), \ config.thread_per_block, \ 0, \ dev_ctx.stream(), input_warpper, \ static_cast<index_t>(x_col), \ static_cast<index_t>(x_row), \ static_cast<index_t>(out_col), \ y_data); bool use_int32 = out->numel() < std::numeric_limits<int32_t>::max(); if (n <= kWarpperSize) { if (use_int32) { PointerArray<T, int32_t> ptr_array(x, n, x_col); IMPL_STACK_CUDA_KERNEL(int32_t, ptr_array); } else { PointerArray<T, int64_t> ptr_array(x, n, x_col); IMPL_STACK_CUDA_KERNEL(int64_t, ptr_array); } } else { if (use_int32) { PointerToPointer<Context, T, int32_t> ptr_array(dev_ctx, x, n, x_col); IMPL_STACK_CUDA_KERNEL(int32_t, ptr_array); } else { PointerToPointer<Context, T, int64_t> ptr_array(dev_ctx, x, n, x_col); IMPL_STACK_CUDA_KERNEL(int64_t, ptr_array); } } #undef IMPL_STACK_CUDA_KERNEL } } // namespace phi PD_REGISTER_KERNEL(stack, GPU, ALL_LAYOUT, phi::StackKernel, float, double, int64_t, int, phi::dtype::float16, phi::dtype::bfloat16) {}
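/*
 * Illustrative sketch only (not part of the Paddle kernel above): StackKernel
 * hands its input pointers to the GPU either by value inside a PointerArray when
 * n <= kWarpperSize (the 64 pointers travel in the kernel's parameter buffer, so
 * no extra allocation or copy is needed) or indirectly through a device-side
 * pointer table (PointerToPointer) when n is larger. The hypothetical kernels
 * below show the two calling conventions in isolation, kept in #if 0.
 */
#if 0
struct PtrsByValue { const float* data[64]; };   /* copied into the kernel argument buffer */

__global__ void use_by_value(PtrsByValue in, float* out) { out[0] = in.data[0][0]; }
__global__ void use_indirect(const float* const* in, float* out) { out[0] = in[0][0]; }

/* The indirect form needs the pointer table copied to device memory first, e.g.
     const float* h_ptrs[1024];            // filled with device data pointers
     const float** d_ptrs;
     hipMalloc(&d_ptrs, sizeof(h_ptrs));
     hipMemcpy(d_ptrs, h_ptrs, sizeof(h_ptrs), hipMemcpyHostToDevice);            */
#endif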
7872866b039d0d80a1d7a690308241b24e113af3.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/stack_kernel.h" #include "paddle/fluid/memory/memory.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/fast_divmod.h" namespace phi { template <typename IndexT> struct DivmodWarpper { public: void SetDivden(IndexT dividen) { divmoder = phi::funcs::FastDivMod(dividen); } __device__ inline phi::funcs::FastDivMod::DivModT div_mod(IndexT val) { return divmoder.Divmod(val); } private: phi::funcs::FastDivMod divmoder; }; template <> struct DivmodWarpper<int64_t> { public: using DivModT = phi::AlignedVector<int64_t, 2>; void SetDivden(int64_t dividen) { dividen_ = dividen; } __device__ inline DivModT div_mod(int64_t val) { DivModT data; data[0] = val / dividen_; data[1] = val - data[0] * dividen_; return data; } private: int64_t dividen_; }; constexpr int kWarpperSize = 64; template <typename T, typename IndexT> struct PointerArray : public DivmodWarpper<IndexT> { public: const T* data[kWarpperSize]; PointerArray(const std::vector<const DenseTensor*>& x, int num, int64_t dividen) { this->SetDivden(dividen); for (auto i = 0; i < num; ++i) { data[i] = x[i]->data<T>(); } } }; template <typename Context, typename T, typename IndexT> struct PointerToPointer : public DivmodWarpper<IndexT> { public: T** data; PointerToPointer(const Context& ctx, const std::vector<const DenseTensor*>& x, int num, int64_t dividen) { this->SetDivden(dividen); auto byte_len = num * sizeof(T*); std::vector<const T*> x_datas(num); for (int i = 0; i < num; ++i) { x_datas[i] = x[i]->data<T>(); } auto tmp_x_data = paddle::memory::Alloc( ctx.GetPlace(), byte_len, phi::Stream(reinterpret_cast<phi::StreamId>(ctx.stream()))); paddle::memory::Copy(ctx.GetPlace(), tmp_x_data->ptr(), phi::CPUPlace(), reinterpret_cast<void*>(x_datas.data()), x_datas.size() * sizeof(T*), ctx.stream()); data = reinterpret_cast<T**>(tmp_x_data->ptr()); } }; template <typename T, typename IndexT, typename WarpT> __global__ void StackCUDAKernel(WarpT input_warpper, IndexT split_size, IndexT rows, IndexT cols, T* __restrict__ output) { IndexT grid_x = static_cast<IndexT>(blockIdx.x) * blockDim.x + threadIdx.x; IndexT grid_x_stride = static_cast<IndexT>(blockDim.x) * gridDim.x; IndexT grid_y_stride = static_cast<IndexT>(blockDim.y) * gridDim.y; for (; grid_x < cols; grid_x += grid_x_stride) { IndexT grid_y = static_cast<IndexT>(blockIdx.y) * blockDim.y + threadIdx.y; auto divmod_rslt = input_warpper.div_mod(grid_x); const T* input_ptr = input_warpper.data[divmod_rslt[0]]; #pragma unroll for (; grid_y < rows; grid_y += grid_y_stride) { output[grid_y * cols + grid_x] = input_ptr[grid_y * split_size + divmod_rslt[1]]; } } } template <typename T, typename Context> void StackKernel(const Context& dev_ctx, const std::vector<const DenseTensor*>& x, int axis, DenseTensor* out) { if (axis < 0) axis 
+= (x[0]->dims().size() + 1); int n = static_cast<int>(x.size()); T* y_data = dev_ctx.template Alloc<T>(out); // Split x dim from axis to matrix int64_t x_row = 1, x_col = 1; for (int i = 0; i < axis; ++i) { x_row *= x[0]->dims()[i]; } x_col = x[0]->numel() / x_row; int64_t out_col = x_col * n; auto config = phi::backends::gpu::GetGpuLaunchConfig2D(dev_ctx, out_col, x_row); #define IMPL_STACK_CUDA_KERNEL(index_t, input_warpper) \ StackCUDAKernel<T, index_t, decltype(input_warpper)> \ <<<config.block_per_grid, \ config.thread_per_block, \ 0, \ dev_ctx.stream()>>>(input_warpper, \ static_cast<index_t>(x_col), \ static_cast<index_t>(x_row), \ static_cast<index_t>(out_col), \ y_data); bool use_int32 = out->numel() < std::numeric_limits<int32_t>::max(); if (n <= kWarpperSize) { if (use_int32) { PointerArray<T, int32_t> ptr_array(x, n, x_col); IMPL_STACK_CUDA_KERNEL(int32_t, ptr_array); } else { PointerArray<T, int64_t> ptr_array(x, n, x_col); IMPL_STACK_CUDA_KERNEL(int64_t, ptr_array); } } else { if (use_int32) { PointerToPointer<Context, T, int32_t> ptr_array(dev_ctx, x, n, x_col); IMPL_STACK_CUDA_KERNEL(int32_t, ptr_array); } else { PointerToPointer<Context, T, int64_t> ptr_array(dev_ctx, x, n, x_col); IMPL_STACK_CUDA_KERNEL(int64_t, ptr_array); } } #undef IMPL_STACK_CUDA_KERNEL } } // namespace phi PD_REGISTER_KERNEL(stack, GPU, ALL_LAYOUT, phi::StackKernel, float, double, int64_t, int, phi::dtype::float16, phi::dtype::bfloat16) {}
d8b3ac251e812467ec3ed1a1afb6b5ac8f3b77f7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void add( int *a, int *b, int *c ) {
    int tid = blockIdx.x;    // this thread handles the data at its thread id
    if (tid < N)
        c[tid] = a[tid] + b[tid];
}
d8b3ac251e812467ec3ed1a1afb6b5ac8f3b77f7.cu
#include "includes.h" __global__ void add( int *a, int *b, int *c ) { int tid = blockIdx.x; // this thread handles the data at its thread id if (tid < N) c[tid] = a[tid] + b[tid]; }
c69043a96de43f4f25459499f0bc0817b21a1fe0.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2020 MONAI Consortium Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Adapted from https://github.com/abadams/permutohedral which has the following license... MIT License Copyright (c) 2020 Andrew Adams Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define BLOCK_SIZE 64 #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <torch/extension.h> #include <THH/THHAtomics.cuh> #include "hash_table.cu" #include "utils/meta_macros.h" template <typename scalar_t> struct MatrixEntry { int index; scalar_t weight; }; template <typename scalar_t, int pd> __global__ static void createMatrix( const int elementCount, const scalar_t* positions, const scalar_t* values, const scalar_t* scaleFactor, MatrixEntry<scalar_t>* matrix) { const int threadId = threadIdx.x; const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE; const bool outOfBounds = idx >= elementCount; scalar_t myElevated[pd + 1]; const scalar_t* myPosition = positions + idx * pd; int myGreedy[pd + 1]; int myRank[pd + 1]; scalar_t myBarycentric[pd + 2]; __shared__ short keys[pd * BLOCK_SIZE]; short* myKey = keys + threadId * pd; if (!outOfBounds) { myElevated[pd] = -pd * myPosition[pd - 1] * scaleFactor[pd - 1]; for (int i = pd - 1; i > 0; i--) { myElevated[i] = myElevated[i + 1] - i * (myPosition[i - 1]) * scaleFactor[i - 1] + (i + 2) * myPosition[i] * scaleFactor[i]; } myElevated[0] = myElevated[1] + 2 * myPosition[0] * scaleFactor[0]; // find the closest zero-colored lattice point // greedily search for the closest zero-colored lattice point signed short sum = 0; for (int i = 0; i <= pd; i++) { scalar_t v = myElevated[i] * (1.0f / (pd + 1)); scalar_t up = ceilf(v) * (pd + 1); scalar_t down = floorf(v) * (pd + 1); myGreedy[i] = (signed short)(up - myElevated[i] < myElevated[i] - down ? 
up : down); sum += myGreedy[i]; } sum /= pd + 1; // sort differential to find the permutation between this simplex and the canonical one for (int i = 0; i <= pd; i++) { myRank[i] = 0; for (int j = 0; j <= pd; j++) { scalar_t iDiff = myElevated[i] - myGreedy[i]; scalar_t jDiff = myElevated[j] - myGreedy[j]; if (iDiff < jDiff || (iDiff == jDiff && i > j)) { myRank[i]++; } } } if (sum > 0) // sum too large, need to bring down the ones with the smallest differential { for (int i = 0; i <= pd; i++) { if (myRank[i] >= pd + 1 - sum) { myGreedy[i] -= (pd + 1); myRank[i] += sum - (pd + 1); } else { myRank[i] += sum; } } } else if (sum < 0) // sum too small, need to bring up the ones with largest differential { for (int i = 0; i <= pd; i++) { if (myRank[i] < -sum) { myGreedy[i] += (pd + 1); myRank[i] += sum + (pd + 1); } else { myRank[i] += sum; } } } #ifdef LINEAR_D_MEMORY for (int i = 0; i <= pd; i++) { table_zeros[idx * (pd + 1) + i] = myGreedy[i]; table_rank[idx * (pd + 1) + i] = myRank[i]; } #endif // turn delta into barycentric coords for (int i = 0; i <= pd + 1; i++) { myBarycentric[i] = 0; } for (int i = 0; i <= pd; i++) { scalar_t delta = (myElevated[i] - myGreedy[i]) * (1.0f / (pd + 1)); myBarycentric[pd - myRank[i]] += delta; myBarycentric[pd + 1 - myRank[i]] -= delta; } myBarycentric[0] += 1.0f + myBarycentric[pd + 1]; } #ifdef USE_ADDITIVE_HASH unsigned int cumulative_hash = hash<pd>(myGreedy); #endif for (int color = 0; color <= pd; color++) { // Compute the location of the lattice point explicitly (all but // the last coordinate - it's redundant because they sum to zero) if (!outOfBounds) { for (int i = 0; i < pd; i++) { myKey[i] = myGreedy[i] + color; if (myRank[i] > pd - color) { myKey[i] -= (pd + 1); } } } #ifdef USE_ADDITIVE_HASH for (int i = 0; i < pd; i++) { if (myRank[i] == pd - color) { cumulative_hash += hOffset[i]; } } #endif if (!outOfBounds) { MatrixEntry<scalar_t> r; #ifdef USE_ADDITIVE_HASH r.index = hashTableInsert<pd>(cumulative_hash, myKey, idx * (pd + 1) + color); #else r.index = hashTableInsert<pd>(myKey, idx * (pd + 1) + color); #endif r.weight = myBarycentric[color]; matrix[idx * (pd + 1) + color] = r; } } } template <typename scalar_t, int kd> __global__ static void cleanHashTable(const int elementCount, MatrixEntry<scalar_t>* matrix) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= elementCount) return; // find my hash table entry int* e = table_entries + idx; // Check if I created my own key in the previous phase if (*e >= 0) { // Rehash my key and reset the pointer in order to merge with // any other pixel that created a different entry under the // same key. If the computation was serial this would never // happen, but sometimes race conditions can make the same key // be inserted twice. hashTableRetrieve always returns the // earlier, so it's no problem as long as we rehash now. 
#ifdef LINEAR_D_MEMORY // Get my key short myKey[kd]; generateKey<kd>(*e, myKey); *e = hashTableRetrieve<kd>(myKey); #else *e = hashTableRetrieve<kd>(table_keys + *e * kd); #endif } } template <typename scalar_t, int pd, int vd> __global__ static void splat( const int elementCount, scalar_t* values, MatrixEntry<scalar_t>* matrix, scalar_t* table_values) { const int color = threadIdx.y; const int idx = threadIdx.x + blockIdx.x * blockDim.x; const bool outOfBounds = idx >= elementCount; if (outOfBounds) { return; } scalar_t* myValue = values + idx * vd; MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + color]; matrix[idx * (pd + 1) + color].index = r.index = table_entries[r.index]; scalar_t* val = table_values + r.index * (vd + 1); for (int j = 0; j < vd; j++) { gpuAtomicAdd(val + j, myValue[j] * r.weight); } gpuAtomicAdd(val + vd, r.weight); } // splat splits by color, so extend the y coordinate to our blocks to represent that // dim3 oldblocks((w-1)/8+1, (h-1)/8+1, 1); // dim3 oldblockSize(8, 8, 1); // oldblocks.y *= pd+1; // splatCache<pd, vd><<<oldblocks, oldblockSize>>>(w, h, values, matrix); // int blockCount = (elementCount + 1) / BLOCK_SIZE + 1; // int blockSize = BLOCK_SIZE; // splatCache<pd, vd><<<dim3(blockCount, 1), dim3(blockSize, pd+1)>>>(elementCount, values, matrix); template <typename scalar_t, int pd, int vd> __global__ static void splatCache( const int elementCount, scalar_t* values, MatrixEntry<scalar_t>* matrix, scalar_t* table_values) { // const int x = threadIdx.x + blockIdx.x * blockDim.x; // const int y = threadIdx.y + (blockIdx.y/(pd+1)) * blockDim.y; // const int threadId = threadIdx.y*blockDim.x + threadIdx.x; // const int color = blockIdx.y % (pd+1); // const int idx = y*w + x; const int threadId = threadIdx.x; const int color = threadIdx.y; const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE; const bool outOfBounds = idx >= elementCount; __shared__ int sharedOffsets[BLOCK_SIZE]; __shared__ scalar_t sharedValues[BLOCK_SIZE * (vd + 1)]; int myOffset = -1; scalar_t* myValue = sharedValues + threadId * (vd + 1); if (!outOfBounds) { scalar_t* value = values + idx * vd; MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + color]; // convert the matrix entry from a pointer into the entries array to a pointer into the keys/values array matrix[idx * (pd + 1) + color].index = r.index = table_entries[r.index]; // record the offset into the keys/values array in shared space myOffset = sharedOffsets[threadId] = r.index * (vd + 1); for (int j = 0; j < vd; j++) { myValue[j] = value[j] * r.weight; } myValue[vd] = r.weight; } else { sharedOffsets[threadId] = -1; } __syncthreads(); // am I the first thread in this block to care about this key? 
if (outOfBounds) return; for (int i = 0; i < BLOCK_SIZE; i++) { if (i < threadId) { if (myOffset == sharedOffsets[i]) { // somebody else with higher priority cares about this key return; } } else if (i > threadId) { if (myOffset == sharedOffsets[i]) { // someone else with lower priority cares about this key, accumulate it into mine for (int j = 0; j <= vd; j++) { sharedValues[threadId * (vd + 1) + j] += sharedValues[i * (vd + 1) + j]; } } } } // only the threads with something to write to main memory are still going scalar_t* val = table_values + myOffset; for (int j = 0; j <= vd; j++) { gpuAtomicAdd(val + j, myValue[j]); } } template <typename scalar_t, int pd, int vd> __global__ static void blur( int n, scalar_t* newValues, MatrixEntry<scalar_t>* matrix, int color, scalar_t* table_values) { const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x; if (idx >= n) return; // Check if I'm valid if (matrix[idx].index != idx) return; // find my key and the keys of my neighbours short myKey[pd + 1]; short np[pd + 1]; short nm[pd + 1]; #ifdef LINEAR_D_MEMORY generateKey<pd>(idx, myKey); for (int i = 0; i < pd; i++) { np[i] = myKey[i] + 1; nm[i] = myKey[i] - 1; } #else for (int i = 0; i < pd; i++) { myKey[i] = table_keys[idx * pd + i]; np[i] = myKey[i] + 1; nm[i] = myKey[i] - 1; } #endif np[color] -= pd + 1; nm[color] += pd + 1; #ifdef USE_ADDITIVE_HASH unsigned int hCurrent = hash<pd>(myKey); int offNp = hashTableRetrieveWithHash<pd>(hCurrent + hOffset[color], np); int offNm = hashTableRetrieveWithHash<pd>(hCurrent - hOffset[color], nm); #else int offNp = hashTableRetrieve<pd>(np); int offNm = hashTableRetrieve<pd>(nm); #endif scalar_t* valMe = table_values + (vd + 1) * idx; scalar_t* valNp = table_values + (vd + 1) * offNp; scalar_t* valNm = table_values + (vd + 1) * offNm; scalar_t* valOut = newValues + (vd + 1) * idx; if (offNp >= 0 && offNm >= 0) { for (int i = 0; i <= vd; i++) { valOut[i] = (valNp[i] + (valMe[i] * 2) + valNm[i]) / 4; } } else if (offNp >= 0) { for (int i = 0; i <= vd; i++) { valOut[i] = (valNp[i] + (valMe[i] * 2)) / 4; } } else if (offNm >= 0) { for (int i = 0; i <= vd; i++) { valOut[i] = (valNm[i] + (valMe[i] * 2)) / 4; } } else { for (int i = 0; i <= vd; i++) { valOut[i] = valMe[i] * 2; } } } template <typename scalar_t, int pd, int vd> __global__ static void slice( const int elementCount, scalar_t* values, MatrixEntry<scalar_t>* matrix, scalar_t* table_values) { const int threadId = threadIdx.x; const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE; const bool outOfBounds = idx >= elementCount; if (outOfBounds) return; __shared__ scalar_t localValue[BLOCK_SIZE * vd]; scalar_t* myValue = localValue + threadId * vd; scalar_t myWeight = 0; for (int i = 0; i < vd; i++) { myValue[i] = 0; } for (int i = 0; i <= pd; i++) { MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + i]; scalar_t* val = table_values + r.index * (vd + 1); for (int j = 0; j < vd; j++) { myValue[j] += r.weight * val[j]; } myWeight += r.weight * val[vd]; } myWeight = 1.0f / myWeight; for (int j = 0; j < vd; j++) { values[idx * vd + j] = myValue[j] * myWeight; } } template <typename scalar_t, int vd, int pd> void PermutohedralCuda(scalar_t* values, scalar_t* positions, int elementCount, bool accurate) { scalar_t blurVariance = accurate ? 
0.5 : 0; scalar_t* scaleFactor; hipMalloc(&scaleFactor, pd * sizeof(scalar_t)); scalar_t scaleFactorHost[pd]; for (int i = 0; i < pd; i++) { scaleFactorHost[i] = (pd + 1) * sqrtf((1.0 / 6 + blurVariance) / ((i + 1) * (i + 2))); } hipMemcpy(scaleFactor, scaleFactorHost, pd * sizeof(scalar_t), hipMemcpyHostToDevice); MatrixEntry<scalar_t>* matrix; hipMalloc(&matrix, elementCount * (pd + 1) * sizeof(MatrixEntry<scalar_t>)); scalar_t* table_values = createHashTable<scalar_t, pd, vd + 1>(elementCount * (pd + 1)); // Populate constant memory for hash helpers unsigned long long int __host_two32 = ((unsigned long long int)1) << 32; unsigned int __host_div_c = 2 * (elementCount * (pd + 1)); unsigned int __host_div_l = ceilf(logf((float)__host_div_c) / logf(2.0f)); unsigned int __host_div_m = (__host_two32 << __host_div_l) / __host_div_c - __host_two32 + 1; hipMemcpyToSymbol(__div_c, &__host_div_c, sizeof(unsigned int)); hipMemcpyToSymbol(__div_l, &__host_div_l, sizeof(unsigned int)); hipMemcpyToSymbol(__div_m, &__host_div_m, sizeof(unsigned int)); // Populate constant memory with hash of offset vectors unsigned int hOffset_host[pd + 1]; signed short offset[pd + 1]; for (int i = 0; i < pd; offset[i] = 1, i++) ; for (int i = 0; i <= pd; i++) { offset[i] -= pd + 1; hOffset_host[i] = hash<pd>(offset); offset[i] += pd + 1; } hipMemcpyToSymbol(hOffset, &hOffset_host, sizeof(unsigned int) * (pd + 1)); int blockCount = (elementCount + 1) / BLOCK_SIZE + 1; int blockSize = BLOCK_SIZE; hipLaunchKernelGGL(( createMatrix<scalar_t, pd>), dim3(blockCount), dim3(blockSize), 0, 0, elementCount, positions, values, scaleFactor, matrix); // fix duplicate hash table entries int tableSize = elementCount * 2 * (pd + 1); int cleanBlockSize = 32; int cleanBlocks = (tableSize - 1) / cleanBlockSize + 1; hipLaunchKernelGGL(( cleanHashTable<scalar_t, pd>), dim3(cleanBlocks), dim3(cleanBlockSize), 0, 0, tableSize, matrix); hipLaunchKernelGGL(( splat<scalar_t, pd, vd>), dim3(dim3(blockCount, 1)), dim3(dim3(blockSize, pd + 1)), 0, 0, elementCount, values, matrix, table_values); if (accurate) { scalar_t* newValues; hipMalloc(&newValues, elementCount * (pd + 1) * (vd + 1) * sizeof(scalar_t)); hipMemset(newValues, 0, elementCount * (pd + 1) * (vd + 1) * sizeof(scalar_t)); for (int color = 0; color <= pd; color++) { hipLaunchKernelGGL(( blur<scalar_t, pd, vd>) , dim3(cleanBlocks), dim3(cleanBlockSize), 0, 0, elementCount * (pd + 1), newValues, matrix, color, table_values); scalar_t* swap = newValues; newValues = table_values; table_values = swap; } hipFree(newValues); } hipLaunchKernelGGL(( slice<scalar_t, pd, vd>), dim3(blockCount), dim3(blockSize), 0, 0, elementCount, values, matrix, table_values); destroyHashTable<scalar_t>(); hipFree(table_values); } #define DECLARATION(dc, fc) \ template void PermutohedralCuda<float, dc, fc>(float* values, float* positions, int elementCount, bool accurate); \ template void PermutohedralCuda<double, dc, fc>(double* values, double* positions, int elementCount, bool accurate); DO_FOR_AB(DECLARATION, 16, 19)
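/*
 * Hypothetical usage sketch (not part of the MONAI source): PermutohedralCuda is
 * templated as <scalar_t, vd, pd>, filtering `values` (elementCount * vd scalars,
 * overwritten in place) with Gaussian weights defined by `positions`
 * (elementCount * pd scalars, typically the guide features already divided by the
 * desired sigmas). A bilateral-style filter of an RGB image would use pd = 5
 * (x, y, r, g, b) and vd = 3, assuming that instantiation is among those generated
 * by the DO_FOR_AB macro above. Kept in #if 0.
 */
#if 0
const int elementCount = width * height;   /* hypothetical image size */
float *d_values;      /* device buffer, elementCount * 3 floats, filtered in place */
float *d_positions;   /* device buffer, elementCount * 5 floats, pre-scaled features */
PermutohedralCuda<float, 3, 5>(d_values, d_positions, elementCount, /*accurate=*/true);
#endif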
c69043a96de43f4f25459499f0bc0817b21a1fe0.cu
/* Copyright 2020 MONAI Consortium Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Adapted from https://github.com/abadams/permutohedral which has the following license... MIT License Copyright (c) 2020 Andrew Adams Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define BLOCK_SIZE 64 #include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <torch/extension.h> #include <THC/THCAtomics.cuh> #include "hash_table.cu" #include "utils/meta_macros.h" template <typename scalar_t> struct MatrixEntry { int index; scalar_t weight; }; template <typename scalar_t, int pd> __global__ static void createMatrix( const int elementCount, const scalar_t* positions, const scalar_t* values, const scalar_t* scaleFactor, MatrixEntry<scalar_t>* matrix) { const int threadId = threadIdx.x; const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE; const bool outOfBounds = idx >= elementCount; scalar_t myElevated[pd + 1]; const scalar_t* myPosition = positions + idx * pd; int myGreedy[pd + 1]; int myRank[pd + 1]; scalar_t myBarycentric[pd + 2]; __shared__ short keys[pd * BLOCK_SIZE]; short* myKey = keys + threadId * pd; if (!outOfBounds) { myElevated[pd] = -pd * myPosition[pd - 1] * scaleFactor[pd - 1]; for (int i = pd - 1; i > 0; i--) { myElevated[i] = myElevated[i + 1] - i * (myPosition[i - 1]) * scaleFactor[i - 1] + (i + 2) * myPosition[i] * scaleFactor[i]; } myElevated[0] = myElevated[1] + 2 * myPosition[0] * scaleFactor[0]; // find the closest zero-colored lattice point // greedily search for the closest zero-colored lattice point signed short sum = 0; for (int i = 0; i <= pd; i++) { scalar_t v = myElevated[i] * (1.0f / (pd + 1)); scalar_t up = ceilf(v) * (pd + 1); scalar_t down = floorf(v) * (pd + 1); myGreedy[i] = (signed short)(up - myElevated[i] < myElevated[i] - down ? 
up : down); sum += myGreedy[i]; } sum /= pd + 1; // sort differential to find the permutation between this simplex and the canonical one for (int i = 0; i <= pd; i++) { myRank[i] = 0; for (int j = 0; j <= pd; j++) { scalar_t iDiff = myElevated[i] - myGreedy[i]; scalar_t jDiff = myElevated[j] - myGreedy[j]; if (iDiff < jDiff || (iDiff == jDiff && i > j)) { myRank[i]++; } } } if (sum > 0) // sum too large, need to bring down the ones with the smallest differential { for (int i = 0; i <= pd; i++) { if (myRank[i] >= pd + 1 - sum) { myGreedy[i] -= (pd + 1); myRank[i] += sum - (pd + 1); } else { myRank[i] += sum; } } } else if (sum < 0) // sum too small, need to bring up the ones with largest differential { for (int i = 0; i <= pd; i++) { if (myRank[i] < -sum) { myGreedy[i] += (pd + 1); myRank[i] += sum + (pd + 1); } else { myRank[i] += sum; } } } #ifdef LINEAR_D_MEMORY for (int i = 0; i <= pd; i++) { table_zeros[idx * (pd + 1) + i] = myGreedy[i]; table_rank[idx * (pd + 1) + i] = myRank[i]; } #endif // turn delta into barycentric coords for (int i = 0; i <= pd + 1; i++) { myBarycentric[i] = 0; } for (int i = 0; i <= pd; i++) { scalar_t delta = (myElevated[i] - myGreedy[i]) * (1.0f / (pd + 1)); myBarycentric[pd - myRank[i]] += delta; myBarycentric[pd + 1 - myRank[i]] -= delta; } myBarycentric[0] += 1.0f + myBarycentric[pd + 1]; } #ifdef USE_ADDITIVE_HASH unsigned int cumulative_hash = hash<pd>(myGreedy); #endif for (int color = 0; color <= pd; color++) { // Compute the location of the lattice point explicitly (all but // the last coordinate - it's redundant because they sum to zero) if (!outOfBounds) { for (int i = 0; i < pd; i++) { myKey[i] = myGreedy[i] + color; if (myRank[i] > pd - color) { myKey[i] -= (pd + 1); } } } #ifdef USE_ADDITIVE_HASH for (int i = 0; i < pd; i++) { if (myRank[i] == pd - color) { cumulative_hash += hOffset[i]; } } #endif if (!outOfBounds) { MatrixEntry<scalar_t> r; #ifdef USE_ADDITIVE_HASH r.index = hashTableInsert<pd>(cumulative_hash, myKey, idx * (pd + 1) + color); #else r.index = hashTableInsert<pd>(myKey, idx * (pd + 1) + color); #endif r.weight = myBarycentric[color]; matrix[idx * (pd + 1) + color] = r; } } } template <typename scalar_t, int kd> __global__ static void cleanHashTable(const int elementCount, MatrixEntry<scalar_t>* matrix) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= elementCount) return; // find my hash table entry int* e = table_entries + idx; // Check if I created my own key in the previous phase if (*e >= 0) { // Rehash my key and reset the pointer in order to merge with // any other pixel that created a different entry under the // same key. If the computation was serial this would never // happen, but sometimes race conditions can make the same key // be inserted twice. hashTableRetrieve always returns the // earlier, so it's no problem as long as we rehash now. 
#ifdef LINEAR_D_MEMORY // Get my key short myKey[kd]; generateKey<kd>(*e, myKey); *e = hashTableRetrieve<kd>(myKey); #else *e = hashTableRetrieve<kd>(table_keys + *e * kd); #endif } } template <typename scalar_t, int pd, int vd> __global__ static void splat( const int elementCount, scalar_t* values, MatrixEntry<scalar_t>* matrix, scalar_t* table_values) { const int color = threadIdx.y; const int idx = threadIdx.x + blockIdx.x * blockDim.x; const bool outOfBounds = idx >= elementCount; if (outOfBounds) { return; } scalar_t* myValue = values + idx * vd; MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + color]; matrix[idx * (pd + 1) + color].index = r.index = table_entries[r.index]; scalar_t* val = table_values + r.index * (vd + 1); for (int j = 0; j < vd; j++) { gpuAtomicAdd(val + j, myValue[j] * r.weight); } gpuAtomicAdd(val + vd, r.weight); } // splat splits by color, so extend the y coordinate to our blocks to represent that // dim3 oldblocks((w-1)/8+1, (h-1)/8+1, 1); // dim3 oldblockSize(8, 8, 1); // oldblocks.y *= pd+1; // splatCache<pd, vd><<<oldblocks, oldblockSize>>>(w, h, values, matrix); // int blockCount = (elementCount + 1) / BLOCK_SIZE + 1; // int blockSize = BLOCK_SIZE; // splatCache<pd, vd><<<dim3(blockCount, 1), dim3(blockSize, pd+1)>>>(elementCount, values, matrix); template <typename scalar_t, int pd, int vd> __global__ static void splatCache( const int elementCount, scalar_t* values, MatrixEntry<scalar_t>* matrix, scalar_t* table_values) { // const int x = threadIdx.x + blockIdx.x * blockDim.x; // const int y = threadIdx.y + (blockIdx.y/(pd+1)) * blockDim.y; // const int threadId = threadIdx.y*blockDim.x + threadIdx.x; // const int color = blockIdx.y % (pd+1); // const int idx = y*w + x; const int threadId = threadIdx.x; const int color = threadIdx.y; const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE; const bool outOfBounds = idx >= elementCount; __shared__ int sharedOffsets[BLOCK_SIZE]; __shared__ scalar_t sharedValues[BLOCK_SIZE * (vd + 1)]; int myOffset = -1; scalar_t* myValue = sharedValues + threadId * (vd + 1); if (!outOfBounds) { scalar_t* value = values + idx * vd; MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + color]; // convert the matrix entry from a pointer into the entries array to a pointer into the keys/values array matrix[idx * (pd + 1) + color].index = r.index = table_entries[r.index]; // record the offset into the keys/values array in shared space myOffset = sharedOffsets[threadId] = r.index * (vd + 1); for (int j = 0; j < vd; j++) { myValue[j] = value[j] * r.weight; } myValue[vd] = r.weight; } else { sharedOffsets[threadId] = -1; } __syncthreads(); // am I the first thread in this block to care about this key? 
if (outOfBounds) return; for (int i = 0; i < BLOCK_SIZE; i++) { if (i < threadId) { if (myOffset == sharedOffsets[i]) { // somebody else with higher priority cares about this key return; } } else if (i > threadId) { if (myOffset == sharedOffsets[i]) { // someone else with lower priority cares about this key, accumulate it into mine for (int j = 0; j <= vd; j++) { sharedValues[threadId * (vd + 1) + j] += sharedValues[i * (vd + 1) + j]; } } } } // only the threads with something to write to main memory are still going scalar_t* val = table_values + myOffset; for (int j = 0; j <= vd; j++) { gpuAtomicAdd(val + j, myValue[j]); } } template <typename scalar_t, int pd, int vd> __global__ static void blur( int n, scalar_t* newValues, MatrixEntry<scalar_t>* matrix, int color, scalar_t* table_values) { const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.x; if (idx >= n) return; // Check if I'm valid if (matrix[idx].index != idx) return; // find my key and the keys of my neighbours short myKey[pd + 1]; short np[pd + 1]; short nm[pd + 1]; #ifdef LINEAR_D_MEMORY generateKey<pd>(idx, myKey); for (int i = 0; i < pd; i++) { np[i] = myKey[i] + 1; nm[i] = myKey[i] - 1; } #else for (int i = 0; i < pd; i++) { myKey[i] = table_keys[idx * pd + i]; np[i] = myKey[i] + 1; nm[i] = myKey[i] - 1; } #endif np[color] -= pd + 1; nm[color] += pd + 1; #ifdef USE_ADDITIVE_HASH unsigned int hCurrent = hash<pd>(myKey); int offNp = hashTableRetrieveWithHash<pd>(hCurrent + hOffset[color], np); int offNm = hashTableRetrieveWithHash<pd>(hCurrent - hOffset[color], nm); #else int offNp = hashTableRetrieve<pd>(np); int offNm = hashTableRetrieve<pd>(nm); #endif scalar_t* valMe = table_values + (vd + 1) * idx; scalar_t* valNp = table_values + (vd + 1) * offNp; scalar_t* valNm = table_values + (vd + 1) * offNm; scalar_t* valOut = newValues + (vd + 1) * idx; if (offNp >= 0 && offNm >= 0) { for (int i = 0; i <= vd; i++) { valOut[i] = (valNp[i] + (valMe[i] * 2) + valNm[i]) / 4; } } else if (offNp >= 0) { for (int i = 0; i <= vd; i++) { valOut[i] = (valNp[i] + (valMe[i] * 2)) / 4; } } else if (offNm >= 0) { for (int i = 0; i <= vd; i++) { valOut[i] = (valNm[i] + (valMe[i] * 2)) / 4; } } else { for (int i = 0; i <= vd; i++) { valOut[i] = valMe[i] * 2; } } } template <typename scalar_t, int pd, int vd> __global__ static void slice( const int elementCount, scalar_t* values, MatrixEntry<scalar_t>* matrix, scalar_t* table_values) { const int threadId = threadIdx.x; const int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE; const bool outOfBounds = idx >= elementCount; if (outOfBounds) return; __shared__ scalar_t localValue[BLOCK_SIZE * vd]; scalar_t* myValue = localValue + threadId * vd; scalar_t myWeight = 0; for (int i = 0; i < vd; i++) { myValue[i] = 0; } for (int i = 0; i <= pd; i++) { MatrixEntry<scalar_t> r = matrix[idx * (pd + 1) + i]; scalar_t* val = table_values + r.index * (vd + 1); for (int j = 0; j < vd; j++) { myValue[j] += r.weight * val[j]; } myWeight += r.weight * val[vd]; } myWeight = 1.0f / myWeight; for (int j = 0; j < vd; j++) { values[idx * vd + j] = myValue[j] * myWeight; } } template <typename scalar_t, int vd, int pd> void PermutohedralCuda(scalar_t* values, scalar_t* positions, int elementCount, bool accurate) { scalar_t blurVariance = accurate ? 
0.5 : 0; scalar_t* scaleFactor; cudaMalloc(&scaleFactor, pd * sizeof(scalar_t)); scalar_t scaleFactorHost[pd]; for (int i = 0; i < pd; i++) { scaleFactorHost[i] = (pd + 1) * sqrtf((1.0 / 6 + blurVariance) / ((i + 1) * (i + 2))); } cudaMemcpy(scaleFactor, scaleFactorHost, pd * sizeof(scalar_t), cudaMemcpyHostToDevice); MatrixEntry<scalar_t>* matrix; cudaMalloc(&matrix, elementCount * (pd + 1) * sizeof(MatrixEntry<scalar_t>)); scalar_t* table_values = createHashTable<scalar_t, pd, vd + 1>(elementCount * (pd + 1)); // Populate constant memory for hash helpers unsigned long long int __host_two32 = ((unsigned long long int)1) << 32; unsigned int __host_div_c = 2 * (elementCount * (pd + 1)); unsigned int __host_div_l = ceilf(logf((float)__host_div_c) / logf(2.0f)); unsigned int __host_div_m = (__host_two32 << __host_div_l) / __host_div_c - __host_two32 + 1; cudaMemcpyToSymbol(__div_c, &__host_div_c, sizeof(unsigned int)); cudaMemcpyToSymbol(__div_l, &__host_div_l, sizeof(unsigned int)); cudaMemcpyToSymbol(__div_m, &__host_div_m, sizeof(unsigned int)); // Populate constant memory with hash of offset vectors unsigned int hOffset_host[pd + 1]; signed short offset[pd + 1]; for (int i = 0; i < pd; offset[i] = 1, i++) ; for (int i = 0; i <= pd; i++) { offset[i] -= pd + 1; hOffset_host[i] = hash<pd>(offset); offset[i] += pd + 1; } cudaMemcpyToSymbol(hOffset, &hOffset_host, sizeof(unsigned int) * (pd + 1)); int blockCount = (elementCount + 1) / BLOCK_SIZE + 1; int blockSize = BLOCK_SIZE; createMatrix<scalar_t, pd><<<blockCount, blockSize>>>(elementCount, positions, values, scaleFactor, matrix); // fix duplicate hash table entries int tableSize = elementCount * 2 * (pd + 1); int cleanBlockSize = 32; int cleanBlocks = (tableSize - 1) / cleanBlockSize + 1; cleanHashTable<scalar_t, pd><<<cleanBlocks, cleanBlockSize>>>(tableSize, matrix); splat<scalar_t, pd, vd><<<dim3(blockCount, 1), dim3(blockSize, pd + 1)>>>(elementCount, values, matrix, table_values); if (accurate) { scalar_t* newValues; cudaMalloc(&newValues, elementCount * (pd + 1) * (vd + 1) * sizeof(scalar_t)); cudaMemset(newValues, 0, elementCount * (pd + 1) * (vd + 1) * sizeof(scalar_t)); for (int color = 0; color <= pd; color++) { blur<scalar_t, pd, vd> <<<cleanBlocks, cleanBlockSize>>>(elementCount * (pd + 1), newValues, matrix, color, table_values); scalar_t* swap = newValues; newValues = table_values; table_values = swap; } cudaFree(newValues); } slice<scalar_t, pd, vd><<<blockCount, blockSize>>>(elementCount, values, matrix, table_values); destroyHashTable<scalar_t>(); cudaFree(table_values); } #define DECLARATION(dc, fc) \ template void PermutohedralCuda<float, dc, fc>(float* values, float* positions, int elementCount, bool accurate); \ template void PermutohedralCuda<double, dc, fc>(double* values, double* positions, int elementCount, bool accurate); DO_FOR_AB(DECLARATION, 16, 19)
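// ---------------------------------------------------------------------------
// A minimal host-side usage sketch (added for illustration only; the function
// name and the chosen dimensions are assumptions, not part of the original
// file). The template arguments are <scalar_t, vd, pd>: vd value channels are
// filtered in place, guided by pd-dimensional position/feature vectors.
// Callers outside this translation unit must use one of the pairs
// instantiated by DO_FOR_AB above. For a colour bilateral filter one would
// typically use vd = 3 (r, g, b) and pd = 5 (x, y, r, g, b), with the
// positions pre-scaled by the desired spatial and colour standard deviations.
static void permutohedralUsageSketch(float* d_values, float* d_positions, int elementCount) {
  // d_values   : elementCount * 3 floats on the device, overwritten with the
  //              filtered result by the slice kernel
  // d_positions: elementCount * 5 floats on the device
  PermutohedralCuda<float, 3, 5>(d_values, d_positions, elementCount, /*accurate=*/true);
  cudaDeviceSynchronize();
}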
d4d039fdba95f3b61c7b0775a21192c1e43b3094.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <THH/THHAtomics.cuh> #include <cmath> using namespace at; // temporal fix for pytorch<=0.4.1 (see #9848) #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 // 32 * 32 #define WARP_SIZE 32 #define THREADS_PER_PIXEL 32 #define MAX_SHARED_MEMORY 49152 #define MAX_SHARED_SCALAR_T 6144 // 49152 / 8 = 6144 #define kTileDim 32 #define kBlockRows 8 #define FORWARD_WARP_SIZE 16 #define FORWARD_THREADS_PER_PIXEL 64 #define FULL_MASK 0xffffffff inline int divideUP(const int x, const int y) { return (((x) + (y)-1) / (y)); } __device__ inline int Loc2Index(const int n, const int c, const int h, const int w, const int channel_num, const int height, const int width) { int index = w + (h + (c + n * channel_num) * height) * width; return index; } /* TODO: move this to a common place */ template <typename scalar_t> __device__ inline scalar_t min(scalar_t a, scalar_t b) { return a < b ? a : b; } template <typename scalar_t> __device__ inline scalar_t max(scalar_t a, scalar_t b) { return a > b ? a : b; } template <typename scalar_t> __device__ __forceinline__ scalar_t warpReduceSum(scalar_t val) { for (int offset = 16; offset > 0; offset /= 2) val += __shfl_down_sync(FULL_MASK, val, offset); return val; } // Splits the original matrix into submatrices with size 32 * 32. // Each block transposes one submatrix by loading it into shared memory. // Reference https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ template <typename scalar_t> __global__ void BatchTranspose2DCUDAKernel(const int N, const int H, const int W, const int dh, const int dw, const scalar_t *__restrict__ X, scalar_t *__restrict__ Y) { __shared__ scalar_t tile[kTileDim][kTileDim + 1]; const int n = blockIdx.x / (dh * dw); const int k = blockIdx.x % (dh * dw); const int r = k / dw; const int c = k % dw; const int offset = n * H * W; int x = c * kTileDim + threadIdx.x; int y = r * kTileDim + threadIdx.y; int i; if (x < W) { for (i = 0; threadIdx.y + i < kTileDim && y + i < H; i += kBlockRows) { tile[threadIdx.y + i][threadIdx.x] = X[offset + (y + i) * W + x]; } } __syncthreads(); x = r * kTileDim + threadIdx.x; y = c * kTileDim + threadIdx.y; if (x < H) { for (i = 0; threadIdx.y + i < kTileDim && y + i < W; i += kBlockRows) { Y[offset + (y + i) * H + x] = tile[threadIdx.x][threadIdx.y + i]; } } } template <typename scalar_t> __global__ void DDFForward(const int num_kernels, const scalar_t *__restrict__ bottom_data, const scalar_t *__restrict__ bottom_channel_filter, const scalar_t *__restrict__ bottom_spatial_filter, const int kernel_size, const int dilation, const int stride, const int channels, const int bottom_height, const int bottom_width, const int top_height, const int top_width, scalar_t *__restrict__ top_data) { __shared__ scalar_t shared_spatial_filter[MAX_SHARED_SCALAR_T]; bool valid_index = false; int index = threadIdx.x + blockIdx.y * blockDim.x; if (index > num_kernels - 1){ return; } const int pixel_id = threadIdx.x / FORWARD_THREADS_PER_PIXEL; // pixel in block from 0 to 15 const int split_id = threadIdx.x % FORWARD_THREADS_PER_PIXEL; // thread in pixel from 0 to 63 // (n, c, ph, pw) is an element in the bottom_data index = index / FORWARD_THREADS_PER_PIXEL; const int pw = 
index % top_width; const int ph = index / top_width; const int n = blockIdx.x; const int start_w = pw * stride - ((kernel_size - 1) / 2)*dilation; const int end_w = pw * stride + ((kernel_size - 1) / 2)*dilation + 1; const int start_h = ph * stride - ((kernel_size - 1) / 2)*dilation; const int end_h = ph * stride + ((kernel_size - 1) / 2)*dilation + 1; scalar_t output_val = 0; scalar_t lost = 0; scalar_t t = 0; scalar_t input = 0; int c, spatial_filter_id, channel_filter_id, iy, ix, kernel_iy, kernel_ix, filter_c, bottom_id, top_id; for (c = split_id; c < kernel_size*kernel_size; c += FORWARD_THREADS_PER_PIXEL) { spatial_filter_id = Loc2Index(n, c, ph, pw, kernel_size * kernel_size, top_height, top_width); shared_spatial_filter[c * FORWARD_WARP_SIZE + pixel_id] = bottom_spatial_filter[spatial_filter_id]; } __syncthreads(); #pragma unroll for (c = split_id; c < channels; c += FORWARD_THREADS_PER_PIXEL) { output_val = 0; lost = 0; t = 0; input = 0; #pragma unroll for (iy = start_h; iy < end_h; iy+=dilation) { #pragma unroll for (ix = start_w; ix < end_w; ix+=dilation) { if (iy < 0 || iy > bottom_height - 1 || ix < 0 || ix > bottom_width - 1) { continue; } kernel_iy = (iy - start_h) / dilation; kernel_ix = (ix - start_w) / dilation; filter_c = kernel_iy * kernel_size + kernel_ix; bottom_id = Loc2Index(n, c, iy, ix, channels, bottom_height, bottom_width); spatial_filter_id = Loc2Index(n, filter_c, ph, pw, kernel_size * kernel_size, top_height, top_width); channel_filter_id = (n * channels + c ) * kernel_size * kernel_size + filter_c; // Kahan and Babuska summation, Neumaier variant input = bottom_data[bottom_id] * shared_spatial_filter[filter_c * FORWARD_WARP_SIZE + pixel_id] * bottom_channel_filter[channel_filter_id]; t = output_val + input; lost += fabs(output_val) >= fabs(input) ? 
(output_val - t) + input : (input - t) + output_val; output_val = t; } } top_id = Loc2Index(n, c, ph, pw, channels, top_height, top_width); // Kahan and Babuska summation, Neumaier variant top_data[top_id] = output_val + lost; } } int DDFMulForwardLauncher(const at::Tensor features, const at::Tensor channel_filter, const at::Tensor spatial_filter, const int kernel_size, const int dilation, const int stride, const int batch_size,const int channels, const int bottom_height, const int bottom_width, const int top_height, const int top_width, at::Tensor output){ hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.type(), "DDFForward", ([&] { const int num_kernels = top_height * top_width * FORWARD_THREADS_PER_PIXEL; dim3 grid(batch_size, at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK)); const scalar_t *bottom_data = features.data<scalar_t>(); const scalar_t *bottom_channel_filter = channel_filter.data<scalar_t>(); const scalar_t *bottom_spatial_filter = spatial_filter.data<scalar_t>(); scalar_t *top_data = output.data<scalar_t>(); hipLaunchKernelGGL(( DDFForward<scalar_t>) , dim3(grid), dim3(THREADS_PER_BLOCK), 0, stream, num_kernels, bottom_data, bottom_channel_filter, bottom_spatial_filter, kernel_size, dilation, stride, channels, bottom_height, bottom_width, top_height, top_width, top_data); })); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; } template <typename scalar_t> __global__ void DDFBackward_Feature(const int num_kernels, const scalar_t *__restrict__ top_diff, const scalar_t *__restrict__ bottom_spatial_filter, const scalar_t *__restrict__ bottom_channel_filter, const int kernel_size, const int dilation, const int stride, const int channels, const int top_height, const int top_width, const int bottom_height, const int bottom_width, scalar_t *__restrict__ bottom_diff){ __shared__ scalar_t shared_spatial_filter[MAX_SHARED_SCALAR_T]; int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int pixel_id = threadIdx.x / THREADS_PER_PIXEL; const int split_id = threadIdx.x % THREADS_PER_PIXEL; // (n, c, ph, pw) is an element in the bottom_data index = index / THREADS_PER_PIXEL; const int pw = index % bottom_width; const int ph = (index / bottom_width) % bottom_height; const int n = index / bottom_width / bottom_height; const int start_w = pw - ((kernel_size - 1) / 2)*dilation; const int end_w = pw + ((kernel_size - 1) / 2)*dilation + 1; const int start_h = ph - ((kernel_size - 1) / 2)*dilation; const int end_h = ph + ((kernel_size - 1) / 2)*dilation + 1; for (int c = split_id; c < kernel_size * kernel_size; c += THREADS_PER_PIXEL) { const int kernel_ix = c % kernel_size ; const int kernel_iy = c / kernel_size; const int ix = start_w + kernel_ix * dilation; const int iy = start_h + kernel_iy * dilation; if (ix % stride !=0 || iy % stride !=0 || iy/stride < 0 || iy/stride > top_height - 1 || ix/stride < 0 || ix/stride > top_width - 1){ shared_spatial_filter[c * WARP_SIZE + pixel_id] = 0; continue; }; const int spatial_filter_c = kernel_size * kernel_size - c - 1; int spatial_filter_id = Loc2Index(n, spatial_filter_c, iy/stride, ix/stride, kernel_size * kernel_size, top_height, top_width); shared_spatial_filter[c * WARP_SIZE + pixel_id] = bottom_spatial_filter[spatial_filter_id]; } __syncthreads(); scalar_t output_val = 0; scalar_t lost = 0; scalar_t t = 0; scalar_t input = 
0; int bottom_iy, bottom_ix, iy, ix, kernel_iy, kernel_ix, spatial_filter_c, channel_filter_id, top_id, bottom_id; #pragma unroll for (int c = split_id; c < channels; c += THREADS_PER_PIXEL){ output_val = 0; lost = 0; t = 0; input = 0; #pragma unroll for (bottom_iy = start_h; bottom_iy < end_h; bottom_iy+=dilation){ #pragma unroll for (bottom_ix = start_w; bottom_ix < end_w; bottom_ix+=dilation){ if (bottom_iy % stride != 0 || bottom_ix % stride != 0){ continue; } iy = bottom_iy / stride; ix = bottom_ix / stride; if (iy < 0 || iy > top_height - 1 || ix < 0 || ix > top_width - 1){ continue; } kernel_iy = (bottom_iy - start_h) / dilation; kernel_ix = (bottom_ix - start_w) / dilation; spatial_filter_c = kernel_iy * kernel_size + kernel_ix; channel_filter_id = Loc2Index(n, c, kernel_size - kernel_iy - 1, kernel_size - kernel_ix - 1, channels, kernel_size, kernel_size); top_id = Loc2Index(n, iy, ix, c, top_height, top_width, channels); input = shared_spatial_filter[spatial_filter_c * WARP_SIZE + pixel_id] * top_diff[top_id] * bottom_channel_filter[channel_filter_id]; t = output_val + input; lost += fabs(output_val) >= fabs(input) ? (output_val - t) + input : (input - t) + output_val; output_val = t; } } bottom_id = Loc2Index(n, ph, pw, c, bottom_height, bottom_width, channels); bottom_diff[bottom_id] = output_val + lost; } } template <typename scalar_t> __global__ void DDFBackward_Spatial_Filter(const int num_kernels, const scalar_t *__restrict__ top_diff, const scalar_t *__restrict__ bottom_data, const scalar_t *__restrict__ bottom_channel_filter, const int kernel_size, const int dilation, const int stride,const int channels, const int top_height, const int top_width, const int bottom_height, const int bottom_width, scalar_t *__restrict__ spatial_filter_diff) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int spatial_filter_channels = kernel_size * kernel_size; const int lane_id = index % WARP_SIZE; index = index / WARP_SIZE; const int spatial_filter_c = index % spatial_filter_channels; // (n, c, ph, pw) is an element in the bottom_data index = index / spatial_filter_channels; const int pw = index % top_width; const int ph = (index / top_width) % top_height; const int n = index / top_width / top_height; const int kernel_ix = spatial_filter_c % kernel_size; const int kernel_iy = spatial_filter_c / kernel_size; const int offset_ix = (kernel_ix - (kernel_size - 1) / 2) * dilation; const int offset_iy = (kernel_iy - (kernel_size - 1) / 2) * dilation; const int ix = pw * stride + offset_ix; const int iy = ph * stride + offset_iy; scalar_t output_val = 0; scalar_t lost = 0; scalar_t t = 0; scalar_t input = 0; int c, bottom_id, top_id, channel_filter_id; if (iy >= 0 && iy <= bottom_height - 1 && ix >= 0 && ix <= bottom_width - 1) { for (c = lane_id; c < channels; c += WARP_SIZE) { bottom_id = Loc2Index(n, c, iy, ix, channels, bottom_height, bottom_width); top_id = Loc2Index(n, ph, pw, c, top_height, top_width, channels); channel_filter_id = Loc2Index(n, c, kernel_iy, kernel_ix, channels, kernel_size, kernel_size); input = top_diff[top_id] * bottom_data[bottom_id] * bottom_channel_filter[channel_filter_id]; t = output_val + input; lost += fabs(output_val) >= fabs(input) ? 
(output_val - t) + input : (input - t) + output_val; output_val = t; } } __syncwarp(); output_val = warpReduceSum(output_val); lost = warpReduceSum(lost); if (lane_id == 0) { const int spatial_filter_id = Loc2Index(n, ph, pw, spatial_filter_c, top_height, top_width, spatial_filter_channels); spatial_filter_diff[spatial_filter_id] = output_val + lost; } } template <typename scalar_t> __global__ void DDFBackward_Channel_Filter(const int num_kernels, const scalar_t *__restrict__ top_diff, const scalar_t *__restrict__ bottom_data, const scalar_t *__restrict__ bottom_spatial_filter, const int kernel_size, const int dilation, const int stride, const int channels, const int top_height, const int top_width, const int bottom_height, const int bottom_width, scalar_t *__restrict__ channel_filter_diff){ int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int lane_id = index % WARP_SIZE; index = index / WARP_SIZE; const int kernel_ix = index % kernel_size; const int kernel_iy = (index / kernel_size) % kernel_size; const int c = (index / kernel_size / kernel_size ) % channels; const int n = index / kernel_size / kernel_size / channels; const int spatial_filter_c = kernel_iy * kernel_size + kernel_ix; const int offset_ix = (kernel_ix - (kernel_size - 1) / 2) * dilation; const int offset_iy = (kernel_iy - (kernel_size - 1) / 2) * dilation; scalar_t output_val = 0; scalar_t lost = 0; scalar_t t = 0; scalar_t input = 0; int iy, ix, bottom_iy, bottom_ix, top_id, spatial_filter_id, bottom_id; #pragma unroll for (index = lane_id; index < top_height*top_width; index+=WARP_SIZE){ iy = index / top_width; ix = index % top_width; bottom_iy = iy * stride; bottom_ix = ix * stride; if (bottom_iy + offset_iy < 0 || bottom_iy + offset_iy > bottom_height - 1 || bottom_ix + offset_ix < 0 || bottom_ix + offset_ix > bottom_width - 1){ continue; } top_id = Loc2Index(n, c, iy, ix, channels, top_height, top_width); spatial_filter_id = Loc2Index(n, spatial_filter_c, iy, ix, kernel_size * kernel_size, top_height, top_width); bottom_id = Loc2Index(n, c, bottom_iy + offset_iy, bottom_ix + offset_ix, channels, bottom_height, bottom_width); input = top_diff[top_id] * bottom_data[bottom_id] * bottom_spatial_filter[spatial_filter_id]; t = output_val + input; lost += fabs(output_val) >= fabs(input) ? 
(output_val - t) + input : (input - t) + output_val; output_val = t; } __syncwarp(); output_val = warpReduceSum(output_val); lost = warpReduceSum(lost); if (lane_id == 0) { const int channel_filter_id = Loc2Index(n, c, kernel_iy, kernel_ix, channels, kernel_size, kernel_size); channel_filter_diff[channel_filter_id] = output_val + lost; } } int DDFMulBackwardLauncher(const at::Tensor top_grad, const at::Tensor features, const at::Tensor channel_filter, const at::Tensor spatial_filter, const int kernel_size, const int dilation, const int stride, const int batch_size, const int channels, const int top_height, const int top_width, const int bottom_height, const int bottom_width, at::Tensor rtop_grad, at::Tensor rbottom_grad, at::Tensor rspatial_filter_grad, at::Tensor bottom_grad, at::Tensor channel_filter_grad, at::Tensor spatial_filter_grad){ hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "NCHW2NHWC_Top_Grad", ([&] { const scalar_t *bottom_data = top_grad.data<scalar_t>(); scalar_t *top_data = rtop_grad.data<scalar_t>(); const int dh = divideUP(channels, kTileDim); const int dw = divideUP(top_height * top_width, kTileDim); hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<scalar_t>) , dim3(batch_size * dh * dw), dim3(dim3(kTileDim, kBlockRows)), 0, stream, batch_size, channels, top_height * top_width, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "DDFBackward_Feature", ([&] { const int num_kernels = batch_size * bottom_height * bottom_width * THREADS_PER_PIXEL; const scalar_t *top_diff = rtop_grad.data<scalar_t>(); const scalar_t *bottom_spatial_filter = spatial_filter.data<scalar_t>(); const scalar_t *bottom_channel_filter = channel_filter.data<scalar_t>(); scalar_t *bottom_diff = rbottom_grad.data<scalar_t>(); hipLaunchKernelGGL(( DDFBackward_Feature<scalar_t>) , dim3(at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, stream, num_kernels, top_diff, bottom_spatial_filter, bottom_channel_filter, kernel_size, dilation, stride, channels, top_height, top_width, bottom_height, bottom_width, bottom_diff); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "NHWC2NCHW_Bottom_Grad", ([&] { const scalar_t *bottom_data = rbottom_grad.data<scalar_t>(); scalar_t *top_data = bottom_grad.data<scalar_t>(); const int dh = divideUP(bottom_height * bottom_width, kTileDim); const int dw = divideUP(channels, kTileDim); hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<scalar_t>) , dim3(batch_size * dh * dw), dim3(dim3(kTileDim, kBlockRows)), 0, stream, batch_size, bottom_height * bottom_width, channels, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES( top_grad.type(), "DDFBackward_Spatial_Filter", ([&] { const int num_kernels = batch_size * top_height * top_width * kernel_size * kernel_size * WARP_SIZE; const scalar_t *top_diff = rtop_grad.data<scalar_t>(); const scalar_t *bottom_data = features.data<scalar_t>(); const scalar_t *bottom_channel_filter = channel_filter.data<scalar_t>(); scalar_t *spatial_filter_diff = rspatial_filter_grad.data<scalar_t>(); hipLaunchKernelGGL(( DDFBackward_Spatial_Filter<scalar_t>) , dim3(at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, stream, num_kernels, top_diff, bottom_data, bottom_channel_filter, kernel_size, dilation, stride, channels, top_height, top_width, bottom_height, bottom_width, spatial_filter_diff); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), 
"NHWC2NCHW_Spatial_Filter", ([&] { const scalar_t *bottom_data = rspatial_filter_grad.data<scalar_t>(); scalar_t *top_data = spatial_filter_grad.data<scalar_t>(); const int dh = divideUP(top_height * top_width, kTileDim); const int dw = divideUP(kernel_size * kernel_size, kTileDim); hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<scalar_t>) , dim3(batch_size * dh * dw), dim3(dim3(kTileDim, kBlockRows)), 0, stream, batch_size, top_height * top_width, kernel_size * kernel_size, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES( top_grad.type(), "DDFBackward_Channel_Filter", ([&] { const int num_kernels = batch_size * channels * kernel_size * kernel_size * WARP_SIZE; const scalar_t *top_diff = top_grad.data<scalar_t>(); const scalar_t *bottom_data = features.data<scalar_t>(); const scalar_t *bottom_spatial_filter = spatial_filter.data<scalar_t>(); scalar_t *channel_filter_diff = channel_filter_grad.data<scalar_t>(); hipLaunchKernelGGL(( DDFBackward_Channel_Filter<scalar_t>) , dim3(at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, stream, num_kernels, top_diff, bottom_data, bottom_spatial_filter, kernel_size, dilation, stride, channels, top_height, top_width, bottom_height, bottom_width, channel_filter_diff); })); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; }
d4d039fdba95f3b61c7b0775a21192c1e43b3094.cu
#include <ATen/ATen.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <THC/THCAtomics.cuh> #include <cmath> using namespace at; // temporal fix for pytorch<=0.4.1 (see #9848) #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 // 32 * 32 #define WARP_SIZE 32 #define THREADS_PER_PIXEL 32 #define MAX_SHARED_MEMORY 49152 #define MAX_SHARED_SCALAR_T 6144 // 49152 / 8 = 6144 #define kTileDim 32 #define kBlockRows 8 #define FORWARD_WARP_SIZE 16 #define FORWARD_THREADS_PER_PIXEL 64 #define FULL_MASK 0xffffffff inline int divideUP(const int x, const int y) { return (((x) + (y)-1) / (y)); } __device__ inline int Loc2Index(const int n, const int c, const int h, const int w, const int channel_num, const int height, const int width) { int index = w + (h + (c + n * channel_num) * height) * width; return index; } /* TODO: move this to a common place */ template <typename scalar_t> __device__ inline scalar_t min(scalar_t a, scalar_t b) { return a < b ? a : b; } template <typename scalar_t> __device__ inline scalar_t max(scalar_t a, scalar_t b) { return a > b ? a : b; } template <typename scalar_t> __device__ __forceinline__ scalar_t warpReduceSum(scalar_t val) { for (int offset = 16; offset > 0; offset /= 2) val += __shfl_down_sync(FULL_MASK, val, offset); return val; } // Splits the original matrix into submatrices with size 32 * 32. // Each block transposes one submatrix by loading it into shared memory. // Reference https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ template <typename scalar_t> __global__ void BatchTranspose2DCUDAKernel(const int N, const int H, const int W, const int dh, const int dw, const scalar_t *__restrict__ X, scalar_t *__restrict__ Y) { __shared__ scalar_t tile[kTileDim][kTileDim + 1]; const int n = blockIdx.x / (dh * dw); const int k = blockIdx.x % (dh * dw); const int r = k / dw; const int c = k % dw; const int offset = n * H * W; int x = c * kTileDim + threadIdx.x; int y = r * kTileDim + threadIdx.y; int i; if (x < W) { for (i = 0; threadIdx.y + i < kTileDim && y + i < H; i += kBlockRows) { tile[threadIdx.y + i][threadIdx.x] = X[offset + (y + i) * W + x]; } } __syncthreads(); x = r * kTileDim + threadIdx.x; y = c * kTileDim + threadIdx.y; if (x < H) { for (i = 0; threadIdx.y + i < kTileDim && y + i < W; i += kBlockRows) { Y[offset + (y + i) * H + x] = tile[threadIdx.x][threadIdx.y + i]; } } } template <typename scalar_t> __global__ void DDFForward(const int num_kernels, const scalar_t *__restrict__ bottom_data, const scalar_t *__restrict__ bottom_channel_filter, const scalar_t *__restrict__ bottom_spatial_filter, const int kernel_size, const int dilation, const int stride, const int channels, const int bottom_height, const int bottom_width, const int top_height, const int top_width, scalar_t *__restrict__ top_data) { __shared__ scalar_t shared_spatial_filter[MAX_SHARED_SCALAR_T]; bool valid_index = false; int index = threadIdx.x + blockIdx.y * blockDim.x; if (index > num_kernels - 1){ return; } const int pixel_id = threadIdx.x / FORWARD_THREADS_PER_PIXEL; // pixel in block from 0 to 15 const int split_id = threadIdx.x % FORWARD_THREADS_PER_PIXEL; // thread in pixel from 0 to 63 // (n, c, ph, pw) is an element in the bottom_data index = index / FORWARD_THREADS_PER_PIXEL; const int pw = index % top_width; const int ph = index / top_width; const int n = blockIdx.x; const 
int start_w = pw * stride - ((kernel_size - 1) / 2)*dilation; const int end_w = pw * stride + ((kernel_size - 1) / 2)*dilation + 1; const int start_h = ph * stride - ((kernel_size - 1) / 2)*dilation; const int end_h = ph * stride + ((kernel_size - 1) / 2)*dilation + 1; scalar_t output_val = 0; scalar_t lost = 0; scalar_t t = 0; scalar_t input = 0; int c, spatial_filter_id, channel_filter_id, iy, ix, kernel_iy, kernel_ix, filter_c, bottom_id, top_id; for (c = split_id; c < kernel_size*kernel_size; c += FORWARD_THREADS_PER_PIXEL) { spatial_filter_id = Loc2Index(n, c, ph, pw, kernel_size * kernel_size, top_height, top_width); shared_spatial_filter[c * FORWARD_WARP_SIZE + pixel_id] = bottom_spatial_filter[spatial_filter_id]; } __syncthreads(); #pragma unroll for (c = split_id; c < channels; c += FORWARD_THREADS_PER_PIXEL) { output_val = 0; lost = 0; t = 0; input = 0; #pragma unroll for (iy = start_h; iy < end_h; iy+=dilation) { #pragma unroll for (ix = start_w; ix < end_w; ix+=dilation) { if (iy < 0 || iy > bottom_height - 1 || ix < 0 || ix > bottom_width - 1) { continue; } kernel_iy = (iy - start_h) / dilation; kernel_ix = (ix - start_w) / dilation; filter_c = kernel_iy * kernel_size + kernel_ix; bottom_id = Loc2Index(n, c, iy, ix, channels, bottom_height, bottom_width); spatial_filter_id = Loc2Index(n, filter_c, ph, pw, kernel_size * kernel_size, top_height, top_width); channel_filter_id = (n * channels + c ) * kernel_size * kernel_size + filter_c; // Kahan and Babuska summation, Neumaier variant input = bottom_data[bottom_id] * shared_spatial_filter[filter_c * FORWARD_WARP_SIZE + pixel_id] * bottom_channel_filter[channel_filter_id]; t = output_val + input; lost += fabs(output_val) >= fabs(input) ? (output_val - t) + input : (input - t) + output_val; output_val = t; } } top_id = Loc2Index(n, c, ph, pw, channels, top_height, top_width); // Kahan and Babuska summation, Neumaier variant top_data[top_id] = output_val + lost; } } int DDFMulForwardLauncher(const at::Tensor features, const at::Tensor channel_filter, const at::Tensor spatial_filter, const int kernel_size, const int dilation, const int stride, const int batch_size,const int channels, const int bottom_height, const int bottom_width, const int top_height, const int top_width, at::Tensor output){ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.type(), "DDFForward", ([&] { const int num_kernels = top_height * top_width * FORWARD_THREADS_PER_PIXEL; dim3 grid(batch_size, at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK)); const scalar_t *bottom_data = features.data<scalar_t>(); const scalar_t *bottom_channel_filter = channel_filter.data<scalar_t>(); const scalar_t *bottom_spatial_filter = spatial_filter.data<scalar_t>(); scalar_t *top_data = output.data<scalar_t>(); DDFForward<scalar_t> <<<grid, THREADS_PER_BLOCK, 0, stream>>>( num_kernels, bottom_data, bottom_channel_filter, bottom_spatial_filter, kernel_size, dilation, stride, channels, bottom_height, bottom_width, top_height, top_width, top_data); })); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; } template <typename scalar_t> __global__ void DDFBackward_Feature(const int num_kernels, const scalar_t *__restrict__ top_diff, const scalar_t *__restrict__ bottom_spatial_filter, const scalar_t *__restrict__ bottom_channel_filter, const int kernel_size, const int dilation, const int stride, const int channels, const int 
top_height, const int top_width, const int bottom_height, const int bottom_width, scalar_t *__restrict__ bottom_diff){ __shared__ scalar_t shared_spatial_filter[MAX_SHARED_SCALAR_T]; int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int pixel_id = threadIdx.x / THREADS_PER_PIXEL; const int split_id = threadIdx.x % THREADS_PER_PIXEL; // (n, c, ph, pw) is an element in the bottom_data index = index / THREADS_PER_PIXEL; const int pw = index % bottom_width; const int ph = (index / bottom_width) % bottom_height; const int n = index / bottom_width / bottom_height; const int start_w = pw - ((kernel_size - 1) / 2)*dilation; const int end_w = pw + ((kernel_size - 1) / 2)*dilation + 1; const int start_h = ph - ((kernel_size - 1) / 2)*dilation; const int end_h = ph + ((kernel_size - 1) / 2)*dilation + 1; for (int c = split_id; c < kernel_size * kernel_size; c += THREADS_PER_PIXEL) { const int kernel_ix = c % kernel_size ; const int kernel_iy = c / kernel_size; const int ix = start_w + kernel_ix * dilation; const int iy = start_h + kernel_iy * dilation; if (ix % stride !=0 || iy % stride !=0 || iy/stride < 0 || iy/stride > top_height - 1 || ix/stride < 0 || ix/stride > top_width - 1){ shared_spatial_filter[c * WARP_SIZE + pixel_id] = 0; continue; }; const int spatial_filter_c = kernel_size * kernel_size - c - 1; int spatial_filter_id = Loc2Index(n, spatial_filter_c, iy/stride, ix/stride, kernel_size * kernel_size, top_height, top_width); shared_spatial_filter[c * WARP_SIZE + pixel_id] = bottom_spatial_filter[spatial_filter_id]; } __syncthreads(); scalar_t output_val = 0; scalar_t lost = 0; scalar_t t = 0; scalar_t input = 0; int bottom_iy, bottom_ix, iy, ix, kernel_iy, kernel_ix, spatial_filter_c, channel_filter_id, top_id, bottom_id; #pragma unroll for (int c = split_id; c < channels; c += THREADS_PER_PIXEL){ output_val = 0; lost = 0; t = 0; input = 0; #pragma unroll for (bottom_iy = start_h; bottom_iy < end_h; bottom_iy+=dilation){ #pragma unroll for (bottom_ix = start_w; bottom_ix < end_w; bottom_ix+=dilation){ if (bottom_iy % stride != 0 || bottom_ix % stride != 0){ continue; } iy = bottom_iy / stride; ix = bottom_ix / stride; if (iy < 0 || iy > top_height - 1 || ix < 0 || ix > top_width - 1){ continue; } kernel_iy = (bottom_iy - start_h) / dilation; kernel_ix = (bottom_ix - start_w) / dilation; spatial_filter_c = kernel_iy * kernel_size + kernel_ix; channel_filter_id = Loc2Index(n, c, kernel_size - kernel_iy - 1, kernel_size - kernel_ix - 1, channels, kernel_size, kernel_size); top_id = Loc2Index(n, iy, ix, c, top_height, top_width, channels); input = shared_spatial_filter[spatial_filter_c * WARP_SIZE + pixel_id] * top_diff[top_id] * bottom_channel_filter[channel_filter_id]; t = output_val + input; lost += fabs(output_val) >= fabs(input) ? 
(output_val - t) + input : (input - t) + output_val; output_val = t; } } bottom_id = Loc2Index(n, ph, pw, c, bottom_height, bottom_width, channels); bottom_diff[bottom_id] = output_val + lost; } } template <typename scalar_t> __global__ void DDFBackward_Spatial_Filter(const int num_kernels, const scalar_t *__restrict__ top_diff, const scalar_t *__restrict__ bottom_data, const scalar_t *__restrict__ bottom_channel_filter, const int kernel_size, const int dilation, const int stride,const int channels, const int top_height, const int top_width, const int bottom_height, const int bottom_width, scalar_t *__restrict__ spatial_filter_diff) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int spatial_filter_channels = kernel_size * kernel_size; const int lane_id = index % WARP_SIZE; index = index / WARP_SIZE; const int spatial_filter_c = index % spatial_filter_channels; // (n, c, ph, pw) is an element in the bottom_data index = index / spatial_filter_channels; const int pw = index % top_width; const int ph = (index / top_width) % top_height; const int n = index / top_width / top_height; const int kernel_ix = spatial_filter_c % kernel_size; const int kernel_iy = spatial_filter_c / kernel_size; const int offset_ix = (kernel_ix - (kernel_size - 1) / 2) * dilation; const int offset_iy = (kernel_iy - (kernel_size - 1) / 2) * dilation; const int ix = pw * stride + offset_ix; const int iy = ph * stride + offset_iy; scalar_t output_val = 0; scalar_t lost = 0; scalar_t t = 0; scalar_t input = 0; int c, bottom_id, top_id, channel_filter_id; if (iy >= 0 && iy <= bottom_height - 1 && ix >= 0 && ix <= bottom_width - 1) { for (c = lane_id; c < channels; c += WARP_SIZE) { bottom_id = Loc2Index(n, c, iy, ix, channels, bottom_height, bottom_width); top_id = Loc2Index(n, ph, pw, c, top_height, top_width, channels); channel_filter_id = Loc2Index(n, c, kernel_iy, kernel_ix, channels, kernel_size, kernel_size); input = top_diff[top_id] * bottom_data[bottom_id] * bottom_channel_filter[channel_filter_id]; t = output_val + input; lost += fabs(output_val) >= fabs(input) ? 
(output_val - t) + input : (input - t) + output_val; output_val = t; } } __syncwarp(); output_val = warpReduceSum(output_val); lost = warpReduceSum(lost); if (lane_id == 0) { const int spatial_filter_id = Loc2Index(n, ph, pw, spatial_filter_c, top_height, top_width, spatial_filter_channels); spatial_filter_diff[spatial_filter_id] = output_val + lost; } } template <typename scalar_t> __global__ void DDFBackward_Channel_Filter(const int num_kernels, const scalar_t *__restrict__ top_diff, const scalar_t *__restrict__ bottom_data, const scalar_t *__restrict__ bottom_spatial_filter, const int kernel_size, const int dilation, const int stride, const int channels, const int top_height, const int top_width, const int bottom_height, const int bottom_width, scalar_t *__restrict__ channel_filter_diff){ int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int lane_id = index % WARP_SIZE; index = index / WARP_SIZE; const int kernel_ix = index % kernel_size; const int kernel_iy = (index / kernel_size) % kernel_size; const int c = (index / kernel_size / kernel_size ) % channels; const int n = index / kernel_size / kernel_size / channels; const int spatial_filter_c = kernel_iy * kernel_size + kernel_ix; const int offset_ix = (kernel_ix - (kernel_size - 1) / 2) * dilation; const int offset_iy = (kernel_iy - (kernel_size - 1) / 2) * dilation; scalar_t output_val = 0; scalar_t lost = 0; scalar_t t = 0; scalar_t input = 0; int iy, ix, bottom_iy, bottom_ix, top_id, spatial_filter_id, bottom_id; #pragma unroll for (index = lane_id; index < top_height*top_width; index+=WARP_SIZE){ iy = index / top_width; ix = index % top_width; bottom_iy = iy * stride; bottom_ix = ix * stride; if (bottom_iy + offset_iy < 0 || bottom_iy + offset_iy > bottom_height - 1 || bottom_ix + offset_ix < 0 || bottom_ix + offset_ix > bottom_width - 1){ continue; } top_id = Loc2Index(n, c, iy, ix, channels, top_height, top_width); spatial_filter_id = Loc2Index(n, spatial_filter_c, iy, ix, kernel_size * kernel_size, top_height, top_width); bottom_id = Loc2Index(n, c, bottom_iy + offset_iy, bottom_ix + offset_ix, channels, bottom_height, bottom_width); input = top_diff[top_id] * bottom_data[bottom_id] * bottom_spatial_filter[spatial_filter_id]; t = output_val + input; lost += fabs(output_val) >= fabs(input) ? 
(output_val - t) + input : (input - t) + output_val; output_val = t; } __syncwarp(); output_val = warpReduceSum(output_val); lost = warpReduceSum(lost); if (lane_id == 0) { const int channel_filter_id = Loc2Index(n, c, kernel_iy, kernel_ix, channels, kernel_size, kernel_size); channel_filter_diff[channel_filter_id] = output_val + lost; } } int DDFMulBackwardLauncher(const at::Tensor top_grad, const at::Tensor features, const at::Tensor channel_filter, const at::Tensor spatial_filter, const int kernel_size, const int dilation, const int stride, const int batch_size, const int channels, const int top_height, const int top_width, const int bottom_height, const int bottom_width, at::Tensor rtop_grad, at::Tensor rbottom_grad, at::Tensor rspatial_filter_grad, at::Tensor bottom_grad, at::Tensor channel_filter_grad, at::Tensor spatial_filter_grad){ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "NCHW2NHWC_Top_Grad", ([&] { const scalar_t *bottom_data = top_grad.data<scalar_t>(); scalar_t *top_data = rtop_grad.data<scalar_t>(); const int dh = divideUP(channels, kTileDim); const int dw = divideUP(top_height * top_width, kTileDim); BatchTranspose2DCUDAKernel<scalar_t> <<<batch_size * dh * dw, dim3(kTileDim, kBlockRows), 0, stream>>>( batch_size, channels, top_height * top_width, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "DDFBackward_Feature", ([&] { const int num_kernels = batch_size * bottom_height * bottom_width * THREADS_PER_PIXEL; const scalar_t *top_diff = rtop_grad.data<scalar_t>(); const scalar_t *bottom_spatial_filter = spatial_filter.data<scalar_t>(); const scalar_t *bottom_channel_filter = channel_filter.data<scalar_t>(); scalar_t *bottom_diff = rbottom_grad.data<scalar_t>(); DDFBackward_Feature<scalar_t> <<<at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK), THREADS_PER_BLOCK, 0, stream>>>( num_kernels, top_diff, bottom_spatial_filter, bottom_channel_filter, kernel_size, dilation, stride, channels, top_height, top_width, bottom_height, bottom_width, bottom_diff); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "NHWC2NCHW_Bottom_Grad", ([&] { const scalar_t *bottom_data = rbottom_grad.data<scalar_t>(); scalar_t *top_data = bottom_grad.data<scalar_t>(); const int dh = divideUP(bottom_height * bottom_width, kTileDim); const int dw = divideUP(channels, kTileDim); BatchTranspose2DCUDAKernel<scalar_t> <<<batch_size * dh * dw, dim3(kTileDim, kBlockRows), 0, stream>>>( batch_size, bottom_height * bottom_width, channels, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES( top_grad.type(), "DDFBackward_Spatial_Filter", ([&] { const int num_kernels = batch_size * top_height * top_width * kernel_size * kernel_size * WARP_SIZE; const scalar_t *top_diff = rtop_grad.data<scalar_t>(); const scalar_t *bottom_data = features.data<scalar_t>(); const scalar_t *bottom_channel_filter = channel_filter.data<scalar_t>(); scalar_t *spatial_filter_diff = rspatial_filter_grad.data<scalar_t>(); DDFBackward_Spatial_Filter<scalar_t> <<<at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK), THREADS_PER_BLOCK, 0, stream>>>( num_kernels, top_diff, bottom_data, bottom_channel_filter, kernel_size, dilation, stride, channels, top_height, top_width, bottom_height, bottom_width, spatial_filter_diff); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "NHWC2NCHW_Spatial_Filter", ([&] { const scalar_t *bottom_data = rspatial_filter_grad.data<scalar_t>(); scalar_t *top_data = 
spatial_filter_grad.data<scalar_t>(); const int dh = divideUP(top_height * top_width, kTileDim); const int dw = divideUP(kernel_size * kernel_size, kTileDim); BatchTranspose2DCUDAKernel<scalar_t> <<<batch_size * dh * dw, dim3(kTileDim, kBlockRows), 0, stream>>>( batch_size, top_height * top_width, kernel_size * kernel_size, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES( top_grad.type(), "DDFBackward_Channel_Filter", ([&] { const int num_kernels = batch_size * channels * kernel_size * kernel_size * WARP_SIZE; const scalar_t *top_diff = top_grad.data<scalar_t>(); const scalar_t *bottom_data = features.data<scalar_t>(); const scalar_t *bottom_spatial_filter = spatial_filter.data<scalar_t>(); scalar_t *channel_filter_diff = channel_filter_grad.data<scalar_t>(); DDFBackward_Channel_Filter<scalar_t> <<<at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK), THREADS_PER_BLOCK, 0, stream>>>( num_kernels, top_diff, bottom_data, bottom_spatial_filter, kernel_size, dilation, stride, channels, top_height, top_width, bottom_height, bottom_width, channel_filter_diff); })); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; }
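// ---------------------------------------------------------------------------
// Two reference sketches added for illustration only; the helper names are
// assumptions and are not used elsewhere in this file.
//
// 1) The kernels above accumulate their products with the Neumaier variant of
//    Kahan-Babuska compensated summation (the output_val / lost / t pattern).
//    The same pattern written as a standalone routine:
template <typename scalar_t>
__host__ __device__ inline scalar_t neumaierSumSketch(const scalar_t* x, int n) {
  scalar_t sum = 0;   // running (naive) sum
  scalar_t lost = 0;  // low-order bits dropped by the individual additions
  for (int i = 0; i < n; ++i) {
    scalar_t t = sum + x[i];
    // Whichever operand is smaller in magnitude loses precision; recover it.
    lost += fabs(sum) >= fabs(x[i]) ? (sum - t) + x[i] : (x[i] - t) + sum;
    sum = t;
  }
  return sum + lost;  // apply the accumulated correction once at the end
}

// 2) DDFForward stages kernel_size^2 * FORWARD_WARP_SIZE spatial-filter
//    entries and DDFBackward_Feature stages kernel_size^2 * WARP_SIZE entries
//    in the fixed-size shared array shared_spatial_filter[MAX_SHARED_SCALAR_T],
//    so kernel_size is bounded by the larger of the two footprints:
static inline bool ddfSharedFilterFitsSketch(int kernel_size) {
  // e.g. kernel_size = 3 -> 9 * 32 = 288 entries, well under the 6144 limit
  return kernel_size * kernel_size * WARP_SIZE <= MAX_SHARED_SCALAR_T;
}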
FindDublicates.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "FindDublicates.h" #include <vector> __global__ void findDublicatesNaive(unsigned int* candidates, unsigned int numberOfCandidates, unsigned int dim, bool* isAlreadyDeleted, bool* output){ unsigned int candidate = blockIdx.x*blockDim.x+threadIdx.x; unsigned int numberOfBlocks = ceilf((float)dim/32); if(candidate < numberOfCandidates && !isAlreadyDeleted[candidate]){ for(unsigned int i = candidate+1; i < numberOfCandidates; i++){ bool equal = true; for(unsigned int j = 0; j < numberOfBlocks; j++){ equal &= (candidates[candidate + numberOfCandidates*j] == candidates[i + numberOfCandidates*j]); } if(equal){ output[i] = true; } } } } __global__ void findDublicatesBreaking(unsigned int* candidates, unsigned int numberOfCandidates, unsigned int dim, bool* isAlreadyDeleted, bool* output){ unsigned int candidate = blockIdx.x*blockDim.x+threadIdx.x; unsigned int numberOfBlocks = ceilf((float)dim/32); if(candidate < numberOfCandidates && !isAlreadyDeleted[candidate]){ for(unsigned int i = candidate+1; i < numberOfCandidates; i++){ bool equal = true; for(unsigned int j = 0; j < numberOfBlocks; j++){ equal &= (candidates[candidate + numberOfCandidates*j] == candidates[i + numberOfCandidates*j]); } if(equal){ output[i] = true; break; } } } } __global__ void findDublicatesMoreBreaking(unsigned int* candidates, unsigned int numberOfCandidates, unsigned int dim, bool* isAlreadyDeleted, bool* output){ unsigned int candidate = blockIdx.x*blockDim.x+threadIdx.x; unsigned int numberOfBlocks = ceilf((float)dim/32); if(candidate < numberOfCandidates && !isAlreadyDeleted[candidate]){ for(unsigned int i = candidate+1; i < numberOfCandidates; i++){ bool equal = true; for(unsigned int j = 0; j < numberOfBlocks; j++){ equal &= (candidates[candidate + numberOfCandidates*j] == candidates[i + numberOfCandidates*j]); if(!equal){ break; } } if(equal){ output[i] = true; break; } } } } /** Thin Wrapper for findDublicates */ void findDublicatesWrapper(unsigned int dimGrid, unsigned int dimBlock, hipStream_t stream, unsigned int* candidates, unsigned int numberOfCandidates, unsigned int dim, bool* alreadyDeleted, bool* output, dublicatesType version ){ if(version == Naive){ hipLaunchKernelGGL(( findDublicatesNaive), dim3(dimGrid), dim3(dimBlock), 0, stream, candidates, numberOfCandidates, dim, alreadyDeleted, output); }else if(version == Breaking){ hipLaunchKernelGGL(( findDublicatesBreaking), dim3(dimGrid), dim3(dimBlock), 0, stream, candidates, numberOfCandidates, dim, alreadyDeleted, output); }else if(version == MoreBreaking){ hipLaunchKernelGGL(( findDublicatesMoreBreaking), dim3(dimGrid), dim3(dimBlock), 0, stream, candidates, numberOfCandidates, dim, alreadyDeleted, output); }else if(version == Hash){ findDublicatesHashTableWrapper(dimGrid, dimBlock, stream, candidates, numberOfCandidates, dim, alreadyDeleted, output); } }; void findDublicatesWrapper_mananged(unsigned int dimGrid, unsigned int dimBlock, hipStream_t stream, unsigned int* candidates, unsigned int numberOfCandidates, unsigned int dim, bool* alreadyDeleted, bool* output, dublicatesType version ){ if(version == Naive){ hipLaunchKernelGGL(( findDublicatesNaive), dim3(dimGrid), dim3(dimBlock), 0, stream, candidates, numberOfCandidates, dim, alreadyDeleted, output); }else if(version == Breaking){ hipLaunchKernelGGL(( findDublicatesBreaking), dim3(dimGrid), dim3(dimBlock), 0, stream, candidates, numberOfCandidates, dim, alreadyDeleted, output); }else if(version == 
MoreBreaking){ hipLaunchKernelGGL(( findDublicatesMoreBreaking), dim3(dimGrid), dim3(dimBlock), 0, stream, candidates, numberOfCandidates, dim, alreadyDeleted, output); }else if(version == Hash){ findDublicatesHashTableWrapper_mananged(dimGrid, dimBlock, stream, candidates, numberOfCandidates, dim, alreadyDeleted, output); } }; /** ONLY FOR TESTING */ std::vector<bool> findDublicatesTester(std::vector<std::vector<bool>> candidates, dublicatesType version){ size_t numberOfCandidates = candidates.size(); size_t dim = candidates.at(0).size(); size_t numberOfBlocks = ceilf((float)dim/32); size_t dimBlock = 1024; size_t dimGrid = ceilf((float)numberOfCandidates/dimBlock); size_t sizeOfCandidates = numberOfCandidates*numberOfBlocks*sizeof(unsigned int); size_t sizeOfOutput = numberOfCandidates*sizeof(bool); unsigned int* candidates_h; bool* output_h; unsigned int* candidates_d; bool* output_d; hipHostMalloc((void**) &candidates_h, sizeOfCandidates); hipHostMalloc((void**) &output_h, sizeOfOutput); hipMalloc((void**) &candidates_d, sizeOfCandidates); hipMalloc((void**) &output_d, sizeOfOutput); bool* alreadyDeleted_d; hipMalloc((void**) &alreadyDeleted_d, numberOfCandidates*sizeof(bool)); hipMemset(alreadyDeleted_d, 0, numberOfCandidates*sizeof(bool)); for(size_t i = 0; i < numberOfCandidates; i++){ size_t block = 0; size_t blockNr = 0; for(size_t j = 0; j < dim; j++){ if (j % 32 == 0 && j != 0){ candidates_h[i+blockNr*numberOfCandidates] = block; block = 0; blockNr++; } block |= (candidates.at(i).at(j) << j); } candidates_h[i+blockNr*numberOfCandidates] = block; } hipMemcpy(candidates_d, candidates_h, sizeOfCandidates, hipMemcpyHostToDevice); hipMemset(output_d, false, sizeOfOutput); if(version == Naive){ hipLaunchKernelGGL(( findDublicatesNaive), dim3(dimGrid), dim3(dimBlock), 0, 0, candidates_d, numberOfCandidates, dim, alreadyDeleted_d, output_d); }else if(version == Breaking){ hipLaunchKernelGGL(( findDublicatesBreaking), dim3(dimGrid), dim3(dimBlock), 0, 0, candidates_d, numberOfCandidates, dim, alreadyDeleted_d, output_d); }else if(version == MoreBreaking){ hipLaunchKernelGGL(( findDublicatesMoreBreaking), dim3(dimGrid), dim3(dimBlock), 0, 0, candidates_d, numberOfCandidates, dim, alreadyDeleted_d, output_d); }else if(version == Hash){ findDublicatesHashTableTester(dimGrid, dimBlock, candidates_d, numberOfCandidates, dim, alreadyDeleted_d, output_d); } hipMemcpy(output_h, output_d, sizeOfOutput, hipMemcpyDeviceToHost); auto result = std::vector<bool>(); for(size_t i = 0; i < numberOfCandidates; i++){ result.push_back(output_h[i]); } return result; }
FindDublicates.cu
#include "FindDublicates.h" #include <vector> __global__ void findDublicatesNaive(unsigned int* candidates, unsigned int numberOfCandidates, unsigned int dim, bool* isAlreadyDeleted, bool* output){ unsigned int candidate = blockIdx.x*blockDim.x+threadIdx.x; unsigned int numberOfBlocks = ceilf((float)dim/32); if(candidate < numberOfCandidates && !isAlreadyDeleted[candidate]){ for(unsigned int i = candidate+1; i < numberOfCandidates; i++){ bool equal = true; for(unsigned int j = 0; j < numberOfBlocks; j++){ equal &= (candidates[candidate + numberOfCandidates*j] == candidates[i + numberOfCandidates*j]); } if(equal){ output[i] = true; } } } } __global__ void findDublicatesBreaking(unsigned int* candidates, unsigned int numberOfCandidates, unsigned int dim, bool* isAlreadyDeleted, bool* output){ unsigned int candidate = blockIdx.x*blockDim.x+threadIdx.x; unsigned int numberOfBlocks = ceilf((float)dim/32); if(candidate < numberOfCandidates && !isAlreadyDeleted[candidate]){ for(unsigned int i = candidate+1; i < numberOfCandidates; i++){ bool equal = true; for(unsigned int j = 0; j < numberOfBlocks; j++){ equal &= (candidates[candidate + numberOfCandidates*j] == candidates[i + numberOfCandidates*j]); } if(equal){ output[i] = true; break; } } } } __global__ void findDublicatesMoreBreaking(unsigned int* candidates, unsigned int numberOfCandidates, unsigned int dim, bool* isAlreadyDeleted, bool* output){ unsigned int candidate = blockIdx.x*blockDim.x+threadIdx.x; unsigned int numberOfBlocks = ceilf((float)dim/32); if(candidate < numberOfCandidates && !isAlreadyDeleted[candidate]){ for(unsigned int i = candidate+1; i < numberOfCandidates; i++){ bool equal = true; for(unsigned int j = 0; j < numberOfBlocks; j++){ equal &= (candidates[candidate + numberOfCandidates*j] == candidates[i + numberOfCandidates*j]); if(!equal){ break; } } if(equal){ output[i] = true; break; } } } } /** Thin Wrapper for findDublicates */ void findDublicatesWrapper(unsigned int dimGrid, unsigned int dimBlock, cudaStream_t stream, unsigned int* candidates, unsigned int numberOfCandidates, unsigned int dim, bool* alreadyDeleted, bool* output, dublicatesType version ){ if(version == Naive){ findDublicatesNaive<<<dimGrid, dimBlock, 0, stream>>>(candidates, numberOfCandidates, dim, alreadyDeleted, output); }else if(version == Breaking){ findDublicatesBreaking<<<dimGrid, dimBlock, 0, stream>>>(candidates, numberOfCandidates, dim, alreadyDeleted, output); }else if(version == MoreBreaking){ findDublicatesMoreBreaking<<<dimGrid, dimBlock, 0, stream>>>(candidates, numberOfCandidates, dim, alreadyDeleted, output); }else if(version == Hash){ findDublicatesHashTableWrapper(dimGrid, dimBlock, stream, candidates, numberOfCandidates, dim, alreadyDeleted, output); } }; void findDublicatesWrapper_mananged(unsigned int dimGrid, unsigned int dimBlock, cudaStream_t stream, unsigned int* candidates, unsigned int numberOfCandidates, unsigned int dim, bool* alreadyDeleted, bool* output, dublicatesType version ){ if(version == Naive){ findDublicatesNaive<<<dimGrid, dimBlock, 0, stream>>>(candidates, numberOfCandidates, dim, alreadyDeleted, output); }else if(version == Breaking){ findDublicatesBreaking<<<dimGrid, dimBlock, 0, stream>>>(candidates, numberOfCandidates, dim, alreadyDeleted, output); }else if(version == MoreBreaking){ findDublicatesMoreBreaking<<<dimGrid, dimBlock, 0, stream>>>(candidates, numberOfCandidates, dim, alreadyDeleted, output); }else if(version == Hash){ findDublicatesHashTableWrapper_mananged(dimGrid, dimBlock, stream, 
candidates, numberOfCandidates, dim, alreadyDeleted, output); } }; /** ONLY FOR TESTING */ std::vector<bool> findDublicatesTester(std::vector<std::vector<bool>> candidates, dublicatesType version){ size_t numberOfCandidates = candidates.size(); size_t dim = candidates.at(0).size(); size_t numberOfBlocks = ceilf((float)dim/32); size_t dimBlock = 1024; size_t dimGrid = ceilf((float)numberOfCandidates/dimBlock); size_t sizeOfCandidates = numberOfCandidates*numberOfBlocks*sizeof(unsigned int); size_t sizeOfOutput = numberOfCandidates*sizeof(bool); unsigned int* candidates_h; bool* output_h; unsigned int* candidates_d; bool* output_d; cudaMallocHost((void**) &candidates_h, sizeOfCandidates); cudaMallocHost((void**) &output_h, sizeOfOutput); cudaMalloc((void**) &candidates_d, sizeOfCandidates); cudaMalloc((void**) &output_d, sizeOfOutput); bool* alreadyDeleted_d; cudaMalloc((void**) &alreadyDeleted_d, numberOfCandidates*sizeof(bool)); cudaMemset(alreadyDeleted_d, 0, numberOfCandidates*sizeof(bool)); for(size_t i = 0; i < numberOfCandidates; i++){ size_t block = 0; size_t blockNr = 0; for(size_t j = 0; j < dim; j++){ if (j % 32 == 0 && j != 0){ candidates_h[i+blockNr*numberOfCandidates] = block; block = 0; blockNr++; } block |= (candidates.at(i).at(j) << j); } candidates_h[i+blockNr*numberOfCandidates] = block; } cudaMemcpy(candidates_d, candidates_h, sizeOfCandidates, cudaMemcpyHostToDevice); cudaMemset(output_d, false, sizeOfOutput); if(version == Naive){ findDublicatesNaive<<<dimGrid, dimBlock>>>(candidates_d, numberOfCandidates, dim, alreadyDeleted_d, output_d); }else if(version == Breaking){ findDublicatesBreaking<<<dimGrid, dimBlock>>>(candidates_d, numberOfCandidates, dim, alreadyDeleted_d, output_d); }else if(version == MoreBreaking){ findDublicatesMoreBreaking<<<dimGrid, dimBlock>>>(candidates_d, numberOfCandidates, dim, alreadyDeleted_d, output_d); }else if(version == Hash){ findDublicatesHashTableTester(dimGrid, dimBlock, candidates_d, numberOfCandidates, dim, alreadyDeleted_d, output_d); } cudaMemcpy(output_h, output_d, sizeOfOutput, cudaMemcpyDeviceToHost); auto result = std::vector<bool>(); for(size_t i = 0; i < numberOfCandidates; i++){ result.push_back(output_h[i]); } return result; }
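The kernels above treat each candidate as dim bits packed into ceil(dim/32) unsigned int blocks stored column-major (block j of candidate i at candidates[i + numberOfCandidates*j]); note that the tester's packing loop shifts by j rather than j % 32, so its encoding is only correct for dim <= 32. Below is a hedged host-side reference sketch (not part of the original file) that mirrors the Naive kernel's contract, useful for checking the GPU output; packedEqual and markDuplicatesHost are my names, and the layout assumption is the one stated above.

#include <vector>
#include <cstddef>

// Returns true if packed candidates a and b are bit-identical. Assumes the
// column-major layout used by the kernels: block j of candidate i lives at
// candidates[i + numberOfCandidates * j].
static bool packedEqual(const std::vector<unsigned int>& candidates,
                        std::size_t numberOfCandidates, std::size_t numberOfBlocks,
                        std::size_t a, std::size_t b) {
    for (std::size_t j = 0; j < numberOfBlocks; j++) {
        if (candidates[a + numberOfCandidates * j] != candidates[b + numberOfCandidates * j])
            return false;
    }
    return true;
}

// Marks output[i] = true for every candidate i that equals some earlier,
// not-yet-deleted candidate -- the same result the Naive kernel produces.
std::vector<bool> markDuplicatesHost(const std::vector<unsigned int>& candidates,
                                     std::size_t numberOfCandidates, std::size_t dim,
                                     const std::vector<bool>& isAlreadyDeleted) {
    std::size_t numberOfBlocks = (dim + 31) / 32;
    std::vector<bool> output(numberOfCandidates, false);
    for (std::size_t c = 0; c < numberOfCandidates; c++) {
        if (isAlreadyDeleted[c]) continue;
        for (std::size_t i = c + 1; i < numberOfCandidates; i++) {
            if (packedEqual(candidates, numberOfCandidates, numberOfBlocks, c, i))
                output[i] = true;
        }
    }
    return output;
}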
2f544e464e39444998e8b216273bf99d056be98e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Parallel program (CUDA) to sum up two vectors
// Compile: nvcc -o vecAdd1 vecAdd1.cu
// Author: Dr. Christer Karlsson
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
    // Make sure we do not go out of bounds
    for(int i = 0; i < n; i++)
        c[i] = a[i] + b[i];
}

int main( int argc, char* argv[] )
{
    // Size of vectors
    int n = 1<<20;

    // Host input vectors
    double *h_a;
    double *h_b;
    // Host output vector
    double *h_c;

    // Device input vectors
    double *d_a;
    double *d_b;
    //Device output vector
    double *d_c;

    // Size, in bytes, of each vector
    size_t bytes = n*sizeof(double);

    // Allocate memory for each vector on host
    h_a = (double*)malloc(bytes);
    h_b = (double*)malloc(bytes);
    h_c = (double*)malloc(bytes);

    // Allocate memory for each vector on GPU
    hipMalloc(&d_a, bytes);
    hipMalloc(&d_b, bytes);
    hipMalloc(&d_c, bytes);

    int i;
    // Initialize vectors on host
    for( i = 0; i < n; i++ ) {
        h_a[i] = sin(i)*sin(i);
        h_b[i] = cos(i)*cos(i);
    }

    // Copy host vectors to device
    hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice);
    hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice);

    // Execute the kernel
    hipLaunchKernelGGL(( vecAdd), dim3(1), dim3(1), 0, 0, d_a, d_b, d_c, n);

    // Wait for the GPU to finish
    hipDeviceSynchronize();

    // Copy array back to host
    hipMemcpy( h_c, d_c, bytes, hipMemcpyDeviceToHost );

    // Sum up vector c and print result divided by n, this should equal 1 within
    // error
    double sum = 0;
    for(i=0; i<n; i++)
        sum += h_c[i];
    printf("final result: %f\n", sum/n);

    // Release device memory
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    // Release host memory
    free(h_a);
    free(h_b);
    free(h_c);

    return 0;
}
2f544e464e39444998e8b216273bf99d056be98e.cu
// Parallel program (CUDA) to sum up two vectors
// Compile: nvcc -o vecAdd1 vecAdd1.cu
// Author: Dr. Christer Karlsson
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
    // Make sure we do not go out of bounds
    for(int i = 0; i < n; i++)
        c[i] = a[i] + b[i];
}

int main( int argc, char* argv[] )
{
    // Size of vectors
    int n = 1<<20;

    // Host input vectors
    double *h_a;
    double *h_b;
    // Host output vector
    double *h_c;

    // Device input vectors
    double *d_a;
    double *d_b;
    //Device output vector
    double *d_c;

    // Size, in bytes, of each vector
    size_t bytes = n*sizeof(double);

    // Allocate memory for each vector on host
    h_a = (double*)malloc(bytes);
    h_b = (double*)malloc(bytes);
    h_c = (double*)malloc(bytes);

    // Allocate memory for each vector on GPU
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);

    int i;
    // Initialize vectors on host
    for( i = 0; i < n; i++ ) {
        h_a[i] = sin(i)*sin(i);
        h_b[i] = cos(i)*cos(i);
    }

    // Copy host vectors to device
    cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);

    // Execute the kernel
    vecAdd<<<1, 1>>>(d_a, d_b, d_c, n);

    // Wait for the GPU to finish
    cudaDeviceSynchronize();

    // Copy array back to host
    cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );

    // Sum up vector c and print result divided by n, this should equal 1 within
    // error
    double sum = 0;
    for(i=0; i<n; i++)
        sum += h_c[i];
    printf("final result: %f\n", sum/n);

    // Release device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    // Release host memory
    free(h_a);
    free(h_b);
    free(h_c);

    return 0;
}
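Despite the comment "Each thread takes care of one element of c", the kernel above loops over all n elements and is launched with <<<1, 1>>>, so it runs serially in a single GPU thread. Below is a hedged sketch (not the author's code) of the per-thread variant the comment describes; vecAddParallel and the block size of 256 are my choices.

// Sketch: one thread per element, guarded by a bounds check.
__global__ void vecAddParallel(const double *a, const double *b, double *c, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)              // make sure we do not go out of bounds
        c[i] = a[i] + b[i];
}

// Launch with enough blocks to cover all n elements, e.g.:
//   int blockSize = 256;                              // arbitrary choice
//   int gridSize  = (n + blockSize - 1) / blockSize;  // ceil(n / blockSize)
//   vecAddParallel<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);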
27d4d915f90d3f208b3e59e41e9e3f24336fc6c5.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <hip/hip_runtime.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); getchar(); exit(EXIT_FAILURE); } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { return thrust::default_random_engine(utilhash((index + 1) * iter) ^ utilhash(depth)); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene *hst_scene = NULL; static glm::vec3 *dev_image = NULL; static Geom *dev_geo = NULL; static Material *dev_mat = NULL; static Ray *dev_rays = NULL; void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); const int geolements = hst_scene->geoms.size(); const int matlements = hst_scene->materials.size(); hipMalloc(&dev_geo, geolements * sizeof(Geom)); hipMalloc(&dev_mat, matlements * sizeof(Material)); hipMalloc(&dev_rays, pixelcount * sizeof(Ray)); hipMemcpy(dev_geo, hst_scene->geoms.data(), geolements * sizeof(Geom), hipMemcpyHostToDevice); hipMemcpy(dev_mat, hst_scene->materials.data(), matlements * sizeof(Material), hipMemcpyHostToDevice); checkCUDAError("pathtraceInit"); } void pathtraceFree() { hipFree(dev_image); // no-op if dev_image is null hipFree(dev_geo); hipFree(dev_mat); hipFree(dev_rays); checkCUDAError("pathtraceFree"); } __global__ void rayBuilder(Camera cam, Ray *rays, float tanx, float tany, glm::vec3 right, glm::vec3 perup, int pixelcount, int iter) { int uidx = (blockIdx.x * blockDim.x) + threadIdx.x; int vidx = (blockIdx.y * blockDim.y) + threadIdx.y; int rayidx = vidx * cam.resolution.x + uidx; thrust::default_random_engine rng = makeSeededRandomEngine(iter, rayidx, 1); thrust::uniform_real_distribution<float> u01(-0.5, 0.5); float result = u01(rng); float u = (2.0f * (uidx + result) / cam.resolution.x - 1.0f); float v = (2.0f * (vidx + result) / cam.resolution.y - 1.0f); if (rayidx < pixelcount) { glm::vec3 eye = cam.position; glm::vec3 pixel = eye + cam.view - 
tanx*u*right - tany*v*perup; rays[rayidx].origin = eye; rays[rayidx].direction = glm::normalize(pixel - eye); rays[rayidx].color = glm::vec3(1.0f); rays[rayidx].pixel = rayidx; } } __global__ void rayDebug(Camera cam, glm::vec3 *image, Ray *rays) { int u = (blockIdx.x * blockDim.x) + threadIdx.x; int v = (blockIdx.y * blockDim.y) + threadIdx.y; int rayidx = v * cam.resolution.x + u; image[rayidx] += glm::abs(rays[rayidx].direction); } __global__ void slingRays(Ray *rays, Geom *geo, int geocount, Material *mats, glm::vec3 *image, int pixelcount, int depth, int iter) { int ridx = blockIdx.x * blockDim.x + threadIdx.x; if (ridx < pixelcount) { Ray r = rays[ridx]; if (glm::length(r.direction) < 0.0001f) { return; } const float t0 = -1.0f; float t1 = INFINITY; bool hit = false; bool light = false; bool outside = true; glm::vec3 pt = glm::vec3(0.0f); glm::vec3 temp_pt = glm::vec3(0.0f); glm::vec3 nml = glm::vec3(0.0f); glm::vec3 temp_nml = glm::vec3(0.0f); Geom ghit; Geom g = geo[0]; for (int i = 0; i < geocount; i++) { g = geo[i]; if (g.type == SPHERE) { float temp = sphereIntersectionTest(g, r, temp_pt, temp_nml, outside); if (t0 < temp && temp < t1) { t1 = temp; pt = temp_pt; nml = temp_nml; hit = true; ghit = g; } } else if (g.type == CUBE) { float temp = boxIntersectionTest(g, r, temp_pt, temp_nml, outside); if (t0 < temp && temp < t1) { t1 = temp; pt = temp_pt; nml = temp_nml; hit = true; ghit = g; } } } if (hit) { float emit = mats[ghit.materialid].emittance; if (emit > 0.0f) { r.color *= mats[ghit.materialid].emittance; r.direction = glm::vec3(0.0f); } else { thrust::default_random_engine rng = makeSeededRandomEngine(iter, ridx, depth); scatterRay(r, outside, r.color, pt, nml, mats[ghit.materialid], rng); } } else { //terminate r.color = glm::vec3(0.1f,0.1f,0.1f); r.direction = glm::vec3(0.0f); } rays[ridx] = r; } } __global__ void consumeRays(Ray *rays, glm::vec3 *image, int pixelcount) { int ridx = blockIdx.x * blockDim.x + threadIdx.x; if (ridx < pixelcount) { Ray r = rays[ridx]; if (glm::length(r.direction) < 0.00001f) { image[r.pixel] += r.color; //printf("%f %f %f \n", r.color.x, r.color.y, r.color.z); } } } /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; const int blockSideLength = 8; const dim3 blockSize(blockSideLength, blockSideLength); const dim3 blocksPerGrid( (cam.resolution.x + blockSize.x - 1) / blockSize.x, (cam.resolution.y + blockSize.y - 1) / blockSize.y); /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray is a (ray, color) pair, where color starts as the // multiplicative identity, white = (1, 1, 1). // * For debugging, you can output your ray directions as colors. // * For each depth: // * Compute one new (ray, color) pair along each path (using scatterRay). // Note that many rays will terminate by hitting a light or hitting // nothing at all. You'll have to decide how to represent your path rays // and how you'll mark terminated rays. // * Color is attenuated (multiplied) by reflections off of any object // surface. 
// * You can debug your ray-scene intersections by displaying various // values as colors, e.g., the first surface normal, the first bounced // ray direction, the first unlit material color, etc. // * Add all of the terminated rays' results into the appropriate pixels. // * Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Finally, handle all of the paths that still haven't terminated. // (Easy way is to make them black or background-colored.) // TODO: perform one iteration of path tracing float tanx = std::tan(cam.fov.x*PI/180); float tany = std::tan(cam.fov.y*PI/180); glm::vec3 right = glm::cross(cam.view, cam.up); glm::vec3 perup = glm::cross(right, cam.view); rayBuilder << <blocksPerGrid, blockSize >> >(cam, dev_rays, tanx, tany, right, perup, pixelcount, iter); checkCUDAError("rayBuilder"); //rayDebug<<<blocksPerGrid,blockSize>>>(cam,dev_image,dev_rays); int compacted = 0; dim3 blockSize1d(64, 1); dim3 blocksPerGrid1d((pixelcount - compacted + blockSize1d.x - 1) / blockSize1d.x, 1); int debug = 4; for (int i = 0; i < traceDepth; i++) { dim3 blockSize1d(64, 1); dim3 blocksPerGrid1d((pixelcount - compacted + blockSize1d.x - 1) / blockSize1d.x, 1); hipLaunchKernelGGL(( slingRays), dim3(blocksPerGrid1d), dim3(blockSize1d), 0, 0, dev_rays, dev_geo, hst_scene->geoms.size(), dev_mat, dev_image, pixelcount, i, iter); checkCUDAError("Loop Fuck"); //insert a streamcompact here } consumeRays << <blocksPerGrid1d, blockSize1d >> >(dev_rays, dev_image, pixelcount); checkCUDAError("Fuck"); /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid), dim3(blockSize), 0, 0, pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU hipMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
27d4d915f90d3f208b3e59e41e9e3f24336fc6c5.cu
#include <cstdio> #include <cuda.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); getchar(); exit(EXIT_FAILURE); } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { return thrust::default_random_engine(utilhash((index + 1) * iter) ^ utilhash(depth)); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene *hst_scene = NULL; static glm::vec3 *dev_image = NULL; static Geom *dev_geo = NULL; static Material *dev_mat = NULL; static Ray *dev_rays = NULL; void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); const int geolements = hst_scene->geoms.size(); const int matlements = hst_scene->materials.size(); cudaMalloc(&dev_geo, geolements * sizeof(Geom)); cudaMalloc(&dev_mat, matlements * sizeof(Material)); cudaMalloc(&dev_rays, pixelcount * sizeof(Ray)); cudaMemcpy(dev_geo, hst_scene->geoms.data(), geolements * sizeof(Geom), cudaMemcpyHostToDevice); cudaMemcpy(dev_mat, hst_scene->materials.data(), matlements * sizeof(Material), cudaMemcpyHostToDevice); checkCUDAError("pathtraceInit"); } void pathtraceFree() { cudaFree(dev_image); // no-op if dev_image is null cudaFree(dev_geo); cudaFree(dev_mat); cudaFree(dev_rays); checkCUDAError("pathtraceFree"); } __global__ void rayBuilder(Camera cam, Ray *rays, float tanx, float tany, glm::vec3 right, glm::vec3 perup, int pixelcount, int iter) { int uidx = (blockIdx.x * blockDim.x) + threadIdx.x; int vidx = (blockIdx.y * blockDim.y) + threadIdx.y; int rayidx = vidx * cam.resolution.x + uidx; thrust::default_random_engine rng = makeSeededRandomEngine(iter, rayidx, 1); thrust::uniform_real_distribution<float> u01(-0.5, 0.5); float result = u01(rng); float u = (2.0f * (uidx + result) / cam.resolution.x - 1.0f); float v = (2.0f * (vidx + result) / cam.resolution.y - 1.0f); if (rayidx < pixelcount) { glm::vec3 eye = cam.position; glm::vec3 pixel = eye + cam.view - tanx*u*right - tany*v*perup; rays[rayidx].origin = 
eye; rays[rayidx].direction = glm::normalize(pixel - eye); rays[rayidx].color = glm::vec3(1.0f); rays[rayidx].pixel = rayidx; } } __global__ void rayDebug(Camera cam, glm::vec3 *image, Ray *rays) { int u = (blockIdx.x * blockDim.x) + threadIdx.x; int v = (blockIdx.y * blockDim.y) + threadIdx.y; int rayidx = v * cam.resolution.x + u; image[rayidx] += glm::abs(rays[rayidx].direction); } __global__ void slingRays(Ray *rays, Geom *geo, int geocount, Material *mats, glm::vec3 *image, int pixelcount, int depth, int iter) { int ridx = blockIdx.x * blockDim.x + threadIdx.x; if (ridx < pixelcount) { Ray r = rays[ridx]; if (glm::length(r.direction) < 0.0001f) { return; } const float t0 = -1.0f; float t1 = INFINITY; bool hit = false; bool light = false; bool outside = true; glm::vec3 pt = glm::vec3(0.0f); glm::vec3 temp_pt = glm::vec3(0.0f); glm::vec3 nml = glm::vec3(0.0f); glm::vec3 temp_nml = glm::vec3(0.0f); Geom ghit; Geom g = geo[0]; for (int i = 0; i < geocount; i++) { g = geo[i]; if (g.type == SPHERE) { float temp = sphereIntersectionTest(g, r, temp_pt, temp_nml, outside); if (t0 < temp && temp < t1) { t1 = temp; pt = temp_pt; nml = temp_nml; hit = true; ghit = g; } } else if (g.type == CUBE) { float temp = boxIntersectionTest(g, r, temp_pt, temp_nml, outside); if (t0 < temp && temp < t1) { t1 = temp; pt = temp_pt; nml = temp_nml; hit = true; ghit = g; } } } if (hit) { float emit = mats[ghit.materialid].emittance; if (emit > 0.0f) { r.color *= mats[ghit.materialid].emittance; r.direction = glm::vec3(0.0f); } else { thrust::default_random_engine rng = makeSeededRandomEngine(iter, ridx, depth); scatterRay(r, outside, r.color, pt, nml, mats[ghit.materialid], rng); } } else { //terminate r.color = glm::vec3(0.1f,0.1f,0.1f); r.direction = glm::vec3(0.0f); } rays[ridx] = r; } } __global__ void consumeRays(Ray *rays, glm::vec3 *image, int pixelcount) { int ridx = blockIdx.x * blockDim.x + threadIdx.x; if (ridx < pixelcount) { Ray r = rays[ridx]; if (glm::length(r.direction) < 0.00001f) { image[r.pixel] += r.color; //printf("%f %f %f \n", r.color.x, r.color.y, r.color.z); } } } /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; const int blockSideLength = 8; const dim3 blockSize(blockSideLength, blockSideLength); const dim3 blocksPerGrid( (cam.resolution.x + blockSize.x - 1) / blockSize.x, (cam.resolution.y + blockSize.y - 1) / blockSize.y); /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray is a (ray, color) pair, where color starts as the // multiplicative identity, white = (1, 1, 1). // * For debugging, you can output your ray directions as colors. // * For each depth: // * Compute one new (ray, color) pair along each path (using scatterRay). // Note that many rays will terminate by hitting a light or hitting // nothing at all. You'll have to decide how to represent your path rays // and how you'll mark terminated rays. // * Color is attenuated (multiplied) by reflections off of any object // surface. 
// * You can debug your ray-scene intersections by displaying various // values as colors, e.g., the first surface normal, the first bounced // ray direction, the first unlit material color, etc. // * Add all of the terminated rays' results into the appropriate pixels. // * Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Finally, handle all of the paths that still haven't terminated. // (Easy way is to make them black or background-colored.) // TODO: perform one iteration of path tracing float tanx = std::tan(cam.fov.x*PI/180); float tany = std::tan(cam.fov.y*PI/180); glm::vec3 right = glm::cross(cam.view, cam.up); glm::vec3 perup = glm::cross(right, cam.view); rayBuilder << <blocksPerGrid, blockSize >> >(cam, dev_rays, tanx, tany, right, perup, pixelcount, iter); checkCUDAError("rayBuilder"); //rayDebug<<<blocksPerGrid,blockSize>>>(cam,dev_image,dev_rays); int compacted = 0; dim3 blockSize1d(64, 1); dim3 blocksPerGrid1d((pixelcount - compacted + blockSize1d.x - 1) / blockSize1d.x, 1); int debug = 4; for (int i = 0; i < traceDepth; i++) { dim3 blockSize1d(64, 1); dim3 blocksPerGrid1d((pixelcount - compacted + blockSize1d.x - 1) / blockSize1d.x, 1); slingRays<<<blocksPerGrid1d, blockSize1d>>>(dev_rays, dev_geo, hst_scene->geoms.size(), dev_mat, dev_image, pixelcount, i, iter); checkCUDAError("Loop Fuck"); //insert a streamcompact here } consumeRays << <blocksPerGrid1d, blockSize1d >> >(dev_rays, dev_image, pixelcount); checkCUDAError("Fuck"); /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering sendImageToPBO<<<blocksPerGrid, blockSize>>>(pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU cudaMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
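The depth loop above leaves an "insert a streamcompact here" placeholder, and the header comment suggests thrust::remove_if. Below is a hedged sketch of one way to compact terminated rays (the ones whose direction slingRays zeroed out); IsTerminated and numActiveRays are my names, and this is not the author's implementation. Terminated rays must have their colors accumulated into the image (e.g. by running consumeRays over the full buffer) before removal, since remove_if discards them.

// The headers <thrust/execution_policy.h> and <thrust/remove.h> are already
// included at the top of this file.
struct IsTerminated {
    __host__ __device__ bool operator()(const Ray &r) const {
        // Same convention as slingRays: a zeroed direction marks a dead ray.
        return glm::length(r.direction) < 0.0001f;
    }
};

// Inside the depth loop, after slingRays and after accumulating terminated rays:
//   Ray *newEnd = thrust::remove_if(thrust::device,
//                                   dev_rays,
//                                   dev_rays + numActiveRays,
//                                   IsTerminated());
//   numActiveRays = newEnd - dev_rays;   // launch the next bounce over fewer rays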
322ac631547f2381b7fda789c65cfad22cb05b3e.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "UpdateSurface.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            hipSurfaceObject_t surf = 1;
            unsigned int width = 1;
            unsigned int height = 1;
            float time = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( UpdateSurface), dim3(gridBlock),dim3(threadBlock), 0, 0, surf,width,height,time);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( UpdateSurface), dim3(gridBlock),dim3(threadBlock), 0, 0, surf,width,height,time);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( UpdateSurface), dim3(gridBlock),dim3(threadBlock), 0, 0, surf,width,height,time);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
322ac631547f2381b7fda789c65cfad22cb05b3e.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "UpdateSurface.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            cudaSurfaceObject_t surf = 1;
            unsigned int width = 1;
            unsigned int height = 1;
            float time = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            UpdateSurface<<<gridBlock,threadBlock>>>(surf,width,height,time);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                UpdateSurface<<<gridBlock,threadBlock>>>(surf,width,height,time);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                UpdateSurface<<<gridBlock,threadBlock>>>(surf,width,height,time);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
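Kernel launches are asynchronous, so the benchmark above reads steady_clock::now() after merely enqueueing 1000 launches; without a synchronization before the second timestamp it can largely measure launch overhead rather than kernel execution time. Below is a hedged sketch of a timing helper that synchronizes inside the measured region; timeLaunchesUs and its parameters are my names, not part of the original benchmark.

// Sketch: time `reps` launches including their execution, then return the
// average wall time per launch in microseconds.
#include <chrono>
#include <cuda_runtime.h>

template <typename LaunchFn>
float timeLaunchesUs(LaunchFn launch, int reps) {
    using namespace std::chrono;
    launch();                          // warm-up
    cudaDeviceSynchronize();
    auto start = steady_clock::now();
    for (int i = 0; i < reps; i++) launch();
    cudaDeviceSynchronize();           // include execution time in the measurement
    auto end = steady_clock::now();
    return duration_cast<duration<float, std::micro>>(end - start).count() / reps;
}

// Usage with the benchmark above (reusing its variables):
//   float us = timeLaunchesUs([&] {
//       UpdateSurface<<<gridBlock, threadBlock>>>(surf, width, height, time);
//   }, 1000);
// cudaEvent_t-based timing (cudaEventRecord / cudaEventElapsedTime) is an
// equivalent alternative that measures GPU time directly.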
05757f1d565cdd14be5a3641cc348f7e13b601a4.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <numeric> namespace cudf { // Trivially copy all members but the children column_device_view::column_device_view(column_view source) : detail::column_device_view_base{source.type(), source.size(), source.head(), source.null_mask(), source.offset()}, _num_children{source.num_children()} { } // Free device memory allocated for children void column_device_view::destroy() { delete this; } namespace { // helper function for column_device_view::create and mutable_column_device::create methods template <typename ColumnView, typename ColumnDeviceView> std::unique_ptr<ColumnDeviceView, std::function<void(ColumnDeviceView*)>> create_device_view_from_view(ColumnView const& source, rmm::cuda_stream_view stream) { size_type num_children = source.num_children(); // First calculate the size of memory needed to hold the // child columns. This is done by calling extent() // for each of the children. auto get_extent = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [&source](auto i) { return ColumnDeviceView::extent(source.child(i)); }); // pad the allocation for aligning the first pointer auto const descendant_storage_bytes = std::accumulate( get_extent, get_extent + num_children, std::size_t{alignof(ColumnDeviceView) - 1}); // A buffer of CPU memory is allocated to hold the ColumnDeviceView // objects. Once filled, the CPU memory is copied to device memory // and then set into the d_children member pointer. std::vector<char> staging_buffer(descendant_storage_bytes); // Each ColumnDeviceView instance may have child objects that // require setting some internal device pointers before being copied // from CPU to device. rmm::device_buffer* const descendant_storage = new rmm::device_buffer(descendant_storage_bytes, stream); auto deleter = [descendant_storage](ColumnDeviceView* v) { v->destroy(); delete descendant_storage; }; std::unique_ptr<ColumnDeviceView, decltype(deleter)> result{ new ColumnDeviceView(source, staging_buffer.data(), descendant_storage->data()), deleter}; // copy the CPU memory with all the children into device memory CUDA_TRY(hipMemcpyAsync(descendant_storage->data(), staging_buffer.data(), descendant_storage->size(), hipMemcpyDefault, stream.value())); stream.synchronize(); return result; } } // namespace // Place any child objects in host memory (h_ptr) and use the device // memory ptr (d_ptr) to set any child object pointers. 
column_device_view::column_device_view(column_view source, void* h_ptr, void* d_ptr) : detail::column_device_view_base{source.type(), source.size(), source.head(), source.null_mask(), source.offset()}, _num_children{source.num_children()} { d_children = detail::child_columns_to_device_array<column_device_view>( source.child_begin(), source.child_end(), h_ptr, d_ptr); } // Construct a unique_ptr that invokes `destroy()` as it's deleter std::unique_ptr<column_device_view, std::function<void(column_device_view*)>> column_device_view::create(column_view source, rmm::cuda_stream_view stream) { size_type num_children = source.num_children(); if (num_children == 0) { // Can't use make_unique since the ctor is protected return std::unique_ptr<column_device_view>(new column_device_view(source)); } return create_device_view_from_view<column_view, column_device_view>(source, stream); } std::size_t column_device_view::extent(column_view const& source) { auto get_extent = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [&source](auto i) { return extent(source.child(i)); }); return std::accumulate( get_extent, get_extent + source.num_children(), sizeof(column_device_view)); } // For use with inplace-new to pre-fill memory to be copied to device mutable_column_device_view::mutable_column_device_view(mutable_column_view source) : detail::column_device_view_base{source.type(), source.size(), source.head(), source.null_mask(), source.offset()}, _num_children{source.num_children()} { } mutable_column_device_view::mutable_column_device_view(mutable_column_view source, void* h_ptr, void* d_ptr) : detail::column_device_view_base{source.type(), source.size(), source.head(), source.null_mask(), source.offset()}, _num_children{source.num_children()} { d_children = detail::child_columns_to_device_array<mutable_column_device_view>( source.child_begin(), source.child_end(), h_ptr, d_ptr); } // Handle freeing children void mutable_column_device_view::destroy() { delete this; } // Construct a unique_ptr that invokes `destroy()` as it's deleter std::unique_ptr<mutable_column_device_view, std::function<void(mutable_column_device_view*)>> mutable_column_device_view::create(mutable_column_view source, rmm::cuda_stream_view stream) { return source.num_children() == 0 ? std::unique_ptr<mutable_column_device_view>(new mutable_column_device_view(source)) : create_device_view_from_view<mutable_column_view, mutable_column_device_view>(source, stream); } std::size_t mutable_column_device_view::extent(mutable_column_view source) { auto get_extent = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [&source](auto i) { return extent(source.child(i)); }); return std::accumulate( get_extent, get_extent + source.num_children(), sizeof(mutable_column_device_view)); } } // namespace cudf
05757f1d565cdd14be5a3641cc348f7e13b601a4.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <numeric> namespace cudf { // Trivially copy all members but the children column_device_view::column_device_view(column_view source) : detail::column_device_view_base{source.type(), source.size(), source.head(), source.null_mask(), source.offset()}, _num_children{source.num_children()} { } // Free device memory allocated for children void column_device_view::destroy() { delete this; } namespace { // helper function for column_device_view::create and mutable_column_device::create methods template <typename ColumnView, typename ColumnDeviceView> std::unique_ptr<ColumnDeviceView, std::function<void(ColumnDeviceView*)>> create_device_view_from_view(ColumnView const& source, rmm::cuda_stream_view stream) { size_type num_children = source.num_children(); // First calculate the size of memory needed to hold the // child columns. This is done by calling extent() // for each of the children. auto get_extent = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [&source](auto i) { return ColumnDeviceView::extent(source.child(i)); }); // pad the allocation for aligning the first pointer auto const descendant_storage_bytes = std::accumulate( get_extent, get_extent + num_children, std::size_t{alignof(ColumnDeviceView) - 1}); // A buffer of CPU memory is allocated to hold the ColumnDeviceView // objects. Once filled, the CPU memory is copied to device memory // and then set into the d_children member pointer. std::vector<char> staging_buffer(descendant_storage_bytes); // Each ColumnDeviceView instance may have child objects that // require setting some internal device pointers before being copied // from CPU to device. rmm::device_buffer* const descendant_storage = new rmm::device_buffer(descendant_storage_bytes, stream); auto deleter = [descendant_storage](ColumnDeviceView* v) { v->destroy(); delete descendant_storage; }; std::unique_ptr<ColumnDeviceView, decltype(deleter)> result{ new ColumnDeviceView(source, staging_buffer.data(), descendant_storage->data()), deleter}; // copy the CPU memory with all the children into device memory CUDA_TRY(cudaMemcpyAsync(descendant_storage->data(), staging_buffer.data(), descendant_storage->size(), cudaMemcpyDefault, stream.value())); stream.synchronize(); return result; } } // namespace // Place any child objects in host memory (h_ptr) and use the device // memory ptr (d_ptr) to set any child object pointers. 
column_device_view::column_device_view(column_view source, void* h_ptr, void* d_ptr) : detail::column_device_view_base{source.type(), source.size(), source.head(), source.null_mask(), source.offset()}, _num_children{source.num_children()} { d_children = detail::child_columns_to_device_array<column_device_view>( source.child_begin(), source.child_end(), h_ptr, d_ptr); } // Construct a unique_ptr that invokes `destroy()` as it's deleter std::unique_ptr<column_device_view, std::function<void(column_device_view*)>> column_device_view::create(column_view source, rmm::cuda_stream_view stream) { size_type num_children = source.num_children(); if (num_children == 0) { // Can't use make_unique since the ctor is protected return std::unique_ptr<column_device_view>(new column_device_view(source)); } return create_device_view_from_view<column_view, column_device_view>(source, stream); } std::size_t column_device_view::extent(column_view const& source) { auto get_extent = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [&source](auto i) { return extent(source.child(i)); }); return std::accumulate( get_extent, get_extent + source.num_children(), sizeof(column_device_view)); } // For use with inplace-new to pre-fill memory to be copied to device mutable_column_device_view::mutable_column_device_view(mutable_column_view source) : detail::column_device_view_base{source.type(), source.size(), source.head(), source.null_mask(), source.offset()}, _num_children{source.num_children()} { } mutable_column_device_view::mutable_column_device_view(mutable_column_view source, void* h_ptr, void* d_ptr) : detail::column_device_view_base{source.type(), source.size(), source.head(), source.null_mask(), source.offset()}, _num_children{source.num_children()} { d_children = detail::child_columns_to_device_array<mutable_column_device_view>( source.child_begin(), source.child_end(), h_ptr, d_ptr); } // Handle freeing children void mutable_column_device_view::destroy() { delete this; } // Construct a unique_ptr that invokes `destroy()` as it's deleter std::unique_ptr<mutable_column_device_view, std::function<void(mutable_column_device_view*)>> mutable_column_device_view::create(mutable_column_view source, rmm::cuda_stream_view stream) { return source.num_children() == 0 ? std::unique_ptr<mutable_column_device_view>(new mutable_column_device_view(source)) : create_device_view_from_view<mutable_column_view, mutable_column_device_view>(source, stream); } std::size_t mutable_column_device_view::extent(mutable_column_view source) { auto get_extent = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [&source](auto i) { return extent(source.child(i)); }); return std::accumulate( get_extent, get_extent + source.num_children(), sizeof(mutable_column_device_view)); } } // namespace cudf
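create_device_view_from_view above builds every child view into one host staging buffer, copies the whole buffer to a single device allocation with one cudaMemcpyAsync, and fixes up child pointers against the device base address before the copy. Below is a hedged, much-simplified sketch of that "stage on host, copy once, patch device pointers" pattern outside of cudf; DeviceNode and packOnDevice are hypothetical names and this is not cudf's actual layout or API.

// Minimal sketch: pack a parent record plus its children into one staging
// buffer, copy it to the device in a single cudaMemcpyAsync, and point the
// parent at its children via device addresses.
#include <cuda_runtime.h>
#include <vector>

struct DeviceNode {            // hypothetical analogue of a device view
    int         value;
    DeviceNode *children;      // device address of the first child
    int         num_children;
};

DeviceNode *packOnDevice(const std::vector<int> &parent_and_children,  // [0] = parent
                         cudaStream_t stream) {
    size_t n     = parent_and_children.size();
    size_t bytes = n * sizeof(DeviceNode);
    std::vector<char> staging(bytes);

    DeviceNode *d_base = nullptr;
    cudaMalloc(&d_base, bytes);

    auto *h_nodes = reinterpret_cast<DeviceNode *>(staging.data());
    for (size_t i = 0; i < n; ++i) {
        h_nodes[i] = DeviceNode{parent_and_children[i], nullptr, 0};
    }
    // The child pointer is computed against the *device* base address even
    // though we write it into host memory -- the same h_ptr/d_ptr trick the
    // constructor above uses.
    h_nodes[0].children     = d_base + 1;
    h_nodes[0].num_children = static_cast<int>(n - 1);

    cudaMemcpyAsync(d_base, staging.data(), bytes, cudaMemcpyDefault, stream.value ? stream : stream);
    cudaStreamSynchronize(stream);      // staging buffer is about to go out of scope
    return d_base;
}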
2ada46700cf28afb5d23cfba0bab09f6f417a9e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include "paddle/phi/kernels/fusion/cutlass/conv2d/conv2d_util.h" namespace phi { namespace fusion { namespace cutlass_internal { struct logical_coord { int n; int c; int h; int w; }; float diff(const half *c, const float *c_baseline, int n) { float max_diff = -1.; for (int i = 0; i < n; i++) { float c_value = __half2float(c[i]); if (std::abs(c_baseline[i] - c_value) > max_diff) { max_diff = std::abs(c_baseline[i] - c_value); } } return max_diff; } __device__ int gpu_nhwc(struct logical_coord shape, struct logical_coord index) { return index.n * shape.h * shape.w * shape.c + index.h * shape.w * shape.c + index.w * shape.c + index.c; } __global__ void naive_conv2d_kernel(const half *input, const half *weight, const half *bias, float *output, int batch, int ic, int ih, int iw, int kh, int kw, int oc, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int oh, int ow, int groups, const half *residual, float alpha, // for leaky_relu OpType op_type) { int M = batch * oh * ow; int N = oc; int kc = ic / groups; int K = kc * kh * kw; int m_i = threadIdx.x + blockIdx.x * blockDim.x; int n_i = threadIdx.y + blockIdx.y * blockDim.y; if (m_i >= M || n_i >= N) return; int batch_i = m_i / (oh * ow); int oh_i = (m_i % (oh * ow)) / ow; int ow_i = (m_i % (oh * ow)) % ow; int oc_i = n_i; int groups_i = (oc_i / (oc / groups)); struct logical_coord weight_shape = {oc, kc, kh, kw}; struct logical_coord input_shape = {batch, ic, ih, iw}; int out_offset = m_i * N + n_i; float *out_ptr = output + out_offset; float sum = 0.f; for (int k_i = 0; k_i < K; k_i++) { int ic_i = k_i / (kh * kw) + groups_i * kc; int kh_i = (k_i % (kh * kw)) / kw; int kw_i = (k_i % (kh * kw)) % kw; struct logical_coord weight_index = {oc_i, k_i / (kh * kw), kh_i, kw_i}; int ih_i = oh_i * stride_h - pad_h + kh_i * dilation_h; int iw_i = ow_i * stride_w - pad_w + kw_i * dilation_w; if (ih_i < 0 || ih_i >= ih) continue; if (iw_i < 0 || iw_i >= iw) continue; struct logical_coord input_index = {batch_i, ic_i, ih_i, iw_i}; const half *weight_ptr = weight + gpu_nhwc(weight_shape, weight_index); const half *in_ptr = input + gpu_nhwc(input_shape, input_index); sum += __half2float(*in_ptr) * __half2float(*weight_ptr); } sum += __half2float(*(bias + oc_i)); float x = sum; switch (op_type) { case CONV2D_BIAS: *out_ptr = x; break; case CONV2D_BIAS_RELU: *out_ptr = x > 0 ? x : 0; break; case CONV2D_BIAS_SILU: *out_ptr = x * (1.f / (1 + exp(-x))); break; case CONV2D_BIAS_ADD_RELU: x += __half2float(*(residual + out_offset)); *out_ptr = x > 0 ? x : 0; break; case CONV2D_BIAS_LEAKY_RELU: *out_ptr = x > 0 ? 
x : (x * alpha); break; default: break; } } float conv2d_diff_gpu(const ConvAllParams &params, OpType op_type) { const half *input = params.input; const half *weight = params.weight; const half *bias = params.bias; half *output = params.output; int batch = params.batch; int ic = params.ic; int ih = params.ih; int iw = params.iw; int kh = params.kh; int kw = params.kw; int oc = params.oc; int pad_h = params.pad_h0; int pad_w = params.pad_w0; int stride_h = params.stride_h; int stride_w = params.stride_w; int dilation_h = params.dilation_h; int dilation_w = params.dilation_w; const half *residual = params.residual; int groups = params.groups; int oh = params.oh; int ow = params.ow; int M = batch * oh * ow; int N = oc; constexpr int blockM = 16; constexpr int blockN = 16; uint3 grid = {(M + blockM - 1) / blockM, (N + blockN - 1) / blockN, 1}; uint3 block = {blockM, blockN, 1}; int output_size = batch * oc * oh * ow; half *output_from_cutlass = reinterpret_cast<half *>(malloc(sizeof(half) * output_size)); hipMemcpy(output_from_cutlass, output, output_size * sizeof(half), hipMemcpyDeviceToHost); float *gpu_output; hipMalloc(&gpu_output, output_size * sizeof(float)); hipLaunchKernelGGL(( naive_conv2d_kernel), dim3(grid), dim3(block), 0, 0, input, weight, bias, gpu_output, batch, ic, ih, iw, kh, kw, oc, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, oh, ow, groups, residual, params.alpha, op_type); float *output_from_gpu = reinterpret_cast<float *>(malloc(sizeof(float) * output_size)); hipMemcpy(output_from_gpu, gpu_output, output_size * sizeof(float), hipMemcpyDeviceToHost); float max_diff = diff(output_from_cutlass, output_from_gpu, output_size); free(output_from_cutlass); free(output_from_gpu); hipFree(gpu_output); return max_diff; } std::string OpType2String(OpType op_type) { switch (op_type) { case CONV2D_BIAS: return "conv2d_bias"; break; case CONV2D_BIAS_RELU: return "conv2d_bias_relu"; break; case CONV2D_BIAS_SILU: return "conv2d_bias_silu"; break; case CONV2D_BIAS_ADD_RELU: return "conv2d_bias_add_relu"; break; case CONV2D_BIAS_LEAKY_RELU: return "conv2d_bias_leaky_relu"; default: break; } return "unnamed_op"; } int ProfileToGetBestConfig( const std::vector<std::function<cutlass::Status(ConvAllParams)>> &all_func, const ConvAllParams &params, OpType op_type) { constexpr int WARMUP = 10; constexpr int REPEAT = 100; float min_time = 100000.f; int min_time_index = -1; for (int i = 0; i < all_func.size(); i++) { cutlass::Status status; auto func = all_func[i]; // When func has large diff, we will make it nullptr. 
if (!func) continue; for (int ii = 0; ii < WARMUP; ii++) { status = func(params); } hipEvent_t beg, end; PADDLE_ENFORCE_GPU_SUCCESS(hipEventCreate(&beg)); PADDLE_ENFORCE_GPU_SUCCESS(hipEventCreate(&end)); PADDLE_ENFORCE_GPU_SUCCESS(hipEventRecord(beg)); for (int ii = 0; ii < REPEAT; ii++) { status = func(params); } PADDLE_ENFORCE_GPU_SUCCESS(hipEventRecord(end)); PADDLE_ENFORCE_GPU_SUCCESS(hipEventSynchronize(end)); float elapsed_time; PADDLE_ENFORCE_GPU_SUCCESS(hipEventElapsedTime(&elapsed_time, beg, end)); if (elapsed_time < min_time && status == cutlass::Status::kSuccess) { min_time = elapsed_time; min_time_index = i; // debug code VLOG(3) << OpType2String(op_type) << ": tactic " << i << " has max diff " << conv2d_diff_gpu(params, op_type) << " compared with baseline," << "cost_time: " << elapsed_time << "ms."; } } if (min_time_index < 0) { PADDLE_THROW( phi::errors::NotFound("Can't find any cutlass config for this %s op.", OpType2String(op_type).c_str())); } return min_time_index; } } // namespace cutlass_internal } // namespace fusion } // namespace phi
2ada46700cf28afb5d23cfba0bab09f6f417a9e2.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include "paddle/phi/kernels/fusion/cutlass/conv2d/conv2d_util.h" namespace phi { namespace fusion { namespace cutlass_internal { struct logical_coord { int n; int c; int h; int w; }; float diff(const half *c, const float *c_baseline, int n) { float max_diff = -1.; for (int i = 0; i < n; i++) { float c_value = __half2float(c[i]); if (std::abs(c_baseline[i] - c_value) > max_diff) { max_diff = std::abs(c_baseline[i] - c_value); } } return max_diff; } __device__ int gpu_nhwc(struct logical_coord shape, struct logical_coord index) { return index.n * shape.h * shape.w * shape.c + index.h * shape.w * shape.c + index.w * shape.c + index.c; } __global__ void naive_conv2d_kernel(const half *input, const half *weight, const half *bias, float *output, int batch, int ic, int ih, int iw, int kh, int kw, int oc, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, int oh, int ow, int groups, const half *residual, float alpha, // for leaky_relu OpType op_type) { int M = batch * oh * ow; int N = oc; int kc = ic / groups; int K = kc * kh * kw; int m_i = threadIdx.x + blockIdx.x * blockDim.x; int n_i = threadIdx.y + blockIdx.y * blockDim.y; if (m_i >= M || n_i >= N) return; int batch_i = m_i / (oh * ow); int oh_i = (m_i % (oh * ow)) / ow; int ow_i = (m_i % (oh * ow)) % ow; int oc_i = n_i; int groups_i = (oc_i / (oc / groups)); struct logical_coord weight_shape = {oc, kc, kh, kw}; struct logical_coord input_shape = {batch, ic, ih, iw}; int out_offset = m_i * N + n_i; float *out_ptr = output + out_offset; float sum = 0.f; for (int k_i = 0; k_i < K; k_i++) { int ic_i = k_i / (kh * kw) + groups_i * kc; int kh_i = (k_i % (kh * kw)) / kw; int kw_i = (k_i % (kh * kw)) % kw; struct logical_coord weight_index = {oc_i, k_i / (kh * kw), kh_i, kw_i}; int ih_i = oh_i * stride_h - pad_h + kh_i * dilation_h; int iw_i = ow_i * stride_w - pad_w + kw_i * dilation_w; if (ih_i < 0 || ih_i >= ih) continue; if (iw_i < 0 || iw_i >= iw) continue; struct logical_coord input_index = {batch_i, ic_i, ih_i, iw_i}; const half *weight_ptr = weight + gpu_nhwc(weight_shape, weight_index); const half *in_ptr = input + gpu_nhwc(input_shape, input_index); sum += __half2float(*in_ptr) * __half2float(*weight_ptr); } sum += __half2float(*(bias + oc_i)); float x = sum; switch (op_type) { case CONV2D_BIAS: *out_ptr = x; break; case CONV2D_BIAS_RELU: *out_ptr = x > 0 ? x : 0; break; case CONV2D_BIAS_SILU: *out_ptr = x * (1.f / (1 + exp(-x))); break; case CONV2D_BIAS_ADD_RELU: x += __half2float(*(residual + out_offset)); *out_ptr = x > 0 ? x : 0; break; case CONV2D_BIAS_LEAKY_RELU: *out_ptr = x > 0 ? 
x : (x * alpha); break; default: break; } } float conv2d_diff_gpu(const ConvAllParams &params, OpType op_type) { const half *input = params.input; const half *weight = params.weight; const half *bias = params.bias; half *output = params.output; int batch = params.batch; int ic = params.ic; int ih = params.ih; int iw = params.iw; int kh = params.kh; int kw = params.kw; int oc = params.oc; int pad_h = params.pad_h0; int pad_w = params.pad_w0; int stride_h = params.stride_h; int stride_w = params.stride_w; int dilation_h = params.dilation_h; int dilation_w = params.dilation_w; const half *residual = params.residual; int groups = params.groups; int oh = params.oh; int ow = params.ow; int M = batch * oh * ow; int N = oc; constexpr int blockM = 16; constexpr int blockN = 16; uint3 grid = {(M + blockM - 1) / blockM, (N + blockN - 1) / blockN, 1}; uint3 block = {blockM, blockN, 1}; int output_size = batch * oc * oh * ow; half *output_from_cutlass = reinterpret_cast<half *>(malloc(sizeof(half) * output_size)); cudaMemcpy(output_from_cutlass, output, output_size * sizeof(half), cudaMemcpyDeviceToHost); float *gpu_output; cudaMalloc(&gpu_output, output_size * sizeof(float)); naive_conv2d_kernel<<<grid, block>>>(input, weight, bias, gpu_output, batch, ic, ih, iw, kh, kw, oc, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, oh, ow, groups, residual, params.alpha, op_type); float *output_from_gpu = reinterpret_cast<float *>(malloc(sizeof(float) * output_size)); cudaMemcpy(output_from_gpu, gpu_output, output_size * sizeof(float), cudaMemcpyDeviceToHost); float max_diff = diff(output_from_cutlass, output_from_gpu, output_size); free(output_from_cutlass); free(output_from_gpu); cudaFree(gpu_output); return max_diff; } std::string OpType2String(OpType op_type) { switch (op_type) { case CONV2D_BIAS: return "conv2d_bias"; break; case CONV2D_BIAS_RELU: return "conv2d_bias_relu"; break; case CONV2D_BIAS_SILU: return "conv2d_bias_silu"; break; case CONV2D_BIAS_ADD_RELU: return "conv2d_bias_add_relu"; break; case CONV2D_BIAS_LEAKY_RELU: return "conv2d_bias_leaky_relu"; default: break; } return "unnamed_op"; } int ProfileToGetBestConfig( const std::vector<std::function<cutlass::Status(ConvAllParams)>> &all_func, const ConvAllParams &params, OpType op_type) { constexpr int WARMUP = 10; constexpr int REPEAT = 100; float min_time = 100000.f; int min_time_index = -1; for (int i = 0; i < all_func.size(); i++) { cutlass::Status status; auto func = all_func[i]; // When func has large diff, we will make it nullptr. 
if (!func) continue; for (int ii = 0; ii < WARMUP; ii++) { status = func(params); } cudaEvent_t beg, end; PADDLE_ENFORCE_GPU_SUCCESS(cudaEventCreate(&beg)); PADDLE_ENFORCE_GPU_SUCCESS(cudaEventCreate(&end)); PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(beg)); for (int ii = 0; ii < REPEAT; ii++) { status = func(params); } PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(end)); PADDLE_ENFORCE_GPU_SUCCESS(cudaEventSynchronize(end)); float elapsed_time; PADDLE_ENFORCE_GPU_SUCCESS(cudaEventElapsedTime(&elapsed_time, beg, end)); if (elapsed_time < min_time && status == cutlass::Status::kSuccess) { min_time = elapsed_time; min_time_index = i; // debug code VLOG(3) << OpType2String(op_type) << ": tactic " << i << " has max diff " << conv2d_diff_gpu(params, op_type) << " compared with baseline," << "cost_time: " << elapsed_time << "ms."; } } if (min_time_index < 0) { PADDLE_THROW( phi::errors::NotFound("Can't find any cutlass config for this %s op.", OpType2String(op_type).c_str())); } return min_time_index; } } // namespace cutlass_internal } // namespace fusion } // namespace phi
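ProfileToGetBestConfig above warms each candidate up, times REPEAT launches with CUDA events, and keeps the fastest candidate that reported success. Below is a hedged, standalone sketch of that event-timing pattern for an arbitrary list of callables; pickFastest is my name, error handling is simplified to a plain cudaGetLastError check instead of PADDLE_ENFORCE_GPU_SUCCESS, and the cutlass::Status check is dropped.

// Sketch: profile a list of candidate launch functions and return the index
// of the fastest one, or -1 if none succeeded.
#include <cuda_runtime.h>
#include <functional>
#include <vector>

int pickFastest(const std::vector<std::function<void()>> &candidates,
                int warmup = 10, int repeat = 100) {
    float best_ms  = 1e30f;
    int   best_idx = -1;
    for (size_t i = 0; i < candidates.size(); ++i) {
        if (!candidates[i]) continue;                 // skip disabled candidates
        for (int w = 0; w < warmup; ++w) candidates[i]();

        cudaEvent_t beg, end;
        cudaEventCreate(&beg);
        cudaEventCreate(&end);
        cudaEventRecord(beg);
        for (int r = 0; r < repeat; ++r) candidates[i]();
        cudaEventRecord(end);
        cudaEventSynchronize(end);

        float ms = 0.f;
        cudaEventElapsedTime(&ms, beg, end);
        cudaEventDestroy(beg);
        cudaEventDestroy(end);

        if (cudaGetLastError() == cudaSuccess && ms < best_ms) {
            best_ms  = ms;
            best_idx = static_cast<int>(i);
        }
    }
    return best_idx;
}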
3f641995fccfbf3537873bebc4e459f44f56ab4c.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "../common/book.h" #define N (1024*1024) #define FULL_DATA_SIZE (N*20) __global__ void kernel( int *a, int *b, int *c ) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { int idx1 = (idx + 1) % 256; int idx2 = (idx + 2) % 256; float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f; float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f; c[idx] = (as + bs) / 2; } } int main( void ) { hipDeviceProp_t prop; int whichDevice; hipGetDevice( &whichDevice ); hipGetDeviceProperties( &prop, whichDevice ); if (!prop.deviceOverlap) { printf( "Device will not handle overlaps, so no " "speed up from streams\n" ); return 0; } hipEvent_t start, stop; float elapsedTime; hipEventCreate( &start ); hipEventCreate( &stop ); hipEventRecord( start, 0 ); hipStream_t stream0, stream1; hipStreamCreate( &stream0 ); hipStreamCreate( &stream1 ); int *host_a, *host_b, *host_c; int *dev_a0, *dev_b0, *dev_c0; int *dev_a1, *dev_b1, *dev_c1; hipMalloc( (void**)&dev_a0,N * sizeof(int) ); hipMalloc( (void**)&dev_b0,N * sizeof(int) ); hipMalloc( (void**)&dev_c0,N * sizeof(int) ); hipMalloc( (void**)&dev_a1,N * sizeof(int) ); hipMalloc( (void**)&dev_b1,N * sizeof(int) ); hipMalloc( (void**)&dev_c1,N * sizeof(int) ); hipHostMalloc( (void**)&host_a,FULL_DATA_SIZE*sizeof(int),hipHostMallocDefault ); hipHostMalloc( (void**)&host_b,FULL_DATA_SIZE*sizeof(int),hipHostMallocDefault ); hipHostMalloc( (void**)&host_c,FULL_DATA_SIZE*sizeof(int),hipHostMallocDefault ); for (int i=0; i<FULL_DATA_SIZE; i++) { host_a[i] = rand(); host_b[i] = rand(); } // now loop over full data, in bite-sized chunks for (int i=0; i<FULL_DATA_SIZE; i+= N*2) { hipMemcpyAsync( dev_a0, host_a+i,N * sizeof(int),hipMemcpyHostToDevice, stream0 ); hipMemcpyAsync( dev_a1, host_a+i+N,N * sizeof(int),hipMemcpyHostToDevice, stream1 ); hipMemcpyAsync( dev_b0, host_b+i,N * sizeof(int),hipMemcpyHostToDevice, stream0 ); hipMemcpyAsync( dev_b1, host_b+i+N,N * sizeof(int),hipMemcpyHostToDevice, stream1 ); hipLaunchKernelGGL(( kernel), dim3(N/256),dim3(256),0,stream0, dev_a0, dev_b0, dev_c0 ); hipLaunchKernelGGL(( kernel), dim3(N/256),dim3(256),0,stream1, dev_a1, dev_b1, dev_c1 ); hipMemcpyAsync( host_c+i, dev_c0,N * sizeof(int),hipMemcpyDeviceToHost, stream0 ); hipMemcpyAsync( host_c+i+N, dev_c1,N * sizeof(int),hipMemcpyDeviceToHost, stream1 ); } hipStreamSynchronize( stream0 ); hipStreamSynchronize( stream1 ); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &elapsedTime, start, stop ); printf( "Time taken: %3.1f ms\n", elapsedTime ); hipHostFree( host_a ); hipHostFree( host_b ); hipHostFree( host_c ); hipFree( dev_a0 ); hipFree( dev_b0 ); hipFree( dev_c0 ); hipFree( dev_a1 ); hipFree( dev_b1 ); hipFree( dev_c1 ); hipStreamDestroy( stream0 ); hipStreamDestroy( stream1 ); return 0; }
3f641995fccfbf3537873bebc4e459f44f56ab4c.cu
#include <cuda.h> #include "../common/book.h" #define N (1024*1024) #define FULL_DATA_SIZE (N*20) __global__ void kernel( int *a, int *b, int *c ) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { int idx1 = (idx + 1) % 256; int idx2 = (idx + 2) % 256; float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f; float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f; c[idx] = (as + bs) / 2; } } int main( void ) { cudaDeviceProp prop; int whichDevice; cudaGetDevice( &whichDevice ); cudaGetDeviceProperties( &prop, whichDevice ); if (!prop.deviceOverlap) { printf( "Device will not handle overlaps, so no " "speed up from streams\n" ); return 0; } cudaEvent_t start, stop; float elapsedTime; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start, 0 ); cudaStream_t stream0, stream1; cudaStreamCreate( &stream0 ); cudaStreamCreate( &stream1 ); int *host_a, *host_b, *host_c; int *dev_a0, *dev_b0, *dev_c0; int *dev_a1, *dev_b1, *dev_c1; cudaMalloc( (void**)&dev_a0,N * sizeof(int) ); cudaMalloc( (void**)&dev_b0,N * sizeof(int) ); cudaMalloc( (void**)&dev_c0,N * sizeof(int) ); cudaMalloc( (void**)&dev_a1,N * sizeof(int) ); cudaMalloc( (void**)&dev_b1,N * sizeof(int) ); cudaMalloc( (void**)&dev_c1,N * sizeof(int) ); cudaHostAlloc( (void**)&host_a,FULL_DATA_SIZE*sizeof(int),cudaHostAllocDefault ); cudaHostAlloc( (void**)&host_b,FULL_DATA_SIZE*sizeof(int),cudaHostAllocDefault ); cudaHostAlloc( (void**)&host_c,FULL_DATA_SIZE*sizeof(int),cudaHostAllocDefault ); for (int i=0; i<FULL_DATA_SIZE; i++) { host_a[i] = rand(); host_b[i] = rand(); } // now loop over full data, in bite-sized chunks for (int i=0; i<FULL_DATA_SIZE; i+= N*2) { cudaMemcpyAsync( dev_a0, host_a+i,N * sizeof(int),cudaMemcpyHostToDevice, stream0 ); cudaMemcpyAsync( dev_a1, host_a+i+N,N * sizeof(int),cudaMemcpyHostToDevice, stream1 ); cudaMemcpyAsync( dev_b0, host_b+i,N * sizeof(int),cudaMemcpyHostToDevice, stream0 ); cudaMemcpyAsync( dev_b1, host_b+i+N,N * sizeof(int),cudaMemcpyHostToDevice, stream1 ); kernel<<<N/256,256,0,stream0>>>( dev_a0, dev_b0, dev_c0 ); kernel<<<N/256,256,0,stream1>>>( dev_a1, dev_b1, dev_c1 ); cudaMemcpyAsync( host_c+i, dev_c0,N * sizeof(int),cudaMemcpyDeviceToHost, stream0 ); cudaMemcpyAsync( host_c+i+N, dev_c1,N * sizeof(int),cudaMemcpyDeviceToHost, stream1 ); } cudaStreamSynchronize( stream0 ); cudaStreamSynchronize( stream1 ); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &elapsedTime, start, stop ); printf( "Time taken: %3.1f ms\n", elapsedTime ); cudaFreeHost( host_a ); cudaFreeHost( host_b ); cudaFreeHost( host_c ); cudaFree( dev_a0 ); cudaFree( dev_b0 ); cudaFree( dev_c0 ); cudaFree( dev_a1 ); cudaFree( dev_b1 ); cudaFree( dev_c1 ); cudaStreamDestroy( stream0 ); cudaStreamDestroy( stream1 ); return 0; }
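The program above walks the full data in chunks of 2*N, interleaving host-to-device copies, kernels, and device-to-host copies across two streams so transfers in one stream can overlap compute in the other (hence the prop.deviceOverlap check and the pinned cudaHostAlloc buffers). Below is a hedged sketch generalizing the same chunked pattern to an arbitrary number of streams; NUM_STREAMS, processChunked, and the per-stream issue order are my choices, and the sketch reuses N and kernel from the program above.

// Sketch: chunked copy/compute/copy over NUM_STREAMS streams. Host buffers
// must be pinned (cudaHostAlloc) for the async copies to actually overlap.
#define NUM_STREAMS 4

void processChunked(const int *host_a, const int *host_b, int *host_c,
                    int fullSize /* assumed multiple of N * NUM_STREAMS */) {
    cudaStream_t streams[NUM_STREAMS];
    int *d_a[NUM_STREAMS], *d_b[NUM_STREAMS], *d_c[NUM_STREAMS];
    for (int s = 0; s < NUM_STREAMS; s++) {
        cudaStreamCreate(&streams[s]);
        cudaMalloc(&d_a[s], N * sizeof(int));
        cudaMalloc(&d_b[s], N * sizeof(int));
        cudaMalloc(&d_c[s], N * sizeof(int));
    }
    for (int i = 0; i < fullSize; i += N * NUM_STREAMS) {
        for (int s = 0; s < NUM_STREAMS; s++) {
            int off = i + s * N;
            cudaMemcpyAsync(d_a[s], host_a + off, N * sizeof(int), cudaMemcpyHostToDevice, streams[s]);
            cudaMemcpyAsync(d_b[s], host_b + off, N * sizeof(int), cudaMemcpyHostToDevice, streams[s]);
            kernel<<<N / 256, 256, 0, streams[s]>>>(d_a[s], d_b[s], d_c[s]);
            cudaMemcpyAsync(host_c + off, d_c[s], N * sizeof(int), cudaMemcpyDeviceToHost, streams[s]);
        }
    }
    for (int s = 0; s < NUM_STREAMS; s++) {
        cudaStreamSynchronize(streams[s]);
        cudaStreamDestroy(streams[s]);
        cudaFree(d_a[s]); cudaFree(d_b[s]); cudaFree(d_c[s]);
    }
}

The inner loop here issues each stream's work depth-first (copies, kernel, copy back per stream) rather than the interleaved order used above; both orderings target the same copy/compute overlap, and which wins depends on the device's copy-engine count.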
b29eac4718746c82f4f953c83a26109e52532215.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void adagradKernel ( int numberIterations, int* parameterIndices, int* counts, int dimension, float* parameters, float* gradient, float learningRate, float* history, float epsilon) { int updateIndex = blockIdx.x; int parameterIndex = parameterIndices[updateIndex]; int count = counts[updateIndex]; if(parameterIndex != -1 && count > 0) { float scalingFactor = 1.0f / (float)count; int startEntryIndex = (blockIdx.y * blockDim.x + threadIdx.x) * numberIterations; int firstParameterEntryIndex = parameterIndex * dimension; int startParameterEntryIndex = firstParameterEntryIndex + startEntryIndex; int startGradientEntryIndex = updateIndex * dimension + startEntryIndex; int exclusiveEndParameterEntryIndex = min(startParameterEntryIndex + numberIterations, firstParameterEntryIndex + dimension); int parameterEntryIndex = startParameterEntryIndex; int gradientEntryIndex = startGradientEntryIndex; while(parameterEntryIndex < exclusiveEndParameterEntryIndex) { float scaledDerivative = scalingFactor * gradient[gradientEntryIndex]; float updatedHistory = history[parameterEntryIndex] + scaledDerivative * scaledDerivative; history[parameterEntryIndex] = updatedHistory; float adaptedLearningRate = learningRate / (sqrtf(updatedHistory) + epsilon); float update = adaptedLearningRate * scalingFactor * gradient[gradientEntryIndex]; parameters[parameterEntryIndex] -= update; parameterEntryIndex++; gradientEntryIndex++; } } }
b29eac4718746c82f4f953c83a26109e52532215.cu
#include "includes.h" __global__ void adagradKernel ( int numberIterations, int* parameterIndices, int* counts, int dimension, float* parameters, float* gradient, float learningRate, float* history, float epsilon) { int updateIndex = blockIdx.x; int parameterIndex = parameterIndices[updateIndex]; int count = counts[updateIndex]; if(parameterIndex != -1 && count > 0) { float scalingFactor = 1.0f / (float)count; int startEntryIndex = (blockIdx.y * blockDim.x + threadIdx.x) * numberIterations; int firstParameterEntryIndex = parameterIndex * dimension; int startParameterEntryIndex = firstParameterEntryIndex + startEntryIndex; int startGradientEntryIndex = updateIndex * dimension + startEntryIndex; int exclusiveEndParameterEntryIndex = min(startParameterEntryIndex + numberIterations, firstParameterEntryIndex + dimension); int parameterEntryIndex = startParameterEntryIndex; int gradientEntryIndex = startGradientEntryIndex; while(parameterEntryIndex < exclusiveEndParameterEntryIndex) { float scaledDerivative = scalingFactor * gradient[gradientEntryIndex]; float updatedHistory = history[parameterEntryIndex] + scaledDerivative * scaledDerivative; history[parameterEntryIndex] = updatedHistory; float adaptedLearningRate = learningRate / (sqrtf(updatedHistory) + epsilon); float update = adaptedLearningRate * scalingFactor * gradient[gradientEntryIndex]; parameters[parameterEntryIndex] -= update; parameterEntryIndex++; gradientEntryIndex++; } } }
b91f8cd40743a6dadc65b0a9d58913dea08dc967.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // for old version titan x, the same as 1080 // pycuda._driver.device_attribute.MAX_THREADS_PER_BLOCK: 1024 // pycuda._driver.device_attribute.MAX_BLOCK_DIM_X: 1024 // pycuda._driver.device_attribute.MAX_BLOCK_DIM_Y: 1024 // pycuda._driver.device_attribute.MAX_BLOCK_DIM_Z: 64 // pycuda._driver.device_attribute.MAX_GRID_DIM_X: 2147483647 // pycuda._driver.device_attribute.MAX_GRID_DIM_Y: 65535 // pycuda._driver.device_attribute.MAX_GRID_DIM_Z: 65535 __device__ float integration(float *data, int length, int channel_amount) { } __global__ void remove_empty(float *inds, int *anchors, float *view, int *anchors_shape, int *view_shape) { }
b91f8cd40743a6dadc65b0a9d58913dea08dc967.cu
#include "cuda.h" // for old version titan x, the same as 1080 // pycuda._driver.device_attribute.MAX_THREADS_PER_BLOCK: 1024 // pycuda._driver.device_attribute.MAX_BLOCK_DIM_X: 1024 // pycuda._driver.device_attribute.MAX_BLOCK_DIM_Y: 1024 // pycuda._driver.device_attribute.MAX_BLOCK_DIM_Z: 64 // pycuda._driver.device_attribute.MAX_GRID_DIM_X: 2147483647 // pycuda._driver.device_attribute.MAX_GRID_DIM_Y: 65535 // pycuda._driver.device_attribute.MAX_GRID_DIM_Z: 65535 __device__ float integration(float *data, int length, int channel_amount) { } __global__ void remove_empty(float *inds, int *anchors, float *view, int *anchors_shape, int *view_shape) { }
d418a8e5c66155fbe4b4258b9e711988838a9a16.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define EIGEN_USE_GPU #include "kdtree.hpp" #include "nndistance.hpp" #include "cutils.cuh" #include "tf_kdtree.hpp" #include <thrust/copy.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> const int local_dist_buf_size = 256; //TODO: Sort and break for possible additional speedup template <typename T, typename T_calc, dim_t dims> __device__ void compQuadrDistLeafPartitionBlockwise(const Vec<T, dims>& point, const PartitionLeaf<T, dims>& partition_leaf, T* local_dist_buf, PingPongBuffer<T>& best_dist_pp, PingPongBuffer<point_i_t>& best_knn_pp, const point_i_knn_t nr_nns_searches, T& worst_dist) { //printf("Before dereferencing\n"); //2D indices const int block_size = blockDim.x * blockDim.y; // * blockDim.z; const int tidx = threadIdx.y * blockDim.x + threadIdx.x; /*printf("compQuadrDistLeafPartition: %x, ", partition_leaf.data); printf("%d, ", partition_leaf.nr_points); printf("%d\n", partition_leaf.offset);*/ const Vec<T, dims>* partition_data = reinterpret_cast<Vec<T, dims>*>(partition_leaf.data); const point_i_t partition_size = partition_leaf.nr_points; const point_i_t partition_offset = partition_leaf.offset; //assert(local_dist_buf_size >= partition_size); assert(partition_size > 0); const auto nr_buf_runs = (partition_size - 1) / local_dist_buf_size + 1; for(size_t buf_run_i = 0; buf_run_i < nr_buf_runs; buf_run_i++) { const auto remaining_partition_size = partition_size - (buf_run_i * local_dist_buf_size); const auto current_length = min(static_cast<int>(remaining_partition_size), local_dist_buf_size); compDists<T, dims>(point, partition_data + (buf_run_i * local_dist_buf_size), current_length, local_dist_buf); __syncthreads(); for(point_i_t ref_i = 0; ref_i < current_length; ref_i++) { T* best_dist_cur = best_dist_pp.getCurrentSlot(); point_i_t* best_knn_cur = best_knn_pp.getCurrentSlot(); T* pong_dist = best_dist_pp.getPongSlot(); point_i_t* pong_knn = best_knn_pp.getPongSlot(); //worst_dist = best_dist_cur[nr_nns_searches - 1]; const T_calc calc_dist = local_dist_buf[ref_i]; if(calc_dist < worst_dist) { const auto insertion_idx = knnInsertionDynamic<T_calc, false>(calc_dist, best_dist_cur, nr_nns_searches); assert(insertion_idx < nr_nns_searches); assert(worst_dist == best_dist_cur[nr_nns_searches - 1]); insertAndShiftArrayRight(best_dist_cur, pong_dist, nr_nns_searches, calc_dist, insertion_idx, worst_dist); insertAndShiftArrayRight<point_i_t>(best_knn_cur, pong_knn, nr_nns_searches, ref_i + partition_offset + (buf_run_i * local_dist_buf_size), insertion_idx); { best_dist_pp.increment(); best_knn_pp.increment(); } } __syncthreads(); } } worst_dist = best_dist_pp.getCurrentSlot()[nr_nns_searches - 1]; } template <typename T, dim_t dims> PartitionInfoDevice<T, dims>* copyPartitionToGPU(const PartitionInfo<T, dims>& partition_info) { std::array<T, dims>* structured_points_d = copyArrayToDevice(partition_info.structured_points, partition_info.nr_points); std::vector<PartitionLeaf<T, dims>> leaves_copy(partition_info.leaves, partition_info.leaves + partition_info.nr_leaves); for(auto& leaf : leaves_copy) leaf.data = structured_points_d + leaf.offset; const auto& partitions = partition_info.partitions; Partition<T>* partitions_d = copyArrayToDevice(partitions, partition_info.nr_partitions); const auto& leaves = partition_info.leaves; PartitionLeaf<T, dims>* leaves_d = copyArrayToDevice(leaves_copy.data(), partition_info.nr_leaves); point_i_t* shuffled_inds_d = 
copyArrayToDevice(partition_info.shuffled_inds, partition_info.nr_points); PartitionInfoDevice<T, dims> partition_info_tmp(partition_info); partition_info_tmp.partitions = partitions_d; partition_info_tmp.leaves = leaves_d; partition_info_tmp.levels = partition_info.levels; partition_info_tmp.structured_points = structured_points_d; partition_info_tmp.shuffled_inds = shuffled_inds_d; PartitionInfoDevice<T, dims>* partition_info_d; allocGPUMemory(&partition_info_d, sizeof(PartitionInfo<T, dims>)); gpuErrchk(hipMemcpy(partition_info_d, &partition_info_tmp, sizeof(PartitionInfo<T, dims>), hipMemcpyHostToDevice)); #ifndef NDEBUG gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); #endif return partition_info_d; } //Just necessary for the C++ implementation. Tensorflow already has everything on the GPU template <typename T, dim_t dims> std::tuple<T*, point_i_t*, T*> copyData(const std::vector<T>& result_dists, const std::vector<point_i_t>& result_idx, const std::vector<std::array<T, dims>>& points_query) { T* result_dists_d = copyArrayToDevice(result_dists.data(), result_dists.size()); point_i_t* result_idx_d = copyArrayToDevice(result_idx.data(), result_idx.size()); T* points_query_d = reinterpret_cast<T*>(copyArrayToDevice(points_query.data(), points_query.size())); #ifndef NDEBUG gpuErrchk( hipPeekAtLastError() ); //gpuErrchk( hipDeviceSynchronize() ); #endif return std::make_tuple(result_dists_d, result_idx_d, points_query_d); } //Just necessary for the C++ implementation. Tensorflow already has everything on the GPU template <typename T> std::tuple<T*, point_i_t*> copyDataBackToHost(const T* result_dists, const point_i_knn_t* result_idx, const size_t nr_query, const uint32_t nr_nns_searches) { T* result_dists_h = copyArrayToHost(result_dists, nr_query * nr_nns_searches); point_i_t* result_idx_h = copyArrayToHost(result_idx, nr_query * nr_nns_searches); #ifndef NDEBUG gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); #endif return std::make_tuple(result_dists_h, result_idx_h); } template std::tuple<float*, point_i_t*> copyDataBackToHost(const float* result_dists, const point_i_knn_t* result_idx, const size_t nr_query, const uint32_t nr_nns_searches); template std::tuple<double*, point_i_t*> copyDataBackToHost(const double* result_dists, const point_i_knn_t* result_idx, const size_t nr_query, const uint32_t nr_nns_searches); /** * @brief Frees the allocated GPU memory for a single KD-Tree * * @tparam T precision type of the KD-Tree * @tparam dims Dimensionality of the KD-Tree (usually 3) * @param partition_info Pointer to the device memory holding the KD-Tree information */ template <typename T, dim_t dims> void freePartitionFromGPU(PartitionInfoDevice<T, dims>* partition_info) { PartitionInfoDevice<T, dims>* local = reinterpret_cast<PartitionInfoDevice<T, dims>*>(malloc(sizeof(PartitionInfoDevice<T, dims>))); gpuErrchk(hipMemcpy(local, partition_info, sizeof(PartitionInfo<T, dims>), hipMemcpyDeviceToHost)); freeGPUMemory(local->partitions); freeGPUMemory(local->leaves); freeGPUMemory(local->structured_points); freeGPUMemory(local->shuffled_inds); freeGPUMemory(partition_info); #ifndef NDEBUG gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); #endif free(local); } /** * @brief * * @tparam T * @tparam T_calc * @tparam dims * @param tree * @param point * @param point_proj * @param current_worst_dist * @return NodeDirection Holds the direction in which the tree should be further traversed */ template <typename T, typename T_calc, 
dim_t dims> __device__ NodeDirection traverseTree(TreeTraversal<T, dims>& tree, const Vec<T, dims>& point, Vec<T, dims>& point_proj, const T current_worst_dist) { //printf("traverseTree begin\n"); const NodeTag tag = tree.getCurrentTagBinary(); //printf("Node Tag %d\n", tag); //printf("Lin Ind %d\n", tree.current_lin_ind); const auto& current_partition = tree.getCurrentConstPartition(); //printf("Fetched partition %d/%f\n", current_partition.axis_split, current_partition.median); const auto current_level = tree.getCurrentLevel(); const auto current_axis = current_partition.axis_split; const bool lower_than_median = point[current_axis] < current_partition.median; //printf("Selecting new leaf\n"); //All branches of the node were visited if(tag == NodeTag::left_right_visited) { if(current_level == 0) return NodeDirection::finished; //Searched everything, quit else { point_proj[current_axis] = point[current_axis]; return NodeDirection::up; //Search upper levels, nothing to do here } } //Arrived at uncomputed leaves if(tree.isLeafParent()) { assert(tag == NodeTag::uncharted); tree.setCurrentNodeTagBinary(NodeTag::left_right_visited); return NodeDirection::finished; } else { //Node has not yet been used if(tag == NodeTag::uncharted) { if(lower_than_median) { tree.setCurrentNodeTagBinary(NodeTag::left_visited); return NodeDirection::left; } else { tree.setCurrentNodeTagBinary(NodeTag::right_visited); return NodeDirection::right; } } else //Node has been used in the correct side, now we use the non-matching site: Project and test if descent is necessary { assert(!lower_than_median || tag == NodeTag::left_visited); assert(lower_than_median || tag == NodeTag::right_visited); tree.setCurrentNodeTagBinary(NodeTag::left_right_visited); //Either way we finish all nodes here if(partitionNecessary<T, T_calc, dims>(point, point_proj, current_partition, current_worst_dist)) { point_proj[current_axis] = current_partition.median; return lower_than_median ? NodeDirection::right : NodeDirection::left; } else { point_proj[current_axis] = point[current_axis]; return (current_level != 0 ? 
NodeDirection::up : NodeDirection::finished); } } } } template <typename T, typename T_calc, dim_t dims> inline __device__ void findNextLeaf(TreeTraversal<T, dims>& tree, const Vec<T, dims>& point, Vec<T, dims>& point_proj, const T current_worst_dist) { NodeDirection new_dir; while((new_dir = traverseTree<T, T_calc, dims>(tree, point, point_proj, current_worst_dist)) != NodeDirection::finished) { switch(new_dir) { case NodeDirection::up: tree.moveToParent(); break; case NodeDirection::left: tree.moveToLeftChild(); break; case NodeDirection::right: tree.moveToRightChild(); break; } } } const int max_nr_nodes = 2048*4; static_assert(max_nr_nodes % 4 == 0, "Alignment off, since 4 nodes fit into a byte"); const int max_nr_nns_searches = 128; template <typename T, typename T_calc, dim_t dims> __global__ void KDTreeKernel(PartitionInfoDevice<T, dims>* partition_info, const point_i_t nr_query, const Vec<T, dims>* points_query, T* all_best_dists_d, point_i_knn_t* all_best_i_d, const point_i_knn_t nr_nns_searches) { assert(nr_nns_searches <= partition_info->nr_points); const auto nr_partitions = partition_info->nr_partitions; const auto nr_leaves = partition_info->nr_leaves; //printf("%d, %d\n", nr_partitions, nr_leaves); //2D indices const auto grid_size = gridDim.x * gridDim.y; const auto blockidx = blockIdx.x + blockIdx.y*gridDim.x; const auto block_size = blockDim.x * blockDim.y; // * blockDim.z; const auto tidx = threadIdx.y * blockDim.x + threadIdx.x; //const auto global_start_idx = tidx + blockidx * grid_size; //extern __shared__ char* shared_mem; //__shared__ Vec<T, dims> buffered_query_points[nr_buffered_query_points]; //__shared__ Vec<T, dims> buffered_query_points_proj[nr_buffered_query_points]; //__shared__ tree_ind_t leaf_inds[nr_buffered_leaf_inds]; __shared__ point_i_t buffered_knn[2*max_nr_nns_searches]; __shared__ T buffered_dists[2*max_nr_nns_searches]; __shared__ TreeTraversal<T, dims> tree[1]; __shared__ NodeTag tags[max_nr_nodes/4]; PingPongBuffer<T> best_dist_pp[1]; PingPongBuffer<point_i_t> best_knn_pp; __shared__ T local_dist_buf[local_dist_buf_size]; __shared__ Vec<T, dims> point_proj[1]; __shared__ T worst_dist_[1]; __shared__ bool off_leaf_necessary[1]; //TODO: Watch out for alignment //if(tidx == 0) // local_dist_buf_pointer[0] = new T[local_dist_buf_size]; //TODO: Dynamic const auto nr_nodes = partition_info->nr_partitions; assert(max_nr_nodes >= nr_nodes); assert(nr_nns_searches <= max_nr_nns_searches); tree->partition_info = reinterpret_cast<PartitionInfo<T, dims>*>(partition_info); tree->visited_info = tags; //new NodeTag[tree->partition_info->nr_nodes]; //tree->resetPositionAndTags(); //T worst_dist = INFINITY; auto& worst_dist = worst_dist_[0]; best_dist_pp->buffers[0] = buffered_dists; best_dist_pp->buffers[1] = buffered_dists + nr_nns_searches; best_knn_pp.buffers[0] = buffered_knn; best_knn_pp.buffers[1] = buffered_knn + nr_nns_searches; for(auto j = blockidx; j < nr_query; j += grid_size) { //Fetch vars from global memory const Vec<T, dims> point = points_query[j]; *point_proj = points_query[j]; if(tidx == 0) worst_dist = INFINITY; __syncthreads(); //Buffer everything in shared memory and reset the tree fillKernel<T>(buffered_dists, buffered_dists + 2*nr_nns_searches, INFINITY); assert(nr_nodes > 0); fillKernel<NodeTag>(tags, tags + ((nr_nodes - 1)/4) + 1, NodeTag::uncharted); if(tidx == 0) { tree->current_lin_ind = tree->current_level = 0; } //Fetch the current leaf and descent __syncthreads(); if(tidx == 0) findNextLeaf<T, T_calc, dims>(*tree, point, 
point_proj[0], worst_dist); __syncthreads(); do { assert(tree->getCurrentLevel() == tree->getTotalLevels() - 1); assert(tree->isLeafParent()); const auto current_partition = tree->getCurrentConstPartition(); const auto current_axis = current_partition.axis_split; const bool lower_than_median = point[current_axis] < current_partition.median; const auto leaf = (lower_than_median ? tree->getLeftLeaf() : tree->getRightLeaf()); //Now compute each leaf with all threads in the current block compQuadrDistLeafPartitionBlockwise<T, T_calc, dims>(point, leaf, local_dist_buf, *best_dist_pp, best_knn_pp, nr_nns_searches, worst_dist); if(tidx == 0) { off_leaf_necessary[0] = partitionNecessary<T, T_calc, dims>(point, point_proj[0], current_partition, worst_dist); } __syncthreads(); if(off_leaf_necessary[0]) { //printf("Off partition necessary\n"); const PartitionLeaf<T, dims>& leaf = (!lower_than_median ? tree->getLeftLeaf() : tree->getRightLeaf()); //printf("Comp Dist 2\n"); compQuadrDistLeafPartitionBlockwise<T, T_calc, dims>(point, leaf, local_dist_buf, *best_dist_pp, best_knn_pp, nr_nns_searches, worst_dist); } //__syncthreads(); if(tidx == 0) findNextLeaf<T, T_calc, dims>(*tree, point, point_proj[0], worst_dist); __syncthreads(); }while(tree->getCurrentLevel() != 0); //Copy back to global memory and proceed to next point point_i_knn_t* best_i = all_best_i_d + j * nr_nns_searches; T* best_dists = all_best_dists_d + j * nr_nns_searches; T* dist_slot = best_dist_pp->getCurrentSlot(); point_i_t* knn_slot = best_knn_pp.getCurrentSlot(); copyKernel(dist_slot, dist_slot + nr_nns_searches, best_dists); copyKernel(knn_slot, knn_slot + nr_nns_searches, best_i); } //if(tidx == 0) // delete local_dist_buf_pointer[0]; } template <typename T, typename T_calc, dim_t dims> void KDTreeKNNGPUSearch(PartitionInfoDevice<T, dims>* partition_info, const point_i_t nr_query, const std::array<T, dims>* points_query, T * dist, point_i_t* idx, const point_i_knn_t nr_nns_searches) { //TODO: Dynamic implementation /*if(partition_info->nr_partitions > max_partitions || partition_info->nr_leaves > max_leaves) { throw std::runtime_error("Error, please reduce number of levels..."); }*/ if(nr_nns_searches > max_nr_nns_searches) throw std::runtime_error("TODO: Maximum number of NNs searches currently restricted"); //gpuErrchk(hipMemcpyAsync(partition_info_copy, partition_info, sizeof(PartitionInfoDevice<T, dims>), hipMemcpyDeviceToDevice)); hipLaunchKernelGGL(( initArray<T>), dim3(dim3(16, 16)),dim3(dim3(32, 32)), 0, 0, dist, INFINITY, nr_query*nr_nns_searches); #ifndef NDEBUG gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); #endif dim3 grid_dims(32, 32); dim3 block_dims(8, 8); /*#ifdef PROFILE_KDTREE hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); #endif*/ const auto points_query_eig = reinterpret_cast<const Vec<T, dims>*>(points_query); hipLaunchKernelGGL(( KDTreeKernel<T, T_calc, dims>), dim3(grid_dims), dim3(block_dims), 0, 0, partition_info, nr_query, points_query_eig, dist, idx, nr_nns_searches); #ifndef NDEBUG gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); #endif /*#ifdef PROFILE_KDTREE hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf ("Time for the kernel: %f ms\n", time); #endif*/ } template void compQuadrDistLeafPartition<float, float, 3>(const std::array<float, 3>& point, const PartitionLeaf<float, 3>& partition_leaf, float* best_dists, point_i_knn_t* best_idx, 
const point_i_knn_t nr_nns_searches); template void compQuadrDistLeafPartition<double, double, 3>(const std::array<double, 3>& point, const PartitionLeaf<double, 3>& partition_leaf, double* best_dists, point_i_knn_t* best_idx, const point_i_knn_t nr_nns_searches); template void KDTreeKNNGPUSearch<float, float, 1>(PartitionInfoDevice<float, 1>* partition_info, const point_i_t nr_query, const std::array<float, 1>* points_query, float * dist, point_i_t* idx, const point_i_knn_t nr_nns_searches); template void KDTreeKNNGPUSearch<double, double, 1>(PartitionInfoDevice<double, 1>* partition_info, const point_i_t nr_query, const std::array<double, 1>* points_query, double * dist, point_i_t* idx, const point_i_knn_t nr_nns_searches); template void KDTreeKNNGPUSearch<float, float, 2>(PartitionInfoDevice<float, 2>* partition_info, const point_i_t nr_query, const std::array<float, 2>* points_query, float * dist, point_i_t* idx, const point_i_knn_t nr_nns_searches); template void KDTreeKNNGPUSearch<double, double, 2>(PartitionInfoDevice<double, 2>* partition_info, const point_i_t nr_query, const std::array<double, 2>* points_query, double * dist, point_i_t* idx, const point_i_knn_t nr_nns_searches); template void KDTreeKNNGPUSearch<float, float, 3>(PartitionInfoDevice<float, 3>* partition_info, const point_i_t nr_query, const std::array<float, 3>* points_query, float * dist, point_i_t* idx, const point_i_knn_t nr_nns_searches); template void KDTreeKNNGPUSearch<double, double, 3>(PartitionInfoDevice<double, 3>* partition_info, const point_i_t nr_query, const std::array<double, 3>* points_query, double * dist, point_i_t* idx, const point_i_knn_t nr_nns_searches); template PartitionInfoDevice<float, 1>* copyPartitionToGPU(const PartitionInfo<float, 1>& partition_info); template PartitionInfoDevice<float, 2>* copyPartitionToGPU(const PartitionInfo<float, 2>& partition_info); template PartitionInfoDevice<float, 3>* copyPartitionToGPU(const PartitionInfo<float, 3>& partition_info); template PartitionInfoDevice<double, 1>* copyPartitionToGPU(const PartitionInfo<double, 1>& partition_info); template PartitionInfoDevice<double, 2>* copyPartitionToGPU(const PartitionInfo<double, 2>& partition_info); template PartitionInfoDevice<double, 3>* copyPartitionToGPU(const PartitionInfo<double, 3>& partition_info); template std::tuple<float*, point_i_t*, float*> copyData<float, 3>(const std::vector<float>& result_dists, const std::vector<point_i_t>& result_idx, const std::vector<std::array<float, 3>>&); template std::tuple<double*, point_i_t*, double*> copyData<double, 3>(const std::vector<double>& result_dists, const std::vector<point_i_t>& result_idx, const std::vector<std::array<double, 3>>&); template void freePartitionFromGPU(PartitionInfoDevice<float, 1>* partition_info); template void freePartitionFromGPU(PartitionInfoDevice<float, 2>* partition_info); template void freePartitionFromGPU(PartitionInfoDevice<float, 3>* partition_info); template void freePartitionFromGPU(PartitionInfoDevice<double, 1>* partition_info); template void freePartitionFromGPU(PartitionInfoDevice<double, 2>* partition_info); template void freePartitionFromGPU(PartitionInfoDevice<double, 3>* partition_info);
d418a8e5c66155fbe4b4258b9e711988838a9a16.cu
#define EIGEN_USE_GPU #include "kdtree.hpp" #include "nndistance.hpp" #include "cutils.cuh" #include "tf_kdtree.hpp" #include <thrust/copy.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> const int local_dist_buf_size = 256; //TODO: Sort and break for possible additional speedup template <typename T, typename T_calc, dim_t dims> __device__ void compQuadrDistLeafPartitionBlockwise(const Vec<T, dims>& point, const PartitionLeaf<T, dims>& partition_leaf, T* local_dist_buf, PingPongBuffer<T>& best_dist_pp, PingPongBuffer<point_i_t>& best_knn_pp, const point_i_knn_t nr_nns_searches, T& worst_dist) { //printf("Before dereferencing\n"); //2D indices const int block_size = blockDim.x * blockDim.y; // * blockDim.z; const int tidx = threadIdx.y * blockDim.x + threadIdx.x; /*printf("compQuadrDistLeafPartition: %x, ", partition_leaf.data); printf("%d, ", partition_leaf.nr_points); printf("%d\n", partition_leaf.offset);*/ const Vec<T, dims>* partition_data = reinterpret_cast<Vec<T, dims>*>(partition_leaf.data); const point_i_t partition_size = partition_leaf.nr_points; const point_i_t partition_offset = partition_leaf.offset; //assert(local_dist_buf_size >= partition_size); assert(partition_size > 0); const auto nr_buf_runs = (partition_size - 1) / local_dist_buf_size + 1; for(size_t buf_run_i = 0; buf_run_i < nr_buf_runs; buf_run_i++) { const auto remaining_partition_size = partition_size - (buf_run_i * local_dist_buf_size); const auto current_length = min(static_cast<int>(remaining_partition_size), local_dist_buf_size); compDists<T, dims>(point, partition_data + (buf_run_i * local_dist_buf_size), current_length, local_dist_buf); __syncthreads(); for(point_i_t ref_i = 0; ref_i < current_length; ref_i++) { T* best_dist_cur = best_dist_pp.getCurrentSlot(); point_i_t* best_knn_cur = best_knn_pp.getCurrentSlot(); T* pong_dist = best_dist_pp.getPongSlot(); point_i_t* pong_knn = best_knn_pp.getPongSlot(); //worst_dist = best_dist_cur[nr_nns_searches - 1]; const T_calc calc_dist = local_dist_buf[ref_i]; if(calc_dist < worst_dist) { const auto insertion_idx = knnInsertionDynamic<T_calc, false>(calc_dist, best_dist_cur, nr_nns_searches); assert(insertion_idx < nr_nns_searches); assert(worst_dist == best_dist_cur[nr_nns_searches - 1]); insertAndShiftArrayRight(best_dist_cur, pong_dist, nr_nns_searches, calc_dist, insertion_idx, worst_dist); insertAndShiftArrayRight<point_i_t>(best_knn_cur, pong_knn, nr_nns_searches, ref_i + partition_offset + (buf_run_i * local_dist_buf_size), insertion_idx); { best_dist_pp.increment(); best_knn_pp.increment(); } } __syncthreads(); } } worst_dist = best_dist_pp.getCurrentSlot()[nr_nns_searches - 1]; } template <typename T, dim_t dims> PartitionInfoDevice<T, dims>* copyPartitionToGPU(const PartitionInfo<T, dims>& partition_info) { std::array<T, dims>* structured_points_d = copyArrayToDevice(partition_info.structured_points, partition_info.nr_points); std::vector<PartitionLeaf<T, dims>> leaves_copy(partition_info.leaves, partition_info.leaves + partition_info.nr_leaves); for(auto& leaf : leaves_copy) leaf.data = structured_points_d + leaf.offset; const auto& partitions = partition_info.partitions; Partition<T>* partitions_d = copyArrayToDevice(partitions, partition_info.nr_partitions); const auto& leaves = partition_info.leaves; PartitionLeaf<T, dims>* leaves_d = copyArrayToDevice(leaves_copy.data(), partition_info.nr_leaves); point_i_t* shuffled_inds_d = copyArrayToDevice(partition_info.shuffled_inds, partition_info.nr_points); PartitionInfoDevice<T, 
dims> partition_info_tmp(partition_info); partition_info_tmp.partitions = partitions_d; partition_info_tmp.leaves = leaves_d; partition_info_tmp.levels = partition_info.levels; partition_info_tmp.structured_points = structured_points_d; partition_info_tmp.shuffled_inds = shuffled_inds_d; PartitionInfoDevice<T, dims>* partition_info_d; allocGPUMemory(&partition_info_d, sizeof(PartitionInfo<T, dims>)); gpuErrchk(cudaMemcpy(partition_info_d, &partition_info_tmp, sizeof(PartitionInfo<T, dims>), cudaMemcpyHostToDevice)); #ifndef NDEBUG gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); #endif return partition_info_d; } //Just necessary for the C++ implementation. Tensorflow already has everything on the GPU template <typename T, dim_t dims> std::tuple<T*, point_i_t*, T*> copyData(const std::vector<T>& result_dists, const std::vector<point_i_t>& result_idx, const std::vector<std::array<T, dims>>& points_query) { T* result_dists_d = copyArrayToDevice(result_dists.data(), result_dists.size()); point_i_t* result_idx_d = copyArrayToDevice(result_idx.data(), result_idx.size()); T* points_query_d = reinterpret_cast<T*>(copyArrayToDevice(points_query.data(), points_query.size())); #ifndef NDEBUG gpuErrchk( cudaPeekAtLastError() ); //gpuErrchk( cudaDeviceSynchronize() ); #endif return std::make_tuple(result_dists_d, result_idx_d, points_query_d); } //Just necessary for the C++ implementation. Tensorflow already has everything on the GPU template <typename T> std::tuple<T*, point_i_t*> copyDataBackToHost(const T* result_dists, const point_i_knn_t* result_idx, const size_t nr_query, const uint32_t nr_nns_searches) { T* result_dists_h = copyArrayToHost(result_dists, nr_query * nr_nns_searches); point_i_t* result_idx_h = copyArrayToHost(result_idx, nr_query * nr_nns_searches); #ifndef NDEBUG gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); #endif return std::make_tuple(result_dists_h, result_idx_h); } template std::tuple<float*, point_i_t*> copyDataBackToHost(const float* result_dists, const point_i_knn_t* result_idx, const size_t nr_query, const uint32_t nr_nns_searches); template std::tuple<double*, point_i_t*> copyDataBackToHost(const double* result_dists, const point_i_knn_t* result_idx, const size_t nr_query, const uint32_t nr_nns_searches); /** * @brief Frees the allocated GPU memory for a single KD-Tree * * @tparam T precision type of the KD-Tree * @tparam dims Dimensionality of the KD-Tree (usually 3) * @param partition_info Pointer to the device memory holding the KD-Tree information */ template <typename T, dim_t dims> void freePartitionFromGPU(PartitionInfoDevice<T, dims>* partition_info) { PartitionInfoDevice<T, dims>* local = reinterpret_cast<PartitionInfoDevice<T, dims>*>(malloc(sizeof(PartitionInfoDevice<T, dims>))); gpuErrchk(cudaMemcpy(local, partition_info, sizeof(PartitionInfo<T, dims>), cudaMemcpyDeviceToHost)); freeGPUMemory(local->partitions); freeGPUMemory(local->leaves); freeGPUMemory(local->structured_points); freeGPUMemory(local->shuffled_inds); freeGPUMemory(partition_info); #ifndef NDEBUG gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); #endif free(local); } /** * @brief * * @tparam T * @tparam T_calc * @tparam dims * @param tree * @param point * @param point_proj * @param current_worst_dist * @return NodeDirection Holds the direction in which the tree should be further traversed */ template <typename T, typename T_calc, dim_t dims> __device__ NodeDirection traverseTree(TreeTraversal<T, dims>& tree, const 
Vec<T, dims>& point, Vec<T, dims>& point_proj, const T current_worst_dist) { //printf("traverseTree begin\n"); const NodeTag tag = tree.getCurrentTagBinary(); //printf("Node Tag %d\n", tag); //printf("Lin Ind %d\n", tree.current_lin_ind); const auto& current_partition = tree.getCurrentConstPartition(); //printf("Fetched partition %d/%f\n", current_partition.axis_split, current_partition.median); const auto current_level = tree.getCurrentLevel(); const auto current_axis = current_partition.axis_split; const bool lower_than_median = point[current_axis] < current_partition.median; //printf("Selecting new leaf\n"); //All branches of the node were visited if(tag == NodeTag::left_right_visited) { if(current_level == 0) return NodeDirection::finished; //Searched everything, quit else { point_proj[current_axis] = point[current_axis]; return NodeDirection::up; //Search upper levels, nothing to do here } } //Arrived at uncomputed leaves if(tree.isLeafParent()) { assert(tag == NodeTag::uncharted); tree.setCurrentNodeTagBinary(NodeTag::left_right_visited); return NodeDirection::finished; } else { //Node has not yet been used if(tag == NodeTag::uncharted) { if(lower_than_median) { tree.setCurrentNodeTagBinary(NodeTag::left_visited); return NodeDirection::left; } else { tree.setCurrentNodeTagBinary(NodeTag::right_visited); return NodeDirection::right; } } else //Node has been used in the correct side, now we use the non-matching site: Project and test if descent is necessary { assert(!lower_than_median || tag == NodeTag::left_visited); assert(lower_than_median || tag == NodeTag::right_visited); tree.setCurrentNodeTagBinary(NodeTag::left_right_visited); //Either way we finish all nodes here if(partitionNecessary<T, T_calc, dims>(point, point_proj, current_partition, current_worst_dist)) { point_proj[current_axis] = current_partition.median; return lower_than_median ? NodeDirection::right : NodeDirection::left; } else { point_proj[current_axis] = point[current_axis]; return (current_level != 0 ? 
NodeDirection::up : NodeDirection::finished); } } } } template <typename T, typename T_calc, dim_t dims> inline __device__ void findNextLeaf(TreeTraversal<T, dims>& tree, const Vec<T, dims>& point, Vec<T, dims>& point_proj, const T current_worst_dist) { NodeDirection new_dir; while((new_dir = traverseTree<T, T_calc, dims>(tree, point, point_proj, current_worst_dist)) != NodeDirection::finished) { switch(new_dir) { case NodeDirection::up: tree.moveToParent(); break; case NodeDirection::left: tree.moveToLeftChild(); break; case NodeDirection::right: tree.moveToRightChild(); break; } } } const int max_nr_nodes = 2048*4; static_assert(max_nr_nodes % 4 == 0, "Alignment off, since 4 nodes fit into a byte"); const int max_nr_nns_searches = 128; template <typename T, typename T_calc, dim_t dims> __global__ void KDTreeKernel(PartitionInfoDevice<T, dims>* partition_info, const point_i_t nr_query, const Vec<T, dims>* points_query, T* all_best_dists_d, point_i_knn_t* all_best_i_d, const point_i_knn_t nr_nns_searches) { assert(nr_nns_searches <= partition_info->nr_points); const auto nr_partitions = partition_info->nr_partitions; const auto nr_leaves = partition_info->nr_leaves; //printf("%d, %d\n", nr_partitions, nr_leaves); //2D indices const auto grid_size = gridDim.x * gridDim.y; const auto blockidx = blockIdx.x + blockIdx.y*gridDim.x; const auto block_size = blockDim.x * blockDim.y; // * blockDim.z; const auto tidx = threadIdx.y * blockDim.x + threadIdx.x; //const auto global_start_idx = tidx + blockidx * grid_size; //extern __shared__ char* shared_mem; //__shared__ Vec<T, dims> buffered_query_points[nr_buffered_query_points]; //__shared__ Vec<T, dims> buffered_query_points_proj[nr_buffered_query_points]; //__shared__ tree_ind_t leaf_inds[nr_buffered_leaf_inds]; __shared__ point_i_t buffered_knn[2*max_nr_nns_searches]; __shared__ T buffered_dists[2*max_nr_nns_searches]; __shared__ TreeTraversal<T, dims> tree[1]; __shared__ NodeTag tags[max_nr_nodes/4]; PingPongBuffer<T> best_dist_pp[1]; PingPongBuffer<point_i_t> best_knn_pp; __shared__ T local_dist_buf[local_dist_buf_size]; __shared__ Vec<T, dims> point_proj[1]; __shared__ T worst_dist_[1]; __shared__ bool off_leaf_necessary[1]; //TODO: Watch out for alignment //if(tidx == 0) // local_dist_buf_pointer[0] = new T[local_dist_buf_size]; //TODO: Dynamic const auto nr_nodes = partition_info->nr_partitions; assert(max_nr_nodes >= nr_nodes); assert(nr_nns_searches <= max_nr_nns_searches); tree->partition_info = reinterpret_cast<PartitionInfo<T, dims>*>(partition_info); tree->visited_info = tags; //new NodeTag[tree->partition_info->nr_nodes]; //tree->resetPositionAndTags(); //T worst_dist = INFINITY; auto& worst_dist = worst_dist_[0]; best_dist_pp->buffers[0] = buffered_dists; best_dist_pp->buffers[1] = buffered_dists + nr_nns_searches; best_knn_pp.buffers[0] = buffered_knn; best_knn_pp.buffers[1] = buffered_knn + nr_nns_searches; for(auto j = blockidx; j < nr_query; j += grid_size) { //Fetch vars from global memory const Vec<T, dims> point = points_query[j]; *point_proj = points_query[j]; if(tidx == 0) worst_dist = INFINITY; __syncthreads(); //Buffer everything in shared memory and reset the tree fillKernel<T>(buffered_dists, buffered_dists + 2*nr_nns_searches, INFINITY); assert(nr_nodes > 0); fillKernel<NodeTag>(tags, tags + ((nr_nodes - 1)/4) + 1, NodeTag::uncharted); if(tidx == 0) { tree->current_lin_ind = tree->current_level = 0; } //Fetch the current leaf and descent __syncthreads(); if(tidx == 0) findNextLeaf<T, T_calc, dims>(*tree, point, 
point_proj[0], worst_dist); __syncthreads(); do { assert(tree->getCurrentLevel() == tree->getTotalLevels() - 1); assert(tree->isLeafParent()); const auto current_partition = tree->getCurrentConstPartition(); const auto current_axis = current_partition.axis_split; const bool lower_than_median = point[current_axis] < current_partition.median; const auto leaf = (lower_than_median ? tree->getLeftLeaf() : tree->getRightLeaf()); //Now compute each leaf with all threads in the current block compQuadrDistLeafPartitionBlockwise<T, T_calc, dims>(point, leaf, local_dist_buf, *best_dist_pp, best_knn_pp, nr_nns_searches, worst_dist); if(tidx == 0) { off_leaf_necessary[0] = partitionNecessary<T, T_calc, dims>(point, point_proj[0], current_partition, worst_dist); } __syncthreads(); if(off_leaf_necessary[0]) { //printf("Off partition necessary\n"); const PartitionLeaf<T, dims>& leaf = (!lower_than_median ? tree->getLeftLeaf() : tree->getRightLeaf()); //printf("Comp Dist 2\n"); compQuadrDistLeafPartitionBlockwise<T, T_calc, dims>(point, leaf, local_dist_buf, *best_dist_pp, best_knn_pp, nr_nns_searches, worst_dist); } //__syncthreads(); if(tidx == 0) findNextLeaf<T, T_calc, dims>(*tree, point, point_proj[0], worst_dist); __syncthreads(); }while(tree->getCurrentLevel() != 0); //Copy back to global memory and proceed to next point point_i_knn_t* best_i = all_best_i_d + j * nr_nns_searches; T* best_dists = all_best_dists_d + j * nr_nns_searches; T* dist_slot = best_dist_pp->getCurrentSlot(); point_i_t* knn_slot = best_knn_pp.getCurrentSlot(); copyKernel(dist_slot, dist_slot + nr_nns_searches, best_dists); copyKernel(knn_slot, knn_slot + nr_nns_searches, best_i); } //if(tidx == 0) // delete local_dist_buf_pointer[0]; } template <typename T, typename T_calc, dim_t dims> void KDTreeKNNGPUSearch(PartitionInfoDevice<T, dims>* partition_info, const point_i_t nr_query, const std::array<T, dims>* points_query, T * dist, point_i_t* idx, const point_i_knn_t nr_nns_searches) { //TODO: Dynamic implementation /*if(partition_info->nr_partitions > max_partitions || partition_info->nr_leaves > max_leaves) { throw std::runtime_error("Error, please reduce number of levels..."); }*/ if(nr_nns_searches > max_nr_nns_searches) throw std::runtime_error("TODO: Maximum number of NNs searches currently restricted"); //gpuErrchk(cudaMemcpyAsync(partition_info_copy, partition_info, sizeof(PartitionInfoDevice<T, dims>), cudaMemcpyDeviceToDevice)); initArray<T><<<dim3(16, 16),dim3(32, 32)>>>(dist, INFINITY, nr_query*nr_nns_searches); #ifndef NDEBUG gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); #endif dim3 grid_dims(32, 32); dim3 block_dims(8, 8); /*#ifdef PROFILE_KDTREE cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); #endif*/ const auto points_query_eig = reinterpret_cast<const Vec<T, dims>*>(points_query); KDTreeKernel<T, T_calc, dims><<<grid_dims, block_dims>>>(partition_info, nr_query, points_query_eig, dist, idx, nr_nns_searches); #ifndef NDEBUG gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); #endif /*#ifdef PROFILE_KDTREE cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf ("Time for the kernel: %f ms\n", time); #endif*/ } template void compQuadrDistLeafPartition<float, float, 3>(const std::array<float, 3>& point, const PartitionLeaf<float, 3>& partition_leaf, float* best_dists, point_i_knn_t* best_idx, const point_i_knn_t nr_nns_searches); template void 
compQuadrDistLeafPartition<double, double, 3>(const std::array<double, 3>& point, const PartitionLeaf<double, 3>& partition_leaf, double* best_dists, point_i_knn_t* best_idx, const point_i_knn_t nr_nns_searches); template void KDTreeKNNGPUSearch<float, float, 1>(PartitionInfoDevice<float, 1>* partition_info, const point_i_t nr_query, const std::array<float, 1>* points_query, float * dist, point_i_t* idx, const point_i_knn_t nr_nns_searches); template void KDTreeKNNGPUSearch<double, double, 1>(PartitionInfoDevice<double, 1>* partition_info, const point_i_t nr_query, const std::array<double, 1>* points_query, double * dist, point_i_t* idx, const point_i_knn_t nr_nns_searches); template void KDTreeKNNGPUSearch<float, float, 2>(PartitionInfoDevice<float, 2>* partition_info, const point_i_t nr_query, const std::array<float, 2>* points_query, float * dist, point_i_t* idx, const point_i_knn_t nr_nns_searches); template void KDTreeKNNGPUSearch<double, double, 2>(PartitionInfoDevice<double, 2>* partition_info, const point_i_t nr_query, const std::array<double, 2>* points_query, double * dist, point_i_t* idx, const point_i_knn_t nr_nns_searches); template void KDTreeKNNGPUSearch<float, float, 3>(PartitionInfoDevice<float, 3>* partition_info, const point_i_t nr_query, const std::array<float, 3>* points_query, float * dist, point_i_t* idx, const point_i_knn_t nr_nns_searches); template void KDTreeKNNGPUSearch<double, double, 3>(PartitionInfoDevice<double, 3>* partition_info, const point_i_t nr_query, const std::array<double, 3>* points_query, double * dist, point_i_t* idx, const point_i_knn_t nr_nns_searches); template PartitionInfoDevice<float, 1>* copyPartitionToGPU(const PartitionInfo<float, 1>& partition_info); template PartitionInfoDevice<float, 2>* copyPartitionToGPU(const PartitionInfo<float, 2>& partition_info); template PartitionInfoDevice<float, 3>* copyPartitionToGPU(const PartitionInfo<float, 3>& partition_info); template PartitionInfoDevice<double, 1>* copyPartitionToGPU(const PartitionInfo<double, 1>& partition_info); template PartitionInfoDevice<double, 2>* copyPartitionToGPU(const PartitionInfo<double, 2>& partition_info); template PartitionInfoDevice<double, 3>* copyPartitionToGPU(const PartitionInfo<double, 3>& partition_info); template std::tuple<float*, point_i_t*, float*> copyData<float, 3>(const std::vector<float>& result_dists, const std::vector<point_i_t>& result_idx, const std::vector<std::array<float, 3>>&); template std::tuple<double*, point_i_t*, double*> copyData<double, 3>(const std::vector<double>& result_dists, const std::vector<point_i_t>& result_idx, const std::vector<std::array<double, 3>>&); template void freePartitionFromGPU(PartitionInfoDevice<float, 1>* partition_info); template void freePartitionFromGPU(PartitionInfoDevice<float, 2>* partition_info); template void freePartitionFromGPU(PartitionInfoDevice<float, 3>* partition_info); template void freePartitionFromGPU(PartitionInfoDevice<double, 1>* partition_info); template void freePartitionFromGPU(PartitionInfoDevice<double, 2>* partition_info); template void freePartitionFromGPU(PartitionInfoDevice<double, 3>* partition_info);
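Stripped of the tree traversal and the shared-memory bookkeeping, KDTreeKernel follows a simple scheduling pattern: one thread block per query point, with the 2-D grid striding over the query set. The kernel below is a hedged distillation of that pattern for illustration only; it is not part of the original sources.

// Minimal "one block per query" skeleton, mirroring the grid-stride loop used in KDTreeKernel.
__global__ void perQuerySkeleton(int nr_query, const float *queries, float *results) {
    const int grid_size = gridDim.x * gridDim.y;                 // total number of blocks
    const int blockidx  = blockIdx.x + blockIdx.y * gridDim.x;   // linear block id
    for (int j = blockidx; j < nr_query; j += grid_size) {
        // ...all threads of this block would cooperate on query j here...
        if (threadIdx.x == 0 && threadIdx.y == 0)
            results[j] = queries[j];                             // placeholder per-query result
    }
}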
93d3045057cfda3f28b19903674dccb9380afae0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #include <iostream> #include <opencv2/core.hpp> #include <opencv2/imgcodecs.hpp> #include <stdio.h> #include <complex> #include "math.h" #include <hip/hip_complex.h> //compute numer of iterations to diverge __device__ int mandelbrotIterations(const hipDoubleComplex &z0, const int max){ hipDoubleComplex z = z0; for (int t = 0; t < max; t++){ if( (cuCreal(z)*cuCreal(z) + cuCimag(z)*cuCimag(z) ) > 4.0f){ return t; } z = cuCadd(cuCmul(z,z), z0); } return max; } __device__ int mandelbrotSet(const hipDoubleComplex &z0, const int maxIter=500){ //does it diverge? int iterations = mandelbrotIterations(z0, maxIter); //avoid division by zero if(maxIter - iterations == 0){ return 0; } //rescale value to 8 bits (CV_U8) return lrint(sqrt(iterations / (float) maxIter) * 255); } __global__ void kernel(unsigned char *d_output, int rows, int cols,float x1, float y1, float scaleX, float scaleY){ // get correspondig coordinates from grid indexes int c = blockIdx.x*blockDim.x + threadIdx.x; int r = blockIdx.y*blockDim.y + threadIdx.y; const int i = r*cols + c; // check image bounds if( (r>=rows) || (c>=cols) ){ return; } //perform operation float x0= c/scaleX + x1; float y0= r/scaleY +y1; hipDoubleComplex z0 = make_cuDoubleComplex(x0, y0); uchar value = (uchar) mandelbrotSet(z0); d_output[i]= value; } void wrapper_gpu(Mat output){ unsigned char *outputPtr = (unsigned char*) output.data; unsigned int cols = output.cols; unsigned int rows = output.rows; float x1 = -2.1f; float x2 = 0.6f; float y1 = -1.2f; float y2 = 1.2f; float scaleX = output.cols / (x2 - x1); float scaleY = output.rows / (y2 - y1); //block dimensions (threads) int Tx = 32; int Ty = 32; //grid size dimensions (blocks) int Bx = (Tx + rows -1)/Tx; int By = (Ty + cols -1)/Ty; // declare pointers to device memory unsigned char *d_in = 0; unsigned char *d_out = 0; // allocate memory in device hipMalloc(&d_in, cols*rows*sizeof(unsigned char)); hipMalloc(&d_out, cols*rows*sizeof(unsigned char)); //prepare kernel lauch dimensions const dim3 blockSize = dim3(Tx, Ty); const dim3 gridSize= dim3(Bx, By); // launch kernel in GPU hipLaunchKernelGGL(( kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, rows, cols, x1,y1, scaleX, scaleY); // copy output from device to host hipMemcpy(outputPtr, d_out, rows*cols*sizeof(unsigned char), hipMemcpyDeviceToHost); // free the memory allocated for device arrays hipFree(d_in); hipFree(d_out); }
93d3045057cfda3f28b19903674dccb9380afae0.cu
#include "kernel.h" #include <iostream> #include <opencv2/core.hpp> #include <opencv2/imgcodecs.hpp> #include <stdio.h> #include <complex> #include "math.h" #include <cuComplex.h> //compute numer of iterations to diverge __device__ int mandelbrotIterations(const cuDoubleComplex &z0, const int max){ cuDoubleComplex z = z0; for (int t = 0; t < max; t++){ if( (cuCreal(z)*cuCreal(z) + cuCimag(z)*cuCimag(z) ) > 4.0f){ return t; } z = cuCadd(cuCmul(z,z), z0); } return max; } __device__ int mandelbrotSet(const cuDoubleComplex &z0, const int maxIter=500){ //does it diverge? int iterations = mandelbrotIterations(z0, maxIter); //avoid division by zero if(maxIter - iterations == 0){ return 0; } //rescale value to 8 bits (CV_U8) return lrint(sqrt(iterations / (float) maxIter) * 255); } __global__ void kernel(unsigned char *d_output, int rows, int cols,float x1, float y1, float scaleX, float scaleY){ // get correspondig coordinates from grid indexes int c = blockIdx.x*blockDim.x + threadIdx.x; int r = blockIdx.y*blockDim.y + threadIdx.y; const int i = r*cols + c; // check image bounds if( (r>=rows) || (c>=cols) ){ return; } //perform operation float x0= c/scaleX + x1; float y0= r/scaleY +y1; cuDoubleComplex z0 = make_cuDoubleComplex(x0, y0); uchar value = (uchar) mandelbrotSet(z0); d_output[i]= value; } void wrapper_gpu(Mat output){ unsigned char *outputPtr = (unsigned char*) output.data; unsigned int cols = output.cols; unsigned int rows = output.rows; float x1 = -2.1f; float x2 = 0.6f; float y1 = -1.2f; float y2 = 1.2f; float scaleX = output.cols / (x2 - x1); float scaleY = output.rows / (y2 - y1); //block dimensions (threads) int Tx = 32; int Ty = 32; //grid size dimensions (blocks) int Bx = (Tx + rows -1)/Tx; int By = (Ty + cols -1)/Ty; // declare pointers to device memory unsigned char *d_in = 0; unsigned char *d_out = 0; // allocate memory in device cudaMalloc(&d_in, cols*rows*sizeof(unsigned char)); cudaMalloc(&d_out, cols*rows*sizeof(unsigned char)); //prepare kernel lauch dimensions const dim3 blockSize = dim3(Tx, Ty); const dim3 gridSize= dim3(Bx, By); // launch kernel in GPU kernel<<<gridSize, blockSize>>>(d_out, rows, cols, x1,y1, scaleX, scaleY); // copy output from device to host cudaMemcpy(outputPtr, d_out, rows*cols*sizeof(unsigned char), cudaMemcpyDeviceToHost); // free the memory allocated for device arrays cudaFree(d_in); cudaFree(d_out); }
259572711aadc67d287b8c370a23b4a4dfb85a07.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <algorithm> #include <hip/hip_fp16.h> #include <cassert> #include "Split.hpp" //sds:splitindex0 //sds: index,The index of the output tensor. nvinfer1::Dims SplitPlugin::getOutputDimensions(int index, const nvinfer1::Dims *inputDims, int nbInputs) { assert(nbInputs == 1); assert(index < this->getNbOutputs()); nvinfer1::Dims const& input_dims = inputDims[0]; nvinfer1::Dims output_dims = input_dims; output_dims.d[_axis] = _output_lengths.at(index); return output_dims; } int SplitPlugin::initialize() { std::vector<int> segment_offsets(1, 0); for( int i=0; i<this->getNbOutputs(); ++i ) { segment_offsets.push_back(segment_offsets.back() + _output_lengths[i]); } _d_segment_offsets = segment_offsets; nvinfer1::Dims dims = this->getInputDims(0); _nx = 1; for( int i=dims.nbDims-1; i>_axis; --i ) { _nx *= dims.d[i]; } _ny = dims.d[_axis]; _nz = 1; for( int i=_axis-1; i>=0; --i ) { _nz *= dims.d[i]; } _d_output_ptrs.resize(this->getNbOutputs(), nullptr); return 0; } template<typename T> __device__ int upper_bound(T const* vals, int n, T const& key) { int i = 0; while( n > 0 ) { int m = n / 2; int j = i + m; if( !(key < vals[j]) ) { i = j + 1; n -= m + 1; } else { n = m; } } return i; } template<typename T> __global__ void split_kernel(int nsegment, int const* __restrict__ segment_offsets, T const* __restrict__ idata, T* const* odatas, int nx, int src_ny, int nz) { int x0 = threadIdx.x + blockIdx.x * blockDim.x; int src_y0 = threadIdx.y + blockIdx.y * blockDim.y; int z0 = threadIdx.z + blockIdx.z * blockDim.z; for( int z=z0; z<nz; z+=blockDim.z*gridDim.z ) { for( int src_y=src_y0; src_y<src_ny; src_y+=blockDim.y*gridDim.y ) { for( int x=x0; x<nx; x+=blockDim.x*gridDim.x ) { int segment = upper_bound(segment_offsets, nsegment, src_y) - 1; int dst_y = src_y - segment_offsets[segment]; int dst_ny = segment_offsets[segment + 1] - segment_offsets[segment]; odatas[segment][x + nx*(dst_y + dst_ny*z)] = idata[x + nx*(src_y + src_ny*z)]; } } } } //sds:inputsoutputs //sds:pluginenqueueinputsaddPluginV2inputs // inputsinitializeenqueue // outputs? outputsgetOutputDimsgpu? 
int SplitPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, hipStream_t stream) { auto const& input_dims = this->getInputDims(0); int const* d_segment_offsets_ptr = thrust::raw_pointer_cast(&_d_segment_offsets[0]); float const* idata = reinterpret_cast<float const*>(inputs[0]); float* const* h_odatas = reinterpret_cast<float* const*>(outputs); float** odatas = thrust::raw_pointer_cast(&_d_output_ptrs[0]); hipError_t cuda_status = hipMemcpyAsync(odatas, h_odatas, _d_output_ptrs.size() * sizeof(float*), hipMemcpyHostToDevice, stream); if( cuda_status != hipSuccess ) { return 1; } int nz = _nz * batchSize; dim3 block(32, 16); dim3 grid(::min((_nx - 1) / block.x + 1, 65535u), ::min((_ny - 1) / block.y + 1, 65535u), ::min((_nz - 1) / block.z + 1, 65535u)); if (getDataType()==nvinfer1::DataType::kFLOAT) { hipLaunchKernelGGL(( split_kernel), dim3(grid), dim3(block), 0, stream, _d_segment_offsets.size(), d_segment_offsets_ptr, idata, odatas, _nx, _ny, nz); } else { hipLaunchKernelGGL(( split_kernel), dim3(grid), dim3(block), 0, stream, _d_segment_offsets.size(), d_segment_offsets_ptr, (__half const*)idata, (__half**)odatas, _nx, _ny, nz); } return hipGetLastError() != hipSuccess; }
259572711aadc67d287b8c370a23b4a4dfb85a07.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <algorithm> #include <cuda_fp16.h> #include <cassert> #include "Split.hpp" //sds: for this split, the index here must be 0 //sds: index,The index of the output tensor. nvinfer1::Dims SplitPlugin::getOutputDimensions(int index, const nvinfer1::Dims *inputDims, int nbInputs) { assert(nbInputs == 1); assert(index < this->getNbOutputs()); nvinfer1::Dims const& input_dims = inputDims[0]; nvinfer1::Dims output_dims = input_dims; output_dims.d[_axis] = _output_lengths.at(index); return output_dims; } int SplitPlugin::initialize() { std::vector<int> segment_offsets(1, 0); for( int i=0; i<this->getNbOutputs(); ++i ) { segment_offsets.push_back(segment_offsets.back() + _output_lengths[i]); } _d_segment_offsets = segment_offsets; nvinfer1::Dims dims = this->getInputDims(0); _nx = 1; for( int i=dims.nbDims-1; i>_axis; --i ) { _nx *= dims.d[i]; } _ny = dims.d[_axis]; _nz = 1; for( int i=_axis-1; i>=0; --i ) { _nz *= dims.d[i]; } _d_output_ptrs.resize(this->getNbOutputs(), nullptr); return 0; } template<typename T> __device__ int upper_bound(T const* vals, int n, T const& key) { int i = 0; while( n > 0 ) { int m = n / 2; int j = i + m; if( !(key < vals[j]) ) { i = j + 1; n -= m + 1; } else { n = m; } } return i; } template<typename T> __global__ void split_kernel(int nsegment, int const* __restrict__ segment_offsets, T const* __restrict__ idata, T* const* odatas, int nx, int src_ny, int nz) { int x0 = threadIdx.x + blockIdx.x * blockDim.x; int src_y0 = threadIdx.y + blockIdx.y * blockDim.y; int z0 = threadIdx.z + blockIdx.z * blockDim.z; for( int z=z0; z<nz; z+=blockDim.z*gridDim.z ) { for( int src_y=src_y0; src_y<src_ny; src_y+=blockDim.y*gridDim.y ) { for( int x=x0; x<nx; x+=blockDim.x*gridDim.x ) { int segment = upper_bound(segment_offsets, nsegment, src_y) - 1; int dst_y = src_y - segment_offsets[segment]; int dst_ny = segment_offsets[segment + 1] - segment_offsets[segment]; odatas[segment][x + nx*(dst_y + dst_ny*z)] = idata[x + nx*(src_y + src_ny*z)]; } } } } //sds: here the inputs are in device memory and the outputs are in host memory. //sds: for every plugin entering enqueue, the inputs it receives may be in host or device memory, decided by the inputs passed to addPluginV2 // inputs is a plain pointer; its dimension info has to be initialized by ourselves, e.g. in initialize or in enqueue. // outputs? the outputs are already set up as specified by getOutputDims, so they should all be GPU pointers? 
int SplitPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream) { auto const& input_dims = this->getInputDims(0); int const* d_segment_offsets_ptr = thrust::raw_pointer_cast(&_d_segment_offsets[0]); float const* idata = reinterpret_cast<float const*>(inputs[0]); float* const* h_odatas = reinterpret_cast<float* const*>(outputs); float** odatas = thrust::raw_pointer_cast(&_d_output_ptrs[0]); cudaError_t cuda_status = cudaMemcpyAsync(odatas, h_odatas, _d_output_ptrs.size() * sizeof(float*), cudaMemcpyHostToDevice, stream); if( cuda_status != cudaSuccess ) { return 1; } int nz = _nz * batchSize; dim3 block(32, 16); dim3 grid(std::min((_nx - 1) / block.x + 1, 65535u), std::min((_ny - 1) / block.y + 1, 65535u), std::min((_nz - 1) / block.z + 1, 65535u)); if (getDataType()==nvinfer1::DataType::kFLOAT) { split_kernel<<<grid, block, 0, stream>>> (_d_segment_offsets.size(), d_segment_offsets_ptr, idata, odatas, _nx, _ny, nz); } else { split_kernel<<<grid, block, 0, stream>>> (_d_segment_offsets.size(), d_segment_offsets_ptr, (__half const*)idata, (__half**)odatas, _nx, _ny, nz); } return cudaGetLastError() != cudaSuccess; }
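The segment lookup inside split_kernel is just an upper_bound over the prefix sums built in initialize(). The host-side sketch below reproduces that logic purely for illustration; it is not part of the plugin, and upper_bound_host simply mirrors the device function defined above.

#include <cassert>

// Host mirror of the device upper_bound() above, used to illustrate the segment lookup.
static int upper_bound_host(const int *vals, int n, int key) {
    int i = 0;
    while (n > 0) {
        int m = n / 2;
        int j = i + m;
        if (!(key < vals[j])) { i = j + 1; n -= m + 1; } else { n = m; }
    }
    return i;
}

// Output lengths {2, 3} give segment_offsets {0, 2, 5}; source row 3 lands in segment 1 at local row 1.
static void exampleSegmentLookup() {
    const int offsets[3] = {0, 2, 5};
    int segment = upper_bound_host(offsets, 3, 3) - 1;   // -> 1
    int dst_y   = 3 - offsets[segment];                  // -> 1
    assert(segment == 1 && dst_y == 1);
}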
416a68b815d5c92efb62973ea06e795d3129c20f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @precisions normal z -> c d s @author Hartwig Anzt */ #include "common_magmasparse.h" #define BLOCK_SIZE 512 #define PRECISION_z // These routines merge multiple kernels from zmergecg into one // for a description see // "Reformulated Conjugate Gradient for the Energy-Aware // Solution of Linear Systems on GPUs (ICPP '13) // accelerated reduction for one vector __global__ void magma_zcgreduce_kernel_spmv1( int Gs, int n, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_Z_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ] : MAGMA_Z_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using CSR and the first step of the reduction __global__ void magma_zcgmerge_spmvcsr_kernel( int n, magmaDoubleComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaDoubleComplex * d, magmaDoubleComplex * z, magmaDoubleComplex * vtmp ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0); if( i<n ) { magmaDoubleComplex dot = MAGMA_Z_ZERO; int start = drowptr[ i ]; int end = drowptr[ i+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * d[ dcolind[j] ]; z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] 
+= temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELL and the first step of the reduction __global__ void magma_zcgmerge_spmvell_kernel( int n, int num_cols_per_row, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * d, magmaDoubleComplex * z, magmaDoubleComplex * vtmp ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0); if(i < n ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row; k++ ) { int col = dcolind [ n * k + i ]; magmaDoubleComplex val = dval [ n * k + i ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLPACK and the first step of the reduction __global__ void magma_zcgmerge_spmvellpack_kernel( int n, int num_cols_per_row, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * d, magmaDoubleComplex * z, magmaDoubleComplex * vtmp ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0); if(i < n ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row; k++ ) { int col = dcolind [ num_cols_per_row * i + k ]; magmaDoubleComplex val = dval [ num_cols_per_row * i + k ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { 
volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_zcgmerge_spmvellpackrt_kernel_8( int n, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaDoubleComplex * d, magmaDoubleComplex * z, magmaDoubleComplex * vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x; // global thread index int idb = threadIdx.x; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaDoubleComplex shared[]; if(i < n ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { // original code in paper (not working for me) //magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 4 ) { shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_zcgmerge_spmvellpackrt_kernel_16( int n, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaDoubleComplex * d, magmaDoubleComplex * z, magmaDoubleComplex * vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x; // global thread index int idb = threadIdx.x; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaDoubleComplex shared[]; if(i < n ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { // original code in paper (not working for me) //magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 8 ) { shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_zcgmerge_spmvellpackrt_kernel_32( int n, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaDoubleComplex * d, magmaDoubleComplex * z, magmaDoubleComplex * vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + 
threadIdx.x; // global thread index int idb = threadIdx.x; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaDoubleComplex shared[]; if(i < n ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { // original code in paper (not working for me) //magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 16 ) { shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // additional kernel necessary to compute first reduction step __global__ void magma_zcgmerge_spmvellpackrt_kernel2( int n, magmaDoubleComplex * z, magmaDoubleComplex * d, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_Z_MAKE(0.0, 0.0); __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using SELLC __global__ void magma_zcgmerge_spmvsellc_kernel( int num_rows, int blocksize, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaDoubleComplex * d, magmaDoubleComplex * z, magmaDoubleComplex * vtmp) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int offset = drowptr[ blockIdx.x ]; int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0); if(i < num_rows ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < border; n ++) { int col = dcolind [offset+ blocksize * n + Idx ]; magmaDoubleComplex val = dval[offset+ blocksize * n + Idx]; if( val != 0) { dot=dot+val*d[col]; } } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ 
Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_zcgmerge_spmvsellpt_kernel_8( int num_rows, int blocksize, int T, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaDoubleComplex * d, magmaDoubleComplex * z) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { magmaDoubleComplex val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ) { shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_zcgmerge_spmvsellpt_kernel_16( int num_rows, int blocksize, int T, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaDoubleComplex * d, magmaDoubleComplex * z) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { magmaDoubleComplex val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ) { shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_zcgmerge_spmvsellpt_kernel_32( int num_rows, int blocksize, int T, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaDoubleComplex * d, magmaDoubleComplex * z) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { magmaDoubleComplex val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ) { shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // kernel to handle scalars __global__ void // rho = beta/tmp; gamma = beta; magma_zcg_rhokernel( magmaDoubleComplex * skp ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ) { magmaDoubleComplex tmp = skp[1]; skp[3] = tmp/skp[4]; skp[2] = tmp; } } /** Purpose ------- Merges the first SpmV using different formats with the dot product and the computation of rho Arguments --------- @param[in] A magma_z_matrix input matrix @param[in] d1 magmaDoubleComplex_ptr temporary vector @param[in] d2 magmaDoubleComplex_ptr temporary vector @param[in] dd 
magmaDoubleComplex_ptr input vector d @param[out] dz magmaDoubleComplex_ptr input vector z @param[out] skp magmaDoubleComplex_ptr array for parameters ( skp[3]=rho ) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zcgmerge_spmv1( magma_z_matrix A, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr dd, magmaDoubleComplex_ptr dz, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { // set queue for old dense routines magma_queue_t orig_queue; magmablasGetKernelStream( &orig_queue ); int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( A.num_rows, local_block_size ) ); dim3 Gs_next; int Ms = local_block_size * sizeof( magmaDoubleComplex ); magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; if ( A.storage_type == Magma_CSR ) hipLaunchKernelGGL(( magma_zcgmerge_spmvcsr_kernel), dim3(Gs), dim3(Bs), Ms, queue , A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_ELLPACKT ) hipLaunchKernelGGL(( magma_zcgmerge_spmvellpack_kernel), dim3(Gs), dim3(Bs), Ms, queue , A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_ELL ) hipLaunchKernelGGL(( magma_zcgmerge_spmvell_kernel), dim3(Gs), dim3(Bs), Ms, queue , A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_SELLP ) { int num_threadssellp = A.blocksize*A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threadssellp > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( A.blocksize, A.alignment, 1); int dimgrid1 = int( sqrt( double( A.numblocks ))); int dimgrid2 = magma_ceildiv( A.numblocks, dimgrid1 ); dim3 gridsellp( dimgrid1, dimgrid2, 1); int Mssellp = num_threadssellp * sizeof( magmaDoubleComplex ); if ( A.alignment == 8) hipLaunchKernelGGL(( magma_zcgmerge_spmvsellpt_kernel_8) , dim3(gridsellp), dim3(block), Mssellp, queue , A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else if ( A.alignment == 16) hipLaunchKernelGGL(( magma_zcgmerge_spmvsellpt_kernel_16) , dim3(gridsellp), dim3(block), Mssellp, queue , A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else if ( A.alignment == 32) hipLaunchKernelGGL(( magma_zcgmerge_spmvsellpt_kernel_32) , dim3(gridsellp), dim3(block), Mssellp, queue , A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else printf("error: alignment not supported.\n"); // in case of using SELLP, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
hipLaunchKernelGGL(( magma_zcgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue , A.num_rows, dz, dd, d1 ); } else if ( A.storage_type == Magma_ELLRT ) { // in case of using ELLRT, we need a different grid, assigning // threads_per_row processors to each row // the block size is num_threads // fixed values int num_blocks = ( (A.num_rows+A.blocksize-1)/A.blocksize); int num_threads = A.alignment*A.blocksize; int real_row_length = ((int)(A.max_nnz_row+A.alignment-1)/A.alignment) *A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = int( sqrt( double( num_blocks ))); int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 ); dim3 gridellrt( dimgrid1, dimgrid2, 1); int Mellrt = A.alignment * A.blocksize * sizeof( magmaDoubleComplex ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if ( A.alignment == 32 ) { hipLaunchKernelGGL(( magma_zcgmerge_spmvellpackrt_kernel_32) , dim3(gridellrt), dim3(num_threads) , Mellrt, queue , A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else if ( A.alignment == 16 ) { hipLaunchKernelGGL(( magma_zcgmerge_spmvellpackrt_kernel_16) , dim3(gridellrt), dim3(num_threads) , Mellrt, queue , A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else if ( A.alignment == 8 ) { hipLaunchKernelGGL(( magma_zcgmerge_spmvellpackrt_kernel_8) , dim3(gridellrt), dim3(num_threads) , Mellrt, queue , A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else { printf("error: alignment %d not supported.\n", int(A.alignment) ); return MAGMA_ERR_NOT_SUPPORTED; } // in case of using ELLRT, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
hipLaunchKernelGGL(( magma_zcgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue , A.num_rows, dz, dd, d1 ); } while( Gs.x > 1 ) { Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x; if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_zcgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0, Gs.x, A.num_rows, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_zcopyvector( 1, aux1, 1, skp+4, 1 ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_zcg_rhokernel), dim3(Gs2), dim3(Bs2), 0, 0, skp ); magmablasSetKernelStream( orig_queue ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // updates x and r and computes the first part of the dot product r*r __global__ void magma_zcgmerge_xrbeta_kernel( int n, magmaDoubleComplex * x, magmaDoubleComplex * r, magmaDoubleComplex * d, magmaDoubleComplex * z, magmaDoubleComplex * skp, magmaDoubleComplex * vtmp ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; magmaDoubleComplex rho = skp[3]; magmaDoubleComplex mrho = MAGMA_Z_MAKE( -1.0, 0.0)*rho; temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0); if( i<n ) { x[i] += rho * d[i]; r[i] += mrho * z[i]; temp[ Idx ] = r[i] * r[i]; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // kernel to handle scalars __global__ void //alpha = beta / gamma magma_zcg_alphabetakernel( magmaDoubleComplex * skp ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ) { magmaDoubleComplex tmp1 = skp[1]; skp[0] = tmp1/skp[2]; //printf("beta=%e\n", MAGMA_Z_REAL(tmp1)); } } // update search Krylov vector d __global__ void magma_zcg_d_kernel( int n, magmaDoubleComplex * skp, magmaDoubleComplex * r, magmaDoubleComplex * d ) { int i = blockIdx.x * blockDim.x + threadIdx.x; magmaDoubleComplex alpha = skp[0]; if( i<n ) { d[i] = r[i] + alpha * d[i]; } } /** Purpose ------- Merges the update of r and x with the dot product and performs then the update for the Krylov vector d Arguments --------- @param[in] n int dimension n @param[in] d1 magmaDoubleComplex_ptr temporary vector @param[in] d2 magmaDoubleComplex_ptr temporary vector @param[in,out] dx magmaDoubleComplex_ptr input vector x @param[in,out] dr magmaDoubleComplex_ptr input/output vector r @param[in] dd magmaDoubleComplex_ptr input vector d @param[in] dz 
magmaDoubleComplex_ptr input vector z @param[in] skp magmaDoubleComplex_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zsygpuk ********************************************************************/ extern "C" magma_int_t magma_zcgmerge_xrbeta( int n, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr dx, magmaDoubleComplex_ptr dr, magmaDoubleComplex_ptr dd, magmaDoubleComplex_ptr dz, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { // set queue for old dense routines magma_queue_t orig_queue; magmablasGetKernelStream( &orig_queue ); int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( magmaDoubleComplex ); magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; hipLaunchKernelGGL(( magma_zcgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, 0, n, dx, dr, dd, dz, skp, d1); while( Gs.x > 1 ) { Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x; if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_zcgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0, Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_zcopyvector( 1, aux1, 1, skp+1, 1 ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_zcg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, 0, skp ); dim3 Bs3( local_block_size ); dim3 Gs3( magma_ceildiv( n, local_block_size ) ); hipLaunchKernelGGL(( magma_zcg_d_kernel), dim3(Gs3), dim3(Bs3), 0, 0, n, skp, dr, dd ); magmablasSetKernelStream( orig_queue ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */
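The magma_zcgmerge_* kernels above all share one in-block reduction idiom: each thread writes its partial value into extern shared memory, the upper halves are folded down (128, then 64) with __syncthreads() between the steps, and the remaining 64 values are combined by the first warp through a volatile pointer (the complex precisions keep explicit __syncthreads() in that tail instead). The sketch below is a simplified single-precision version of that pattern, assuming a 256-thread block and plain CUDA types rather than magmaDoubleComplex; it is not MAGMA code, and on recent GPUs the warp-synchronous tail would normally be replaced by __syncwarp() or warp shuffles.

#include <cstdio>
#include <cuda_runtime.h>

// One block reduces blockDim.x partial values to a single sum, mirroring the
// temp[] / volatile temp2[] folding in the kernels above (float, 256 threads).
__global__ void block_sum_kernel(const float* in, float* out, int n)
{
    extern __shared__ float temp[];
    int idx = threadIdx.x;
    int i   = blockIdx.x * blockDim.x + idx;
    temp[idx] = (i < n) ? in[i] : 0.0f;
    __syncthreads();

    if (idx < 128) temp[idx] += temp[idx + 128];
    __syncthreads();
    if (idx <  64) temp[idx] += temp[idx +  64];
    __syncthreads();

    // Warp-synchronous tail: volatile forces shared memory to be re-read at
    // every step, which is what the real-precision branches above rely on.
    if (idx < 32) {
        volatile float* t = temp;
        t[idx] += t[idx + 32]; t[idx] += t[idx + 16];
        t[idx] += t[idx +  8]; t[idx] += t[idx +  4];
        t[idx] += t[idx +  2]; t[idx] += t[idx +  1];
    }
    if (idx == 0) out[blockIdx.x] = temp[0];
}

int main()
{
    const int n = 256;
    float h[n];
    for (int i = 0; i < n; ++i) h[i] = 1.0f;          // expected sum: 256
    float *d_in, *d_out;
    cudaMalloc(&d_in,  n * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h, n * sizeof(float), cudaMemcpyHostToDevice);
    block_sum_kernel<<<1, 256, 256 * sizeof(float)>>>(d_in, d_out, n);
    float result = 0.0f;
    cudaMemcpy(&result, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("block sum = %f\n", result);
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}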
416a68b815d5c92efb62973ea06e795d3129c20f.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @precisions normal z -> c d s @author Hartwig Anzt */ #include "common_magmasparse.h" #define BLOCK_SIZE 512 #define PRECISION_z // These routines merge multiple kernels from zmergecg into one // for a description see // "Reformulated Conjugate Gradient for the Energy-Aware // Solution of Linear Systems on GPUs (ICPP '13) // accelerated reduction for one vector __global__ void magma_zcgreduce_kernel_spmv1( int Gs, int n, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_Z_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ] : MAGMA_Z_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using CSR and the first step of the reduction __global__ void magma_zcgmerge_spmvcsr_kernel( int n, magmaDoubleComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaDoubleComplex * d, magmaDoubleComplex * z, magmaDoubleComplex * vtmp ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0); if( i<n ) { magmaDoubleComplex dot = MAGMA_Z_ZERO; int start = drowptr[ i ]; int end = drowptr[ i+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * d[ dcolind[j] ]; z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) 
if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELL and the first step of the reduction __global__ void magma_zcgmerge_spmvell_kernel( int n, int num_cols_per_row, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * d, magmaDoubleComplex * z, magmaDoubleComplex * vtmp ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0); if(i < n ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row; k++ ) { int col = dcolind [ n * k + i ]; magmaDoubleComplex val = dval [ n * k + i ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLPACK and the first step of the reduction __global__ void magma_zcgmerge_spmvellpack_kernel( int n, int num_cols_per_row, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * d, magmaDoubleComplex * z, magmaDoubleComplex * vtmp ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0); if(i < n ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row; k++ ) { int col = dcolind [ num_cols_per_row * i + k ]; magmaDoubleComplex val = dval [ num_cols_per_row * i + k ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ 
Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_zcgmerge_spmvellpackrt_kernel_8( int n, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaDoubleComplex * d, magmaDoubleComplex * z, magmaDoubleComplex * vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x; // global thread index int idb = threadIdx.x; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaDoubleComplex shared[]; if(i < n ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { // original code in paper (not working for me) //magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 4 ) { shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_zcgmerge_spmvellpackrt_kernel_16( int n, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaDoubleComplex * d, magmaDoubleComplex * z, magmaDoubleComplex * vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x; // global thread index int idb = threadIdx.x; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaDoubleComplex shared[]; if(i < n ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { // original code in paper (not working for me) //magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 8 ) { shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_zcgmerge_spmvellpackrt_kernel_32( int n, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaDoubleComplex * d, magmaDoubleComplex * z, magmaDoubleComplex * vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x; // global thread index int idb = threadIdx.x; // local thread index int idp = 
idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaDoubleComplex shared[]; if(i < n ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { // original code in paper (not working for me) //magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 16 ) { shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // additional kernel necessary to compute first reduction step __global__ void magma_zcgmerge_spmvellpackrt_kernel2( int n, magmaDoubleComplex * z, magmaDoubleComplex * d, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_Z_MAKE(0.0, 0.0); __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using SELLC __global__ void magma_zcgmerge_spmvsellc_kernel( int num_rows, int blocksize, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaDoubleComplex * d, magmaDoubleComplex * z, magmaDoubleComplex * vtmp) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int offset = drowptr[ blockIdx.x ]; int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0); if(i < num_rows ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < border; n ++) { int col = dcolind [offset+ blocksize * n + Idx ]; magmaDoubleComplex val = dval[offset+ blocksize * n + Idx]; if( val != 0) { dot=dot+val*d[col]; } } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += 
temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_zcgmerge_spmvsellpt_kernel_8( int num_rows, int blocksize, int T, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaDoubleComplex * d, magmaDoubleComplex * z) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { magmaDoubleComplex val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ) { shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_zcgmerge_spmvsellpt_kernel_16( int num_rows, int blocksize, int T, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaDoubleComplex * d, magmaDoubleComplex * z) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { magmaDoubleComplex val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ) { shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_zcgmerge_spmvsellpt_kernel_32( int num_rows, int blocksize, int T, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowptr, magmaDoubleComplex * d, magmaDoubleComplex * z) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ magmaDoubleComplex shared[]; if(row < num_rows ) { magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { magmaDoubleComplex val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ) { shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // kernel to handle scalars __global__ void // rho = beta/tmp; gamma = beta; magma_zcg_rhokernel( magmaDoubleComplex * skp ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ) { magmaDoubleComplex tmp = skp[1]; skp[3] = tmp/skp[4]; skp[2] = tmp; } } /** Purpose ------- Merges the first SpmV using different formats with the dot product and the computation of rho Arguments --------- @param[in] A magma_z_matrix input matrix @param[in] d1 magmaDoubleComplex_ptr temporary vector @param[in] d2 magmaDoubleComplex_ptr temporary vector @param[in] dd 
magmaDoubleComplex_ptr input vector d @param[out] dz magmaDoubleComplex_ptr input vector z @param[out] skp magmaDoubleComplex_ptr array for parameters ( skp[3]=rho ) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zcgmerge_spmv1( magma_z_matrix A, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr dd, magmaDoubleComplex_ptr dz, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { // set queue for old dense routines magma_queue_t orig_queue; magmablasGetKernelStream( &orig_queue ); int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( A.num_rows, local_block_size ) ); dim3 Gs_next; int Ms = local_block_size * sizeof( magmaDoubleComplex ); magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; if ( A.storage_type == Magma_CSR ) magma_zcgmerge_spmvcsr_kernel<<<Gs, Bs, Ms, queue >>> ( A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_ELLPACKT ) magma_zcgmerge_spmvellpack_kernel<<<Gs, Bs, Ms, queue >>> ( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_ELL ) magma_zcgmerge_spmvell_kernel<<<Gs, Bs, Ms, queue >>> ( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_SELLP ) { int num_threadssellp = A.blocksize*A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threadssellp > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( A.blocksize, A.alignment, 1); int dimgrid1 = int( sqrt( double( A.numblocks ))); int dimgrid2 = magma_ceildiv( A.numblocks, dimgrid1 ); dim3 gridsellp( dimgrid1, dimgrid2, 1); int Mssellp = num_threadssellp * sizeof( magmaDoubleComplex ); if ( A.alignment == 8) magma_zcgmerge_spmvsellpt_kernel_8 <<< gridsellp, block, Mssellp, queue >>> ( A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else if ( A.alignment == 16) magma_zcgmerge_spmvsellpt_kernel_16 <<< gridsellp, block, Mssellp, queue >>> ( A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else if ( A.alignment == 32) magma_zcgmerge_spmvsellpt_kernel_32 <<< gridsellp, block, Mssellp, queue >>> ( A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else printf("error: alignment not supported.\n"); // in case of using SELLP, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
magma_zcgmerge_spmvellpackrt_kernel2<<<Gs, Bs, Ms, queue >>> ( A.num_rows, dz, dd, d1 ); } else if ( A.storage_type == Magma_ELLRT ) { // in case of using ELLRT, we need a different grid, assigning // threads_per_row processors to each row // the block size is num_threads // fixed values int num_blocks = ( (A.num_rows+A.blocksize-1)/A.blocksize); int num_threads = A.alignment*A.blocksize; int real_row_length = ((int)(A.max_nnz_row+A.alignment-1)/A.alignment) *A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = int( sqrt( double( num_blocks ))); int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 ); dim3 gridellrt( dimgrid1, dimgrid2, 1); int Mellrt = A.alignment * A.blocksize * sizeof( magmaDoubleComplex ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if ( A.alignment == 32 ) { magma_zcgmerge_spmvellpackrt_kernel_32 <<< gridellrt, num_threads , Mellrt, queue >>> ( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else if ( A.alignment == 16 ) { magma_zcgmerge_spmvellpackrt_kernel_16 <<< gridellrt, num_threads , Mellrt, queue >>> ( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else if ( A.alignment == 8 ) { magma_zcgmerge_spmvellpackrt_kernel_8 <<< gridellrt, num_threads , Mellrt, queue >>> ( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else { printf("error: alignment %d not supported.\n", int(A.alignment) ); return MAGMA_ERR_NOT_SUPPORTED; } // in case of using ELLRT, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
magma_zcgmerge_spmvellpackrt_kernel2<<<Gs, Bs, Ms, queue >>> ( A.num_rows, dz, dd, d1 ); } while( Gs.x > 1 ) { Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x; if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_zcgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2 >>> ( Gs.x, A.num_rows, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_zcopyvector( 1, aux1, 1, skp+4, 1 ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_zcg_rhokernel<<<Gs2, Bs2, 0>>>( skp ); magmablasSetKernelStream( orig_queue ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // updates x and r and computes the first part of the dot product r*r __global__ void magma_zcgmerge_xrbeta_kernel( int n, magmaDoubleComplex * x, magmaDoubleComplex * r, magmaDoubleComplex * d, magmaDoubleComplex * z, magmaDoubleComplex * skp, magmaDoubleComplex * vtmp ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; magmaDoubleComplex rho = skp[3]; magmaDoubleComplex mrho = MAGMA_Z_MAKE( -1.0, 0.0)*rho; temp[ Idx ] = MAGMA_Z_MAKE( 0.0, 0.0); if( i<n ) { x[i] += rho * d[i]; r[i] += mrho * z[i]; temp[ Idx ] = r[i] * r[i]; } __syncthreads(); if ( Idx < 128 ) { temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ) { temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ) { temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ) { volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ) { volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ) { vtmp[ blockIdx.x ] = temp[ 0 ]; } } // kernel to handle scalars __global__ void //alpha = beta / gamma magma_zcg_alphabetakernel( magmaDoubleComplex * skp ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ) { magmaDoubleComplex tmp1 = skp[1]; skp[0] = tmp1/skp[2]; //printf("beta=%e\n", MAGMA_Z_REAL(tmp1)); } } // update search Krylov vector d __global__ void magma_zcg_d_kernel( int n, magmaDoubleComplex * skp, magmaDoubleComplex * r, magmaDoubleComplex * d ) { int i = blockIdx.x * blockDim.x + threadIdx.x; magmaDoubleComplex alpha = skp[0]; if( i<n ) { d[i] = r[i] + alpha * d[i]; } } /** Purpose ------- Merges the update of r and x with the dot product and performs then the update for the Krylov vector d Arguments --------- @param[in] n int dimension n @param[in] d1 magmaDoubleComplex_ptr temporary vector @param[in] d2 magmaDoubleComplex_ptr temporary vector @param[in,out] dx magmaDoubleComplex_ptr input vector x @param[in,out] dr magmaDoubleComplex_ptr input/output vector r @param[in] dd magmaDoubleComplex_ptr input vector d @param[in] dz magmaDoubleComplex_ptr input vector z @param[in] skp magmaDoubleComplex_ptr array for parameters @param[in] 
queue magma_queue_t Queue to execute in. @ingroup magmasparse_zsygpuk ********************************************************************/ extern "C" magma_int_t magma_zcgmerge_xrbeta( int n, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr dx, magmaDoubleComplex_ptr dr, magmaDoubleComplex_ptr dd, magmaDoubleComplex_ptr dz, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { // set queue for old dense routines magma_queue_t orig_queue; magmablasGetKernelStream( &orig_queue ); int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( magmaDoubleComplex ); magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; magma_zcgmerge_xrbeta_kernel<<<Gs, Bs, Ms>>> ( n, dx, dr, dd, dz, skp, d1); while( Gs.x > 1 ) { Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x; if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_zcgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2 >>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_zcopyvector( 1, aux1, 1, skp+1, 1 ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_zcg_alphabetakernel<<<Gs2, Bs2, 0>>>( skp ); dim3 Bs3( local_block_size ); dim3 Gs3( magma_ceildiv( n, local_block_size ) ); magma_zcg_d_kernel<<<Gs3, Bs3, 0>>>( n, skp, dr, dd ); magmablasSetKernelStream( orig_queue ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */
6121cc9240303155aacb8e4908d972b76ce3216e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <time.h> const int INF = (1 << 30) - 1; int vertex_num, edge_num, matrix_size; int *dist; double cal_time(struct timespec start, struct timespec end) { struct timespec temp; if ((end.tv_nsec - start.tv_nsec) < 0) { temp.tv_sec = end.tv_sec - start.tv_sec - 1; temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec; } else { temp.tv_sec = end.tv_sec - start.tv_sec; temp.tv_nsec = end.tv_nsec - start.tv_nsec; } return temp.tv_sec + (double)temp.tv_nsec / 1000000000.0; } __device__ __host__ size_t index_convert(int i, int j, int row_size) { return i * row_size + j; } void input(char *input_file_path, int block_factor) { FILE *input_file = fopen(input_file_path, "rb"); fread(&vertex_num, sizeof(int), 1, input_file); fread(&edge_num, sizeof(int), 1, input_file); matrix_size = ceil((double)vertex_num / (double)block_factor) * block_factor; hipHostMalloc((void **)&dist, matrix_size * matrix_size * sizeof(int)); for (int i = 0; i < matrix_size; ++i) { for (int j = 0; j < matrix_size; ++j) { if (i != j) dist[index_convert(i, j, matrix_size)] = INF; else if (i < vertex_num) dist[index_convert(i, j, matrix_size)] = 0; else dist[index_convert(i, j, matrix_size)] = INF; } } int data[3]; for (int i = 0; i < edge_num; ++i) { fread(data, sizeof(int), 3, input_file); dist[index_convert(data[0], data[1], matrix_size)] = data[2]; } fclose(input_file); } void output(char *output_file_path) { FILE *output_file = fopen(output_file_path, "w"); for (int i = 0; i < vertex_num; ++i) { fwrite(&dist[index_convert(i, 0, matrix_size)], sizeof(int), vertex_num, output_file); } fclose(output_file); } __constant__ int size[3]; //matrix size, block_factor, grid_size __global__ void phase1(int *d_dist, int round) { __shared__ int pivot[1024]; int i = threadIdx.y; int j = threadIdx.x; int i_offset = 32 * round; int j_offset = 32 * round; pivot[index_convert(i, j, 32)] = d_dist[index_convert(i_offset + i, j_offset + j, size[0])]; #pragma unroll 32 for (int k = 0; k < 32; ++k) { __syncthreads(); if (pivot[index_convert(i, j, 32)] > pivot[index_convert(i, k, 32)] + pivot[index_convert(k, j, 32)]) pivot[index_convert(i, j, 32)] = pivot[index_convert(i, k, 32)] + pivot[index_convert(k, j, 32)]; } d_dist[index_convert(i_offset + i, j_offset + j, size[0])] = pivot[index_convert(i, j, 32)]; } __global__ void phase2(int *d_dist, int round) { __shared__ int self[1024], pivot[1024]; int i = threadIdx.y; int j = threadIdx.x; int i_offset, j_offset; if (blockIdx.x == 0 && blockIdx.y != round) { i_offset = 32 * blockIdx.y; j_offset = 32 * round; self[index_convert(i, j, 32)] = d_dist[index_convert(i_offset + i, j_offset + j, size[0])]; pivot[index_convert(i, j, 32)] = d_dist[index_convert(j_offset + i, j_offset + j, size[0])]; #pragma unroll 32 for (int k = 0; k < 32; ++k) { __syncthreads(); if (self[index_convert(i, j, 32)] > self[index_convert(i, k, 32)] + pivot[index_convert(k, j, 32)]) self[index_convert(i, j, 32)] = self[index_convert(i, k, 32)] + pivot[index_convert(k, j, 32)]; } d_dist[index_convert(i_offset + i, j_offset + j, size[0])] = self[index_convert(i, j, 32)]; } else if (blockIdx.y != round) { i_offset = 32 * round; j_offset = 32 * blockIdx.y; self[index_convert(i, j, 32)] = d_dist[index_convert(i_offset + i, j_offset + j, size[0])]; pivot[index_convert(i, j, 32)] = d_dist[index_convert(i_offset + i, i_offset + j, size[0])]; #pragma unroll 32 for (int k = 
0; k < 32; ++k) { __syncthreads(); if (self[index_convert(i, j, 32)] > pivot[index_convert(i, k, 32)] + self[index_convert(k, j, 32)]) self[index_convert(i, j, 32)] = pivot[index_convert(i, k, 32)] + self[index_convert(k, j, 32)]; } d_dist[index_convert(i_offset + i, j_offset + j, size[0])] = self[index_convert(i, j, 32)]; } } __global__ void phase3(int *d_dist, int round, int grid_offset) { __shared__ int col[1024], row[1024]; int self; int block_i = grid_offset + blockIdx.y; int block_j = blockIdx.x; if (block_i == round || block_j == round) return; int i = threadIdx.y; int j = threadIdx.x; int i_offset = 32 * block_i; int j_offset = 32 * block_j; int r_offset = 32 * round; self = d_dist[index_convert(i_offset + i, j_offset + j, size[0])]; col[index_convert(i, j, 32)] = d_dist[index_convert(i_offset + i, r_offset + j, size[0])]; row[index_convert(i, j, 32)] = d_dist[index_convert(r_offset + i, j_offset + j, size[0])]; #pragma unroll 32 for (int k = 0; k < 32; ++k) { __syncthreads(); if (self > col[index_convert(i, k, 32)] + row[index_convert(k, j, 32)]) self = col[index_convert(i, k, 32)] + row[index_convert(k, j, 32)]; } d_dist[index_convert(i_offset + i, j_offset + j, size[0])] = self; } int main(int argc, char **argv) { const int block_factor = 32, device_num = 2; input(argv[1], block_factor); int grid_size = matrix_size / block_factor; int *d_dist[2]; #pragma omp parallel num_threads(device_num) { int device_id = omp_get_thread_num(); hipSetDevice(device_id); int size_info[3] = {matrix_size, block_factor, grid_size}; hipMemcpyToSymbol(size, size_info, 3 * sizeof(int)); int grid_partition = grid_size / device_num; int grid_offset = device_id * grid_partition; int grid_count = grid_partition; if (device_id == device_num - 1) grid_count += grid_size % device_num; size_t grid_start = grid_offset * block_factor * matrix_size; hipMalloc(&(d_dist[device_id]), (size_t)sizeof(int) * matrix_size * matrix_size); #pragma omp barrier hipMemcpy(&(d_dist[device_id][grid_start]), &(dist[grid_start]), (size_t)sizeof(int) * block_factor * grid_count * matrix_size, hipMemcpyHostToDevice); dim3 block(block_factor, block_factor); dim3 grid2(2, grid_size); dim3 grid3(grid_size, grid_count); for (int r = 0; r < grid_size; ++r) { if (grid_offset <= r && r < grid_offset + grid_count) { size_t copy_start = r * block_factor * matrix_size; if (device_id == 0) hipMemcpy(&(d_dist[1][copy_start]), &(d_dist[0][copy_start]), (size_t)sizeof(int) * block_factor * matrix_size, hipMemcpyDeviceToDevice); else hipMemcpy(&(d_dist[0][copy_start]), &(d_dist[1][copy_start]), (size_t)sizeof(int) * block_factor * matrix_size, hipMemcpyDeviceToDevice); } #pragma omp barrier hipLaunchKernelGGL(( phase1), dim3(1), dim3(block), 0, 0, d_dist[device_id], r); hipLaunchKernelGGL(( phase2), dim3(grid2), dim3(block), 0, 0, d_dist[device_id], r); hipLaunchKernelGGL(( phase3), dim3(grid3), dim3(block), 0, 0, d_dist[device_id], r, grid_offset); } hipMemcpy(&(dist[grid_start]), &(d_dist[device_id][grid_start]), (size_t)sizeof(int) * block_factor * grid_count * matrix_size, hipMemcpyDeviceToHost); hipFree(d_dist[omp_get_thread_num()]); #pragma omp barrier } output(argv[2]); hipFree(dist); return 0; }
6121cc9240303155aacb8e4908d972b76ce3216e.cu
#include <math.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <time.h> const int INF = (1 << 30) - 1; int vertex_num, edge_num, matrix_size; int *dist; double cal_time(struct timespec start, struct timespec end) { struct timespec temp; if ((end.tv_nsec - start.tv_nsec) < 0) { temp.tv_sec = end.tv_sec - start.tv_sec - 1; temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec; } else { temp.tv_sec = end.tv_sec - start.tv_sec; temp.tv_nsec = end.tv_nsec - start.tv_nsec; } return temp.tv_sec + (double)temp.tv_nsec / 1000000000.0; } __device__ __host__ size_t index_convert(int i, int j, int row_size) { return i * row_size + j; } void input(char *input_file_path, int block_factor) { FILE *input_file = fopen(input_file_path, "rb"); fread(&vertex_num, sizeof(int), 1, input_file); fread(&edge_num, sizeof(int), 1, input_file); matrix_size = ceil((double)vertex_num / (double)block_factor) * block_factor; cudaMallocHost((void **)&dist, matrix_size * matrix_size * sizeof(int)); for (int i = 0; i < matrix_size; ++i) { for (int j = 0; j < matrix_size; ++j) { if (i != j) dist[index_convert(i, j, matrix_size)] = INF; else if (i < vertex_num) dist[index_convert(i, j, matrix_size)] = 0; else dist[index_convert(i, j, matrix_size)] = INF; } } int data[3]; for (int i = 0; i < edge_num; ++i) { fread(data, sizeof(int), 3, input_file); dist[index_convert(data[0], data[1], matrix_size)] = data[2]; } fclose(input_file); } void output(char *output_file_path) { FILE *output_file = fopen(output_file_path, "w"); for (int i = 0; i < vertex_num; ++i) { fwrite(&dist[index_convert(i, 0, matrix_size)], sizeof(int), vertex_num, output_file); } fclose(output_file); } __constant__ int size[3]; //matrix size, block_factor, grid_size __global__ void phase1(int *d_dist, int round) { __shared__ int pivot[1024]; int i = threadIdx.y; int j = threadIdx.x; int i_offset = 32 * round; int j_offset = 32 * round; pivot[index_convert(i, j, 32)] = d_dist[index_convert(i_offset + i, j_offset + j, size[0])]; #pragma unroll 32 for (int k = 0; k < 32; ++k) { __syncthreads(); if (pivot[index_convert(i, j, 32)] > pivot[index_convert(i, k, 32)] + pivot[index_convert(k, j, 32)]) pivot[index_convert(i, j, 32)] = pivot[index_convert(i, k, 32)] + pivot[index_convert(k, j, 32)]; } d_dist[index_convert(i_offset + i, j_offset + j, size[0])] = pivot[index_convert(i, j, 32)]; } __global__ void phase2(int *d_dist, int round) { __shared__ int self[1024], pivot[1024]; int i = threadIdx.y; int j = threadIdx.x; int i_offset, j_offset; if (blockIdx.x == 0 && blockIdx.y != round) { i_offset = 32 * blockIdx.y; j_offset = 32 * round; self[index_convert(i, j, 32)] = d_dist[index_convert(i_offset + i, j_offset + j, size[0])]; pivot[index_convert(i, j, 32)] = d_dist[index_convert(j_offset + i, j_offset + j, size[0])]; #pragma unroll 32 for (int k = 0; k < 32; ++k) { __syncthreads(); if (self[index_convert(i, j, 32)] > self[index_convert(i, k, 32)] + pivot[index_convert(k, j, 32)]) self[index_convert(i, j, 32)] = self[index_convert(i, k, 32)] + pivot[index_convert(k, j, 32)]; } d_dist[index_convert(i_offset + i, j_offset + j, size[0])] = self[index_convert(i, j, 32)]; } else if (blockIdx.y != round) { i_offset = 32 * round; j_offset = 32 * blockIdx.y; self[index_convert(i, j, 32)] = d_dist[index_convert(i_offset + i, j_offset + j, size[0])]; pivot[index_convert(i, j, 32)] = d_dist[index_convert(i_offset + i, i_offset + j, size[0])]; #pragma unroll 32 for (int k = 0; k < 32; ++k) { __syncthreads(); if (self[index_convert(i, j, 32)] > 
pivot[index_convert(i, k, 32)] + self[index_convert(k, j, 32)]) self[index_convert(i, j, 32)] = pivot[index_convert(i, k, 32)] + self[index_convert(k, j, 32)]; } d_dist[index_convert(i_offset + i, j_offset + j, size[0])] = self[index_convert(i, j, 32)]; } } __global__ void phase3(int *d_dist, int round, int grid_offset) { __shared__ int col[1024], row[1024]; int self; int block_i = grid_offset + blockIdx.y; int block_j = blockIdx.x; if (block_i == round || block_j == round) return; int i = threadIdx.y; int j = threadIdx.x; int i_offset = 32 * block_i; int j_offset = 32 * block_j; int r_offset = 32 * round; self = d_dist[index_convert(i_offset + i, j_offset + j, size[0])]; col[index_convert(i, j, 32)] = d_dist[index_convert(i_offset + i, r_offset + j, size[0])]; row[index_convert(i, j, 32)] = d_dist[index_convert(r_offset + i, j_offset + j, size[0])]; #pragma unroll 32 for (int k = 0; k < 32; ++k) { __syncthreads(); if (self > col[index_convert(i, k, 32)] + row[index_convert(k, j, 32)]) self = col[index_convert(i, k, 32)] + row[index_convert(k, j, 32)]; } d_dist[index_convert(i_offset + i, j_offset + j, size[0])] = self; } int main(int argc, char **argv) { const int block_factor = 32, device_num = 2; input(argv[1], block_factor); int grid_size = matrix_size / block_factor; int *d_dist[2]; #pragma omp parallel num_threads(device_num) { int device_id = omp_get_thread_num(); cudaSetDevice(device_id); int size_info[3] = {matrix_size, block_factor, grid_size}; cudaMemcpyToSymbol(size, size_info, 3 * sizeof(int)); int grid_partition = grid_size / device_num; int grid_offset = device_id * grid_partition; int grid_count = grid_partition; if (device_id == device_num - 1) grid_count += grid_size % device_num; size_t grid_start = grid_offset * block_factor * matrix_size; cudaMalloc(&(d_dist[device_id]), (size_t)sizeof(int) * matrix_size * matrix_size); #pragma omp barrier cudaMemcpy(&(d_dist[device_id][grid_start]), &(dist[grid_start]), (size_t)sizeof(int) * block_factor * grid_count * matrix_size, cudaMemcpyHostToDevice); dim3 block(block_factor, block_factor); dim3 grid2(2, grid_size); dim3 grid3(grid_size, grid_count); for (int r = 0; r < grid_size; ++r) { if (grid_offset <= r && r < grid_offset + grid_count) { size_t copy_start = r * block_factor * matrix_size; if (device_id == 0) cudaMemcpy(&(d_dist[1][copy_start]), &(d_dist[0][copy_start]), (size_t)sizeof(int) * block_factor * matrix_size, cudaMemcpyDeviceToDevice); else cudaMemcpy(&(d_dist[0][copy_start]), &(d_dist[1][copy_start]), (size_t)sizeof(int) * block_factor * matrix_size, cudaMemcpyDeviceToDevice); } #pragma omp barrier phase1<<<1, block>>>(d_dist[device_id], r); phase2<<<grid2, block>>>(d_dist[device_id], r); phase3<<<grid3, block>>>(d_dist[device_id], r, grid_offset); } cudaMemcpy(&(dist[grid_start]), &(d_dist[device_id][grid_start]), (size_t)sizeof(int) * block_factor * grid_count * matrix_size, cudaMemcpyDeviceToHost); cudaFree(d_dist[omp_get_thread_num()]); #pragma omp barrier } output(argv[2]); cudaFree(dist); return 0; }
5e49f6f6f067f0ef11d2dbb41b2b46eb214dae3e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "Div0.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *matrix = NULL; hipMalloc(&matrix, XSIZE*YSIZE); int *newMatrix = NULL; hipMalloc(&newMatrix, XSIZE*YSIZE); int nx = 1; int ny = 1; int Max = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( Div0), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,newMatrix,nx,ny,Max); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( Div0), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,newMatrix,nx,ny,Max); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( Div0), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,newMatrix,nx,ny,Max); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
5e49f6f6f067f0ef11d2dbb41b2b46eb214dae3e.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "Div0.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *matrix = NULL; cudaMalloc(&matrix, XSIZE*YSIZE); int *newMatrix = NULL; cudaMalloc(&newMatrix, XSIZE*YSIZE); int nx = 1; int ny = 1; int Max = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); Div0<<<gridBlock,threadBlock>>>(matrix,newMatrix,nx,ny,Max); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { Div0<<<gridBlock,threadBlock>>>(matrix,newMatrix,nx,ny,Max); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { Div0<<<gridBlock,threadBlock>>>(matrix,newMatrix,nx,ny,Max); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b609be96e38dd8759b66afb874a550f591d61359.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include "aeslib.h"

#define CHECK(call) \
{ \
    const hipError_t error = call; \
    if (error != hipSuccess) \
    { \
        printf("Error: %s:%d, ", __FILE__, __LINE__); \
        printf("code: %d, reason: %s\n", error, \
                hipGetErrorString(error)); \
        exit(1); \
    } \
}

__device__ aes256_context ctx;
__device__ unsigned char key[32];

__device__ void initialize(){
    for( unsigned char i = 0; i < 32; i++ ){
        key[i] = i ;
    }
}

__device__ void encrypt( unsigned char* ptr ){
    printf( "Before encry. %s\n", ptr );
    aes256_init(&ctx, key);
    aes256_encrypt_ecb(&ctx, ptr);
    printf( "Encrypted on gpu %s\n", ptr );
    aes256_init(&ctx, key);
    aes256_decrypt_ecb(&ctx, ptr);
    printf( "Decrypted on gpu %s\n", ptr );
    aes256_done(&ctx);
}

__global__ void copy( unsigned char* ptr ){
    initialize();
    encrypt( ptr );
}

int main(){
    unsigned char* array = (unsigned char* ) malloc( 7 );
    array[0] = 'u'; array[1] = 'm'; array[2] = 'i';
    array[3] = 't'; array[4] = 'a'; array[5] = 'y';
    array[6] = '\0';
    // allocate 7 bytes for the round-tripped string; sizeof(array) would only
    // be the size of the pointer, not of the buffer it points to
    unsigned char* cc = ( unsigned char* )malloc( 7 );
    unsigned char* dev;
    CHECK( hipMalloc( (void**)&dev, 7 ) );
    CHECK(hipMemcpy( dev, array, 7, hipMemcpyHostToDevice ));
    hipLaunchKernelGGL(( copy), dim3(1),dim3(1), 0, 0, dev);
    CHECK(hipMemcpy( cc ,dev, 7, hipMemcpyDeviceToHost ));
    printf("Cuda status: %s\n", hipGetErrorString( hipGetLastError() ) );
    hipDeviceReset();
    printf( "\nEncrypted on gpu %s\n", cc );
    return 0;
}
b609be96e38dd8759b66afb874a550f591d61359.cu
#include<stdio.h>
#include "aeslib.h"

#define CHECK(call) \
{ \
    const cudaError_t error = call; \
    if (error != cudaSuccess) \
    { \
        printf("Error: %s:%d, ", __FILE__, __LINE__); \
        printf("code: %d, reason: %s\n", error, \
                cudaGetErrorString(error)); \
        exit(1); \
    } \
}

__device__ aes256_context ctx;
__device__ unsigned char key[32];

__device__ void initialize(){
    for( unsigned char i = 0; i < 32; i++ ){
        key[i] = i ;
    }
}

__device__ void encrypt( unsigned char* ptr ){
    printf( "Before encry. %s\n", ptr );
    aes256_init(&ctx, key);
    aes256_encrypt_ecb(&ctx, ptr);
    printf( "Encrypted on gpu %s\n", ptr );
    aes256_init(&ctx, key);
    aes256_decrypt_ecb(&ctx, ptr);
    printf( "Decrypted on gpu %s\n", ptr );
    aes256_done(&ctx);
}

__global__ void copy( unsigned char* ptr ){
    initialize();
    encrypt( ptr );
}

int main(){
    unsigned char* array = (unsigned char* ) malloc( 7 );
    array[0] = 'u'; array[1] = 'm'; array[2] = 'i';
    array[3] = 't'; array[4] = 'a'; array[5] = 'y';
    array[6] = '\0';
    // allocate 7 bytes for the round-tripped string; sizeof(array) would only
    // be the size of the pointer, not of the buffer it points to
    unsigned char* cc = ( unsigned char* )malloc( 7 );
    unsigned char* dev;
    CHECK( cudaMalloc( (void**)&dev, 7 ) );
    CHECK(cudaMemcpy( dev, array, 7, cudaMemcpyHostToDevice ));
    copy<<<1,1>>>(dev);
    CHECK(cudaMemcpy( cc ,dev, 7, cudaMemcpyDeviceToHost ));
    printf("Cuda status: %s\n", cudaGetErrorString( cudaGetLastError() ) );
    cudaDeviceReset();
    printf( "\nEncrypted on gpu %s\n", cc );
    return 0;
}
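// Illustrative sketch (not part of the pair above): the program above only
// reads the sticky error via cudaGetLastError() after the copy back to the
// host. A common alternative, shown here with a placeholder no-op kernel, is
// to check the launch immediately and then synchronize so that errors raised
// while the kernel runs are surfaced as well.
#include <stdio.h>
#include <cuda_runtime.h>

__global__ void noop() { }

int main(){
    noop<<<1,1>>>();
    cudaError_t err = cudaGetLastError();          // launch/configuration errors
    if( err != cudaSuccess ){
        printf("launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    err = cudaDeviceSynchronize();                 // errors raised during execution
    if( err != cudaSuccess ){
        printf("kernel failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}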
383af99af25922895175161799a1f50f728a9162.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<opencv2/core/core.hpp> #include<opencv2/highgui/highgui.hpp> #include<iostream> #include<helper_cuda.h> #define DIM 1024 #define PI 3.1415926f #define MAX_TEMP 1.0f #define MIN_TEMP 0.0001f #define SPEED 0.25f using namespace cv; using namespace std; struct DataBlock{ unsigned char *output_bitmap; float *dev_inSrc; float *dev_outSrc; float *dev_constSrc; Mat *bitmap; hipEvent_t st, ed; float totalTime; float frames; }; __device__ unsigned char value(float n1, float n2, int hue){ if(hue>360) hue -= 360; else if(hue<0) hue += 360; if(hue<60) return (unsigned char)(255*(n1+(n2-n1)*hue/60)); if(hue<180) return (unsigned char)(255*n2); if(hue<240) return (unsigned char)(255*(n1+(n2-n1)*(240-hue)/60)); return (unsigned char)(255*n1); } __global__ void float_to_color(unsigned char *optr, float const *outSrc){ int x = threadIdx.x+blockIdx.x*blockDim.x; int y = threadIdx.y+blockIdx.y*blockDim.y; int offset = x+y*blockDim.x*gridDim.x; float l = outSrc[offset]; float s = 1; int h = (180+(int)(360.f*outSrc[offset]))%360; float m1, m2; if(l<=0.5f) m2 = l*(1+s); else m2 = l+s-1*s; m1 = 2*l-m2; optr[offset*4+0] = value(m1,m2,h+120); optr[offset*4+1] = value(m1,m2,h); optr[offset*4+2] = value(m1,m2,h-120); optr[offset*4+3] = 255; } //-------------------------------- __global__ void copy_const_kernel(float *iptr, float const *cptr){ int x = threadIdx.x+blockIdx.x*blockDim.x; int y = threadIdx.y+blockIdx.y*blockDim.y; int offset = x+y*blockDim.x*gridDim.x; // cptr0 if(cptr[offset] != 0) iptr[offset] = cptr[offset]; } __global__ void blend_kernel(float *outSrc, float const *inSrc){ int x = threadIdx.x+blockIdx.x*blockDim.x; int y = threadIdx.y+blockIdx.y*blockDim.y; int offset = x+y*blockDim.x*gridDim.x; int left = offset - 1; int right = offset + 1; if(x==0) left++; if(x == DIM-1) right--; int top = offset - DIM; int bottom = offset + DIM; if(y == 0) top += DIM; if(y == DIM-1) bottom -= DIM; outSrc[offset] = inSrc[offset] + \ SPEED *(inSrc[top] + inSrc[bottom] +\ inSrc[left] + inSrc[right] -\ inSrc[offset]*4); } //------------------------------------ void anim_gpu(DataBlock *d, int ticks){ checkCudaErrors(hipEventRecord(d->st,0)); dim3 grid(DIM/16, DIM/16); dim3 threads(16,16); Mat *bitmap = d->bitmap; for(int i=0; i<90; i++){ hipLaunchKernelGGL(( copy_const_kernel), dim3(grid), dim3(threads), 0, 0, d->dev_inSrc, d->dev_constSrc); hipLaunchKernelGGL(( blend_kernel), dim3(grid),dim3(threads), 0, 0, d->dev_outSrc, d->dev_inSrc); // auto tmp = d->dev_outSrc; d->dev_outSrc = d->dev_inSrc; d->dev_inSrc = tmp; } // , // () hipLaunchKernelGGL(( float_to_color), dim3(grid),dim3(threads), 0, 0, d->output_bitmap, d->dev_inSrc); checkCudaErrors(hipMemcpy(bitmap->data, d->output_bitmap, bitmap->elemSize()*bitmap->total(), hipMemcpyDeviceToHost)); checkCudaErrors(hipEventRecord(d->ed,0)); checkCudaErrors(hipEventSynchronize(d->ed)); float elapsedTime; checkCudaErrors(hipEventElapsedTime(&elapsedTime, d->st, d->ed)); d->totalTime += elapsedTime; ++d->frames; cout<<"["<<d->frames<<"] frames total take times:"<<d->totalTime<<" ms;" <<"cur take time:"<<elapsedTime<<" ms"<<endl; } //=================================== int main(int argc, char *argv[]){ DataBlock data; Mat bitmap(DIM,DIM,CV_8UC4, Scalar(0,0,0,0)); data.bitmap = &bitmap; data.totalTime = 0; data.frames = 0; checkCudaErrors(hipEventCreate(&data.st)); checkCudaErrors(hipEventCreate(&data.ed)); checkCudaErrors(hipEventRecord(data.st,0)); 
checkCudaErrors(hipMalloc((void**)&data.output_bitmap, bitmap.elemSize()*bitmap.total())); checkCudaErrors(hipMalloc((void**)&data.dev_inSrc, bitmap.elemSize()*bitmap.total())); checkCudaErrors(hipMalloc((void**)&data.dev_outSrc, bitmap.elemSize()*bitmap.total())); checkCudaErrors(hipMalloc((void**)&data.dev_constSrc, bitmap.elemSize()*bitmap.total())); // ,floatrgba float *temp = (float*)malloc(bitmap.elemSize()*bitmap.total()); for(int i=0; i<DIM*DIM; i++){ temp[i] = 0.f; int x = i%DIM; int y = i/DIM; // [xmin,ymin,xmax,ymax]=[300,310,600,601] if((x>300) && (x<600) && (y>310) && (y<601)) temp[i] = MAX_TEMP; } // temp[DIM*100 + 100] = (MAX_TEMP+MIN_TEMP)/2; // 0 temp[DIM*700 + 100] = MIN_TEMP; temp[DIM*300+300] = MIN_TEMP; temp[DIM*200+700] = MIN_TEMP; for(int y=800; y<900; y++){ for(int x=400; x<500; x++){ temp[x+y*DIM] = MIN_TEMP; } } // const checkCudaErrors(hipMemcpy(data.dev_constSrc, temp, bitmap.elemSize()*bitmap.total(), hipMemcpyHostToDevice)); //------------------------ //, for(int y=800; y<DIM; y++){ for(int x=0; x<200; x++){ temp[x+y*DIM] = MAX_TEMP; } } checkCudaErrors(hipMemcpy(data.dev_inSrc, temp, bitmap.elemSize()*bitmap.total(), hipMemcpyHostToDevice)); free(temp); for(int i=0; i<190; i++){ anim_gpu(&data,1); // checkCudaErrors(hipMemcpy(bitmap.data,data.output_bitmap,bitmap.elemSize()*bitmap.total(), // hipMemcpyDeviceToHost)); imshow("display",*data.bitmap); waitKey(1); } checkCudaErrors(hipEventDestroy(data.st)); checkCudaErrors(hipEventDestroy(data.ed)); checkCudaErrors(hipFree(data.output_bitmap)); checkCudaErrors(hipFree(data.dev_inSrc)); checkCudaErrors(hipFree(data.dev_outSrc)); checkCudaErrors(hipFree(data.dev_constSrc)); }
383af99af25922895175161799a1f50f728a9162.cu
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<iostream>
#include<helper_cuda.h>

#define DIM 1024
#define PI 3.1415926f
#define MAX_TEMP 1.0f
#define MIN_TEMP 0.0001f
#define SPEED 0.25f

using namespace cv;
using namespace std;

struct DataBlock{
    unsigned char *output_bitmap;
    float *dev_inSrc;
    float *dev_outSrc;
    float *dev_constSrc;
    Mat *bitmap;
    cudaEvent_t st, ed;
    float totalTime;
    float frames;
};

__device__ unsigned char value(float n1, float n2, int hue){
    if(hue>360) hue -= 360;
    else if(hue<0) hue += 360;
    if(hue<60)
        return (unsigned char)(255*(n1+(n2-n1)*hue/60));
    if(hue<180)
        return (unsigned char)(255*n2);
    if(hue<240)
        return (unsigned char)(255*(n1+(n2-n1)*(240-hue)/60));
    return (unsigned char)(255*n1);
}

__global__ void float_to_color(unsigned char *optr, float const *outSrc){
    int x = threadIdx.x+blockIdx.x*blockDim.x;
    int y = threadIdx.y+blockIdx.y*blockDim.y;
    int offset = x+y*blockDim.x*gridDim.x;
    float l = outSrc[offset];
    float s = 1;
    int h = (180+(int)(360.f*outSrc[offset]))%360;
    float m1, m2;
    if(l<=0.5f) m2 = l*(1+s);
    else m2 = l+s-1*s;
    m1 = 2*l-m2;
    optr[offset*4+0] = value(m1,m2,h+120);
    optr[offset*4+1] = value(m1,m2,h);
    optr[offset*4+2] = value(m1,m2,h-120);
    optr[offset*4+3] = 255;
}
//--------------------------------
__global__ void copy_const_kernel(float *iptr, float const *cptr){
    int x = threadIdx.x+blockIdx.x*blockDim.x;
    int y = threadIdx.y+blockIdx.y*blockDim.y;
    int offset = x+y*blockDim.x*gridDim.x;
    // cptr is the heat-source map: copy its non-zero cells (heat sources and
    // heat sinks) over the output image
    if(cptr[offset] != 0) iptr[offset] = cptr[offset];
}

__global__ void blend_kernel(float *outSrc, float const *inSrc){
    int x = threadIdx.x+blockIdx.x*blockDim.x;
    int y = threadIdx.y+blockIdx.y*blockDim.y;
    int offset = x+y*blockDim.x*gridDim.x;
    int left = offset - 1;
    int right = offset + 1;
    if(x==0) left++;
    if(x == DIM-1) right--;
    int top = offset - DIM;
    int bottom = offset + DIM;
    if(y == 0) top += DIM;
    if(y == DIM-1) bottom -= DIM;
    outSrc[offset] = inSrc[offset] + \
        SPEED *(inSrc[top] + inSrc[bottom] +\
                inSrc[left] + inSrc[right] -\
                inSrc[offset]*4);
}
//------------------------------------
void anim_gpu(DataBlock *d, int ticks){
    checkCudaErrors(cudaEventRecord(d->st,0));
    dim3 grid(DIM/16, DIM/16);
    dim3 threads(16,16);
    Mat *bitmap = d->bitmap;
    for(int i=0; i<90; i++){
        copy_const_kernel<<<grid, threads>>>(d->dev_inSrc, d->dev_constSrc);
        blend_kernel<<<grid,threads>>>(d->dev_outSrc, d->dev_inSrc);
        // swap the input and output pointers
        auto tmp = d->dev_outSrc;
        d->dev_outSrc = d->dev_inSrc;
        d->dev_inSrc = tmp;
    }
    // convert each float (one per pixel, holding the current temperature)
    // into a color value, treating the magnitude as a grey level
    float_to_color<<<grid,threads>>>(d->output_bitmap, d->dev_inSrc);
    checkCudaErrors(cudaMemcpy(bitmap->data, d->output_bitmap,
                bitmap->elemSize()*bitmap->total(), cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaEventRecord(d->ed,0));
    checkCudaErrors(cudaEventSynchronize(d->ed));
    float elapsedTime;
    checkCudaErrors(cudaEventElapsedTime(&elapsedTime, d->st, d->ed));
    d->totalTime += elapsedTime;
    ++d->frames;
    cout<<"["<<d->frames<<"] frames total take times:"<<d->totalTime<<" ms;"
        <<"cur take time:"<<elapsedTime<<" ms"<<endl;
}
//===================================
int main(int argc, char *argv[]){
    DataBlock data;
    Mat bitmap(DIM,DIM,CV_8UC4, Scalar(0,0,0,0));
    data.bitmap = &bitmap;
    data.totalTime = 0;
    data.frames = 0;
    checkCudaErrors(cudaEventCreate(&data.st));
    checkCudaErrors(cudaEventCreate(&data.ed));
    checkCudaErrors(cudaEventRecord(data.st,0));
    checkCudaErrors(cudaMalloc((void**)&data.output_bitmap, bitmap.elemSize()*bitmap.total()));
    checkCudaErrors(cudaMalloc((void**)&data.dev_inSrc, bitmap.elemSize()*bitmap.total()));
    checkCudaErrors(cudaMalloc((void**)&data.dev_outSrc, bitmap.elemSize()*bitmap.total()));
    checkCudaErrors(cudaMalloc((void**)&data.dev_constSrc, bitmap.elemSize()*bitmap.total()));
    // seed the heat-source points; one float here stands for the four rgba
    // values of a pixel
    float *temp = (float*)malloc(bitmap.elemSize()*bitmap.total());
    for(int i=0; i<DIM*DIM; i++){
        temp[i] = 0.f;
        int x = i%DIM;
        int y = i/DIM;
        // the region [xmin,ymin,xmax,ymax]=[300,310,600,601] is a white-hot area
        if((x>300) && (x<600) && (y>310) && (y<601))
            temp[i] = MAX_TEMP;
    }
    // add a half-strength heat source in the middle of the image
    temp[DIM*100 + 100] = (MAX_TEMP+MIN_TEMP)/2;
    // a few spots are heat "black holes", i.e. their temperature stays at 0
    temp[DIM*700 + 100] = MIN_TEMP;
    temp[DIM*300+300] = MIN_TEMP;
    temp[DIM*200+700] = MIN_TEMP;
    for(int y=800; y<900; y++){
        for(int x=400; x<500; x++){
            temp[x+y*DIM] = MIN_TEMP;
        }
    }
    // upload the initialized buffer as the constant (heat-source) image
    checkCudaErrors(cudaMemcpy(data.dev_constSrc, temp, bitmap.elemSize()*bitmap.total(),
                cudaMemcpyHostToDevice));
    //------------------------
    // add an extra hot region; it shrinks over time, showing the heat dissipating
    for(int y=800; y<DIM; y++){
        for(int x=0; x<200; x++){
            temp[x+y*DIM] = MAX_TEMP;
        }
    }
    checkCudaErrors(cudaMemcpy(data.dev_inSrc, temp, bitmap.elemSize()*bitmap.total(),
                cudaMemcpyHostToDevice));
    free(temp);
    for(int i=0; i<190; i++){
        anim_gpu(&data,1);
        // checkCudaErrors(cudaMemcpy(bitmap.data,data.output_bitmap,bitmap.elemSize()*bitmap.total(),
        //             cudaMemcpyDeviceToHost));
        imshow("display",*data.bitmap);
        waitKey(1);
    }
    checkCudaErrors(cudaEventDestroy(data.st));
    checkCudaErrors(cudaEventDestroy(data.ed));
    checkCudaErrors(cudaFree(data.output_bitmap));
    checkCudaErrors(cudaFree(data.dev_inSrc));
    checkCudaErrors(cudaFree(data.dev_outSrc));
    checkCudaErrors(cudaFree(data.dev_constSrc));
}
afee2a0ff1488baa7c441b7ef062ac8b2e2dc39f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <kernel.h> #include <stdint.h> __global__ void add_uint8(uint32_t *A, uint32_t *B, uint32_t *C, int N){ int id = blockIdx.x*blockDim.x+threadIdx.x; if (id<N){ C[id] = A[id] + B[id]; } } void cuda_add(uint32_t *A, uint32_t *B, uint32_t *C, int N){ size_t size = N*sizeof(uint32_t); uint32_t *d_A, *d_B, *d_C; hipMalloc(&d_A, size); hipMalloc(&d_B, size); hipMalloc(&d_C, size); hipMemcpy(d_A, A, size, hipMemcpyHostToDevice); hipMemcpy(d_B, B, size, hipMemcpyHostToDevice); int blockSize = 64; int numBlocks = N/64 + 1; hipLaunchKernelGGL(( add_uint8), dim3(numBlocks),dim3(blockSize), 0, 0, d_A,d_B,d_C,N); hipMemcpy(C,d_C, size, hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_B); hipFree(d_C); }
afee2a0ff1488baa7c441b7ef062ac8b2e2dc39f.cu
#include <kernel.h> #include <stdint.h> __global__ void add_uint8(uint32_t *A, uint32_t *B, uint32_t *C, int N){ int id = blockIdx.x*blockDim.x+threadIdx.x; if (id<N){ C[id] = A[id] + B[id]; } } void cuda_add(uint32_t *A, uint32_t *B, uint32_t *C, int N){ size_t size = N*sizeof(uint32_t); uint32_t *d_A, *d_B, *d_C; cudaMalloc(&d_A, size); cudaMalloc(&d_B, size); cudaMalloc(&d_C, size); cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice); int blockSize = 64; int numBlocks = N/64 + 1; add_uint8<<<numBlocks,blockSize>>>(d_A,d_B,d_C,N); cudaMemcpy(C,d_C, size, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); }
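// Illustrative sketch (not taken from the pair above): cuda_add sizes its grid
// as N/64 + 1, which is safe only because add_uint8 bounds-checks id < N, and
// it launches one surplus block whenever N is an exact multiple of 64. The
// usual ceil-divide idiom below covers N with no extra block.
#include <stdio.h>

int main(){
    const int N = 128, blockSize = 64;
    int numBlocksCeil  = (N + blockSize - 1) / blockSize;  // 2 blocks: exactly covers N
    int numBlocksPlus1 = N / blockSize + 1;                // 3 blocks: one extra, idle block
    printf("%d %d\n", numBlocksCeil, numBlocksPlus1);
    return 0;
}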
3ef6ccbb22419d836372b5a853c62cb7b304ff40.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016, Julian Straub <[email protected]> Licensed * under the MIT license. See the license file LICENSE. */ #include <assert.h> #include <tdp/eigen/dense.h> #include <tdp/cuda/cuda.h> #include <tdp/nvidia/helper_cuda.h> #include <tdp/data/image.h> #include <tdp/data/managed_image.h> #include <tdp/camera/camera.h> #include <tdp/camera/camera_poly.h> #include <tdp/reductions/reductions.cuh> #include <tdp/manifold/SO3.h> #include <tdp/cuda/cuda.cuh> //#include <tdp/icp/icp.cuh> //#include <tdp/icp/photoSO3.h> namespace tdp { // T_mc: R_model_observation template<int BLK_SIZE, int D, typename Derived> __global__ void KernelSO3TextureStep( Image<float> grey_p, Image<float> grey_c, Image<Vector2fda> gradGrey_c, Image<Vector3fda> rays, SO3f R_cp, CameraBase<float,D,Derived> cam, int N_PER_T, Image<float> out ) { assert(BLK_SIZE >=11); const int tid = threadIdx.x; const int idx = threadIdx.x + blockDim.x * blockIdx.x; const int idS = idx*N_PER_T; const int N = grey_p.w_*grey_p.h_; const int idE = min(N,(idx+1)*N_PER_T); SharedMemory<Vector11fda> smem; Vector11fda* sum = smem.getPointer(); sum[tid] = Vector11fda::Zero(); for (int id=idS; id<idE; ++id) { const int u = id%grey_p.w_; const int v = id/grey_p.w_; tdp::Vector3fda ray_c = R_cp*rays(u,v); tdp::Vector2fda x = cam.Project(ray_c); if (grey_p.Inside(x)) { float ab[4]; Eigen::Map<Vector3fda> Ai(&(ab[0])); Ai = -(R_cp.matrix()*SO3mat<float>::invVee(rays(u,v))).transpose()* cam.Jproject(ray_c).transpose() * gradGrey_c.GetBilinear(x); ab[3] = -grey_c.GetBilinear(x) + grey_p(u,v); Eigen::Matrix<float,11,1,Eigen::DontAlign> upperTriangle; int k=0; #pragma unroll for (int i=0; i<4; ++i) { for (int j=i; j<4; ++j) { upperTriangle(k++) = ab[i]*ab[j]; } } upperTriangle(10) = 1.; // to get number of data points sum[tid] += upperTriangle; } } __syncthreads(); //sync the threads #pragma unroll for(int s=(BLK_SIZE)/2; s>1; s>>=1) { if(tid < s) { sum[tid] += sum[tid+s]; } __syncthreads(); } if(tid < 11) { // sum the last two remaining matrixes directly into global memory atomicAdd(&out[tid], sum[0](tid)+sum[1](tid)); } } template<int D, typename Derived> void SO3TextureStep ( Image<float> grey_p, Image<float> grey_c, Image<Vector2fda> gradGrey_c, Image<Vector3fda> rays, SO3f R_cp, const CameraBase<float,D,Derived>& cam, Eigen::Matrix<float,3,3,Eigen::DontAlign>& ATA, Eigen::Matrix<float,3,1,Eigen::DontAlign>& ATb, float& error, float& count ) { const size_t BLK_SIZE = 32; size_t N = grey_p.w_*grey_p.h_; dim3 threads, blocks; ComputeKernelParamsForArray(blocks,threads,N/10,BLK_SIZE); ManagedDeviceImage<float> out(11,1); hipMemset(out.ptr_, 0, 11*sizeof(float)); hipLaunchKernelGGL(( KernelSO3TextureStep<BLK_SIZE,D,Derived>), dim3(blocks),dim3(threads), BLK_SIZE*sizeof(Vector11fda), 0, grey_p, grey_c, gradGrey_c, rays, R_cp, cam, 10, out); checkCudaErrors(hipDeviceSynchronize()); ManagedHostImage<float> sumAb(11,1); hipMemcpy(sumAb.ptr_,out.ptr_,11*sizeof(float), hipMemcpyDeviceToHost); //for (int i=0; i<29; ++i) std::cout << sumAb[i] << "\t"; //std::cout << std::endl; ATA.fill(0.); ATb.fill(0.); int k = 0; for (int i=0; i<3; ++i) { for (int j=i; j<4; ++j) { float val = sumAb[k++]; if (j==3) { ATb(i) = val; } else { ATA(i,j) = val; ATA(j,i) = val; } } } count = sumAb[10]; error = sumAb[9]/count; // std::cout << ATA << std::endl << ATb.transpose() << std::endl; // std::cout << "\terror&count " << error << " " << count << std::endl; } template void 
SO3TextureStep ( Image<float> grey_p, Image<float> grey_c, Image<Vector2fda> gradGrey_c, Image<Vector3fda> rays, SO3f R_cp, const CameraBase<float,Camera<float>::NumParams,Camera<float>>& cam, Eigen::Matrix<float,3,3,Eigen::DontAlign>& ATA, Eigen::Matrix<float,3,1,Eigen::DontAlign>& ATb, float& error, float& count ); template void SO3TextureStep ( Image<float> grey_p, Image<float> grey_c, Image<Vector2fda> gradGrey_c, Image<Vector3fda> rays, SO3f R_cp, const CameraBase<float,CameraPoly3<float>::NumParams,CameraPoly3<float>>& cam, Eigen::Matrix<float,3,3,Eigen::DontAlign>& ATA, Eigen::Matrix<float,3,1,Eigen::DontAlign>& ATb, float& error, float& count ); // explicit instantiation //template void SO3TextureStep ( // Image<float> grey_p, // Image<float> grey_c, // Image<Vector2fda> gradGrey_c, // Image<Vector3fda> rays, // SO3f R_cp, // const CameraBase<float,Camera<float>::NumParams,Camera<float>>& cam, // Eigen::Matrix<float,3,3,Eigen::DontAlign>& ATA, // Eigen::Matrix<float,3,1,Eigen::DontAlign>& ATb, // float& error, // float& count // ); //template void SO3TextureStep ( // Image<float> grey_p, // Image<float> grey_c, // Image<Vector2fda> gradGrey_c, // Image<Vector3fda> rays, // SO3f R_cp, // const CameraBase<float,CameraPoly3<float>::NumParams,CameraPoly3<float>>& cam, // Eigen::Matrix<float,3,3,Eigen::DontAlign>& ATA, // Eigen::Matrix<float,3,1,Eigen::DontAlign>& ATb, // float& error, // float& count // ); }
3ef6ccbb22419d836372b5a853c62cb7b304ff40.cu
/* Copyright (c) 2016, Julian Straub <[email protected]> Licensed * under the MIT license. See the license file LICENSE. */ #include <assert.h> #include <tdp/eigen/dense.h> #include <tdp/cuda/cuda.h> #include <tdp/nvidia/helper_cuda.h> #include <tdp/data/image.h> #include <tdp/data/managed_image.h> #include <tdp/camera/camera.h> #include <tdp/camera/camera_poly.h> #include <tdp/reductions/reductions.cuh> #include <tdp/manifold/SO3.h> #include <tdp/cuda/cuda.cuh> //#include <tdp/icp/icp.cuh> //#include <tdp/icp/photoSO3.h> namespace tdp { // T_mc: R_model_observation template<int BLK_SIZE, int D, typename Derived> __global__ void KernelSO3TextureStep( Image<float> grey_p, Image<float> grey_c, Image<Vector2fda> gradGrey_c, Image<Vector3fda> rays, SO3f R_cp, CameraBase<float,D,Derived> cam, int N_PER_T, Image<float> out ) { assert(BLK_SIZE >=11); const int tid = threadIdx.x; const int idx = threadIdx.x + blockDim.x * blockIdx.x; const int idS = idx*N_PER_T; const int N = grey_p.w_*grey_p.h_; const int idE = min(N,(idx+1)*N_PER_T); SharedMemory<Vector11fda> smem; Vector11fda* sum = smem.getPointer(); sum[tid] = Vector11fda::Zero(); for (int id=idS; id<idE; ++id) { const int u = id%grey_p.w_; const int v = id/grey_p.w_; tdp::Vector3fda ray_c = R_cp*rays(u,v); tdp::Vector2fda x = cam.Project(ray_c); if (grey_p.Inside(x)) { float ab[4]; Eigen::Map<Vector3fda> Ai(&(ab[0])); Ai = -(R_cp.matrix()*SO3mat<float>::invVee(rays(u,v))).transpose()* cam.Jproject(ray_c).transpose() * gradGrey_c.GetBilinear(x); ab[3] = -grey_c.GetBilinear(x) + grey_p(u,v); Eigen::Matrix<float,11,1,Eigen::DontAlign> upperTriangle; int k=0; #pragma unroll for (int i=0; i<4; ++i) { for (int j=i; j<4; ++j) { upperTriangle(k++) = ab[i]*ab[j]; } } upperTriangle(10) = 1.; // to get number of data points sum[tid] += upperTriangle; } } __syncthreads(); //sync the threads #pragma unroll for(int s=(BLK_SIZE)/2; s>1; s>>=1) { if(tid < s) { sum[tid] += sum[tid+s]; } __syncthreads(); } if(tid < 11) { // sum the last two remaining matrixes directly into global memory atomicAdd(&out[tid], sum[0](tid)+sum[1](tid)); } } template<int D, typename Derived> void SO3TextureStep ( Image<float> grey_p, Image<float> grey_c, Image<Vector2fda> gradGrey_c, Image<Vector3fda> rays, SO3f R_cp, const CameraBase<float,D,Derived>& cam, Eigen::Matrix<float,3,3,Eigen::DontAlign>& ATA, Eigen::Matrix<float,3,1,Eigen::DontAlign>& ATb, float& error, float& count ) { const size_t BLK_SIZE = 32; size_t N = grey_p.w_*grey_p.h_; dim3 threads, blocks; ComputeKernelParamsForArray(blocks,threads,N/10,BLK_SIZE); ManagedDeviceImage<float> out(11,1); cudaMemset(out.ptr_, 0, 11*sizeof(float)); KernelSO3TextureStep<BLK_SIZE,D,Derived><<<blocks,threads, BLK_SIZE*sizeof(Vector11fda)>>>( grey_p, grey_c, gradGrey_c, rays, R_cp, cam, 10, out); checkCudaErrors(cudaDeviceSynchronize()); ManagedHostImage<float> sumAb(11,1); cudaMemcpy(sumAb.ptr_,out.ptr_,11*sizeof(float), cudaMemcpyDeviceToHost); //for (int i=0; i<29; ++i) std::cout << sumAb[i] << "\t"; //std::cout << std::endl; ATA.fill(0.); ATb.fill(0.); int k = 0; for (int i=0; i<3; ++i) { for (int j=i; j<4; ++j) { float val = sumAb[k++]; if (j==3) { ATb(i) = val; } else { ATA(i,j) = val; ATA(j,i) = val; } } } count = sumAb[10]; error = sumAb[9]/count; // std::cout << ATA << std::endl << ATb.transpose() << std::endl; // std::cout << "\terror&count " << error << " " << count << std::endl; } template void SO3TextureStep ( Image<float> grey_p, Image<float> grey_c, Image<Vector2fda> gradGrey_c, Image<Vector3fda> rays, SO3f R_cp, const 
CameraBase<float,Camera<float>::NumParams,Camera<float>>& cam, Eigen::Matrix<float,3,3,Eigen::DontAlign>& ATA, Eigen::Matrix<float,3,1,Eigen::DontAlign>& ATb, float& error, float& count ); template void SO3TextureStep ( Image<float> grey_p, Image<float> grey_c, Image<Vector2fda> gradGrey_c, Image<Vector3fda> rays, SO3f R_cp, const CameraBase<float,CameraPoly3<float>::NumParams,CameraPoly3<float>>& cam, Eigen::Matrix<float,3,3,Eigen::DontAlign>& ATA, Eigen::Matrix<float,3,1,Eigen::DontAlign>& ATb, float& error, float& count ); // explicit instantiation //template void SO3TextureStep ( // Image<float> grey_p, // Image<float> grey_c, // Image<Vector2fda> gradGrey_c, // Image<Vector3fda> rays, // SO3f R_cp, // const CameraBase<float,Camera<float>::NumParams,Camera<float>>& cam, // Eigen::Matrix<float,3,3,Eigen::DontAlign>& ATA, // Eigen::Matrix<float,3,1,Eigen::DontAlign>& ATb, // float& error, // float& count // ); //template void SO3TextureStep ( // Image<float> grey_p, // Image<float> grey_c, // Image<Vector2fda> gradGrey_c, // Image<Vector3fda> rays, // SO3f R_cp, // const CameraBase<float,CameraPoly3<float>::NumParams,CameraPoly3<float>>& cam, // Eigen::Matrix<float,3,3,Eigen::DontAlign>& ATA, // Eigen::Matrix<float,3,1,Eigen::DontAlign>& ATb, // float& error, // float& count // ); }
8f006e8d305376563c6348e42a248f2ae5ea665d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void VecAdd() {
}
8f006e8d305376563c6348e42a248f2ae5ea665d.cu
#include "includes.h"

__global__ void VecAdd() {
}
273c9be2350c5d356610d056422f092cbe69f5c8.hip
// !!! This is a file automatically generated by hipify!!! // test convolution using factorized formula // compile with // nvcc -I.. -D__TYPE__=float -Wno-deprecated-gpu-targets -DCUDA_BLOCK_SIZE=192 -DMAXTHREADSPERBLOCK0=1024 -DSHAREDMEMPERBLOCK0=49152 -std=c++11 -O2 -o build/test_factorized test_factorized.cu // we define an arbitrary function F, // then use a factorized version FF of the same function and test #include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> #include <vector> #include <ctime> #include <algorithm> #include "core/formulas/newsyntax.h" #include "core/GpuConv1D.cu" #include "core/GpuConv2D.cu" #include "core/CpuConv.cpp" #include "core/reductions/sum.h" using namespace keops; __TYPE__ floatrand() { return ((__TYPE__) std::rand())/RAND_MAX-.5; // random value between -.5 and .5 } template < class V > void fillrandom(V& v) { generate(v.begin(), v.end(), floatrand); // fills vector with random values } int main() { // In this part we define the symbolic variables of the function using X = Var<1,3,0>; // X is the second variable and represents a 3D vector using Y = Var<2,3,1>; // Y is the third variable and represents a 3D vector using U = Var<3,4,0>; // U is the fourth variable and represents a 4D vector using V = Var<4,4,1>; // V is the fifth variable and represents a 4D vector using Beta = Var<5,3,1>; // Beta is the sixth variable and represents a 3D vector using C = Param<0,1>; // C is the first variable and is a scalar parameter // symbolic expression of the function ------------------------------------------------------ // here we define F to be F0+F0+F0+F0+F0+F0+F0+F0 where F0 = <U,V>^2 * exp(-C*|X-Y|^2) * Beta in usual notations // with the standard implementation it means we will compute 8 times F0 to evaluate F using F0 = decltype(InvKeopsNS( Inv( IntCst(1) + KeopsNS<C>()* SqNorm2(KeopsNS<X>()-KeopsNS<Y>()))) );//Scal<Exp<Scal<C,Minus<SqNorm2<Subtract<X,Y>>>>>,Beta>; using F1 = Add<F0,F0>; using F = Grad<F0, X, Var<6,3,0>>; std::cout << std::endl << "Function F : " << std::endl; std::cout << PrintFormula<F>(); std::cout << std::endl << std::endl; // now we factorize F0 from F : new formula FF computes the same as F but will evaluate first F0 once and then just does three vector additions using FF = AutoFactorize < F >; std::cout << "Function FF = factorized version of F :" << std::endl; std::cout << "Factor = " << std::endl; std::cout << PrintFormula<FF::Factor>(); std::cout << std::endl << "Factorized Formula = " << std::endl; //using INDS = pack<0,1,2,3,4,5>; // just to print the formula we define a dummy INDS... using INDS = pack<0,1,2,3,4,5,6>; // just to print the formula we define a dummy INDS... 
std::cout << PrintFormula<FF::FactorizedFormula<INDS>>(); std::cout << std::endl << std::endl; using FUNCONVF = Sum_Reduction<F>; // now we test ------------------------------------------------------------------------------ std::cout << std::endl << "Testing F" << std::endl; int Nx=1511, Ny=1001; __TYPE__ s; std::vector<__TYPE__> vf(Nx*FUNCONVF::DIM); fillrandom(vf); __TYPE__ *f = vf.data(); std::vector<__TYPE__> vx(Nx*X::DIM); fillrandom(vx); __TYPE__ *x = vx.data(); std::vector<__TYPE__> vy(Ny*Y::DIM); fillrandom(vy); __TYPE__ *y = vy.data(); std::vector<__TYPE__> vu(Nx*U::DIM); fillrandom(vu); __TYPE__ *u = vu.data(); std::vector<__TYPE__> vv(Ny*V::DIM); fillrandom(vv); __TYPE__ *v = vv.data(); std::vector<__TYPE__> vb(Ny*Beta::DIM); fillrandom(vb); __TYPE__ *b = vb.data(); std::vector<__TYPE__> resgpu1(Nx*FUNCONVF::DIM), resgpu2(Nx*FUNCONVF::DIM), rescpu(Nx*FUNCONVF::DIM); __TYPE__ params[1]; __TYPE__ Sigma = 1; params[0] = 1.0/(Sigma*Sigma); clock_t begin, end; begin = clock(); int deviceID = 0; hipSetDevice(deviceID); end = clock(); std::cout << "time for GPU initialization : " << double(end - begin) / CLOCKS_PER_SEC << std::endl; begin = clock(); Eval<FUNCONVF,GpuConv1D_FromHost>::Run(Nx, Ny, f, params, x, y, u, v, b, x); end = clock(); std::cout << "time for GPU computation (first run) : " << double(end - begin) / CLOCKS_PER_SEC << std::endl; resgpu1 = vf; fillrandom(vf); begin = clock(); Eval<FUNCONVF,GpuConv2D_FromHost>::Run(Nx, Ny, f, params, x, y, u, v, b, x); end = clock(); std::cout << "time for GPU computation (second run) : " << double(end - begin) / CLOCKS_PER_SEC << std::endl; resgpu2 = vf; fillrandom(vf); if(Nx*Ny<1e8) { begin = clock(); Eval<FUNCONVF,CpuConv>::Run(Nx, Ny, f, params, x, y, u, v, b, x); end = clock(); std::cout << "time for CPU computation : " << double(end - begin) / CLOCKS_PER_SEC << std::endl; rescpu = vf; fillrandom(vf); // display values std::cout << std::endl << "resgpu1 = "; for(int i=0; i<5; i++) std::cout << resgpu1[i] << " "; std::cout << std::endl << "resgpu2 = "; for(int i=0; i<5; i++) std::cout << resgpu2[i] << " "; std::cout << std::endl << "rescpu = "; for(int i=0; i<5; i++) std::cout << rescpu[i] << " "; // display mean of errors s = 0; for(int i=0; i<Nx*FUNCONVF::DIM; i++) s += std::abs(resgpu1[i]-rescpu[i]); std::cout << std::endl << "mean abs error (cpu vs gpu1) =" << s/Nx << std::endl; s = 0; for(int i=0; i<Nx*FUNCONVF::DIM; i++) s += std::abs(resgpu2[i]-rescpu[i]); std::cout << "mean abs error (cpu vs gpu2) =" << s/Nx << std::endl; s = 0; } for(int i=0; i<Nx*FUNCONVF::DIM; i++) s += std::abs(resgpu1[i]-resgpu2[i]); std::cout << "mean abs error (gpu1 vs gpu2) =" << s/Nx << std::endl; /// testing FF std::cout << std::endl << std::endl << "Testing FF" << std::endl; using FUNCONVFF = Sum_Reduction<FF>; begin = clock(); Eval<FUNCONVFF,GpuConv1D_FromHost>::Run(Nx, Ny, f, params, x, y, u, v, b, x); end = clock(); std::cout << "time for GPU computation (first run) : " << double(end - begin) / CLOCKS_PER_SEC << std::endl; resgpu1 = vf; fillrandom(vf); begin = clock(); Eval<FUNCONVFF,GpuConv2D_FromHost>::Run(Nx, Ny, f, params, x, y, u, v, b, x); end = clock(); std::cout << "time for GPU computation (second run) : " << double(end - begin) / CLOCKS_PER_SEC << std::endl; resgpu2 = vf; fillrandom(vf); if(Nx*Ny<1e8) { begin = clock(); Eval<FUNCONVFF,CpuConv>::Run(Nx, Ny, f, params, x, y, u, v, b, x); end = clock(); std::cout << "time for CPU computation : " << double(end - begin) / CLOCKS_PER_SEC << std::endl; rescpu = vf; fillrandom(vf); // display 
values std::cout << std::endl << "resgpu1 = "; for(int i=0; i<5; i++) std::cout << resgpu1[i] << " "; std::cout << std::endl << "resgpu2 = "; for(int i=0; i<5; i++) std::cout << resgpu2[i] << " "; std::cout << std::endl << "rescpu = "; for(int i=0; i<5; i++) std::cout << rescpu[i] << " "; // display mean of errors s = 0; for(int i=0; i<Nx*FUNCONVF::DIM; i++) s += std::abs(resgpu1[i]-rescpu[i]); std::cout << std::endl << "mean abs error (cpu vs gpu1) =" << s/Nx << std::endl; s = 0; for(int i=0; i<Nx*FUNCONVF::DIM; i++) s += std::abs(resgpu2[i]-rescpu[i]); std::cout << "mean abs error (cpu vs gpu2) =" << s/Nx << std::endl; } s = 0; for(int i=0; i<Nx*FUNCONVF::DIM; i++) s += std::abs(resgpu1[i]-resgpu2[i]); std::cout << "mean abs error (gpu1 vs gpu2) =" << s/Nx << std::endl; return 0 ; }
273c9be2350c5d356610d056422f092cbe69f5c8.cu
// test convolution using factorized formula
// compile with
//    nvcc -I.. -D__TYPE__=float -Wno-deprecated-gpu-targets -DCUDA_BLOCK_SIZE=192 -DMAXTHREADSPERBLOCK0=1024 -DSHAREDMEMPERBLOCK0=49152 -std=c++11 -O2 -o build/test_factorized test_factorized.cu

// we define an arbitrary function F,
// then use a factorized version FF of the same function and test

#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <vector>
#include <ctime>
#include <algorithm>

#include "core/formulas/newsyntax.h"

#include "core/GpuConv1D.cu"
#include "core/GpuConv2D.cu"
#include "core/CpuConv.cpp"
#include "core/reductions/sum.h"

using namespace keops;

__TYPE__ floatrand() {
    return ((__TYPE__) std::rand())/RAND_MAX-.5;    // random value between -.5 and .5
}

template < class V > void fillrandom(V& v) {
    generate(v.begin(), v.end(), floatrand);    // fills vector with random values
}

int main() {

    // In this part we define the symbolic variables of the function
    using X = Var<1,3,0>;       // X is the second variable and represents a 3D vector
    using Y = Var<2,3,1>;       // Y is the third variable and represents a 3D vector
    using U = Var<3,4,0>;       // U is the fourth variable and represents a 4D vector
    using V = Var<4,4,1>;       // V is the fifth variable and represents a 4D vector
    using Beta = Var<5,3,1>;    // Beta is the sixth variable and represents a 3D vector
    using C = Param<0,1>;       // C is the first variable and is a scalar parameter

    // symbolic expression of the function ------------------------------------------------------

    // here we define F to be F0+F0+F0+F0+F0+F0+F0+F0 where F0 = <U,V>^2 * exp(-C*|X-Y|^2) * Beta in usual notations
    // with the standard implementation it means we will compute 8 times F0 to evaluate F
    using F0 = decltype(InvKeopsNS( Inv( IntCst(1) + KeopsNS<C>()* SqNorm2(KeopsNS<X>()-KeopsNS<Y>()))) );//Scal<Exp<Scal<C,Minus<SqNorm2<Subtract<X,Y>>>>>,Beta>;

    using F1 = Add<F0,F0>;

    using F = Grad<F0, X, Var<6,3,0>>;

    std::cout << std::endl << "Function F : " << std::endl;
    std::cout << PrintFormula<F>();
    std::cout << std::endl << std::endl;

    // now we factorize F0 from F : new formula FF computes the same as F but will evaluate first F0 once and then just does three vector additions
    using FF = AutoFactorize < F >;

    std::cout << "Function FF = factorized version of F :" << std::endl;

    std::cout << "Factor = " << std::endl;
    std::cout << PrintFormula<FF::Factor>();

    std::cout << std::endl << "Factorized Formula = " << std::endl;
    //using INDS = pack<0,1,2,3,4,5>; // just to print the formula we define a dummy INDS...
    using INDS = pack<0,1,2,3,4,5,6>; // just to print the formula we define a dummy INDS...
    std::cout << PrintFormula<FF::FactorizedFormula<INDS>>();
    std::cout << std::endl << std::endl;

    using FUNCONVF = Sum_Reduction<F>;

    // now we test ------------------------------------------------------------------------------

    std::cout << std::endl << "Testing F" << std::endl;

    int Nx=1511, Ny=1001;

    __TYPE__ s;

    std::vector<__TYPE__> vf(Nx*FUNCONVF::DIM); fillrandom(vf); __TYPE__ *f = vf.data();
    std::vector<__TYPE__> vx(Nx*X::DIM);        fillrandom(vx); __TYPE__ *x = vx.data();
    std::vector<__TYPE__> vy(Ny*Y::DIM);        fillrandom(vy); __TYPE__ *y = vy.data();
    std::vector<__TYPE__> vu(Nx*U::DIM);        fillrandom(vu); __TYPE__ *u = vu.data();
    std::vector<__TYPE__> vv(Ny*V::DIM);        fillrandom(vv); __TYPE__ *v = vv.data();
    std::vector<__TYPE__> vb(Ny*Beta::DIM);     fillrandom(vb); __TYPE__ *b = vb.data();

    std::vector<__TYPE__> resgpu1(Nx*FUNCONVF::DIM), resgpu2(Nx*FUNCONVF::DIM), rescpu(Nx*FUNCONVF::DIM);

    __TYPE__ params[1];
    __TYPE__ Sigma = 1;
    params[0] = 1.0/(Sigma*Sigma);

    clock_t begin, end;

    begin = clock();
    int deviceID = 0;
    cudaSetDevice(deviceID);
    end = clock();
    std::cout << "time for GPU initialization : " << double(end - begin) / CLOCKS_PER_SEC << std::endl;

    begin = clock();
    Eval<FUNCONVF,GpuConv1D_FromHost>::Run(Nx, Ny, f, params, x, y, u, v, b, x);
    end = clock();
    std::cout << "time for GPU computation (first run) : " << double(end - begin) / CLOCKS_PER_SEC << std::endl;

    resgpu1 = vf;
    fillrandom(vf);

    begin = clock();
    Eval<FUNCONVF,GpuConv2D_FromHost>::Run(Nx, Ny, f, params, x, y, u, v, b, x);
    end = clock();
    std::cout << "time for GPU computation (second run) : " << double(end - begin) / CLOCKS_PER_SEC << std::endl;

    resgpu2 = vf;
    fillrandom(vf);

    if(Nx*Ny<1e8) {
        begin = clock();
        Eval<FUNCONVF,CpuConv>::Run(Nx, Ny, f, params, x, y, u, v, b, x);
        end = clock();
        std::cout << "time for CPU computation : " << double(end - begin) / CLOCKS_PER_SEC << std::endl;

        rescpu = vf;
        fillrandom(vf);

        // display values
        std::cout << std::endl << "resgpu1 = ";
        for(int i=0; i<5; i++)
            std::cout << resgpu1[i] << " ";
        std::cout << std::endl << "resgpu2 = ";
        for(int i=0; i<5; i++)
            std::cout << resgpu2[i] << " ";
        std::cout << std::endl << "rescpu = ";
        for(int i=0; i<5; i++)
            std::cout << rescpu[i] << " ";

        // display mean of errors
        s = 0;
        for(int i=0; i<Nx*FUNCONVF::DIM; i++)
            s += std::abs(resgpu1[i]-rescpu[i]);
        std::cout << std::endl << "mean abs error (cpu vs gpu1) =" << s/Nx << std::endl;
        s = 0;
        for(int i=0; i<Nx*FUNCONVF::DIM; i++)
            s += std::abs(resgpu2[i]-rescpu[i]);
        std::cout << "mean abs error (cpu vs gpu2) =" << s/Nx << std::endl;
        s = 0;
    }
    for(int i=0; i<Nx*FUNCONVF::DIM; i++)
        s += std::abs(resgpu1[i]-resgpu2[i]);
    std::cout << "mean abs error (gpu1 vs gpu2) =" << s/Nx << std::endl;

    /// testing FF

    std::cout << std::endl << std::endl << "Testing FF" << std::endl;

    using FUNCONVFF = Sum_Reduction<FF>;

    begin = clock();
    Eval<FUNCONVFF,GpuConv1D_FromHost>::Run(Nx, Ny, f, params, x, y, u, v, b, x);
    end = clock();
    std::cout << "time for GPU computation (first run) : " << double(end - begin) / CLOCKS_PER_SEC << std::endl;

    resgpu1 = vf;
    fillrandom(vf);

    begin = clock();
    Eval<FUNCONVFF,GpuConv2D_FromHost>::Run(Nx, Ny, f, params, x, y, u, v, b, x);
    end = clock();
    std::cout << "time for GPU computation (second run) : " << double(end - begin) / CLOCKS_PER_SEC << std::endl;

    resgpu2 = vf;
    fillrandom(vf);

    if(Nx*Ny<1e8) {
        begin = clock();
        Eval<FUNCONVFF,CpuConv>::Run(Nx, Ny, f, params, x, y, u, v, b, x);
        end = clock();
        std::cout << "time for CPU computation : " << double(end - begin) / CLOCKS_PER_SEC << std::endl;

        rescpu = vf;
        fillrandom(vf);

        // display values
        std::cout << std::endl << "resgpu1 = ";
        for(int i=0; i<5; i++)
            std::cout << resgpu1[i] << " ";
        std::cout << std::endl << "resgpu2 = ";
        for(int i=0; i<5; i++)
            std::cout << resgpu2[i] << " ";
        std::cout << std::endl << "rescpu = ";
        for(int i=0; i<5; i++)
            std::cout << rescpu[i] << " ";

        // display mean of errors
        s = 0;
        for(int i=0; i<Nx*FUNCONVF::DIM; i++)
            s += std::abs(resgpu1[i]-rescpu[i]);
        std::cout << std::endl << "mean abs error (cpu vs gpu1) =" << s/Nx << std::endl;
        s = 0;
        for(int i=0; i<Nx*FUNCONVF::DIM; i++)
            s += std::abs(resgpu2[i]-rescpu[i]);
        std::cout << "mean abs error (cpu vs gpu2) =" << s/Nx << std::endl;
    }
    s = 0;
    for(int i=0; i<Nx*FUNCONVF::DIM; i++)
        s += std::abs(resgpu1[i]-resgpu2[i]);
    std::cout << "mean abs error (gpu1 vs gpu2) =" << s/Nx << std::endl;

    return 0;
}
8d8f00186749060ed8cd79364d7e694eafc0ee5c.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "freshman.h"

void sumArrays(float * a,float * b,float * res,const int size)
{
  for(int i=0;i<size;i+=4)
  {
    res[i]=a[i]+b[i];
    res[i+1]=a[i+1]+b[i+1];
    res[i+2]=a[i+2]+b[i+2];
    res[i+3]=a[i+3]+b[i+3];
  }
}
__global__ void sumArraysGPU(float*a,float*b,float*res)
{
  //int i=threadIdx.x;
  int i=blockIdx.x*blockDim.x+threadIdx.x;
  res[i]=a[i]+b[i];
}
int main(int argc,char **argv)
{
  int dev = 0;
  hipSetDevice(dev);

  int nElem=1<<14;
  printf("Vector size:%d\n",nElem);
  int nByte=sizeof(float)*nElem;
  float *a_h=(float*)malloc(nByte);
  float *b_h=(float*)malloc(nByte);
  float *res_h=(float*)malloc(nByte);
  float *res_from_gpu_h=(float*)malloc(nByte);
  memset(res_h,0,nByte);
  memset(res_from_gpu_h,0,nByte);

  float *a_d,*b_d,*res_d;
  CHECK(hipMalloc((float**)&a_d,nByte));
  CHECK(hipMalloc((float**)&b_d,nByte));
  CHECK(hipMalloc((float**)&res_d,nByte));

  initialData(a_h,nElem);
  initialData(b_h,nElem);

  CHECK(hipMemcpy(a_d,a_h,nByte,hipMemcpyHostToDevice));
  CHECK(hipMemcpy(b_d,b_h,nByte,hipMemcpyHostToDevice));

  dim3 block(1024);
  dim3 grid(nElem/block.x);
  hipLaunchKernelGGL(( sumArraysGPU), dim3(grid),dim3(block), 0, 0, a_d,b_d,res_d);
  printf("Execution configuration<<<%d,%d>>>\n",grid.x,block.x);

  CHECK(hipMemcpy(res_from_gpu_h,res_d,nByte,hipMemcpyDeviceToHost));
  sumArrays(a_h,b_h,res_h,nElem);

  checkResult(res_h,res_from_gpu_h,nElem);
  hipFree(a_d);
  hipFree(b_d);
  hipFree(res_d);

  free(a_h);
  free(b_h);
  free(res_h);
  free(res_from_gpu_h);

  return 0;
}
8d8f00186749060ed8cd79364d7e694eafc0ee5c.cu
#include <cuda_runtime.h>
#include <stdio.h>
#include "freshman.h"

void sumArrays(float * a,float * b,float * res,const int size)
{
  for(int i=0;i<size;i+=4)
  {
    res[i]=a[i]+b[i];
    res[i+1]=a[i+1]+b[i+1];
    res[i+2]=a[i+2]+b[i+2];
    res[i+3]=a[i+3]+b[i+3];
  }
}
__global__ void sumArraysGPU(float*a,float*b,float*res)
{
  //int i=threadIdx.x;
  int i=blockIdx.x*blockDim.x+threadIdx.x;
  res[i]=a[i]+b[i];
}
int main(int argc,char **argv)
{
  int dev = 0;
  cudaSetDevice(dev);

  int nElem=1<<14;
  printf("Vector size:%d\n",nElem);
  int nByte=sizeof(float)*nElem;
  float *a_h=(float*)malloc(nByte);
  float *b_h=(float*)malloc(nByte);
  float *res_h=(float*)malloc(nByte);
  float *res_from_gpu_h=(float*)malloc(nByte);
  memset(res_h,0,nByte);
  memset(res_from_gpu_h,0,nByte);

  float *a_d,*b_d,*res_d;
  CHECK(cudaMalloc((float**)&a_d,nByte));
  CHECK(cudaMalloc((float**)&b_d,nByte));
  CHECK(cudaMalloc((float**)&res_d,nByte));

  initialData(a_h,nElem);
  initialData(b_h,nElem);

  CHECK(cudaMemcpy(a_d,a_h,nByte,cudaMemcpyHostToDevice));
  CHECK(cudaMemcpy(b_d,b_h,nByte,cudaMemcpyHostToDevice));

  dim3 block(1024);
  dim3 grid(nElem/block.x);
  sumArraysGPU<<<grid,block>>>(a_d,b_d,res_d);
  printf("Execution configuration<<<%d,%d>>>\n",grid.x,block.x);

  CHECK(cudaMemcpy(res_from_gpu_h,res_d,nByte,cudaMemcpyDeviceToHost));
  sumArrays(a_h,b_h,res_h,nElem);

  checkResult(res_h,res_from_gpu_h,nElem);
  cudaFree(a_d);
  cudaFree(b_d);
  cudaFree(res_d);

  free(a_h);
  free(b_h);
  free(res_h);
  free(res_from_gpu_h);

  return 0;
}
9f5e865501cbf188d1560cc6af28773bee2926ce.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>

#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

__global__ void print_details_of_warps()
{
    int gid = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;

    int warp_id = threadIdx.x / 32;

    int gbid = blockIdx.y * gridDim.x + blockIdx.x;

    printf("tid : %d, bid.x : %d, bid.y : %d, gid : %d, warp_id : %d, gbid : %d \n",
        threadIdx.x, blockIdx.x, blockIdx.y, gid, warp_id, gbid);
}

int main(int argc, char** argv)
{
    dim3 block_size(42);
    dim3 grid_size(2,2);

    print_details_of_warps<<<grid_size, block_size>>>();
    hipDeviceSynchronize();

    hipDeviceReset();
    return EXIT_SUCCESS;
}
9f5e865501cbf188d1560cc6af28773bee2926ce.cu
#include <stdio.h>
#include <stdlib.h>

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

__global__ void print_details_of_warps()
{
    int gid = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;

    int warp_id = threadIdx.x / 32;

    int gbid = blockIdx.y * gridDim.x + blockIdx.x;

    printf("tid : %d, bid.x : %d, bid.y : %d, gid : %d, warp_id : %d, gbid : %d \n",
        threadIdx.x, blockIdx.x, blockIdx.y, gid, warp_id, gbid);
}

int main(int argc, char** argv)
{
    dim3 block_size(42);
    dim3 grid_size(2,2);

    print_details_of_warps<<<grid_size, block_size>>>();
    cudaDeviceSynchronize();

    cudaDeviceReset();
    return EXIT_SUCCESS;
}
746b7cef9f6f40b248d3aeeefbd2c2c310b6e709.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cnn.h" #include "timer.h" #include <thrust/device_vector.h> #include <stdio.h> /* * TODO * Define kernel here */ __global__ void pooling( float * inputs, float * outputs, int N, int D, int NoImg) { // Store each work-items unique row and column int i = blockIdx.x * blockDim.x + threadIdx.x; // N*N int j = blockIdx.y * blockDim.y + threadIdx.y; // D int n = blockIdx.z * blockDim.z + threadIdx.z; // NoImg if (i < N*N && j < D && n < NoImg) { int x = i/N; int y = i - x*N; float max = 0; for (int k = 0; k < 2; k++) { for (int l = 0; l < 2; l++) { float pixel = inputs[(x*2 + k)*2*N + y*2+l + (j*N*N*4) + (4*N*N*D*n)]; max = (max > pixel) ? max : pixel; } } outputs[i + (j*N*N) + (N*N*D*n)] = max; } } __global__ void convolution_v1( float * inputs, float * outputs, float * filters, float * biases, int N, int D1, int D2, int NoImg) { // Store each work-items unique row and column int d = blockIdx.x * blockDim.x + threadIdx.x; // N*N int d2 = blockIdx.y * blockDim.y + threadIdx.y; // D2 int n = blockIdx.z * blockDim.z + threadIdx.z; // NoImg extern __shared__ float tmpFilters[]; if (d < N*N && d2 < D2 && n < NoImg) { for (int t = 0; t < D1; t+=1) { for (int i = 0; i < 9; i++) tmpFilters[i + (3*3* (threadIdx.y*D1 + t))] = filters[i + (3*3 * (d2*D1 + t))]; } __syncthreads(); int i = d/N; int j = d- i*N; int oIdx = i*N + j + (N*N*d2) + (N*N*D2*n); outputs[oIdx] = 0; // Unroll 1 times for (int t = 0; t < D1; t+=1) { float sum = 0; for (int k = 0; k < 3; k++) { for (int l = 0; l < 3; l++) { int x = i + k - 1; int y = j + l - 1; if (x >= 0 && x < N && y >= 0 && y < N) sum += inputs[x*N + y + N*N*t + (N*N*D1*n)] * filters[k*3 + l + (3*3 * (d2*D1 + t))]; } } outputs[oIdx] += sum; } // RELU float bias = biases[d2]; outputs[oIdx] = (outputs[oIdx] + bias > 0) ? (outputs[oIdx] + bias) : 0; } } __global__ void convolution_v2( float * inputs, float * outputs, float * filters, float * biases, int N, int D1, int D2, int NoImg) { // Store each work-items unique row and column int x1 = blockIdx.x * blockDim.x + threadIdx.x; // N*N*D2*NoImg if (x1 < N*N*D2*NoImg) { // Calculate index values int n = x1/(N*N*D2); int tmp1 = x1 - n*(N*N*D2); int d2 = tmp1/(N*N); int tmp2 = tmp1 - d2*(N*N); int i = tmp2/N; int j = tmp2 - i*N; int oIdx = x1; //i*N + j + (N*N*d2) + (N*N*D2*n); outputs[oIdx] = 0; // Unroll 1 times for (int t = 0; t < D1; t+=1) { float sum = 0; for (int k = 0; k < 3; k++) { for (int l = 0; l < 3; l++) { int x = i + k - 1; int y = j + l - 1; if (x >= 0 && x < N && y >= 0 && y < N) sum += inputs[x*N + y + N*N*t + (N*N*D1*n)] * filters[k*3 + l + (3*3 * (d2*D1 + t))]; } } outputs[oIdx] += sum; } // RELU float bias = biases[d2]; outputs[oIdx] = (outputs[oIdx] + bias > 0) ? 
(outputs[oIdx] + bias) : 0; } } __global__ void fc( float * input_neuron, float * output_neuron, float * weights, float * biases, const int N, const int M, const int NoImg) { int x = blockIdx.x * blockDim.x + threadIdx.x; // M int y = blockIdx.y * blockDim.y + threadIdx.y; // NoImg if (x < M && y < NoImg) { float sum = 0; for (int i = 0; i < N; i++) { sum += weights[x*N + i] * input_neuron[i + N*y]; } output_neuron[x + M*y] = sum + biases[x]; // RELU if (output_neuron[x + M*y] < 0) { output_neuron[x + M*y] = 0; } } } __global__ void softmax_kernel( float * output, int N) { int i = threadIdx.x; float sum = 0; for (i = 0; i < N; i++) { sum += exp(output[i]); } for (i = 0; i < N; i++) { output[i] = exp(output[i]) / sum; } } /************************ CUDA ************************/ #define NormalToOne(x) (((x)<=0)?(1):x) #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } // show memory usage of GPU static void show_mem_gpu(const char *info) { size_t free_byte ; size_t total_byte ; gpuErrchk(hipMemGetInfo( &free_byte, &total_byte )) ; double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; //printf("%s - GPU memory usage: used = %.3f MB, free = %.3f MB, total = %.3f MB\n", // info, used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); } float data_transfer_time = 0; float pooling_time = 0; float conv_time = 0; float fc_time = 0; float softmax_time = 0; /* * D = channel size * N = width and height of an output image * Thus, input is (D, N * 2, N * 2) and output is (D, N, N). 
*/ static void pooling_layer(float *inputs, float *outputs, int D, int N, int NoImg) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); show_mem_gpu("Before pooling"); // Call gpu kernel dim3 threadsPerBlock(8, 8, 1); if (N < 4) threadsPerBlock.x = N*N; threadsPerBlock.z = NormalToOne(1024 / (threadsPerBlock.x*threadsPerBlock.y)); dim3 numBlocks((N*N + threadsPerBlock.x - 1)/threadsPerBlock.x, (D + threadsPerBlock.y - 1)/threadsPerBlock.y, (NoImg + threadsPerBlock.z - 1)/threadsPerBlock.z); hipEventRecord(start); hipLaunchKernelGGL(( pooling), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, inputs, outputs, N, D, NoImg); hipEventRecord(stop); hipEventSynchronize(stop); show_mem_gpu("After pooling"); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); pooling_time += milliseconds/1000; } static void convolution_layer_v1(float *inputs, float *outputs, float *filters, float *biases, int D2, int D1, int N, int NoImg) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Call GPU kernel dim3 threadsPerBlock(8, 8, 16); if (N < 4) threadsPerBlock.x = N*N; threadsPerBlock.z = NormalToOne(1024 / (threadsPerBlock.x*threadsPerBlock.y)); dim3 numBlocks((N*N + threadsPerBlock.x - 1)/threadsPerBlock.x, (D2 + threadsPerBlock.y - 1)/threadsPerBlock.y, (NoImg + threadsPerBlock.z - 1)/threadsPerBlock.z); hipEventRecord(start); hipLaunchKernelGGL(( convolution_v1), dim3(numBlocks), dim3(threadsPerBlock), 3*3*D1*threadsPerBlock.y*sizeof(float), 0, inputs, outputs, filters, biases, N, D1, D2, NoImg); hipEventRecord(stop); gpuErrchk(hipEventSynchronize(stop)); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("conv time: %f ms\n", milliseconds); conv_time += milliseconds/1000; } static void convolution_layer_v2(float *inputs, float *outputs, float *filters, float *biases, int D2, int D1, int N, int NoImg) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); show_mem_gpu("Before conv"); // Call GPU kernel dim3 threadsPerBlock(1024, 1, 1); dim3 numBlocks((N*N*D2*NoImg + threadsPerBlock.x - 1)/threadsPerBlock.x, 1, 1); hipEventRecord(start); hipLaunchKernelGGL(( convolution_v2), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, inputs, outputs, filters, biases, N, D1, D2, NoImg); hipEventRecord(stop); gpuErrchk(hipEventSynchronize(stop)); show_mem_gpu("After conv"); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("conv time: %f ms\n", milliseconds); conv_time += milliseconds/1000; } /* * M = output size * N = input size */ static void fc_layer(float *input_neuron, float *output_neuron, float *weights, float *biases, int M, int N, int NoImg) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Call GPU kernel dim3 blockSize(16, 1); if (M < 64) blockSize.x = M; blockSize.y = NormalToOne(1024 / blockSize.x); dim3 gridSize((M + blockSize.x - 1) / blockSize.x, (NoImg + blockSize.y - 1)/blockSize.y); hipEventRecord(start); hipLaunchKernelGGL(( fc), dim3(gridSize), dim3(blockSize), 0, 0, input_neuron, output_neuron, weights, biases, N, M, NoImg); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); fc_time += milliseconds/1000; } static void softmax(float *output, int N) { timer_start(1); int i; float max = output[0]; for (i = 1; i < N; i++) { max = (output[i] > max)?output[i]:max; } float sum = 0; for (i = 0; i < N; i++) { sum += exp(output[i] - max); } for (i = 0; i < N; i++) { output[i] = 
exp(output[i] - max) / sum; } softmax_time += timer_end(1); } static int find_max(float *fc, int N) { int i; int maxid = 0; float maxval = 0; for (i = 0; i < N; i++) { if (maxval < fc[i]) { maxval = fc[i]; maxid = i; } } return maxid; } float* alloc_layer(size_t n) { return (float*)malloc(n * sizeof(float)); } void cnn_init() { /* * TODO * Initialize OpenCL objects as global variables. For example, * clGetPlatformIDs(1, &platform, NULL); */ } const int NETWORK_SIZES[] = { 64 * 3 * 3 * 3, 64, 64 * 64 * 3 * 3, 64, 128 * 64 * 3 * 3, 128, 128 * 128 * 3 * 3, 128, 256 * 128 * 3 * 3, 256, 256 * 256 * 3 * 3, 256, 256 * 256 * 3 * 3, 256, 512 * 256 * 3 * 3, 512, 512 * 512 * 3 * 3, 512, 512 * 512 * 3 * 3, 512, 512 * 512 * 3 * 3, 512, 512 * 512 * 3 * 3, 512, 512 * 512 * 3 * 3, 512, 512 * 512, 512, 512 * 512, 512, 10 * 512, 10 }; const int OUTPUT_SIZES[] = { 64 * 32 * 32, 64 * 32 * 32, 64 * 16 * 16, 128 * 16 * 16, 128 * 16 * 16, 128 * 8 * 8, 256 * 8 * 8, 256 * 8 * 8, 256 * 8 * 8, 256 * 4 * 4, 512 * 4 * 4, 512 * 4 * 4, 512 * 4 * 4, 512 * 2 * 2, 512 * 2 * 2, 512 * 2 * 2, 512 * 2 * 2, 512 * 1 * 1, 512, 512, 10 }; void cnn(float *images, float **network, int *labels, float *confidences, int num_images, int batch_size) { /* * TODO * Implement here. * Write classification results to labels and confidences. * See "cnn_seq.c" if you don't know what to do. */ // slice the network into weights and biases float *w1_1, *b1_1, *w1_2, *b1_2; float *w2_1, *b2_1, *w2_2, *b2_2; float *w3_1, *b3_1, *w3_2, *b3_2, *w3_3, *b3_3; float *w4_1, *b4_1, *w4_2, *b4_2, *w4_3, *b4_3; float *w5_1, *b5_1, *w5_2, *b5_2, *w5_3, *b5_3; float *w1, *b1, *w2, *b2, *w3, *b3; // Set data for weights and biases w1_1 = network[0]; b1_1 = network[1]; w1_2 = network[2]; b1_2 = network[3]; w2_1 = network[4]; b2_1 = network[5]; w2_2 = network[6]; b2_2 = network[7]; w3_1 = network[8]; b3_1 = network[9]; w3_2 = network[10]; b3_2 = network[11]; w3_3 = network[12]; b3_3 = network[13]; w4_1 = network[14]; b4_1 = network[15]; w4_2 = network[16]; b4_2 = network[17]; w4_3 = network[18]; b4_3 = network[19]; w5_1 = network[20]; b5_1 = network[21]; w5_2 = network[22]; b5_2 = network[23]; w5_3 = network[24]; b5_3 = network[25]; w1 = network[26]; b1 = network[27]; w2 = network[28]; b2 = network[29]; w3 = network[30]; b3 = network[31]; // Allocate vectors in device memory float *d_w1_1, *d_b1_1, *d_w1_2, *d_b1_2; float *d_w2_1, *d_b2_1, *d_w2_2, *d_b2_2; float *d_w3_1, *d_b3_1, *d_w3_2, *d_b3_2, *d_w3_3, *d_b3_3; float *d_w4_1, *d_b4_1, *d_w4_2, *d_b4_2, *d_w4_3, *d_b4_3; float *d_w5_1, *d_b5_1, *d_w5_2, *d_b5_2, *d_w5_3, *d_b5_3; float *d_w1, *d_b1, *d_w2, *d_b2, *d_w3, *d_b3; hipMalloc(&d_w1_1, NETWORK_SIZES[0] * sizeof(float)); hipMalloc(&d_w1_2, NETWORK_SIZES[2] * sizeof(float)); hipMalloc(&d_w2_1, NETWORK_SIZES[4] * sizeof(float)); hipMalloc(&d_w2_2, NETWORK_SIZES[6] * sizeof(float)); hipMalloc(&d_w3_1, NETWORK_SIZES[8] * sizeof(float)); hipMalloc(&d_w3_2, NETWORK_SIZES[10] * sizeof(float)); hipMalloc(&d_w3_3, NETWORK_SIZES[12] * sizeof(float)); hipMalloc(&d_w4_1, NETWORK_SIZES[14] * sizeof(float)); hipMalloc(&d_w4_2, NETWORK_SIZES[16] * sizeof(float)); hipMalloc(&d_w4_3, NETWORK_SIZES[18] * sizeof(float)); hipMalloc(&d_w5_1, NETWORK_SIZES[20] * sizeof(float)); hipMalloc(&d_w5_2, NETWORK_SIZES[22] * sizeof(float)); hipMalloc(&d_w5_3, NETWORK_SIZES[24] * sizeof(float)); hipMalloc(&d_w1, NETWORK_SIZES[26] * sizeof(float)); hipMalloc(&d_w2, NETWORK_SIZES[28] * sizeof(float)); hipMalloc(&d_w3, NETWORK_SIZES[30] * sizeof(float)); hipMalloc(&d_b1_1, 
NETWORK_SIZES[1] * sizeof(float)); hipMalloc(&d_b1_2, NETWORK_SIZES[3] * sizeof(float)); hipMalloc(&d_b2_1, NETWORK_SIZES[5] * sizeof(float)); hipMalloc(&d_b2_2, NETWORK_SIZES[7] * sizeof(float)); hipMalloc(&d_b3_1, NETWORK_SIZES[9] * sizeof(float)); hipMalloc(&d_b3_2, NETWORK_SIZES[11] * sizeof(float)); hipMalloc(&d_b3_3, NETWORK_SIZES[13] * sizeof(float)); hipMalloc(&d_b4_1, NETWORK_SIZES[15] * sizeof(float)); hipMalloc(&d_b4_2, NETWORK_SIZES[17] * sizeof(float)); hipMalloc(&d_b4_3, NETWORK_SIZES[19] * sizeof(float)); hipMalloc(&d_b5_1, NETWORK_SIZES[21] * sizeof(float)); hipMalloc(&d_b5_2, NETWORK_SIZES[23] * sizeof(float)); hipMalloc(&d_b5_3, NETWORK_SIZES[25] * sizeof(float)); hipMalloc(&d_b1, NETWORK_SIZES[27] * sizeof(float)); hipMalloc(&d_b2, NETWORK_SIZES[29] * sizeof(float)); hipMalloc(&d_b3, NETWORK_SIZES[31] * sizeof(float)); // Create cudaEvent to measure cuda time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Copy vectors from host memory to device memory hipEventRecord(start); hipMemcpy(d_w1_1, w1_1, NETWORK_SIZES[0] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_w1_2, w1_2, NETWORK_SIZES[2] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_w2_1, w2_1, NETWORK_SIZES[4] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_w2_2, w2_2, NETWORK_SIZES[6] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_w3_1, w3_1, NETWORK_SIZES[8] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_w3_2, w3_2, NETWORK_SIZES[10] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_w3_3, w3_3, NETWORK_SIZES[12] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_w4_1, w4_1, NETWORK_SIZES[14] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_w4_2, w4_2, NETWORK_SIZES[16] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_w4_3, w4_3, NETWORK_SIZES[18] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_w5_1, w5_1, NETWORK_SIZES[20] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_w5_2, w5_2, NETWORK_SIZES[22] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_w5_3, w5_3, NETWORK_SIZES[24] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_w1, w1, NETWORK_SIZES[26] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_w2, w2, NETWORK_SIZES[28] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_w3, w3, NETWORK_SIZES[30] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b1_1, b1_1, NETWORK_SIZES[1] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b1_2, b1_2, NETWORK_SIZES[3] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b2_1, b2_1, NETWORK_SIZES[5] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b2_2, b2_2, NETWORK_SIZES[7] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b3_1, b3_1, NETWORK_SIZES[9] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b3_2, b3_2, NETWORK_SIZES[11] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b3_3, b3_3, NETWORK_SIZES[13] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b4_1, b4_1, NETWORK_SIZES[15] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b4_2, b4_2, NETWORK_SIZES[17] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b4_3, b4_3, NETWORK_SIZES[19] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b5_1, b5_1, NETWORK_SIZES[21] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b5_2, b5_2, NETWORK_SIZES[23] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b5_3, b5_3, NETWORK_SIZES[25] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b1, b1, NETWORK_SIZES[27] * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b2, b2, NETWORK_SIZES[29] * sizeof(float), 
hipMemcpyHostToDevice); hipMemcpy(d_b3, b3, NETWORK_SIZES[31] * sizeof(float), hipMemcpyHostToDevice); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time = milliseconds/1000; printf("network data transfer time = %f s\n", data_transfer_time); data_transfer_time = 0; show_mem_gpu("After network data transfer"); // Batch images size int batchImg = batch_size; printf("batch size = %d\n", batchImg); // Allocate output vectors in device memory to transfer between layers float *d_c1_1, *d_c1_2, *d_p1; float *d_c2_1, *d_c2_2, *d_p2; float *d_c3_1, *d_c3_2, *d_c3_3, *d_p3; float *d_c4_1, *d_c4_2, *d_c4_3, *d_p4; float *d_c5_1, *d_c5_2, *d_c5_3, *d_p5; float *d_fc1, *d_fc2, *d_fc3; /*hipMalloc(&d_c1_1, batchImg * OUTPUT_SIZES[0] * sizeof(float)); hipMalloc(&d_c1_2, batchImg * OUTPUT_SIZES[1] * sizeof(float)); hipMalloc(&d_p1, batchImg * OUTPUT_SIZES[2] * sizeof(float)); hipMalloc(&d_c2_1, batchImg * OUTPUT_SIZES[3] * sizeof(float)); hipMalloc(&d_c2_2, batchImg * OUTPUT_SIZES[4] * sizeof(float)); hipMalloc(&d_p2, batchImg * OUTPUT_SIZES[5] * sizeof(float)); hipMalloc(&d_c3_1, batchImg * OUTPUT_SIZES[6] * sizeof(float)); hipMalloc(&d_c3_2, batchImg * OUTPUT_SIZES[7] * sizeof(float)); hipMalloc(&d_c3_3, batchImg * OUTPUT_SIZES[8] * sizeof(float)); hipMalloc(&d_p3, batchImg * OUTPUT_SIZES[9] * sizeof(float)); hipMalloc(&d_c4_1, batchImg * OUTPUT_SIZES[10] * sizeof(float)); hipMalloc(&d_c4_2, batchImg * OUTPUT_SIZES[11] * sizeof(float)); hipMalloc(&d_c4_3, batchImg * OUTPUT_SIZES[12] * sizeof(float)); hipMalloc(&d_p4, batchImg * OUTPUT_SIZES[13] * sizeof(float)); hipMalloc(&d_c5_1, batchImg * OUTPUT_SIZES[14] * sizeof(float)); hipMalloc(&d_c5_2, batchImg * OUTPUT_SIZES[15] * sizeof(float)); hipMalloc(&d_c5_3, batchImg * OUTPUT_SIZES[16] * sizeof(float)); hipMalloc(&d_p5, batchImg * OUTPUT_SIZES[17] * sizeof(float)); hipMalloc(&d_fc1, batchImg * OUTPUT_SIZES[18] * sizeof(float)); hipMalloc(&d_fc2, batchImg * OUTPUT_SIZES[19] * sizeof(float)); hipMalloc(&d_fc3, batchImg * OUTPUT_SIZES[20] * sizeof(float));*/ show_mem_gpu("After malloc output vectors"); // run network size_t image_size = batchImg*3*32*32 * sizeof(float); float *d_image; hipMalloc(&d_image, image_size); int start_num_images = num_images%batchImg; // Images will processed by batch for(int i = start_num_images; i < num_images; i += batchImg) { printf("i = %d\n", i); // Copy image from host to device float *image = images + i * 3 * 32 * 32; hipEventRecord(start); hipMemcpy(d_image, image, image_size, hipMemcpyHostToDevice); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipMalloc(&d_c1_1, batchImg * OUTPUT_SIZES[0] * sizeof(float)); convolution_layer_v2(d_image, d_c1_1, d_w1_1, d_b1_1, 64, 3, 32, batchImg); hipMalloc(&d_c1_2, batchImg * OUTPUT_SIZES[1] * sizeof(float)); convolution_layer_v2(d_c1_1, d_c1_2, d_w1_2, d_b1_2, 64, 64, 32, batchImg); hipFree(d_c1_1); hipMalloc(&d_p1, batchImg * OUTPUT_SIZES[2] * sizeof(float)); pooling_layer(d_c1_2, d_p1, 64, 16, batchImg); hipFree(d_c1_2); hipMalloc(&d_c2_1, batchImg * OUTPUT_SIZES[3] * sizeof(float)); convolution_layer_v2(d_p1, d_c2_1, d_w2_1, d_b2_1, 128, 64, 16, batchImg); hipFree(d_p1); hipMalloc(&d_c2_2, batchImg * OUTPUT_SIZES[4] * sizeof(float)); convolution_layer_v2(d_c2_1, d_c2_2, d_w2_2, d_b2_2, 128, 128, 16, batchImg); hipFree(d_c2_1); hipMalloc(&d_p2, batchImg * OUTPUT_SIZES[5] * 
sizeof(float)); pooling_layer(d_c2_2, d_p2, 128, 8, batchImg); hipFree(d_c2_2); hipMalloc(&d_c3_1, batchImg * OUTPUT_SIZES[6] * sizeof(float)); convolution_layer_v2(d_p2, d_c3_1, d_w3_1, d_b3_1, 256, 128, 8, batchImg); hipFree(d_p2); hipMalloc(&d_c3_2, batchImg * OUTPUT_SIZES[7] * sizeof(float)); convolution_layer_v2(d_c3_1, d_c3_2, d_w3_2, d_b3_2, 256, 256, 8, batchImg); hipFree(d_c3_1); hipMalloc(&d_c3_3, batchImg * OUTPUT_SIZES[8] * sizeof(float)); convolution_layer_v2(d_c3_2, d_c3_3, d_w3_3, d_b3_3, 256, 256, 8, batchImg); hipFree(d_c3_2); hipMalloc(&d_p3, batchImg * OUTPUT_SIZES[9] * sizeof(float)); pooling_layer(d_c3_3, d_p3, 256, 4, batchImg); hipFree(d_c3_3); hipMalloc(&d_c4_1, batchImg * OUTPUT_SIZES[10] * sizeof(float)); convolution_layer_v2(d_p3, d_c4_1, d_w4_1, d_b4_1, 512, 256, 4, batchImg); hipFree(d_p3); hipMalloc(&d_c4_2, batchImg * OUTPUT_SIZES[11] * sizeof(float)); convolution_layer_v2(d_c4_1, d_c4_2, d_w4_2, d_b4_2, 512, 512, 4, batchImg); hipFree(d_c4_1); hipMalloc(&d_c4_3, batchImg * OUTPUT_SIZES[12] * sizeof(float)); convolution_layer_v2(d_c4_2, d_c4_3, d_w4_3, d_b4_3, 512, 512, 4, batchImg); hipFree(d_c4_2); hipMalloc(&d_p4, batchImg * OUTPUT_SIZES[13] * sizeof(float)); pooling_layer(d_c4_3, d_p4, 512, 2, batchImg); hipFree(d_c4_3); hipMalloc(&d_c5_1, batchImg * OUTPUT_SIZES[14] * sizeof(float)); convolution_layer_v2(d_p4, d_c5_1, d_w5_1, d_b5_1, 512, 512, 2, batchImg); hipFree(d_p4); hipMalloc(&d_c5_2, batchImg * OUTPUT_SIZES[15] * sizeof(float)); convolution_layer_v2(d_c5_1, d_c5_2, d_w5_2, d_b5_2, 512, 512, 2, batchImg); hipFree(d_c5_1); hipMalloc(&d_c5_3, batchImg * OUTPUT_SIZES[16] * sizeof(float)); convolution_layer_v2(d_c5_2, d_c5_3, d_w5_3, d_b5_3, 512, 512, 2, batchImg); hipFree(d_c5_2); hipMalloc(&d_p5, batchImg * OUTPUT_SIZES[17] * sizeof(float)); pooling_layer(d_c5_3, d_p5, 512, 1, batchImg); hipFree(d_c5_3); hipMalloc(&d_fc1, batchImg * OUTPUT_SIZES[18] * sizeof(float)); fc_layer(d_p5, d_fc1, d_w1, d_b1, 512, 512, batchImg); hipFree(d_p5); hipMalloc(&d_fc2, batchImg * OUTPUT_SIZES[19] * sizeof(float)); fc_layer(d_fc1, d_fc2, d_w2, d_b2, 512, 512, batchImg); hipFree(d_fc1); hipMalloc(&d_fc3, batchImg * OUTPUT_SIZES[20] * sizeof(float)); fc_layer(d_fc2, d_fc3, d_w3, d_b3, 10, 512, batchImg); hipFree(d_fc2); // Copy result from device memory to host memory float *fc3_mul = alloc_layer(OUTPUT_SIZES[20] * batchImg); hipEventRecord(start); hipMemcpy(fc3_mul, d_fc3, batchImg * OUTPUT_SIZES[20] * sizeof(float), hipMemcpyDeviceToHost); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; // Predicted labels for (int j = 0; j < batchImg; j++) { float *fc3 = fc3_mul + j*10; softmax(fc3, 10); int idx = i + j; labels[idx] = find_max(fc3, 10); confidences[idx] = fc3[labels[idx]]; } free(fc3_mul); hipFree(d_fc3); } hipFree(d_image); // The remaining images size_t image_size2 = start_num_images*3*32*32 * sizeof(float); float *d_image2; batchImg = start_num_images; for(int i = 0; i < start_num_images; i += start_num_images) { // Copy image from host to device float *image = images + i * 3 * 32 * 32; hipEventRecord(start); hipMalloc(&d_image2, image_size2); hipMemcpy(d_image2, image, image_size2, hipMemcpyHostToDevice); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_c1_1, batchImg * OUTPUT_SIZES[0] * sizeof(float)); 
hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_image2, d_c1_1, d_w1_1, d_b1_1, 64, 3, 32, batchImg); hipEventRecord(start); hipFree(d_image2); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_c1_2, batchImg * OUTPUT_SIZES[1] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_c1_1, d_c1_2, d_w1_2, d_b1_2, 64, 64, 32, batchImg); hipEventRecord(start); hipFree(d_c1_1); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_p1, batchImg * OUTPUT_SIZES[2] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; pooling_layer(d_c1_2, d_p1, 64, 16, batchImg); hipEventRecord(start); hipFree(d_c1_2); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_c2_1, batchImg * OUTPUT_SIZES[3] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_p1, d_c2_1, d_w2_1, d_b2_1, 128, 64, 16, batchImg); hipEventRecord(start); hipFree(d_p1); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_c2_2, batchImg * OUTPUT_SIZES[4] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_c2_1, d_c2_2, d_w2_2, d_b2_2, 128, 128, 16, batchImg); hipEventRecord(start); hipFree(d_c2_1); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_p2, batchImg * OUTPUT_SIZES[5] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; pooling_layer(d_c2_2, d_p2, 128, 8, batchImg); hipEventRecord(start); hipFree(d_c2_2); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_c3_1, batchImg * OUTPUT_SIZES[6] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_p2, d_c3_1, d_w3_1, d_b3_1, 256, 128, 8, batchImg); hipEventRecord(start); hipFree(d_p2); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipMalloc(&d_c3_2, batchImg * OUTPUT_SIZES[7] * sizeof(float)); hipEventRecord(stop); 
hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_c3_1, d_c3_2, d_w3_2, d_b3_2, 256, 256, 8, batchImg); hipEventRecord(start); hipFree(d_c3_1); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_c3_3, batchImg * OUTPUT_SIZES[8] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_c3_2, d_c3_3, d_w3_3, d_b3_3, 256, 256, 8, batchImg); hipEventRecord(start); hipFree(d_c3_2); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipMalloc(&d_p3, batchImg * OUTPUT_SIZES[9] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; pooling_layer(d_c3_3, d_p3, 256, 4, batchImg); hipEventRecord(start); hipFree(d_c3_3); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_c4_1, batchImg * OUTPUT_SIZES[10] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_p3, d_c4_1, d_w4_1, d_b4_1, 512, 256, 4, batchImg); hipEventRecord(start); hipFree(d_p3); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_c4_2, batchImg * OUTPUT_SIZES[11] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_c4_1, d_c4_2, d_w4_2, d_b4_2, 512, 512, 4, batchImg); hipEventRecord(start); hipFree(d_c4_1); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipMalloc(&d_c4_3, batchImg * OUTPUT_SIZES[12] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_c4_2, d_c4_3, d_w4_3, d_b4_3, 512, 512, 4, batchImg); hipEventRecord(start); hipFree(d_c4_2); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_p4, batchImg * OUTPUT_SIZES[13] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; pooling_layer(d_c4_3, d_p4, 512, 2, batchImg); hipEventRecord(start); hipFree(d_c4_3); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_c5_1, batchImg * OUTPUT_SIZES[14] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); 
milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_p4, d_c5_1, d_w5_1, d_b5_1, 512, 512, 2, batchImg); hipEventRecord(start); hipFree(d_p4); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_c5_2, batchImg * OUTPUT_SIZES[15] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_c5_1, d_c5_2, d_w5_2, d_b5_2, 512, 512, 2, batchImg); hipEventRecord(start); hipFree(d_c5_1); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_c5_3, batchImg * OUTPUT_SIZES[16] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_c5_2, d_c5_3, d_w5_3, d_b5_3, 512, 512, 2, batchImg); hipEventRecord(start); hipFree(d_c5_2); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_p5, batchImg * OUTPUT_SIZES[17] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; pooling_layer(d_c5_3, d_p5, 512, 1, batchImg); hipEventRecord(start); hipFree(d_c5_3); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_fc1, batchImg * OUTPUT_SIZES[18] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; fc_layer(d_p5, d_fc1, d_w1, d_b1, 512, 512, batchImg); hipEventRecord(start); hipFree(d_p5); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_fc2, batchImg * OUTPUT_SIZES[19] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; fc_layer(d_fc1, d_fc2, d_w2, d_b2, 512, 512, batchImg); hipEventRecord(start); hipFree(d_fc1); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; hipEventRecord(start); hipMalloc(&d_fc3, batchImg * OUTPUT_SIZES[20] * sizeof(float)); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; fc_layer(d_fc2, d_fc3, d_w3, d_b3, 10, 512, batchImg); hipEventRecord(start); hipFree(d_fc2); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; // Copy result from device memory to host memory float *fc3_mul = alloc_layer(OUTPUT_SIZES[20] * batchImg); hipEventRecord(start); hipMemcpy(fc3_mul, d_fc3, batchImg 
* OUTPUT_SIZES[20] * sizeof(float), hipMemcpyDeviceToHost); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; // Predicted labels for (int j = 0; j < batchImg; j++) { float *fc3 = fc3_mul + j*10; softmax(fc3, 10); int idx = i + j; labels[idx] = find_max(fc3, 10); confidences[idx] = fc3[labels[idx]]; } free(fc3_mul); hipEventRecord(start); hipFree(d_fc3); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; } printf("data transfer time = %f s\n", data_transfer_time); printf("pooing time = %f s\n", pooling_time); printf("convolution time = %f s\n", conv_time); printf("fully connected time = %f s\n", fc_time); printf("softmax time = %f s\n", softmax_time); hipFree(d_w1_1); hipFree(d_b1_1); hipFree(d_w1_2); hipFree(d_b1_2); hipFree(d_w2_1); hipFree(d_b2_2); hipFree(d_w2_2); hipFree(d_b2_2); hipFree(d_w3_1); hipFree(d_b3_1); hipFree(d_w3_2); hipFree(d_b3_2); hipFree(d_w3_3); hipFree(d_b3_3); hipFree(d_w4_1); hipFree(d_b4_1); hipFree(d_w4_2); hipFree(d_b4_2); hipFree(d_w4_3); hipFree(d_b4_3); hipFree(d_w5_1); hipFree(d_b5_1); hipFree(d_w5_2); hipFree(d_b5_2); hipFree(d_w5_3); hipFree(d_b5_3); hipFree(d_w1); hipFree(d_b1); hipFree(d_w2); hipFree(d_b2); hipFree(d_w3); hipFree(d_b3); /*hipFree(d_c1_1); hipFree(d_c1_2); hipFree(d_p1); hipFree(d_c2_1); hipFree(d_c2_2); hipFree(d_p2); hipFree(d_c3_1); hipFree(d_c3_2); hipFree(d_c3_3); hipFree(d_p3); hipFree(d_c4_1); hipFree(d_c4_2); hipFree(d_c4_3); hipFree(d_p4); hipFree(d_c5_1); hipFree(d_c5_2); hipFree(d_c5_3); hipFree(d_p5); hipFree(d_fc1); hipFree(d_fc2); hipFree(d_fc3);*/ }
746b7cef9f6f40b248d3aeeefbd2c2c310b6e709.cu
#include "cnn.h" #include "timer.h" #include <thrust/device_vector.h> #include <stdio.h> /* * TODO * Define kernel here */ __global__ void pooling( float * inputs, float * outputs, int N, int D, int NoImg) { // Store each work-item’s unique row and column int i = blockIdx.x * blockDim.x + threadIdx.x; // N*N int j = blockIdx.y * blockDim.y + threadIdx.y; // D int n = blockIdx.z * blockDim.z + threadIdx.z; // NoImg if (i < N*N && j < D && n < NoImg) { int x = i/N; int y = i - x*N; float max = 0; for (int k = 0; k < 2; k++) { for (int l = 0; l < 2; l++) { float pixel = inputs[(x*2 + k)*2*N + y*2+l + (j*N*N*4) + (4*N*N*D*n)]; max = (max > pixel) ? max : pixel; } } outputs[i + (j*N*N) + (N*N*D*n)] = max; } } __global__ void convolution_v1( float * inputs, float * outputs, float * filters, float * biases, int N, int D1, int D2, int NoImg) { // Store each work-item’s unique row and column int d = blockIdx.x * blockDim.x + threadIdx.x; // N*N int d2 = blockIdx.y * blockDim.y + threadIdx.y; // D2 int n = blockIdx.z * blockDim.z + threadIdx.z; // NoImg extern __shared__ float tmpFilters[]; if (d < N*N && d2 < D2 && n < NoImg) { for (int t = 0; t < D1; t+=1) { for (int i = 0; i < 9; i++) tmpFilters[i + (3*3* (threadIdx.y*D1 + t))] = filters[i + (3*3 * (d2*D1 + t))]; } __syncthreads(); int i = d/N; int j = d- i*N; int oIdx = i*N + j + (N*N*d2) + (N*N*D2*n); outputs[oIdx] = 0; // Unroll 1 times for (int t = 0; t < D1; t+=1) { float sum = 0; for (int k = 0; k < 3; k++) { for (int l = 0; l < 3; l++) { int x = i + k - 1; int y = j + l - 1; if (x >= 0 && x < N && y >= 0 && y < N) sum += inputs[x*N + y + N*N*t + (N*N*D1*n)] * filters[k*3 + l + (3*3 * (d2*D1 + t))]; } } outputs[oIdx] += sum; } // RELU float bias = biases[d2]; outputs[oIdx] = (outputs[oIdx] + bias > 0) ? (outputs[oIdx] + bias) : 0; } } __global__ void convolution_v2( float * inputs, float * outputs, float * filters, float * biases, int N, int D1, int D2, int NoImg) { // Store each work-item’s unique row and column int x1 = blockIdx.x * blockDim.x + threadIdx.x; // N*N*D2*NoImg if (x1 < N*N*D2*NoImg) { // Calculate index values int n = x1/(N*N*D2); int tmp1 = x1 - n*(N*N*D2); int d2 = tmp1/(N*N); int tmp2 = tmp1 - d2*(N*N); int i = tmp2/N; int j = tmp2 - i*N; int oIdx = x1; //i*N + j + (N*N*d2) + (N*N*D2*n); outputs[oIdx] = 0; // Unroll 1 times for (int t = 0; t < D1; t+=1) { float sum = 0; for (int k = 0; k < 3; k++) { for (int l = 0; l < 3; l++) { int x = i + k - 1; int y = j + l - 1; if (x >= 0 && x < N && y >= 0 && y < N) sum += inputs[x*N + y + N*N*t + (N*N*D1*n)] * filters[k*3 + l + (3*3 * (d2*D1 + t))]; } } outputs[oIdx] += sum; } // RELU float bias = biases[d2]; outputs[oIdx] = (outputs[oIdx] + bias > 0) ? 
(outputs[oIdx] + bias) : 0; } } __global__ void fc( float * input_neuron, float * output_neuron, float * weights, float * biases, const int N, const int M, const int NoImg) { int x = blockIdx.x * blockDim.x + threadIdx.x; // M int y = blockIdx.y * blockDim.y + threadIdx.y; // NoImg if (x < M && y < NoImg) { float sum = 0; for (int i = 0; i < N; i++) { sum += weights[x*N + i] * input_neuron[i + N*y]; } output_neuron[x + M*y] = sum + biases[x]; // RELU if (output_neuron[x + M*y] < 0) { output_neuron[x + M*y] = 0; } } } __global__ void softmax_kernel( float * output, int N) { int i = threadIdx.x; float sum = 0; for (i = 0; i < N; i++) { sum += exp(output[i]); } for (i = 0; i < N; i++) { output[i] = exp(output[i]) / sum; } } /************************ CUDA ************************/ #define NormalToOne(x) (((x)<=0)?(1):x) #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } // show memory usage of GPU static void show_mem_gpu(const char *info) { size_t free_byte ; size_t total_byte ; gpuErrchk(cudaMemGetInfo( &free_byte, &total_byte )) ; double free_db = (double)free_byte ; double total_db = (double)total_byte ; double used_db = total_db - free_db ; //printf("%s - GPU memory usage: used = %.3f MB, free = %.3f MB, total = %.3f MB\n", // info, used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); } float data_transfer_time = 0; float pooling_time = 0; float conv_time = 0; float fc_time = 0; float softmax_time = 0; /* * D = channel size * N = width and height of an output image * Thus, input is (D, N * 2, N * 2) and output is (D, N, N). 
*/ static void pooling_layer(float *inputs, float *outputs, int D, int N, int NoImg) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); show_mem_gpu("Before pooling"); // Call gpu kernel dim3 threadsPerBlock(8, 8, 1); if (N < 4) threadsPerBlock.x = N*N; threadsPerBlock.z = NormalToOne(1024 / (threadsPerBlock.x*threadsPerBlock.y)); dim3 numBlocks((N*N + threadsPerBlock.x - 1)/threadsPerBlock.x, (D + threadsPerBlock.y - 1)/threadsPerBlock.y, (NoImg + threadsPerBlock.z - 1)/threadsPerBlock.z); cudaEventRecord(start); pooling<<<numBlocks, threadsPerBlock>>>(inputs, outputs, N, D, NoImg); cudaEventRecord(stop); cudaEventSynchronize(stop); show_mem_gpu("After pooling"); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); pooling_time += milliseconds/1000; } static void convolution_layer_v1(float *inputs, float *outputs, float *filters, float *biases, int D2, int D1, int N, int NoImg) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Call GPU kernel dim3 threadsPerBlock(8, 8, 16); if (N < 4) threadsPerBlock.x = N*N; threadsPerBlock.z = NormalToOne(1024 / (threadsPerBlock.x*threadsPerBlock.y)); dim3 numBlocks((N*N + threadsPerBlock.x - 1)/threadsPerBlock.x, (D2 + threadsPerBlock.y - 1)/threadsPerBlock.y, (NoImg + threadsPerBlock.z - 1)/threadsPerBlock.z); cudaEventRecord(start); convolution_v1<<<numBlocks, threadsPerBlock, 3*3*D1*threadsPerBlock.y*sizeof(float)>>>(inputs, outputs, filters, biases, N, D1, D2, NoImg); cudaEventRecord(stop); gpuErrchk(cudaEventSynchronize(stop)); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("conv time: %f ms\n", milliseconds); conv_time += milliseconds/1000; } static void convolution_layer_v2(float *inputs, float *outputs, float *filters, float *biases, int D2, int D1, int N, int NoImg) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); show_mem_gpu("Before conv"); // Call GPU kernel dim3 threadsPerBlock(1024, 1, 1); dim3 numBlocks((N*N*D2*NoImg + threadsPerBlock.x - 1)/threadsPerBlock.x, 1, 1); cudaEventRecord(start); convolution_v2<<<numBlocks, threadsPerBlock>>>(inputs, outputs, filters, biases, N, D1, D2, NoImg); cudaEventRecord(stop); gpuErrchk(cudaEventSynchronize(stop)); show_mem_gpu("After conv"); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("conv time: %f ms\n", milliseconds); conv_time += milliseconds/1000; } /* * M = output size * N = input size */ static void fc_layer(float *input_neuron, float *output_neuron, float *weights, float *biases, int M, int N, int NoImg) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Call GPU kernel dim3 blockSize(16, 1); if (M < 64) blockSize.x = M; blockSize.y = NormalToOne(1024 / blockSize.x); dim3 gridSize((M + blockSize.x - 1) / blockSize.x, (NoImg + blockSize.y - 1)/blockSize.y); cudaEventRecord(start); fc<<<gridSize, blockSize>>>(input_neuron, output_neuron, weights, biases, N, M, NoImg); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); fc_time += milliseconds/1000; } static void softmax(float *output, int N) { timer_start(1); int i; float max = output[0]; for (i = 1; i < N; i++) { max = (output[i] > max)?output[i]:max; } float sum = 0; for (i = 0; i < N; i++) { sum += exp(output[i] - max); } for (i = 0; i < N; i++) { output[i] = exp(output[i] - max) / sum; } softmax_time += timer_end(1); } static int find_max(float *fc, int N) { int i; int 
maxid = 0; float maxval = 0; for (i = 0; i < N; i++) { if (maxval < fc[i]) { maxval = fc[i]; maxid = i; } } return maxid; } float* alloc_layer(size_t n) { return (float*)malloc(n * sizeof(float)); } void cnn_init() { /* * TODO * Initialize OpenCL objects as global variables. For example, * clGetPlatformIDs(1, &platform, NULL); */ } const int NETWORK_SIZES[] = { 64 * 3 * 3 * 3, 64, 64 * 64 * 3 * 3, 64, 128 * 64 * 3 * 3, 128, 128 * 128 * 3 * 3, 128, 256 * 128 * 3 * 3, 256, 256 * 256 * 3 * 3, 256, 256 * 256 * 3 * 3, 256, 512 * 256 * 3 * 3, 512, 512 * 512 * 3 * 3, 512, 512 * 512 * 3 * 3, 512, 512 * 512 * 3 * 3, 512, 512 * 512 * 3 * 3, 512, 512 * 512 * 3 * 3, 512, 512 * 512, 512, 512 * 512, 512, 10 * 512, 10 }; const int OUTPUT_SIZES[] = { 64 * 32 * 32, 64 * 32 * 32, 64 * 16 * 16, 128 * 16 * 16, 128 * 16 * 16, 128 * 8 * 8, 256 * 8 * 8, 256 * 8 * 8, 256 * 8 * 8, 256 * 4 * 4, 512 * 4 * 4, 512 * 4 * 4, 512 * 4 * 4, 512 * 2 * 2, 512 * 2 * 2, 512 * 2 * 2, 512 * 2 * 2, 512 * 1 * 1, 512, 512, 10 }; void cnn(float *images, float **network, int *labels, float *confidences, int num_images, int batch_size) { /* * TODO * Implement here. * Write classification results to labels and confidences. * See "cnn_seq.c" if you don't know what to do. */ // slice the network into weights and biases float *w1_1, *b1_1, *w1_2, *b1_2; float *w2_1, *b2_1, *w2_2, *b2_2; float *w3_1, *b3_1, *w3_2, *b3_2, *w3_3, *b3_3; float *w4_1, *b4_1, *w4_2, *b4_2, *w4_3, *b4_3; float *w5_1, *b5_1, *w5_2, *b5_2, *w5_3, *b5_3; float *w1, *b1, *w2, *b2, *w3, *b3; // Set data for weights and biases w1_1 = network[0]; b1_1 = network[1]; w1_2 = network[2]; b1_2 = network[3]; w2_1 = network[4]; b2_1 = network[5]; w2_2 = network[6]; b2_2 = network[7]; w3_1 = network[8]; b3_1 = network[9]; w3_2 = network[10]; b3_2 = network[11]; w3_3 = network[12]; b3_3 = network[13]; w4_1 = network[14]; b4_1 = network[15]; w4_2 = network[16]; b4_2 = network[17]; w4_3 = network[18]; b4_3 = network[19]; w5_1 = network[20]; b5_1 = network[21]; w5_2 = network[22]; b5_2 = network[23]; w5_3 = network[24]; b5_3 = network[25]; w1 = network[26]; b1 = network[27]; w2 = network[28]; b2 = network[29]; w3 = network[30]; b3 = network[31]; // Allocate vectors in device memory float *d_w1_1, *d_b1_1, *d_w1_2, *d_b1_2; float *d_w2_1, *d_b2_1, *d_w2_2, *d_b2_2; float *d_w3_1, *d_b3_1, *d_w3_2, *d_b3_2, *d_w3_3, *d_b3_3; float *d_w4_1, *d_b4_1, *d_w4_2, *d_b4_2, *d_w4_3, *d_b4_3; float *d_w5_1, *d_b5_1, *d_w5_2, *d_b5_2, *d_w5_3, *d_b5_3; float *d_w1, *d_b1, *d_w2, *d_b2, *d_w3, *d_b3; cudaMalloc(&d_w1_1, NETWORK_SIZES[0] * sizeof(float)); cudaMalloc(&d_w1_2, NETWORK_SIZES[2] * sizeof(float)); cudaMalloc(&d_w2_1, NETWORK_SIZES[4] * sizeof(float)); cudaMalloc(&d_w2_2, NETWORK_SIZES[6] * sizeof(float)); cudaMalloc(&d_w3_1, NETWORK_SIZES[8] * sizeof(float)); cudaMalloc(&d_w3_2, NETWORK_SIZES[10] * sizeof(float)); cudaMalloc(&d_w3_3, NETWORK_SIZES[12] * sizeof(float)); cudaMalloc(&d_w4_1, NETWORK_SIZES[14] * sizeof(float)); cudaMalloc(&d_w4_2, NETWORK_SIZES[16] * sizeof(float)); cudaMalloc(&d_w4_3, NETWORK_SIZES[18] * sizeof(float)); cudaMalloc(&d_w5_1, NETWORK_SIZES[20] * sizeof(float)); cudaMalloc(&d_w5_2, NETWORK_SIZES[22] * sizeof(float)); cudaMalloc(&d_w5_3, NETWORK_SIZES[24] * sizeof(float)); cudaMalloc(&d_w1, NETWORK_SIZES[26] * sizeof(float)); cudaMalloc(&d_w2, NETWORK_SIZES[28] * sizeof(float)); cudaMalloc(&d_w3, NETWORK_SIZES[30] * sizeof(float)); cudaMalloc(&d_b1_1, NETWORK_SIZES[1] * sizeof(float)); cudaMalloc(&d_b1_2, NETWORK_SIZES[3] * sizeof(float)); 
cudaMalloc(&d_b2_1, NETWORK_SIZES[5] * sizeof(float)); cudaMalloc(&d_b2_2, NETWORK_SIZES[7] * sizeof(float)); cudaMalloc(&d_b3_1, NETWORK_SIZES[9] * sizeof(float)); cudaMalloc(&d_b3_2, NETWORK_SIZES[11] * sizeof(float)); cudaMalloc(&d_b3_3, NETWORK_SIZES[13] * sizeof(float)); cudaMalloc(&d_b4_1, NETWORK_SIZES[15] * sizeof(float)); cudaMalloc(&d_b4_2, NETWORK_SIZES[17] * sizeof(float)); cudaMalloc(&d_b4_3, NETWORK_SIZES[19] * sizeof(float)); cudaMalloc(&d_b5_1, NETWORK_SIZES[21] * sizeof(float)); cudaMalloc(&d_b5_2, NETWORK_SIZES[23] * sizeof(float)); cudaMalloc(&d_b5_3, NETWORK_SIZES[25] * sizeof(float)); cudaMalloc(&d_b1, NETWORK_SIZES[27] * sizeof(float)); cudaMalloc(&d_b2, NETWORK_SIZES[29] * sizeof(float)); cudaMalloc(&d_b3, NETWORK_SIZES[31] * sizeof(float)); // Create cudaEvent to measure cuda time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Copy vectors from host memory to device memory cudaEventRecord(start); cudaMemcpy(d_w1_1, w1_1, NETWORK_SIZES[0] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_w1_2, w1_2, NETWORK_SIZES[2] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_w2_1, w2_1, NETWORK_SIZES[4] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_w2_2, w2_2, NETWORK_SIZES[6] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_w3_1, w3_1, NETWORK_SIZES[8] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_w3_2, w3_2, NETWORK_SIZES[10] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_w3_3, w3_3, NETWORK_SIZES[12] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_w4_1, w4_1, NETWORK_SIZES[14] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_w4_2, w4_2, NETWORK_SIZES[16] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_w4_3, w4_3, NETWORK_SIZES[18] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_w5_1, w5_1, NETWORK_SIZES[20] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_w5_2, w5_2, NETWORK_SIZES[22] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_w5_3, w5_3, NETWORK_SIZES[24] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_w1, w1, NETWORK_SIZES[26] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_w2, w2, NETWORK_SIZES[28] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_w3, w3, NETWORK_SIZES[30] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b1_1, b1_1, NETWORK_SIZES[1] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b1_2, b1_2, NETWORK_SIZES[3] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b2_1, b2_1, NETWORK_SIZES[5] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b2_2, b2_2, NETWORK_SIZES[7] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b3_1, b3_1, NETWORK_SIZES[9] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b3_2, b3_2, NETWORK_SIZES[11] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b3_3, b3_3, NETWORK_SIZES[13] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b4_1, b4_1, NETWORK_SIZES[15] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b4_2, b4_2, NETWORK_SIZES[17] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b4_3, b4_3, NETWORK_SIZES[19] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b5_1, b5_1, NETWORK_SIZES[21] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b5_2, b5_2, NETWORK_SIZES[23] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b5_3, b5_3, NETWORK_SIZES[25] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b1, b1, NETWORK_SIZES[27] * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b2, b2, NETWORK_SIZES[29] * sizeof(float), 
cudaMemcpyHostToDevice); cudaMemcpy(d_b3, b3, NETWORK_SIZES[31] * sizeof(float), cudaMemcpyHostToDevice); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time = milliseconds/1000; printf("network data transfer time = %f s\n", data_transfer_time); data_transfer_time = 0; show_mem_gpu("After network data transfer"); // Batch images size int batchImg = batch_size; printf("batch size = %d\n", batchImg); // Allocate output vectors in device memory to transfer between layers float *d_c1_1, *d_c1_2, *d_p1; float *d_c2_1, *d_c2_2, *d_p2; float *d_c3_1, *d_c3_2, *d_c3_3, *d_p3; float *d_c4_1, *d_c4_2, *d_c4_3, *d_p4; float *d_c5_1, *d_c5_2, *d_c5_3, *d_p5; float *d_fc1, *d_fc2, *d_fc3; /*cudaMalloc(&d_c1_1, batchImg * OUTPUT_SIZES[0] * sizeof(float)); cudaMalloc(&d_c1_2, batchImg * OUTPUT_SIZES[1] * sizeof(float)); cudaMalloc(&d_p1, batchImg * OUTPUT_SIZES[2] * sizeof(float)); cudaMalloc(&d_c2_1, batchImg * OUTPUT_SIZES[3] * sizeof(float)); cudaMalloc(&d_c2_2, batchImg * OUTPUT_SIZES[4] * sizeof(float)); cudaMalloc(&d_p2, batchImg * OUTPUT_SIZES[5] * sizeof(float)); cudaMalloc(&d_c3_1, batchImg * OUTPUT_SIZES[6] * sizeof(float)); cudaMalloc(&d_c3_2, batchImg * OUTPUT_SIZES[7] * sizeof(float)); cudaMalloc(&d_c3_3, batchImg * OUTPUT_SIZES[8] * sizeof(float)); cudaMalloc(&d_p3, batchImg * OUTPUT_SIZES[9] * sizeof(float)); cudaMalloc(&d_c4_1, batchImg * OUTPUT_SIZES[10] * sizeof(float)); cudaMalloc(&d_c4_2, batchImg * OUTPUT_SIZES[11] * sizeof(float)); cudaMalloc(&d_c4_3, batchImg * OUTPUT_SIZES[12] * sizeof(float)); cudaMalloc(&d_p4, batchImg * OUTPUT_SIZES[13] * sizeof(float)); cudaMalloc(&d_c5_1, batchImg * OUTPUT_SIZES[14] * sizeof(float)); cudaMalloc(&d_c5_2, batchImg * OUTPUT_SIZES[15] * sizeof(float)); cudaMalloc(&d_c5_3, batchImg * OUTPUT_SIZES[16] * sizeof(float)); cudaMalloc(&d_p5, batchImg * OUTPUT_SIZES[17] * sizeof(float)); cudaMalloc(&d_fc1, batchImg * OUTPUT_SIZES[18] * sizeof(float)); cudaMalloc(&d_fc2, batchImg * OUTPUT_SIZES[19] * sizeof(float)); cudaMalloc(&d_fc3, batchImg * OUTPUT_SIZES[20] * sizeof(float));*/ show_mem_gpu("After malloc output vectors"); // run network size_t image_size = batchImg*3*32*32 * sizeof(float); float *d_image; cudaMalloc(&d_image, image_size); int start_num_images = num_images%batchImg; // Images will processed by batch for(int i = start_num_images; i < num_images; i += batchImg) { printf("i = %d\n", i); // Copy image from host to device float *image = images + i * 3 * 32 * 32; cudaEventRecord(start); cudaMemcpy(d_image, image, image_size, cudaMemcpyHostToDevice); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaMalloc(&d_c1_1, batchImg * OUTPUT_SIZES[0] * sizeof(float)); convolution_layer_v2(d_image, d_c1_1, d_w1_1, d_b1_1, 64, 3, 32, batchImg); cudaMalloc(&d_c1_2, batchImg * OUTPUT_SIZES[1] * sizeof(float)); convolution_layer_v2(d_c1_1, d_c1_2, d_w1_2, d_b1_2, 64, 64, 32, batchImg); cudaFree(d_c1_1); cudaMalloc(&d_p1, batchImg * OUTPUT_SIZES[2] * sizeof(float)); pooling_layer(d_c1_2, d_p1, 64, 16, batchImg); cudaFree(d_c1_2); cudaMalloc(&d_c2_1, batchImg * OUTPUT_SIZES[3] * sizeof(float)); convolution_layer_v2(d_p1, d_c2_1, d_w2_1, d_b2_1, 128, 64, 16, batchImg); cudaFree(d_p1); cudaMalloc(&d_c2_2, batchImg * OUTPUT_SIZES[4] * sizeof(float)); convolution_layer_v2(d_c2_1, d_c2_2, d_w2_2, d_b2_2, 128, 128, 16, batchImg); cudaFree(d_c2_1); 
cudaMalloc(&d_p2, batchImg * OUTPUT_SIZES[5] * sizeof(float)); pooling_layer(d_c2_2, d_p2, 128, 8, batchImg); cudaFree(d_c2_2); cudaMalloc(&d_c3_1, batchImg * OUTPUT_SIZES[6] * sizeof(float)); convolution_layer_v2(d_p2, d_c3_1, d_w3_1, d_b3_1, 256, 128, 8, batchImg); cudaFree(d_p2); cudaMalloc(&d_c3_2, batchImg * OUTPUT_SIZES[7] * sizeof(float)); convolution_layer_v2(d_c3_1, d_c3_2, d_w3_2, d_b3_2, 256, 256, 8, batchImg); cudaFree(d_c3_1); cudaMalloc(&d_c3_3, batchImg * OUTPUT_SIZES[8] * sizeof(float)); convolution_layer_v2(d_c3_2, d_c3_3, d_w3_3, d_b3_3, 256, 256, 8, batchImg); cudaFree(d_c3_2); cudaMalloc(&d_p3, batchImg * OUTPUT_SIZES[9] * sizeof(float)); pooling_layer(d_c3_3, d_p3, 256, 4, batchImg); cudaFree(d_c3_3); cudaMalloc(&d_c4_1, batchImg * OUTPUT_SIZES[10] * sizeof(float)); convolution_layer_v2(d_p3, d_c4_1, d_w4_1, d_b4_1, 512, 256, 4, batchImg); cudaFree(d_p3); cudaMalloc(&d_c4_2, batchImg * OUTPUT_SIZES[11] * sizeof(float)); convolution_layer_v2(d_c4_1, d_c4_2, d_w4_2, d_b4_2, 512, 512, 4, batchImg); cudaFree(d_c4_1); cudaMalloc(&d_c4_3, batchImg * OUTPUT_SIZES[12] * sizeof(float)); convolution_layer_v2(d_c4_2, d_c4_3, d_w4_3, d_b4_3, 512, 512, 4, batchImg); cudaFree(d_c4_2); cudaMalloc(&d_p4, batchImg * OUTPUT_SIZES[13] * sizeof(float)); pooling_layer(d_c4_3, d_p4, 512, 2, batchImg); cudaFree(d_c4_3); cudaMalloc(&d_c5_1, batchImg * OUTPUT_SIZES[14] * sizeof(float)); convolution_layer_v2(d_p4, d_c5_1, d_w5_1, d_b5_1, 512, 512, 2, batchImg); cudaFree(d_p4); cudaMalloc(&d_c5_2, batchImg * OUTPUT_SIZES[15] * sizeof(float)); convolution_layer_v2(d_c5_1, d_c5_2, d_w5_2, d_b5_2, 512, 512, 2, batchImg); cudaFree(d_c5_1); cudaMalloc(&d_c5_3, batchImg * OUTPUT_SIZES[16] * sizeof(float)); convolution_layer_v2(d_c5_2, d_c5_3, d_w5_3, d_b5_3, 512, 512, 2, batchImg); cudaFree(d_c5_2); cudaMalloc(&d_p5, batchImg * OUTPUT_SIZES[17] * sizeof(float)); pooling_layer(d_c5_3, d_p5, 512, 1, batchImg); cudaFree(d_c5_3); cudaMalloc(&d_fc1, batchImg * OUTPUT_SIZES[18] * sizeof(float)); fc_layer(d_p5, d_fc1, d_w1, d_b1, 512, 512, batchImg); cudaFree(d_p5); cudaMalloc(&d_fc2, batchImg * OUTPUT_SIZES[19] * sizeof(float)); fc_layer(d_fc1, d_fc2, d_w2, d_b2, 512, 512, batchImg); cudaFree(d_fc1); cudaMalloc(&d_fc3, batchImg * OUTPUT_SIZES[20] * sizeof(float)); fc_layer(d_fc2, d_fc3, d_w3, d_b3, 10, 512, batchImg); cudaFree(d_fc2); // Copy result from device memory to host memory float *fc3_mul = alloc_layer(OUTPUT_SIZES[20] * batchImg); cudaEventRecord(start); cudaMemcpy(fc3_mul, d_fc3, batchImg * OUTPUT_SIZES[20] * sizeof(float), cudaMemcpyDeviceToHost); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; // Predicted labels for (int j = 0; j < batchImg; j++) { float *fc3 = fc3_mul + j*10; softmax(fc3, 10); int idx = i + j; labels[idx] = find_max(fc3, 10); confidences[idx] = fc3[labels[idx]]; } free(fc3_mul); cudaFree(d_fc3); } cudaFree(d_image); // The remaining images size_t image_size2 = start_num_images*3*32*32 * sizeof(float); float *d_image2; batchImg = start_num_images; for(int i = 0; i < start_num_images; i += start_num_images) { // Copy image from host to device float *image = images + i * 3 * 32 * 32; cudaEventRecord(start); cudaMalloc(&d_image2, image_size2); cudaMemcpy(d_image2, image, image_size2, cudaMemcpyHostToDevice); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; 
cudaEventRecord(start); cudaMalloc(&d_c1_1, batchImg * OUTPUT_SIZES[0] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_image2, d_c1_1, d_w1_1, d_b1_1, 64, 3, 32, batchImg); cudaEventRecord(start); cudaFree(d_image2); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_c1_2, batchImg * OUTPUT_SIZES[1] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_c1_1, d_c1_2, d_w1_2, d_b1_2, 64, 64, 32, batchImg); cudaEventRecord(start); cudaFree(d_c1_1); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_p1, batchImg * OUTPUT_SIZES[2] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; pooling_layer(d_c1_2, d_p1, 64, 16, batchImg); cudaEventRecord(start); cudaFree(d_c1_2); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_c2_1, batchImg * OUTPUT_SIZES[3] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_p1, d_c2_1, d_w2_1, d_b2_1, 128, 64, 16, batchImg); cudaEventRecord(start); cudaFree(d_p1); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_c2_2, batchImg * OUTPUT_SIZES[4] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_c2_1, d_c2_2, d_w2_2, d_b2_2, 128, 128, 16, batchImg); cudaEventRecord(start); cudaFree(d_c2_1); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_p2, batchImg * OUTPUT_SIZES[5] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; pooling_layer(d_c2_2, d_p2, 128, 8, batchImg); cudaEventRecord(start); cudaFree(d_c2_2); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_c3_1, batchImg * OUTPUT_SIZES[6] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_p2, d_c3_1, d_w3_1, d_b3_1, 256, 128, 8, batchImg); cudaEventRecord(start); cudaFree(d_p2); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; 
cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaMalloc(&d_c3_2, batchImg * OUTPUT_SIZES[7] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_c3_1, d_c3_2, d_w3_2, d_b3_2, 256, 256, 8, batchImg); cudaEventRecord(start); cudaFree(d_c3_1); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_c3_3, batchImg * OUTPUT_SIZES[8] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_c3_2, d_c3_3, d_w3_3, d_b3_3, 256, 256, 8, batchImg); cudaEventRecord(start); cudaFree(d_c3_2); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaMalloc(&d_p3, batchImg * OUTPUT_SIZES[9] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; pooling_layer(d_c3_3, d_p3, 256, 4, batchImg); cudaEventRecord(start); cudaFree(d_c3_3); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_c4_1, batchImg * OUTPUT_SIZES[10] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_p3, d_c4_1, d_w4_1, d_b4_1, 512, 256, 4, batchImg); cudaEventRecord(start); cudaFree(d_p3); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_c4_2, batchImg * OUTPUT_SIZES[11] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_c4_1, d_c4_2, d_w4_2, d_b4_2, 512, 512, 4, batchImg); cudaEventRecord(start); cudaFree(d_c4_1); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaMalloc(&d_c4_3, batchImg * OUTPUT_SIZES[12] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_c4_2, d_c4_3, d_w4_3, d_b4_3, 512, 512, 4, batchImg); cudaEventRecord(start); cudaFree(d_c4_2); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_p4, batchImg * OUTPUT_SIZES[13] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; pooling_layer(d_c4_3, d_p4, 512, 2, batchImg); cudaEventRecord(start); cudaFree(d_c4_3); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds 
= 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_c5_1, batchImg * OUTPUT_SIZES[14] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_p4, d_c5_1, d_w5_1, d_b5_1, 512, 512, 2, batchImg); cudaEventRecord(start); cudaFree(d_p4); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_c5_2, batchImg * OUTPUT_SIZES[15] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_c5_1, d_c5_2, d_w5_2, d_b5_2, 512, 512, 2, batchImg); cudaEventRecord(start); cudaFree(d_c5_1); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_c5_3, batchImg * OUTPUT_SIZES[16] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; convolution_layer_v2(d_c5_2, d_c5_3, d_w5_3, d_b5_3, 512, 512, 2, batchImg); cudaEventRecord(start); cudaFree(d_c5_2); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_p5, batchImg * OUTPUT_SIZES[17] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; pooling_layer(d_c5_3, d_p5, 512, 1, batchImg); cudaEventRecord(start); cudaFree(d_c5_3); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_fc1, batchImg * OUTPUT_SIZES[18] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; fc_layer(d_p5, d_fc1, d_w1, d_b1, 512, 512, batchImg); cudaEventRecord(start); cudaFree(d_p5); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_fc2, batchImg * OUTPUT_SIZES[19] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; fc_layer(d_fc1, d_fc2, d_w2, d_b2, 512, 512, batchImg); cudaEventRecord(start); cudaFree(d_fc1); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; cudaEventRecord(start); cudaMalloc(&d_fc3, batchImg * OUTPUT_SIZES[20] * sizeof(float)); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); data_transfer_time += milliseconds/1000; fc_layer(d_fc2, d_fc3, d_w3, d_b3, 10, 512, batchImg); cudaEventRecord(start); cudaFree(d_fc2); cudaEventRecord(stop); 
cudaEventSynchronize(stop);
    milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    data_transfer_time += milliseconds/1000;

    // Copy result from device memory to host memory
    float *fc3_mul = alloc_layer(OUTPUT_SIZES[20] * batchImg);
    cudaEventRecord(start);
    cudaMemcpy(fc3_mul, d_fc3, batchImg * OUTPUT_SIZES[20] * sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    data_transfer_time += milliseconds/1000;

    // Predicted labels
    for (int j = 0; j < batchImg; j++) {
        float *fc3 = fc3_mul + j*10;
        softmax(fc3, 10);
        int idx = i + j;
        labels[idx] = find_max(fc3, 10);
        confidences[idx] = fc3[labels[idx]];
    }
    free(fc3_mul);

    cudaEventRecord(start);
    cudaFree(d_fc3);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    data_transfer_time += milliseconds/1000;
}

printf("data transfer time = %f s\n", data_transfer_time);
printf("pooling time = %f s\n", pooling_time);
printf("convolution time = %f s\n", conv_time);
printf("fully connected time = %f s\n", fc_time);
printf("softmax time = %f s\n", softmax_time);

// Free the network parameters on the device
cudaFree(d_w1_1); cudaFree(d_b1_1); cudaFree(d_w1_2); cudaFree(d_b1_2);
cudaFree(d_w2_1); cudaFree(d_b2_1); cudaFree(d_w2_2); cudaFree(d_b2_2);
cudaFree(d_w3_1); cudaFree(d_b3_1); cudaFree(d_w3_2); cudaFree(d_b3_2);
cudaFree(d_w3_3); cudaFree(d_b3_3);
cudaFree(d_w4_1); cudaFree(d_b4_1); cudaFree(d_w4_2); cudaFree(d_b4_2);
cudaFree(d_w4_3); cudaFree(d_b4_3);
cudaFree(d_w5_1); cudaFree(d_b5_1); cudaFree(d_w5_2); cudaFree(d_b5_2);
cudaFree(d_w5_3); cudaFree(d_b5_3);
cudaFree(d_w1); cudaFree(d_b1); cudaFree(d_w2); cudaFree(d_b2);
cudaFree(d_w3); cudaFree(d_b3);

/*cudaFree(d_c1_1); cudaFree(d_c1_2); cudaFree(d_p1);
cudaFree(d_c2_1); cudaFree(d_c2_2); cudaFree(d_p2);
cudaFree(d_c3_1); cudaFree(d_c3_2); cudaFree(d_c3_3); cudaFree(d_p3);
cudaFree(d_c4_1); cudaFree(d_c4_2); cudaFree(d_c4_3); cudaFree(d_p4);
cudaFree(d_c5_1); cudaFree(d_c5_2); cudaFree(d_c5_3); cudaFree(d_p5);
cudaFree(d_fc1); cudaFree(d_fc2); cudaFree(d_fc3);*/
}
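Note on the file above: every cudaMalloc/cudaMemcpy/cudaFree in the batched loops is bracketed by the same cudaEvent timing boilerplate. As a minimal sketch (not part of the original source; the helper names timer_start/timer_stop_seconds are made up here), the pattern could be factored into two small host helpers that return elapsed seconds to accumulate into data_transfer_time:

// Hypothetical helpers factoring out the repeated cudaEvent timing pattern.
// They assume only one region is being timed at a time, as in the code above.
static cudaEvent_t timer_start_ev, timer_stop_ev;

static void timer_start(void) {
    cudaEventCreate(&timer_start_ev);
    cudaEventCreate(&timer_stop_ev);
    cudaEventRecord(timer_start_ev);
}

// Stops the timer and returns the elapsed time of the region in seconds.
static float timer_stop_seconds(void) {
    cudaEventRecord(timer_stop_ev);
    cudaEventSynchronize(timer_stop_ev);
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, timer_start_ev, timer_stop_ev);
    cudaEventDestroy(timer_start_ev);
    cudaEventDestroy(timer_stop_ev);
    return ms / 1000.0f;
}

// Usage, mirroring the measurements above:
//   timer_start();
//   cudaMemcpy(d_image, image, image_size, cudaMemcpyHostToDevice);
//   data_transfer_time += timer_stop_seconds();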
d9dd9f74d8a50099e77ba9f560baf0c8329ee762.hip
// !!! This is a file automatically generated by hipify!!!
#include "prevChecks.hu"
#include "errorMacros.hu"

void checkCLIArguments(int argc, char *argv[], unsigned int *width)
{
    unsigned int i, ovfCheck, curDigit;

    //basic syntax and positive integer
    if(argc!=2)
        SPIT("Usage: %s <matrix_width>\n", argv[0]);

    (*width) = 0;
    for(i=0; i<strlen(argv[1]); i++) {
        (*width) *= 10;
        if(argv[1][i]<'0' || argv[1][i]>'9')
            SPIT("Could not parse \"%s\" as positive integer\n", argv[1]);

        //sum with overflow check
        curDigit = (unsigned int) (argv[1][i]-'0');
        ovfCheck = (*width);
        (*width) += curDigit;
        if((*width) < curDigit | (*width) < ovfCheck)
            SPIT("Integer \"%s\" too big, overflows\n", argv[1]);
    }
}

void checkCUDAPresent(void)
{
    int count;
    hipGetDeviceCount(&count);
    if(count>0)
        return;
    SPIT("No CUDA capable devices found!\n");
}
d9dd9f74d8a50099e77ba9f560baf0c8329ee762.cu
#include "prevChecks.hu"
#include "errorMacros.hu"

void checkCLIArguments(int argc, char *argv[], unsigned int *width)
{
    unsigned int i, ovfCheck, curDigit;

    //basic syntax and positive integer
    if(argc!=2)
        SPIT("Usage: %s <matrix_width>\n", argv[0]);

    (*width) = 0;
    for(i=0; i<strlen(argv[1]); i++) {
        (*width) *= 10;
        if(argv[1][i]<'0' || argv[1][i]>'9')
            SPIT("Could not parse \"%s\" as positive integer\n", argv[1]);

        //sum with overflow check
        curDigit = (unsigned int) (argv[1][i]-'0');
        ovfCheck = (*width);
        (*width) += curDigit;
        if((*width) < curDigit | (*width) < ovfCheck)
            SPIT("Integer \"%s\" too big, overflows\n", argv[1]);
    }
}

void checkCUDAPresent(void)
{
    int count;
    cudaGetDeviceCount(&count);
    if(count>0)
        return;
    SPIT("No CUDA capable devices found!\n");
}
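For reference, a hypothetical main() showing how these checks would be called before any GPU work. It is not part of the original pair, and it assumes SPIT() from errorMacros.hu prints the message and terminates the program (which is what the parsing loop above appears to rely on):

#include "prevChecks.hu"

int main(int argc, char *argv[])
{
    unsigned int width = 0;

    checkCLIArguments(argc, argv, &width);  // exits via SPIT() on bad input
    checkCUDAPresent();                     // exits via SPIT() if no device

    // ... allocate a width x width matrix and launch kernels here ...
    return 0;
}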
388e3678c7e39bb61c2f7b913ad5a45fad6e02e1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void xvpy_f32 (float* x, float* v, float* y, int len)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        y[idx] += x[idx] * v[idx];
    }
}
388e3678c7e39bb61c2f7b913ad5a45fad6e02e1.cu
#include "includes.h"

__global__ void xvpy_f32 (float* x, float* v, float* y, int len)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        y[idx] += x[idx] * v[idx];
    }
}
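A minimal, hypothetical launch of xvpy_f32 (not in the original file): d_x, d_v and d_y are assumed to be device buffers holding len floats, and the grid size is rounded up so that each element is handled by one thread, matching the bounds check inside the kernel.

// Sketch only: launch the element-wise y += x * v kernel over len elements.
void xvpy_f32_launch(float *d_x, float *d_v, float *d_y, int len)
{
    const int threads = 256;
    const int blocks  = (len + threads - 1) / threads;  // round up
    xvpy_f32<<<blocks, threads>>>(d_x, d_v, d_y, len);
    cudaDeviceSynchronize();  // block so any launch error surfaces here
}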
950c6d03361fe4ff19904c7dbb626569aa22fb06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from clag2z.cu mixed zc -> ds, Wed Sep 17 15:08:23 2014 @author Mark Gates */ #include "common_magma.h" #define PRECISION_d #define BLK_X 64 #define BLK_Y 32 /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to slat2d and zlaset. */ __global__ void slag2d_kernel( int m, int n, const float *SA, int ldsa, double *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = (double)( SA[j*ldsa] ); } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { A[j*lda] = (double)( SA[j*ldsa] ); } } } } /** Purpose ------- SLAG2D_STREAM converts a single-real matrix, SA, to a double-real matrix, A. Note that while it is possible to overflow while converting from double to single, it is not possible to overflow when converting from single to double. Arguments --------- @param[in] m INTEGER The number of lines of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in] SA REAL array, dimension (LDSA,N) On entry, the M-by-N coefficient matrix SA. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,M). @param[out] A DOUBLE PRECISION array, dimension (LDA,N) On exit, the M-by-N coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_slag2d_q( magma_int_t m, magma_int_t n, const float *SA, magma_int_t ldsa, double *A, magma_int_t lda, magma_int_t *info, magma_queue_t queue) { *info = 0; if ( m < 0 ) *info = -1; else if ( n < 0 ) *info = -2; else if ( ldsa < max(1,m) ) *info = -4; else if ( lda < max(1,m) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X ); dim3 grid( (m+BLK_X-1)/BLK_X, (n+BLK_Y-1)/BLK_Y ); hipLaunchKernelGGL(( slag2d_kernel), dim3(grid), dim3(threads), 0, queue , m, n, SA, ldsa, A, lda ); } /** @see magmablas_slag2d_q @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_slag2d( magma_int_t m, magma_int_t n, const float *SA, magma_int_t ldsa, double *A, magma_int_t lda, magma_int_t *info) { magmablas_slag2d_q( m, n, SA, ldsa, A, lda, info, magma_stream ); }
950c6d03361fe4ff19904c7dbb626569aa22fb06.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from clag2z.cu mixed zc -> ds, Wed Sep 17 15:08:23 2014 @author Mark Gates */ #include "common_magma.h" #define PRECISION_d #define BLK_X 64 #define BLK_Y 32 /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to slat2d and zlaset. */ __global__ void slag2d_kernel( int m, int n, const float *SA, int ldsa, double *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = (double)( SA[j*ldsa] ); } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { A[j*lda] = (double)( SA[j*ldsa] ); } } } } /** Purpose ------- SLAG2D_STREAM converts a single-real matrix, SA, to a double-real matrix, A. Note that while it is possible to overflow while converting from double to single, it is not possible to overflow when converting from single to double. Arguments --------- @param[in] m INTEGER The number of lines of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0. @param[in] SA REAL array, dimension (LDSA,N) On entry, the M-by-N coefficient matrix SA. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,M). @param[out] A DOUBLE PRECISION array, dimension (LDA,N) On exit, the M-by-N coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_slag2d_q( magma_int_t m, magma_int_t n, const float *SA, magma_int_t ldsa, double *A, magma_int_t lda, magma_int_t *info, magma_queue_t queue) { *info = 0; if ( m < 0 ) *info = -1; else if ( n < 0 ) *info = -2; else if ( ldsa < max(1,m) ) *info = -4; else if ( lda < max(1,m) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X ); dim3 grid( (m+BLK_X-1)/BLK_X, (n+BLK_Y-1)/BLK_Y ); slag2d_kernel<<< grid, threads, 0, queue >>> ( m, n, SA, ldsa, A, lda ); } /** @see magmablas_slag2d_q @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_slag2d( magma_int_t m, magma_int_t n, const float *SA, magma_int_t ldsa, double *A, magma_int_t lda, magma_int_t *info) { magmablas_slag2d_q( m, n, SA, ldsa, A, lda, info, magma_stream ); }
9af36f832d82d941b3a3e3e9455175ad4ff58a31.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 dsymv.cu is nearly identical to dsymv.cu, just change names and drop . @generated from zhemv.cu normal z -> d, Sat Nov 15 19:53:59 2014 @author Mark Gates */ #include "common_magma.h" #define PRECISION_d #define NB_X 64 #define NB_Y 4 #define bank_shift 33 #define quarter_NB_X 16 #define half_NB_X 32 /******************************************************************************* Lower case, compute block multiply, work = A*x, for any size n: [ A11*x1 A12*x2 A13*x3 ] [ A11 A12 A13 ] [ x1 ] [ --- (A21*x1 + A22*x2) A23*x3 ] = [ A21 A22 A23 ] * [ x2 ] [ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ] Uses a 64x4 thread block. For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed). For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles. In both cases, each thread multiplies 4 elements. For rows past the bottom of the matrix, the A pointer is adjusted to be the last valid row of A, which multiple threads will read. Extra rows are ignored when saving results to work. Columns past the right edge are explicitly ignored when loading. x values past the bottom are set to zero, thus, extra columns are zeroed when multiplying. ********************************************************************/ __global__ void dsymv_kernel_L( int n, const double * __restrict__ A, int lda, const double * __restrict__ x, int incx, double * __restrict__ work) { #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) // treats sA as 16x64 block #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ] // treats sA as 32x32 block #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)]) // 64x4 thread block const int tx = threadIdx.x; const int ty = threadIdx.y; const int blk = blockIdx.x; const int blk_ind = NB_X * blk; const int td = NB_X * ty + tx; // 32x8 thread block const int tx2 = td % half_NB_X; const int ty2 = td / half_NB_X; // If this blk has fewer than NB_X rows, partial is the number of valid rows, // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid. // Else, partial == 0. const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); double psum, psum2; double total = MAGMA_D_ZERO; // sA is used as a 32x32 block, sA32(i,j), // and as a 16x64 block, sA16(i,j), in different parts of the code. // sA must be at least half_NB_X*bank_shift = 32x33 = 1056; // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056 __shared__ double sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? 
*/ __shared__ double sx [NB_X]; // for x[ blk ] __shared__ double sx2[NB_X]; // for x[ blk2 ], which cycles over all blocks left of diag double rA[4]; double psums[4]; // -------------------- // load 64x1 block x(blk_ind + 0:63) into sx x += (blk_ind + tx)*incx; // x is x(blk_ind + tx) if ( ty == 0 ) { if ( partial && tx >= partial ) { sx[tx] = MAGMA_D_ZERO; } else { sx[tx] = x[0]; } } // -------------------- // move to 32x32 diag block A += blk_ind * (lda + 1); // A is A(blk_ind, blk_ind) A += ty2*lda + tx2; // A is A(blk_ind + tx2, blk_ind + ty2) // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - tx2 + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 >= partial ) { A = A + tx2 - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying lower to upper triangle, // as four 32x8 sections in parallel: // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31 #pragma unroll for(int j=ty2*4; j < ty2*4 + 4; j++) { if ( j < tx2 ) sA32(j, tx2) = ( sA32(tx2, j) ); } __syncthreads(); // multiply 32x32 diag block * x // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3) psum = MAGMA_D_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx[ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 32x32 diag block, then repeat steps from first diag block A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2) // load 32x32 diag block A[block + 0:31, block + 0:31] into sA if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying lower to upper triangle #pragma unroll for(int j=ty2*4; j < ty2*4 + 4; j++) { if ( j < tx2 ) sA32(j, tx2) = ( sA32(tx2, j) ); } __syncthreads(); // multiply 32x32 diag block * x psum = MAGMA_D_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to off-diag 32x32 block A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2) // load 32x32 block of A into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A 
= A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // multiply 32x32 block (below diag) psum = MAGMA_D_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2 + j*8) * sx[j*8 + ty2]; } //__syncthreads(); // no sync needed here // multiply transposed 32x32 block (above diag) psum2 = MAGMA_D_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum2 += ( sA32(ty2*4 + j, tx2) ) * sx[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial sums for non-transposed 32x32 block sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // store partial sums for transposed 32x32 block sA32(ty2, tx2) = psum2; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to left most 64x64 block in block row, and // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2) A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind) A -= blk_ind*lda; // A is A(blk_ind, 0) A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty) if ( partial && tx >= partial ) { A = A - tx + (partial - 1); } x -= blk_ind * incx; // x is x(tx) // 16x16 thread block const int tx4 = td % quarter_NB_X; const int ty4 = td / quarter_NB_X; work += blk*lda + tx4; // work is work(tx4, blk) for(int blk2=0; blk2 < blk; ++blk2) { // load 64x1 block x(blk2_ind + 0:63) into sx2 // since this block is left of diagonal, x cannot be partial rows if ( ty == 0 ) { sx2[tx] = x[blk2*NB_X*incx]; } __syncthreads(); for( int k=0; k < 4; k++ ) { // load 64x16 block of A into rA, 4 elements per thread, // as four 64x4 sections in parallel: // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15 // since this block is left of diagonal, it cannot be partial columns #pragma unroll for(int j=0; j < 4; j++) { rA[j] = A[j*lda]; } // 1) multiply 64x16 block A * x2 // each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k) // 2) multiply transposed 16x64 block A**H * x, // storing each product Aji*xi to sA(j,i) #pragma unroll for(int j=0; j < 4; j++) { total += rA[j] * sx2[quarter_NB_X*k + ty*4 + j]; sA16(ty*4 + j, tx) = ( rA[j] ) * sx[tx]; } __syncthreads(); // do partial row sums for transposed 16x64 result // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty) // sum sixteen 16x4 sections in parallel: // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63 psum2 = MAGMA_D_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum2 += sA16(tx4, ty4*4 + j); } __syncthreads(); // store partial row sums (locally) psums[k] = psum2; // move to next 64x16 block A += lda * quarter_NB_X; // A is A(blk_ind + tx#, blk2*NB_x + k*NB_X/4 + 4*ty), # or partial } // store partial row sums #pragma unroll for(int k=0; k < 4; k++) { sA16(tx4, ty4 + quarter_NB_X*k) = psums[k]; } __syncthreads(); // 
sum up partial row sums and store final total to workspace // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16 // since this is the transposed block above the diagonal, it cannot be partial rows if ( ty4 < 4 ) { int k = ty4*quarter_NB_X; psum2 = sA16(tx4, 0 + k) + sA16(tx4, 1 + k) + sA16(tx4, 2 + k) + sA16(tx4, 3 + k) + sA16(tx4, 4 + k) + sA16(tx4, 5 + k) + sA16(tx4, 6 + k) + sA16(tx4, 7 + k) + sA16(tx4, 8 + k) + sA16(tx4, 9 + k) + sA16(tx4, 10 + k) + sA16(tx4, 11 + k) + sA16(tx4, 12 + k) + sA16(tx4, 13 + k) + sA16(tx4, 14 + k) + sA16(tx4, 15 + k); work[blk2*NB_X + k] = psum2; // store at work( blk2*NB_X + tx4 + ty4*16, blk ) } __syncthreads(); } work -= tx4; // work is work(blk_ind) work += tx; // work is work(blk_ind + tx) // store row sums sA16(ty, tx) = total; __syncthreads(); // sum up final total for row tx if ( ty == 0 && (partial == 0 || tx < partial) ) { total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx); work[blk*NB_X] = total; // store at work( blk*NB_X + tx, blk ) } #endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */ } /************************************************************** Lower case, sum up final results On input: [ A11*x1 A12*x2 A13*x3 ] work = [ --- (A21*x1 + A22*x2) A23*x3 ] [ --- --- (A31*x1 + A32*x2 + A33*x3) ] On output: [ A11*x1 + A12*x2 + A13*x3 ] y = alpha*[ A11*x1 + A22*x2 + A23*x3 ] + beta*y [ A21*x1 + A22*x2 + A33*x3 ] Previously: [ A11*x1 --- ] work = [ A12*x2 (A21*x1 + A22*x2) --- ] [ A13*x3 A23*x3 (A31*x1 + A32*x2 + A33*x3) ] which doesn't work as well because A13*x3 has 64 rows, while A31*x1 has only n % NB rows. This is why it used to need lwork = lda*(blocks + 1) instead of lda*blocks. ********************************************************************/ __global__ void dsymv_kernel_L_sum( int n, double alpha, int lda, double beta, double * __restrict__ y, int incy, double * __restrict__ work ) { int tx = threadIdx.x; int blk = blockIdx.x; int blk_ind = blk * NB_X; int ind = blk_ind + tx; if ( ind < n ) { work += ind + blk*lda; double Ax = MAGMA_D_ZERO; for(int i = blk_ind; i < n; i += NB_X) { Ax += work[0]; work += lda; } y[ind * incy] = beta*y[ind * incy] + alpha*Ax; } } /************************************************************** * Lower case, launch kernels */ extern "C" void magmablas_dsymv_L( magma_int_t n, double alpha, const double *dA, magma_int_t ldda, const double *dx, magma_int_t incx, double beta, double *dy, magma_int_t incy, double *dwork) { magma_int_t blocks = (n - 1)/NB_X + 1; dim3 grid( blocks, 1, 1 ); dim3 threads( NB_X, NB_Y, 1 ); hipLaunchKernelGGL(( dsymv_kernel_L), dim3(grid), dim3(threads), 0, magma_stream , n, dA, ldda, dx, incx, dwork); dim3 threads_sum( NB_X, 1, 1 ); hipLaunchKernelGGL(( dsymv_kernel_L_sum), dim3(grid), dim3(threads_sum), 0, magma_stream , n, alpha, ldda, beta, dy, incy, dwork); } /** Purpose ------- magmablas_dsymv_work performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n symmetric matrix. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: - = MagmaUpper: Only the upper triangular part of A is to be referenced. - = MagmaLower: Only the lower triangular part of A is to be referenced. @param[in] n INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. @param[in] alpha DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. 
@param[in] dA DOUBLE PRECISION array of DIMENSION ( LDDA, n ). Before entry with UPLO = MagmaUpper, the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = MagmaLower, the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] ldda INTEGER. On entry, LDDA specifies the first dimension of A as declared in the calling (sub) program. LDDA must be at least max( 1, n ). It is recommended that ldda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. @param[in] dx DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. @param[in] incx INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. @param[in] beta DOUBLE PRECISION. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[in,out] dy DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. @param[in] incy INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. @param[in] dwork (workspace) DOUBLE PRECISION array on the GPU, dimension (MAX(1, LWORK)), @param[in] lwork INTEGER. The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ), where NB_X = 64. MAGMA implements dsymv through two steps: 1) perform the multiplication in each thread block and put the intermediate value in dwork. 2) sum the intermediate values and store the final result in y. magamblas_dsymv_work requires users to provide a workspace, while magmablas_dsymv is a wrapper routine allocating the workspace inside the routine and provides the same interface as cublas. If users need to call dsymv frequently, we suggest using magmablas_dsymv_work instead of magmablas_dsymv. As the overhead to allocate and free in device memory in magmablas_dsymv would hurt performance. Our tests show that this penalty is about 10 Gflop/s when the matrix size is around 10000. @ingroup magma_dblas2 ********************************************************************/ extern "C" magma_int_t magmablas_dsymv_work( magma_uplo_t uplo, magma_int_t n, double alpha, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_const_ptr dx, magma_int_t incx, double beta, magmaDouble_ptr dy, magma_int_t incy, magmaDouble_ptr dwork, magma_int_t lwork) { #if defined(PRECISION_z) // z precision requires CUDA ARCH 2.x; call CUBLAS version instead. magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { magma_dsymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); return MAGMA_SUCCESS; } #endif // -------------------- // [sdc] precisions, or z precision with CUDA ARCH 2.x int upper = (uplo == MagmaUpper); magma_int_t blocks = (n - 1)/NB_X + 1; magma_int_t lwmin = ldda*blocks; /* * Test the input parameters. */ magma_int_t info = 0; if ((! 
upper) && (uplo != MagmaLower)) { info = -1; } else if ( n < 0 ) { info = -2; } else if ( ldda < max(1, n) ) { info = -5; } else if ( incx == 0 ) { info = -7; } else if ( incy == 0 ) { info = -10; } else if ( lwork < lwmin ) { info = -12; } if (info != 0) { magma_xerbla( __func__, -(info) ); return info; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_D_EQUAL(alpha, MAGMA_D_ZERO) && MAGMA_D_EQUAL(beta, MAGMA_D_ONE) ) ) return info; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) { magma_dsymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy); } else { magmablas_dsymv_L(n, alpha, dA, ldda, dx, incx, beta, dy, incy, dwork); } return info; } /** Purpose ------- magmablas_dsymv performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n symmetric matrix. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: - = MagmaUpper: Only the upper triangular part of A is to be referenced. - = MagmaLower: Only the lower triangular part of A is to be referenced. @param[in] n INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. @param[in] alpha DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. @param[in] dA DOUBLE PRECISION array of DIMENSION ( LDDA, n ). Before entry with UPLO = MagmaUpper, the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = MagmaLower, the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] ldda INTEGER. On entry, LDDA specifies the first dimension of A as declared in the calling (sub) program. LDDA must be at least max( 1, n ). It is recommended that ldda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. @param[in] dx DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. @param[in] incx INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. @param[in] beta DOUBLE PRECISION. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[in,out] dy DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. @param[in] incy INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. @ingroup magma_dblas2 ********************************************************************/ extern "C" magma_int_t magmablas_dsymv( magma_uplo_t uplo, magma_int_t n, double alpha, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_const_ptr dx, magma_int_t incx, double beta, magmaDouble_ptr dy, magma_int_t incy) { #if defined(PRECISION_z) // z precision requires CUDA ARCH 2.x; call CUBLAS version instead. 
magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { magma_dsymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); return MAGMA_SUCCESS; } #endif // -------------------- // [sdc] precisions, or z precision with CUDA ARCH 2.x int upper = (uplo == MagmaUpper); /* * Test the input parameters. */ magma_int_t info = 0; if ((! upper) && (uplo != MagmaLower)) { info = -1; } else if ( n < 0 ) { info = -2; } else if ( ldda < max(1, n) ) { info = -5; } else if ( incx == 0 ) { info = -7; } else if ( incy == 0 ) { info = -10; } if (info != 0) { magma_xerbla( __func__, -(info) ); return info; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_D_EQUAL(alpha, MAGMA_D_ZERO) && MAGMA_D_EQUAL(beta, MAGMA_D_ONE) ) ) return info; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) { magma_dsymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy); } else { double *dwork; magma_int_t blocks = (n - 1)/NB_X + 1; magma_int_t lwork = ldda*blocks; magma_dmalloc( &dwork, lwork ); if ( dwork == NULL ) { info = MAGMA_ERR_DEVICE_ALLOC; magma_xerbla( __func__, -(info) ); } else { magmablas_dsymv_L(n, alpha, dA, ldda, dx, incx, beta, dy, incy, dwork); } magma_free( dwork ); } return info; }
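Per the workspace requirement documented in the file above (LWORK >= LDDA * ceil(N / NB_X) with NB_X = 64), a hedged usage sketch of magmablas_dsymv_work for repeated lower-triangular calls follows. It is not from the MAGMA sources; the wrapper name dsymv_repeated_sketch and the caller-supplied n, ldda, alpha, beta, dA, dx, dy, niter are illustrative, and it simply reuses the allocation pattern that magmablas_dsymv performs internally, as the documentation recommends for frequent calls.

// Sketch only: allocate the workspace once and reuse it across many calls.
extern "C" void dsymv_repeated_sketch(
    magma_int_t n, double alpha,
    magmaDouble_const_ptr dA, magma_int_t ldda,
    magmaDouble_const_ptr dx, double beta,
    magmaDouble_ptr dy, int niter )
{
    magma_int_t blocks = (n - 1)/NB_X + 1;   // NB_X = 64 in this file
    magma_int_t lwork  = ldda * blocks;      // LWORK >= LDDA*ceil(N/NB_X)
    double *dwork;
    magma_dmalloc( &dwork, lwork );          // allocation check omitted for brevity

    for (int iter = 0; iter < niter; ++iter) {
        magmablas_dsymv_work( MagmaLower, n, alpha, dA, ldda, dx, 1,
                              beta, dy, 1, dwork, lwork );
    }

    magma_free( dwork );
}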
9af36f832d82d941b3a3e3e9455175ad4ff58a31.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 dsymv.cu is nearly identical to dsymv.cu, just change names and drop . @generated from zhemv.cu normal z -> d, Sat Nov 15 19:53:59 2014 @author Mark Gates */ #include "common_magma.h" #define PRECISION_d #define NB_X 64 #define NB_Y 4 #define bank_shift 33 #define quarter_NB_X 16 #define half_NB_X 32 /******************************************************************************* Lower case, compute block multiply, work = A*x, for any size n: [ A11*x1 A12*x2 A13*x3 ] [ A11 A12 A13 ] [ x1 ] [ --- (A21*x1 + A22*x2) A23*x3 ] = [ A21 A22 A23 ] * [ x2 ] [ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ] Uses a 64x4 thread block. For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed). For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles. In both cases, each thread multiplies 4 elements. For rows past the bottom of the matrix, the A pointer is adjusted to be the last valid row of A, which multiple threads will read. Extra rows are ignored when saving results to work. Columns past the right edge are explicitly ignored when loading. x values past the bottom are set to zero, thus, extra columns are zeroed when multiplying. ********************************************************************/ __global__ void dsymv_kernel_L( int n, const double * __restrict__ A, int lda, const double * __restrict__ x, int incx, double * __restrict__ work) { #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) // treats sA as 16x64 block #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ] // treats sA as 32x32 block #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)]) // 64x4 thread block const int tx = threadIdx.x; const int ty = threadIdx.y; const int blk = blockIdx.x; const int blk_ind = NB_X * blk; const int td = NB_X * ty + tx; // 32x8 thread block const int tx2 = td % half_NB_X; const int ty2 = td / half_NB_X; // If this blk has fewer than NB_X rows, partial is the number of valid rows, // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid. // Else, partial == 0. const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); double psum, psum2; double total = MAGMA_D_ZERO; // sA is used as a 32x32 block, sA32(i,j), // and as a 16x64 block, sA16(i,j), in different parts of the code. // sA must be at least half_NB_X*bank_shift = 32x33 = 1056; // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056 __shared__ double sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? 
*/ __shared__ double sx [NB_X]; // for x[ blk ] __shared__ double sx2[NB_X]; // for x[ blk2 ], which cycles over all blocks left of diag double rA[4]; double psums[4]; // -------------------- // load 64x1 block x(blk_ind + 0:63) into sx x += (blk_ind + tx)*incx; // x is x(blk_ind + tx) if ( ty == 0 ) { if ( partial && tx >= partial ) { sx[tx] = MAGMA_D_ZERO; } else { sx[tx] = x[0]; } } // -------------------- // move to 32x32 diag block A += blk_ind * (lda + 1); // A is A(blk_ind, blk_ind) A += ty2*lda + tx2; // A is A(blk_ind + tx2, blk_ind + ty2) // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - tx2 + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 >= partial ) { A = A + tx2 - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying lower to upper triangle, // as four 32x8 sections in parallel: // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31 #pragma unroll for(int j=ty2*4; j < ty2*4 + 4; j++) { if ( j < tx2 ) sA32(j, tx2) = ( sA32(tx2, j) ); } __syncthreads(); // multiply 32x32 diag block * x // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3) psum = MAGMA_D_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx[ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 32x32 diag block, then repeat steps from first diag block A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2) // load 32x32 diag block A[block + 0:31, block + 0:31] into sA if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying lower to upper triangle #pragma unroll for(int j=ty2*4; j < ty2*4 + 4; j++) { if ( j < tx2 ) sA32(j, tx2) = ( sA32(tx2, j) ); } __syncthreads(); // multiply 32x32 diag block * x psum = MAGMA_D_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to off-diag 32x32 block A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2) // load 32x32 block of A into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A 
= A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // multiply 32x32 block (below diag) psum = MAGMA_D_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2 + j*8) * sx[j*8 + ty2]; } //__syncthreads(); // no sync needed here // multiply transposed 32x32 block (above diag) psum2 = MAGMA_D_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum2 += ( sA32(ty2*4 + j, tx2) ) * sx[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial sums for non-transposed 32x32 block sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // store partial sums for transposed 32x32 block sA32(ty2, tx2) = psum2; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to left most 64x64 block in block row, and // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2) A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind) A -= blk_ind*lda; // A is A(blk_ind, 0) A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty) if ( partial && tx >= partial ) { A = A - tx + (partial - 1); } x -= blk_ind * incx; // x is x(tx) // 16x16 thread block const int tx4 = td % quarter_NB_X; const int ty4 = td / quarter_NB_X; work += blk*lda + tx4; // work is work(tx4, blk) for(int blk2=0; blk2 < blk; ++blk2) { // load 64x1 block x(blk2_ind + 0:63) into sx2 // since this block is left of diagonal, x cannot be partial rows if ( ty == 0 ) { sx2[tx] = x[blk2*NB_X*incx]; } __syncthreads(); for( int k=0; k < 4; k++ ) { // load 64x16 block of A into rA, 4 elements per thread, // as four 64x4 sections in parallel: // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15 // since this block is left of diagonal, it cannot be partial columns #pragma unroll for(int j=0; j < 4; j++) { rA[j] = A[j*lda]; } // 1) multiply 64x16 block A * x2 // each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k) // 2) multiply transposed 16x64 block A**H * x, // storing each product Aji*xi to sA(j,i) #pragma unroll for(int j=0; j < 4; j++) { total += rA[j] * sx2[quarter_NB_X*k + ty*4 + j]; sA16(ty*4 + j, tx) = ( rA[j] ) * sx[tx]; } __syncthreads(); // do partial row sums for transposed 16x64 result // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty) // sum sixteen 16x4 sections in parallel: // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63 psum2 = MAGMA_D_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum2 += sA16(tx4, ty4*4 + j); } __syncthreads(); // store partial row sums (locally) psums[k] = psum2; // move to next 64x16 block A += lda * quarter_NB_X; // A is A(blk_ind + tx#, blk2*NB_x + k*NB_X/4 + 4*ty), # or partial } // store partial row sums #pragma unroll for(int k=0; k < 4; k++) { sA16(tx4, ty4 + quarter_NB_X*k) = psums[k]; } __syncthreads(); // 
sum up partial row sums and store final total to workspace // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16 // since this is the transposed block above the diagonal, it cannot be partial rows if ( ty4 < 4 ) { int k = ty4*quarter_NB_X; psum2 = sA16(tx4, 0 + k) + sA16(tx4, 1 + k) + sA16(tx4, 2 + k) + sA16(tx4, 3 + k) + sA16(tx4, 4 + k) + sA16(tx4, 5 + k) + sA16(tx4, 6 + k) + sA16(tx4, 7 + k) + sA16(tx4, 8 + k) + sA16(tx4, 9 + k) + sA16(tx4, 10 + k) + sA16(tx4, 11 + k) + sA16(tx4, 12 + k) + sA16(tx4, 13 + k) + sA16(tx4, 14 + k) + sA16(tx4, 15 + k); work[blk2*NB_X + k] = psum2; // store at work( blk2*NB_X + tx4 + ty4*16, blk ) } __syncthreads(); } work -= tx4; // work is work(blk_ind) work += tx; // work is work(blk_ind + tx) // store row sums sA16(ty, tx) = total; __syncthreads(); // sum up final total for row tx if ( ty == 0 && (partial == 0 || tx < partial) ) { total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx); work[blk*NB_X] = total; // store at work( blk*NB_X + tx, blk ) } #endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */ } /************************************************************** Lower case, sum up final results On input: [ A11*x1 A12*x2 A13*x3 ] work = [ --- (A21*x1 + A22*x2) A23*x3 ] [ --- --- (A31*x1 + A32*x2 + A33*x3) ] On output: [ A11*x1 + A12*x2 + A13*x3 ] y = alpha*[ A11*x1 + A22*x2 + A23*x3 ] + beta*y [ A21*x1 + A22*x2 + A33*x3 ] Previously: [ A11*x1 --- ] work = [ A12*x2 (A21*x1 + A22*x2) --- ] [ A13*x3 A23*x3 (A31*x1 + A32*x2 + A33*x3) ] which doesn't work as well because A13*x3 has 64 rows, while A31*x1 has only n % NB rows. This is why it used to need lwork = lda*(blocks + 1) instead of lda*blocks. ********************************************************************/ __global__ void dsymv_kernel_L_sum( int n, double alpha, int lda, double beta, double * __restrict__ y, int incy, double * __restrict__ work ) { int tx = threadIdx.x; int blk = blockIdx.x; int blk_ind = blk * NB_X; int ind = blk_ind + tx; if ( ind < n ) { work += ind + blk*lda; double Ax = MAGMA_D_ZERO; for(int i = blk_ind; i < n; i += NB_X) { Ax += work[0]; work += lda; } y[ind * incy] = beta*y[ind * incy] + alpha*Ax; } } /************************************************************** * Lower case, launch kernels */ extern "C" void magmablas_dsymv_L( magma_int_t n, double alpha, const double *dA, magma_int_t ldda, const double *dx, magma_int_t incx, double beta, double *dy, magma_int_t incy, double *dwork) { magma_int_t blocks = (n - 1)/NB_X + 1; dim3 grid( blocks, 1, 1 ); dim3 threads( NB_X, NB_Y, 1 ); dsymv_kernel_L<<< grid, threads, 0, magma_stream >>> (n, dA, ldda, dx, incx, dwork); dim3 threads_sum( NB_X, 1, 1 ); dsymv_kernel_L_sum<<< grid, threads_sum, 0, magma_stream >>> (n, alpha, ldda, beta, dy, incy, dwork); } /** Purpose ------- magmablas_dsymv_work performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n symmetric matrix. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: - = MagmaUpper: Only the upper triangular part of A is to be referenced. - = MagmaLower: Only the lower triangular part of A is to be referenced. @param[in] n INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. @param[in] alpha DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. @param[in] dA DOUBLE PRECISION array of DIMENSION ( LDDA, n ). 
Before entry with UPLO = MagmaUpper, the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = MagmaLower, the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] ldda INTEGER. On entry, LDDA specifies the first dimension of A as declared in the calling (sub) program. LDDA must be at least max( 1, n ). It is recommended that ldda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. @param[in] dx DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. @param[in] incx INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. @param[in] beta DOUBLE PRECISION. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[in,out] dy DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. @param[in] incy INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. @param[in] dwork (workspace) DOUBLE PRECISION array on the GPU, dimension (MAX(1, LWORK)), @param[in] lwork INTEGER. The dimension of the array DWORK. LWORK >= LDDA * ceil( N / NB_X ), where NB_X = 64. MAGMA implements dsymv through two steps: 1) perform the multiplication in each thread block and put the intermediate value in dwork. 2) sum the intermediate values and store the final result in y. magamblas_dsymv_work requires users to provide a workspace, while magmablas_dsymv is a wrapper routine allocating the workspace inside the routine and provides the same interface as cublas. If users need to call dsymv frequently, we suggest using magmablas_dsymv_work instead of magmablas_dsymv. As the overhead to allocate and free in device memory in magmablas_dsymv would hurt performance. Our tests show that this penalty is about 10 Gflop/s when the matrix size is around 10000. @ingroup magma_dblas2 ********************************************************************/ extern "C" magma_int_t magmablas_dsymv_work( magma_uplo_t uplo, magma_int_t n, double alpha, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_const_ptr dx, magma_int_t incx, double beta, magmaDouble_ptr dy, magma_int_t incy, magmaDouble_ptr dwork, magma_int_t lwork) { #if defined(PRECISION_z) // z precision requires CUDA ARCH 2.x; call CUBLAS version instead. magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { magma_dsymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); return MAGMA_SUCCESS; } #endif // -------------------- // [sdc] precisions, or z precision with CUDA ARCH 2.x int upper = (uplo == MagmaUpper); magma_int_t blocks = (n - 1)/NB_X + 1; magma_int_t lwmin = ldda*blocks; /* * Test the input parameters. */ magma_int_t info = 0; if ((! 
upper) && (uplo != MagmaLower)) { info = -1; } else if ( n < 0 ) { info = -2; } else if ( ldda < max(1, n) ) { info = -5; } else if ( incx == 0 ) { info = -7; } else if ( incy == 0 ) { info = -10; } else if ( lwork < lwmin ) { info = -12; } if (info != 0) { magma_xerbla( __func__, -(info) ); return info; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_D_EQUAL(alpha, MAGMA_D_ZERO) && MAGMA_D_EQUAL(beta, MAGMA_D_ONE) ) ) return info; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) { magma_dsymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy); } else { magmablas_dsymv_L(n, alpha, dA, ldda, dx, incx, beta, dy, incy, dwork); } return info; } /** Purpose ------- magmablas_dsymv performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n symmetric matrix. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: - = MagmaUpper: Only the upper triangular part of A is to be referenced. - = MagmaLower: Only the lower triangular part of A is to be referenced. @param[in] n INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. @param[in] alpha DOUBLE PRECISION. On entry, ALPHA specifies the scalar alpha. @param[in] dA DOUBLE PRECISION array of DIMENSION ( LDDA, n ). Before entry with UPLO = MagmaUpper, the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = MagmaLower, the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] ldda INTEGER. On entry, LDDA specifies the first dimension of A as declared in the calling (sub) program. LDDA must be at least max( 1, n ). It is recommended that ldda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. @param[in] dx DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. @param[in] incx INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. @param[in] beta DOUBLE PRECISION. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[in,out] dy DOUBLE PRECISION array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. @param[in] incy INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. @ingroup magma_dblas2 ********************************************************************/ extern "C" magma_int_t magmablas_dsymv( magma_uplo_t uplo, magma_int_t n, double alpha, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_const_ptr dx, magma_int_t incx, double beta, magmaDouble_ptr dy, magma_int_t incy) { #if defined(PRECISION_z) // z precision requires CUDA ARCH 2.x; call CUBLAS version instead. 
magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { magma_dsymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); return MAGMA_SUCCESS; } #endif // -------------------- // [sdc] precisions, or z precision with CUDA ARCH 2.x int upper = (uplo == MagmaUpper); /* * Test the input parameters. */ magma_int_t info = 0; if ((! upper) && (uplo != MagmaLower)) { info = -1; } else if ( n < 0 ) { info = -2; } else if ( ldda < max(1, n) ) { info = -5; } else if ( incx == 0 ) { info = -7; } else if ( incy == 0 ) { info = -10; } if (info != 0) { magma_xerbla( __func__, -(info) ); return info; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_D_EQUAL(alpha, MAGMA_D_ZERO) && MAGMA_D_EQUAL(beta, MAGMA_D_ONE) ) ) return info; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) { magma_dsymv( uplo, n, alpha, dA, ldda, dx, incx, beta, dy, incy); } else { double *dwork; magma_int_t blocks = (n - 1)/NB_X + 1; magma_int_t lwork = ldda*blocks; magma_dmalloc( &dwork, lwork ); if ( dwork == NULL ) { info = MAGMA_ERR_DEVICE_ALLOC; magma_xerbla( __func__, -(info) ); } else { magmablas_dsymv_L(n, alpha, dA, ldda, dx, incx, beta, dy, incy, dwork); } magma_free( dwork ); } return info; }
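// Minimal usage sketch for magmablas_dsymv_work (illustrative only): the
// function name and the ncalls parameter below are placeholders, not part of
// MAGMA. The point is the pattern recommended in the documentation above --
// allocate the workspace once with lwork = ldda * ceil(n/NB_X) and reuse it
// across repeated calls, instead of letting magmablas_dsymv allocate and free
// device memory on every call.
extern "C" magma_int_t
example_dsymv_lower_repeated(
    magma_int_t n, double alpha,
    magmaDouble_const_ptr dA, magma_int_t ldda,
    magmaDouble_const_ptr dx, magma_int_t incx,
    double beta,
    magmaDouble_ptr dy, magma_int_t incy,
    magma_int_t ncalls )
{
    magma_int_t blocks = (n - 1)/NB_X + 1;   // ceil( n / NB_X ), NB_X = 64
    magma_int_t lwork  = ldda*blocks;        // documented minimum workspace

    double *dwork = NULL;
    magma_dmalloc( &dwork, lwork );
    if ( dwork == NULL ) {
        return MAGMA_ERR_DEVICE_ALLOC;       // same failure handling as above
    }

    magma_int_t info = 0;
    for( magma_int_t i = 0; i < ncalls; ++i ) {
        // workspace allocation is amortized over all ncalls invocations
        info = magmablas_dsymv_work( MagmaLower, n, alpha, dA, ldda,
                                     dx, incx, beta, dy, incy, dwork, lwork );
        if ( info != 0 )
            break;
    }

    magma_free( dwork );
    return info;
}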
ac23ad44d8a59f526384de9db47afde416baaee8.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <hip/hip_runtime.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include "stream_compaction/efficient.h" #include <thrust/partition.h> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define ERRORCHECK 1 #define CACHEFIRSTBOUNCE 1 #define ANTIALIAS 0 #define DOF 0 #define COMPACT 0 #define SORTBYMATERIAL 0 #define RUSSIANROULETTE 0 // TURN ONLY ONE ON. ALL OFF MEANS NAIVE #define DIRECTLIGHTING 0 #define DIRECTLIGHTING_LASTBOUNCE 0 #define FULLLIGHTING 0 //#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) //#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) //void checkCUDAErrorFn(const char *msg, const char *file, int line) { //#if ERRORCHECK // hipDeviceSynchronize(); // hipError_t err = hipGetLastError(); // if (hipSuccess == err) { // return; // } // // fprintf(stderr, "CUDA error"); // if (file) { // fprintf(stderr, " (%s:%d)", file, line); // } // fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); //# ifdef _WIN32 // getchar(); //# endif // exit(EXIT_FAILURE); //#endif //} __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); /* color.x = glm::clamp((int)(pix.x * 255), 0, 255); color.y = glm::clamp((int)(pix.y * 255), 0, 255); color.z = glm::clamp((int)(pix.z * 255), 0, 255);*/ // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; static Material * dev_materials = NULL; static PathSegment * dev_paths = NULL; static ShadeableIntersection * dev_intersections = NULL; // TODO: static variables for device memory, any extra info you need, etc static ShadeableIntersection * dev_intersections_cache = NULL; static int * dev_compact_idx = NULL; static int * dev_sort_paths = NULL; static int * dev_sort_intersections = NULL; static Geom * dev_lights = NULL; // ... 
void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice); hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice); hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // TODO: initialize any extra device memeory you need hipMalloc(&dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection)); hipMemset(dev_intersections_cache, 0, pixelcount * sizeof(ShadeableIntersection)); hipMalloc(&dev_compact_idx, pixelcount * sizeof(int)); hipMemset(dev_compact_idx, 0, pixelcount * sizeof(int)); hipMalloc(&dev_sort_paths, pixelcount * sizeof(int)); hipMemset(dev_sort_paths, 0, pixelcount * sizeof(int)); hipMalloc(&dev_sort_intersections, pixelcount * sizeof(int)); hipMemset(dev_sort_intersections, 0, pixelcount * sizeof(int)); hipMalloc(&dev_lights, scene->lights.size() * sizeof(Geom)); hipMemcpy(dev_lights, scene->lights.data(), scene->lights.size() * sizeof(Geom), hipMemcpyHostToDevice); checkCUDAError("pathtraceInit"); } void pathtraceFree() { hipFree(dev_image); // no-op if dev_image is null hipFree(dev_paths); hipFree(dev_geoms); hipFree(dev_materials); hipFree(dev_intersections); // TODO: clean up any extra device memory you created hipFree(dev_intersections_cache); hipFree(dev_compact_idx); hipFree(dev_sort_paths); hipFree(dev_sort_intersections); //hipFree(dev_sort_material); hipFree(dev_lights); checkCUDAError("pathtraceFree"); } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. * * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments, int* compact_idx) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); compact_idx[index] = index + 1; // used in stream compaction.. PathSegment & segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); // TODO: implement antialiasing by jittering the ray // DOUBLE CHECK THIS LOGIC.. 
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0); float jit_x = x, jit_y = y; if (ANTIALIAS) { thrust::uniform_real_distribution<float> u01(-0.5, 0.5); jit_x += u01(rng); jit_y += u01(rng); } segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * (jit_x - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * (jit_y - (float)cam.resolution.y * 0.5f) ); if (DOF && cam.lensRadius > 0.f) { applyDof(segment.ray, rng, cam); } segment.pixelIndex = index; segment.remainingBounces = traceDepth; segment.throughput = glm::vec3(1.f, 1.f, 1.f); } } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. __global__ void computeIntersections( int depth , int num_paths , PathSegment * pathSegments, int * compact_idx , Geom * geoms , int geoms_size , ShadeableIntersection * intersections // , int * material_type ) { int index = blockIdx.x * blockDim.x + threadIdx.x; //int path_index = COMPACT ? compact_idx[index] - 1 : index; int path_index = index; if (path_index > num_paths || path_index == -1) return; PathSegment pathSegment = pathSegments[path_index]; //if (pathSegment.remainingBounces == 0) return; float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; // naive parse through global geoms for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } // TODO: add more intersection tests here... triangle? metaball? CSG? // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; // material_type[path_index] = -1; } else { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; // material_type[path_index] = intersections[path_index].materialId; intersections[path_index].outside = outside; } } // LOOK: "fake" shader demonstrating what you might do with the info in // a ShadeableIntersection, as well as how to use thrust's random number // generator. Observe that since the thrust random number generator basically // adds "noise" to the iteration, the image should start off noisy and get // cleaner as more iterations are computed. // // Note that this shader does NOT do a BSDF evaluation! // Your shaders should handle that - this can allow techniques such as // bump mapping. __global__ void shadeFakeMaterial ( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials , int depth ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. 
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); pathSegments[idx].remainingBounces = 0; } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! you should be able to start with basically a one-liner else { float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f)); pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f; pathSegments[idx].color *= u01(rng); // apply some noise because why not } // If there was no intersection, color the ray black. // Lots of renderers use 4 channel color, RGBA, where A = alpha, often // used for opacity, in which case they can indicate "no opacity". // This can be useful for post-processing and image compositing. } else { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = 0; } } } // BSDF based material - NAIVE lighting __global__ void shadeBSDFMaterialNaive( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments, int * compact_idx , Material * materials , int depth ) { int index = blockIdx.x * blockDim.x + threadIdx.x; // USE THE ACTIVE INDICES IF COMPACTION IS USED //int idx = COMPACT ? compact_idx[index] - 1 : index; int idx = index; if (idx > num_paths || idx == -1) { compact_idx[index] = 0; return; } PathSegment& pathSegment = pathSegments[idx]; #if !COMPACT if (pathSegment.remainingBounces <= 0) { return; } #endif if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth); Material& material = materials[intersection.materialId]; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegment.color *= (material.color * material.emittance); //pathSegment.color += (material.color * material.emittance)/8.f; pathSegment.remainingBounces = 0; } else { glm::vec3 intpt = pathSegment.ray.origin + intersection.t * pathSegment.ray.direction; scatterRay(pathSegment, intpt, intersection.surfaceNormal, intersection.outside, material, rng); pathSegment.remainingBounces--; #if RUSSIANROULETTE // russian roulette path termination if (depth > 4) { float q = max(pathSegment.color.x, max(pathSegment.color.y, pathSegment.color.z)); thrust::uniform_real_distribution<float> u01(0, 1); if ((u01(rng)) > q) { pathSegment.remainingBounces = 0; } else { pathSegment.color /= q; } } #endif } } else { pathSegment.color = glm::vec3(0.0f); pathSegment.remainingBounces = 0; } } if (pathSegment.remainingBounces == 0 && COMPACT) { compact_idx[index] = 0; } } // BSDF based material - DIRECT lighting __global__ void shadeBSDFMaterialDirect( int iter, int num_paths, ShadeableIntersection * shadeableIntersections, PathSegment * pathSegments, int * compact_idx, Material * materials, Geom * geoms, Geom * lights, int numGeoms, int numLights) { int index = blockIdx.x * blockDim.x + threadIdx.x; // USE THE ACTIVE INDICES IF COMPACTION IS USED //int idx = COMPACT ? 
compact_idx[index] - 1 : index; int idx = index; if (idx > num_paths || idx == -1) return; PathSegment& pathSegment = pathSegments[idx]; #if !COMPACT if (pathSegment.remainingBounces <= 0) { return; } #endif if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); Material& material = materials[intersection.materialId]; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegment.color *= (material.color * material.emittance); pathSegment.remainingBounces = 0; } else { // DIRECT LIGHTING SHADING COMPUTATION glm::vec3 intpt = pathSegment.ray.origin + intersection.t * pathSegment.ray.direction; computeDirectLight(pathSegment, intpt, intersection.surfaceNormal, material, materials, rng, geoms, lights, numGeoms, numLights); pathSegment.remainingBounces = 0; // NO BOUNCES } } else { pathSegment.color = glm::vec3(0.0f); pathSegment.remainingBounces = 0; } } if (pathSegment.remainingBounces == 0 && COMPACT) { compact_idx[index] = 0; } } __global__ void shadeBSDFMaterialFull( int iter, int depth, int num_paths, ShadeableIntersection * shadeableIntersections, PathSegment * pathSegments, int * compact_idx, Material * materials, Geom * geoms, Geom * lights, int numGeoms, int numLights) { int index = blockIdx.x * blockDim.x + threadIdx.x; // USE THE ACTIVE INDICES IF COMPACTION IS USED //int idx = COMPACT ? compact_idx[index] - 1 : index; int idx = index; if (idx > num_paths || idx == -1) { compact_idx[index] = 0; return; } PathSegment& pathSegment = pathSegments[idx]; PathSegment psCopy = pathSegments[idx]; #if !COMPACT if (pathSegment.remainingBounces <= 0) { return; } #endif /*if (idx == 320000) { printf("hi"); }*/ if (idx < num_paths) { //if (depth == 0) { // throughput[idx] = glm::vec3(1.f); // accumulated[idx] = glm::vec3(0.f); //} ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth); Material& material = materials[intersection.materialId]; #if FULLLIGHTING if (depth == 0) { pathSegment.color = glm::vec3(0.f); } #endif if (depth == 0 || pathSegment.specular) { glm::vec3 Le(0.f); if (material.emittance > 0.f && glm::dot(intersection.surfaceNormal, -pathSegment.ray.direction)) { Le = material.color * material.emittance; } pathSegment.color += Le * pathSegment.throughput; } if (material.emittance > 0.0f) { //pathSegment.color += (material.color * material.emittance) * throughput[idx]; pathSegment.remainingBounces = 0; } else { // update specular bounce.. pathSegment.specular = material.hasReflective || material.hasRefractive ? 
true : false; glm::vec3 intpt = pathSegment.ray.origin + intersection.t * pathSegment.ray.direction; #if DIRECTLIGHTING_LASTBOUNCE // DL at last bounce only float fpdf = scatterRay(pathSegment, intpt, intersection.surfaceNormal, intersection.outside, material, rng); pathSegment.remainingBounces--; if (pathSegment.remainingBounces == 0) { float gpdf = computeDirectLight(psCopy, intpt, intersection.surfaceNormal, material, materials, rng, geoms, lights, numGeoms, numLights); pathSegment.color *= psCopy.color; } #else glm::vec3 colL(0.f), colB(0.f); // L = light, B = brdf // colL colL = sampleLightMIS(psCopy, intpt, intersection.surfaceNormal, material, materials, rng, geoms, lights, numGeoms, numLights); // colB colB = sampleBsdfMIS(pathSegment, intpt, intersection.surfaceNormal, intersection.outside, material, rng, lights, numLights); pathSegment.color += pathSegment.throughput * colL; pathSegment.throughput *= colB; //PathSegment ps = pathSegment; //scatterRay(ps, intpt, intersection.surfaceNormal, intersection.outside, material, rng); //pathSegment.ray = ps.ray; //pathSegment.color += pathSegment.throughput * colL; //pathSegment.throughput *= ps.color; pathSegment.remainingBounces--; #endif #if RUSSIANROULETTE // russian roulette path termination if (depth > 4) { float q = max(pathSegment.throughput.x, max(pathSegment.throughput.y, pathSegment.throughput.z)); thrust::uniform_real_distribution<float> u01(0, 1); if ((u01(rng)) > q) { pathSegment.remainingBounces = 0; } else { pathSegment.throughput /= q; } } #endif } } else { pathSegment.color = glm::vec3(0.0f); pathSegment.remainingBounces = 0; } } if (pathSegment.remainingBounces == 0 && COMPACT) { compact_idx[index] = 0; } } // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths, int iter) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { PathSegment iterationPath = iterationPaths[index]; // image[iterationPath.pixelIndex] = (image[iterationPath.pixelIndex] * float(iter) + iterationPath.color) / float(iter + 1); image[iterationPath.pixelIndex] += iterationPath.color; } } //__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) //{ // int index = (blockIdx.x * blockDim.x) + threadIdx.x; // // if (index < nPaths) // { // PathSegment iterationPath = iterationPaths[index]; // image[iterationPath.pixelIndex] += iterationPath.color; // } //} __global__ void setMaterial(int nPaths, int * pathSegmentIndices, int * intersectionIndices, ShadeableIntersection *intersections) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { pathSegmentIndices[index] = intersectionIndices[index] = intersections[index].materialId; } } // THRUST'S STREAM COMPACTION // https://thrust.github.io/doc/group__stream__compaction.html // // struct used for predicate for thrust::remove_if / thrust::partition struct isTerminated { __host__ __device__ bool operator()(const PathSegment &segment) { return (segment.remainingBounces > 0); } }; /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / 
blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // 1D block for path tracing const int blockSize1d = 128; /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. // Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. // TODO: perform one iteration of path tracing hipLaunchKernelGGL(( generateRayFromCamera) , dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, cam, iter, traceDepth, dev_paths, dev_compact_idx); checkCUDAError("generate camera ray"); int depth = 0; PathSegment* dev_path_end = dev_paths + pixelcount; int num_paths = dev_path_end - dev_paths; int np = dev_path_end - dev_paths; // --- PathSegment Tracing Stage --- // Shoot ray into scene, bounce between objects, push shading chunks bool iterationComplete = false; while (!iterationComplete) { // clean shading chunks hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // tracing dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; if (depth == 0 && iter > 1 && CACHEFIRSTBOUNCE) { hipMemcpy(dev_intersections, dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice); } else { hipLaunchKernelGGL(( computeIntersections) , dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, depth , num_paths , dev_paths, dev_compact_idx , dev_geoms , hst_scene->geoms.size() , dev_intersections // , dev_sort_material ); checkCUDAError("trace one bounce"); if (depth == 0 && iter == 1 && CACHEFIRSTBOUNCE) { // update cache with dev_intersections hipMemcpy(dev_intersections_cache, dev_intersections, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice); } } hipDeviceSynchronize(); // TODO: // --- Shading Stage --- // Shade path segments based on intersections and generate new rays by // evaluating the BSDF. // Start off with just a big kernel that handles all the different // materials you have in the scenefile. // TODO: compare between directly shading the path segments and shading // path segments that have been reshuffled to be contiguous in memory. 
/* shadeFakeMaterial<<<numblocksPathSegmentTracing, blockSize1d>>> ( iter, num_paths, dev_intersections, dev_paths, dev_materials );*/ //printf("%d ", num_paths); #if SORTBYMATERIAL hipLaunchKernelGGL(( setMaterial) , dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, num_paths, dev_sort_paths, dev_sort_intersections, dev_intersections); thrust::sort_by_key(thrust::device, dev_sort_paths, dev_sort_paths + num_paths, dev_paths); thrust::sort_by_key(thrust::device, dev_sort_intersections, dev_sort_intersections + num_paths, dev_intersections); #endif #if FULLLIGHTING || DIRECTLIGHTING_LASTBOUNCE shadeBSDFMaterialFull << <numblocksPathSegmentTracing, blockSize1d >> > ( iter, depth, num_paths, dev_intersections, dev_paths, dev_compact_idx, dev_materials, dev_geoms, dev_lights, hst_scene->geoms.size(), hst_scene->lights.size()); #elif DIRECTLIGHTING hipLaunchKernelGGL(( shadeBSDFMaterialDirect) , dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, iter, num_paths, dev_intersections, dev_paths, dev_compact_idx, dev_materials, dev_geoms, dev_lights, hst_scene->geoms.size(), hst_scene->lights.size()); iterationComplete = true; // DIRECT LIGHTING - 1 BOUNCE ONLY #else hipLaunchKernelGGL(( shadeBSDFMaterialNaive) , dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, iter, num_paths, dev_intersections, dev_paths, dev_compact_idx, dev_materials, depth); #endif #if COMPACT //printf("%d ", num_paths); PathSegment *end = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, isTerminated()); num_paths = end - dev_paths; //num_paths = StreamCompaction::Efficient::compact(num_paths, dev_compact_idx, dev_compact_idx); if (num_paths <= 0) { iterationComplete = true; } #endif depth++; if (depth == traceDepth) { iterationComplete = true; } } num_paths = dev_path_end - dev_paths; // Assemble this iteration and apply it to the image dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; hipLaunchKernelGGL(( finalGather) , dim3(numBlocksPixels), dim3(blockSize1d), 0, 0, num_paths, dev_image, dev_paths, iter); /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU hipMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
ac23ad44d8a59f526384de9db47afde416baaee8.cu
#include <cstdio> #include <cuda.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include "stream_compaction/efficient.h" #include <thrust/partition.h> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define ERRORCHECK 1 #define CACHEFIRSTBOUNCE 1 #define ANTIALIAS 0 #define DOF 0 #define COMPACT 0 #define SORTBYMATERIAL 0 #define RUSSIANROULETTE 0 // TURN ONLY ONE ON. ALL OFF MEANS NAIVE #define DIRECTLIGHTING 0 #define DIRECTLIGHTING_LASTBOUNCE 0 #define FULLLIGHTING 0 //#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) //#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) //void checkCUDAErrorFn(const char *msg, const char *file, int line) { //#if ERRORCHECK // cudaDeviceSynchronize(); // cudaError_t err = cudaGetLastError(); // if (cudaSuccess == err) { // return; // } // // fprintf(stderr, "CUDA error"); // if (file) { // fprintf(stderr, " (%s:%d)", file, line); // } // fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); //# ifdef _WIN32 // getchar(); //# endif // exit(EXIT_FAILURE); //#endif //} __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); /* color.x = glm::clamp((int)(pix.x * 255), 0, 255); color.y = glm::clamp((int)(pix.y * 255), 0, 255); color.z = glm::clamp((int)(pix.z * 255), 0, 255);*/ // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; static Material * dev_materials = NULL; static PathSegment * dev_paths = NULL; static ShadeableIntersection * dev_intersections = NULL; // TODO: static variables for device memory, any extra info you need, etc static ShadeableIntersection * dev_intersections_cache = NULL; static int * dev_compact_idx = NULL; static int * dev_sort_paths = NULL; static int * dev_sort_intersections = NULL; static Geom * dev_lights = NULL; // ... 
void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice); cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // TODO: initialize any extra device memeory you need cudaMalloc(&dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection)); cudaMemset(dev_intersections_cache, 0, pixelcount * sizeof(ShadeableIntersection)); cudaMalloc(&dev_compact_idx, pixelcount * sizeof(int)); cudaMemset(dev_compact_idx, 0, pixelcount * sizeof(int)); cudaMalloc(&dev_sort_paths, pixelcount * sizeof(int)); cudaMemset(dev_sort_paths, 0, pixelcount * sizeof(int)); cudaMalloc(&dev_sort_intersections, pixelcount * sizeof(int)); cudaMemset(dev_sort_intersections, 0, pixelcount * sizeof(int)); cudaMalloc(&dev_lights, scene->lights.size() * sizeof(Geom)); cudaMemcpy(dev_lights, scene->lights.data(), scene->lights.size() * sizeof(Geom), cudaMemcpyHostToDevice); checkCUDAError("pathtraceInit"); } void pathtraceFree() { cudaFree(dev_image); // no-op if dev_image is null cudaFree(dev_paths); cudaFree(dev_geoms); cudaFree(dev_materials); cudaFree(dev_intersections); // TODO: clean up any extra device memory you created cudaFree(dev_intersections_cache); cudaFree(dev_compact_idx); cudaFree(dev_sort_paths); cudaFree(dev_sort_intersections); //cudaFree(dev_sort_material); cudaFree(dev_lights); checkCUDAError("pathtraceFree"); } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. * * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments, int* compact_idx) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); compact_idx[index] = index + 1; // used in stream compaction.. PathSegment & segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); // TODO: implement antialiasing by jittering the ray // DOUBLE CHECK THIS LOGIC.. 
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0); float jit_x = x, jit_y = y; if (ANTIALIAS) { thrust::uniform_real_distribution<float> u01(-0.5, 0.5); jit_x += u01(rng); jit_y += u01(rng); } segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * (jit_x - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * (jit_y - (float)cam.resolution.y * 0.5f) ); if (DOF && cam.lensRadius > 0.f) { applyDof(segment.ray, rng, cam); } segment.pixelIndex = index; segment.remainingBounces = traceDepth; segment.throughput = glm::vec3(1.f, 1.f, 1.f); } } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. __global__ void computeIntersections( int depth , int num_paths , PathSegment * pathSegments, int * compact_idx , Geom * geoms , int geoms_size , ShadeableIntersection * intersections // , int * material_type ) { int index = blockIdx.x * blockDim.x + threadIdx.x; //int path_index = COMPACT ? compact_idx[index] - 1 : index; int path_index = index; if (path_index > num_paths || path_index == -1) return; PathSegment pathSegment = pathSegments[path_index]; //if (pathSegment.remainingBounces == 0) return; float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; // naive parse through global geoms for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } // TODO: add more intersection tests here... triangle? metaball? CSG? // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; // material_type[path_index] = -1; } else { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; // material_type[path_index] = intersections[path_index].materialId; intersections[path_index].outside = outside; } } // LOOK: "fake" shader demonstrating what you might do with the info in // a ShadeableIntersection, as well as how to use thrust's random number // generator. Observe that since the thrust random number generator basically // adds "noise" to the iteration, the image should start off noisy and get // cleaner as more iterations are computed. // // Note that this shader does NOT do a BSDF evaluation! // Your shaders should handle that - this can allow techniques such as // bump mapping. __global__ void shadeFakeMaterial ( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials , int depth ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. 
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); pathSegments[idx].remainingBounces = 0; } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! you should be able to start with basically a one-liner else { float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f)); pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f; pathSegments[idx].color *= u01(rng); // apply some noise because why not } // If there was no intersection, color the ray black. // Lots of renderers use 4 channel color, RGBA, where A = alpha, often // used for opacity, in which case they can indicate "no opacity". // This can be useful for post-processing and image compositing. } else { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = 0; } } } // BSDF based material - NAIVE lighting __global__ void shadeBSDFMaterialNaive( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments, int * compact_idx , Material * materials , int depth ) { int index = blockIdx.x * blockDim.x + threadIdx.x; // USE THE ACTIVE INDICES IF COMPACTION IS USED //int idx = COMPACT ? compact_idx[index] - 1 : index; int idx = index; if (idx > num_paths || idx == -1) { compact_idx[index] = 0; return; } PathSegment& pathSegment = pathSegments[idx]; #if !COMPACT if (pathSegment.remainingBounces <= 0) { return; } #endif if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth); Material& material = materials[intersection.materialId]; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegment.color *= (material.color * material.emittance); //pathSegment.color += (material.color * material.emittance)/8.f; pathSegment.remainingBounces = 0; } else { glm::vec3 intpt = pathSegment.ray.origin + intersection.t * pathSegment.ray.direction; scatterRay(pathSegment, intpt, intersection.surfaceNormal, intersection.outside, material, rng); pathSegment.remainingBounces--; #if RUSSIANROULETTE // russian roulette path termination if (depth > 4) { float q = max(pathSegment.color.x, max(pathSegment.color.y, pathSegment.color.z)); thrust::uniform_real_distribution<float> u01(0, 1); if ((u01(rng)) > q) { pathSegment.remainingBounces = 0; } else { pathSegment.color /= q; } } #endif } } else { pathSegment.color = glm::vec3(0.0f); pathSegment.remainingBounces = 0; } } if (pathSegment.remainingBounces == 0 && COMPACT) { compact_idx[index] = 0; } } // BSDF based material - DIRECT lighting __global__ void shadeBSDFMaterialDirect( int iter, int num_paths, ShadeableIntersection * shadeableIntersections, PathSegment * pathSegments, int * compact_idx, Material * materials, Geom * geoms, Geom * lights, int numGeoms, int numLights) { int index = blockIdx.x * blockDim.x + threadIdx.x; // USE THE ACTIVE INDICES IF COMPACTION IS USED //int idx = COMPACT ? 
compact_idx[index] - 1 : index; int idx = index; if (idx > num_paths || idx == -1) return; PathSegment& pathSegment = pathSegments[idx]; #if !COMPACT if (pathSegment.remainingBounces <= 0) { return; } #endif if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); Material& material = materials[intersection.materialId]; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegment.color *= (material.color * material.emittance); pathSegment.remainingBounces = 0; } else { // DIRECT LIGHTING SHADING COMPUTATION glm::vec3 intpt = pathSegment.ray.origin + intersection.t * pathSegment.ray.direction; computeDirectLight(pathSegment, intpt, intersection.surfaceNormal, material, materials, rng, geoms, lights, numGeoms, numLights); pathSegment.remainingBounces = 0; // NO BOUNCES } } else { pathSegment.color = glm::vec3(0.0f); pathSegment.remainingBounces = 0; } } if (pathSegment.remainingBounces == 0 && COMPACT) { compact_idx[index] = 0; } } __global__ void shadeBSDFMaterialFull( int iter, int depth, int num_paths, ShadeableIntersection * shadeableIntersections, PathSegment * pathSegments, int * compact_idx, Material * materials, Geom * geoms, Geom * lights, int numGeoms, int numLights) { int index = blockIdx.x * blockDim.x + threadIdx.x; // USE THE ACTIVE INDICES IF COMPACTION IS USED //int idx = COMPACT ? compact_idx[index] - 1 : index; int idx = index; if (idx > num_paths || idx == -1) { compact_idx[index] = 0; return; } PathSegment& pathSegment = pathSegments[idx]; PathSegment psCopy = pathSegments[idx]; #if !COMPACT if (pathSegment.remainingBounces <= 0) { return; } #endif /*if (idx == 320000) { printf("hi"); }*/ if (idx < num_paths) { //if (depth == 0) { // throughput[idx] = glm::vec3(1.f); // accumulated[idx] = glm::vec3(0.f); //} ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth); Material& material = materials[intersection.materialId]; #if FULLLIGHTING if (depth == 0) { pathSegment.color = glm::vec3(0.f); } #endif if (depth == 0 || pathSegment.specular) { glm::vec3 Le(0.f); if (material.emittance > 0.f && glm::dot(intersection.surfaceNormal, -pathSegment.ray.direction)) { Le = material.color * material.emittance; } pathSegment.color += Le * pathSegment.throughput; } if (material.emittance > 0.0f) { //pathSegment.color += (material.color * material.emittance) * throughput[idx]; pathSegment.remainingBounces = 0; } else { // update specular bounce.. pathSegment.specular = material.hasReflective || material.hasRefractive ? 
true : false; glm::vec3 intpt = pathSegment.ray.origin + intersection.t * pathSegment.ray.direction; #if DIRECTLIGHTING_LASTBOUNCE // DL at last bounce only float fpdf = scatterRay(pathSegment, intpt, intersection.surfaceNormal, intersection.outside, material, rng); pathSegment.remainingBounces--; if (pathSegment.remainingBounces == 0) { float gpdf = computeDirectLight(psCopy, intpt, intersection.surfaceNormal, material, materials, rng, geoms, lights, numGeoms, numLights); pathSegment.color *= psCopy.color; } #else glm::vec3 colL(0.f), colB(0.f); // L = light, B = brdf // colL colL = sampleLightMIS(psCopy, intpt, intersection.surfaceNormal, material, materials, rng, geoms, lights, numGeoms, numLights); // colB colB = sampleBsdfMIS(pathSegment, intpt, intersection.surfaceNormal, intersection.outside, material, rng, lights, numLights); pathSegment.color += pathSegment.throughput * colL; pathSegment.throughput *= colB; //PathSegment ps = pathSegment; //scatterRay(ps, intpt, intersection.surfaceNormal, intersection.outside, material, rng); //pathSegment.ray = ps.ray; //pathSegment.color += pathSegment.throughput * colL; //pathSegment.throughput *= ps.color; pathSegment.remainingBounces--; #endif #if RUSSIANROULETTE // russian roulette path termination if (depth > 4) { float q = max(pathSegment.throughput.x, max(pathSegment.throughput.y, pathSegment.throughput.z)); thrust::uniform_real_distribution<float> u01(0, 1); if ((u01(rng)) > q) { pathSegment.remainingBounces = 0; } else { pathSegment.throughput /= q; } } #endif } } else { pathSegment.color = glm::vec3(0.0f); pathSegment.remainingBounces = 0; } } if (pathSegment.remainingBounces == 0 && COMPACT) { compact_idx[index] = 0; } } // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths, int iter) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { PathSegment iterationPath = iterationPaths[index]; // image[iterationPath.pixelIndex] = (image[iterationPath.pixelIndex] * float(iter) + iterationPath.color) / float(iter + 1); image[iterationPath.pixelIndex] += iterationPath.color; } } //__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) //{ // int index = (blockIdx.x * blockDim.x) + threadIdx.x; // // if (index < nPaths) // { // PathSegment iterationPath = iterationPaths[index]; // image[iterationPath.pixelIndex] += iterationPath.color; // } //} __global__ void setMaterial(int nPaths, int * pathSegmentIndices, int * intersectionIndices, ShadeableIntersection *intersections) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { pathSegmentIndices[index] = intersectionIndices[index] = intersections[index].materialId; } } // THRUST'S STREAM COMPACTION // https://thrust.github.io/doc/group__stream__compaction.html // // struct used for predicate for thrust::remove_if / thrust::partition struct isTerminated { __host__ __device__ bool operator()(const PathSegment &segment) { return (segment.remainingBounces > 0); } }; /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / 
blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // 1D block for path tracing const int blockSize1d = 128; /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. // Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. // TODO: perform one iteration of path tracing generateRayFromCamera <<<blocksPerGrid2d, blockSize2d>>> (cam, iter, traceDepth, dev_paths, dev_compact_idx); checkCUDAError("generate camera ray"); int depth = 0; PathSegment* dev_path_end = dev_paths + pixelcount; int num_paths = dev_path_end - dev_paths; int np = dev_path_end - dev_paths; // --- PathSegment Tracing Stage --- // Shoot ray into scene, bounce between objects, push shading chunks bool iterationComplete = false; while (!iterationComplete) { // clean shading chunks cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // tracing dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; if (depth == 0 && iter > 1 && CACHEFIRSTBOUNCE) { cudaMemcpy(dev_intersections, dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice); } else { computeIntersections <<<numblocksPathSegmentTracing, blockSize1d>>> ( depth , num_paths , dev_paths, dev_compact_idx , dev_geoms , hst_scene->geoms.size() , dev_intersections // , dev_sort_material ); checkCUDAError("trace one bounce"); if (depth == 0 && iter == 1 && CACHEFIRSTBOUNCE) { // update cache with dev_intersections cudaMemcpy(dev_intersections_cache, dev_intersections, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice); } } cudaDeviceSynchronize(); // TODO: // --- Shading Stage --- // Shade path segments based on intersections and generate new rays by // evaluating the BSDF. // Start off with just a big kernel that handles all the different // materials you have in the scenefile. // TODO: compare between directly shading the path segments and shading // path segments that have been reshuffled to be contiguous in memory. 
/* shadeFakeMaterial<<<numblocksPathSegmentTracing, blockSize1d>>> ( iter, num_paths, dev_intersections, dev_paths, dev_materials );*/ //printf("%d ", num_paths); #if SORTBYMATERIAL setMaterial <<<numblocksPathSegmentTracing, blockSize1d>>> (num_paths, dev_sort_paths, dev_sort_intersections, dev_intersections); thrust::sort_by_key(thrust::device, dev_sort_paths, dev_sort_paths + num_paths, dev_paths); thrust::sort_by_key(thrust::device, dev_sort_intersections, dev_sort_intersections + num_paths, dev_intersections); #endif #if FULLLIGHTING || DIRECTLIGHTING_LASTBOUNCE shadeBSDFMaterialFull << <numblocksPathSegmentTracing, blockSize1d >> > ( iter, depth, num_paths, dev_intersections, dev_paths, dev_compact_idx, dev_materials, dev_geoms, dev_lights, hst_scene->geoms.size(), hst_scene->lights.size()); #elif DIRECTLIGHTING shadeBSDFMaterialDirect <<<numblocksPathSegmentTracing, blockSize1d>>> ( iter, num_paths, dev_intersections, dev_paths, dev_compact_idx, dev_materials, dev_geoms, dev_lights, hst_scene->geoms.size(), hst_scene->lights.size()); iterationComplete = true; // DIRECT LIGHTING - 1 BOUNCE ONLY #else shadeBSDFMaterialNaive <<<numblocksPathSegmentTracing, blockSize1d>>> ( iter, num_paths, dev_intersections, dev_paths, dev_compact_idx, dev_materials, depth); #endif #if COMPACT //printf("%d ", num_paths); PathSegment *end = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, isTerminated()); num_paths = end - dev_paths; //num_paths = StreamCompaction::Efficient::compact(num_paths, dev_compact_idx, dev_compact_idx); if (num_paths <= 0) { iterationComplete = true; } #endif depth++; if (depth == traceDepth) { iterationComplete = true; } } num_paths = dev_path_end - dev_paths; // Assemble this iteration and apply it to the image dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; finalGather <<<numBlocksPixels, blockSize1d>>> (num_paths, dev_image, dev_paths, iter); /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU cudaMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
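///////////////////////////////////////////////////////////////////////////
// Standalone sketch of the compaction step (illustrative; the array values and
// the function/struct names below are placeholders, not used by the renderer).
// It shows why num_paths can be reset to the full pixel count before
// finalGather: thrust::partition only reorders the buffer so that elements
// satisfying the predicate come first -- terminated paths and their colors are
// still present behind the returned boundary, nothing is discarded.
struct hasBouncesLeft {
    __host__ __device__ bool operator()(int remainingBounces) const {
        return remainingBounces > 0;
    }
};

void partitionSketch() {
    const int n = 6;
    int hostBounces[n] = { 0, 3, 0, 1, 2, 0 };   // per-path remaining bounces
    int *devBounces = NULL;
    cudaMalloc(&devBounces, n * sizeof(int));
    cudaMemcpy(devBounces, hostBounces, n * sizeof(int), cudaMemcpyHostToDevice);

    // Same call pattern as the COMPACT branch in pathtrace(), applied to ints.
    int *mid = thrust::partition(thrust::device, devBounces, devBounces + n,
                                 hasBouncesLeft());
    int activePaths = (int)(mid - devBounces);   // 3 active paths at the front

    printf("active paths at front: %d of %d kept in the buffer\n", activePaths, n);
    cudaFree(devBounces);
}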
5c593aaa8f8a3f32c632e509561d3ddcff7714fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../AdjointSolver.h" #include "../CommonKernels.h" #include "../Utils3D.h" namespace ar3d { __device__ __inline__ void groundDistanceAdjoint( const real4& groundPlane, const real3& position, const real3& adjNormal, real adjDistance, real4& adjGroundPlane, real3& adjPosition) { adjGroundPlane.x += adjNormal.x + adjDistance * position.x; adjGroundPlane.y += adjNormal.y + adjDistance * position.y; adjGroundPlane.z += adjNormal.z + adjDistance * position.z; adjGroundPlane.w -= adjDistance; adjPosition += adjDistance * groundPlane; } __device__ __inline__ void groundDistanceDtAdjoint( const real4& groundPlane, const real3& velocity, real adjDistance, real4& adjGroundPlane, real3& adjVelocity) { adjGroundPlane.x += adjDistance * velocity.x; adjGroundPlane.y += adjDistance * velocity.y; adjGroundPlane.z += adjDistance * velocity.z; adjVelocity += adjDistance * make_real3(groundPlane.x, groundPlane.y, groundPlane.z); } __global__ void GridAdjointApplyCollisionForcesKernel(dim3 size, const real3* refPositions, const real3* displacements, const real3* velocities, const int4* mapping, const real8* sdfs, const real8* surfaceWeights, real4 groundPlane, real groundStiffness, real softminAlpha, real timestep, real theta, const real3* adjForces, real3* adjDisplacementsOut, real3* adjVelocitiesOut, real4* adjGroundPlaneOut) { CUMAT_KERNEL_1D_LOOP(elementIdx, size) //node indices const int4 map = mapping[elementIdx]; const int nodeIdx[8] = { map.x, map.x + 1, map.y, map.y + 1, map.z, map.z + 1, map.w, map.w + 1 }; //load position + displacement + velocity + init forces real3 posx[8]; real3 velx[8]; float3 forcex[8]; #pragma unroll for (int i = 0; i<8; ++i) { posx[i] = refPositions[nodeIdx[i]] + displacements[nodeIdx[i]]; velx[i] = velocities[nodeIdx[i]]; forcex[i] = make_float3(0); } //prepare adjoint output real4 adjGroundPlane = make_real4(0, 0, 0, 0); real3 adjPosx[8] = {}; real3 adjVelx[8] = {}; //prepare intersection points static int EDGES[12][2] = { { 0, 1 }, { 2, 3 }, { 0, 2 }, { 1, 3 }, { 4, 5 }, { 6, 7 }, { 4, 6 }, { 5, 7 }, { 0, 4 }, { 1, 5 }, { 2, 6 }, { 3, 7 } }; real8 phiTmp = sdfs[elementIdx]; real phi[8] = { phiTmp.first.x, phiTmp.first.y, phiTmp.first.z, phiTmp.first.w, phiTmp.second.x, phiTmp.second.y, phiTmp.second.z, phiTmp.second.w }; //adjoint: integrate the force over the surface real8 sw = surfaceWeights[elementIdx]; real3 adjForceX[8] = { adjForces[nodeIdx[0]] * sw.first.x, adjForces[nodeIdx[1]] * sw.first.y, adjForces[nodeIdx[2]] * sw.first.z, adjForces[nodeIdx[3]] * sw.first.w, adjForces[nodeIdx[4]] * sw.second.x, adjForces[nodeIdx[5]] * sw.second.x, adjForces[nodeIdx[6]] * sw.second.x, adjForces[nodeIdx[7]] * sw.second.x }; //for each of those intersection points, compute the adjoint for (int i=0; i<12; ++i) { //find intersection real intersection = phi[EDGES[i][0]] / (phi[EDGES[i][0]] - phi[EDGES[i][1]]); if (intersection < 0 || intersection > 1 || isnan(intersection)) continue; //get interpolated collision point on the edge real3 pos = posx[EDGES[i][0]] * (1 - intersection) + posx[EDGES[i][1]] * intersection; real3 vel = velx[EDGES[i][0]] * (1 - intersection) + velx[EDGES[i][1]] * intersection; //collide them against the ground -> compute force real4 normalDist = SoftBodySimulation3D::groundDistance(groundPlane, pos); real softmin = ar3d::utils::softmin(normalDist.w, softminAlpha); real distDt = SoftBodySimulation3D::groundDistanceDt(groundPlane, vel); real fCurrent = 
-groundStiffness * softmin; real fDt = -groundStiffness * (ar3d::utils::softminDx(normalDist.w, softminAlpha) * distDt); real fNext = fCurrent + timestep * fDt; real f = theta * fNext + (1 - theta) * fCurrent; if (f <= 1e-10) continue; real3 fVec = make_real3(normalDist.x, normalDist.y, normalDist.z) * f; //adjoint: blend them into the forces real adjIntersection = dot3(adjForceX[EDGES[i][0]] - adjForceX[EDGES[i][1]], fVec); real3 adjFVec = (1 - intersection) * adjForceX[EDGES[i][0]] + intersection * adjForceX[EDGES[i][1]]; //adjoint: collide them against the ground real3 adjNormal = f * adjFVec; real adjF = dot3(normalDist, adjFVec); real adjFNext = theta * adjF; real adjFCurrent = (1 - theta) * adjF; adjFCurrent += adjFNext; real adjFDt = timestep * adjFNext; real adjDistDt = -groundStiffness * ar3d::utils::softminDx(normalDist.w, softminAlpha) * adjFDt; real adjDist = utils::softminDxAdjoint(normalDist.w, softminAlpha, -groundStiffness * distDt * adjFDt); real adjSoftmin = -groundStiffness * adjFCurrent; real3 adjVel = make_real3(0), adjPos = make_real3(0); groundDistanceDtAdjoint(groundPlane, vel, adjDistDt, adjGroundPlane, adjVel); adjDist += utils::softminAdjoint(normalDist.w, softminAlpha, adjSoftmin); groundDistanceAdjoint(groundPlane, pos, adjNormal, adjDist, adjGroundPlane, adjPos); //adjoint: interpolated collision point adjVelx[EDGES[i][0]] = (1 - intersection) * adjVel; adjVelx[EDGES[i][1]] = intersection * adjVel; adjPosx[EDGES[i][0]] = (1 - intersection) * adjPos; adjPosx[EDGES[i][1]] = intersection * adjPos; } //write result atomicAddReal4(adjGroundPlaneOut, adjGroundPlane); #pragma unroll for (int i = 0; i<8; ++i) { atomicAddReal3(adjDisplacementsOut + nodeIdx[i], adjPosx[i]); atomicAddReal3(adjVelocitiesOut + nodeIdx[i], adjVelx[i]); } CUMAT_KERNEL_1D_LOOP_END } void AdjointSolver::adjointApplyCollisionForces(const Input& input, const SoftBodySimulation3D::Settings& settings, const Vector3X& displacements, const Vector3X& velocities, const Vector3X& adjBodyForces, Vector3X& adjDisplacementsOut, Vector3X& adjVelocitiesOut, double4& adjGroundPlaneOut) { cuMat::Context& ctx = cuMat::Context::current(); cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig1D(input.numActiveCells_, GridAdjointApplyCollisionForcesKernel); cuMat::Matrix<real4, 1, 1, 1, 0> adjGroundPlane; adjGroundPlane.setZero(); hipLaunchKernelGGL(( GridAdjointApplyCollisionForcesKernel) , dim3(cfg.block_count), dim3(cfg.thread_per_block), 0, ctx.stream() , cfg.virtual_size, input.referencePositions_.data(), displacements.data(), velocities.data(), input.mapping_.data(), input.cellSdfs_.data(), input.interpolationBoundaryWeights_.data(), settings.groundPlane_, settings.groundStiffness_, settings.softmaxAlpha_, settings.timestep_, settings.newmarkTheta_, adjBodyForces.data(), adjDisplacementsOut.data(), adjVelocitiesOut.data(), adjGroundPlane.data()); CUMAT_CHECK_ERROR(); real4 adjPlane = static_cast<real4>(adjGroundPlane); adjGroundPlaneOut += make_double4(adjPlane.x, adjPlane.y, adjPlane.z, adjPlane.w); } }
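// --- Sketch: checking the ground-distance adjoint above (assumptions noted) ---
// groundDistanceAdjoint/groundDistanceDtAdjoint invert a forward function that,
// judging from the accumulation pattern, is dist(plane, p) = dot(plane.xyz, p) - plane.w
// and distDt(plane, v) = dot(plane.xyz, v). That forward definition is an
// assumption (SoftBodySimulation3D::groundDistance is not shown in this file).
// Under it, d(dist)/d(p) = plane.xyz, which a host-side finite difference confirms:
#include <cstdio>

struct V3 { double x, y, z; };
struct V4 { double x, y, z, w; };

static double dist(const V4& plane, const V3& p) {
    return plane.x * p.x + plane.y * p.y + plane.z * p.z - plane.w;   // assumed forward
}

int main() {
    V4 plane{0.0, 1.0, 0.0, 0.1};             // example ground plane y = 0.1
    V3 p{0.3, 0.05, -0.2};
    const double h = 1e-6;

    V3 analytic{plane.x, plane.y, plane.z};   // rule used by groundDistanceAdjoint

    V3 fd;
    V3 px = p; px.x += h; fd.x = (dist(plane, px) - dist(plane, p)) / h;
    V3 py = p; py.y += h; fd.y = (dist(plane, py) - dist(plane, p)) / h;
    V3 pz = p; pz.z += h; fd.z = (dist(plane, pz) - dist(plane, p)) / h;

    printf("analytic (%g %g %g) vs finite difference (%g %g %g)\n",
           analytic.x, analytic.y, analytic.z, fd.x, fd.y, fd.z);
    return 0;
}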
5c593aaa8f8a3f32c632e509561d3ddcff7714fe.cu
#include "../AdjointSolver.h" #include "../CommonKernels.h" #include "../Utils3D.h" namespace ar3d { __device__ __inline__ void groundDistanceAdjoint( const real4& groundPlane, const real3& position, const real3& adjNormal, real adjDistance, real4& adjGroundPlane, real3& adjPosition) { adjGroundPlane.x += adjNormal.x + adjDistance * position.x; adjGroundPlane.y += adjNormal.y + adjDistance * position.y; adjGroundPlane.z += adjNormal.z + adjDistance * position.z; adjGroundPlane.w -= adjDistance; adjPosition += adjDistance * groundPlane; } __device__ __inline__ void groundDistanceDtAdjoint( const real4& groundPlane, const real3& velocity, real adjDistance, real4& adjGroundPlane, real3& adjVelocity) { adjGroundPlane.x += adjDistance * velocity.x; adjGroundPlane.y += adjDistance * velocity.y; adjGroundPlane.z += adjDistance * velocity.z; adjVelocity += adjDistance * make_real3(groundPlane.x, groundPlane.y, groundPlane.z); } __global__ void GridAdjointApplyCollisionForcesKernel(dim3 size, const real3* refPositions, const real3* displacements, const real3* velocities, const int4* mapping, const real8* sdfs, const real8* surfaceWeights, real4 groundPlane, real groundStiffness, real softminAlpha, real timestep, real theta, const real3* adjForces, real3* adjDisplacementsOut, real3* adjVelocitiesOut, real4* adjGroundPlaneOut) { CUMAT_KERNEL_1D_LOOP(elementIdx, size) //node indices const int4 map = mapping[elementIdx]; const int nodeIdx[8] = { map.x, map.x + 1, map.y, map.y + 1, map.z, map.z + 1, map.w, map.w + 1 }; //load position + displacement + velocity + init forces real3 posx[8]; real3 velx[8]; float3 forcex[8]; #pragma unroll for (int i = 0; i<8; ++i) { posx[i] = refPositions[nodeIdx[i]] + displacements[nodeIdx[i]]; velx[i] = velocities[nodeIdx[i]]; forcex[i] = make_float3(0); } //prepare adjoint output real4 adjGroundPlane = make_real4(0, 0, 0, 0); real3 adjPosx[8] = {}; real3 adjVelx[8] = {}; //prepare intersection points static int EDGES[12][2] = { { 0, 1 }, { 2, 3 }, { 0, 2 }, { 1, 3 }, { 4, 5 }, { 6, 7 }, { 4, 6 }, { 5, 7 }, { 0, 4 }, { 1, 5 }, { 2, 6 }, { 3, 7 } }; real8 phiTmp = sdfs[elementIdx]; real phi[8] = { phiTmp.first.x, phiTmp.first.y, phiTmp.first.z, phiTmp.first.w, phiTmp.second.x, phiTmp.second.y, phiTmp.second.z, phiTmp.second.w }; //adjoint: integrate the force over the surface real8 sw = surfaceWeights[elementIdx]; real3 adjForceX[8] = { adjForces[nodeIdx[0]] * sw.first.x, adjForces[nodeIdx[1]] * sw.first.y, adjForces[nodeIdx[2]] * sw.first.z, adjForces[nodeIdx[3]] * sw.first.w, adjForces[nodeIdx[4]] * sw.second.x, adjForces[nodeIdx[5]] * sw.second.x, adjForces[nodeIdx[6]] * sw.second.x, adjForces[nodeIdx[7]] * sw.second.x }; //for each of those intersection points, compute the adjoint for (int i=0; i<12; ++i) { //find intersection real intersection = phi[EDGES[i][0]] / (phi[EDGES[i][0]] - phi[EDGES[i][1]]); if (intersection < 0 || intersection > 1 || isnan(intersection)) continue; //get interpolated collision point on the edge real3 pos = posx[EDGES[i][0]] * (1 - intersection) + posx[EDGES[i][1]] * intersection; real3 vel = velx[EDGES[i][0]] * (1 - intersection) + velx[EDGES[i][1]] * intersection; //collide them against the ground -> compute force real4 normalDist = SoftBodySimulation3D::groundDistance(groundPlane, pos); real softmin = ar3d::utils::softmin(normalDist.w, softminAlpha); real distDt = SoftBodySimulation3D::groundDistanceDt(groundPlane, vel); real fCurrent = -groundStiffness * softmin; real fDt = -groundStiffness * (ar3d::utils::softminDx(normalDist.w, 
softminAlpha) * distDt); real fNext = fCurrent + timestep * fDt; real f = theta * fNext + (1 - theta) * fCurrent; if (f <= 1e-10) continue; real3 fVec = make_real3(normalDist.x, normalDist.y, normalDist.z) * f; //adjoint: blend them into the forces real adjIntersection = dot3(adjForceX[EDGES[i][0]] - adjForceX[EDGES[i][1]], fVec); real3 adjFVec = (1 - intersection) * adjForceX[EDGES[i][0]] + intersection * adjForceX[EDGES[i][1]]; //adjoint: collide them against the ground real3 adjNormal = f * adjFVec; real adjF = dot3(normalDist, adjFVec); real adjFNext = theta * adjF; real adjFCurrent = (1 - theta) * adjF; adjFCurrent += adjFNext; real adjFDt = timestep * adjFNext; real adjDistDt = -groundStiffness * ar3d::utils::softminDx(normalDist.w, softminAlpha) * adjFDt; real adjDist = utils::softminDxAdjoint(normalDist.w, softminAlpha, -groundStiffness * distDt * adjFDt); real adjSoftmin = -groundStiffness * adjFCurrent; real3 adjVel = make_real3(0), adjPos = make_real3(0); groundDistanceDtAdjoint(groundPlane, vel, adjDistDt, adjGroundPlane, adjVel); adjDist += utils::softminAdjoint(normalDist.w, softminAlpha, adjSoftmin); groundDistanceAdjoint(groundPlane, pos, adjNormal, adjDist, adjGroundPlane, adjPos); //adjoint: interpolated collision point adjVelx[EDGES[i][0]] = (1 - intersection) * adjVel; adjVelx[EDGES[i][1]] = intersection * adjVel; adjPosx[EDGES[i][0]] = (1 - intersection) * adjPos; adjPosx[EDGES[i][1]] = intersection * adjPos; } //write result atomicAddReal4(adjGroundPlaneOut, adjGroundPlane); #pragma unroll for (int i = 0; i<8; ++i) { atomicAddReal3(adjDisplacementsOut + nodeIdx[i], adjPosx[i]); atomicAddReal3(adjVelocitiesOut + nodeIdx[i], adjVelx[i]); } CUMAT_KERNEL_1D_LOOP_END } void AdjointSolver::adjointApplyCollisionForces(const Input& input, const SoftBodySimulation3D::Settings& settings, const Vector3X& displacements, const Vector3X& velocities, const Vector3X& adjBodyForces, Vector3X& adjDisplacementsOut, Vector3X& adjVelocitiesOut, double4& adjGroundPlaneOut) { cuMat::Context& ctx = cuMat::Context::current(); cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig1D(input.numActiveCells_, GridAdjointApplyCollisionForcesKernel); cuMat::Matrix<real4, 1, 1, 1, 0> adjGroundPlane; adjGroundPlane.setZero(); GridAdjointApplyCollisionForcesKernel <<< cfg.block_count, cfg.thread_per_block, 0, ctx.stream() >>>( cfg.virtual_size, input.referencePositions_.data(), displacements.data(), velocities.data(), input.mapping_.data(), input.cellSdfs_.data(), input.interpolationBoundaryWeights_.data(), settings.groundPlane_, settings.groundStiffness_, settings.softmaxAlpha_, settings.timestep_, settings.newmarkTheta_, adjBodyForces.data(), adjDisplacementsOut.data(), adjVelocitiesOut.data(), adjGroundPlane.data()); CUMAT_CHECK_ERROR(); real4 adjPlane = static_cast<real4>(adjGroundPlane); adjGroundPlaneOut += make_double4(adjPlane.x, adjPlane.y, adjPlane.z, adjPlane.w); } }
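// --- Note on the atomic scatter used above (sketch; helper name is assumed) ---
// Both the HIP and CUDA versions accumulate eight per-cell nodal adjoints into
// shared grid nodes with atomicAddReal3/atomicAddReal4, which are project
// helpers defined elsewhere. A plausible component-wise implementation, under
// the assumption that real3 is float3, looks like this; the real helper may use
// double precision instead.
__device__ inline void atomicAddFloat3(float3* dst, const float3& v) {
    atomicAdd(&dst->x, v.x);   // each component is accumulated independently;
    atomicAdd(&dst->y, v.y);   // concurrent writers from neighbouring cells are
    atomicAdd(&dst->z, v.z);   // serialized per component by the hardware
}
// The scatter-with-atomics formulation lets every cell write its eight nodal
// contributions directly, at the cost of contention on nodes shared by cells.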
52607fe22b9474bc9c35a90e0d569291d1f83a2d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "sharedmem.cuh" #include <iostream> #include "rocblas.h" #include <float.h> #include "reduce_min_kernel.h" template <class T> __host__ void min_with_index_host(T x1, T x2,int y1, int y2,T &out_val, int & out_index) { if(x1==fminf(x1,x2)) { out_val=x1; out_index=y1; } else { out_val=x2; out_index=y2; } } template <class T> __device__ void min_with_index(T x1, T x2,int y1, int y2,T &out_val, int & out_index) { if(x1==fminf(x1,x2)) { out_val=x1; out_index=y1; } else { out_val=x2; out_index=y2; } } template <class T, unsigned int blockSize> __global__ void reduce_min_kernel(T *g_odata, T *g_idata,int* index_o, unsigned int n) { __shared__ T sdata[blockSize]; __shared__ int index[blockSize]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; // T thMin = fminf(g_idata[i], g_idata[i + blockSize]); T thMin=0; int thMin_index=0; min_with_index<T>(g_idata[i],g_idata[i+blockSize],i,i+blockSize,thMin,thMin_index); i += gridSize; while (i < n) { T a=0; int a_index; min_with_index<T>(g_idata[i],g_idata[i+blockSize],i,i+blockSize,a,a_index); // = fminf(g_idata[i], g_idata[i + blockSize]); min_with_index<T>(thMin,a,thMin_index,a_index,thMin,thMin_index); // thMin = fminf(thMin, a); i += gridSize; } sdata[tid] =thMin; index[tid]=thMin_index; __syncthreads(); if (blockSize >= 512) { if (tid < 256) { min_with_index<T>(sdata[tid],sdata[tid+256],index[tid],index[tid+256],sdata[tid],index[tid]); } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { min_with_index<T>(sdata[tid],sdata[tid+128],index[tid],index[tid+128],sdata[tid],index[tid]); } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { min_with_index<T>(sdata[tid],sdata[tid+64],index[tid],index[tid+64],sdata[tid],index[tid]); } __syncthreads(); } if (tid < 32) { if (blockSize >= 64) { min_with_index<T>(sdata[tid],sdata[tid+32],index[tid],index[tid+32],sdata[tid],index[tid]); } if (blockSize >= 32) { min_with_index<T>(sdata[tid],sdata[tid+16],index[tid],index[tid+16],sdata[tid],index[tid]); } if (blockSize >= 16) { min_with_index<T>(sdata[tid],sdata[tid+8],index[tid],index[tid+8],sdata[tid],index[tid]); } if (blockSize >= 8) { min_with_index<T>(sdata[tid],sdata[tid+4],index[tid],index[tid+4],sdata[tid],index[tid]); } if (blockSize >= 4) { min_with_index<T>(sdata[tid],sdata[tid+2],index[tid],index[tid+2],sdata[tid],index[tid]); } if (blockSize >= 2) { min_with_index<T>(sdata[tid],sdata[tid+1],index[tid],index[tid+1],sdata[tid],index[tid]); } } // write result for this block to global mem if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; index_o[blockIdx.x]=index[0]; } } //////////////////////////////////////////////////////////////////////////////// // Wrapper function for kernel launch //////////////////////////////////////////////////////////////////////////////// template <class T> T reduce_min(T *d_idata, int size, int & final_index) { const int maxThreads = 128; const int maxBlocks = 128; int threads = 1; if(size%2!=0) { size=size-1; } if( size != 1 ) { threads = (size < maxThreads*2) ? 
size / 2 : maxThreads; } int blocks = size / (threads * 2); blocks = min(maxBlocks, blocks); T * d_odata; hipblasAlloc(blocks, sizeof(T), (void**)&d_odata); int * index_o; hipblasAlloc(blocks, sizeof(int), (void**)&index_o); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(T); switch (threads) { case 512: hipLaunchKernelGGL(( reduce_min_kernel<T, 512>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata,index_o, size); break; case 256: hipLaunchKernelGGL(( reduce_min_kernel<T, 256>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata,index_o, size); break; case 128: hipLaunchKernelGGL(( reduce_min_kernel<T, 128>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata,index_o, size); break; case 64: hipLaunchKernelGGL(( reduce_min_kernel<T, 64>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata,index_o, size); break; case 32: hipLaunchKernelGGL(( reduce_min_kernel<T, 32>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata,index_o, size); break; case 16: hipLaunchKernelGGL(( reduce_min_kernel<T, 16>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata,index_o, size); break; case 8: hipLaunchKernelGGL(( reduce_min_kernel<T, 8>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata,index_o, size); break; case 4: hipLaunchKernelGGL(( reduce_min_kernel<T, 4>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata,index_o, size); break; case 2: hipLaunchKernelGGL(( reduce_min_kernel<T, 2>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata,index_o, size); break; case 1: hipLaunchKernelGGL(( reduce_min_kernel<T, 1>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_odata, d_idata,index_o, size); break; default: exit(1); } T* h_odata = new T[blocks]; int * h_index=new int[blocks]; hipMemcpy( h_index, index_o, blocks*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy( h_odata, d_odata, blocks*sizeof(T), hipMemcpyDeviceToHost); T result = h_odata[0]; int result_index=h_index[0]; for( int i = 1; i < blocks; i++ ) { min_with_index_host<T>(result,h_odata[i],result_index,h_index[i],result,result_index); } delete[] h_odata; final_index=result_index; return result; } template <class T> int IndexOfMin(T * didata, int N) { int final_index=0; bool odd=false; if(N%2!=0) { N=N-1; odd=true; } float value=reduce_min(didata,N,final_index); if(odd) { T * single=new T[1]; // ublasGetVector (int n, int elemSize, const void *x, // int incx, void *y, int incy) hipblasGetVector(1,sizeof(T),&didata[N],1,single,1); if(single[0]<value) { final_index=N; } } return final_index; }
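// --- Sketch: the same value+index min reduction at warp level (illustration) ---
// The block reduction above carries a (value, index) pair through shared memory
// and merges pairs with min_with_index at each step. The sketch below shows the
// same idea with warp shuffles instead of shared memory; it is an illustration
// of the technique, not a drop-in replacement for reduce_min_kernel.
__device__ inline void warpArgMin(float& val, int& idx) {
    for (int offset = 16; offset > 0; offset >>= 1) {
        float otherVal = __shfl_down_sync(0xffffffff, val, offset);
        int   otherIdx = __shfl_down_sync(0xffffffff, idx, offset);
        if (otherVal < val) {            // keep the smaller value and its index
            val = otherVal;
            idx = otherIdx;
        }
    }
    // after the loop, lane 0 holds the warp's minimum value and its index
}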
52607fe22b9474bc9c35a90e0d569291d1f83a2d.cu
#include "sharedmem.cuh" #include <iostream> #include "cublas.h" #include <float.h> #include "reduce_min_kernel.h" template <class T> __host__ void min_with_index_host(T x1, T x2,int y1, int y2,T &out_val, int & out_index) { if(x1==fminf(x1,x2)) { out_val=x1; out_index=y1; } else { out_val=x2; out_index=y2; } } template <class T> __device__ void min_with_index(T x1, T x2,int y1, int y2,T &out_val, int & out_index) { if(x1==fminf(x1,x2)) { out_val=x1; out_index=y1; } else { out_val=x2; out_index=y2; } } template <class T, unsigned int blockSize> __global__ void reduce_min_kernel(T *g_odata, T *g_idata,int* index_o, unsigned int n) { __shared__ T sdata[blockSize]; __shared__ int index[blockSize]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; // T thMin = fminf(g_idata[i], g_idata[i + blockSize]); T thMin=0; int thMin_index=0; min_with_index<T>(g_idata[i],g_idata[i+blockSize],i,i+blockSize,thMin,thMin_index); i += gridSize; while (i < n) { T a=0; int a_index; min_with_index<T>(g_idata[i],g_idata[i+blockSize],i,i+blockSize,a,a_index); // = fminf(g_idata[i], g_idata[i + blockSize]); min_with_index<T>(thMin,a,thMin_index,a_index,thMin,thMin_index); // thMin = fminf(thMin, a); i += gridSize; } sdata[tid] =thMin; index[tid]=thMin_index; __syncthreads(); if (blockSize >= 512) { if (tid < 256) { min_with_index<T>(sdata[tid],sdata[tid+256],index[tid],index[tid+256],sdata[tid],index[tid]); } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { min_with_index<T>(sdata[tid],sdata[tid+128],index[tid],index[tid+128],sdata[tid],index[tid]); } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { min_with_index<T>(sdata[tid],sdata[tid+64],index[tid],index[tid+64],sdata[tid],index[tid]); } __syncthreads(); } if (tid < 32) { if (blockSize >= 64) { min_with_index<T>(sdata[tid],sdata[tid+32],index[tid],index[tid+32],sdata[tid],index[tid]); } if (blockSize >= 32) { min_with_index<T>(sdata[tid],sdata[tid+16],index[tid],index[tid+16],sdata[tid],index[tid]); } if (blockSize >= 16) { min_with_index<T>(sdata[tid],sdata[tid+8],index[tid],index[tid+8],sdata[tid],index[tid]); } if (blockSize >= 8) { min_with_index<T>(sdata[tid],sdata[tid+4],index[tid],index[tid+4],sdata[tid],index[tid]); } if (blockSize >= 4) { min_with_index<T>(sdata[tid],sdata[tid+2],index[tid],index[tid+2],sdata[tid],index[tid]); } if (blockSize >= 2) { min_with_index<T>(sdata[tid],sdata[tid+1],index[tid],index[tid+1],sdata[tid],index[tid]); } } // write result for this block to global mem if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; index_o[blockIdx.x]=index[0]; } } //////////////////////////////////////////////////////////////////////////////// // Wrapper function for kernel launch //////////////////////////////////////////////////////////////////////////////// template <class T> T reduce_min(T *d_idata, int size, int & final_index) { const int maxThreads = 128; const int maxBlocks = 128; int threads = 1; if(size%2!=0) { size=size-1; } if( size != 1 ) { threads = (size < maxThreads*2) ? 
size / 2 : maxThreads; } int blocks = size / (threads * 2); blocks = min(maxBlocks, blocks); T * d_odata; cublasAlloc(blocks, sizeof(T), (void**)&d_odata); int * index_o; cublasAlloc(blocks, sizeof(int), (void**)&index_o); dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); int smemSize = threads * sizeof(T); switch (threads) { case 512: reduce_min_kernel<T, 512><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata,index_o, size); break; case 256: reduce_min_kernel<T, 256><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata,index_o, size); break; case 128: reduce_min_kernel<T, 128><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata,index_o, size); break; case 64: reduce_min_kernel<T, 64><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata,index_o, size); break; case 32: reduce_min_kernel<T, 32><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata,index_o, size); break; case 16: reduce_min_kernel<T, 16><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata,index_o, size); break; case 8: reduce_min_kernel<T, 8><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata,index_o, size); break; case 4: reduce_min_kernel<T, 4><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata,index_o, size); break; case 2: reduce_min_kernel<T, 2><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata,index_o, size); break; case 1: reduce_min_kernel<T, 1><<< dimGrid, dimBlock, smemSize >>>(d_odata, d_idata,index_o, size); break; default: exit(1); } T* h_odata = new T[blocks]; int * h_index=new int[blocks]; cudaMemcpy( h_index, index_o, blocks*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy( h_odata, d_odata, blocks*sizeof(T), cudaMemcpyDeviceToHost); T result = h_odata[0]; int result_index=h_index[0]; for( int i = 1; i < blocks; i++ ) { min_with_index_host<T>(result,h_odata[i],result_index,h_index[i],result,result_index); } delete[] h_odata; final_index=result_index; return result; } template <class T> int IndexOfMin(T * didata, int N) { int final_index=0; bool odd=false; if(N%2!=0) { N=N-1; odd=true; } float value=reduce_min(didata,N,final_index); if(odd) { T * single=new T[1]; // ublasGetVector (int n, int elemSize, const void *x, // int incx, void *y, int incy) cublasGetVector(1,sizeof(T),&didata[N],1,single,1); if(single[0]<value) { final_index=N; } } return final_index; }
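// --- Alternative formulation (sketch): argmin via thrust::min_element ---
// For comparison with the hand-written reduction, Thrust can return the index
// of the minimum directly by pointer arithmetic on the iterator it yields.
// Sketch assumes the input is a plain float array already resident on the
// device; unlike IndexOfMin above, it needs no separate fix-up for odd N.
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>

int indexOfMinThrust(const float* d_data, int n) {
    const float* it = thrust::min_element(thrust::device, d_data, d_data + n);
    return static_cast<int>(it - d_data);
}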
3a55bcc724664550a0c95dd3fc2683c5a0e54379.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel1_b2; int xdim0_update_halo_kernel1_b2_h = -1; __constant__ int ydim0_update_halo_kernel1_b2; int ydim0_update_halo_kernel1_b2_h = -1; __constant__ int xdim1_update_halo_kernel1_b2; int xdim1_update_halo_kernel1_b2_h = -1; __constant__ int ydim1_update_halo_kernel1_b2; int ydim1_update_halo_kernel1_b2_h = -1; __constant__ int xdim2_update_halo_kernel1_b2; int xdim2_update_halo_kernel1_b2_h = -1; __constant__ int ydim2_update_halo_kernel1_b2; int ydim2_update_halo_kernel1_b2_h = -1; __constant__ int xdim3_update_halo_kernel1_b2; int xdim3_update_halo_kernel1_b2_h = -1; __constant__ int ydim3_update_halo_kernel1_b2; int ydim3_update_halo_kernel1_b2_h = -1; __constant__ int xdim4_update_halo_kernel1_b2; int xdim4_update_halo_kernel1_b2_h = -1; __constant__ int ydim4_update_halo_kernel1_b2; int ydim4_update_halo_kernel1_b2_h = -1; __constant__ int xdim5_update_halo_kernel1_b2; int xdim5_update_halo_kernel1_b2_h = -1; __constant__ int ydim5_update_halo_kernel1_b2; int ydim5_update_halo_kernel1_b2_h = -1; __constant__ int xdim6_update_halo_kernel1_b2; int xdim6_update_halo_kernel1_b2_h = -1; __constant__ int ydim6_update_halo_kernel1_b2; int ydim6_update_halo_kernel1_b2_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel1_b2 * (y) + \ xdim0_update_halo_kernel1_b2 * ydim0_update_halo_kernel1_b2 * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel1_b2 * (y) + \ xdim1_update_halo_kernel1_b2 * ydim1_update_halo_kernel1_b2 * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_update_halo_kernel1_b2 * (y) + \ xdim2_update_halo_kernel1_b2 * ydim2_update_halo_kernel1_b2 * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_update_halo_kernel1_b2 * (y) + \ xdim3_update_halo_kernel1_b2 * ydim3_update_halo_kernel1_b2 * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_update_halo_kernel1_b2 * (y) + \ xdim4_update_halo_kernel1_b2 * ydim4_update_halo_kernel1_b2 * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_update_halo_kernel1_b2 * (y) + \ xdim5_update_halo_kernel1_b2 * ydim5_update_halo_kernel1_b2 * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_update_halo_kernel1_b2 * (y) + \ xdim6_update_halo_kernel1_b2 * ydim6_update_halo_kernel1_b2 * (z)) // user function __device__ inline void update_halo_kernel1_b2_gpu(double *density0, double *density1, double *energy0, double *energy1, double *pressure, double *viscosity, double *soundspeed, const int *fields) { if (fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0, 0, 0)] = density0[OPS_ACC0(0, 3, 0)]; if (fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0, 0, 0)] = density1[OPS_ACC1(0, 3, 0)]; if (fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0, 0, 0)] = energy0[OPS_ACC2(0, 3, 0)]; if (fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0, 0, 0)] = energy1[OPS_ACC3(0, 3, 0)]; if (fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0, 0, 0)] = pressure[OPS_ACC4(0, 3, 0)]; if (fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0, 0, 0)] = viscosity[OPS_ACC5(0, 3, 0)]; if (fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0, 0, 0)] = soundspeed[OPS_ACC6(0, 3, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 __global__ void ops_update_halo_kernel1_b2(double *__restrict arg0, double *__restrict arg1, double *__restrict arg2, double *__restrict 
arg3, double *__restrict arg4, double *__restrict arg5, double *__restrict arg6, const int *__restrict arg7, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_b2 + idx_z * 1 * 1 * xdim0_update_halo_kernel1_b2 * ydim0_update_halo_kernel1_b2; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_b2 + idx_z * 1 * 1 * xdim1_update_halo_kernel1_b2 * ydim1_update_halo_kernel1_b2; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_b2 + idx_z * 1 * 1 * xdim2_update_halo_kernel1_b2 * ydim2_update_halo_kernel1_b2; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_b2 + idx_z * 1 * 1 * xdim3_update_halo_kernel1_b2 * ydim3_update_halo_kernel1_b2; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_b2 + idx_z * 1 * 1 * xdim4_update_halo_kernel1_b2 * ydim4_update_halo_kernel1_b2; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_b2 + idx_z * 1 * 1 * xdim5_update_halo_kernel1_b2 * ydim5_update_halo_kernel1_b2; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_update_halo_kernel1_b2 + idx_z * 1 * 1 * xdim6_update_halo_kernel1_b2 * ydim6_update_halo_kernel1_b2; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel1_b2_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel1_b2(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { #else void ops_par_loop_update_halo_kernel1_b2_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; ops_arg arg7 = desc->args[7]; #endif // Timing double t1, t2, c1, c2; ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 8, range, 12)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(12, "update_halo_kernel1_b2"); OPS_kernels[12].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = 
args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel1_b2_h || ydim0 != ydim0_update_halo_kernel1_b2_h || xdim1 != xdim1_update_halo_kernel1_b2_h || ydim1 != ydim1_update_halo_kernel1_b2_h || xdim2 != xdim2_update_halo_kernel1_b2_h || ydim2 != ydim2_update_halo_kernel1_b2_h || xdim3 != xdim3_update_halo_kernel1_b2_h || ydim3 != ydim3_update_halo_kernel1_b2_h || xdim4 != xdim4_update_halo_kernel1_b2_h || ydim4 != ydim4_update_halo_kernel1_b2_h || xdim5 != xdim5_update_halo_kernel1_b2_h || ydim5 != ydim5_update_halo_kernel1_b2_h || xdim6 != xdim6_update_halo_kernel1_b2_h || ydim6 != ydim6_update_halo_kernel1_b2_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel1_b2, &xdim0, sizeof(int)); xdim0_update_halo_kernel1_b2_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel1_b2, &ydim0, sizeof(int)); ydim0_update_halo_kernel1_b2_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel1_b2, &xdim1, sizeof(int)); xdim1_update_halo_kernel1_b2_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel1_b2, &ydim1, sizeof(int)); ydim1_update_halo_kernel1_b2_h = ydim1; hipMemcpyToSymbol(xdim2_update_halo_kernel1_b2, &xdim2, sizeof(int)); xdim2_update_halo_kernel1_b2_h = xdim2; hipMemcpyToSymbol(ydim2_update_halo_kernel1_b2, &ydim2, sizeof(int)); ydim2_update_halo_kernel1_b2_h = ydim2; hipMemcpyToSymbol(xdim3_update_halo_kernel1_b2, &xdim3, sizeof(int)); xdim3_update_halo_kernel1_b2_h = xdim3; hipMemcpyToSymbol(ydim3_update_halo_kernel1_b2, &ydim3, sizeof(int)); ydim3_update_halo_kernel1_b2_h = ydim3; hipMemcpyToSymbol(xdim4_update_halo_kernel1_b2, &xdim4, sizeof(int)); xdim4_update_halo_kernel1_b2_h = xdim4; hipMemcpyToSymbol(ydim4_update_halo_kernel1_b2, &ydim4, sizeof(int)); ydim4_update_halo_kernel1_b2_h = ydim4; hipMemcpyToSymbol(xdim5_update_halo_kernel1_b2, &xdim5, sizeof(int)); xdim5_update_halo_kernel1_b2_h = xdim5; hipMemcpyToSymbol(ydim5_update_halo_kernel1_b2, &ydim5, sizeof(int)); ydim5_update_halo_kernel1_b2_h = ydim5; hipMemcpyToSymbol(xdim6_update_halo_kernel1_b2, &xdim6, sizeof(int)); xdim6_update_halo_kernel1_b2_h = xdim6; hipMemcpyToSymbol(ydim6_update_halo_kernel1_b2, &ydim6, sizeof(int)); ydim6_update_halo_kernel1_b2_h = ydim6; } int *arg7h = (int *)arg7.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg7.data = OPS_consts_h + consts_bytes; arg7.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); int dat6 = (OPS_soa ? 
args[6].dat->type_size : args[6].dat->elem_size); char *p_a[8]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2]); p_a[5] = (char *)args[5].data_d + base5; int base6 = args[6].dat->base_offset + dat6 * 1 * (start[0] * args[6].stencil->stride[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]); base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2]); p_a[6] = (char *)args[6].data_d + base6; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args, 8, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[12].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel1_b2), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d, x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[12].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); ops_set_halo_dirtybit3(&args[2], range); ops_set_halo_dirtybit3(&args[3], range); ops_set_halo_dirtybit3(&args[4], range); ops_set_halo_dirtybit3(&args[5], range); ops_set_halo_dirtybit3(&args[6], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[12].mpi_time += t2 - t1; OPS_kernels[12].transfer += ops_compute_transfer(dim, 
start, end, &arg0); OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg6); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel1_b2(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 12; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 12; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 8; desc->args = (ops_arg *)malloc(8 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index; desc->args[7] = arg7; char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int)); memcpy(tmp, arg7.data, NUM_FIELDS * sizeof(int)); desc->args[7].data = tmp; desc->function = ops_par_loop_update_halo_kernel1_b2_execute; if (OPS_diags > 1) { ops_timing_realloc(12, "update_halo_kernel1_b2"); } ops_enqueue_kernel(desc); } #endif
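// --- Note on the OPS_ACC indexing above (illustration with made-up sizes) ---
// Each OPS_ACCn(x, y, z) macro flattens a relative 3D offset into a 1D index of
// a padded dat: idx = x + xdim*y + xdim*ydim*z, with xdim/ydim kept in
// __constant__ memory per kernel. The host-side illustration below uses
// arbitrary example extents, not values from the application.
#include <cassert>

int main() {
    const int xdim = 8, ydim = 6;      // example padded x/y extents of a dat
    auto acc = [&](int x, int y, int z) { return x + xdim * y + xdim * ydim * z; };

    assert(acc(0, 1, 0) - acc(0, 0, 0) == xdim);          // +1 in y skips one x-row
    assert(acc(0, 0, 1) - acc(0, 0, 0) == xdim * ydim);   // +1 in z skips one x-y plane
    assert(acc(0, 3, 0) - acc(0, 0, 0) == 3 * xdim);      // the halo copy reads 3 rows inward
    return 0;
}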
3a55bcc724664550a0c95dd3fc2683c5a0e54379.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel1_b2; int xdim0_update_halo_kernel1_b2_h = -1; __constant__ int ydim0_update_halo_kernel1_b2; int ydim0_update_halo_kernel1_b2_h = -1; __constant__ int xdim1_update_halo_kernel1_b2; int xdim1_update_halo_kernel1_b2_h = -1; __constant__ int ydim1_update_halo_kernel1_b2; int ydim1_update_halo_kernel1_b2_h = -1; __constant__ int xdim2_update_halo_kernel1_b2; int xdim2_update_halo_kernel1_b2_h = -1; __constant__ int ydim2_update_halo_kernel1_b2; int ydim2_update_halo_kernel1_b2_h = -1; __constant__ int xdim3_update_halo_kernel1_b2; int xdim3_update_halo_kernel1_b2_h = -1; __constant__ int ydim3_update_halo_kernel1_b2; int ydim3_update_halo_kernel1_b2_h = -1; __constant__ int xdim4_update_halo_kernel1_b2; int xdim4_update_halo_kernel1_b2_h = -1; __constant__ int ydim4_update_halo_kernel1_b2; int ydim4_update_halo_kernel1_b2_h = -1; __constant__ int xdim5_update_halo_kernel1_b2; int xdim5_update_halo_kernel1_b2_h = -1; __constant__ int ydim5_update_halo_kernel1_b2; int ydim5_update_halo_kernel1_b2_h = -1; __constant__ int xdim6_update_halo_kernel1_b2; int xdim6_update_halo_kernel1_b2_h = -1; __constant__ int ydim6_update_halo_kernel1_b2; int ydim6_update_halo_kernel1_b2_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel1_b2 * (y) + \ xdim0_update_halo_kernel1_b2 * ydim0_update_halo_kernel1_b2 * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel1_b2 * (y) + \ xdim1_update_halo_kernel1_b2 * ydim1_update_halo_kernel1_b2 * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_update_halo_kernel1_b2 * (y) + \ xdim2_update_halo_kernel1_b2 * ydim2_update_halo_kernel1_b2 * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_update_halo_kernel1_b2 * (y) + \ xdim3_update_halo_kernel1_b2 * ydim3_update_halo_kernel1_b2 * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_update_halo_kernel1_b2 * (y) + \ xdim4_update_halo_kernel1_b2 * ydim4_update_halo_kernel1_b2 * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_update_halo_kernel1_b2 * (y) + \ xdim5_update_halo_kernel1_b2 * ydim5_update_halo_kernel1_b2 * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_update_halo_kernel1_b2 * (y) + \ xdim6_update_halo_kernel1_b2 * ydim6_update_halo_kernel1_b2 * (z)) // user function __device__ inline void update_halo_kernel1_b2_gpu(double *density0, double *density1, double *energy0, double *energy1, double *pressure, double *viscosity, double *soundspeed, const int *fields) { if (fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0, 0, 0)] = density0[OPS_ACC0(0, 3, 0)]; if (fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0, 0, 0)] = density1[OPS_ACC1(0, 3, 0)]; if (fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0, 0, 0)] = energy0[OPS_ACC2(0, 3, 0)]; if (fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0, 0, 0)] = energy1[OPS_ACC3(0, 3, 0)]; if (fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0, 0, 0)] = pressure[OPS_ACC4(0, 3, 0)]; if (fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0, 0, 0)] = viscosity[OPS_ACC5(0, 3, 0)]; if (fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0, 0, 0)] = soundspeed[OPS_ACC6(0, 3, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 __global__ void ops_update_halo_kernel1_b2(double *__restrict arg0, double *__restrict arg1, double *__restrict arg2, double *__restrict arg3, double *__restrict arg4, double *__restrict arg5, double *__restrict arg6, const int 
*__restrict arg7, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_b2 + idx_z * 1 * 1 * xdim0_update_halo_kernel1_b2 * ydim0_update_halo_kernel1_b2; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_b2 + idx_z * 1 * 1 * xdim1_update_halo_kernel1_b2 * ydim1_update_halo_kernel1_b2; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_b2 + idx_z * 1 * 1 * xdim2_update_halo_kernel1_b2 * ydim2_update_halo_kernel1_b2; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_b2 + idx_z * 1 * 1 * xdim3_update_halo_kernel1_b2 * ydim3_update_halo_kernel1_b2; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_b2 + idx_z * 1 * 1 * xdim4_update_halo_kernel1_b2 * ydim4_update_halo_kernel1_b2; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_b2 + idx_z * 1 * 1 * xdim5_update_halo_kernel1_b2 * ydim5_update_halo_kernel1_b2; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_update_halo_kernel1_b2 + idx_z * 1 * 1 * xdim6_update_halo_kernel1_b2 * ydim6_update_halo_kernel1_b2; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel1_b2_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel1_b2(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { #else void ops_par_loop_update_halo_kernel1_b2_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; ops_arg arg7 = desc->args[7]; #endif // Timing double t1, t2, c1, c2; ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 8, range, 12)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(12, "update_halo_kernel1_b2"); OPS_kernels[12].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 
= args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel1_b2_h || ydim0 != ydim0_update_halo_kernel1_b2_h || xdim1 != xdim1_update_halo_kernel1_b2_h || ydim1 != ydim1_update_halo_kernel1_b2_h || xdim2 != xdim2_update_halo_kernel1_b2_h || ydim2 != ydim2_update_halo_kernel1_b2_h || xdim3 != xdim3_update_halo_kernel1_b2_h || ydim3 != ydim3_update_halo_kernel1_b2_h || xdim4 != xdim4_update_halo_kernel1_b2_h || ydim4 != ydim4_update_halo_kernel1_b2_h || xdim5 != xdim5_update_halo_kernel1_b2_h || ydim5 != ydim5_update_halo_kernel1_b2_h || xdim6 != xdim6_update_halo_kernel1_b2_h || ydim6 != ydim6_update_halo_kernel1_b2_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel1_b2, &xdim0, sizeof(int)); xdim0_update_halo_kernel1_b2_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel1_b2, &ydim0, sizeof(int)); ydim0_update_halo_kernel1_b2_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel1_b2, &xdim1, sizeof(int)); xdim1_update_halo_kernel1_b2_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel1_b2, &ydim1, sizeof(int)); ydim1_update_halo_kernel1_b2_h = ydim1; cudaMemcpyToSymbol(xdim2_update_halo_kernel1_b2, &xdim2, sizeof(int)); xdim2_update_halo_kernel1_b2_h = xdim2; cudaMemcpyToSymbol(ydim2_update_halo_kernel1_b2, &ydim2, sizeof(int)); ydim2_update_halo_kernel1_b2_h = ydim2; cudaMemcpyToSymbol(xdim3_update_halo_kernel1_b2, &xdim3, sizeof(int)); xdim3_update_halo_kernel1_b2_h = xdim3; cudaMemcpyToSymbol(ydim3_update_halo_kernel1_b2, &ydim3, sizeof(int)); ydim3_update_halo_kernel1_b2_h = ydim3; cudaMemcpyToSymbol(xdim4_update_halo_kernel1_b2, &xdim4, sizeof(int)); xdim4_update_halo_kernel1_b2_h = xdim4; cudaMemcpyToSymbol(ydim4_update_halo_kernel1_b2, &ydim4, sizeof(int)); ydim4_update_halo_kernel1_b2_h = ydim4; cudaMemcpyToSymbol(xdim5_update_halo_kernel1_b2, &xdim5, sizeof(int)); xdim5_update_halo_kernel1_b2_h = xdim5; cudaMemcpyToSymbol(ydim5_update_halo_kernel1_b2, &ydim5, sizeof(int)); ydim5_update_halo_kernel1_b2_h = ydim5; cudaMemcpyToSymbol(xdim6_update_halo_kernel1_b2, &xdim6, sizeof(int)); xdim6_update_halo_kernel1_b2_h = xdim6; cudaMemcpyToSymbol(ydim6_update_halo_kernel1_b2, &ydim6, sizeof(int)); ydim6_update_halo_kernel1_b2_h = ydim6; } int *arg7h = (int *)arg7.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg7.data = OPS_consts_h + consts_bytes; arg7.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); int dat6 = (OPS_soa ? 
args[6].dat->type_size : args[6].dat->elem_size); char *p_a[8]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2]); p_a[5] = (char *)args[5].data_d + base5; int base6 = args[6].dat->base_offset + dat6 * 1 * (start[0] * args[6].stencil->stride[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]); base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2]); p_a[6] = (char *)args[6].data_d + base6; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args, 8, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[12].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel1_b2<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d, x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[12].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); ops_set_halo_dirtybit3(&args[2], range); ops_set_halo_dirtybit3(&args[3], range); ops_set_halo_dirtybit3(&args[4], range); ops_set_halo_dirtybit3(&args[5], range); ops_set_halo_dirtybit3(&args[6], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[12].mpi_time += t2 - t1; OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg0); 
OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[12].transfer += ops_compute_transfer(dim, start, end, &arg6); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel1_b2(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 12; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 12; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 8; desc->args = (ops_arg *)malloc(8 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index; desc->args[7] = arg7; char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int)); memcpy(tmp, arg7.data, NUM_FIELDS * sizeof(int)); desc->args[7].data = tmp; desc->function = ops_par_loop_update_halo_kernel1_b2_execute; if (OPS_diags > 1) { ops_timing_realloc(12, "update_halo_kernel1_b2"); } ops_enqueue_kernel(desc); } #endif
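// --- Note on the constant-upload caching above (generic sketch) ---
// The host stub re-uploads each xdim*/ydim* value to __constant__ memory only
// when it differs from the cached *_h host mirror, so repeated launches with
// unchanged dat sizes skip the cudaMemcpyToSymbol calls entirely. The same
// pattern in isolation (kXdim/kXdim_h are placeholder names, not the generated
// OPS symbols):
__constant__ int kXdim;
static int kXdim_h = -1;

void setKernelExtent(int xdim) {
    if (xdim != kXdim_h) {                            // upload only on change
        cudaMemcpyToSymbol(kXdim, &xdim, sizeof(int));
        kXdim_h = xdim;
    }
}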
3f4fde0b91510f18df21c6011444b62efdac66a2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "bitmap_approx.cuh" #include <emmintrin.h> #define THREADS 32 //THREADS MUST BE SET TO 32 __constant__ BSpline2d dSplines; BSpline2d generateTestBSplineIntegrals(int pixels, int elements) { int const pxPerElem = pixels / elements; int const pxPerSpline = 3 * pxPerElem; double* spline = new double[pxPerSpline]; for (int i = 0; i < 3 * pxPerElem; i++) { spline[i] = 1.0L / pxPerElem; } float* spline2D = new float[pxPerSpline * pxPerSpline]; float sum = 0; double sump[3][3] = {0}; for (int i = 0; i < pxPerSpline; i++) { for (int j = 0; j < pxPerSpline; j++) { spline2D[i * pxPerSpline + j] = spline[i] * spline[j]; sum += spline2D[i * pxPerSpline + j]; sump[i / pxPerElem][j / pxPerElem] += spline2D[i * pxPerSpline + j]; } } BSpline2d bs; bs.spline = spline2D; bs.size = pxPerSpline; bs.sum = sum; memcpy(&bs.sump, &sump, sizeof(sump)); return bs; } double inline spline1(double x) { return 0.5 * x * x; } double inline spline2(double x) { return (-2 * (x + 1) * (x + 1) + 6 * (x + 1) - 3) * 0.5; } double inline spline3(double x) { return 0.5 * (1 - x) * (1 - x); } double splinePart(double *a,double *b,int size,int globsize,int globi,int globj,float * out) { double sum = 0; int globjbackup = globj; for (int i = 0; i < size; i++,globi++) { globj = globjbackup; for (int j = 0; j < size; j++,globj++) { out[globi*globsize + globj] = a[i] * b[j] / size/size; sum += out[globi*globsize + globj]; } } return sum; } BSpline2d generate2DSplineIntegrals(int pixels, int elements) { int const pxPerElem = pixels / elements; int const pxPerSpline = 3 * pxPerElem; double const pxSpan = 1.0 / pxPerElem; double* s1 = new double[pxPerElem]; double* s2 = new double[pxPerElem]; double* s3 = new double[pxPerElem]; double x = pxSpan / 2; for (int i = 0; i < pxPerElem; i++ , x = x + pxSpan) { s1[i] = spline1(x); s2[i] = spline2(x); s3[i] = spline3(x); } float* spline2D = new float[pxPerSpline * pxPerSpline]; double sum = 0; double sump[3][3] = { 0 }; sump[0][0] = splinePart(s1, s1, pxPerElem, pxPerSpline, 0, 0, spline2D); sump[0][1] = splinePart(s1, s2, pxPerElem, pxPerSpline, 0, pxPerElem, spline2D); sump[0][2] = splinePart(s1, s3, pxPerElem, pxPerSpline, 0, 2*pxPerElem, spline2D); sump[1][0] = splinePart(s2, s1, pxPerElem, pxPerSpline, pxPerElem, 0, spline2D); sump[1][1] = splinePart(s2, s2, pxPerElem, pxPerSpline, pxPerElem, pxPerElem, spline2D); sump[1][2] = splinePart(s2, s3, pxPerElem, pxPerSpline, pxPerElem, 2*pxPerElem, spline2D); sump[2][0] = splinePart(s3, s1, pxPerElem, pxPerSpline, 2*pxPerElem, 0, spline2D); sump[2][1] = splinePart(s3, s2, pxPerElem, pxPerSpline, 2*pxPerElem, pxPerElem, spline2D); sump[2][2] = splinePart(s3, s3, pxPerElem, pxPerSpline, 2*pxPerElem, 2*pxPerElem, spline2D); double sump2[3][3] = { { 1.0L / 20,13.0L / 120,1.0L / 120 }, { 13.0L / 120,45.0L / 100,13.0L / 120 }, { 1.0L / 120,13.0L / 120,1.0L / 20 } }; BSpline2d bs; bs.spline = spline2D; bs.size = pxPerSpline; bs.sum = sum; memcpy(&bs.sump, &sump2, sizeof(sump)); delete[] s1; delete[] s2; delete[] s3; return bs; } ///(ceil(pixPerElem/threadsPerBlock)+1)+2 __global__ void computeRightSide(float* dSplineArray, int toWriteSize, float* bitmap, float* dRightSide, int pixPerElem, int bitmapSize, int memoryBlocks, int memBlockSize, int elements, int elements2, int idleThreads) { __shared__ float toSum[THREADS]; extern __shared__ float toWrite[]; unsigned blockStart = blockIdx.x * blockDim.x; int idx = blockStart + threadIdx.x; int threadsfloat; if 
(idx >= bitmapSize * bitmapSize) { return; } if (idx >= (gridDim.x * blockDim.x - THREADS)) { threadsfloat = THREADS - idleThreads; } else { threadsfloat = THREADS; } int row = idx / bitmapSize; int col = idx % bitmapSize; int pxIdx = idx; float pixel = bitmap[pxIdx]; //compute summing indices int elemStart = threadIdx.x - idx % pixPerElem; int nextStart = elemStart + pixPerElem; elemStart -= (elemStart < 0) * elemStart; // elemStart < 0 ? elemStart : 0 nextStart -= (nextStart > threadsfloat) * (nextStart - threadsfloat);// nextStart > THREADS ? THREADS : nextStart, moze >= ? int half = nextStart - elemStart; int threads = half / 2; half = (half + 1) / 2; //in case of odd float of elements - omit middle element, leave it for next iteration for (int splineRowDispl = pixPerElem * 2; splineRowDispl >= 0; splineRowDispl -= pixPerElem) //2ppe,1ppe,0ppe displacement { if (threadIdx.x < toWriteSize) toWrite[threadIdx.x] = 0; for (int splineColDispl = pixPerElem * 2; splineColDispl >= 0; splineColDispl -= pixPerElem)//2ppe + col { int splineCol = (col + splineColDispl) % dSplines.size; int splineIdx = (row % pixPerElem + splineRowDispl) * dSplines.size + splineCol; //spline row + spline col toSum[threadIdx.x] = pixel*dSplineArray[splineIdx]; int t = threads; int h = half; while (threadIdx.x - elemStart < t) // size/=2 { // __syncthreads(); toSum[threadIdx.x] += toSum[threadIdx.x + h]; t = h / 2; h = (h + 1) / 2; } // __syncthreads(); if (threadIdx.x == elemStart) { int splinePart = splineCol / pixPerElem;//0,1,2 splinePart = 2 - splinePart; //revert spline to 2,1,0 to get shift int elem = idx / pixPerElem - blockStart / pixPerElem; // find elem to which thread belongs, 0,1,2,3,4..last in the block int rowShift = 2 * (idx / pixPerElem / elements - blockStart / pixPerElem / elements);//with each row shift gets greater by two, because row has elems + 2 elements elem = (elem + splinePart + rowShift); atomicAdd(toWrite + elem, toSum[threadIdx.x]); //mozna zastapic atomc add dodajc 3 razy wiecej pamieci toWrite i watki sumuja modulo, a potem sumuje sie te 3 bloki pamiec } } int blockToWrite = (blockIdx.x % memoryBlocks) * memBlockSize; int rowDisplacement = (pixPerElem * 2 - splineRowDispl);//bottom spline to first row, middle to middle, top spline to bottom row int indexToWrite = blockStart / pixPerElem + 2 * (blockStart / pixPerElem / elements) + threadIdx.x;//consecutiveElement indexToWrite += rowDisplacement * elements2 + blockToWrite;//plus elemsRowShift int restFromLast = pixPerElem - blockStart % pixPerElem; //when blockstart % ppe == 0 restFromLast is invalid (pixPerElem), but elemsInBlock is still being computed correctly int elemsInBlock = (restFromLast > 0) + (threadsfloat - restFromLast + pixPerElem - 1) / pixPerElem + 2 + 2 * ((blockStart + threadsfloat - 1) / pixPerElem / elements - blockStart / pixPerElem / elements);//pixPerElem-1 = ceil(x) + 2*float of rows in this block if (threadIdx.x < elemsInBlock) { atomicAdd(dRightSide + indexToWrite, toWrite[threadIdx.x]); } } } //threads = elem2*elem2 __global__ void sumVerticalPxels(float* dRightSide, float* dOut, int elements2, int pixPerElem, float area) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= elements2 * elements2) return; float sum = 0; int elemStartRow = (idx / elements2) * pixPerElem * elements2;//skip all vertical elements which belong to previous row for (int i = 0; i < pixPerElem; i++) { int row = elements2 * i; int col = idx % elements2; sum += dRightSide[elemStartRow + col + row]; } dOut[idx] = sum * area; } 
//threads = elements2*pixPerElem*elements2 __global__ void sumBlocks(float* dRightSide, int memoryBlocks, int memBlockSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= memBlockSize) return; float sum = 0; for (int block = 0; block < memoryBlocks; block++) { sum += dRightSide[memBlockSize * block + idx]; } dRightSide[idx] = sum; } //width and height of bitmap must be equal, and moreover, size of bitmap must be divisible without remainder by float of elements #define BLOCKS(N) (N+THREADS-1)/THREADS number* generateBitmapRightSide(char* bpmPath, int elements, BSpline2d* outBSpline,float * colors) { int elements2 = elements + 2; Bitmap bitmap = readBmp(bpmPath,colors); if (bitmap.height != bitmap.width || bitmap.width % elements != 0) throw "Bitmap dimensions must be equal. Bitmap size must be divisible by number of elements without remainder."; float* dBitmap = nullptr; BSpline2d bSplines = generate2DSplineIntegrals(bitmap.width, elements); // BSpline2d bSplines = generateTestBSplineIntegrals(bitmap.width, elements); if (outBSpline != nullptr) *outBSpline = bSplines; float* dSplineArray; ERRCHECK(hipMemcpyToSymbol(dSplines, &bSplines, sizeof(BSpline2d))); ERRCHECK(hipMalloc(&dSplineArray, sizeof(float)*bSplines.size*bSplines.size)); ERRCHECK(hipMemcpy(dSplineArray, bSplines.spline, sizeof(float)*bSplines.size*bSplines.size, hipMemcpyHostToDevice)); ERRCHECK(hipMalloc(&dBitmap, sizeof(float)*bitmap.width*bitmap.width)); ERRCHECK(hipMemcpy(dBitmap, bitmap.bitmap, sizeof(float)*bitmap.width*bitmap.height, hipMemcpyHostToDevice)); delete[] bitmap.bitmap; float* dRightSide = nullptr; int pixPerElem = bitmap.width / elements; int floatOfMemoryBlocks = (3 * pixPerElem + THREADS - 1) / THREADS + (3 * pixPerElem < THREADS ? 1 : 0);//one element spans on 3 elements and corresponding float of blocks int blockSize = elements2 * elements2 * pixPerElem; int sharedMemorySize = 1 + (THREADS - 1 + pixPerElem - 1) / pixPerElem;//max float of elemens processed in one block sharedMemorySize += 2 + 2 * sharedMemorySize / elements + 4;//+ float of rows in elements, every row extends mem size by 2, +4 account for additional first and last element int totalThreads = BLOCKS(bitmap.width*bitmap.width) * THREADS; int idleThreads = totalThreads - bitmap.width * bitmap.width; float* rightSide = new float[elements2 * elements2]; double area = 1; ERRCHECK(hipMalloc(&dRightSide, sizeof(float)*blockSize*floatOfMemoryBlocks)) ERRCHECK(hipMemset(dRightSide, 0, sizeof(float)*blockSize*floatOfMemoryBlocks)); ERRCHECK(hipDeviceSynchronize()); // showMemoryConsumption(); hipLaunchKernelGGL(( computeRightSide), dim3(BLOCKS(bitmap.width*bitmap.width)), dim3(THREADS),sizeof(float) * sharedMemorySize , 0, dSplineArray, sharedMemorySize, dBitmap, dRightSide, pixPerElem, bitmap.width, floatOfMemoryBlocks, blockSize, elements, elements2, idleThreads); ERRCHECK(hipGetLastError()); ERRCHECK(hipDeviceSynchronize()); hipLaunchKernelGGL(( sumBlocks), dim3(BLOCKS(blockSize)),dim3(THREADS), 0, 0, dRightSide, floatOfMemoryBlocks, blockSize); ERRCHECK(hipGetLastError()); ERRCHECK(hipDeviceSynchronize()); hipLaunchKernelGGL(( sumVerticalPxels), dim3(BLOCKS(elements2*elements2)),dim3(THREADS), 0, 0, dRightSide, dRightSide + blockSize, elements2, pixPerElem, area); ERRCHECK(hipGetLastError()); ERRCHECK(hipDeviceSynchronize()); ERRCHECK(hipMemcpy(rightSide,dRightSide + blockSize, sizeof(float)*elements2*elements2, hipMemcpyDeviceToHost)); ERRCHECK(hipFree(dSplineArray)); ERRCHECK(hipFree(dBitmap)); ERRCHECK(hipFree(dRightSide)); 
#ifdef DOUBLE_NUMBER double* rightSideD = new double[elements2 * elements2]; for (int i = 0; i < elements2 * elements2; i++) rightSideD[i] = rightSide[i]; delete[] rightSide; return rightSideD; #endif #ifdef FLOAT_NUMBER return rightSide; #endif } void measureGenBitmap(char* bmpPath, int elements, int iters = 1) { clock_t start, end; start = clock(); for (int i = 0; i < iters; i++) { generateBitmapRightSide(bmpPath, elements); } end = clock(); printf("time %f\n", ((float)(end - start) / CLOCKS_PER_SEC) / iters); } number* generateBitmapLeftSide(BSpline2d bSplines, int elements) { int len = elements * 5; number* leftSide = new number[len]; double a = bSplines.sump[2][0]; double b = bSplines.sump[2][1] + bSplines.sump[1][0]; double c = bSplines.sump[2][2] + bSplines.sump[1][1] + bSplines.sump[0][0]; double d = bSplines.sump[1][2] + bSplines.sump[0][1]; double e = bSplines.sump[0][2]; for (int i = 0; i < len; i += 5) { leftSide[i] = a; leftSide[i + 1] = b; leftSide[i + 2] = c; leftSide[i + 3] = d; leftSide[i + 4] = e; } leftSide[0] = 0; leftSide[1] = 0; leftSide[5] = 0; leftSide[len - 6] = 0; leftSide[len - 2] = 0; leftSide[len - 1] = 0; return leftSide; } number getApprox(number x, number y, number* solution, int elements) { double const elemSpan = 1.0 / elements; int ex = x * elements; int ey = y * elements; double ly = y - elemSpan * ey; double ly1 = ly + elemSpan; double ly2 = ly1 + elemSpan; double lx = x - elemSpan * ex; double lx1 = lx + elemSpan; double lx2 = lx1 + elemSpan; int elements2 = elements + 2; double approx = 0; approx += spline1(lx) * spline1(ly) * solution[ex * elements2 + ey]; approx += spline1(lx) * spline2(ly) * solution[ex * elements2 + ey + 1]; approx += spline1(lx) * spline3(ly) * solution[ex * elements2 + ey + 2]; ex++; approx += spline2(lx) * spline1(ly) * solution[ex * elements2 + ey]; approx += spline2(lx) * spline2(ly) * solution[ex * elements2 + ey + 1]; approx += spline2(lx) * spline3(ly) * solution[ex * elements2 + ey + 2]; ex++; approx += spline3(lx) * spline1(ly) * solution[ex * elements2 + ey]; approx += spline3(lx) * spline2(ly) * solution[ex * elements2 + ey + 1]; approx += spline3(lx) * spline3(ly) * solution[ex * elements2 + ey + 2]; return approx; } number* getBitmapApprox(number* solution, int elements, int resolution, char * storePath) { number* approx = new number[resolution * resolution]; double span = 1.0L / resolution; number x = -span / 2; for (int i = 0; i < resolution; i++) { x += span; number y = -span / 2; for (int j = 0; j < resolution; j++) { y += span; approx[i * resolution + j] = getApprox(x, y, solution, elements); } } if(storePath != nullptr) saveArray(storePath, resolution, approx); return approx; }
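This file and its CUDA counterpart below differ mainly in kernel-launch syntax: hipify rewrites each triple-chevron launch into a hipLaunchKernelGGL call whose first four arguments are grid, block, dynamic shared-memory bytes and stream, followed by the kernel's own argument list. A minimal toy example of that mapping (hypothetical kernel name scaleKernel, guarded so it compiles under either toolchain) is:

__global__ void scaleKernel(float *x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

static void launchScale(float *d_x, float a, int n) {
  dim3 grid((n + 255) / 256), block(256);
#ifdef __HIPCC__
  /* HIP form produced by hipify: grid, block, shared-mem bytes, stream, then the kernel args. */
  hipLaunchKernelGGL(scaleKernel, grid, block, 0, 0, d_x, a, n);
#else
  /* Original CUDA triple-chevron form. */
  scaleKernel<<<grid, block, 0, 0>>>(d_x, a, n);
#endif
}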
3f4fde0b91510f18df21c6011444b62efdac66a2.cu
#include "bitmap_approx.cuh" #include <emmintrin.h> #define THREADS 32 //THREADS MUST BE SET TO 32 __constant__ BSpline2d dSplines; BSpline2d generateTestBSplineIntegrals(int pixels, int elements) { int const pxPerElem = pixels / elements; int const pxPerSpline = 3 * pxPerElem; double* spline = new double[pxPerSpline]; for (int i = 0; i < 3 * pxPerElem; i++) { spline[i] = 1.0L / pxPerElem; } float* spline2D = new float[pxPerSpline * pxPerSpline]; float sum = 0; double sump[3][3] = {0}; for (int i = 0; i < pxPerSpline; i++) { for (int j = 0; j < pxPerSpline; j++) { spline2D[i * pxPerSpline + j] = spline[i] * spline[j]; sum += spline2D[i * pxPerSpline + j]; sump[i / pxPerElem][j / pxPerElem] += spline2D[i * pxPerSpline + j]; } } BSpline2d bs; bs.spline = spline2D; bs.size = pxPerSpline; bs.sum = sum; memcpy(&bs.sump, &sump, sizeof(sump)); return bs; } double inline spline1(double x) { return 0.5 * x * x; } double inline spline2(double x) { return (-2 * (x + 1) * (x + 1) + 6 * (x + 1) - 3) * 0.5; } double inline spline3(double x) { return 0.5 * (1 - x) * (1 - x); } double splinePart(double *a,double *b,int size,int globsize,int globi,int globj,float * out) { double sum = 0; int globjbackup = globj; for (int i = 0; i < size; i++,globi++) { globj = globjbackup; for (int j = 0; j < size; j++,globj++) { out[globi*globsize + globj] = a[i] * b[j] / size/size; sum += out[globi*globsize + globj]; } } return sum; } BSpline2d generate2DSplineIntegrals(int pixels, int elements) { int const pxPerElem = pixels / elements; int const pxPerSpline = 3 * pxPerElem; double const pxSpan = 1.0 / pxPerElem; double* s1 = new double[pxPerElem]; double* s2 = new double[pxPerElem]; double* s3 = new double[pxPerElem]; double x = pxSpan / 2; for (int i = 0; i < pxPerElem; i++ , x = x + pxSpan) { s1[i] = spline1(x); s2[i] = spline2(x); s3[i] = spline3(x); } float* spline2D = new float[pxPerSpline * pxPerSpline]; double sum = 0; double sump[3][3] = { 0 }; sump[0][0] = splinePart(s1, s1, pxPerElem, pxPerSpline, 0, 0, spline2D); sump[0][1] = splinePart(s1, s2, pxPerElem, pxPerSpline, 0, pxPerElem, spline2D); sump[0][2] = splinePart(s1, s3, pxPerElem, pxPerSpline, 0, 2*pxPerElem, spline2D); sump[1][0] = splinePart(s2, s1, pxPerElem, pxPerSpline, pxPerElem, 0, spline2D); sump[1][1] = splinePart(s2, s2, pxPerElem, pxPerSpline, pxPerElem, pxPerElem, spline2D); sump[1][2] = splinePart(s2, s3, pxPerElem, pxPerSpline, pxPerElem, 2*pxPerElem, spline2D); sump[2][0] = splinePart(s3, s1, pxPerElem, pxPerSpline, 2*pxPerElem, 0, spline2D); sump[2][1] = splinePart(s3, s2, pxPerElem, pxPerSpline, 2*pxPerElem, pxPerElem, spline2D); sump[2][2] = splinePart(s3, s3, pxPerElem, pxPerSpline, 2*pxPerElem, 2*pxPerElem, spline2D); double sump2[3][3] = { { 1.0L / 20,13.0L / 120,1.0L / 120 }, { 13.0L / 120,45.0L / 100,13.0L / 120 }, { 1.0L / 120,13.0L / 120,1.0L / 20 } }; BSpline2d bs; bs.spline = spline2D; bs.size = pxPerSpline; bs.sum = sum; memcpy(&bs.sump, &sump2, sizeof(sump)); delete[] s1; delete[] s2; delete[] s3; return bs; } ///(ceil(pixPerElem/threadsPerBlock)+1)+2 __global__ void computeRightSide(float* dSplineArray, int toWriteSize, float* bitmap, float* dRightSide, int pixPerElem, int bitmapSize, int memoryBlocks, int memBlockSize, int elements, int elements2, int idleThreads) { __shared__ float toSum[THREADS]; extern __shared__ float toWrite[]; unsigned blockStart = blockIdx.x * blockDim.x; int idx = blockStart + threadIdx.x; int threadsfloat; if (idx >= bitmapSize * bitmapSize) { return; } if (idx >= (gridDim.x * blockDim.x - 
THREADS)) { threadsfloat = THREADS - idleThreads; } else { threadsfloat = THREADS; } int row = idx / bitmapSize; int col = idx % bitmapSize; int pxIdx = idx; float pixel = bitmap[pxIdx]; //compute summing indices int elemStart = threadIdx.x - idx % pixPerElem; int nextStart = elemStart + pixPerElem; elemStart -= (elemStart < 0) * elemStart; // elemStart < 0 ? elemStart : 0 nextStart -= (nextStart > threadsfloat) * (nextStart - threadsfloat);// nextStart > THREADS ? THREADS : nextStart, moze >= ? int half = nextStart - elemStart; int threads = half / 2; half = (half + 1) / 2; //in case of odd float of elements - omit middle element, leave it for next iteration for (int splineRowDispl = pixPerElem * 2; splineRowDispl >= 0; splineRowDispl -= pixPerElem) //2ppe,1ppe,0ppe displacement { if (threadIdx.x < toWriteSize) toWrite[threadIdx.x] = 0; for (int splineColDispl = pixPerElem * 2; splineColDispl >= 0; splineColDispl -= pixPerElem)//2ppe + col { int splineCol = (col + splineColDispl) % dSplines.size; int splineIdx = (row % pixPerElem + splineRowDispl) * dSplines.size + splineCol; //spline row + spline col toSum[threadIdx.x] = pixel*dSplineArray[splineIdx]; int t = threads; int h = half; while (threadIdx.x - elemStart < t) // size/=2 { // __syncthreads(); toSum[threadIdx.x] += toSum[threadIdx.x + h]; t = h / 2; h = (h + 1) / 2; } // __syncthreads(); if (threadIdx.x == elemStart) { int splinePart = splineCol / pixPerElem;//0,1,2 splinePart = 2 - splinePart; //revert spline to 2,1,0 to get shift int elem = idx / pixPerElem - blockStart / pixPerElem; // find elem to which thread belongs, 0,1,2,3,4..last in the block int rowShift = 2 * (idx / pixPerElem / elements - blockStart / pixPerElem / elements);//with each row shift gets greater by two, because row has elems + 2 elements elem = (elem + splinePart + rowShift); atomicAdd(toWrite + elem, toSum[threadIdx.x]); //mozna zastapic atomc add dodając 3 razy wiecej pamieci toWrite i watki sumuja modulo, a potem sumuje sie te 3 bloki pamiec } } int blockToWrite = (blockIdx.x % memoryBlocks) * memBlockSize; int rowDisplacement = (pixPerElem * 2 - splineRowDispl);//bottom spline to first row, middle to middle, top spline to bottom row int indexToWrite = blockStart / pixPerElem + 2 * (blockStart / pixPerElem / elements) + threadIdx.x;//consecutiveElement indexToWrite += rowDisplacement * elements2 + blockToWrite;//plus elemsRowShift int restFromLast = pixPerElem - blockStart % pixPerElem; //when blockstart % ppe == 0 restFromLast is invalid (pixPerElem), but elemsInBlock is still being computed correctly int elemsInBlock = (restFromLast > 0) + (threadsfloat - restFromLast + pixPerElem - 1) / pixPerElem + 2 + 2 * ((blockStart + threadsfloat - 1) / pixPerElem / elements - blockStart / pixPerElem / elements);//pixPerElem-1 = ceil(x) + 2*float of rows in this block if (threadIdx.x < elemsInBlock) { atomicAdd(dRightSide + indexToWrite, toWrite[threadIdx.x]); } } } //threads = elem2*elem2 __global__ void sumVerticalPxels(float* dRightSide, float* dOut, int elements2, int pixPerElem, float area) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= elements2 * elements2) return; float sum = 0; int elemStartRow = (idx / elements2) * pixPerElem * elements2;//skip all vertical elements which belong to previous row for (int i = 0; i < pixPerElem; i++) { int row = elements2 * i; int col = idx % elements2; sum += dRightSide[elemStartRow + col + row]; } dOut[idx] = sum * area; } //threads = elements2*pixPerElem*elements2 __global__ void sumBlocks(float* 
dRightSide, int memoryBlocks, int memBlockSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= memBlockSize) return; float sum = 0; for (int block = 0; block < memoryBlocks; block++) { sum += dRightSide[memBlockSize * block + idx]; } dRightSide[idx] = sum; } //width and height of bitmap must be equal, and moreover, size of bitmap must be divisible without remainder by float of elements #define BLOCKS(N) (N+THREADS-1)/THREADS number* generateBitmapRightSide(char* bpmPath, int elements, BSpline2d* outBSpline,float * colors) { int elements2 = elements + 2; Bitmap bitmap = readBmp(bpmPath,colors); if (bitmap.height != bitmap.width || bitmap.width % elements != 0) throw "Bitmap dimensions must be equal. Bitmap size must be divisible by number of elements without remainder."; float* dBitmap = nullptr; BSpline2d bSplines = generate2DSplineIntegrals(bitmap.width, elements); // BSpline2d bSplines = generateTestBSplineIntegrals(bitmap.width, elements); if (outBSpline != nullptr) *outBSpline = bSplines; float* dSplineArray; ERRCHECK(cudaMemcpyToSymbol(dSplines, &bSplines, sizeof(BSpline2d))); ERRCHECK(cudaMalloc(&dSplineArray, sizeof(float)*bSplines.size*bSplines.size)); ERRCHECK(cudaMemcpy(dSplineArray, bSplines.spline, sizeof(float)*bSplines.size*bSplines.size, cudaMemcpyHostToDevice)); ERRCHECK(cudaMalloc(&dBitmap, sizeof(float)*bitmap.width*bitmap.width)); ERRCHECK(cudaMemcpy(dBitmap, bitmap.bitmap, sizeof(float)*bitmap.width*bitmap.height, cudaMemcpyHostToDevice)); delete[] bitmap.bitmap; float* dRightSide = nullptr; int pixPerElem = bitmap.width / elements; int floatOfMemoryBlocks = (3 * pixPerElem + THREADS - 1) / THREADS + (3 * pixPerElem < THREADS ? 1 : 0);//one element spans on 3 elements and corresponding float of blocks int blockSize = elements2 * elements2 * pixPerElem; int sharedMemorySize = 1 + (THREADS - 1 + pixPerElem - 1) / pixPerElem;//max float of elemens processed in one block sharedMemorySize += 2 + 2 * sharedMemorySize / elements + 4;//+ float of rows in elements, every row extends mem size by 2, +4 account for additional first and last element int totalThreads = BLOCKS(bitmap.width*bitmap.width) * THREADS; int idleThreads = totalThreads - bitmap.width * bitmap.width; float* rightSide = new float[elements2 * elements2]; double area = 1; ERRCHECK(cudaMalloc(&dRightSide, sizeof(float)*blockSize*floatOfMemoryBlocks)) ERRCHECK(cudaMemset(dRightSide, 0, sizeof(float)*blockSize*floatOfMemoryBlocks)); ERRCHECK(cudaDeviceSynchronize()); // showMemoryConsumption(); computeRightSide<<<BLOCKS(bitmap.width*bitmap.width), THREADS,sizeof(float) * sharedMemorySize >>>(dSplineArray, sharedMemorySize, dBitmap, dRightSide, pixPerElem, bitmap.width, floatOfMemoryBlocks, blockSize, elements, elements2, idleThreads); ERRCHECK(cudaGetLastError()); ERRCHECK(cudaDeviceSynchronize()); sumBlocks<<<BLOCKS(blockSize),THREADS>>>(dRightSide, floatOfMemoryBlocks, blockSize); ERRCHECK(cudaGetLastError()); ERRCHECK(cudaDeviceSynchronize()); sumVerticalPxels<<<BLOCKS(elements2*elements2),THREADS>>>(dRightSide, dRightSide + blockSize, elements2, pixPerElem, area); ERRCHECK(cudaGetLastError()); ERRCHECK(cudaDeviceSynchronize()); ERRCHECK(cudaMemcpy(rightSide,dRightSide + blockSize, sizeof(float)*elements2*elements2, cudaMemcpyDeviceToHost)); ERRCHECK(cudaFree(dSplineArray)); ERRCHECK(cudaFree(dBitmap)); ERRCHECK(cudaFree(dRightSide)); #ifdef DOUBLE_NUMBER double* rightSideD = new double[elements2 * elements2]; for (int i = 0; i < elements2 * elements2; i++) rightSideD[i] = rightSide[i]; delete[] 
rightSide; return rightSideD; #endif #ifdef FLOAT_NUMBER return rightSide; #endif } void measureGenBitmap(char* bmpPath, int elements, int iters = 1) { clock_t start, end; start = clock(); for (int i = 0; i < iters; i++) { generateBitmapRightSide(bmpPath, elements); } end = clock(); printf("time %f\n", ((float)(end - start) / CLOCKS_PER_SEC) / iters); } number* generateBitmapLeftSide(BSpline2d bSplines, int elements) { int len = elements * 5; number* leftSide = new number[len]; double a = bSplines.sump[2][0]; double b = bSplines.sump[2][1] + bSplines.sump[1][0]; double c = bSplines.sump[2][2] + bSplines.sump[1][1] + bSplines.sump[0][0]; double d = bSplines.sump[1][2] + bSplines.sump[0][1]; double e = bSplines.sump[0][2]; for (int i = 0; i < len; i += 5) { leftSide[i] = a; leftSide[i + 1] = b; leftSide[i + 2] = c; leftSide[i + 3] = d; leftSide[i + 4] = e; } leftSide[0] = 0; leftSide[1] = 0; leftSide[5] = 0; leftSide[len - 6] = 0; leftSide[len - 2] = 0; leftSide[len - 1] = 0; return leftSide; } number getApprox(number x, number y, number* solution, int elements) { double const elemSpan = 1.0 / elements; int ex = x * elements; int ey = y * elements; double ly = y - elemSpan * ey; double ly1 = ly + elemSpan; double ly2 = ly1 + elemSpan; double lx = x - elemSpan * ex; double lx1 = lx + elemSpan; double lx2 = lx1 + elemSpan; int elements2 = elements + 2; double approx = 0; approx += spline1(lx) * spline1(ly) * solution[ex * elements2 + ey]; approx += spline1(lx) * spline2(ly) * solution[ex * elements2 + ey + 1]; approx += spline1(lx) * spline3(ly) * solution[ex * elements2 + ey + 2]; ex++; approx += spline2(lx) * spline1(ly) * solution[ex * elements2 + ey]; approx += spline2(lx) * spline2(ly) * solution[ex * elements2 + ey + 1]; approx += spline2(lx) * spline3(ly) * solution[ex * elements2 + ey + 2]; ex++; approx += spline3(lx) * spline1(ly) * solution[ex * elements2 + ey]; approx += spline3(lx) * spline2(ly) * solution[ex * elements2 + ey + 1]; approx += spline3(lx) * spline3(ly) * solution[ex * elements2 + ey + 2]; return approx; } number* getBitmapApprox(number* solution, int elements, int resolution, char * storePath) { number* approx = new number[resolution * resolution]; double span = 1.0L / resolution; number x = -span / 2; for (int i = 0; i < resolution; i++) { x += span; number y = -span / 2; for (int j = 0; j < resolution; j++) { y += span; approx[i * resolution + j] = getApprox(x, y, solution, elements); } } if(storePath != nullptr) saveArray(storePath, resolution, approx); return approx; }
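The three pieces spline1, spline2 and spline3 above are the segments of a quadratic B-spline over one element of [0,1]; they sum to 1 for every x in [0,1], and the constants hard-coded in sump2 (1/20, 13/120, 1/120, 45/100, ...) are the exact integrals of their pairwise products over [0,1]. A small host-only check of both facts, using a midpoint rule like generate2DSplineIntegrals does (illustrative sketch, not part of the original sources):

#include <cstdio>
#include <cmath>

static double s1(double x) { return 0.5 * x * x; }
static double s2(double x) { return (-2 * (x + 1) * (x + 1) + 6 * (x + 1) - 3) * 0.5; }
static double s3(double x) { return 0.5 * (1 - x) * (1 - x); }

int main() {
  const int n = 1 << 16;              // midpoint-rule resolution (arbitrary)
  const double h = 1.0 / n;
  double unity_err = 0.0, i11 = 0.0, i12 = 0.0, i13 = 0.0, i22 = 0.0;
  for (int k = 0; k < n; ++k) {
    double x = (k + 0.5) * h;
    unity_err = fmax(unity_err, fabs(s1(x) + s2(x) + s3(x) - 1.0));
    i11 += s1(x) * s1(x) * h;         // expected 1/20
    i12 += s1(x) * s2(x) * h;         // expected 13/120
    i13 += s1(x) * s3(x) * h;         // expected 1/120
    i22 += s2(x) * s2(x) * h;         // expected 45/100
  }
  printf("max |s1+s2+s3-1| = %g\n", unity_err);
  printf("integrals: %f %f %f %f\n", i11, i12, i13, i22);
  return 0;
}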
dc98862e4441993e9aed9dc565be875ad6e5e9b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 GUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/memory/memory.h" #include <vector> #include "util.cu.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaximumNumBlocks = 4096; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaximumNumBlocks); } template <typename T> class TopPoolOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "This kernel only runs on GPU device."); auto *x = ctx.Input<Tensor>("X"); auto *max_map = ctx.Output<Tensor>("MaxMap"); auto *output = ctx.Output<Tensor>("Output"); auto *x_data = x->data<T>(); auto x_dims = x->dims(); int NC_num = x_dims[0] * x_dims[1]; int height = x_dims[2]; int width = x_dims[3]; int num = x->numel(); auto& dev_ctx = ctx.cuda_device_context(); int *max_map_data = max_map->mutable_data<int>(x_dims, dev_ctx.GetPlace()); T *output_data = output->mutable_data<T>(x_dims, dev_ctx.GetPlace()); auto gpu_place = boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()); int threads = kNumCUDAThreads; int blocks = NumBlocks(num / height); auto max_val_ptr = memory::Alloc(gpu_place, num / height * sizeof(T)); T* max_val_data = reinterpret_cast<T*>(max_val_ptr->ptr()); auto max_ind_ptr = memory::Alloc(gpu_place, num / height * sizeof(int)); int* max_ind_data = reinterpret_cast<int*>(max_ind_ptr->ptr()); hipLaunchKernelGGL(( GetMaxInfo<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), x->data<T>(), NC_num, height, width, 2, true, max_val_data, max_ind_data, max_map_data); blocks = NumBlocks(num); hipLaunchKernelGGL(( ScatterAddFw<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), x->data<T>(), max_map_data, NC_num, height, width, 2, output_data); } }; template <typename T> class TopPoolGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* x = ctx.Input<Tensor>("X"); auto* max_map = ctx.Input<Tensor>("MaxMap"); auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Output")); auto* in_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto x_dims = x->dims(); auto& dev_ctx = ctx.cuda_device_context(); T* in_grad_data = in_grad->mutable_data<T>(x_dims, dev_ctx.GetPlace()); auto gpu_place = boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()); int threads = kNumCUDAThreads; int NC_num = x_dims[0] * x_dims[1]; int height = x_dims[2]; int width = x_dims[3]; int grad_num = in_grad->numel(); int blocks = NumBlocks(grad_num); hipLaunchKernelGGL(( FillConstant<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), in_grad_data, 0, grad_num); hipLaunchKernelGGL(( 
ScatterAddBw<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), out_grad->data<T>(), max_map->data<int>(), NC_num, height, width, 2, in_grad_data); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(top_pool, ops::TopPoolOpCUDAKernel<float>, ops::TopPoolOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(top_pool_grad, ops::TopPoolGradOpCUDAKernel<float>, ops::TopPoolGradOpCUDAKernel<double>);
dc98862e4441993e9aed9dc565be875ad6e5e9b0.cu
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 GUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/memory/memory.h" #include <vector> #include "util.cu.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaximumNumBlocks = 4096; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaximumNumBlocks); } template <typename T> class TopPoolOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "This kernel only runs on GPU device."); auto *x = ctx.Input<Tensor>("X"); auto *max_map = ctx.Output<Tensor>("MaxMap"); auto *output = ctx.Output<Tensor>("Output"); auto *x_data = x->data<T>(); auto x_dims = x->dims(); int NC_num = x_dims[0] * x_dims[1]; int height = x_dims[2]; int width = x_dims[3]; int num = x->numel(); auto& dev_ctx = ctx.cuda_device_context(); int *max_map_data = max_map->mutable_data<int>(x_dims, dev_ctx.GetPlace()); T *output_data = output->mutable_data<T>(x_dims, dev_ctx.GetPlace()); auto gpu_place = boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()); int threads = kNumCUDAThreads; int blocks = NumBlocks(num / height); auto max_val_ptr = memory::Alloc(gpu_place, num / height * sizeof(T)); T* max_val_data = reinterpret_cast<T*>(max_val_ptr->ptr()); auto max_ind_ptr = memory::Alloc(gpu_place, num / height * sizeof(int)); int* max_ind_data = reinterpret_cast<int*>(max_ind_ptr->ptr()); GetMaxInfo<T><<<blocks, threads, 0, dev_ctx.stream()>>>(x->data<T>(), NC_num, height, width, 2, true, max_val_data, max_ind_data, max_map_data); blocks = NumBlocks(num); ScatterAddFw<T><<<blocks, threads, 0, dev_ctx.stream()>>>(x->data<T>(), max_map_data, NC_num, height, width, 2, output_data); } }; template <typename T> class TopPoolGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* x = ctx.Input<Tensor>("X"); auto* max_map = ctx.Input<Tensor>("MaxMap"); auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Output")); auto* in_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto x_dims = x->dims(); auto& dev_ctx = ctx.cuda_device_context(); T* in_grad_data = in_grad->mutable_data<T>(x_dims, dev_ctx.GetPlace()); auto gpu_place = boost::get<platform::CUDAPlace>(dev_ctx.GetPlace()); int threads = kNumCUDAThreads; int NC_num = x_dims[0] * x_dims[1]; int height = x_dims[2]; int width = x_dims[3]; int grad_num = in_grad->numel(); int blocks = NumBlocks(grad_num); FillConstant<T><<<blocks, threads, 0, dev_ctx.stream()>>>(in_grad_data, 0, grad_num); ScatterAddBw<T><<<blocks, threads, 0, dev_ctx.stream()>>>(out_grad->data<T>(), max_map->data<int>(), NC_num, height, width, 2, in_grad_data); } }; } // namespace operators } // namespace paddle 
namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(top_pool, ops::TopPoolOpCUDAKernel<float>, ops::TopPoolOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(top_pool_grad, ops::TopPoolGradOpCUDAKernel<float>, ops::TopPoolGradOpCUDAKernel<double>);
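NumBlocks above caps the grid at 4096 blocks of 512 threads, so for large tensors a launch can have fewer threads than elements; kernels written against such a helper normally cover the remainder with a grid-stride loop. The real kernels (FillConstant, GetMaxInfo, ScatterAddFw/Bw) live in util.cu.h, which is not shown here, so the following FillConstant-style kernel is only a sketch of that pattern, not the actual implementation:

template <typename T>
__global__ void FillConstantSketch(T *out, T value, int n) {
  /* grid-stride loop: each thread handles indices i, i + gridDim.x*blockDim.x, ... */
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    out[i] = value;
  }
}
/* hypothetical launch, mirroring the call sites above:
   FillConstantSketch<T><<<NumBlocks(n), kNumCUDAThreads, 0, dev_ctx.stream()>>>(ptr, static_cast<T>(0), n); */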
57d959822ecd90c76f3d65eec544b037e8b4c232.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2020 by Contributors * \file rank_metric.cc * \brief prediction rank based metrics. * \author Kailong Chen, Tianqi Chen */ #include <rabit/rabit.h> #include <dmlc/registry.h> #include <xgboost/metric.h> #include <xgboost/host_device_vector.h> #include <thrust/iterator/discard_iterator.h> #include <cmath> #include <array> #include <vector> #include "metric_common.h" #include "../common/math.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace metric { // tag the this file, used by force static link later. DMLC_REGISTRY_FILE_TAG(rank_metric_gpu); /*! \brief Evaluate rank list on GPU */ template <typename EvalMetricT> struct EvalRankGpu : public Metric, public EvalRankConfig { public: bst_float Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, bool distributed) override { // Sanity check is done by the caller std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(preds.Size()); const std::vector<unsigned> &gptr = info.group_ptr_.size() == 0 ? tgptr : info.group_ptr_; const auto ngroups = static_cast<bst_omp_uint>(gptr.size() - 1); auto device = tparam_->gpu_id; dh::safe_cuda(hipSetDevice(device)); info.labels_.SetDevice(device); preds.SetDevice(device); auto dpreds = preds.ConstDevicePointer(); auto dlabels = info.labels_.ConstDevicePointer(); // Sort all the predictions dh::SegmentSorter<float> segment_pred_sorter; segment_pred_sorter.SortItems(dpreds, preds.Size(), gptr); // Compute individual group metric and sum them up return EvalMetricT::EvalMetric(segment_pred_sorter, dlabels, *this); } const char* Name() const override { return name.c_str(); } explicit EvalRankGpu(const char* name, const char* param) { using namespace std; // NOLINT(*) if (param != nullptr) { std::ostringstream os; if (sscanf(param, "%u[-]?", &this->topn) == 1) { os << name << '@' << param; this->name = os.str(); } else { os << name << param; this->name = os.str(); } if (param[strlen(param) - 1] == '-') { this->minus = true; } } else { this->name = name; } } }; /*! \brief Precision at N, for both classification and rank */ struct EvalPrecisionGpu { public: static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto ngroups = pred_sorter.GetNumGroups(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // Original positions of the predictions after they have been sorted const auto &dpreds_orig_pos = pred_sorter.GetOriginalPositionsSpan(); // First, determine non zero labels in the dataset individually auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dpreds_orig_pos[idx]]) != 0) ? 
1 : 0; }; // NOLINT // Find each group's metric sum dh::caching_device_vector<uint32_t> hits(ngroups, 0); const auto nitems = pred_sorter.GetNumItems(); auto *dhits = hits.data().get(); int device_id = -1; dh::safe_cuda(hipGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(nitems, nullptr, [=] __device__(uint32_t idx) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; if (ridx < ecfg.topn && DetermineNonTrivialLabelLambda(idx)) { atomicAdd(&dhits[group_idx], 1); } }); // Allocator to be used for managing space overhead while performing reductions dh::XGBCachingDeviceAllocator<char> alloc; return static_cast<double>(thrust::reduce(thrust::hip::par(alloc), hits.begin(), hits.end())) / ecfg.topn; } }; /*! \brief NDCG: Normalized Discounted Cumulative Gain at N */ struct EvalNDCGGpu { public: static void ComputeDCG(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg, // The order in which labels have to be accessed. The order is determined // by sorting the predictions or the labels for the entire dataset const xgboost::common::Span<const uint32_t> &dlabels_sort_order, dh::caching_device_vector<double> *dcgptr) { dh::caching_device_vector<double> &dcgs(*dcgptr); // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // First, determine non zero labels in the dataset individually auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dlabels_sort_order[idx]])); }; // NOLINT // Find each group's DCG value const auto nitems = pred_sorter.GetNumItems(); auto *ddcgs = dcgs.data().get(); int device_id = -1; dh::safe_cuda(hipGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(nitems, nullptr, [=] __device__(uint32_t idx) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; auto label = DetermineNonTrivialLabelLambda(idx); if (ridx < ecfg.topn && label) { atomicAdd(&ddcgs[group_idx], ((1 << label) - 1) / std::log2(ridx + 2.0)); } }); } static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Sort the labels and compute IDCG dh::SegmentSorter<float> segment_label_sorter; segment_label_sorter.SortItems(dlabels, pred_sorter.GetNumItems(), pred_sorter.GetGroupSegmentsSpan()); uint32_t ngroups = pred_sorter.GetNumGroups(); dh::caching_device_vector<double> idcg(ngroups, 0); ComputeDCG(pred_sorter, dlabels, ecfg, segment_label_sorter.GetOriginalPositionsSpan(), &idcg); // Compute the DCG values next dh::caching_device_vector<double> dcg(ngroups, 0); ComputeDCG(pred_sorter, dlabels, ecfg, pred_sorter.GetOriginalPositionsSpan(), &dcg); double *ddcg = dcg.data().get(); double *didcg = idcg.data().get(); int device_id = -1; dh::safe_cuda(hipGetDevice(&device_id)); // Compute the group's DCG and reduce it across all groups dh::LaunchN(ngroups, nullptr, [=] __device__(uint32_t gidx) { if (didcg[gidx] == 0.0f) { ddcg[gidx] = (ecfg.minus) ? 0.0f : 1.0f; } else { ddcg[gidx] /= didcg[gidx]; } }); // Allocator to be used for managing space overhead while performing reductions dh::XGBCachingDeviceAllocator<char> alloc; return thrust::reduce(thrust::hip::par(alloc), dcg.begin(), dcg.end()); } }; /*! 
\brief Mean Average Precision at N, for both classification and rank */ struct EvalMAPGpu { public: static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto ngroups = pred_sorter.GetNumGroups(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // Original positions of the predictions after they have been sorted const auto &dpreds_orig_pos = pred_sorter.GetOriginalPositionsSpan(); // First, determine non zero labels in the dataset individually const auto nitems = pred_sorter.GetNumItems(); dh::caching_device_vector<uint32_t> hits(nitems, 0); auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dpreds_orig_pos[idx]]) != 0) ? 1 : 0; }; // NOLINT thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(nitems), hits.begin(), DetermineNonTrivialLabelLambda); // Allocator to be used by sort for managing space overhead while performing prefix scans dh::XGBCachingDeviceAllocator<char> alloc; // Next, prefix scan the nontrivial labels that are segmented to accumulate them. // This is required for computing the metric sum // Data segmented into different groups... thrust::inclusive_scan_by_key(thrust::hip::par(alloc), dh::tcbegin(dgroup_idx), dh::tcend(dgroup_idx), hits.begin(), // Input value hits.begin()); // In-place scan // Find each group's metric sum dh::caching_device_vector<double> sumap(ngroups, 0); auto *dsumap = sumap.data().get(); const auto *dhits = hits.data().get(); int device_id = -1; dh::safe_cuda(hipGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(nitems, nullptr, [=] __device__(uint32_t idx) { if (DetermineNonTrivialLabelLambda(idx)) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; if (ridx < ecfg.topn) { atomicAdd(&dsumap[group_idx], static_cast<double>(dhits[idx]) / (ridx + 1)); } } }); // Aggregate the group's item precisions dh::LaunchN(ngroups, nullptr, [=] __device__(uint32_t gidx) { auto nhits = dgroups[gidx + 1] ? dhits[dgroups[gidx + 1] - 1] : 0; if (nhits != 0) { dsumap[gidx] /= nhits; } else { if (ecfg.minus) { dsumap[gidx] = 0; } else { dsumap[gidx] = 1; } } }); return thrust::reduce(thrust::hip::par(alloc), sumap.begin(), sumap.end()); } }; XGBOOST_REGISTER_GPU_METRIC(PrecisionGpu, "pre") .describe("precision@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalPrecisionGpu>("pre", param); }); XGBOOST_REGISTER_GPU_METRIC(NDCGGpu, "ndcg") .describe("ndcg@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalNDCGGpu>("ndcg", param); }); XGBOOST_REGISTER_GPU_METRIC(MAPGpu, "map") .describe("map@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalMAPGpu>("map", param); }); } // namespace metric } // namespace xgboost
57d959822ecd90c76f3d65eec544b037e8b4c232.cu
/*! * Copyright 2020 by Contributors * \file rank_metric.cc * \brief prediction rank based metrics. * \author Kailong Chen, Tianqi Chen */ #include <rabit/rabit.h> #include <dmlc/registry.h> #include <xgboost/metric.h> #include <xgboost/host_device_vector.h> #include <thrust/iterator/discard_iterator.h> #include <cmath> #include <array> #include <vector> #include "metric_common.h" #include "../common/math.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace metric { // tag the this file, used by force static link later. DMLC_REGISTRY_FILE_TAG(rank_metric_gpu); /*! \brief Evaluate rank list on GPU */ template <typename EvalMetricT> struct EvalRankGpu : public Metric, public EvalRankConfig { public: bst_float Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, bool distributed) override { // Sanity check is done by the caller std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(preds.Size()); const std::vector<unsigned> &gptr = info.group_ptr_.size() == 0 ? tgptr : info.group_ptr_; const auto ngroups = static_cast<bst_omp_uint>(gptr.size() - 1); auto device = tparam_->gpu_id; dh::safe_cuda(cudaSetDevice(device)); info.labels_.SetDevice(device); preds.SetDevice(device); auto dpreds = preds.ConstDevicePointer(); auto dlabels = info.labels_.ConstDevicePointer(); // Sort all the predictions dh::SegmentSorter<float> segment_pred_sorter; segment_pred_sorter.SortItems(dpreds, preds.Size(), gptr); // Compute individual group metric and sum them up return EvalMetricT::EvalMetric(segment_pred_sorter, dlabels, *this); } const char* Name() const override { return name.c_str(); } explicit EvalRankGpu(const char* name, const char* param) { using namespace std; // NOLINT(*) if (param != nullptr) { std::ostringstream os; if (sscanf(param, "%u[-]?", &this->topn) == 1) { os << name << '@' << param; this->name = os.str(); } else { os << name << param; this->name = os.str(); } if (param[strlen(param) - 1] == '-') { this->minus = true; } } else { this->name = name; } } }; /*! \brief Precision at N, for both classification and rank */ struct EvalPrecisionGpu { public: static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto ngroups = pred_sorter.GetNumGroups(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // Original positions of the predictions after they have been sorted const auto &dpreds_orig_pos = pred_sorter.GetOriginalPositionsSpan(); // First, determine non zero labels in the dataset individually auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dpreds_orig_pos[idx]]) != 0) ? 
1 : 0; }; // NOLINT // Find each group's metric sum dh::caching_device_vector<uint32_t> hits(ngroups, 0); const auto nitems = pred_sorter.GetNumItems(); auto *dhits = hits.data().get(); int device_id = -1; dh::safe_cuda(cudaGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(nitems, nullptr, [=] __device__(uint32_t idx) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; if (ridx < ecfg.topn && DetermineNonTrivialLabelLambda(idx)) { atomicAdd(&dhits[group_idx], 1); } }); // Allocator to be used for managing space overhead while performing reductions dh::XGBCachingDeviceAllocator<char> alloc; return static_cast<double>(thrust::reduce(thrust::cuda::par(alloc), hits.begin(), hits.end())) / ecfg.topn; } }; /*! \brief NDCG: Normalized Discounted Cumulative Gain at N */ struct EvalNDCGGpu { public: static void ComputeDCG(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg, // The order in which labels have to be accessed. The order is determined // by sorting the predictions or the labels for the entire dataset const xgboost::common::Span<const uint32_t> &dlabels_sort_order, dh::caching_device_vector<double> *dcgptr) { dh::caching_device_vector<double> &dcgs(*dcgptr); // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // First, determine non zero labels in the dataset individually auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dlabels_sort_order[idx]])); }; // NOLINT // Find each group's DCG value const auto nitems = pred_sorter.GetNumItems(); auto *ddcgs = dcgs.data().get(); int device_id = -1; dh::safe_cuda(cudaGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(nitems, nullptr, [=] __device__(uint32_t idx) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; auto label = DetermineNonTrivialLabelLambda(idx); if (ridx < ecfg.topn && label) { atomicAdd(&ddcgs[group_idx], ((1 << label) - 1) / std::log2(ridx + 2.0)); } }); } static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Sort the labels and compute IDCG dh::SegmentSorter<float> segment_label_sorter; segment_label_sorter.SortItems(dlabels, pred_sorter.GetNumItems(), pred_sorter.GetGroupSegmentsSpan()); uint32_t ngroups = pred_sorter.GetNumGroups(); dh::caching_device_vector<double> idcg(ngroups, 0); ComputeDCG(pred_sorter, dlabels, ecfg, segment_label_sorter.GetOriginalPositionsSpan(), &idcg); // Compute the DCG values next dh::caching_device_vector<double> dcg(ngroups, 0); ComputeDCG(pred_sorter, dlabels, ecfg, pred_sorter.GetOriginalPositionsSpan(), &dcg); double *ddcg = dcg.data().get(); double *didcg = idcg.data().get(); int device_id = -1; dh::safe_cuda(cudaGetDevice(&device_id)); // Compute the group's DCG and reduce it across all groups dh::LaunchN(ngroups, nullptr, [=] __device__(uint32_t gidx) { if (didcg[gidx] == 0.0f) { ddcg[gidx] = (ecfg.minus) ? 0.0f : 1.0f; } else { ddcg[gidx] /= didcg[gidx]; } }); // Allocator to be used for managing space overhead while performing reductions dh::XGBCachingDeviceAllocator<char> alloc; return thrust::reduce(thrust::cuda::par(alloc), dcg.begin(), dcg.end()); } }; /*! 
\brief Mean Average Precision at N, for both classification and rank */ struct EvalMAPGpu { public: static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto ngroups = pred_sorter.GetNumGroups(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // Original positions of the predictions after they have been sorted const auto &dpreds_orig_pos = pred_sorter.GetOriginalPositionsSpan(); // First, determine non zero labels in the dataset individually const auto nitems = pred_sorter.GetNumItems(); dh::caching_device_vector<uint32_t> hits(nitems, 0); auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dpreds_orig_pos[idx]]) != 0) ? 1 : 0; }; // NOLINT thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(nitems), hits.begin(), DetermineNonTrivialLabelLambda); // Allocator to be used by sort for managing space overhead while performing prefix scans dh::XGBCachingDeviceAllocator<char> alloc; // Next, prefix scan the nontrivial labels that are segmented to accumulate them. // This is required for computing the metric sum // Data segmented into different groups... thrust::inclusive_scan_by_key(thrust::cuda::par(alloc), dh::tcbegin(dgroup_idx), dh::tcend(dgroup_idx), hits.begin(), // Input value hits.begin()); // In-place scan // Find each group's metric sum dh::caching_device_vector<double> sumap(ngroups, 0); auto *dsumap = sumap.data().get(); const auto *dhits = hits.data().get(); int device_id = -1; dh::safe_cuda(cudaGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(nitems, nullptr, [=] __device__(uint32_t idx) { if (DetermineNonTrivialLabelLambda(idx)) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; if (ridx < ecfg.topn) { atomicAdd(&dsumap[group_idx], static_cast<double>(dhits[idx]) / (ridx + 1)); } } }); // Aggregate the group's item precisions dh::LaunchN(ngroups, nullptr, [=] __device__(uint32_t gidx) { auto nhits = dgroups[gidx + 1] ? dhits[dgroups[gidx + 1] - 1] : 0; if (nhits != 0) { dsumap[gidx] /= nhits; } else { if (ecfg.minus) { dsumap[gidx] = 0; } else { dsumap[gidx] = 1; } } }); return thrust::reduce(thrust::cuda::par(alloc), sumap.begin(), sumap.end()); } }; XGBOOST_REGISTER_GPU_METRIC(PrecisionGpu, "pre") .describe("precision@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalPrecisionGpu>("pre", param); }); XGBOOST_REGISTER_GPU_METRIC(NDCGGpu, "ndcg") .describe("ndcg@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalNDCGGpu>("ndcg", param); }); XGBOOST_REGISTER_GPU_METRIC(MAPGpu, "map") .describe("map@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalMAPGpu>("map", param); }); } // namespace metric } // namespace xgboost
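The NDCG kernel above accumulates, per group, the term ((1 << label) - 1) / log2(ridx + 2) over the top-n ranked items, then divides by the same sum computed over the ideal (label-sorted) order. A host-side reference for a single group, handy for sanity-checking the GPU result (illustrative sketch; the example labels are made up):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <functional>
#include <vector>

static double dcg_at_k(const std::vector<int> &labels_in_rank_order, int topn) {
  double dcg = 0.0;
  int k = std::min<int>(topn, static_cast<int>(labels_in_rank_order.size()));
  for (int ridx = 0; ridx < k; ++ridx) {
    int label = labels_in_rank_order[ridx];
    if (label) dcg += ((1 << label) - 1) / std::log2(ridx + 2.0);
  }
  return dcg;
}

int main() {
  std::vector<int> by_pred = {2, 0, 1, 0};                      // labels in prediction-sorted order
  std::vector<int> ideal = by_pred;
  std::sort(ideal.begin(), ideal.end(), std::greater<int>());   // label-sorted (ideal) order
  double dcg = dcg_at_k(by_pred, 4), idcg = dcg_at_k(ideal, 4);
  // mirrors the kernel's convention: an all-zero group scores 1 (or 0 with the '-' suffix)
  printf("ndcg@4 = %f\n", idcg == 0.0 ? 1.0 : dcg / idcg);
  return 0;
}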
22472208a1e1cfd45f46c16b668bee90ce892a31.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // System includes #include <stdio.h> #include <assert.h> #include <stdlib.h> #define NUM_VERTICES 1024 #define SEQUENTIAL 0 #define POS(i, j) (((j) * NUM_VERTICES) + i) #define PRINT_EDGES 0 #define PSEUDO_RANDOM_SIZE 1024 #define BLOCK_SIZE 32 #define DEBUG 1 // CUDA API error checking macro static void handleError(hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), file, line ); exit(EXIT_FAILURE); } } #define cudaCheck( err ) (handleError(err, __FILE__, __LINE__ )) int sampleBinomialDistribution(double succesProbability) { return (int)((double)rand() / (double)RAND_MAX < succesProbability); } void printArray(int arr[]) { printf("Array("); for (int i = 0; i < NUM_VERTICES; i++) { if (arr[i] > 0) { printf("%d:%d, ", i, arr[i]); } } printf(")\n"); } void printEdges(int *adjacencyMatrix) { printf("Edges: \n"); for (int i = 0; i < NUM_VERTICES; i++) { for (int j = 0; j < NUM_VERTICES; j++) { if (adjacencyMatrix[POS(i, j)] == 1) { printf("\t(%d, %d)\n", i, j); } } } } void star(int *adjacencyMatrix) { memset(adjacencyMatrix, 0, NUM_VERTICES * NUM_VERTICES); for (int j = 1; j < NUM_VERTICES; j++) { adjacencyMatrix[POS(0, j)] = 1; adjacencyMatrix[POS(j, 0)] = 1; } } void cycle(int *adjacencyMatrix) { memset(adjacencyMatrix, 0, NUM_VERTICES * NUM_VERTICES); for (int i = 0; i < NUM_VERTICES; i++) { adjacencyMatrix[POS(i, (i + 1) % NUM_VERTICES)] = 1; adjacencyMatrix[POS(i, (i + NUM_VERTICES - 1) % NUM_VERTICES)] = 1; } } // Skewed random graph void randomGraph(int *adjacencyMatrix) { memset(adjacencyMatrix, 0, NUM_VERTICES * NUM_VERTICES); int edge; for (int i = 0; i < NUM_VERTICES; i++) { for (int j = i + 1; j < NUM_VERTICES; j++) { double probability = (((double) i) + j) / (4 * NUM_VERTICES); edge = sampleBinomialDistribution(probability); adjacencyMatrix[POS(i, j)] = edge; adjacencyMatrix[POS(j, i)] = edge; } } } void fillPseudoRandoms(float pseudoRandoms[]) { for (int i = 0; i < PSEUDO_RANDOM_SIZE; i++) { pseudoRandoms[i] = 1. 
* (i % 3) / 3.; } } bool verifyMaximalIndependentSet(int *adjacencyMatrix, int* independentSet) { for (int i = 0; i < NUM_VERTICES; i++) { int neighborsInV = 0; for (int j = 0; j < NUM_VERTICES; j++) { if (i != j && adjacencyMatrix[POS(i, j)] == 1 && independentSet[j] == 1) { if (independentSet[i]) { printf("Set is not independent as it contains neighbors %d and %d", i, j); return false; } neighborsInV++; } } if (independentSet[i] == 0 && neighborsInV == 0) { printf("Set is not maximal as %d (not in set) has no neighbors in set\n", i); return false; } } return true; } void lfIndependentSet(int *adjacencyMatrix, int* independentSet) { memset(independentSet, 0, NUM_VERTICES); for (int i = 0; i < NUM_VERTICES; i++) { bool hasNeighborsInV = false; for (int j = 0; j < i; j++) { if (adjacencyMatrix[POS(i, j)] == 1 && independentSet[j] == 1) { hasNeighborsInV = true; } } if (!hasNeighborsInV) { independentSet[i] = 1; } } } int main() { srand(1); // Allocate host memory for matrices A, B and C size_t adjacencyMatrix_mem_size = sizeof(int) * NUM_VERTICES * NUM_VERTICES; int *adjacencyMatrix = (int *) malloc(adjacencyMatrix_mem_size); int *independentSet = (int *) malloc(NUM_VERTICES * sizeof(int)); if (adjacencyMatrix == NULL || independentSet == NULL) { fprintf(stderr, "Failed to allocate host matrices!\n"); exit(EXIT_FAILURE); } //randomGraph(adjacencyMatrix); randomGraph(adjacencyMatrix); if (SEQUENTIAL) { lfIndependentSet(adjacencyMatrix, independentSet); } else { printf("Computing result using CUDA Kernel...\n"); float randoms[PSEUDO_RANDOM_SIZE]; fillPseudoRandoms(randoms); // Allocate device memory int *d_adjacencyMatrix, *d_degrees, *d_independentSet, *d_marked, *d_removedNodes, *d_existsNonRemoved; float *d_randoms; /** Feel free to use other structures! **/ cudaCheck(hipMalloc((void **) &d_adjacencyMatrix, adjacencyMatrix_mem_size)); cudaCheck(hipMalloc((void **) &d_degrees, sizeof(int) * NUM_VERTICES)); cudaCheck(hipMalloc((void **) &d_independentSet, sizeof(int) * NUM_VERTICES)); cudaCheck(hipMalloc((void **) &d_marked, sizeof(int) * NUM_VERTICES)); cudaCheck(hipMalloc((void **) &d_removedNodes, sizeof(int) * NUM_VERTICES)); cudaCheck(hipMalloc((void **) &d_existsNonRemoved, sizeof(int))); cudaCheck(hipMalloc((void **) &d_randoms, sizeof(float) * PSEUDO_RANDOM_SIZE)); // copy host memory to device cudaCheck(hipMemcpy(d_adjacencyMatrix, adjacencyMatrix, adjacencyMatrix_mem_size, hipMemcpyHostToDevice)); cudaCheck(hipMemcpy(d_randoms, &randoms, sizeof(float) * PSEUDO_RANDOM_SIZE, hipMemcpyHostToDevice)); /* Run your kernel(s) here */ // Copy result from device to host cudaCheck(hipMemcpy(independentSet, d_independentSet, NUM_VERTICES * sizeof(int), hipMemcpyDeviceToHost)); hipFree(d_adjacencyMatrix); hipFree(d_degrees); hipFree(d_marked); hipFree(d_independentSet); hipFree(d_removedNodes); } printf("Checking computed result for correctness: "); if (PRINT_EDGES) printEdges(adjacencyMatrix); printArray(independentSet); bool correct = verifyMaximalIndependentSet(adjacencyMatrix, independentSet); printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); // Clean up memory free(adjacencyMatrix); free(independentSet); if (correct) { return EXIT_SUCCESS; } else { return EXIT_FAILURE; } }
22472208a1e1cfd45f46c16b668bee90ce892a31.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // System includes #include <stdio.h> #include <assert.h> #include <stdlib.h> #define NUM_VERTICES 1024 #define SEQUENTIAL 0 #define POS(i, j) (((j) * NUM_VERTICES) + i) #define PRINT_EDGES 0 #define PSEUDO_RANDOM_SIZE 1024 #define BLOCK_SIZE 32 #define DEBUG 1 // CUDA API error checking macro static void handleError(cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line ); exit(EXIT_FAILURE); } } #define cudaCheck( err ) (handleError(err, __FILE__, __LINE__ )) int sampleBinomialDistribution(double succesProbability) { return (int)((double)rand() / (double)RAND_MAX < succesProbability); } void printArray(int arr[]) { printf("Array("); for (int i = 0; i < NUM_VERTICES; i++) { if (arr[i] > 0) { printf("%d:%d, ", i, arr[i]); } } printf(")\n"); } void printEdges(int *adjacencyMatrix) { printf("Edges: \n"); for (int i = 0; i < NUM_VERTICES; i++) { for (int j = 0; j < NUM_VERTICES; j++) { if (adjacencyMatrix[POS(i, j)] == 1) { printf("\t(%d, %d)\n", i, j); } } } } void star(int *adjacencyMatrix) { memset(adjacencyMatrix, 0, NUM_VERTICES * NUM_VERTICES); for (int j = 1; j < NUM_VERTICES; j++) { adjacencyMatrix[POS(0, j)] = 1; adjacencyMatrix[POS(j, 0)] = 1; } } void cycle(int *adjacencyMatrix) { memset(adjacencyMatrix, 0, NUM_VERTICES * NUM_VERTICES); for (int i = 0; i < NUM_VERTICES; i++) { adjacencyMatrix[POS(i, (i + 1) % NUM_VERTICES)] = 1; adjacencyMatrix[POS(i, (i + NUM_VERTICES - 1) % NUM_VERTICES)] = 1; } } // Skewed random graph void randomGraph(int *adjacencyMatrix) { memset(adjacencyMatrix, 0, NUM_VERTICES * NUM_VERTICES); int edge; for (int i = 0; i < NUM_VERTICES; i++) { for (int j = i + 1; j < NUM_VERTICES; j++) { double probability = (((double) i) + j) / (4 * NUM_VERTICES); edge = sampleBinomialDistribution(probability); adjacencyMatrix[POS(i, j)] = edge; adjacencyMatrix[POS(j, i)] = edge; } } } void fillPseudoRandoms(float pseudoRandoms[]) { for (int i = 0; i < PSEUDO_RANDOM_SIZE; i++) { pseudoRandoms[i] = 1. 
* (i % 3) / 3.; } } bool verifyMaximalIndependentSet(int *adjacencyMatrix, int* independentSet) { for (int i = 0; i < NUM_VERTICES; i++) { int neighborsInV = 0; for (int j = 0; j < NUM_VERTICES; j++) { if (i != j && adjacencyMatrix[POS(i, j)] == 1 && independentSet[j] == 1) { if (independentSet[i]) { printf("Set is not independent as it contains neighbors %d and %d", i, j); return false; } neighborsInV++; } } if (independentSet[i] == 0 && neighborsInV == 0) { printf("Set is not maximal as %d (not in set) has no neighbors in set\n", i); return false; } } return true; } void lfIndependentSet(int *adjacencyMatrix, int* independentSet) { memset(independentSet, 0, NUM_VERTICES); for (int i = 0; i < NUM_VERTICES; i++) { bool hasNeighborsInV = false; for (int j = 0; j < i; j++) { if (adjacencyMatrix[POS(i, j)] == 1 && independentSet[j] == 1) { hasNeighborsInV = true; } } if (!hasNeighborsInV) { independentSet[i] = 1; } } } int main() { srand(1); // Allocate host memory for matrices A, B and C size_t adjacencyMatrix_mem_size = sizeof(int) * NUM_VERTICES * NUM_VERTICES; int *adjacencyMatrix = (int *) malloc(adjacencyMatrix_mem_size); int *independentSet = (int *) malloc(NUM_VERTICES * sizeof(int)); if (adjacencyMatrix == NULL || independentSet == NULL) { fprintf(stderr, "Failed to allocate host matrices!\n"); exit(EXIT_FAILURE); } //randomGraph(adjacencyMatrix); randomGraph(adjacencyMatrix); if (SEQUENTIAL) { lfIndependentSet(adjacencyMatrix, independentSet); } else { printf("Computing result using CUDA Kernel...\n"); float randoms[PSEUDO_RANDOM_SIZE]; fillPseudoRandoms(randoms); // Allocate device memory int *d_adjacencyMatrix, *d_degrees, *d_independentSet, *d_marked, *d_removedNodes, *d_existsNonRemoved; float *d_randoms; /** Feel free to use other structures! **/ cudaCheck(cudaMalloc((void **) &d_adjacencyMatrix, adjacencyMatrix_mem_size)); cudaCheck(cudaMalloc((void **) &d_degrees, sizeof(int) * NUM_VERTICES)); cudaCheck(cudaMalloc((void **) &d_independentSet, sizeof(int) * NUM_VERTICES)); cudaCheck(cudaMalloc((void **) &d_marked, sizeof(int) * NUM_VERTICES)); cudaCheck(cudaMalloc((void **) &d_removedNodes, sizeof(int) * NUM_VERTICES)); cudaCheck(cudaMalloc((void **) &d_existsNonRemoved, sizeof(int))); cudaCheck(cudaMalloc((void **) &d_randoms, sizeof(float) * PSEUDO_RANDOM_SIZE)); // copy host memory to device cudaCheck(cudaMemcpy(d_adjacencyMatrix, adjacencyMatrix, adjacencyMatrix_mem_size, cudaMemcpyHostToDevice)); cudaCheck(cudaMemcpy(d_randoms, &randoms, sizeof(float) * PSEUDO_RANDOM_SIZE, cudaMemcpyHostToDevice)); /* Run your kernel(s) here */ // Copy result from device to host cudaCheck(cudaMemcpy(independentSet, d_independentSet, NUM_VERTICES * sizeof(int), cudaMemcpyDeviceToHost)); cudaFree(d_adjacencyMatrix); cudaFree(d_degrees); cudaFree(d_marked); cudaFree(d_independentSet); cudaFree(d_removedNodes); } printf("Checking computed result for correctness: "); if (PRINT_EDGES) printEdges(adjacencyMatrix); printArray(independentSet); bool correct = verifyMaximalIndependentSet(adjacencyMatrix, independentSet); printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); // Clean up memory free(adjacencyMatrix); free(independentSet); if (correct) { return EXIT_SUCCESS; } else { return EXIT_FAILURE; } }
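// A minimal host-side sketch, not part of the assignment skeleton above, of the
// driver loop that could sit at the "Run your kernel(s) here" comment. It assumes
// Luby-style kernels like the markCandidates/addAndRemove sketch shown after the
// HIP copy of this file above; runLubyMIS is an illustrative name, while cudaCheck,
// BLOCK_SIZE and NUM_VERTICES come from the skeleton itself.
void runLubyMIS(int *d_adjacencyMatrix, float *d_randoms, int *d_marked,
                int *d_removedNodes, int *d_independentSet, int *d_existsNonRemoved) {
    cudaCheck(cudaMemset(d_independentSet, 0, NUM_VERTICES * sizeof(int)));
    cudaCheck(cudaMemset(d_removedNodes, 0, NUM_VERTICES * sizeof(int)));
    dim3 threads(BLOCK_SIZE);
    dim3 blocks((NUM_VERTICES + BLOCK_SIZE - 1) / BLOCK_SIZE);
    int existsNonRemoved = 1;
    while (existsNonRemoved) {                 // one Luby round per iteration
        cudaCheck(cudaMemset(d_existsNonRemoved, 0, sizeof(int)));
        markCandidates<<<blocks, threads>>>(d_adjacencyMatrix, d_removedNodes,
                                            d_randoms, d_marked);
        addAndRemove<<<blocks, threads>>>(d_adjacencyMatrix, d_marked, d_independentSet,
                                          d_removedNodes, d_existsNonRemoved);
        cudaCheck(cudaMemcpy(&existsNonRemoved, d_existsNonRemoved, sizeof(int),
                             cudaMemcpyDeviceToHost));
    }
}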
97b1d2683b9d7adb5cf3d1d10b99a066fd2bccb1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <ops/declarable/helpers/image_suppression.h> #include <array/NDArrayFactory.h> #include <legacy/NativeOps.h> #include <exceptions/cuda_exception.h> #include <queue> namespace sd { namespace ops { namespace helpers { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // needToSuppressWithThreshold - predicate for suppression // boxes - boxes tensor buffer // boxesShape boxes tensor shape // previousIndex - index for current pos value // nextIndex - index for neighbor pos value // threshold - threashold value to suppress // // return value: true, if threshold is overcome, false otherwise // template <typename T> static __device__ bool needToSuppressWithThreshold(T* boxes, Nd4jLong const* boxesShape, int previousIndex, int nextIndex, T threshold) { Nd4jLong previous0[] = {previousIndex, 0}; Nd4jLong previous1[] = {previousIndex, 1}; Nd4jLong previous2[] = {previousIndex, 2}; Nd4jLong previous3[] = {previousIndex, 3}; Nd4jLong next0[] = {nextIndex, 0}; Nd4jLong next1[] = {nextIndex, 1}; Nd4jLong next2[] = {nextIndex, 2}; Nd4jLong next3[] = {nextIndex, 3}; // we have rectangle with given max values. 
Compute vexes of rectangle first T minYPrev = sd::math::nd4j_min(boxes[shape::getOffset(boxesShape, previous0)], boxes[shape::getOffset(boxesShape, previous2)]); T minXPrev = sd::math::nd4j_min(boxes[shape::getOffset(boxesShape, previous1)], boxes[shape::getOffset(boxesShape, previous3)]); T maxYPrev = sd::math::nd4j_max(boxes[shape::getOffset(boxesShape, previous0)], boxes[shape::getOffset(boxesShape, previous2)]); T maxXPrev = sd::math::nd4j_max(boxes[shape::getOffset(boxesShape, previous1)], boxes[shape::getOffset(boxesShape, previous3)]); T minYNext = sd::math::nd4j_min(boxes[shape::getOffset(boxesShape, next0)], boxes[shape::getOffset(boxesShape, next2)]); T minXNext = sd::math::nd4j_min(boxes[shape::getOffset(boxesShape, next1)], boxes[shape::getOffset(boxesShape, next3)]); T maxYNext = sd::math::nd4j_max(boxes[shape::getOffset(boxesShape, next0)], boxes[shape::getOffset(boxesShape, next2)]); T maxXNext = sd::math::nd4j_max(boxes[shape::getOffset(boxesShape, next1)], boxes[shape::getOffset(boxesShape, next3)]); // compute areas for comparation T areaPrev = (maxYPrev - minYPrev) * (maxXPrev - minXPrev); T areaNext = (maxYNext - minYNext) * (maxXNext - minXNext); // of course, areas should be positive if (areaNext <= T(0.f) || areaPrev <= T(0.f)) return false; // compute intersection of rectangles T minIntersectionY = sd::math::nd4j_max(minYPrev, minYNext); T minIntersectionX = sd::math::nd4j_max(minXPrev, minXNext); T maxIntersectionY = sd::math::nd4j_min(maxYPrev, maxYNext); T maxIntersectionX = sd::math::nd4j_min(maxXPrev, maxXNext); T intersectionArea = sd::math::nd4j_max(T(maxIntersectionY - minIntersectionY), T(0.0f)) * sd::math::nd4j_max(T(maxIntersectionX - minIntersectionX), T(0.0f)); T intersectionValue = intersectionArea / (areaPrev + areaNext - intersectionArea); // final check return intersectionValue > threshold; } template <typename T> static __device__ T similirityV3(T* boxes, Nd4jLong const* boxesShape, int previousIndex, int nextIndex) { Nd4jLong previous0[] = {previousIndex, 0}; Nd4jLong previous1[] = {previousIndex, 1}; Nd4jLong previous2[] = {previousIndex, 2}; Nd4jLong previous3[] = {previousIndex, 3}; Nd4jLong next0[] = {nextIndex, 0}; Nd4jLong next1[] = {nextIndex, 1}; Nd4jLong next2[] = {nextIndex, 2}; Nd4jLong next3[] = {nextIndex, 3}; // we have rectangle with given max values. 
Compute vexes of rectangle first T minYPrev = sd::math::nd4j_min(boxes[shape::getOffset(boxesShape, previous0)], boxes[shape::getOffset(boxesShape, previous2)]); T minXPrev = sd::math::nd4j_min(boxes[shape::getOffset(boxesShape, previous1)], boxes[shape::getOffset(boxesShape, previous3)]); T maxYPrev = sd::math::nd4j_max(boxes[shape::getOffset(boxesShape, previous0)], boxes[shape::getOffset(boxesShape, previous2)]); T maxXPrev = sd::math::nd4j_max(boxes[shape::getOffset(boxesShape, previous1)], boxes[shape::getOffset(boxesShape, previous3)]); T minYNext = sd::math::nd4j_min(boxes[shape::getOffset(boxesShape, next0)], boxes[shape::getOffset(boxesShape, next2)]); T minXNext = sd::math::nd4j_min(boxes[shape::getOffset(boxesShape, next1)], boxes[shape::getOffset(boxesShape, next3)]); T maxYNext = sd::math::nd4j_max(boxes[shape::getOffset(boxesShape, next0)], boxes[shape::getOffset(boxesShape, next2)]); T maxXNext = sd::math::nd4j_max(boxes[shape::getOffset(boxesShape, next1)], boxes[shape::getOffset(boxesShape, next3)]); // compute areas for comparation T areaPrev = (maxYPrev - minYPrev) * (maxXPrev - minXPrev); T areaNext = (maxYNext - minYNext) * (maxXNext - minXNext); // of course, areas should be positive if (areaNext <= T(0.f) || areaPrev <= T(0.f)) return false; // compute intersection of rectangles T minIntersectionY = sd::math::nd4j_max(minYPrev, minYNext); T minIntersectionX = sd::math::nd4j_max(minXPrev, minXNext); T maxIntersectionY = sd::math::nd4j_min(maxYPrev, maxYNext); T maxIntersectionX = sd::math::nd4j_min(maxXPrev, maxXNext); T intersectionArea = sd::math::nd4j_max(T(maxIntersectionY - minIntersectionY), T(0.0f)) * sd::math::nd4j_max(T(maxIntersectionX - minIntersectionX), T(0.0f)); T intersectionValue = intersectionArea / (areaPrev + areaNext - intersectionArea); // final check return intersectionValue; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // shouldSelectKernel - compute status for all selected rectangles (boxes) // // we compute boolean flag as shared uint32 and return it on final only for the first thread // template <typename T, typename I> static __global__ void shouldSelectKernel(T* boxesBuf, Nd4jLong const* boxesShape, I* indexBuf, I* selectedIndicesData, double threshold, int numSelected, int i, bool* shouldSelect) { auto tid = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; __shared__ unsigned int shouldSelectShared; if (threadIdx.x == 0) { shouldSelectShared = (unsigned int)shouldSelect[0]; } __syncthreads(); for (int j = numSelected - 1 - tid; j >= 0; j -= step) { if (shouldSelectShared) { if (needToSuppressWithThreshold(boxesBuf, boxesShape, indexBuf[i], indexBuf[selectedIndicesData[j]], T(threshold))) atomicCAS(&shouldSelectShared, 1, 0); // exchange only when need to suppress } } __syncthreads(); // final move: collect result if (threadIdx.x == 0) { *shouldSelect = shouldSelectShared > 0; } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // indices - type depended, indicesLong - type defined (only 64bit integers) // template <typename I> static __global__ void copyIndices(void* indices, void* indicesLong, Nd4jLong len) { I* indexBuf = reinterpret_cast<I*>(indices); Nd4jLong* srcBuf = reinterpret_cast<Nd4jLong*>(indicesLong);; auto tid = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (auto i = tid; i < len; i += step) indexBuf[i] = (I)srcBuf[i]; 
} template <typename T, typename I> static __global__ void suppressScores(T* scores, I* indices, Nd4jLong length, T scoreThreshold) { auto start = blockIdx.x * blockDim.x; auto step = gridDim.x * blockDim.x; for (auto e = start + threadIdx.x; e < (int)length; e += step) { if (scores[e] < scoreThreshold) { scores[e] = scoreThreshold; indices[e] = -1; } else { indices[e] = I(e); } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // nonMaxSuppressionV2 algorithm - given from TF NonMaxSuppressionV2 implementation // template <typename T, typename I> static void nonMaxSuppressionV2_(sd::LaunchContext* context, NDArray* boxes, NDArray* scales, int maxSize, double threshold, double scoreThreshold, NDArray* output) { auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({output}, {boxes, scales}); std::unique_ptr<NDArray> indices(NDArrayFactory::create_<I>('c', {scales->lengthOf()}, context)); // - 1, scales->lengthOf()); //, scales->getContext()); NDArray scores(*scales); Nd4jPointer extras[2] = {nullptr, stream}; auto indexBuf = indices->dataBuffer()->specialAsT<I>();///reinterpret_cast<I*>(indices->specialBuffer()); auto scoreBuf = scores.dataBuffer()->specialAsT<T>(); hipLaunchKernelGGL(( suppressScores<T,I>), dim3(128), dim3(128), 128, *stream, scoreBuf, indexBuf, scores.lengthOf(), T(scoreThreshold)); indices->tickWriteDevice(); sortByValue(extras, indices->buffer(), indices->shapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), scores.buffer(), scores.shapeInfo(), scores.specialBuffer(), scores.specialShapeInfo(), true); indices->tickWriteDevice(); NDArray selectedIndices = NDArrayFactory::create<I>('c', {output->lengthOf()}, context); int numSelected = 0; int numBoxes = boxes->sizeAt(0); auto boxesBuf = reinterpret_cast<T*>(boxes->specialBuffer()); auto selectedIndicesData = reinterpret_cast<I*>(selectedIndices.specialBuffer()); auto outputBuf = reinterpret_cast<I*>(output->specialBuffer()); bool* shouldSelectD; auto err = hipMalloc(&shouldSelectD, sizeof(bool)); if (err) { throw cuda_exception::build("helpers::nonMaxSuppressionV2: Cannot allocate memory for bool flag", err); } for (I i = 0; i < boxes->sizeAt(0); ++i) { bool shouldSelect = numSelected < output->lengthOf(); if (shouldSelect) { err = hipMemcpy(shouldSelectD, &shouldSelect, sizeof(bool), hipMemcpyHostToDevice); if (err) { throw cuda_exception::build("helpers::nonMaxSuppressionV2: Cannot set up bool flag to device", err); } hipLaunchKernelGGL(( shouldSelectKernel<T,I>), dim3(128), dim3(256), 1024, *stream, boxesBuf, boxes->specialShapeInfo(), indexBuf, selectedIndicesData, threshold, numSelected, i, shouldSelectD); err = hipMemcpy(&shouldSelect, shouldSelectD, sizeof(bool), hipMemcpyDeviceToHost); if (err) { throw cuda_exception::build("helpers::nonMaxSuppressionV2: Cannot set up bool flag to host", err); } } if (shouldSelect) { hipMemcpy(reinterpret_cast<I*>(output->specialBuffer()) + numSelected, indexBuf + i, sizeof(I), hipMemcpyDeviceToDevice); hipMemcpy(selectedIndicesData + numSelected, &i, sizeof(I), hipMemcpyHostToDevice); numSelected++; } } err = hipFree(shouldSelectD); if (err) { throw cuda_exception::build("helpers::nonMaxSuppressionV2: Cannot deallocate memory for bool flag", err); } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T, typename I> static __device__ bool checkOverlapBoxes(T* boxes, Nd4jLong const* shape, T* 
scores, I* indices, I* selectedIndices, I* startIndices, I selectedSize, I nextCandidateIndex, T overlapThreshold, T scoreThreshold, bool simple) { bool shouldHardSuppress = false; T& nextCandidateScore = scores[nextCandidateIndex]; I selectedIndex = indices[nextCandidateIndex]; I finish = startIndices[nextCandidateIndex]; for (int j = selectedSize; j > finish; --j) { T boxVal; if (simple) { Nd4jLong xPos[] = {selectedIndex, selectedIndices[j - 1]}; auto xShift = shape::getOffset(shape, xPos, 0); boxVal = boxes[xShift]; } else { boxVal = similirityV3(boxes, shape, selectedIndex, selectedIndices[j - 1]); } if (boxVal > static_cast<T>(overlapThreshold)) nextCandidateScore = static_cast<T>(0.f); // First decide whether to perform hard suppression if (boxVal >= overlapThreshold) { shouldHardSuppress = true; break; } // If nextCandidate survives hard suppression, apply soft suppression if (nextCandidateScore <= static_cast<T>(scoreThreshold)) break; } return shouldHardSuppress; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T, typename I> static __global__ void suppressNonMaxOverlapKernel(T* boxes, Nd4jLong const* boxesShape, T* scoresData, I* indices, I* startIndices, Nd4jLong length, I maxOutputLen, T overlapThreshold, T scoreThreshold, I* output, Nd4jLong const* outputShape, I* outputLength, bool simple) { __shared__ I selectedSize; __shared__ I* tempOutput; if (threadIdx.x == 0) { selectedSize = outputLength?*outputLength:maxOutputLen; extern __shared__ unsigned char shmem[]; tempOutput = (I*)shmem; } __syncthreads(); auto start = blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (I nextCandidateIndex = start + threadIdx.x; selectedSize < maxOutputLen && nextCandidateIndex < (I)length; ) { auto originalScore = scoresData[nextCandidateIndex];//nextCandidate._score; I nextCandidateBoxIndex = indices[nextCandidateIndex]; auto selectedSizeMark = selectedSize; // skip for cases when index is less than 0 (under score threshold) if (nextCandidateBoxIndex < 0) { nextCandidateIndex += step; continue; } // check for overlaps bool shouldHardSuppress = checkOverlapBoxes(boxes, boxesShape, scoresData, indices, tempOutput, startIndices, selectedSize, nextCandidateIndex, overlapThreshold, scoreThreshold, simple);//false; T nextCandidateScore = scoresData[nextCandidateIndex]; startIndices[nextCandidateIndex] = selectedSize; if (!shouldHardSuppress) { if (nextCandidateScore == originalScore) { // Suppression has not occurred, so select nextCandidate if (output) output[selectedSize] = nextCandidateBoxIndex; tempOutput[selectedSize] = nextCandidateBoxIndex; math::atomics::nd4j_atomicAdd(&selectedSize, (I)1); } if (nextCandidateScore > scoreThreshold) { // Soft suppression has occurred and current score is still greater than // scoreThreshold; add nextCandidate back onto priority queue. 
continue; // in some cases, this index not 0 } } nextCandidateIndex += step; } if (threadIdx.x == 0) { if (outputLength) *outputLength = selectedSize; } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T, typename I> static Nd4jLong nonMaxSuppressionGeneric_(sd::LaunchContext* context, NDArray* boxes, NDArray* scores, int outputSize, double overlapThreshold, double scoreThreshold, NDArray* output, bool simple) { auto stream = context->getCudaStream(); if (output) NDArray::prepareSpecialUse({output}, {boxes, scores}); else { if (!boxes->isActualOnDeviceSide()) boxes->syncToDevice(); if (!scores->isActualOnDeviceSide()) scores->syncToDevice(); } NDArray indices = NDArrayFactory::create<I>('c', {scores->lengthOf()}, context); // - 1, scales->lengthOf()); //, scales->getContext()); NDArray startPositions = NDArrayFactory::create<I>('c', {scores->lengthOf()}, context); NDArray selectedScores(*scores); Nd4jPointer extras[2] = {nullptr, stream}; auto indexBuf = indices.dataBuffer()->specialAsT<I>();///reinterpret_cast<I*>(indices->specialBuffer()); hipLaunchKernelGGL(( suppressScores), dim3(128), dim3(128), 128, *stream, selectedScores.dataBuffer()->specialAsT<T>(), indexBuf, selectedScores.lengthOf(), T(scoreThreshold)); sortByValue(extras, indices.buffer(), indices.shapeInfo(), indices.specialBuffer(), indices.specialShapeInfo(), selectedScores.buffer(), selectedScores.shapeInfo(), selectedScores.specialBuffer(), selectedScores.specialShapeInfo(), true); indices.tickWriteDevice(); selectedScores.tickWriteDevice(); auto scoresData = selectedScores.dataBuffer()->specialAsT<T>();//, numBoxes, scoresData.begin()); auto startIndices = startPositions.dataBuffer()->specialAsT<I>(); I selectedSize = 0; Nd4jLong res = 0; if (output) { // this part used when output shape already calculated to fill up values on output DataBuffer selectedSizeBuf(&selectedSize, sizeof(I), DataTypeUtils::fromT<I>()); hipLaunchKernelGGL(( suppressNonMaxOverlapKernel), dim3(1), dim3(1), 1024, *stream , boxes->dataBuffer()->specialAsT<T>(), boxes->specialShapeInfo(), scoresData, indexBuf, startIndices, scores->lengthOf(), (I) outputSize, T(overlapThreshold), T(scoreThreshold), output->dataBuffer()->specialAsT<I>(), output->specialShapeInfo(), selectedSizeBuf.specialAsT<I>(), simple); } else { // this case used on calculation of output shape. Output and output shape shoulde be nullptr. 
DataBuffer selectedSizeBuf(&selectedSize, sizeof(I), DataTypeUtils::fromT<I>()); hipLaunchKernelGGL(( suppressNonMaxOverlapKernel), dim3(1), dim3(1), 1024, *stream , boxes->dataBuffer()->specialAsT<T>(), boxes->specialShapeInfo(), scoresData, indexBuf, startIndices, scores->lengthOf(), (I)outputSize, T(overlapThreshold), T(scoreThreshold), (I*)nullptr, (Nd4jLong*) nullptr, selectedSizeBuf.specialAsT<I>(), simple); selectedSizeBuf.syncToPrimary(context, true); res = *selectedSizeBuf.primaryAsT<I>(); } if (output) NDArray::registerSpecialUse({output}, {boxes, scores}); return res; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void nonMaxSuppression(sd::LaunchContext * context, NDArray* boxes, NDArray* scales, int maxSize, double threshold, double scoreThreshold, NDArray* output) { BUILD_DOUBLE_SELECTOR(boxes->dataType(), output->dataType(), nonMaxSuppressionV2_, (context, boxes, scales, maxSize, threshold, scoreThreshold, output), FLOAT_TYPES, INDEXING_TYPES); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// Nd4jLong nonMaxSuppressionGeneric(sd::LaunchContext * context, NDArray* boxes, NDArray* scales, int maxSize, double threshold, double scoreThreshold, NDArray* output) { BUILD_DOUBLE_SELECTOR(boxes->dataType(), output ? output->dataType():DataType::INT32, return nonMaxSuppressionGeneric_, (context, boxes, scales, maxSize, threshold, scoreThreshold, output, true), FLOAT_TYPES, INDEXING_TYPES); return boxes->sizeAt(0); } Nd4jLong nonMaxSuppressionV3(sd::LaunchContext* context, NDArray* boxes, NDArray* scores, int maxSize, double overlapThreshold, double scoreThreshold, NDArray* output) { BUILD_DOUBLE_SELECTOR(boxes->dataType(), output ? output->dataType():DataType::INT32, return nonMaxSuppressionGeneric_, (context, boxes, scores, maxSize, overlapThreshold, scoreThreshold, output, false), FLOAT_TYPES, INDEXING_TYPES); return boxes->sizeAt(0); } } } }
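// A compact host-side restatement, not part of libnd4j, of what the device
// helpers needToSuppressWithThreshold/similirityV3 above compute: plain
// intersection-over-union on (y1, x1, y2, x2) rows, taking min/max first because
// the two corners may arrive in either order. Box and iou are illustrative names.
#include <algorithm>

struct Box { float y1, x1, y2, x2; };

static float iou(const Box &a, const Box &b) {
    float aMinY = std::min(a.y1, a.y2), aMaxY = std::max(a.y1, a.y2);
    float aMinX = std::min(a.x1, a.x2), aMaxX = std::max(a.x1, a.x2);
    float bMinY = std::min(b.y1, b.y2), bMaxY = std::max(b.y1, b.y2);
    float bMinX = std::min(b.x1, b.x2), bMaxX = std::max(b.x1, b.x2);
    float areaA = (aMaxY - aMinY) * (aMaxX - aMinX);
    float areaB = (bMaxY - bMinY) * (bMaxX - bMinX);
    if (areaA <= 0.f || areaB <= 0.f) return 0.f;   // degenerate boxes never suppress
    float interH = std::max(0.f, std::min(aMaxY, bMaxY) - std::max(aMinY, bMinY));
    float interW = std::max(0.f, std::min(aMaxX, bMaxX) - std::max(aMinX, bMinX));
    float inter  = interH * interW;
    return inter / (areaA + areaB - inter);
}
// needToSuppressWithThreshold(...) then reduces to: iou(previous, next) > threshold.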
97b1d2683b9d7adb5cf3d1d10b99a066fd2bccb1.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <ops/declarable/helpers/image_suppression.h> #include <array/NDArrayFactory.h> #include <legacy/NativeOps.h> #include <exceptions/cuda_exception.h> #include <queue> namespace sd { namespace ops { namespace helpers { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // needToSuppressWithThreshold - predicate for suppression // boxes - boxes tensor buffer // boxesShape boxes tensor shape // previousIndex - index for current pos value // nextIndex - index for neighbor pos value // threshold - threashold value to suppress // // return value: true, if threshold is overcome, false otherwise // template <typename T> static __device__ bool needToSuppressWithThreshold(T* boxes, Nd4jLong const* boxesShape, int previousIndex, int nextIndex, T threshold) { Nd4jLong previous0[] = {previousIndex, 0}; Nd4jLong previous1[] = {previousIndex, 1}; Nd4jLong previous2[] = {previousIndex, 2}; Nd4jLong previous3[] = {previousIndex, 3}; Nd4jLong next0[] = {nextIndex, 0}; Nd4jLong next1[] = {nextIndex, 1}; Nd4jLong next2[] = {nextIndex, 2}; Nd4jLong next3[] = {nextIndex, 3}; // we have rectangle with given max values. 
Compute vexes of rectangle first T minYPrev = sd::math::nd4j_min(boxes[shape::getOffset(boxesShape, previous0)], boxes[shape::getOffset(boxesShape, previous2)]); T minXPrev = sd::math::nd4j_min(boxes[shape::getOffset(boxesShape, previous1)], boxes[shape::getOffset(boxesShape, previous3)]); T maxYPrev = sd::math::nd4j_max(boxes[shape::getOffset(boxesShape, previous0)], boxes[shape::getOffset(boxesShape, previous2)]); T maxXPrev = sd::math::nd4j_max(boxes[shape::getOffset(boxesShape, previous1)], boxes[shape::getOffset(boxesShape, previous3)]); T minYNext = sd::math::nd4j_min(boxes[shape::getOffset(boxesShape, next0)], boxes[shape::getOffset(boxesShape, next2)]); T minXNext = sd::math::nd4j_min(boxes[shape::getOffset(boxesShape, next1)], boxes[shape::getOffset(boxesShape, next3)]); T maxYNext = sd::math::nd4j_max(boxes[shape::getOffset(boxesShape, next0)], boxes[shape::getOffset(boxesShape, next2)]); T maxXNext = sd::math::nd4j_max(boxes[shape::getOffset(boxesShape, next1)], boxes[shape::getOffset(boxesShape, next3)]); // compute areas for comparation T areaPrev = (maxYPrev - minYPrev) * (maxXPrev - minXPrev); T areaNext = (maxYNext - minYNext) * (maxXNext - minXNext); // of course, areas should be positive if (areaNext <= T(0.f) || areaPrev <= T(0.f)) return false; // compute intersection of rectangles T minIntersectionY = sd::math::nd4j_max(minYPrev, minYNext); T minIntersectionX = sd::math::nd4j_max(minXPrev, minXNext); T maxIntersectionY = sd::math::nd4j_min(maxYPrev, maxYNext); T maxIntersectionX = sd::math::nd4j_min(maxXPrev, maxXNext); T intersectionArea = sd::math::nd4j_max(T(maxIntersectionY - minIntersectionY), T(0.0f)) * sd::math::nd4j_max(T(maxIntersectionX - minIntersectionX), T(0.0f)); T intersectionValue = intersectionArea / (areaPrev + areaNext - intersectionArea); // final check return intersectionValue > threshold; } template <typename T> static __device__ T similirityV3(T* boxes, Nd4jLong const* boxesShape, int previousIndex, int nextIndex) { Nd4jLong previous0[] = {previousIndex, 0}; Nd4jLong previous1[] = {previousIndex, 1}; Nd4jLong previous2[] = {previousIndex, 2}; Nd4jLong previous3[] = {previousIndex, 3}; Nd4jLong next0[] = {nextIndex, 0}; Nd4jLong next1[] = {nextIndex, 1}; Nd4jLong next2[] = {nextIndex, 2}; Nd4jLong next3[] = {nextIndex, 3}; // we have rectangle with given max values. 
Compute vexes of rectangle first T minYPrev = sd::math::nd4j_min(boxes[shape::getOffset(boxesShape, previous0)], boxes[shape::getOffset(boxesShape, previous2)]); T minXPrev = sd::math::nd4j_min(boxes[shape::getOffset(boxesShape, previous1)], boxes[shape::getOffset(boxesShape, previous3)]); T maxYPrev = sd::math::nd4j_max(boxes[shape::getOffset(boxesShape, previous0)], boxes[shape::getOffset(boxesShape, previous2)]); T maxXPrev = sd::math::nd4j_max(boxes[shape::getOffset(boxesShape, previous1)], boxes[shape::getOffset(boxesShape, previous3)]); T minYNext = sd::math::nd4j_min(boxes[shape::getOffset(boxesShape, next0)], boxes[shape::getOffset(boxesShape, next2)]); T minXNext = sd::math::nd4j_min(boxes[shape::getOffset(boxesShape, next1)], boxes[shape::getOffset(boxesShape, next3)]); T maxYNext = sd::math::nd4j_max(boxes[shape::getOffset(boxesShape, next0)], boxes[shape::getOffset(boxesShape, next2)]); T maxXNext = sd::math::nd4j_max(boxes[shape::getOffset(boxesShape, next1)], boxes[shape::getOffset(boxesShape, next3)]); // compute areas for comparation T areaPrev = (maxYPrev - minYPrev) * (maxXPrev - minXPrev); T areaNext = (maxYNext - minYNext) * (maxXNext - minXNext); // of course, areas should be positive if (areaNext <= T(0.f) || areaPrev <= T(0.f)) return false; // compute intersection of rectangles T minIntersectionY = sd::math::nd4j_max(minYPrev, minYNext); T minIntersectionX = sd::math::nd4j_max(minXPrev, minXNext); T maxIntersectionY = sd::math::nd4j_min(maxYPrev, maxYNext); T maxIntersectionX = sd::math::nd4j_min(maxXPrev, maxXNext); T intersectionArea = sd::math::nd4j_max(T(maxIntersectionY - minIntersectionY), T(0.0f)) * sd::math::nd4j_max(T(maxIntersectionX - minIntersectionX), T(0.0f)); T intersectionValue = intersectionArea / (areaPrev + areaNext - intersectionArea); // final check return intersectionValue; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // shouldSelectKernel - compute status for all selected rectangles (boxes) // // we compute boolean flag as shared uint32 and return it on final only for the first thread // template <typename T, typename I> static __global__ void shouldSelectKernel(T* boxesBuf, Nd4jLong const* boxesShape, I* indexBuf, I* selectedIndicesData, double threshold, int numSelected, int i, bool* shouldSelect) { auto tid = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; __shared__ unsigned int shouldSelectShared; if (threadIdx.x == 0) { shouldSelectShared = (unsigned int)shouldSelect[0]; } __syncthreads(); for (int j = numSelected - 1 - tid; j >= 0; j -= step) { if (shouldSelectShared) { if (needToSuppressWithThreshold(boxesBuf, boxesShape, indexBuf[i], indexBuf[selectedIndicesData[j]], T(threshold))) atomicCAS(&shouldSelectShared, 1, 0); // exchange only when need to suppress } } __syncthreads(); // final move: collect result if (threadIdx.x == 0) { *shouldSelect = shouldSelectShared > 0; } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // indices - type depended, indicesLong - type defined (only 64bit integers) // template <typename I> static __global__ void copyIndices(void* indices, void* indicesLong, Nd4jLong len) { I* indexBuf = reinterpret_cast<I*>(indices); Nd4jLong* srcBuf = reinterpret_cast<Nd4jLong*>(indicesLong);; auto tid = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (auto i = tid; i < len; i += step) indexBuf[i] = (I)srcBuf[i]; 
} template <typename T, typename I> static __global__ void suppressScores(T* scores, I* indices, Nd4jLong length, T scoreThreshold) { auto start = blockIdx.x * blockDim.x; auto step = gridDim.x * blockDim.x; for (auto e = start + threadIdx.x; e < (int)length; e += step) { if (scores[e] < scoreThreshold) { scores[e] = scoreThreshold; indices[e] = -1; } else { indices[e] = I(e); } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // nonMaxSuppressionV2 algorithm - given from TF NonMaxSuppressionV2 implementation // template <typename T, typename I> static void nonMaxSuppressionV2_(sd::LaunchContext* context, NDArray* boxes, NDArray* scales, int maxSize, double threshold, double scoreThreshold, NDArray* output) { auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({output}, {boxes, scales}); std::unique_ptr<NDArray> indices(NDArrayFactory::create_<I>('c', {scales->lengthOf()}, context)); // - 1, scales->lengthOf()); //, scales->getContext()); NDArray scores(*scales); Nd4jPointer extras[2] = {nullptr, stream}; auto indexBuf = indices->dataBuffer()->specialAsT<I>();///reinterpret_cast<I*>(indices->specialBuffer()); auto scoreBuf = scores.dataBuffer()->specialAsT<T>(); suppressScores<T,I><<<128, 128, 128, *stream>>>(scoreBuf, indexBuf, scores.lengthOf(), T(scoreThreshold)); indices->tickWriteDevice(); sortByValue(extras, indices->buffer(), indices->shapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), scores.buffer(), scores.shapeInfo(), scores.specialBuffer(), scores.specialShapeInfo(), true); indices->tickWriteDevice(); NDArray selectedIndices = NDArrayFactory::create<I>('c', {output->lengthOf()}, context); int numSelected = 0; int numBoxes = boxes->sizeAt(0); auto boxesBuf = reinterpret_cast<T*>(boxes->specialBuffer()); auto selectedIndicesData = reinterpret_cast<I*>(selectedIndices.specialBuffer()); auto outputBuf = reinterpret_cast<I*>(output->specialBuffer()); bool* shouldSelectD; auto err = cudaMalloc(&shouldSelectD, sizeof(bool)); if (err) { throw cuda_exception::build("helpers::nonMaxSuppressionV2: Cannot allocate memory for bool flag", err); } for (I i = 0; i < boxes->sizeAt(0); ++i) { bool shouldSelect = numSelected < output->lengthOf(); if (shouldSelect) { err = cudaMemcpy(shouldSelectD, &shouldSelect, sizeof(bool), cudaMemcpyHostToDevice); if (err) { throw cuda_exception::build("helpers::nonMaxSuppressionV2: Cannot set up bool flag to device", err); } shouldSelectKernel<T,I><<<128, 256, 1024, *stream>>>(boxesBuf, boxes->specialShapeInfo(), indexBuf, selectedIndicesData, threshold, numSelected, i, shouldSelectD); err = cudaMemcpy(&shouldSelect, shouldSelectD, sizeof(bool), cudaMemcpyDeviceToHost); if (err) { throw cuda_exception::build("helpers::nonMaxSuppressionV2: Cannot set up bool flag to host", err); } } if (shouldSelect) { cudaMemcpy(reinterpret_cast<I*>(output->specialBuffer()) + numSelected, indexBuf + i, sizeof(I), cudaMemcpyDeviceToDevice); cudaMemcpy(selectedIndicesData + numSelected, &i, sizeof(I), cudaMemcpyHostToDevice); numSelected++; } } err = cudaFree(shouldSelectD); if (err) { throw cuda_exception::build("helpers::nonMaxSuppressionV2: Cannot deallocate memory for bool flag", err); } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T, typename I> static __device__ bool checkOverlapBoxes(T* boxes, Nd4jLong const* shape, T* scores, I* indices, I* selectedIndices, I* 
startIndices, I selectedSize, I nextCandidateIndex, T overlapThreshold, T scoreThreshold, bool simple) { bool shouldHardSuppress = false; T& nextCandidateScore = scores[nextCandidateIndex]; I selectedIndex = indices[nextCandidateIndex]; I finish = startIndices[nextCandidateIndex]; for (int j = selectedSize; j > finish; --j) { T boxVal; if (simple) { Nd4jLong xPos[] = {selectedIndex, selectedIndices[j - 1]}; auto xShift = shape::getOffset(shape, xPos, 0); boxVal = boxes[xShift]; } else { boxVal = similirityV3(boxes, shape, selectedIndex, selectedIndices[j - 1]); } if (boxVal > static_cast<T>(overlapThreshold)) nextCandidateScore = static_cast<T>(0.f); // First decide whether to perform hard suppression if (boxVal >= overlapThreshold) { shouldHardSuppress = true; break; } // If nextCandidate survives hard suppression, apply soft suppression if (nextCandidateScore <= static_cast<T>(scoreThreshold)) break; } return shouldHardSuppress; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T, typename I> static __global__ void suppressNonMaxOverlapKernel(T* boxes, Nd4jLong const* boxesShape, T* scoresData, I* indices, I* startIndices, Nd4jLong length, I maxOutputLen, T overlapThreshold, T scoreThreshold, I* output, Nd4jLong const* outputShape, I* outputLength, bool simple) { __shared__ I selectedSize; __shared__ I* tempOutput; if (threadIdx.x == 0) { selectedSize = outputLength?*outputLength:maxOutputLen; extern __shared__ unsigned char shmem[]; tempOutput = (I*)shmem; } __syncthreads(); auto start = blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (I nextCandidateIndex = start + threadIdx.x; selectedSize < maxOutputLen && nextCandidateIndex < (I)length; ) { auto originalScore = scoresData[nextCandidateIndex];//nextCandidate._score; I nextCandidateBoxIndex = indices[nextCandidateIndex]; auto selectedSizeMark = selectedSize; // skip for cases when index is less than 0 (under score threshold) if (nextCandidateBoxIndex < 0) { nextCandidateIndex += step; continue; } // check for overlaps bool shouldHardSuppress = checkOverlapBoxes(boxes, boxesShape, scoresData, indices, tempOutput, startIndices, selectedSize, nextCandidateIndex, overlapThreshold, scoreThreshold, simple);//false; T nextCandidateScore = scoresData[nextCandidateIndex]; startIndices[nextCandidateIndex] = selectedSize; if (!shouldHardSuppress) { if (nextCandidateScore == originalScore) { // Suppression has not occurred, so select nextCandidate if (output) output[selectedSize] = nextCandidateBoxIndex; tempOutput[selectedSize] = nextCandidateBoxIndex; math::atomics::nd4j_atomicAdd(&selectedSize, (I)1); } if (nextCandidateScore > scoreThreshold) { // Soft suppression has occurred and current score is still greater than // scoreThreshold; add nextCandidate back onto priority queue. 
continue; // in some cases, this index not 0 } } nextCandidateIndex += step; } if (threadIdx.x == 0) { if (outputLength) *outputLength = selectedSize; } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T, typename I> static Nd4jLong nonMaxSuppressionGeneric_(sd::LaunchContext* context, NDArray* boxes, NDArray* scores, int outputSize, double overlapThreshold, double scoreThreshold, NDArray* output, bool simple) { auto stream = context->getCudaStream(); if (output) NDArray::prepareSpecialUse({output}, {boxes, scores}); else { if (!boxes->isActualOnDeviceSide()) boxes->syncToDevice(); if (!scores->isActualOnDeviceSide()) scores->syncToDevice(); } NDArray indices = NDArrayFactory::create<I>('c', {scores->lengthOf()}, context); // - 1, scales->lengthOf()); //, scales->getContext()); NDArray startPositions = NDArrayFactory::create<I>('c', {scores->lengthOf()}, context); NDArray selectedScores(*scores); Nd4jPointer extras[2] = {nullptr, stream}; auto indexBuf = indices.dataBuffer()->specialAsT<I>();///reinterpret_cast<I*>(indices->specialBuffer()); suppressScores<<<128, 128, 128, *stream>>>(selectedScores.dataBuffer()->specialAsT<T>(), indexBuf, selectedScores.lengthOf(), T(scoreThreshold)); sortByValue(extras, indices.buffer(), indices.shapeInfo(), indices.specialBuffer(), indices.specialShapeInfo(), selectedScores.buffer(), selectedScores.shapeInfo(), selectedScores.specialBuffer(), selectedScores.specialShapeInfo(), true); indices.tickWriteDevice(); selectedScores.tickWriteDevice(); auto scoresData = selectedScores.dataBuffer()->specialAsT<T>();//, numBoxes, scoresData.begin()); auto startIndices = startPositions.dataBuffer()->specialAsT<I>(); I selectedSize = 0; Nd4jLong res = 0; if (output) { // this part used when output shape already calculated to fill up values on output DataBuffer selectedSizeBuf(&selectedSize, sizeof(I), DataTypeUtils::fromT<I>()); suppressNonMaxOverlapKernel<<<1, 1, 1024, *stream >>> (boxes->dataBuffer()->specialAsT<T>(), boxes->specialShapeInfo(), scoresData, indexBuf, startIndices, scores->lengthOf(), (I) outputSize, T(overlapThreshold), T(scoreThreshold), output->dataBuffer()->specialAsT<I>(), output->specialShapeInfo(), selectedSizeBuf.specialAsT<I>(), simple); } else { // this case used on calculation of output shape. Output and output shape shoulde be nullptr. 
DataBuffer selectedSizeBuf(&selectedSize, sizeof(I), DataTypeUtils::fromT<I>()); suppressNonMaxOverlapKernel<<<1, 1, 1024, *stream >>> (boxes->dataBuffer()->specialAsT<T>(), boxes->specialShapeInfo(), scoresData, indexBuf, startIndices, scores->lengthOf(), (I)outputSize, T(overlapThreshold), T(scoreThreshold), (I*)nullptr, (Nd4jLong*) nullptr, selectedSizeBuf.specialAsT<I>(), simple); selectedSizeBuf.syncToPrimary(context, true); res = *selectedSizeBuf.primaryAsT<I>(); } if (output) NDArray::registerSpecialUse({output}, {boxes, scores}); return res; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void nonMaxSuppression(sd::LaunchContext * context, NDArray* boxes, NDArray* scales, int maxSize, double threshold, double scoreThreshold, NDArray* output) { BUILD_DOUBLE_SELECTOR(boxes->dataType(), output->dataType(), nonMaxSuppressionV2_, (context, boxes, scales, maxSize, threshold, scoreThreshold, output), FLOAT_TYPES, INDEXING_TYPES); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// Nd4jLong nonMaxSuppressionGeneric(sd::LaunchContext * context, NDArray* boxes, NDArray* scales, int maxSize, double threshold, double scoreThreshold, NDArray* output) { BUILD_DOUBLE_SELECTOR(boxes->dataType(), output ? output->dataType():DataType::INT32, return nonMaxSuppressionGeneric_, (context, boxes, scales, maxSize, threshold, scoreThreshold, output, true), FLOAT_TYPES, INDEXING_TYPES); return boxes->sizeAt(0); } Nd4jLong nonMaxSuppressionV3(sd::LaunchContext* context, NDArray* boxes, NDArray* scores, int maxSize, double overlapThreshold, double scoreThreshold, NDArray* output) { BUILD_DOUBLE_SELECTOR(boxes->dataType(), output ? output->dataType():DataType::INT32, return nonMaxSuppressionGeneric_, (context, boxes, scores, maxSize, overlapThreshold, scoreThreshold, output, false), FLOAT_TYPES, INDEXING_TYPES); return boxes->sizeAt(0); } } } }
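// A sequential host reference, not part of libnd4j, for what nonMaxSuppressionV2_
// above computes once suppressScores has already dropped candidates below the
// score threshold: walk the remaining boxes in descending-score order and keep one
// only if it does not overlap an already-kept box beyond overlapThreshold. It
// reuses the illustrative Box/iou sketch shown after the .hip copy of this file;
// nmsReference and sortedIdx are illustrative names as well.
#include <vector>

static std::vector<int> nmsReference(const std::vector<Box> &boxes,
                                     const std::vector<int> &sortedIdx,  // by descending score
                                     float overlapThreshold, int maxOutputSize) {
    std::vector<int> kept;
    for (int candidate : sortedIdx) {
        if ((int)kept.size() >= maxOutputSize) break;
        bool suppressed = false;
        for (int k : kept) {
            if (iou(boxes[candidate], boxes[k]) > overlapThreshold) { suppressed = true; break; }
        }
        if (!suppressed) kept.push_back(candidate);
    }
    return kept;  // indices of the selected boxes, highest score first
}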
c2dbe6f2c27bead31542fc1d6c499211739472ba.hip
// !!! This is a file automatically generated by hipify!!! /* 1-bit BMMA code. Runs at 500TOPS for matrix size of 4096x4096x8192. Borrows largely from CUDA-SDK. By Boyuan */ #include <assert.h> #include <hip/hip_runtime.h> #include <mma.h> #include <stdio.h> #include <helper_cuda.h> #include <helper_functions.h> #define CHUNK_K 4 #define SKEW 1 #define WARPS_PER_BLOCK 8 #define WARP_SIZE 32 #define THREADS_PER_BLOCK WARP_SIZE * WARPS_PER_BLOCK #define CHUNK_LINE_BYTES CHUNK_K * sizeof(int4) #define WARP_COPY_BYTES WARP_SIZE * sizeof(int4) #define CHUNK_COPY_LINES_PER_WARP WARP_COPY_BYTES / CHUNK_LINE_BYTES #define CHUNK_COPY_LINE_LANES WARP_SIZE / CHUNK_COPY_LINES_PER_WARP #define BLOCK_ROW_WARPS 2 #define BLOCK_COL_WARPS 4 #define WARP_ROW_TILES 4 #define WARP_COL_TILES 2 #define BLOCK_ROW_TILES WARP_ROW_TILES * BLOCK_ROW_WARPS #define BLOCK_COL_TILES WARP_COL_TILES * BLOCK_COL_WARPS #define M 8 #define N 8 #define K 128 #define checkKernelErrors(expr) \ do { \ expr; \ \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \ hipGetErrorString(__err)); \ abort(); \ } \ } while (0) using namespace nvcuda; using namespace nvcuda::wmma::experimental; // Assume that Kernel size is 3x3. // Assume CIN is 128. __global__ void compute_conv_imma(const int4 *W, const int4 *X, int *Output, int Height, int Width, int CIN, int COUT) { // GEMM Configuration int X_bit_offset = Height * Width * CIN/128; extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here. // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_i = (block_pos/(COUT/64)) / (Width/8) * 4; const unsigned int block_j = (block_pos/(COUT/64)) % (Width/8) * 8; const unsigned int block_z = block_pos % (COUT/64) * 64; if (block_i >= Height) { break; } int image_starting_idx = block_i * 4 * Width * CIN/32 + block_j * 8 * CIN/32; wmma::fragment<wmma::accumulator, 8, 8, 128, int> c[WARP_COL_TILES] [WARP_ROW_TILES]; for(int i=0; i < WARP_COL_TILES; i++) for(int j=0; j < WARP_ROW_TILES; j++) wmma::fill_fragment(c[i][j], 0); int GL_idx; if (threadIdx.x < 240) { GL_idx = image_starting_idx + (threadIdx.x/40)*Width*CIN/32 + threadIdx.x%40; *((int*)&shmem[128][0]+threadIdx.x) = *((int*)X+GL_idx); } if (threadIdx.x < 240) { GL_idx = image_starting_idx + (threadIdx.x/40)*Width*CIN/32 + threadIdx.x%40 + X_bit_offset; *((int*)&shmem[128][0]+threadIdx.x+240) = *((int*)X+GL_idx); } __syncthreads(); // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < int(9*CIN/128/4); tile_k += CHUNK_K) { int SHMEM_i = threadIdx.x/4; int SHMEM_part = SHMEM_i / 32; int SHMEM_offset = SHMEM_i % 32; int feature_expand_idx = SHMEM_part * 15 * CIN/2 + (SHMEM_offset/8)*10*CIN/128 + (SHMEM_offset%8)*CIN/128; int t = threadIdx.x % 4; int thread_expand_idx = feature_expand_idx + (tile_k*4+t)/(3*CIN/128)*10*(CIN/128) + (tile_k*4+t)%(3*CIN/128); shmem[SHMEM_i][t] = *(&shmem[128][0]+thread_expand_idx); SHMEM_i += 64; int weight_load_idx = SHMEM_part * 9 * CIN * COUT / 128 + (block_z + SHMEM_offset) * 9 * CIN/128; int thread_load_idx = weight_load_idx + (tile_k*4 + t) * CIN/128; shmem[SHMEM_i][t] = W[thread_load_idx]; __syncthreads(); // Compute a grid of C matrix tiles in each warp. 
#pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 4 + (i * M); const int4 *tile_ptr = &shmem[shmem_idx_a][k_step]; wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128); #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = 64 + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)]; wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128); } // printf("ckpt4\n"); wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j]); } } } __syncthreads(); } // // Needs special handle for the remaining K. // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { int *tile_ptr = (int*)&shmem[0][0] + warpId*8*64 + (i*4+j) * 64; wmma::store_matrix_sync(tile_ptr, c[i][j], 8, wmma::mem_row_major); } } __syncthreads(); if (threadIdx.x < 32) { int num1 = 0; int num2 = 0; for (int j = 0; j < 32; j++) { int tile_i = threadIdx.x%16/8; int element_i = (threadIdx.x%16)%8; int tile_j = j%32/8; int element_j = (j%32)%8; int final_i = warpId * 8 + tile_i*4+tile_j; int final_j = element_i *8 + element_j; int v0 = *((int*)&shmem[0][0]+final_i*64+final_j); int v1 = *((int*)&shmem[0][0]+final_i*64+final_j+32); int v2 = *((int*)&shmem[0][0]+(final_i+32)*64+final_j); int v3 = *((int*)&shmem[0][0]+(final_i+32)*64+final_j+32); int tmp = v0 + 2*v1 + 2*v2 + 4*v3; int tmp1 = tmp&1; int tmp2 = tmp&2; num1 = (num1 << 1) | tmp1; num2 = (num2 << 1) | tmp2; } *(Output+(threadIdx.x/8)*Width + threadIdx.x%8) = num1; *(Output+(threadIdx.x/8)*Width + threadIdx.x%8+ Height*Width*COUT/32) = num2; } __syncthreads(); } } // void init_matrices(int4 *A, int4 *B){ // int *A_int = (int*) A; // int *B_int = (int*) B; // for(int i = 0; i < M_GLOBAL; i++) { // for(int j = 0; j < K_GLOBAL/32; j++) { // A_int[i*K_GLOBAL/32+j] = rand(); // } // } // for(int i = 0; i < N_GLOBAL; i++) { // for(int j = 0; j < K_GLOBAL/32; j++) { // B_int[i*K_GLOBAL/32+j] = 0xFFFFFFFF; // B_int[i*K_GLOBAL/32+j] = rand(); // } // } // } // int popcnt(int i) { // // Java: use int, and use >>> instead of >> // // C or C++: use int // i = i - ((i >> 1) & 0x55555555); // i = (i & 0x33333333) + ((i >> 2) & 0x33333333); // return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; // } // void compute_ref(int4 *A, int4 *B, int *ref_C) { // int *A_int = (int*) A; // int *B_int = (int*) B; // for (int m = 0; m < M_GLOBAL; m++) { // for (int n = 0; n < N_GLOBAL; n++) { // int tmp = 0; // for (int k = 0; k < K_GLOBAL; k += 32) { // // bit vector from row A and column B, accumulation and addition. 
// tmp += popcnt(A_int[(m*K_GLOBAL + k)/32] ^ B_int[(n*K_GLOBAL + k)/32]); // } // // ref_C[m * K + n]= K - 2 * tmp; // ref_C[m * N_GLOBAL + n]= tmp; // } // } // } // void validate_results(int *C, int* ref_C, int M_, int N_) { // printf("Checking computed result for correctness: "); // bool correct = true; // double eps = 1.e-6; // machine zero // for(int i = 0; i < M_; i++) { // for(int j = 0; j < N_; j++) { // int idx = i*N_+j; // double dst = fabs(C[idx] - ref_C[idx]); // double abs = fabs(C[idx]) * fabs(ref_C[idx]); // double ref_err = dst / abs; // if (ref_err > eps) { // // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); // printf("i: %d, j: %d, C: %d, ref_C: %d\n", i, j, C[idx], ref_C[idx]); // // printf("non equal\n"); // correct = false; // } // } // } // printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); // } // #define verify_output int main(int argc, char **argv) { printf("Initializing...\n"); int dev = findCudaDevice(argc, (const char **)argv); hipDeviceProp_t deviceProp; checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev)); int Height = 256; int Width = 32; int CIN = 128; int COUT = 128; int bit = 2; int4 *X = NULL; int4 *W = NULL; int *Output = NULL; checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * Height * Width * (CIN/128) * bit)); checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * 9 * (CIN/128) * COUT * bit)); checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&Output), sizeof(int4) * Height * Width * (COUT/128) * bit)); // #ifdef verify_output // printf("Preparing validation data for GPU...\n"); // int4 *W_h = NULL; // int4 *X_h = NULL; // int *Output_h = NULL; // X_h = (int4 *)malloc(sizeof(int4) * H * W * (CIN/128) * X_bit); // W_h = (int4 *)malloc(sizeof(int4) * 9 * (CIN/128) * COUT * W_bit); // Output_h = (int *)malloc(sizeof(int4) * H * W * (COUT/128) * X_bit); // init_matrices(A_h, B_h); // checkCudaErrors(hipMemcpy(A, A_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128), hipMemcpyHostToDevice)); // checkCudaErrors(hipMemcpy(B, B_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128), hipMemcpyHostToDevice)); // #endif int SHMEM_SZ = 65536; checkCudaErrors(hipFuncSetAttribute( compute_conv_imma, hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); // Run ours NUM_PROFILES times and record time. 
float bmma_ms_avg = 0.0f; for(int iter=0; iter<200; ++iter){ float bmma_ms = 0.0f; hipEvent_t bmma_start; hipEvent_t bmma_end; hipEventCreate(&bmma_start); hipEventCreate(&bmma_end); hipEventRecord(bmma_start); checkKernelErrors( hipLaunchKernelGGL(( (compute_conv_imma), dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK), SHMEM_SZ, 0, W, X, Output, Height, Width, CIN, COUT))); hipEventRecord(bmma_end); hipEventSynchronize(bmma_end); hipEventElapsedTime(&bmma_ms, bmma_start, bmma_end); hipEventDestroy(bmma_start); hipEventDestroy(bmma_end); bmma_ms_avg += bmma_ms; } bmma_ms_avg = bmma_ms_avg/200.0f; printf("Time: %f ms\n", bmma_ms_avg); printf("TOPS: %.2f\n", (((double)9 * CIN * Height * Width * COUT * 2)/(bmma_ms_avg/1000.)) / 1e12); // #ifdef verify_output // printf("Validating results...\n"); // checkCudaErrors(hipMemcpy(C_h, C, sizeof(int) * M_GLOBAL * N_GLOBAL, hipMemcpyDeviceToHost)); // int *C_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL); // /* Copmpute reference matrix on CPU */ // // compute_ref(A_h, B_h, C_ref); // /* validation results */ // // validate_results(C_h, C_ref, M_GLOBAL, N_GLOBAL); // #endif // free(A_h); // free(B_h); // free(C_h); // checkCudaErrors(hipFree(reinterpret_cast<void *>(A))); // checkCudaErrors(hipFree(reinterpret_cast<void *>(B))); // checkCudaErrors(hipFree(reinterpret_cast<void *>(C))); return EXIT_SUCCESS; }
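// A cleaned-up host sketch (not part of the original benchmark) of the
// popcnt/compute_ref reference that is commented out above: with 1-bit operands
// packed 32 per int, one accumulation step of the b1 bmma path used by the kernel
// is the popcount of the XOR of the packed words. binaryDot is an illustrative
// name and covers a single K-long dot product only, not the 2-bit recombination
// (v0 + 2*v1 + 2*v2 + 4*v3) done at the end of compute_conv_imma.
#include <cstdint>

static int popcnt32(uint32_t v) {
    v = v - ((v >> 1) & 0x55555555u);
    v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);
    return (int)((((v + (v >> 4)) & 0x0F0F0F0Fu) * 0x01010101u) >> 24);
}

// Dot product of two K-bit binary vectors stored as K/32 packed 32-bit words.
static int binaryDot(const uint32_t *a, const uint32_t *b, int kWords) {
    int acc = 0;
    for (int w = 0; w < kWords; ++w)
        acc += popcnt32(a[w] ^ b[w]);
    return acc;
}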
c2dbe6f2c27bead31542fc1d6c499211739472ba.cu
/* 1-bit BMMA code. Runs at 500TOPS for matrix size of 4096x4096x8192. Borrows largely from CUDA-SDK. By Boyuan */ #include <assert.h> #include <cuda.h> #include <mma.h> #include <stdio.h> #include <helper_cuda.h> #include <helper_functions.h> #define CHUNK_K 4 #define SKEW 1 #define WARPS_PER_BLOCK 8 #define WARP_SIZE 32 #define THREADS_PER_BLOCK WARP_SIZE * WARPS_PER_BLOCK #define CHUNK_LINE_BYTES CHUNK_K * sizeof(int4) #define WARP_COPY_BYTES WARP_SIZE * sizeof(int4) #define CHUNK_COPY_LINES_PER_WARP WARP_COPY_BYTES / CHUNK_LINE_BYTES #define CHUNK_COPY_LINE_LANES WARP_SIZE / CHUNK_COPY_LINES_PER_WARP #define BLOCK_ROW_WARPS 2 #define BLOCK_COL_WARPS 4 #define WARP_ROW_TILES 4 #define WARP_COL_TILES 2 #define BLOCK_ROW_TILES WARP_ROW_TILES * BLOCK_ROW_WARPS #define BLOCK_COL_TILES WARP_COL_TILES * BLOCK_COL_WARPS #define M 8 #define N 8 #define K 128 #define checkKernelErrors(expr) \ do { \ expr; \ \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \ cudaGetErrorString(__err)); \ abort(); \ } \ } while (0) using namespace nvcuda; using namespace nvcuda::wmma::experimental; // Assume that Kernel size is 3x3. // Assume CIN is 128. __global__ void compute_conv_imma(const int4 *W, const int4 *X, int *Output, int Height, int Width, int CIN, int COUT) { // GEMM Configuration int X_bit_offset = Height * Width * CIN/128; extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here. // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_i = (block_pos/(COUT/64)) / (Width/8) * 4; const unsigned int block_j = (block_pos/(COUT/64)) % (Width/8) * 8; const unsigned int block_z = block_pos % (COUT/64) * 64; if (block_i >= Height) { break; } int image_starting_idx = block_i * 4 * Width * CIN/32 + block_j * 8 * CIN/32; wmma::fragment<wmma::accumulator, 8, 8, 128, int> c[WARP_COL_TILES] [WARP_ROW_TILES]; for(int i=0; i < WARP_COL_TILES; i++) for(int j=0; j < WARP_ROW_TILES; j++) wmma::fill_fragment(c[i][j], 0); int GL_idx; if (threadIdx.x < 240) { GL_idx = image_starting_idx + (threadIdx.x/40)*Width*CIN/32 + threadIdx.x%40; *((int*)&shmem[128][0]+threadIdx.x) = *((int*)X+GL_idx); } if (threadIdx.x < 240) { GL_idx = image_starting_idx + (threadIdx.x/40)*Width*CIN/32 + threadIdx.x%40 + X_bit_offset; *((int*)&shmem[128][0]+threadIdx.x+240) = *((int*)X+GL_idx); } __syncthreads(); // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < int(9*CIN/128/4); tile_k += CHUNK_K) { int SHMEM_i = threadIdx.x/4; int SHMEM_part = SHMEM_i / 32; int SHMEM_offset = SHMEM_i % 32; int feature_expand_idx = SHMEM_part * 15 * CIN/2 + (SHMEM_offset/8)*10*CIN/128 + (SHMEM_offset%8)*CIN/128; int t = threadIdx.x % 4; int thread_expand_idx = feature_expand_idx + (tile_k*4+t)/(3*CIN/128)*10*(CIN/128) + (tile_k*4+t)%(3*CIN/128); shmem[SHMEM_i][t] = *(&shmem[128][0]+thread_expand_idx); SHMEM_i += 64; int weight_load_idx = SHMEM_part * 9 * CIN * COUT / 128 + (block_z + SHMEM_offset) * 9 * CIN/128; int thread_load_idx = weight_load_idx + (tile_k*4 + t) * CIN/128; shmem[SHMEM_i][t] = W[thread_load_idx]; __syncthreads(); // Compute a grid of C matrix tiles in each warp. 
#pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 4 + (i * M); const int4 *tile_ptr = &shmem[shmem_idx_a][k_step]; wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128); #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = 64 + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)]; wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128); } // printf("ckpt4\n"); wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j]); } } } __syncthreads(); } // // Needs special handle for the remaining K. // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { int *tile_ptr = (int*)&shmem[0][0] + warpId*8*64 + (i*4+j) * 64; wmma::store_matrix_sync(tile_ptr, c[i][j], 8, wmma::mem_row_major); } } __syncthreads(); if (threadIdx.x < 32) { int num1 = 0; int num2 = 0; for (int j = 0; j < 32; j++) { int tile_i = threadIdx.x%16/8; int element_i = (threadIdx.x%16)%8; int tile_j = j%32/8; int element_j = (j%32)%8; int final_i = warpId * 8 + tile_i*4+tile_j; int final_j = element_i *8 + element_j; int v0 = *((int*)&shmem[0][0]+final_i*64+final_j); int v1 = *((int*)&shmem[0][0]+final_i*64+final_j+32); int v2 = *((int*)&shmem[0][0]+(final_i+32)*64+final_j); int v3 = *((int*)&shmem[0][0]+(final_i+32)*64+final_j+32); int tmp = v0 + 2*v1 + 2*v2 + 4*v3; int tmp1 = tmp&1; int tmp2 = tmp&2; num1 = (num1 << 1) | tmp1; num2 = (num2 << 1) | tmp2; } *(Output+(threadIdx.x/8)*Width + threadIdx.x%8) = num1; *(Output+(threadIdx.x/8)*Width + threadIdx.x%8+ Height*Width*COUT/32) = num2; } __syncthreads(); } } // void init_matrices(int4 *A, int4 *B){ // int *A_int = (int*) A; // int *B_int = (int*) B; // for(int i = 0; i < M_GLOBAL; i++) { // for(int j = 0; j < K_GLOBAL/32; j++) { // A_int[i*K_GLOBAL/32+j] = rand(); // } // } // for(int i = 0; i < N_GLOBAL; i++) { // for(int j = 0; j < K_GLOBAL/32; j++) { // B_int[i*K_GLOBAL/32+j] = 0xFFFFFFFF; // B_int[i*K_GLOBAL/32+j] = rand(); // } // } // } // int popcnt(int i) { // // Java: use int, and use >>> instead of >> // // C or C++: use int // i = i - ((i >> 1) & 0x55555555); // i = (i & 0x33333333) + ((i >> 2) & 0x33333333); // return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; // } // void compute_ref(int4 *A, int4 *B, int *ref_C) { // int *A_int = (int*) A; // int *B_int = (int*) B; // for (int m = 0; m < M_GLOBAL; m++) { // for (int n = 0; n < N_GLOBAL; n++) { // int tmp = 0; // for (int k = 0; k < K_GLOBAL; k += 32) { // // bit vector from row A and column B, accumulation and addition. 
// tmp += popcnt(A_int[(m*K_GLOBAL + k)/32] ^ B_int[(n*K_GLOBAL + k)/32]); // } // // ref_C[m * K + n]= K - 2 * tmp; // ref_C[m * N_GLOBAL + n]= tmp; // } // } // } // void validate_results(int *C, int* ref_C, int M_, int N_) { // printf("Checking computed result for correctness: "); // bool correct = true; // double eps = 1.e-6; // machine zero // for(int i = 0; i < M_; i++) { // for(int j = 0; j < N_; j++) { // int idx = i*N_+j; // double dst = fabs(C[idx] - ref_C[idx]); // double abs = fabs(C[idx]) * fabs(ref_C[idx]); // double ref_err = dst / abs; // if (ref_err > eps) { // // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); // printf("i: %d, j: %d, C: %d, ref_C: %d\n", i, j, C[idx], ref_C[idx]); // // printf("non equal\n"); // correct = false; // } // } // } // printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); // } // #define verify_output int main(int argc, char **argv) { printf("Initializing...\n"); int dev = findCudaDevice(argc, (const char **)argv); cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev)); int Height = 256; int Width = 32; int CIN = 128; int COUT = 128; int bit = 2; int4 *X = NULL; int4 *W = NULL; int *Output = NULL; checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * Height * Width * (CIN/128) * bit)); checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * 9 * (CIN/128) * COUT * bit)); checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&Output), sizeof(int4) * Height * Width * (COUT/128) * bit)); // #ifdef verify_output // printf("Preparing validation data for GPU...\n"); // int4 *W_h = NULL; // int4 *X_h = NULL; // int *Output_h = NULL; // X_h = (int4 *)malloc(sizeof(int4) * H * W * (CIN/128) * X_bit); // W_h = (int4 *)malloc(sizeof(int4) * 9 * (CIN/128) * COUT * W_bit); // Output_h = (int *)malloc(sizeof(int4) * H * W * (COUT/128) * X_bit); // init_matrices(A_h, B_h); // checkCudaErrors(cudaMemcpy(A, A_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128), cudaMemcpyHostToDevice)); // checkCudaErrors(cudaMemcpy(B, B_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128), cudaMemcpyHostToDevice)); // #endif int SHMEM_SZ = 65536; checkCudaErrors(cudaFuncSetAttribute( compute_conv_imma, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); // Run ours NUM_PROFILES times and record time. 
float bmma_ms_avg = 0.0f; for(int iter=0; iter<200; ++iter){ float bmma_ms = 0.0f; cudaEvent_t bmma_start; cudaEvent_t bmma_end; cudaEventCreate(&bmma_start); cudaEventCreate(&bmma_end); cudaEventRecord(bmma_start); checkKernelErrors( (compute_conv_imma<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK, SHMEM_SZ>>>(W, X, Output, Height, Width, CIN, COUT))); cudaEventRecord(bmma_end); cudaEventSynchronize(bmma_end); cudaEventElapsedTime(&bmma_ms, bmma_start, bmma_end); cudaEventDestroy(bmma_start); cudaEventDestroy(bmma_end); bmma_ms_avg += bmma_ms; } bmma_ms_avg = bmma_ms_avg/200.0f; printf("Time: %f ms\n", bmma_ms_avg); printf("TOPS: %.2f\n", (((double)9 * CIN * Height * Width * COUT * 2)/(bmma_ms_avg/1000.)) / 1e12); // #ifdef verify_output // printf("Validating results...\n"); // checkCudaErrors(cudaMemcpy(C_h, C, sizeof(int) * M_GLOBAL * N_GLOBAL, cudaMemcpyDeviceToHost)); // int *C_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL); // /* Copmpute reference matrix on CPU */ // // compute_ref(A_h, B_h, C_ref); // /* validation results */ // // validate_results(C_h, C_ref, M_GLOBAL, N_GLOBAL); // #endif // free(A_h); // free(B_h); // free(C_h); // checkCudaErrors(cudaFree(reinterpret_cast<void *>(A))); // checkCudaErrors(cudaFree(reinterpret_cast<void *>(B))); // checkCudaErrors(cudaFree(reinterpret_cast<void *>(C))); return EXIT_SUCCESS; }
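// Hedged sketch (host-only; the name bit_dot_xor is illustrative) of the 1-bit dot-product
// convention that the commented-out compute_ref()/popcnt() reference above relies on: with both
// operands packed 32 bits per word, XOR followed by popcount counts mismatching bit positions,
// so a K-bit binary multiply-accumulate reduces to summing popcounts over K/32 packed words.
#include <cstdint>

static inline int bit_dot_xor(const uint32_t *a, const uint32_t *b, int k_bits) {
    int mismatches = 0;
    for (int k = 0; k < k_bits / 32; ++k)
        mismatches += __builtin_popcount(a[k] ^ b[k]);   // equivalent to the hand-rolled popcnt() above
    return mismatches;   // under a +1/-1 encoding this corresponds to k_bits - 2 * mismatches
}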
7c26cf84cb22e65bf8d13bc7816e77e76954cb31.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef __Rabbit_KERNEL_CU__ #define __Rabbit_KERNEL_CU__ #define __mem(mm,i,j,N) ((mm)[(i)+(j)*(N)]) #define max(a,b) (((a)>(b))?(a):(b)) #define min(a,b) (((a)<(b))?(a):(b)) #define rotl32(v, n) \ ((u32)((v) << (n)) | ((v) >> (32 - (n)))) #define DEFINE_STATE(x,c,carry)\ u32 x##0, x##1, x##2, x##3,\ x##4, x##5, x##6, x##7;\ u32 c##0, c##1, c##2, c##3,\ c##4, c##5, c##6, c##7;\ u32 carry #define LOAD8(s,g_s)\ do {\ s##0 = __mem((g_s),tID,0,nr_streams);\ s##1 = __mem((g_s),tID,1,nr_streams);\ s##2 = __mem((g_s),tID,2,nr_streams);\ s##3 = __mem((g_s),tID,3,nr_streams);\ s##4 = __mem((g_s),tID,4,nr_streams);\ s##5 = __mem((g_s),tID,5,nr_streams);\ s##6 = __mem((g_s),tID,6,nr_streams);\ s##7 = __mem((g_s),tID,7,nr_streams);\ } while(0) #define LOAD_STATE(x,g_x,c,g_c,carry,g_carry)\ do {\ LOAD8(x,g_x); LOAD8(c,g_c);\ carry = __mem((g_carry),tID,0,nr_streams);\ } while(0) #define STORE8(s,g_s)\ do {\ __mem((g_s),tID,0,nr_streams) = s##0;\ __mem((g_s),tID,1,nr_streams) = s##1;\ __mem((g_s),tID,2,nr_streams) = s##2;\ __mem((g_s),tID,3,nr_streams) = s##3;\ __mem((g_s),tID,4,nr_streams) = s##4;\ __mem((g_s),tID,5,nr_streams) = s##5;\ __mem((g_s),tID,6,nr_streams) = s##6;\ __mem((g_s),tID,7,nr_streams) = s##7;\ } while(0) #define SAVE_STATE(x,g_x,c,g_c,carry,g_carry)\ do {\ STORE8(x,g_x); STORE8(c,g_c);\ __mem((g_carry),tID,0,nr_streams) = carry;\ } while(0) __device__ u32 g_func(u32 x) { u32 a, b, h, l; a = x&0xFFFF; b = x>>16; h = ((((u32)(a*a)>>17) + (u32)(a*b))>>15) + b*b; l = x*x; return (u32)(h^l); } #define NEXT_STATE()\ do {\ u32 g0,g1,g2,g3,\ g4,g5,g6,g7;\ \ /* Temporary variables */\ u32 c_prev,c_tmp;\ \ /* Calculate new counter values */\ c_prev=c0;c0 = (u32)(c0 + 0x4D34D34D + carry);\ c_tmp=c1; c1 = (u32)(c1 + 0xD34D34D3 + (c0 < c_prev)); c_prev=c_tmp;\ c_tmp=c2; c2 = (u32)(c2 + 0x34D34D34 + (c1 < c_prev)); c_prev=c_tmp;\ c_tmp=c3; c3 = (u32)(c3 + 0x4D34D34D + (c2 < c_prev)); c_prev=c_tmp;\ c_tmp=c4; c4 = (u32)(c4 + 0xD34D34D3 + (c3 < c_prev)); c_prev=c_tmp;\ c_tmp=c5; c5 = (u32)(c5 + 0x34D34D34 + (c4 < c_prev)); c_prev=c_tmp;\ c_tmp=c6; c6 = (u32)(c6 + 0x4D34D34D + (c5 < c_prev)); c_prev=c_tmp;\ c_tmp=c7; c7 = (u32)(c7 + 0xD34D34D3 + (c6 < c_prev));\ carry = (c7 < c_tmp);\ g0=g_func((u32)(x0+c0));\ g1=g_func((u32)(x1+c1));\ g2=g_func((u32)(x2+c2));\ g3=g_func((u32)(x3+c3));\ g4=g_func((u32)(x4+c4));\ g5=g_func((u32)(x5+c5));\ g6=g_func((u32)(x6+c6));\ g7=g_func((u32)(x7+c7));\ x0 = g0;\ x1 = g1;\ x2 = g2;\ x3 = g3;\ x4 = g4;\ x5 = g5;\ x6 = g6;\ x7 = g7;\ x0 = (u32)(g0 + rotl32(g7,16) + rotl32(g6, 16));\ x1 = (u32)(g1 + rotl32(g0, 8) + g7);\ x2 = (u32)(g2 + rotl32(g1,16) + rotl32(g0, 16));\ x3 = (u32)(g3 + rotl32(g2, 8) + g1);\ x4 = (u32)(g4 + rotl32(g3,16) + rotl32(g2, 16));\ x5 = (u32)(g5 + rotl32(g4, 8) + g3);\ x6 = (u32)(g6 + rotl32(g5,16) + rotl32(g4, 16));\ x7 = (u32)(g7 + rotl32(g6, 8) + g5);\ } while(0) __global__ void Rabbit_keysetup(u32* g_x, u32 *g_c, u32 *g_carry, u32 *keys, u32 key_size) { u32 tID=blockIdx.x*blockDim.x+threadIdx.x; u32 nr_streams=blockDim.x*gridDim.x; u32 k0, k1, k2, k3; DEFINE_STATE(x,c,carry); k0=__mem(keys,tID,0,nr_streams); k1=__mem(keys,tID,1,nr_streams); k2=__mem(keys,tID,2,nr_streams); k3=__mem(keys,tID,3,nr_streams); x0 = k0; x2 = k1; x4 = k2; x6 = k3; x1 = (u32)(k3<<16) | (k2>>16); x3 = (u32)(k0<<16) | (k3>>16); x5 = (u32)(k1<<16) | (k0>>16); x7 = (u32)(k2<<16) | (k1>>16); c0 = rotl32(k2, 16); c2 = rotl32(k3, 16); c4 = rotl32(k0, 16); c6 = rotl32(k1, 16); c1 = 
(k0&0xFFFF0000) | (k1&0xFFFF); c3 = (k1&0xFFFF0000) | (k2&0xFFFF); c5 = (k2&0xFFFF0000) | (k3&0xFFFF); c7 = (k3&0xFFFF0000) | (k0&0xFFFF); carry = 0; for(int i=0;i<4;i++) { NEXT_STATE(); } c0 ^= x4; c1 ^= x5; c2 ^= x6; c3 ^= x7; c4 ^= x0; c5 ^= x1; c6 ^= x2; c7 ^= x3; SAVE_STATE(x,g_x,c,g_c,carry,g_carry); } __global__ void Rabbit_ivsetup(u32* g_x, u32 *g_c, u32 *g_carry, u32 *ivs, u32 iv_size) { u32 tID=blockIdx.x*blockDim.x+threadIdx.x; u32 nr_streams=blockDim.x*gridDim.x; u32 i0, i1, i2, i3; DEFINE_STATE(x,c,carry); LOAD_STATE(x,g_x,c,g_c,carry,g_carry); i0=__mem(ivs,tID,0,nr_streams); i2=__mem(ivs,tID,1,nr_streams); i1 = (i0>>16) | (i2&0xFFFF0000); i3 = (i2<<16) | (i0&0x0000FFFF); c0 ^= i0; c1 ^= i1; c2 ^= i2; c3 ^= i3; c4 ^= i0; c5 ^= i1; c6 ^= i2; c7 ^= i3; for(int i=0;i<4;i++) { NEXT_STATE(); } SAVE_STATE(x,g_x,c,g_c,carry,g_carry); } __global__ void Rabbit_process_bytes(gSTREAM_action act, u32* g_x, u32 *g_c, u32 *g_carry, u32 *buff, u32 nr_words) { u32 tID=blockIdx.x*blockDim.x+threadIdx.x; u32 nr_streams=blockDim.x*gridDim.x; DEFINE_STATE(x,c,carry); LOAD_STATE(x,g_x,c,g_c,carry,g_carry); for(int w=0;w<nr_words/4;w++) { NEXT_STATE(); if(act!=GEN_KEYSTREAM) { __mem(buff,tID,(4*w),nr_streams) ^= x0^(x5>>16)^(u32)(x3<<16); __mem(buff,tID,(4*w+1),nr_streams) ^= x2^(x7>>16)^(u32)(x5<<16); __mem(buff,tID,(4*w+2),nr_streams) ^= x4^(x1>>16)^(u32)(x7<<16); __mem(buff,tID,(4*w+3),nr_streams) ^= x6^(x3>>16)^(u32)(x1<<16); } else { __mem(buff,tID,(4*w),nr_streams) = x0^(x5>>16)^(u32)(x3<<16); __mem(buff,tID,(4*w+1),nr_streams) = x2^(x7>>16)^(u32)(x5<<16); __mem(buff,tID,(4*w+2),nr_streams) = x4^(x1>>16)^(u32)(x7<<16); __mem(buff,tID,(4*w+3),nr_streams) = x6^(x3>>16)^(u32)(x1<<16); } } if(nr_words%4) { /* handle remaining partial 4-byte blocks */ NEXT_STATE(); if(act!=GEN_KEYSTREAM) { if((nr_words%4)==3) { __mem(buff,tID,(nr_words-3),nr_streams) ^= x0^(x5>>16)^(u32)(x3<<16); __mem(buff,tID,(nr_words-2),nr_streams) ^= x2^(x7>>16)^(u32)(x5<<16); __mem(buff,tID,(nr_words-1),nr_streams) ^= x4^(x1>>16)^(u32)(x7<<16); } else if((nr_words%4)==2) { __mem(buff,tID,(nr_words-2),nr_streams) ^= x0^(x5>>16)^(u32)(x3<<16); __mem(buff,tID,(nr_words-1),nr_streams) ^= x2^(x7>>16)^(u32)(x5<<16); } else { //==1 __mem(buff,tID,(nr_words-1),nr_streams) ^= x0^(x5>>16)^(u32)(x3<<16); } } else { if((nr_words%4)==3) { __mem(buff,tID,(nr_words-3),nr_streams) = x0^(x5>>16)^(u32)(x3<<16); __mem(buff,tID,(nr_words-2),nr_streams) = x2^(x7>>16)^(u32)(x5<<16); __mem(buff,tID,(nr_words-1),nr_streams) = x4^(x1>>16)^(u32)(x7<<16); } else if((nr_words%4)==2) { __mem(buff,tID,(nr_words-2),nr_streams) = x0^(x5>>16)^(u32)(x3<<16); __mem(buff,tID,(nr_words-1),nr_streams) = x2^(x7>>16)^(u32)(x5<<16); } else { //==1 __mem(buff,tID,(nr_words-1),nr_streams) = x0^(x5>>16)^(u32)(x3<<16); } } } SAVE_STATE(x,g_x,c,g_c,carry,g_carry); } #endif
7c26cf84cb22e65bf8d13bc7816e77e76954cb31.cu
#ifndef __Rabbit_KERNEL_CU__ #define __Rabbit_KERNEL_CU__ #define __mem(mm,i,j,N) ((mm)[(i)+(j)*(N)]) #define max(a,b) (((a)>(b))?(a):(b)) #define min(a,b) (((a)<(b))?(a):(b)) #define rotl32(v, n) \ ((u32)((v) << (n)) | ((v) >> (32 - (n)))) #define DEFINE_STATE(x,c,carry)\ u32 x##0, x##1, x##2, x##3,\ x##4, x##5, x##6, x##7;\ u32 c##0, c##1, c##2, c##3,\ c##4, c##5, c##6, c##7;\ u32 carry #define LOAD8(s,g_s)\ do {\ s##0 = __mem((g_s),tID,0,nr_streams);\ s##1 = __mem((g_s),tID,1,nr_streams);\ s##2 = __mem((g_s),tID,2,nr_streams);\ s##3 = __mem((g_s),tID,3,nr_streams);\ s##4 = __mem((g_s),tID,4,nr_streams);\ s##5 = __mem((g_s),tID,5,nr_streams);\ s##6 = __mem((g_s),tID,6,nr_streams);\ s##7 = __mem((g_s),tID,7,nr_streams);\ } while(0) #define LOAD_STATE(x,g_x,c,g_c,carry,g_carry)\ do {\ LOAD8(x,g_x); LOAD8(c,g_c);\ carry = __mem((g_carry),tID,0,nr_streams);\ } while(0) #define STORE8(s,g_s)\ do {\ __mem((g_s),tID,0,nr_streams) = s##0;\ __mem((g_s),tID,1,nr_streams) = s##1;\ __mem((g_s),tID,2,nr_streams) = s##2;\ __mem((g_s),tID,3,nr_streams) = s##3;\ __mem((g_s),tID,4,nr_streams) = s##4;\ __mem((g_s),tID,5,nr_streams) = s##5;\ __mem((g_s),tID,6,nr_streams) = s##6;\ __mem((g_s),tID,7,nr_streams) = s##7;\ } while(0) #define SAVE_STATE(x,g_x,c,g_c,carry,g_carry)\ do {\ STORE8(x,g_x); STORE8(c,g_c);\ __mem((g_carry),tID,0,nr_streams) = carry;\ } while(0) __device__ u32 g_func(u32 x) { u32 a, b, h, l; a = x&0xFFFF; b = x>>16; h = ((((u32)(a*a)>>17) + (u32)(a*b))>>15) + b*b; l = x*x; return (u32)(h^l); } #define NEXT_STATE()\ do {\ u32 g0,g1,g2,g3,\ g4,g5,g6,g7;\ \ /* Temporary variables */\ u32 c_prev,c_tmp;\ \ /* Calculate new counter values */\ c_prev=c0;c0 = (u32)(c0 + 0x4D34D34D + carry);\ c_tmp=c1; c1 = (u32)(c1 + 0xD34D34D3 + (c0 < c_prev)); c_prev=c_tmp;\ c_tmp=c2; c2 = (u32)(c2 + 0x34D34D34 + (c1 < c_prev)); c_prev=c_tmp;\ c_tmp=c3; c3 = (u32)(c3 + 0x4D34D34D + (c2 < c_prev)); c_prev=c_tmp;\ c_tmp=c4; c4 = (u32)(c4 + 0xD34D34D3 + (c3 < c_prev)); c_prev=c_tmp;\ c_tmp=c5; c5 = (u32)(c5 + 0x34D34D34 + (c4 < c_prev)); c_prev=c_tmp;\ c_tmp=c6; c6 = (u32)(c6 + 0x4D34D34D + (c5 < c_prev)); c_prev=c_tmp;\ c_tmp=c7; c7 = (u32)(c7 + 0xD34D34D3 + (c6 < c_prev));\ carry = (c7 < c_tmp);\ g0=g_func((u32)(x0+c0));\ g1=g_func((u32)(x1+c1));\ g2=g_func((u32)(x2+c2));\ g3=g_func((u32)(x3+c3));\ g4=g_func((u32)(x4+c4));\ g5=g_func((u32)(x5+c5));\ g6=g_func((u32)(x6+c6));\ g7=g_func((u32)(x7+c7));\ x0 = g0;\ x1 = g1;\ x2 = g2;\ x3 = g3;\ x4 = g4;\ x5 = g5;\ x6 = g6;\ x7 = g7;\ x0 = (u32)(g0 + rotl32(g7,16) + rotl32(g6, 16));\ x1 = (u32)(g1 + rotl32(g0, 8) + g7);\ x2 = (u32)(g2 + rotl32(g1,16) + rotl32(g0, 16));\ x3 = (u32)(g3 + rotl32(g2, 8) + g1);\ x4 = (u32)(g4 + rotl32(g3,16) + rotl32(g2, 16));\ x5 = (u32)(g5 + rotl32(g4, 8) + g3);\ x6 = (u32)(g6 + rotl32(g5,16) + rotl32(g4, 16));\ x7 = (u32)(g7 + rotl32(g6, 8) + g5);\ } while(0) __global__ void Rabbit_keysetup(u32* g_x, u32 *g_c, u32 *g_carry, u32 *keys, u32 key_size) { u32 tID=blockIdx.x*blockDim.x+threadIdx.x; u32 nr_streams=blockDim.x*gridDim.x; u32 k0, k1, k2, k3; DEFINE_STATE(x,c,carry); k0=__mem(keys,tID,0,nr_streams); k1=__mem(keys,tID,1,nr_streams); k2=__mem(keys,tID,2,nr_streams); k3=__mem(keys,tID,3,nr_streams); x0 = k0; x2 = k1; x4 = k2; x6 = k3; x1 = (u32)(k3<<16) | (k2>>16); x3 = (u32)(k0<<16) | (k3>>16); x5 = (u32)(k1<<16) | (k0>>16); x7 = (u32)(k2<<16) | (k1>>16); c0 = rotl32(k2, 16); c2 = rotl32(k3, 16); c4 = rotl32(k0, 16); c6 = rotl32(k1, 16); c1 = (k0&0xFFFF0000) | (k1&0xFFFF); c3 = (k1&0xFFFF0000) | (k2&0xFFFF); c5 = (k2&0xFFFF0000) | 
(k3&0xFFFF); c7 = (k3&0xFFFF0000) | (k0&0xFFFF); carry = 0; for(int i=0;i<4;i++) { NEXT_STATE(); } c0 ^= x4; c1 ^= x5; c2 ^= x6; c3 ^= x7; c4 ^= x0; c5 ^= x1; c6 ^= x2; c7 ^= x3; SAVE_STATE(x,g_x,c,g_c,carry,g_carry); } __global__ void Rabbit_ivsetup(u32* g_x, u32 *g_c, u32 *g_carry, u32 *ivs, u32 iv_size) { u32 tID=blockIdx.x*blockDim.x+threadIdx.x; u32 nr_streams=blockDim.x*gridDim.x; u32 i0, i1, i2, i3; DEFINE_STATE(x,c,carry); LOAD_STATE(x,g_x,c,g_c,carry,g_carry); i0=__mem(ivs,tID,0,nr_streams); i2=__mem(ivs,tID,1,nr_streams); i1 = (i0>>16) | (i2&0xFFFF0000); i3 = (i2<<16) | (i0&0x0000FFFF); c0 ^= i0; c1 ^= i1; c2 ^= i2; c3 ^= i3; c4 ^= i0; c5 ^= i1; c6 ^= i2; c7 ^= i3; for(int i=0;i<4;i++) { NEXT_STATE(); } SAVE_STATE(x,g_x,c,g_c,carry,g_carry); } __global__ void Rabbit_process_bytes(gSTREAM_action act, u32* g_x, u32 *g_c, u32 *g_carry, u32 *buff, u32 nr_words) { u32 tID=blockIdx.x*blockDim.x+threadIdx.x; u32 nr_streams=blockDim.x*gridDim.x; DEFINE_STATE(x,c,carry); LOAD_STATE(x,g_x,c,g_c,carry,g_carry); for(int w=0;w<nr_words/4;w++) { NEXT_STATE(); if(act!=GEN_KEYSTREAM) { __mem(buff,tID,(4*w),nr_streams) ^= x0^(x5>>16)^(u32)(x3<<16); __mem(buff,tID,(4*w+1),nr_streams) ^= x2^(x7>>16)^(u32)(x5<<16); __mem(buff,tID,(4*w+2),nr_streams) ^= x4^(x1>>16)^(u32)(x7<<16); __mem(buff,tID,(4*w+3),nr_streams) ^= x6^(x3>>16)^(u32)(x1<<16); } else { __mem(buff,tID,(4*w),nr_streams) = x0^(x5>>16)^(u32)(x3<<16); __mem(buff,tID,(4*w+1),nr_streams) = x2^(x7>>16)^(u32)(x5<<16); __mem(buff,tID,(4*w+2),nr_streams) = x4^(x1>>16)^(u32)(x7<<16); __mem(buff,tID,(4*w+3),nr_streams) = x6^(x3>>16)^(u32)(x1<<16); } } if(nr_words%4) { /* handle remaining partial 4-byte blocks */ NEXT_STATE(); if(act!=GEN_KEYSTREAM) { if((nr_words%4)==3) { __mem(buff,tID,(nr_words-3),nr_streams) ^= x0^(x5>>16)^(u32)(x3<<16); __mem(buff,tID,(nr_words-2),nr_streams) ^= x2^(x7>>16)^(u32)(x5<<16); __mem(buff,tID,(nr_words-1),nr_streams) ^= x4^(x1>>16)^(u32)(x7<<16); } else if((nr_words%4)==2) { __mem(buff,tID,(nr_words-2),nr_streams) ^= x0^(x5>>16)^(u32)(x3<<16); __mem(buff,tID,(nr_words-1),nr_streams) ^= x2^(x7>>16)^(u32)(x5<<16); } else { //==1 __mem(buff,tID,(nr_words-1),nr_streams) ^= x0^(x5>>16)^(u32)(x3<<16); } } else { if((nr_words%4)==3) { __mem(buff,tID,(nr_words-3),nr_streams) = x0^(x5>>16)^(u32)(x3<<16); __mem(buff,tID,(nr_words-2),nr_streams) = x2^(x7>>16)^(u32)(x5<<16); __mem(buff,tID,(nr_words-1),nr_streams) = x4^(x1>>16)^(u32)(x7<<16); } else if((nr_words%4)==2) { __mem(buff,tID,(nr_words-2),nr_streams) = x0^(x5>>16)^(u32)(x3<<16); __mem(buff,tID,(nr_words-1),nr_streams) = x2^(x7>>16)^(u32)(x5<<16); } else { //==1 __mem(buff,tID,(nr_words-1),nr_streams) = x0^(x5>>16)^(u32)(x3<<16); } } } SAVE_STATE(x,g_x,c,g_c,carry,g_carry); } #endif
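// Hedged illustration (host-only, hypothetical snippet, not part of the kernels above) of the
// strided per-stream layout that the __mem(mm,i,j,N) macro assumes: word j of stream i lives at
// mm[i + j*N].  For a fixed j, consecutive streams (threads) therefore touch consecutive
// addresses, which keeps the LOAD8/STORE8 global-memory accesses coalesced.
#include <vector>
#include <cstdint>

int main() {
    typedef uint32_t u32;
    const unsigned nr_streams = 256;              // e.g. gridDim.x * blockDim.x
    std::vector<u32> g_x(nr_streams * 8, 0);      // 8 state words per stream
    unsigned stream = 3, word = 5;
    g_x[stream + word * nr_streams] = 0xDEADBEEFu;  // same addressing as __mem(g_x, stream, word, nr_streams)
    return 0;
}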
3850678f8f0e61631307e565efae2d891686d9bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHTensorRandom.h" #include "THHGeneral.h" #include <thrust/functional.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand_mtgp32_host.h> #include <rocrand/rocrand_mtgp32_11213.h> #define MAX_NUM_BLOCKS 64 #define BLOCK_SIZE 256 #ifndef DIVUP #define DIVUP(x, y) (((x) + (y) - 1) / (y)) #endif /* Sets up generator. Allocates but does not create the generator states. */ __host__ void initializeGenerator(Generator* gen) { THCudaCheck(hipMalloc((void**)&gen->gen_states, MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t))); THCudaCheck(hipMalloc((void**)&gen->kernel_params, sizeof(mtgp32_kernel_params_t))); } /* Frees memory allocated during setup. */ __host__ void destroyGenerator(Generator* gen) { if (gen->gen_states) { THCudaCheck(hipFree(gen->gen_states)); gen->gen_states = NULL; } if (gen->kernel_params) { THCudaCheck(hipFree(gen->kernel_params)); gen->kernel_params = NULL; } } /* Creates a new generator state given the seed. */ __host__ void createGeneratorState(Generator* gen, unsigned long seed) { if (hiprandMakeMTGP32Constants(mtgp32dc_params_fast_11213, gen->kernel_params) != HIPRAND_STATUS_SUCCESS) { THError("Creating MTGP constants failed."); } if (hiprandMakeMTGP32KernelState(gen->gen_states, mtgp32dc_params_fast_11213, gen->kernel_params, MAX_NUM_BLOCKS, seed) != HIPRAND_STATUS_SUCCESS) { THError("Creating MTGP kernel state failed."); } } /* Initialize generator array (must be called before any other function) */ __host__ void THCRandom_init(THCState* state, int devices, int current_device) { THCRNGState* rng_state = state->rngState; rng_state->num_devices = devices; rng_state->gen = (Generator*)malloc(rng_state->num_devices * sizeof(Generator)); for (int i = 0; i < rng_state->num_devices; ++i) { rng_state->gen[i].initf = 0; rng_state->gen[i].initial_seed = 0; rng_state->gen[i].gen_states = NULL; rng_state->gen[i].kernel_params = NULL; } rng_state->current_gen = &rng_state->gen[current_device]; // Initialize the generator for the current device. Other generators will be // initialized on-demand in THCRandom_setGenerator. 
initializeGenerator(rng_state->current_gen); THCRandom_seed(state); } /* Destroy generators and free memory */ __host__ void THCRandom_shutdown(THCState* state) { THCRNGState* rng_state = state->rngState; if (rng_state->gen == NULL) return; for (int i = 0; i < rng_state->num_devices; ++i) { destroyGenerator(&rng_state->gen[i]); } free(rng_state->gen); rng_state->gen = NULL; rng_state->current_gen = NULL; } /* Set the generator for the current device */ __host__ void THCRandom_setGenerator(THCState* state, int device) { THCRNGState* rng_state = state->rngState; if (device >= rng_state->num_devices) THError("Invalid device index."); rng_state->current_gen = &rng_state->gen[device]; if (rng_state->current_gen->initf == 0) { initializeGenerator(rng_state->current_gen); THCRandom_seed(state); } } /* Reset the generator for the current device after a device reset */ __host__ void THCRandom_resetGenerator(THCState* state) { THCRNGState* rng_state = state->rngState; initializeGenerator(rng_state->current_gen); THCRandom_manualSeed(state, rng_state->current_gen->initial_seed); } /* Random seed */ __host__ unsigned long THCRandom_seed(THCState* state) { unsigned long s = (unsigned long)time(0); THCRandom_manualSeed(state, s); return s; } __host__ unsigned long THCRandom_seedAll(THCState* state) { unsigned long s = (unsigned long)time(0); THCRandom_manualSeedAll(state, s); return s; } /* Manually set the seed */ __host__ void THCRandom_manualSeed(THCState* state, unsigned long seed) { THCRNGState* rng_state = state->rngState; if (rng_state->current_gen == NULL) { THError("Random number generators have not been initialized."); } rng_state->current_gen->initial_seed = seed; createGeneratorState(rng_state->current_gen, seed); rng_state->current_gen->initf = 1; } __host__ void THCRandom_manualSeedAll(THCState* state, unsigned long seed) { THCRNGState* rng_state = state->rngState; int currentDevice; THCudaCheck(hipGetDevice(&currentDevice)); for (int i = 0; i < rng_state->num_devices; ++i) { THCudaCheck(hipSetDevice(i)); THCRandom_setGenerator(state, i); THCRandom_manualSeed(state, seed); } THCudaCheck(hipSetDevice(currentDevice)); THCRandom_setGenerator(state, currentDevice); } /* Get the initial seed */ __host__ unsigned long THCRandom_initialSeed(THCState* state) { return state->rngState->current_gen->initial_seed; } __host__ void THCRandom_getRNGState(THCState* state, THByteTensor *rng_state) { // The RNG state comprises the MTPG32 states and the seed. 
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t); static const size_t seed_size = sizeof(unsigned long); static const size_t total_size = states_size + seed_size; THByteTensor_resize1d(rng_state, total_size); THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); THCudaCheck(hipMemcpy(THByteTensor_data(rng_state), state->rngState->current_gen->gen_states, states_size, hipMemcpyDeviceToHost)); memcpy(THByteTensor_data(rng_state) + states_size, &state->rngState->current_gen->initial_seed, seed_size); } __host__ void THCRandom_setRNGState(THCState* state, THByteTensor *rng_state) { static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t); static const size_t seed_size = sizeof(unsigned long); static const size_t total_size = states_size + seed_size; THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); THCudaCheck(hipMemcpy(state->rngState->current_gen->gen_states, THByteTensor_data(rng_state), states_size, hipMemcpyHostToDevice)); memcpy(&state->rngState->current_gen->initial_seed, THByteTensor_data(rng_state) + states_size, seed_size); } #define GENERATE_KERNEL1(NAME, ARG1, CURAND_FUNC, TRANSFORM) \ __global__ void NAME(hiprandStateMtgp32_t *state, int size, float *result, ARG1) \ { \ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \ int rounded_size = DIVUP(size, BLOCK_SIZE) * BLOCK_SIZE; \ for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \ float x = CURAND_FUNC(&state[blockIdx.x]); \ if (i < size) { \ x = TRANSFORM; \ result[i] = x; \ } \ } \ } #define GENERATE_KERNEL2(NAME, ARG1, ARG2, CURAND_FUNC, TRANSFORM) \ __global__ void NAME(hiprandStateMtgp32_t *state, int size, float *result, ARG1, ARG2) \ { \ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \ int rounded_size = DIVUP(size, BLOCK_SIZE) * BLOCK_SIZE; \ for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \ float x = CURAND_FUNC(&state[blockIdx.x]); \ if (i < size) { \ x = TRANSFORM; \ result[i] = x; \ } \ } \ } GENERATE_KERNEL2(generate_uniform, double a, double b, hiprand_uniform, x * (b-a) + a) GENERATE_KERNEL1(generate_bernoulli, double p, hiprand_uniform, (float)x <= p) GENERATE_KERNEL2(generate_normal, double mean, double stdv, hiprand_normal, (x * stdv) + mean) GENERATE_KERNEL1(generate_geometric, double p, hiprand_uniform, (log(1-x) / log(p)) + 1) GENERATE_KERNEL1(generate_exponential, double lambda, hiprand_uniform, (float)(-1. / lambda * log(1-x))) GENERATE_KERNEL2(generate_cauchy, double median, double sigma, hiprand_uniform, (float)(median + sigma * tan(M_PI*(x-0.5)))) #undef GENERATE_KERNEL1 #undef GENERATE_KERNEL2 /* Separate kernel because hiprand_log_normal gets extra parameters. 
*/ __global__ void generate_log_normal(hiprandStateMtgp32_t *state, int size, float *result, float mean, float stddev) { int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; int rounded_size = DIVUP(size, BLOCK_SIZE) * BLOCK_SIZE; for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { float x = hiprand_log_normal(&state[blockIdx.x], mean, stddev); if (i < size) { result[i] = x; } } } #define NUM_BLOCKS min((int)DIVUP(size, BLOCK_SIZE), MAX_NUM_BLOCKS) THC_API void THCudaTensor_uniform(THCState* state, THCudaTensor *self_, double a, double b) { if (state->rngState->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(state, self_); long size = THCudaTensor_nElement(state, self); float *data = THCudaTensor_data(state, self); hipLaunchKernelGGL(( generate_uniform), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, state->rngState->current_gen->gen_states, size, data, a, b); THCudaTensor_freeCopyTo(state, self, self_); }; THC_API void THCudaTensor_bernoulli(THCState* state, THCudaTensor *self_, double p) { if (state->rngState->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(state, self_); long size = THCudaTensor_nElement(state, self); float *data = THCudaTensor_data(state, self); hipLaunchKernelGGL(( generate_bernoulli), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, state->rngState->current_gen->gen_states, size, data, p); THCudaTensor_freeCopyTo(state, self, self_); }; THC_API void THCudaTensor_normal(THCState* state, THCudaTensor *self_, double mean, double stdv) { if (state->rngState->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(state, self_); long size = THCudaTensor_nElement(state, self); float *data = THCudaTensor_data(state, self); hipLaunchKernelGGL(( generate_normal), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, state->rngState->current_gen->gen_states, size, data, mean, stdv); THCudaTensor_freeCopyTo(state, self, self_); }; THC_API void THCudaTensor_logNormal(THCState* state, THCudaTensor *self_, double mean, double stdv) { if (state->rngState->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(state, self_); long size = THCudaTensor_nElement(state, self); float *data = THCudaTensor_data(state, self); hipLaunchKernelGGL(( generate_log_normal), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, state->rngState->current_gen->gen_states, size, data, mean, stdv); THCudaTensor_freeCopyTo(state, self, self_); }; THC_API void THCudaTensor_geometric(THCState* state, THCudaTensor *self_, double p) { if (state->rngState->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(state, self_); long size = THCudaTensor_nElement(state, self); float *data = THCudaTensor_data(state, self); hipLaunchKernelGGL(( generate_geometric), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, state->rngState->current_gen->gen_states, size, data, p); THCudaTensor_freeCopyTo(state, self, self_); }; THC_API void THCudaTensor_exponential(THCState* state, THCudaTensor *self_, double lambda) { if (state->rngState->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(state, self_); long size = THCudaTensor_nElement(state, self); 
float *data = THCudaTensor_data(state, self); hipLaunchKernelGGL(( generate_exponential), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, state->rngState->current_gen->gen_states, size, data, lambda); THCudaTensor_freeCopyTo(state, self, self_); }; THC_API void THCudaTensor_cauchy(THCState* state, THCudaTensor *self_, double median, double sigma) { if (state->rngState->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(state, self_); long size = THCudaTensor_nElement(state, self); float *data = THCudaTensor_data(state, self); hipLaunchKernelGGL(( generate_cauchy), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, state->rngState->current_gen->gen_states, size, data, median, sigma); THCudaTensor_freeCopyTo(state, self, self_); }; #undef NUM_BLOCKS
3850678f8f0e61631307e565efae2d891686d9bc.cu
#include "THCTensorRandom.h" #include "THCGeneral.h" #include <thrust/functional.h> #include <curand.h> #include <curand_kernel.h> #include <curand_mtgp32_host.h> #include <curand_mtgp32dc_p_11213.h> #define MAX_NUM_BLOCKS 64 #define BLOCK_SIZE 256 #ifndef DIVUP #define DIVUP(x, y) (((x) + (y) - 1) / (y)) #endif /* Sets up generator. Allocates but does not create the generator states. */ __host__ void initializeGenerator(Generator* gen) { THCudaCheck(cudaMalloc((void**)&gen->gen_states, MAX_NUM_BLOCKS * sizeof(curandStateMtgp32))); THCudaCheck(cudaMalloc((void**)&gen->kernel_params, sizeof(mtgp32_kernel_params))); } /* Frees memory allocated during setup. */ __host__ void destroyGenerator(Generator* gen) { if (gen->gen_states) { THCudaCheck(cudaFree(gen->gen_states)); gen->gen_states = NULL; } if (gen->kernel_params) { THCudaCheck(cudaFree(gen->kernel_params)); gen->kernel_params = NULL; } } /* Creates a new generator state given the seed. */ __host__ void createGeneratorState(Generator* gen, unsigned long seed) { if (curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, gen->kernel_params) != CURAND_STATUS_SUCCESS) { THError("Creating MTGP constants failed."); } if (curandMakeMTGP32KernelState(gen->gen_states, mtgp32dc_params_fast_11213, gen->kernel_params, MAX_NUM_BLOCKS, seed) != CURAND_STATUS_SUCCESS) { THError("Creating MTGP kernel state failed."); } } /* Initialize generator array (must be called before any other function) */ __host__ void THCRandom_init(THCState* state, int devices, int current_device) { THCRNGState* rng_state = state->rngState; rng_state->num_devices = devices; rng_state->gen = (Generator*)malloc(rng_state->num_devices * sizeof(Generator)); for (int i = 0; i < rng_state->num_devices; ++i) { rng_state->gen[i].initf = 0; rng_state->gen[i].initial_seed = 0; rng_state->gen[i].gen_states = NULL; rng_state->gen[i].kernel_params = NULL; } rng_state->current_gen = &rng_state->gen[current_device]; // Initialize the generator for the current device. Other generators will be // initialized on-demand in THCRandom_setGenerator. 
initializeGenerator(rng_state->current_gen); THCRandom_seed(state); } /* Destroy generators and free memory */ __host__ void THCRandom_shutdown(THCState* state) { THCRNGState* rng_state = state->rngState; if (rng_state->gen == NULL) return; for (int i = 0; i < rng_state->num_devices; ++i) { destroyGenerator(&rng_state->gen[i]); } free(rng_state->gen); rng_state->gen = NULL; rng_state->current_gen = NULL; } /* Set the generator for the current device */ __host__ void THCRandom_setGenerator(THCState* state, int device) { THCRNGState* rng_state = state->rngState; if (device >= rng_state->num_devices) THError("Invalid device index."); rng_state->current_gen = &rng_state->gen[device]; if (rng_state->current_gen->initf == 0) { initializeGenerator(rng_state->current_gen); THCRandom_seed(state); } } /* Reset the generator for the current device after a device reset */ __host__ void THCRandom_resetGenerator(THCState* state) { THCRNGState* rng_state = state->rngState; initializeGenerator(rng_state->current_gen); THCRandom_manualSeed(state, rng_state->current_gen->initial_seed); } /* Random seed */ __host__ unsigned long THCRandom_seed(THCState* state) { unsigned long s = (unsigned long)time(0); THCRandom_manualSeed(state, s); return s; } __host__ unsigned long THCRandom_seedAll(THCState* state) { unsigned long s = (unsigned long)time(0); THCRandom_manualSeedAll(state, s); return s; } /* Manually set the seed */ __host__ void THCRandom_manualSeed(THCState* state, unsigned long seed) { THCRNGState* rng_state = state->rngState; if (rng_state->current_gen == NULL) { THError("Random number generators have not been initialized."); } rng_state->current_gen->initial_seed = seed; createGeneratorState(rng_state->current_gen, seed); rng_state->current_gen->initf = 1; } __host__ void THCRandom_manualSeedAll(THCState* state, unsigned long seed) { THCRNGState* rng_state = state->rngState; int currentDevice; THCudaCheck(cudaGetDevice(&currentDevice)); for (int i = 0; i < rng_state->num_devices; ++i) { THCudaCheck(cudaSetDevice(i)); THCRandom_setGenerator(state, i); THCRandom_manualSeed(state, seed); } THCudaCheck(cudaSetDevice(currentDevice)); THCRandom_setGenerator(state, currentDevice); } /* Get the initial seed */ __host__ unsigned long THCRandom_initialSeed(THCState* state) { return state->rngState->current_gen->initial_seed; } __host__ void THCRandom_getRNGState(THCState* state, THByteTensor *rng_state) { // The RNG state comprises the MTPG32 states and the seed. 
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32); static const size_t seed_size = sizeof(unsigned long); static const size_t total_size = states_size + seed_size; THByteTensor_resize1d(rng_state, total_size); THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); THCudaCheck(cudaMemcpy(THByteTensor_data(rng_state), state->rngState->current_gen->gen_states, states_size, cudaMemcpyDeviceToHost)); memcpy(THByteTensor_data(rng_state) + states_size, &state->rngState->current_gen->initial_seed, seed_size); } __host__ void THCRandom_setRNGState(THCState* state, THByteTensor *rng_state) { static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32); static const size_t seed_size = sizeof(unsigned long); static const size_t total_size = states_size + seed_size; THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); THCudaCheck(cudaMemcpy(state->rngState->current_gen->gen_states, THByteTensor_data(rng_state), states_size, cudaMemcpyHostToDevice)); memcpy(&state->rngState->current_gen->initial_seed, THByteTensor_data(rng_state) + states_size, seed_size); } #define GENERATE_KERNEL1(NAME, ARG1, CURAND_FUNC, TRANSFORM) \ __global__ void NAME(curandStateMtgp32 *state, int size, float *result, ARG1) \ { \ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \ int rounded_size = DIVUP(size, BLOCK_SIZE) * BLOCK_SIZE; \ for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \ float x = CURAND_FUNC(&state[blockIdx.x]); \ if (i < size) { \ x = TRANSFORM; \ result[i] = x; \ } \ } \ } #define GENERATE_KERNEL2(NAME, ARG1, ARG2, CURAND_FUNC, TRANSFORM) \ __global__ void NAME(curandStateMtgp32 *state, int size, float *result, ARG1, ARG2) \ { \ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \ int rounded_size = DIVUP(size, BLOCK_SIZE) * BLOCK_SIZE; \ for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \ float x = CURAND_FUNC(&state[blockIdx.x]); \ if (i < size) { \ x = TRANSFORM; \ result[i] = x; \ } \ } \ } GENERATE_KERNEL2(generate_uniform, double a, double b, curand_uniform, x * (b-a) + a) GENERATE_KERNEL1(generate_bernoulli, double p, curand_uniform, (float)x <= p) GENERATE_KERNEL2(generate_normal, double mean, double stdv, curand_normal, (x * stdv) + mean) GENERATE_KERNEL1(generate_geometric, double p, curand_uniform, (log(1-x) / log(p)) + 1) GENERATE_KERNEL1(generate_exponential, double lambda, curand_uniform, (float)(-1. / lambda * log(1-x))) GENERATE_KERNEL2(generate_cauchy, double median, double sigma, curand_uniform, (float)(median + sigma * tan(M_PI*(x-0.5)))) #undef GENERATE_KERNEL1 #undef GENERATE_KERNEL2 /* Separate kernel because curand_log_normal gets extra parameters. 
*/ __global__ void generate_log_normal(curandStateMtgp32 *state, int size, float *result, float mean, float stddev) { int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; int rounded_size = DIVUP(size, BLOCK_SIZE) * BLOCK_SIZE; for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { float x = curand_log_normal(&state[blockIdx.x], mean, stddev); if (i < size) { result[i] = x; } } } #define NUM_BLOCKS min((int)DIVUP(size, BLOCK_SIZE), MAX_NUM_BLOCKS) THC_API void THCudaTensor_uniform(THCState* state, THCudaTensor *self_, double a, double b) { if (state->rngState->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(state, self_); long size = THCudaTensor_nElement(state, self); float *data = THCudaTensor_data(state, self); generate_uniform<<<NUM_BLOCKS, BLOCK_SIZE>>>( state->rngState->current_gen->gen_states, size, data, a, b); THCudaTensor_freeCopyTo(state, self, self_); }; THC_API void THCudaTensor_bernoulli(THCState* state, THCudaTensor *self_, double p) { if (state->rngState->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(state, self_); long size = THCudaTensor_nElement(state, self); float *data = THCudaTensor_data(state, self); generate_bernoulli<<<NUM_BLOCKS, BLOCK_SIZE>>>( state->rngState->current_gen->gen_states, size, data, p); THCudaTensor_freeCopyTo(state, self, self_); }; THC_API void THCudaTensor_normal(THCState* state, THCudaTensor *self_, double mean, double stdv) { if (state->rngState->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(state, self_); long size = THCudaTensor_nElement(state, self); float *data = THCudaTensor_data(state, self); generate_normal<<<NUM_BLOCKS, BLOCK_SIZE>>>( state->rngState->current_gen->gen_states, size, data, mean, stdv); THCudaTensor_freeCopyTo(state, self, self_); }; THC_API void THCudaTensor_logNormal(THCState* state, THCudaTensor *self_, double mean, double stdv) { if (state->rngState->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(state, self_); long size = THCudaTensor_nElement(state, self); float *data = THCudaTensor_data(state, self); generate_log_normal<<<NUM_BLOCKS, BLOCK_SIZE>>>( state->rngState->current_gen->gen_states, size, data, mean, stdv); THCudaTensor_freeCopyTo(state, self, self_); }; THC_API void THCudaTensor_geometric(THCState* state, THCudaTensor *self_, double p) { if (state->rngState->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(state, self_); long size = THCudaTensor_nElement(state, self); float *data = THCudaTensor_data(state, self); generate_geometric<<<NUM_BLOCKS, BLOCK_SIZE>>>( state->rngState->current_gen->gen_states, size, data, p); THCudaTensor_freeCopyTo(state, self, self_); }; THC_API void THCudaTensor_exponential(THCState* state, THCudaTensor *self_, double lambda) { if (state->rngState->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(state, self_); long size = THCudaTensor_nElement(state, self); float *data = THCudaTensor_data(state, self); generate_exponential<<<NUM_BLOCKS, BLOCK_SIZE>>>( state->rngState->current_gen->gen_states, size, data, lambda); 
THCudaTensor_freeCopyTo(state, self, self_); }; THC_API void THCudaTensor_cauchy(THCState* state, THCudaTensor *self_, double median, double sigma) { if (state->rngState->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(state, self_); long size = THCudaTensor_nElement(state, self); float *data = THCudaTensor_data(state, self); generate_cauchy<<<NUM_BLOCKS, BLOCK_SIZE>>>( state->rngState->current_gen->gen_states, size, data, median, sigma); THCudaTensor_freeCopyTo(state, self, self_); }; #undef NUM_BLOCKS
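// Hedged sketch (illustrative only; fill_uniform_sketch and the hard-coded 256/64 stand in for
// the GENERATE_KERNEL macros and the BLOCK_SIZE/MAX_NUM_BLOCKS defines above): the loop bound is
// rounded up to a multiple of the block size so the whole block takes the same number of trips
// through the loop -- the MTGP32 state is shared per block -- and the "i < size" guard only
// gates the store, not the call to the generator.
#include <curand_kernel.h>

__global__ void fill_uniform_sketch(curandStateMtgp32 *state, int size, float *out) {
    const int BLOCK = 256, MAX_BLOCKS = 64;
    int idx = blockIdx.x * BLOCK + threadIdx.x;
    int rounded_size = ((size + BLOCK - 1) / BLOCK) * BLOCK;
    for (int i = idx; i < rounded_size; i += BLOCK * MAX_BLOCKS) {
        float x = curand_uniform(&state[blockIdx.x]);   // every thread of the block calls this together
        if (i < size) {
            out[i] = x;
        }
    }
}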
8980e5100a104defb713ff387b4df13a5ca7c75d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

extern "C" __global__ void saxpy(float a, float *x, float *y, float *out, size_t n)
{
    size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        out[tid] = a * x[tid] + y[tid];
    }
}
8980e5100a104defb713ff387b4df13a5ca7c75d.cu
extern "C" __global__ void saxpy(float a, float *x, float *y, float *out, size_t n)
{
    size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        out[tid] = a * x[tid] + y[tid];
    }
}
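// Hedged, minimal host-side driver (illustrative only, not part of the file above) showing one
// conventional way to launch the saxpy kernel: one thread per element, grid size rounded up.
#include <cuda_runtime.h>
#include <vector>
#include <cstdio>

extern "C" __global__ void saxpy(float a, float *x, float *y, float *out, size_t n);

int main() {
    const size_t n = 1 << 20;
    std::vector<float> hx(n, 1.0f), hy(n, 2.0f), hout(n, 0.0f);
    float *dx, *dy, *dout;
    cudaMalloc((void **)&dx, n * sizeof(float));
    cudaMalloc((void **)&dy, n * sizeof(float));
    cudaMalloc((void **)&dout, n * sizeof(float));
    cudaMemcpy(dx, hx.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dy, hy.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    const int block = 256;
    const int grid = (int)((n + block - 1) / block);
    saxpy<<<grid, block>>>(2.0f, dx, dy, dout, n);
    cudaMemcpy(hout.data(), dout, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("out[0] = %f\n", hout[0]);   // expect 2*1 + 2 = 4
    cudaFree(dx); cudaFree(dy); cudaFree(dout);
    return 0;
}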
9b1117d4d3001b8f3d037b1419383340d9a0a12f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "medianBlur.hpp" const size_t BLOCK_ROWS = 32; const size_t BLOCK_COLS = 16; const size_t MEDIAN_LENGTH = 9; /* * Graciously based on * http://stackoverflow.com/questions/22315903/cuda-median-filter-implementation-does-not-produce-desired-results */ __global__ void blurKernel ( unsigned char *d_input_img, unsigned char *d_output_img, int d_iRows, int d_iCols) { unsigned int row = blockIdx.y * blockDim.y + threadIdx.y; unsigned int col = blockIdx.x * blockDim.x + threadIdx.x; unsigned char window[MEDIAN_LENGTH]; if (col > d_iCols || row >= d_iRows) { return; } window[0] = (row==0||col==0) ? 0 : d_input_img[(row-1)*d_iCols+(col-1)]; window[1] = (row==0) ? 0 : d_input_img[(row-1)*d_iCols+col]; window[2] = (row==0||col==d_iCols-1) ? 0 : d_input_img[(row-1)*d_iCols+(col+1)]; window[3] = (col==0) ? 0 : d_input_img[row*d_iCols+(col-1)]; window[4] = d_input_img[row*d_iCols+col]; window[5] = (col==d_iCols-1) ? 0 : d_input_img[row*d_iCols+(col+1)]; window[6] = (row==d_iRows-1||col==0) ? 0 : d_input_img[(row+1)*d_iCols+(col-1)]; window[7] = (row==d_iRows-1) ? 0 : d_input_img[(row+1)*d_iCols+col]; window[8] = (row==d_iRows-1||col==d_iCols-1) ? 0 : d_input_img[(row+1)*d_iCols+(col+1)]; // Order elements for (unsigned int j = 0; j < 5; ++j) { // Find position of minimum element unsigned char temp = window[j]; unsigned int idx = j; for (unsigned int l = j + 1; l < 9; ++l) { if (window[l] < temp) { idx = l; temp = window[l]; } } // Put found minimum element in its place window[idx] = window[j]; window[j] = temp; } d_output_img[row*d_iCols + col] = window[4]; } void gpuMedianBlur(const cv::Mat& input, const cv::Mat& output) { unsigned char *device_input, *device_output; size_t d_ipimgSize = input.step * input.rows; size_t d_opimgSize = output.step * output.rows; hipMalloc((void**) &device_input, d_ipimgSize); hipMalloc((void**) &device_output, d_opimgSize); hipMemcpy(device_input, input.data, d_ipimgSize, hipMemcpyHostToDevice); dim3 Threads(BLOCK_ROWS, BLOCK_COLS); dim3 Blocks( (input.cols + Threads.x - 1) / Threads.x, (input.rows + Threads.y - 1) / Threads.y); hipLaunchKernelGGL(( blurKernel), dim3(Blocks), dim3(Threads), 0, 0, device_input, device_output, input.rows, input.cols); hipDeviceSynchronize(); hipMemcpy(output.data, device_output, d_opimgSize, hipMemcpyDeviceToHost); hipFree(device_input); hipFree(device_output); }
9b1117d4d3001b8f3d037b1419383340d9a0a12f.cu
#include "medianBlur.hpp" const size_t BLOCK_ROWS = 32; const size_t BLOCK_COLS = 16; const size_t MEDIAN_LENGTH = 9; /* * Graciously based on * http://stackoverflow.com/questions/22315903/cuda-median-filter-implementation-does-not-produce-desired-results */ __global__ void blurKernel ( unsigned char *d_input_img, unsigned char *d_output_img, int d_iRows, int d_iCols) { unsigned int row = blockIdx.y * blockDim.y + threadIdx.y; unsigned int col = blockIdx.x * blockDim.x + threadIdx.x; unsigned char window[MEDIAN_LENGTH]; if (col > d_iCols || row >= d_iRows) { return; } window[0] = (row==0||col==0) ? 0 : d_input_img[(row-1)*d_iCols+(col-1)]; window[1] = (row==0) ? 0 : d_input_img[(row-1)*d_iCols+col]; window[2] = (row==0||col==d_iCols-1) ? 0 : d_input_img[(row-1)*d_iCols+(col+1)]; window[3] = (col==0) ? 0 : d_input_img[row*d_iCols+(col-1)]; window[4] = d_input_img[row*d_iCols+col]; window[5] = (col==d_iCols-1) ? 0 : d_input_img[row*d_iCols+(col+1)]; window[6] = (row==d_iRows-1||col==0) ? 0 : d_input_img[(row+1)*d_iCols+(col-1)]; window[7] = (row==d_iRows-1) ? 0 : d_input_img[(row+1)*d_iCols+col]; window[8] = (row==d_iRows-1||col==d_iCols-1) ? 0 : d_input_img[(row+1)*d_iCols+(col+1)]; // Order elements for (unsigned int j = 0; j < 5; ++j) { // Find position of minimum element unsigned char temp = window[j]; unsigned int idx = j; for (unsigned int l = j + 1; l < 9; ++l) { if (window[l] < temp) { idx = l; temp = window[l]; } } // Put found minimum element in its place window[idx] = window[j]; window[j] = temp; } d_output_img[row*d_iCols + col] = window[4]; } void gpuMedianBlur(const cv::Mat& input, const cv::Mat& output) { unsigned char *device_input, *device_output; size_t d_ipimgSize = input.step * input.rows; size_t d_opimgSize = output.step * output.rows; cudaMalloc((void**) &device_input, d_ipimgSize); cudaMalloc((void**) &device_output, d_opimgSize); cudaMemcpy(device_input, input.data, d_ipimgSize, cudaMemcpyHostToDevice); dim3 Threads(BLOCK_ROWS, BLOCK_COLS); dim3 Blocks( (input.cols + Threads.x - 1) / Threads.x, (input.rows + Threads.y - 1) / Threads.y); blurKernel<<< Blocks, Threads>>>( device_input, device_output, input.rows, input.cols); cudaDeviceSynchronize(); cudaMemcpy(output.data, device_output, d_opimgSize, cudaMemcpyDeviceToHost); cudaFree(device_input); cudaFree(device_output); }
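// Hedged host-only aside (illustrative): the kernel above runs only five passes of selection
// sort because once the five smallest of the nine window values are in place, window[4] already
// holds the median; the remaining four elements never need to be ordered.
#include <algorithm>
#include <cstdio>

int main() {
    unsigned char w[9]       = {9, 3, 7, 1, 5, 8, 2, 6, 4};
    unsigned char partial[9] = {9, 3, 7, 1, 5, 8, 2, 6, 4};
    for (unsigned int j = 0; j < 5; ++j) {            // same partial selection sort as the kernel
        unsigned char temp = partial[j];
        unsigned int idx = j;
        for (unsigned int l = j + 1; l < 9; ++l) {
            if (partial[l] < temp) { idx = l; temp = partial[l]; }
        }
        partial[idx] = partial[j];
        partial[j] = temp;
    }
    std::nth_element(w, w + 4, w + 9);                // reference median
    printf("partial-sort median = %d, reference median = %d\n", (int)partial[4], (int)w[4]);  // both 5
    return 0;
}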
81d7d7a3fec59b8d85223781498bafb8b63877b1.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include "stdio.h" #include <vector> #include <hip/hip_runtime.h> #include "TSystem.h" #include "TMinuit.h" #include "TRandom3.h" #include "math.h" // #include "cuPrintf.hip" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/functional.h> #include <thrust/transform_reduce.h> std::vector<double> theEvents; __constant__ __device__ double dev_params[2]; thrust::device_vector<double>* d_theEvents; // *** Testing purpose implementation // *** Following works but probably slows down the process *** // __device__ __host__ double gauss (double x, double mean, double sigma) { // return pow((x-mean)/sigma, 2); // } struct GaussianFunctor { // GaussianFunctor(double _mean, double _sigma) : mean(_mean), sigma(_sigma) {} __device__ double operator() (double x) { double mean = dev_params[0]; double sigma = dev_params[1]; // return -2*log(exp(-0.5*pow((x-mean)/sigma, 2))); return pow((x-mean)/sigma,2); // *** Start test implementatoin *** // *** Following three commented lines will make use of global gauss function in order to use the same one function from a GPU struct and a CPU method *** // double returnvalue = gauss(x, mean, sigma); // printf("Gauss: x = %f, mean = %f, sigma = %f --> return = %f\n", x, mean, sigma, returnvalue); // return returnvalue; // *** End test implementation *** } // private: // double mean, sigma; }; // FOR TESTING PURPOSES template <typename T> struct square { __host__ __device__ T operator() (const T& x) const { return x * x; } }; void FitFcn (int& npar, double* deriv, double& fun, double* param, int flg) { double mean = param[0]; double sigma = param[1]; double nll = 0; for (unsigned int i = 0; i < theEvents.size(); i++) { double x = theEvents[i]; double thisEventProb = exp(-0.5*pow((x-mean)/sigma, 2)); nll -= 2*log(thisEventProb); // *** Test implementation // nll -= gauss(x, mean, sigma); } fun = nll; } void dev_FitFcn (int& npar, double* deriv, double& fun, double* param, int flg) { hipMemcpyToSymbol("dev_params", param, 2*sizeof(double), 0, hipMemcpyHostToDevice); fun = thrust::transform_reduce(d_theEvents->begin(), d_theEvents->end(), GaussianFunctor(), 0., thrust::plus<double>()); } int main(int argc, char** argv) { // gSystem->Load("libMinuit"); std::cout << "############################" << std::endl << "## You're lucky! Because of the default TMinuit output into the shell, I implemented a bunch of line separators!" 
<< std::endl << "############################" << std::endl << std::endl; int sizeOfVector = 10000; if (argc > 1) sizeOfVector = atoi(argv[1]); TRandom3 myRandom(23); for (int i = 0; i < sizeOfVector; i++) { theEvents.push_back(myRandom.Gaus(0,1)); // if (i % 100 == 0) std::cout << "## Just pushed " << theEvents[i] << " into number array" << std::endl; } TMinuit minuit(2); std::cout << "## TMINUIT:: Defining parameters ##" << std::endl; minuit.DefineParameter(0, "mean", 0, 0.1, -1, 1); minuit.DefineParameter(1, "sigma", 1, 0.1, 0.5, 1.5); std::cout << "## TMINUIT:: Setting Functoin ##" << std::endl; minuit.SetFCN(&FitFcn); std::cout << "## TMINUIT:: Calling Migrad() ##" << std::endl; minuit.Migrad(); std::cout << "############################" << std::endl; std::cout << "## Now on with the parallized version" << std::endl; std::cout << "############################" << std::endl; thrust::device_vector<double> d_localEvents(theEvents); d_theEvents = &d_localEvents; TMinuit dev_minuit(2); dev_minuit.DefineParameter(0, "dmean", 0, 0.1, -1, 1); dev_minuit.DefineParameter(1, "dsigma", 1, 0.1, 0.5, 1.5); dev_minuit.SetFCN(&dev_FitFcn); dev_minuit.Migrad(); }
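// Hedged note (illustrative sketch, not part of the file above): dev_FitFcn above passes the
// symbol name as a string ("dev_params") to hipMemcpyToSymbol; that string-based form is the
// legacy CUDA interface, and current HIP/CUDA runtimes expect the symbol itself (the CUDA
// version of this file below makes the same string-based call).  Assuming the same
// __constant__ double dev_params[2] declaration, a symbol-based call would look like:
static void upload_params_sketch(const double *param) {
    hipMemcpyToSymbol(HIP_SYMBOL(dev_params), param, 2 * sizeof(double));
}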
81d7d7a3fec59b8d85223781498bafb8b63877b1.cu
#include <iostream> #include "stdio.h" #include <vector> #include <cuda.h> #include "TSystem.h" #include "TMinuit.h" #include "TRandom3.h" #include "math.h" // #include "cuPrintf.cu" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/functional.h> #include <thrust/transform_reduce.h> std::vector<double> theEvents; __constant__ __device__ double dev_params[2]; thrust::device_vector<double>* d_theEvents; // *** Testing purpose implementation // *** Following works but probably slows down the process *** // __device__ __host__ double gauss (double x, double mean, double sigma) { // return pow((x-mean)/sigma, 2); // } struct GaussianFunctor { // GaussianFunctor(double _mean, double _sigma) : mean(_mean), sigma(_sigma) {} __device__ double operator() (double x) { double mean = dev_params[0]; double sigma = dev_params[1]; // return -2*log(exp(-0.5*pow((x-mean)/sigma, 2))); return pow((x-mean)/sigma,2); // *** Start test implementatoin *** // *** Following three commented lines will make use of global gauss function in order to use the same one function from a GPU struct and a CPU method *** // double returnvalue = gauss(x, mean, sigma); // printf("Gauss: x = %f, mean = %f, sigma = %f --> return = %f\n", x, mean, sigma, returnvalue); // return returnvalue; // *** End test implementation *** } // private: // double mean, sigma; }; // FOR TESTING PURPOSES template <typename T> struct square { __host__ __device__ T operator() (const T& x) const { return x * x; } }; void FitFcn (int& npar, double* deriv, double& fun, double* param, int flg) { double mean = param[0]; double sigma = param[1]; double nll = 0; for (unsigned int i = 0; i < theEvents.size(); i++) { double x = theEvents[i]; double thisEventProb = exp(-0.5*pow((x-mean)/sigma, 2)); nll -= 2*log(thisEventProb); // *** Test implementation // nll -= gauss(x, mean, sigma); } fun = nll; } void dev_FitFcn (int& npar, double* deriv, double& fun, double* param, int flg) { cudaMemcpyToSymbol("dev_params", param, 2*sizeof(double), 0, cudaMemcpyHostToDevice); fun = thrust::transform_reduce(d_theEvents->begin(), d_theEvents->end(), GaussianFunctor(), 0., thrust::plus<double>()); } int main(int argc, char** argv) { // gSystem->Load("libMinuit"); std::cout << "############################" << std::endl << "## You're lucky! Because of the default TMinuit output into the shell, I implemented a bunch of line separators!" 
<< std::endl << "############################" << std::endl << std::endl; int sizeOfVector = 10000; if (argc > 1) sizeOfVector = atoi(argv[1]); TRandom3 myRandom(23); for (int i = 0; i < sizeOfVector; i++) { theEvents.push_back(myRandom.Gaus(0,1)); // if (i % 100 == 0) std::cout << "## Just pushed " << theEvents[i] << " into number array" << std::endl; } TMinuit minuit(2); std::cout << "## TMINUIT:: Defining parameters ##" << std::endl; minuit.DefineParameter(0, "mean", 0, 0.1, -1, 1); minuit.DefineParameter(1, "sigma", 1, 0.1, 0.5, 1.5); std::cout << "## TMINUIT:: Setting Functoin ##" << std::endl; minuit.SetFCN(&FitFcn); std::cout << "## TMINUIT:: Calling Migrad() ##" << std::endl; minuit.Migrad(); std::cout << "############################" << std::endl; std::cout << "## Now on with the parallized version" << std::endl; std::cout << "############################" << std::endl; thrust::device_vector<double> d_localEvents(theEvents); d_theEvents = &d_localEvents; TMinuit dev_minuit(2); dev_minuit.DefineParameter(0, "dmean", 0, 0.1, -1, 1); dev_minuit.DefineParameter(1, "dsigma", 1, 0.1, 0.5, 1.5); dev_minuit.SetFCN(&dev_FitFcn); dev_minuit.Migrad(); }
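// Hedged, self-contained sketch (SquaredPull and the sample values are illustrative) of the
// thrust::transform_reduce pattern used by dev_FitFcn above: each event x is mapped to
// ((x - mean)/sigma)^2 on the device and the results are summed with thrust::plus, mirroring
// what GaussianFunctor does with the values stored in dev_params.
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <cstdio>

struct SquaredPull {
    double mean, sigma;
    __host__ __device__ double operator()(double x) const {
        double z = (x - mean) / sigma;
        return z * z;
    }
};

int main() {
    thrust::device_vector<double> events(4);
    events[0] = -1.0; events[1] = 0.0; events[2] = 1.0; events[3] = 2.0;
    double sum = thrust::transform_reduce(events.begin(), events.end(),
                                          SquaredPull{0.0, 1.0}, 0.0,
                                          thrust::plus<double>());
    printf("sum of squared pulls = %f\n", sum);   // 1 + 0 + 1 + 4 = 6
    return 0;
}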
3132fd0f448d5f7d6f77c12a39575f68a93b5285.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>

/*
 * file name: matrix.cu
 *
 * matrix.cu implements some commonly used matrix operations in CUDA
 *
 * this is a toy program for learning CUDA; some functions are reusable in other projects
 */

#define BLOCK_SIZE 16

/*
*********************************************************************
function name: gpu_matrix_mult

description: product of two matrices (not necessarily square)

parameters:
    &a  GPU device pointer to a m X n matrix (A)
    &b  GPU device pointer to a n X k matrix (B)
    &c  GPU device output pointer to a m X k matrix (C) to store the result

Note:
    grid and block should be configured as:
        dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
        dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    further speedup can be obtained by using shared memory to decrease global memory access times

return: none
*********************************************************************
*/
__global__ void gpu_matrix_mult(int *a, int *b, int *c, int m, int n, int k) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    if (col < k && row < m) {
        for (int i = 0; i < n; i++) {
            sum += a[row * n + i] * b[i * k + col];
        }
        c[row * k + col] = sum;
    }
}

/*
*********************************************************************
function name: gpu_square_matrix_mult

description: product of two square (n X n) matrices on the GPU, using shared-memory tiling

parameters:
    &a  GPU device pointer to a n X n matrix (A)
    &b  GPU device pointer to a n X n matrix (B)
    &c  GPU device output pointer to a n X n matrix (C) to store the result

Note:
    grid and block should be configured as:
        dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
        dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);

return: none
*********************************************************************
*/
__global__ void gpu_square_matrix_mult(int *d_a, int *d_b, int *d_result, int n) {
    __shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE];

    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    int tmp = 0;
    int idx;

    for (int sub = 0; sub < gridDim.x; ++sub) {
        idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
        if (idx >= n * n) {
            // n may not be divisible by BLOCK_SIZE
            tile_a[threadIdx.y][threadIdx.x] = 0;
        } else {
            tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
        }

        idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
        if (idx >= n * n) {
            tile_b[threadIdx.y][threadIdx.x] = 0;
        } else {
            tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
        }
        __syncthreads();

        for (int k = 0; k < BLOCK_SIZE; ++k) {
            tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
        }
        __syncthreads();
    }
    if (row < n && col < n) {
        d_result[row * n + col] = tmp;
    }
}

/*
*********************************************************************
function name: gpu_matrix_transpose

description: matrix transpose

parameters:
    &mat_in   GPU device pointer to a rows X cols matrix
    &mat_out  GPU device output pointer to a cols X rows matrix to store the result

Note:
    grid and block should be configured as:
        dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
        dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);

return: none
*********************************************************************
*/
__global__ void gpu_matrix_transpose(int* mat_in, int* mat_out, unsigned int rows, unsigned int cols) {
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;

    if (idx < cols && idy < rows) {
        unsigned int pos = idy * cols + idx;
        unsigned int trans_pos = idx * rows + idy;
        mat_out[trans_pos] = mat_in[pos];
    }
}

/*
*********************************************************************
function name: cpu_matrix_mult

description: product of two matrices (not necessarily square) on the CPU,
             for validating GPU results

parameters:
    &a  CPU host pointer to a m X n matrix (A)
    &b  CPU host pointer to a n X k matrix (B)
    &c  CPU host output pointer to a m X k matrix (C) to store the result

return: none
*********************************************************************
*/
void cpu_matrix_mult(int *h_a, int *h_b, int *h_result, int m, int n, int k) {
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < k; ++j) {
            int tmp = 0;
            for (int h = 0; h < n; ++h) {
                tmp += h_a[i * n + h] * h_b[h * k + j];
            }
            h_result[i * k + j] = tmp;
        }
    }
}

/*
*********************************************************************
function name: main

description: test and compare

parameters: none

return: none
*********************************************************************
*/
int main(int argc, char const *argv[]) {
    int m, n, k;
    /* Fixed seed for illustration */
    srand(3333);
    printf("please type in m n and k\n");
    scanf("%d %d %d", &m, &n, &k);

    // allocate memory in host RAM, h_cc is used to store the CPU result
    int *h_a, *h_b, *h_c, *h_cc;
    h_a = (int*)malloc(sizeof(int)*m*n);
    h_b = (int*)malloc(sizeof(int)*n*k);
    h_c = (int*)malloc(sizeof(int)*m*k);
    h_cc = (int*)malloc(sizeof(int)*m*k);

    // randomly initialize matrix A
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            h_a[i * n + j] = rand() % 1024;
        }
    }

    // randomly initialize matrix B
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < k; ++j) {
            h_b[i * k + j] = rand() % 1024;
        }
    }

    float gpu_elapsed_time_ms, cpu_elapsed_time_ms;

    // some events to count the execution time
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    // start to count execution time of the GPU version
    hipEventRecord(start, 0);

    // Allocate memory space on the device
    int *d_a, *d_b, *d_c;
    hipMalloc((void **)&d_a, sizeof(int)*m*n);
    hipMalloc((void **)&d_b, sizeof(int)*n*k);
    hipMalloc((void **)&d_c, sizeof(int)*m*k);

    // copy matrices A and B from host to device memory
    hipMemcpy(d_a, h_a, sizeof(int)*m*n, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, sizeof(int)*n*k, hipMemcpyHostToDevice);

    dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);

    // Launch kernel
    if (m == n && n == k) {
        //unsigned int grid_rows = sqrt(BLOCK_SIZE);
        //unsigned int grid_cols = m / grid_rows;
        //if(size % grid_rows != 0){ grid_cols++; }
        //dim3 dimGrid(grid_cols, grid_cols, 1);
        //dim3 dimBlock(grid_rows, grid_rows, 1);
        //this is the correct kernel size for different thread sizes..
        hipLaunchKernelGGL(gpu_square_matrix_mult, dimGrid, dimBlock, 0, 0, d_a, d_b, d_c, n);
    } else {
        hipLaunchKernelGGL(gpu_matrix_mult, dimGrid, dimBlock, 0, 0, d_a, d_b, d_c, m, n, k);
    }

    // Transfer results from device to host
    hipMemcpy(h_c, d_c, sizeof(int)*m*k, hipMemcpyDeviceToHost);
    hipDeviceSynchronize();

    // time counting terminates
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);

    // compute time elapsed on GPU computing
    hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU: %f ms.\n\n", m, n, n, k, gpu_elapsed_time_ms);

    // start the CPU version
    hipEventRecord(start, 0);

    //cpu_matrix_mult(h_a, h_b, h_cc, m, n, k);

    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&cpu_elapsed_time_ms, start, stop);
    ///printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %f ms.\n\n", m, n, n, k, cpu_elapsed_time_ms);

    // validate results computed by GPU
    /*int all_ok = 1;
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < k; ++j) {
            //printf("[%d][%d]:%d == [%d][%d]:%d, ", i, j, h_cc[i*k + j], i, j, h_c[i*k + j]);
            if (h_cc[i*k + j] != h_c[i*k + j]) {
                all_ok = 0;
            }
        }
        //printf("\n");
    }

    // roughly compute speedup
    if (all_ok) {
        printf("all results are correct!!!, speedup = %f\n", cpu_elapsed_time_ms / gpu_elapsed_time_ms);
    } else {
        printf("incorrect results\n");
    }*/

    // free memory
    /*hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    hipHostFree(h_a);
    hipHostFree(h_b);
    hipHostFree(h_c);
    hipHostFree(h_cc);*/

    return 0;
}
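// A minimal, hypothetical error-checking sketch for the HIP runtime calls used above.
// The HIP_CHECK macro name and the usage lines are illustrative assumptions, not part
// of the original program or of any HIP header.
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>

#define HIP_CHECK(call)                                                       \
    do {                                                                      \
        hipError_t err_ = (call);                                             \
        if (err_ != hipSuccess) {                                             \
            fprintf(stderr, "HIP error %s at %s:%d\n",                        \
                    hipGetErrorString(err_), __FILE__, __LINE__);             \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

// Usage sketch: wrap each runtime call, and query the error state after a launch.
//   HIP_CHECK(hipMalloc((void **)&d_a, sizeof(int) * m * n));
//   hipLaunchKernelGGL(gpu_matrix_mult, dimGrid, dimBlock, 0, 0, d_a, d_b, d_c, m, n, k);
//   HIP_CHECK(hipGetLastError());      // launch-configuration errors
//   HIP_CHECK(hipDeviceSynchronize()); // errors raised while the kernel ran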
3132fd0f448d5f7d6f77c12a39575f68a93b5285.cu
#include <stdio.h>
#include <string.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <assert.h>

/*
 * file name: matrix.cu
 *
 * matrix.cu implements some commonly used matrix operations in CUDA
 *
 * this is a toy program for learning CUDA; some functions are reusable in other projects
 */

#define BLOCK_SIZE 16

/*
*********************************************************************
function name: gpu_matrix_mult

description: product of two matrices (not necessarily square)

parameters:
    &a  GPU device pointer to a m X n matrix (A)
    &b  GPU device pointer to a n X k matrix (B)
    &c  GPU device output pointer to a m X k matrix (C) to store the result

Note:
    grid and block should be configured as:
        dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
        dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    further speedup can be obtained by using shared memory to decrease global memory access times

return: none
*********************************************************************
*/
__global__ void gpu_matrix_mult(int *a, int *b, int *c, int m, int n, int k) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    if (col < k && row < m) {
        for (int i = 0; i < n; i++) {
            sum += a[row * n + i] * b[i * k + col];
        }
        c[row * k + col] = sum;
    }
}

/*
*********************************************************************
function name: gpu_square_matrix_mult

description: product of two square (n X n) matrices on the GPU, using shared-memory tiling

parameters:
    &a  GPU device pointer to a n X n matrix (A)
    &b  GPU device pointer to a n X n matrix (B)
    &c  GPU device output pointer to a n X n matrix (C) to store the result

Note:
    grid and block should be configured as:
        dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
        dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);

return: none
*********************************************************************
*/
__global__ void gpu_square_matrix_mult(int *d_a, int *d_b, int *d_result, int n) {
    __shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE];

    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    int tmp = 0;
    int idx;

    for (int sub = 0; sub < gridDim.x; ++sub) {
        idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
        if (idx >= n * n) {
            // n may not be divisible by BLOCK_SIZE
            tile_a[threadIdx.y][threadIdx.x] = 0;
        } else {
            tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
        }

        idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
        if (idx >= n * n) {
            tile_b[threadIdx.y][threadIdx.x] = 0;
        } else {
            tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
        }
        __syncthreads();

        for (int k = 0; k < BLOCK_SIZE; ++k) {
            tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
        }
        __syncthreads();
    }
    if (row < n && col < n) {
        d_result[row * n + col] = tmp;
    }
}

/*
*********************************************************************
function name: gpu_matrix_transpose

description: matrix transpose

parameters:
    &mat_in   GPU device pointer to a rows X cols matrix
    &mat_out  GPU device output pointer to a cols X rows matrix to store the result

Note:
    grid and block should be configured as:
        dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
        dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);

return: none
*********************************************************************
*/
__global__ void gpu_matrix_transpose(int* mat_in, int* mat_out, unsigned int rows, unsigned int cols) {
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;

    if (idx < cols && idy < rows) {
        unsigned int pos = idy * cols + idx;
        unsigned int trans_pos = idx * rows + idy;
        mat_out[trans_pos] = mat_in[pos];
    }
}

/*
*********************************************************************
function name: cpu_matrix_mult

description: product of two matrices (not necessarily square) on the CPU,
             for validating GPU results

parameters:
    &a  CPU host pointer to a m X n matrix (A)
    &b  CPU host pointer to a n X k matrix (B)
    &c  CPU host output pointer to a m X k matrix (C) to store the result

return: none
*********************************************************************
*/
void cpu_matrix_mult(int *h_a, int *h_b, int *h_result, int m, int n, int k) {
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < k; ++j) {
            int tmp = 0;
            for (int h = 0; h < n; ++h) {
                tmp += h_a[i * n + h] * h_b[h * k + j];
            }
            h_result[i * k + j] = tmp;
        }
    }
}

/*
*********************************************************************
function name: main

description: test and compare

parameters: none

return: none
*********************************************************************
*/
int main(int argc, char const *argv[]) {
    int m, n, k;
    /* Fixed seed for illustration */
    srand(3333);
    printf("please type in m n and k\n");
    scanf("%d %d %d", &m, &n, &k);

    // allocate memory in host RAM, h_cc is used to store the CPU result
    int *h_a, *h_b, *h_c, *h_cc;
    h_a = (int*)malloc(sizeof(int)*m*n);
    h_b = (int*)malloc(sizeof(int)*n*k);
    h_c = (int*)malloc(sizeof(int)*m*k);
    h_cc = (int*)malloc(sizeof(int)*m*k);

    // randomly initialize matrix A
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            h_a[i * n + j] = rand() % 1024;
        }
    }

    // randomly initialize matrix B
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < k; ++j) {
            h_b[i * k + j] = rand() % 1024;
        }
    }

    float gpu_elapsed_time_ms, cpu_elapsed_time_ms;

    // some events to count the execution time
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // start to count execution time of the GPU version
    cudaEventRecord(start, 0);

    // Allocate memory space on the device
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, sizeof(int)*m*n);
    cudaMalloc((void **)&d_b, sizeof(int)*n*k);
    cudaMalloc((void **)&d_c, sizeof(int)*m*k);

    // copy matrices A and B from host to device memory
    cudaMemcpy(d_a, h_a, sizeof(int)*m*n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(int)*n*k, cudaMemcpyHostToDevice);

    dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);

    // Launch kernel
    if (m == n && n == k) {
        //unsigned int grid_rows = sqrt(BLOCK_SIZE);
        //unsigned int grid_cols = m / grid_rows;
        //if(size % grid_rows != 0){ grid_cols++; }
        //dim3 dimGrid(grid_cols, grid_cols, 1);
        //dim3 dimBlock(grid_rows, grid_rows, 1);
        //this is the correct kernel size for different thread sizes..
        gpu_square_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
    } else {
        gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k);
    }

    // Transfer results from device to host
    cudaMemcpy(h_c, d_c, sizeof(int)*m*k, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    // time counting terminates
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    // compute time elapsed on GPU computing
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU: %f ms.\n\n", m, n, n, k, gpu_elapsed_time_ms);

    // start the CPU version
    cudaEventRecord(start, 0);

    //cpu_matrix_mult(h_a, h_b, h_cc, m, n, k);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop);
    ///printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %f ms.\n\n", m, n, n, k, cpu_elapsed_time_ms);

    // validate results computed by GPU
    /*int all_ok = 1;
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < k; ++j) {
            //printf("[%d][%d]:%d == [%d][%d]:%d, ", i, j, h_cc[i*k + j], i, j, h_c[i*k + j]);
            if (h_cc[i*k + j] != h_c[i*k + j]) {
                all_ok = 0;
            }
        }
        //printf("\n");
    }

    // roughly compute speedup
    if (all_ok) {
        printf("all results are correct!!!, speedup = %f\n", cpu_elapsed_time_ms / gpu_elapsed_time_ms);
    } else {
        printf("incorrect results\n");
    }*/

    // free memory
    /*cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFreeHost(h_cc);*/

    return 0;
}
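// Hedged sketch: the commented-out cleanup in main above pairs malloc'd host buffers
// with cudaFreeHost, but cudaFreeHost only matches cudaMallocHost / cudaHostAlloc;
// buffers from plain malloc must be released with free. The helper below is an
// illustrative assumption (not part of the original program) showing a matched cleanup.
#include <stdlib.h>
#include <cuda_runtime.h>

static void release_buffers(int *d_a, int *d_b, int *d_c,
                            int *h_a, int *h_b, int *h_c, int *h_cc) {
    // Device buffers came from cudaMalloc, so cudaFree is the matching call.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // Host buffers came from malloc, so free (not cudaFreeHost) is the matching call.
    free(h_a);
    free(h_b);
    free(h_c);
    free(h_cc);
}

// Usage sketch: call release_buffers(d_a, d_b, d_c, h_a, h_b, h_c, h_cc) just before
// "return 0;" in main. If pinned host memory were wanted instead (often faster for
// cudaMemcpy), the allocations would switch to cudaMallocHost, paired with cudaFreeHost.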
65f83ab0539879fab21dea92254898fe35647a9f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void shift_forward(int *value) {
    int index = threadIdx.x;
    __shared__ int array[64];
    array[index] = threadIdx.x;
    __syncthreads(); // Make sure all values have been stored before the shift starts

    // Read the neighbouring value first, then synchronize outside the branch:
    // calling __syncthreads() inside a divergent branch is undefined behaviour.
    int tmp = 0;
    if (index < 63) {
        tmp = array[index + 1]; // Save each value before another thread could replace it
    }
    __syncthreads();

    if (index < 63) {
        value[index] = tmp; // Note: element 63 of the output is never written
    }
}

int main(int argc, char **argv) {
    const int ARRAY_SIZE = 64;
    const int SIZE = ARRAY_SIZE * sizeof(int);

    int *d_out;
    hipMalloc((void **)&d_out, SIZE);

    hipLaunchKernelGGL(shift_forward, dim3(1), dim3(64), 0, 0, d_out);

    int h_out[ARRAY_SIZE];
    hipMemcpy(h_out, d_out, SIZE, hipMemcpyDeviceToHost);

    for (int i = 0; i < ARRAY_SIZE; i++) {
        printf("%d ", h_out[i]);
    }
    printf("\n");

    hipFree(d_out);
    return 0;
}
65f83ab0539879fab21dea92254898fe35647a9f.cu
#include <stdio.h>

__global__ void shift_forward(int *value) {
    int index = threadIdx.x;
    __shared__ int array[64];
    array[index] = threadIdx.x;
    __syncthreads(); // Make sure all values have been stored before the shift starts

    // Read the neighbouring value first, then synchronize outside the branch:
    // calling __syncthreads() inside a divergent branch is undefined behaviour.
    int tmp = 0;
    if (index < 63) {
        tmp = array[index + 1]; // Save each value before another thread could replace it
    }
    __syncthreads();

    if (index < 63) {
        value[index] = tmp; // Note: element 63 of the output is never written
    }
}

int main(int argc, char **argv) {
    const int ARRAY_SIZE = 64;
    const int SIZE = ARRAY_SIZE * sizeof(int);

    int *d_out;
    cudaMalloc((void **)&d_out, SIZE);

    shift_forward<<<1, 64>>>(d_out);

    int h_out[ARRAY_SIZE];
    cudaMemcpy(h_out, d_out, SIZE, cudaMemcpyDeviceToHost);

    for (int i = 0; i < ARRAY_SIZE; i++) {
        printf("%d ", h_out[i]);
    }
    printf("\n");

    cudaFree(d_out);
    return 0;
}
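// Hedged sketch: a small host-side check of the expected output of shift_forward.
// For indices 0..62 the kernel writes index + 1; element 63 is never written by the
// kernel, so it is excluded. The function name verify_shift is an illustrative
// assumption, not part of the original program.
#include <stdio.h>

static int verify_shift(const int *h_out, int n) {
    for (int i = 0; i < n - 1; i++) {
        if (h_out[i] != i + 1) {
            printf("mismatch at %d: got %d, expected %d\n", i, h_out[i], i + 1);
            return 0;
        }
    }
    return 1;
}

// Usage sketch, after the cudaMemcpy in main:
//   if (verify_shift(h_out, ARRAY_SIZE)) printf("shift_forward output is correct\n");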
92a44798a07d8315ea250f7f2d1b9e96894c36bb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// #include <vector>
// #include <iostream>
// #include "utils.h"
// #include "cuda_error_check.cuh"
// #include "initial_graph.hpp"
// #include "parse_graph.hpp"

// __global__ void own_kernel(std::vector<initial_vertex> * peeps, int offset, int * anyChange){
//     //update me based on my neighbors. Toggle anyChange as needed.
//     //Offset will tell you who I am.
// }

// void own(std::vector<initial_vertex> * peeps, int blockSize, int blockNum){
//     setTime();
//
//     /*
//      * Do all the things here!
//      **/
//
//     std::cout << "Took " << getTime() << "ms.\n";
// }
92a44798a07d8315ea250f7f2d1b9e96894c36bb.cu
// #include <vector>
// #include <iostream>
// #include "utils.h"
// #include "cuda_error_check.cuh"
// #include "initial_graph.hpp"
// #include "parse_graph.hpp"

// __global__ void own_kernel(std::vector<initial_vertex> * peeps, int offset, int * anyChange){
//     //update me based on my neighbors. Toggle anyChange as needed.
//     //Offset will tell you who I am.
// }

// void own(std::vector<initial_vertex> * peeps, int blockSize, int blockNum){
//     setTime();
//
//     /*
//      * Do all the things here!
//      **/
//
//     std::cout << "Took " << getTime() << "ms.\n";
// }
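// Hedged sketch of the kind of neighbour-update kernel the commented-out stub above
// hints at ("update me based on my neighbors. Toggle anyChange as needed."). The
// layout of initial_vertex is not visible here, so this sketch assumes a flat
// edge-list representation (src, dst, weight arrays) rather than the project's
// std::vector<initial_vertex>, which cannot be dereferenced from device code.
// All names below (relax_edges, src, dst, weight, dist) are illustrative assumptions.
#include <cuda_runtime.h>
#include <climits>

__global__ void relax_edges(const int *src, const int *dst, const int *weight,
                            int *dist, int num_edges, int *anyChange) {
    int e = blockIdx.x * blockDim.x + threadIdx.x;
    if (e >= num_edges) return;

    int du = dist[src[e]];
    if (du == INT_MAX) return;          // source vertex not reached yet, nothing to relax

    int candidate = du + weight[e];
    int v = dst[e];
    if (candidate < dist[v]) {
        // atomicMin keeps concurrent updates to the same destination consistent
        atomicMin(&dist[v], candidate);
        *anyChange = 1;                 // flag that another relaxation sweep is needed
    }
}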